// Copyright (c) 1998-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\include\nkern\nk_cpu.h
//
// WARNING: This file contains some APIs which are internal and are subject
//          to change without notice. Such APIs should therefore not be used
//          outside the Kernel and Hardware Services package.
//

/**
@file
@publishedPartner
@released
*/

#ifndef __NK_CPU_H__
#define __NK_CPU_H__

#include <cpudefs.h>

#ifdef __CPU_ARM
#if defined(__CPU_GENERIC_ARM4__)
// no cache, no MMU
#define __CPU_ARM_ABORT_MODEL_RESTORED
#endif

#if defined(__CPU_ARM710T__) || defined(__CPU_ARM720T__)
#define __CPU_HAS_MMU
#define __CPU_HAS_CACHE
#define __CPU_ARM_ABORT_MODEL_UPDATED
#define __CPU_WRITE_BUFFER
#endif

#ifdef __CPU_SA1__
#define __CPU_HAS_MMU
#define __CPU_HAS_CACHE
#define __CPU_ARM_ABORT_MODEL_RESTORED
#define __CPU_SPLIT_CACHE
#define __CPU_SPLIT_TLB
#define __CPU_WRITE_BUFFER
#define __CPU_HAS_ALT_D_CACHE
#define __CPU_WRITE_BACK_CACHE
#define __CPU_CACHE_FLUSH_BY_DATA_READ
#define __CPU_HAS_SINGLE_ENTRY_DCACHE_FLUSH
#endif

#if defined(__CPU_ARM920T__) || defined(__CPU_ARM925T__) || defined(__CPU_ARM926J__)
#define __CPU_HAS_MMU
#define __CPU_HAS_CACHE
#define __CPU_ARM_ABORT_MODEL_RESTORED
#define __CPU_SPLIT_CACHE
#define __CPU_SPLIT_TLB
#define __CPU_WRITE_BUFFER
#define __CPU_WRITE_BACK_CACHE
#define __CPU_CACHE_FLUSH_BY_WAY_SET_INDEX
#define __CPU_CACHE_POLICY_IN_PTE
#define __CPU_HAS_CACHE_TYPE_REGISTER
#define __CPU_HAS_SINGLE_ENTRY_ITLB_FLUSH
#define __CPU_HAS_SINGLE_ENTRY_ICACHE_FLUSH
#define __CPU_HAS_SINGLE_ENTRY_DCACHE_FLUSH
#endif

#ifdef __CPU_XSCALE__
#define __CPU_HAS_MMU
#define __CPU_HAS_CACHE
#define __CPU_ARM_ABORT_MODEL_RESTORED
#define __CPU_SPLIT_CACHE
#define __CPU_SPLIT_TLB
#define __CPU_WRITE_BUFFER
#ifndef __CPU_XSCALE_MANZANO__
#define __CPU_HAS_ALT_D_CACHE
#endif
#define __CPU_WRITE_BACK_CACHE
#define __CPU_CACHE_WRITE_ALLOCATE
#ifdef __CPU_XSCALE_MANZANO__
#define __CPU_CACHE_FLUSH_BY_WAY_SET_INDEX
#else
#define __CPU_CACHE_FLUSH_BY_LINE_ALLOC
#endif
#define __CPU_CACHE_POLICY_IN_PTE
#define __CPU_HAS_CACHE_TYPE_REGISTER
#define __CPU_HAS_SINGLE_ENTRY_ITLB_FLUSH
#define __CPU_HAS_SINGLE_ENTRY_ICACHE_FLUSH
#define __CPU_HAS_SINGLE_ENTRY_DCACHE_FLUSH
#define __CPU_HAS_BTB
#define __CPU_USE_MMU_TEX_FIELD
#define __CPU_HAS_COPROCESSOR_ACCESS_REG
#define __CPU_HAS_ACTLR
#endif

#if defined(__CPU_ARM1136__) || defined(__CPU_ARM11MP__) || defined(__CPU_ARM1176__) || defined(__CPU_CORTEX_A8__) || defined(__CPU_CORTEX_A9__)
#define __CPU_HAS_MMU
#define __CPU_HAS_CACHE
#define __CPU_CACHE_PHYSICAL_TAG
#define __CPU_SUPPORTS_FAST_PROCESS_SWITCH
#define __CPU_ARM_ABORT_MODEL_RESTORED
#define __CPU_SPLIT_CACHE

#if defined(__CPU_CORTEX_A9__) || defined(__CPU_CORTEX_A8__) || defined(__CPU_ARM1136__)
#define __CPU_SPLIT_TLB
#endif

#if defined(__CPU_CORTEX_A8__)
/* Internal cache controller maintains both inner & outer caches.
 * @internalComponent
 */
#define __CPU_OUTER_CACHE_IS_INTERNAL_CACHE
#endif

#if defined(__CPU_CORTEX_A9__) || defined(__CPU_ARM11MP__)
#define __CPU_SUPPORTS_TLBIMVAA
#endif

#if defined(__CPU_CORTEX_A9__)
#ifdef __SMP__
//	#define __CPU_SUPPORTS_PAGE_TABLE_WALK_TO_L1_CACHE
#endif
#endif

#if (defined(__CPU_ARM1136__) && defined(__CPU_ARM1136_ERRATUM_399234_FIXED) && !defined(__MEMMODEL_FLEXIBLE__)) || (defined(__CPU_ARM11MP__) && defined(__SMP__))
// Page tables on these platforms are either uncached or write-through cached.
#else
// Page/directory tables are fully cached (write-back) on these platforms.
#define __CPU_PAGE_TABLES_FULLY_CACHED
#endif

#define __CPU_WRITE_BUFFER
#define __CPU_WRITE_BACK_CACHE
#define __CPU_CACHE_WRITE_ALLOCATE
#define __CPU_CACHE_FLUSH_BY_WAY_SET_INDEX
#define __CPU_CACHE_POLICY_IN_PTE
#define __CPU_HAS_CACHE_TYPE_REGISTER
#define __CPU_HAS_SINGLE_ENTRY_ITLB_FLUSH
#define __CPU_HAS_SINGLE_ENTRY_ICACHE_FLUSH
#define __CPU_HAS_SINGLE_ENTRY_DCACHE_FLUSH
#define __CPU_HAS_BTB
#define __CPU_HAS_COPROCESSOR_ACCESS_REG
#define __CPU_HAS_PREFETCH_BUFFER
#define __CPU_HAS_ACTLR
#define __CPU_HAS_TTBR1

#if !defined(__CPU_ARM1136__)
#define __CPU_MEMORY_TYPE_REMAPPING
#endif

#if defined(__CPU_ARM11MP__) && defined(__SMP__)
#define __BROADCAST_CACHE_MAINTENANCE__
#endif

#if defined(__CPU_ARM11MP__) || defined(__CPU_ARM1176__)
#define __CPU_NEEDS_BTAC_FLUSH_AFTER_ASID_CHANGE
#endif

#define __CPU_CACHE_HAS_COLOUR
#define __CPU_I_CACHE_HAS_COLOUR

#if defined(__CPU_ARM1136__) || defined(__CPU_ARM1176__)
#define __CPU_D_CACHE_HAS_COLOUR
#elif defined(__CPU_ARM11MP__)
// MPCore has a physically indexed D cache, so no colour problems
#else
// Assume other ARM cores have a virtually indexed D cache with broken alias avoidance hardware...
#define __CPU_D_CACHE_HAS_COLOUR
#endif


#endif


#ifdef __FIQ_RESERVED_FOR_SECURE_STATE__
#define __FIQ_IS_UNCONTROLLED__
#endif

#if defined(__CPU_MEMORY_TYPE_REMAPPING) || defined(__MEMMODEL_FLEXIBLE__)
#define __MMU_USE_SYMMETRIC_ACCESS_PERMISSIONS
#endif


#if defined(__CPU_ARM1136__) && !defined(__CPU_ARM1136_ERRATUM_353494_FIXED) && defined(__MMU_USE_SYMMETRIC_ACCESS_PERMISSIONS)
#define ERRATUM_353494_MODE_CHANGE(cc,r)	FLUSH_BTB(cc,r)
#else
#define ERRATUM_353494_MODE_CHANGE(cc,r)
#endif

#ifdef __CPU_HAS_MMU
#define __CPU_ARM_USE_DOMAINS
#endif
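// Illustrative sketch (not part of the original header): judging by its
// definition, ERRATUM_353494_MODE_CHANGE appears intended to be invoked
// immediately after a CPSR mode change, so that affected 1136 cores get a
// BTB flush and all other configurations compile to nothing, e.g.:
//		asm("msr cpsr_c, #0xd3 ");			// switch to mode_svc, interrupts masked
//		ERRATUM_353494_MODE_CHANGE(,r12);	// expands to FLUSH_BTB(,r12) or to nothing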
#if defined(__ARM_L210_CACHE__) || defined(__ARM_L220_CACHE__) || defined(__ARM_PL310_CACHE__)
/**
Indicates the presence of an external cache controller.
@internalTechnology
*/
#define __HAS_EXTERNAL_CACHE__
#endif

#ifndef __CPU_HAS_MMU
#define CPWAIT(cc,r)	/**< @internalTechnology */
#endif

#include <arm_vfp.h>

// CP15 definitions
#if defined(__CPU_ARM710T__) || defined(__CPU_ARM720T__)
#define FLUSH_DCACHE(cc,r)	asm("mcr"#cc" p15, 0, "#r", c7, c7, 0 ");	/**< @internalTechnology */
#define FLUSH_ICACHE(cc,r)	asm("mcr"#cc" p15, 0, "#r", c7, c7, 0 ");	/**< @internalTechnology */
#define FLUSH_IDCACHE(cc,r)	asm("mcr"#cc" p15, 0, "#r", c7, c7, 0 ");	/**< @internalTechnology */
#define FLUSH_DTLB(cc,r)	asm("mcr"#cc" p15, 0, "#r", c8, c7, 0 ");	/**< @internalTechnology */
#define FLUSH_ITLB(cc,r)	asm("mcr"#cc" p15, 0, "#r", c8, c7, 0 ");	/**< @internalTechnology */
#define FLUSH_IDTLB(cc,r)	asm("mcr"#cc" p15, 0, "#r", c8, c7, 0 ");	/**< @internalTechnology */
#define FLUSH_DTLB_ENTRY(cc,addr)	asm("mcr"#cc" p15, 0, "#addr", c8, c7, 1 ");	/**< @internalTechnology */
#define FLUSH_ITLB_ENTRY(cc,addr)	asm("mcr"#cc" p15, 0, "#addr", c8, c7, 1 ");	/**< @internalTechnology */
#define FLUSH_IDTLB_ENTRY(cc,addr)	asm("mcr"#cc" p15, 0, "#addr", c8, c7, 1 ");	/**< @internalTechnology */
#define DRAIN_WRITE_BUFFER(cc,r,rd)	// this seems dodgy on Windermere and it works without it
#define CPWAIT(cc,r)	/**< @internalTechnology */

#elif defined(__CPU_SA1__)
#define FLUSH_ICACHE(cc,r)	asm("mcr"#cc" p15, 0, "#r", c7, c5, 0 ");	/**< @internalTechnology */
#define PURGE_DCACHE_LINE(cc,r)	asm("mcr"#cc" p15, 0, "#r", c7, c6, 1 ");	/**< @internalTechnology */
#define CLEAN_DCACHE_LINE(cc,r)	asm("mcr"#cc" p15, 0, "#r", c7, c10, 1 ");	/**< @internalTechnology */
#define FLUSH_DCACHE_LINE(cc,r)	asm("mcr"#cc" p15, 0, "#r", c7, c10, 1 "); asm("mcr"#cc" p15, 0, "#r", c7, c6, 1 ");	/**< @internalTechnology */
#define FLUSH_DTLB(cc,r)	asm("mcr"#cc" p15, 0, "#r", c8, c6, 0 ");	/**< @internalTechnology */
#define FLUSH_ITLB(cc,r)	asm("mcr"#cc" p15, 0, "#r", c8, c5, 0 ");	/**< @internalTechnology */
#define FLUSH_IDTLB(cc,r)	asm("mcr"#cc" p15, 0, "#r", c8, c7, 0 ");	/**< @internalTechnology */
#define FLUSH_DTLB_ENTRY(cc,addr)	asm("mcr"#cc" p15, 0, "#addr", c8, c6, 1 ");	/**< @internalTechnology */
#define DRAIN_WRITE_BUFFER(cc,r,rd)	asm("mcr"#cc" p15, 0, "#r", c7, c10, 4 ");
#define CPWAIT(cc,r)	/**< @internalTechnology */

#elif defined(__CPU_ARM920T__) || defined(__CPU_ARM925T__) || defined(__CPU_ARM926J__)
#define FLUSH_ICACHE(cc,r)	asm("mcr"#cc" p15, 0, "#r", c7, c5, 0 ");	/**< @internalTechnology */
#define FLUSH_ICACHE_LINE(cc,r,tmp)	asm("mcr"#cc" p15, 0, "#r", c7, c5, 1 ");	/**< @internalTechnology */
#define PURGE_DCACHE_LINE(cc,r)	asm("mcr"#cc" p15, 0, "#r", c7, c6, 1 ");	/**< @internalTechnology */
#define CLEAN_DCACHE_LINE(cc,r)	asm("mcr"#cc" p15, 0, "#r", c7, c10, 1 ");	/**< @internalTechnology */
#define CLEAN_DCACHE_INDEX(cc,r)	asm("mcr"#cc" p15, 0, "#r", c7, c10, 2 ");	/**< @internalTechnology */
#define FLUSH_DCACHE_LINE(cc,r)	asm("mcr"#cc" p15, 0, "#r", c7, c14, 1 ");	/**< @internalTechnology */
#define FLUSH_DCACHE_INDEX(cc,r)	asm("mcr"#cc" p15, 0, "#r", c7, c14, 2 ");	/**< @internalTechnology */
#define FLUSH_DTLB(cc,r)	asm("mcr"#cc" p15, 0, "#r", c8, c6, 0 ");	/**< @internalTechnology */
#define FLUSH_ITLB(cc,r)	asm("mcr"#cc" p15, 0, "#r", c8, c5, 0 ");	/**< @internalTechnology */
#define FLUSH_IDTLB(cc,r)	asm("mcr"#cc" p15, 0, "#r", c8, c7, 0 ");	/**< @internalTechnology */
#define FLUSH_DTLB_ENTRY(cc,addr)	asm("mcr"#cc" p15, 0, "#addr", c8, c6, 1 ");	/**< @internalTechnology */
#define FLUSH_ITLB_ENTRY(cc,addr)	asm("mcr"#cc" p15, 0, "#addr", c8, c5, 1 ");	/**< @internalTechnology */
#define DRAIN_WRITE_BUFFER(cc,r,rd)	asm("mcr"#cc" p15, 0, "#r", c7, c10, 4 ");
#define CPWAIT(cc,r)	/**< @internalTechnology */
#define CACHE_MAINTENANCE_PDE_PTE_UPDATED(r)	DRAIN_WRITE_BUFFER(,r,r);

#elif defined(__CPU_XSCALE__)
//#define FLUSH_ICACHE(cc,r)	asm("mcr"#cc" p15, 0, "#r", c7, c5, 0 ");
#define FLUSH_ICACHE(cc,r)	asm("mcr"#cc" p15, 0, "#r", c7, c5, 0; sub"#cc" pc, pc, #4 ");	/**< @internalTechnology */ // A step hack
#define FLUSH_ICACHE_LINE(cc,r,tmp)	asm("mcr"#cc" p15, 0, "#r", c7, c5, 1 ");	/**< @internalTechnology */
#ifdef __CPU_XSCALE_MANZANO__
#define PURGE_DCACHE_LINE(cc,r)	asm("mcr"#cc" p15, 0, "#r", c7, c6, 1 ");	/**< @internalTechnology */
#define CLEAN_DCACHE_LINE(cc,r)	asm("mcr"#cc" p15, 0, "#r", c7, c10, 1 ");	/**< @internalTechnology */
#define CLEAN_DCACHE_INDEX(cc,r)	asm("mcr"#cc" p15, 0, "#r", c7, c10, 2 ");	/**< @internalTechnology */
#define FLUSH_DCACHE_LINE(cc,r)	asm("mcr"#cc" p15, 0, "#r", c7, c14, 1 ");	/**< @internalTechnology */
#define FLUSH_DCACHE_INDEX(cc,r)	asm("mcr"#cc" p15, 0, "#r", c7, c14, 2 ");	/**< @internalTechnology */
#else
#define PURGE_DCACHE_LINE(cc,r)	asm("mcr"#cc" p15, 0, "#r", c7, c6, 1 "); asm("nop ");	/**< @internalTechnology */ // PXA250 ERRATUM 96
#define CLEAN_DCACHE_LINE(cc,r)	asm("mcr"#cc" p15, 0, "#r", c7, c10, 1 "); asm("nop ");	/**< @internalTechnology */ // PXA250 ERRATUM 96
#define FLUSH_DCACHE_LINE(cc,r)	asm("mcr"#cc" p15, 0, "#r", c7, c10, 1 "); asm("mcr"#cc" p15, 0, "#r", c7, c6, 1 "); asm("nop ");	/**< @internalTechnology */ // PXA250 ERRATUM 96
#define ALLOC_DCACHE_LINE(cc,r)	asm("mcr"#cc" p15, 0, "#r", c7, c2, 5 ");	/**< @internalTechnology */
#endif
#define FLUSH_DTLB(cc,r)	asm("mcr"#cc" p15, 0, "#r", c8, c6, 0 ");	/**< @internalTechnology */
#define FLUSH_ITLB(cc,r)	asm("mcr"#cc" p15, 0, "#r", c8, c5, 0 ");	/**< @internalTechnology */
#define FLUSH_IDTLB(cc,r)	asm("mcr"#cc" p15, 0, "#r", c8, c7, 0 ");	/**< @internalTechnology */
#define FLUSH_DTLB_ENTRY(cc,addr)	asm("mcr"#cc" p15, 0, "#addr", c8, c6, 1 "); asm("nop "); asm("nop ");	/**< @internalTechnology */ // PXA250 ERRATUM 21
#define FLUSH_ITLB_ENTRY(cc,addr)	asm("mcr"#cc" p15, 0, "#addr", c8, c5, 1 ");	/**< @internalTechnology */

#ifdef __CPU_XSCALE_MANZANO__
#define DRAIN_WRITE_BUFFER(cc,r,rd)	asm("mcr"#cc" p15, 0, "#r", c7, c10, 4 ");
#else //__CPU_XSCALE_MANZANO__
// PXA250 ERRATUM 14
#define DRAIN_WRITE_BUFFER(cc,r,rd)	asm("mcr"#cc" p15, 0, "#r", c7, c10, 4 "); \
					asm("ldr"#cc" "#rd", [pc] "); \
					asm("add pc, pc, #0 "); \
					asm(".word %a0" : : "i" ((TInt)&SuperPageAddress)); \
					asm("ldr"#cc" "#rd", ["#rd"] "); \
					asm("ldr"#cc" "#rd", ["#rd", #%a0]" : : "i" _FOFF(TSuperPage,iUncachedAddress)); \
					asm("ldr"#cc" "#rd", ["#rd"] ");
#endif //else __CPU_XSCALE_MANZANO__
//#define FLUSH_BTB(cc,r)	asm("mcr"#cc" p15, 0, "#r", c7, c5, 6 ");
#define FLUSH_BTB(cc,r)		asm("mcr"#cc" p15, 0, "#r", c7, c5, 6; sub"#cc" pc, pc, #4 ");	/**< @internalTechnology */ // A step hack
#define CPWAIT(cc,r)		asm("mrc"#cc" p15, 0, "#r", c2, c0, 0; mov"#cc" "#r","#r"; sub"#cc" pc, pc, #4 ");	/**< @internalTechnology */
#define GET_CAR(cc,r)		asm("mrc"#cc" p15, 0, "#r", c15, c1, 0 ");	/**< @internalTechnology */
#define SET_CAR(cc,r)		asm("mcr"#cc" p15, 0, "#r", c15, c1, 0 ");	/**< @internalTechnology */
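// Illustrative sketch (an assumption, not from the original source): a
// read-modify-write of the XScale coprocessor access register using the
// macros above, e.g. to grant access to CP0 for the MAR/MRA/MIA family:
//		GET_CAR(,r0);				// r0 <- coprocessor access register
//		asm("orr r0, r0, #1 ");		// set bit 0 = allow CP0 access
//		SET_CAR(,r0);
//		CPWAIT(,r0);				// wait for the CP15 write to take effect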
0; mov"#cc" "#r","#r"; sub"#cc" pc, pc, #4 "); /**< @internalTechnology */ sl@0: #define GET_CAR(cc,r) asm("mrc"#cc" p15, 0, "#r", c15, c1, 0 "); /**< @internalTechnology */ sl@0: #define SET_CAR(cc,r) asm("mcr"#cc" p15, 0, "#r", c15, c1, 0 "); /**< @internalTechnology */ sl@0: sl@0: #elif defined(__CPU_ARMV6) // end of elif __CPU_XSCALE sl@0: sl@0: #if !defined(__CPU_ARM1136_ERRATUM_411920_FIXED) && (defined(__CPU_ARM1136__) || defined(__CPU_ARM1176__)) sl@0: /** @internalTechnology */ sl@0: #define FLUSH_ICACHE(cc,r,rt) asm("mrs "#rt", cpsr"); \ sl@0: CPSIDAIF; \ sl@0: asm("mcr"#cc" p15, 0, "#r", c7, c5, 0 "); \ sl@0: asm("mcr"#cc" p15, 0, "#r", c7, c5, 0 "); \ sl@0: asm("mcr"#cc" p15, 0, "#r", c7, c5, 0 "); \ sl@0: asm("mcr"#cc" p15, 0, "#r", c7, c5, 0 "); \ sl@0: asm("msr cpsr_c, "#rt); \ sl@0: asm("nop"); \ sl@0: asm("nop"); \ sl@0: asm("nop"); \ sl@0: asm("nop"); \ sl@0: asm("nop"); \ sl@0: asm("nop"); \ sl@0: asm("nop"); \ sl@0: asm("nop"); \ sl@0: asm("nop"); \ sl@0: asm("nop"); \ sl@0: asm("nop"); sl@0: sl@0: #else sl@0: #define FLUSH_ICACHE(cc,r) asm("mcr"#cc" p15, 0, "#r", c7, c5, 0 "); /**< @internalTechnology */ sl@0: #endif // else !(__CPU_ARM1136_ERRATUM_411920_FIXED) && (__CPU_ARM1136__ || __CPU_ARM1176__) sl@0: #if defined(__CPU_ARM1136_ERRATUM_371025_FIXED) || !defined(__CPU_ARM1136__) sl@0: sl@0: #if !defined(__CPU_ARM1176_ERRATUM_720013_FIXED) && defined(__CPU_ARM1176__) sl@0: #define FLUSH_ICACHE_LINE(cc,r,tmp) asm("mcr"#cc" p15, 0, "#r", c7, c5, 1 "); \ sl@0: asm("mcr"#cc" p15, 0, "#r", c7, c5, 1 "); /**< @internalTechnology */ sl@0: #else sl@0: #define FLUSH_ICACHE_LINE(cc,r,tmp) asm("mcr"#cc" p15, 0, "#r", c7, c5, 1 "); /**< @internalTechnology */ sl@0: #endif // !defined(__CPU_ARM1176_ERRATUM_720013_FIXED) && defined(__CPU_ARM1176__) sl@0: sl@0: #else // workaround for erratum 371025 of 1136... 
/** @internalTechnology */
#define FLUSH_ICACHE_LINE(cc,r,tmp)	asm("orr"#cc" "#tmp", "#r", #0xC0000000 "); \
					asm("bic"#cc" "#tmp", "#tmp", #1 "); \
					asm("mcr"#cc" p15, 0, "#tmp", c7, c5, 2 "); \
					asm("sub"#cc" "#tmp", "#tmp", #0x40000000 "); \
					asm("mcr"#cc" p15, 0, "#tmp", c7, c5, 2 "); \
					asm("sub"#cc" "#tmp", "#tmp", #0x40000000 "); \
					asm("mcr"#cc" p15, 0, "#tmp", c7, c5, 2 "); \
					asm("sub"#cc" "#tmp", "#tmp", #0x40000000 "); \
					asm("mcr"#cc" p15, 0, "#tmp", c7, c5, 2 ");
#endif //else (__CPU_ARM1136_ERRATUM_371025_FIXED) || !(__CPU_ARM1136__)

#if !defined(__CPU_ARM1176_ERRATUM_720013_FIXED) && defined(__CPU_ARM1176__)
// It is commented out to ensure it is not used on 1176 cores with the 720013 erratum
// #define FLUSH_ICACHE_INDEX(cc,r)	asm("mcr"#cc" p15, 0, "#r", c7, c5, 2 ");
#else
#define FLUSH_ICACHE_INDEX(cc,r)	asm("mcr"#cc" p15, 0, "#r", c7, c5, 2 ");	/**< @internalTechnology */
#endif //!defined(__CPU_ARM1176_ERRATUM_720013_FIXED) && defined(__CPU_ARM1176__)
#define PURGE_DCACHE_LINE(cc,r)	asm("mcr"#cc" p15, 0, "#r", c7, c6, 1 ");	/**< @internalTechnology */
#define PURGE_DCACHE_INDEX(cc,r)	asm("mcr"#cc" p15, 0, "#r", c7, c6, 2 ");	/**< @internalTechnology */
#define CLEAN_DCACHE_LINE(cc,r)	asm("mcr"#cc" p15, 0, "#r", c7, c10, 1 ");	/**< @internalTechnology */
#define CLEAN_DCACHE_INDEX(cc,r)	asm("mcr"#cc" p15, 0, "#r", c7, c10, 2 ");	/**< @internalTechnology */
#define FLUSH_DCACHE_LINE(cc,r)	asm("mcr"#cc" p15, 0, "#r", c7, c14, 1 ");	/**< @internalTechnology */
#define FLUSH_DCACHE_INDEX(cc,r)	asm("mcr"#cc" p15, 0, "#r", c7, c14, 2 ");	/**< @internalTechnology */
#define FLUSH_ITLB(cc,r)	asm("mcr"#cc" p15, 0, "#r", c8, c5, 0 ");	/**< @internalTechnology */
#define FLUSH_DTLB(cc,r)	asm("mcr"#cc" p15, 0, "#r", c8, c6, 0 ");	/**< @internalTechnology */
#define FLUSH_IDTLB(cc,r)	asm("mcr"#cc" p15, 0, "#r", c8, c7, 0 ");	/**< @internalTechnology */

// addr must include ASID
#if defined(__CPU_ARM11MP__)
#define FLUSH_ITLB_ENTRY(cc,addr)	asm("mcr"#cc" p15, 0, "#addr", c8, c5, 3 ");	/**< @internalTechnology */
#define FLUSH_DTLB_ENTRY(cc,addr)	asm("mcr"#cc" p15, 0, "#addr", c8, c6, 3 ");	/**< @internalTechnology */
#else //(__CPU_ARM11MP__)
#define FLUSH_ITLB_ENTRY(cc,addr)	asm("mcr"#cc" p15, 0, "#addr", c8, c5, 1 ");	/**< @internalTechnology */
#define FLUSH_DTLB_ENTRY(cc,addr)	asm("mcr"#cc" p15, 0, "#addr", c8, c6, 1 ");	/**< @internalTechnology */
#endif // else (__CPU_ARM11MP__)
#define FLUSH_ITLB_ASID(cc,asid)	asm("mcr"#cc" p15, 0, "#asid", c8, c5, 2 ");	/**< @internalTechnology */
#define FLUSH_DTLB_ASID(cc,asid)	asm("mcr"#cc" p15, 0, "#asid", c8, c6, 2 ");	/**< @internalTechnology */

#define DRAIN_WRITE_BUFFER(cc,r,rd)	asm("mcr"#cc" p15, 0, "#r", c7, c10, 4 ");
#define DATA_MEMORY_BARRIER(cc,r)	asm("mcr"#cc" p15, 0, "#r", c7, c10, 5 ");
#define FLUSH_PREFETCH_BUFFER(cc,r)	asm("mcr"#cc" p15, 0, "#r", c7, c5, 4 ");	/**< @internalTechnology */
#define FLUSH_BTB(cc,r)		asm("mcr"#cc" p15, 0, "#r", c7, c5, 6 ");	/**< @internalTechnology */
#define CPWAIT(cc,r)	/**< @internalTechnology */ // not sure about this
#define GET_CAR(cc,r)	asm("mrc"#cc" p15, 0, "#r", c1, c0, 2 ");	/**< @internalTechnology */
#define SET_CAR(cc,r)	asm("mcr"#cc" p15, 0, "#r", c1, c0, 2 ");	/**< @internalTechnology */

#if defined(__CPU_PAGE_TABLES_FULLY_CACHED)
#define CACHE_MAINTENANCE_PDE_PTE_UPDATED(r)	CLEAN_DCACHE_LINE(,r);\
						DRAIN_WRITE_BUFFER(,r,r);
#else
#define CACHE_MAINTENANCE_PDE_PTE_UPDATED(r)	DRAIN_WRITE_BUFFER(,r,r);
#endif //end of __CPU_PAGE_TABLES_FULLY_CACHED
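// Illustrative sketch (an assumption, not from the original source): making a
// newly written instruction at the address in r0 visible to the ARMv6
// instruction stream with the macros above:
//		CLEAN_DCACHE_LINE(,r0);			// write the new code out of the D cache
//		DRAIN_WRITE_BUFFER(,r0,r1);		// ensure the write has reached memory
//		FLUSH_ICACHE_LINE(,r0,r1);		// discard any stale copy in the I cache
//		FLUSH_PREFETCH_BUFFER(,r1);		// refetch instructions already in flight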
#elif defined(__CPU_ARMV7) // end of elif (__CPU_ARMV6)

// Define new-style cache/TLB maintenance instructions
#if defined(__CPU_CORTEX_A9__) && !defined(__CPU_ARM_A9_ERRATUM_571618_FIXED)
// ARM Cortex-A9 MPCore erratum 571618 workaround
// Execute memory barrier before interruptible CP15 operations
#define ICIALLU		asm("mcr p15, 0, r0, c7, c10, 5 "); \
			asm("mcr p15, 0, r0, c7, c5, 0 ");	/**< @internalTechnology */
#else
#define ICIALLU		asm("mcr p15, 0, r0, c7, c5, 0 ");	/**< @internalTechnology */
#endif // end of else (__CPU_CORTEX_A9__) && !(__CPU_ARM_A9_ERRATUM_571618_FIXED)
#define ICIMVAU(r)	asm("mcr p15, 0, "#r", c7, c5, 1 ");	/**< @internalTechnology */
#define BPIALL		asm("mcr p15, 0, r0, c7, c5, 6 ");	/**< @internalTechnology */
#define BPIMVA(r)	asm("mcr p15, 0, "#r", c7, c5, 7 ");	/**< @internalTechnology */
#define DCIMVAC(r)	asm("mcr p15, 0, "#r", c7, c6, 1 ");	/**< @internalTechnology */
#define DCISW(r)	asm("mcr p15, 0, "#r", c7, c6, 2 ");	/**< @internalTechnology */
#define DCCMVAC(r)	asm("mcr p15, 0, "#r", c7, c10, 1 ");	/**< @internalTechnology */
#define DCCSW(r)	asm("mcr p15, 0, "#r", c7, c10, 2 ");	/**< @internalTechnology */
#define DCCMVAU(r)	asm("mcr p15, 0, "#r", c7, c11, 1 ");	/**< @internalTechnology */
#define DCCIMVAC(r)	asm("mcr p15, 0, "#r", c7, c14, 1 ");	/**< @internalTechnology */
#define DCCISW(r)	asm("mcr p15, 0, "#r", c7, c14, 2 ");	/**< @internalTechnology */

#ifdef __SMP__
#if defined(__CPU_CORTEX_A9__) && !defined(__CPU_ARM_A9_ERRATUM_571618_FIXED)
// ARM Cortex-A9 MPCore erratum 571618 workaround
// Execute memory barrier before interruptible CP15 operations
#define ICIALLUIS	asm("mcr p15, 0, r0, c7, c10, 5 "); \
			asm("mcr p15, 0, r0, c7, c1, 0 ");	/**< @internalTechnology */
#else
#define ICIALLUIS	asm("mcr p15, 0, r0, c7, c1, 0 ");	/**< @internalTechnology */
#endif //end of else (__CPU_CORTEX_A9__) && !(__CPU_ARM_A9_ERRATUM_571618_FIXED)
#define BPIALLIS	asm("mcr p15, 0, r0, c7, c1, 6 ");	/**< @internalTechnology */
#endif // end of __SMP__

#ifdef __CPU_SPLIT_TLB
#define ITLBIALL	asm("mcr p15, 0, r0, c8, c5, 0 ");	/**< @internalTechnology */
#define ITLBIMVA(r)	asm("mcr p15, 0, "#r", c8, c5, 1 ");	/**< @internalTechnology */
#define ITLBIASID(r)	asm("mcr p15, 0, "#r", c8, c5, 2 ");	/**< @internalTechnology */
#define DTLBIALL	asm("mcr p15, 0, r0, c8, c6, 0 ");	/**< @internalTechnology */
#define DTLBIMVA(r)	asm("mcr p15, 0, "#r", c8, c6, 1 ");	/**< @internalTechnology */
#define DTLBIASID(r)	asm("mcr p15, 0, "#r", c8, c6, 2 ");	/**< @internalTechnology */
#endif
#define UTLBIALL	asm("mcr p15, 0, r0, c8, c7, 0 ");	/**< @internalTechnology */
#define UTLBIMVA(r)	asm("mcr p15, 0, "#r", c8, c7, 1 ");	/**< @internalTechnology */
#if defined(__CPU_CORTEX_A9__) && !defined(__CPU_ARM_A9_ERRATUM_571618_FIXED)
// ARM Cortex-A9 MPCore erratum 571618 workaround
// Execute memory barrier before interruptible CP15 operations
#define UTLBIASID(r)	asm("mcr p15, 0, r0, c7, c10, 5 "); \
			asm("mcr p15, 0, "#r", c8, c7, 2 ");	/**< @internalTechnology */
asm("mcr p15, 0, "#r", c8, c7, 2 "); /**< @internalTechnology */ sl@0: #else sl@0: #define UTLBIASID(r) asm("mcr p15, 0, "#r", c8, c7, 2 "); /**< @internalTechnology */ sl@0: #endif // end of else (__CPU_CORTEX_A9__) && !(__CPU_ARM_A9_ERRATUM_571618_FIXED) sl@0: sl@0: #ifdef __CPU_SUPPORTS_TLBIMVAA sl@0: #ifdef __CPU_SPLIT_TLB sl@0: #define ITLBIMVAA(r) asm("mcr p15, 0, "#r", c8, c5, 3 "); /**< @internalTechnology */ sl@0: #define DTLBIMVAA(r) asm("mcr p15, 0, "#r", c8, c6, 3 "); /**< @internalTechnology */ sl@0: #endif // end of __CPU_SPLIT_TLB sl@0: #define UTLBIMVAA(r) asm("mcr p15, 0, "#r", c8, c7, 3 "); /**< @internalTechnology */ sl@0: #endif // end of __CPU_SUPPORTS_TLBIMVAA sl@0: sl@0: #ifdef __SMP__ sl@0: #ifdef __CPU_SPLIT_TLB sl@0: #define ITLBIALLIS asm("mcr p15, 0, r0, c8, c3, 0 "); /**< @internalTechnology */ sl@0: #define ITLBIMVAIS(r) asm("mcr p15, 0, "#r", c8, c3, 1 "); /**< @internalTechnology */ sl@0: #if defined(__CPU_CORTEX_A9__) && !defined(__CPU_ARM_A9_ERRATUM_571618_FIXED) sl@0: // ARM Cortex-A9 MPCore erratum 571618 workaround sl@0: // Execute memory barrier before interruptible CP15 operations sl@0: #define ITLBIASIDIS(r) asm("mcr p15, 0, r0, c7, c10, 5 "); \ sl@0: asm("mcr p15, 0, "#r", c8, c3, 2 "); /**< @internalTechnology */ sl@0: #else sl@0: #define ITLBIASIDIS(r) asm("mcr p15, 0, "#r", c8, c3, 2 "); /**< @internalTechnology */ sl@0: #endif // end of else (__CPU_CORTEX_A9__) && !(__CPU_ARM_A9_ERRATUM_571618_FIXED) sl@0: #define DTLBIALLIS asm("mcr p15, 0, r0, c8, c3, 0 "); /**< @internalTechnology */ sl@0: #define DTLBIMVAIS(r) asm("mcr p15, 0, "#r", c8, c3, 1 "); /**< @internalTechnology */ sl@0: #if defined(__CPU_CORTEX_A9__) && !defined(__CPU_ARM_A9_ERRATUM_571618_FIXED) sl@0: // ARM Cortex-A9 MPCore erratum 571618 workaround sl@0: // Execute memory barrier before interruptible CP15 operations sl@0: #define DTLBIASIDIS(r) asm("mcr p15, 0, r0, c7, c10, 5 "); \ sl@0: asm("mcr p15, 0, "#r", c8, c3, 2 "); /**< @internalTechnology */ sl@0: #else sl@0: #define DTLBIASIDIS(r) asm("mcr p15, 0, "#r", c8, c3, 2 "); /**< @internalTechnology */ sl@0: #endif // end of else (__CPU_CORTEX_A9__) && !(__CPU_ARM_A9_ERRATUM_571618_FIXED) sl@0: #endif // end of __CPU_SPLIT_TLB sl@0: #define UTLBIALLIS asm("mcr p15, 0, r0, c8, c3, 0 "); /**< @internalTechnology */ sl@0: #define UTLBIMVAIS(r) asm("mcr p15, 0, "#r", c8, c3, 1 "); /**< @internalTechnology */ sl@0: #if defined(__CPU_CORTEX_A9__) && !defined(__CPU_ARM_A9_ERRATUM_571618_FIXED) sl@0: // ARM Cortex-A9 MPCore erratum 571618 workaround sl@0: // Execute memory barrier before interruptible CP15 operations sl@0: #define UTLBIASIDIS(r) asm("mcr p15, 0, r0, c7, c10, 5 "); \ sl@0: asm("mcr p15, 0, "#r", c8, c3, 2 "); /**< @internalTechnology */ sl@0: #else sl@0: #define UTLBIASIDIS(r) asm("mcr p15, 0, "#r", c8, c3, 2 "); /**< @internalTechnology */ sl@0: #endif // end of else (__CPU_CORTEX_A9__) && !(__CPU_ARM_A9_ERRATUM_571618_FIXED) sl@0: sl@0: #ifdef __CPU_SUPPORTS_TLBIMVAA sl@0: #ifdef __CPU_SPLIT_TLB sl@0: #define ITLBIMVAAIS(r) asm("mcr p15, 0, "#r", c8, c3, 3 "); /**< @internalTechnology */ sl@0: #define DTLBIMVAAIS(r) asm("mcr p15, 0, "#r", c8, c3, 3 "); /**< @internalTechnology */ sl@0: #endif // end of __CPU_SPLIT_TLB sl@0: #define UTLBIMVAAIS(r) asm("mcr p15, 0, "#r", c8, c3, 3 "); /**< @internalTechnology */ sl@0: #endif // end of __CPU_SUPPORTS_TLBIMVAA sl@0: #endif // end of __SMP__ sl@0: sl@0: sl@0: #define DRAIN_WRITE_BUFFER(cc,r,rd) __DATA_SYNC_BARRIER__(r) sl@0: #define DATA_MEMORY_BARRIER(cc,r) 
#define DRAIN_WRITE_BUFFER(cc,r,rd)	__DATA_SYNC_BARRIER__(r)
#define DATA_MEMORY_BARRIER(cc,r)	__DATA_MEMORY_BARRIER__(r)
#define FLUSH_PREFETCH_BUFFER(cc,r)	__INST_SYNC_BARRIER__(r)	/**< @internalTechnology */
//#define FLUSH_BTB(cc,r)	asm("mcr"#cc" p15, 0, "#r", c7, c5, 6 ");	/**< @internalTechnology */

#define CPWAIT(cc,r)	/**< @internalTechnology */ // not sure about this
#define GET_CAR(cc,r)	asm("mrc"#cc" p15, 0, "#r", c1, c0, 2 ");	/**< @internalTechnology */
#define SET_CAR(cc,r)	asm("mcr"#cc" p15, 0, "#r", c1, c0, 2 "); \
			__INST_SYNC_BARRIER__(r)	/**< @internalTechnology */

#if !defined(__CPU_SUPPORTS_PAGE_TABLE_WALK_TO_L1_CACHE) && defined(__CPU_PAGE_TABLES_FULLY_CACHED)
#define CACHE_MAINTENANCE_PDE_PTE_UPDATED(r)	DCCMVAU(r); \
						__DATA_SYNC_BARRIER__(r);
#else
#define CACHE_MAINTENANCE_PDE_PTE_UPDATED(r)	__DATA_SYNC_BARRIER__(r);
#endif // end of !(__CPU_SUPPORTS_PAGE_TABLE_WALK_TO_L1_CACHE) && (__CPU_PAGE_TABLES_FULLY_CACHED)

#endif // end of elif (__CPU_ARMV7)


/**
CPU_ARM1136_ERRATUM_317041: Bits [4:3] of Translation Table Base address registers (TTBR0, TTBR1)
do not read back correctly, but instead always return 0.
@internalComponent
@released
*/
#if defined(__CPU_ARM1136__) && defined(__HAS_EXTERNAL_CACHE__) && !defined(__CPU_ARM1136_ERRATUM_317041_FIXED)
#define UPDATE_PW_CACHING_ATTRIBUTES(cc,r)	asm("orr"#cc" "#r", "#r", #0x18")
#else
#define UPDATE_PW_CACHING_ATTRIBUTES(cc,r)
#endif

// Instruction macros

#if defined(__CPU_ARMV6) || defined(__CPU_ARMV7)
#define SRSgen(P,U,W,mode)	asm(".word %a0" : : "i" ((TInt)(0xf84d0500|(P<<24)|(U<<23)|(W<<21)|(mode))));
#define SRSIA(mode)	SRSgen(0,1,0,mode)
#define SRSIAW(mode)	SRSgen(0,1,1,mode)
#define SRSDB(mode)	SRSgen(1,0,0,mode)
#define SRSDBW(mode)	SRSgen(1,0,1,mode)
#define SRSIB(mode)	SRSgen(1,1,0,mode)
#define SRSIBW(mode)	SRSgen(1,1,1,mode)
#define SRSDA(mode)	SRSgen(0,0,0,mode)
#define SRSDAW(mode)	SRSgen(0,0,1,mode)
#define RFEgen(P,U,W,base)	asm(".word %a0" : : "i" ((TInt)(0xf8100a00|(P<<24)|(U<<23)|(W<<21)|(base<<16))));
#define RFEIA(base)	RFEgen(0,1,0,base)
#define RFEIAW(base)	RFEgen(0,1,1,base)
#define RFEDB(base)	RFEgen(1,0,0,base)
#define RFEDBW(base)	RFEgen(1,0,1,base)
#define RFEIB(base)	RFEgen(1,1,0,base)
#define RFEIBW(base)	RFEgen(1,1,1,base)
#define RFEDA(base)	RFEgen(0,0,0,base)
#define RFEDAW(base)	RFEgen(0,0,1,base)
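// Illustrative sketch (an assumption, not from the original source): the
// SRS/RFE encodings above support exception entry/exit sequences such as:
//		SRSDBW(MODE_SVC);		// push return state onto the SVC-mode stack
//		...handle the exception...
//		RFEIAW(13);				// resume, reloading PC and CPSR from [sp], sp updated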
#elif defined(__CPU_XSCALE__) // end of (__CPU_ARMV6) || (__CPU_ARMV7)
#define MAR(acc,RdLo,RdHi)	MCRR(0,0,RdLo,RdHi,acc)
#define MARcc(cc,acc,RdLo,RdHi)	MCRR(cc,0,0,RdLo,RdHi,acc)
#define MRA(acc,RdLo,RdHi)	MRRC(0,0,RdLo,RdHi,acc)
#define MRAcc(cc,acc,RdLo,RdHi)	MRRC(cc,0,0,RdLo,RdHi,acc)
#define MIAgen(cc,acc,Rm,Rs,opc3)	asm(".word %a0" : : "i" ((TInt)0x0e200010|((cc)<<28)|((opc3)<<16)|((Rs)<<12)|((acc)<<5)|(Rm)));
#define MIA(acc,Rm,Rs)		MIAgen(CC_AL,acc,Rm,Rs,0)
#define MIAPH(acc,Rm,Rs)	MIAgen(CC_AL,acc,Rm,Rs,8)
#define MIABB(acc,Rm,Rs)	MIAgen(CC_AL,acc,Rm,Rs,12)
#define MIATB(acc,Rm,Rs)	MIAgen(CC_AL,acc,Rm,Rs,13)
#define MIABT(acc,Rm,Rs)	MIAgen(CC_AL,acc,Rm,Rs,14)
#define MIATT(acc,Rm,Rs)	MIAgen(CC_AL,acc,Rm,Rs,15)
#define MIAcc(cc,acc,Rm,Rs)	MIAgen(cc,acc,Rm,Rs,0)
#define MIAPHcc(cc,acc,Rm,Rs)	MIAgen(cc,acc,Rm,Rs,8)
#define MIABBcc(cc,acc,Rm,Rs)	MIAgen(cc,acc,Rm,Rs,12)
#define MIATBcc(cc,acc,Rm,Rs)	MIAgen(cc,acc,Rm,Rs,13)
#define MIABTcc(cc,acc,Rm,Rs)	MIAgen(cc,acc,Rm,Rs,14)
#define MIATTcc(cc,acc,Rm,Rs)	MIAgen(cc,acc,Rm,Rs,15)
#endif // end of elif (__CPU_XSCALE__)

#ifdef __CPU_ARM_HAS_CPS
#define CPSgen(im,mm,f,mode)	asm(".word %a0" : : "i" ((TInt)(0xf1000000|((im)<<18)|((mm)<<17)|((f)<<6)|(mode))))
#if __ARM_ASSEMBLER_ISA__ >= 6
#define CPSIDAIF	asm("cpsidaif ")
#define CPSIDAI		asm("cpsidai ")
#define CPSIDIF		asm("cpsidif ")
#define CPSIDI		asm("cpsidi ")
#define CPSIDF		asm("cpsidf ")
#define CPSIEAIF	asm("cpsieaif ")
#define CPSIEI		asm("cpsiei ")
#define CPSIEF		asm("cpsief ")
#define CPSIEIF		asm("cpsieif ")
#else
#define CPSIDAIF	CPSgen(3,0,7,0)		// disable all interrupts, leave mode alone
#define CPSIDAI		CPSgen(3,0,6,0)		// disable imprecise aborts and IRQs, leave mode alone
#define CPSIDIF		CPSgen(3,0,3,0)		// disable IRQs and FIQs, leave mode alone
#define CPSIDI		CPSgen(3,0,2,0)		// disable IRQs, leave mode alone
#define CPSIDF		CPSgen(3,0,1,0)		// disable FIQs, leave mode alone
#define CPSIEAIF	CPSgen(2,0,7,0)		// enable all interrupts, leave mode alone
#define CPSIEI		CPSgen(2,0,2,0)		// enable IRQs, leave mode alone
#define CPSIEF		CPSgen(2,0,1,0)		// enable FIQs, leave mode alone
#define CPSIEIF		CPSgen(2,0,3,0)		// enable IRQs and FIQs, leave mode alone
#endif // end of __ARM_ASSEMBLER_ISA__ >= 6
#define CPSIDAIFM(mode)	CPSgen(3,1,7,mode)	// disable all interrupts and change mode
#define CPSIDIFM(mode)	CPSgen(3,1,3,mode)	// disable IRQs and FIQs, and change mode
#define CPSIDAIM(mode)	CPSgen(3,1,6,mode)	// disable imprecise aborts and IRQs, and change mode
#define CPSIDIM(mode)	CPSgen(3,1,2,mode)	// disable IRQs and change mode
#define CPSIDFM(mode)	CPSgen(3,1,1,mode)	// disable FIQs and change mode
#define CPSIEAIFM(mode)	CPSgen(2,1,7,mode)	// enable all interrupts and change mode
#define CPSIEIM(mode)	CPSgen(2,1,2,mode)	// enable IRQs and change mode
#define CPSIEFM(mode)	CPSgen(2,1,1,mode)	// enable FIQs and change mode
#define CPSIEIFM(mode)	CPSgen(2,1,3,mode)	// enable IRQs and FIQs, and change mode
#define CPSCHM(mode)	CPSgen(0,1,0,mode)	// change mode, leave interrupt masks alone
#endif // end of __CPU_ARM_HAS_CPS

// Processor modes
#define MODE_USR 0x10
#define MODE_FIQ 0x11
#define MODE_IRQ 0x12
#define MODE_SVC 0x13
#define MODE_ABT 0x17
#define MODE_UND 0x1b
#define MODE_SYS 0x1f

// Macros for changing processor mode and interrupt status
//
// Two instructions are necessary prior to ARMv6, and these may be interleaved.
//
// SET_MODE - sets mode and interrupt status
// SET_INTS - sets interrupt status (requires knowing the current mode at compile time)
// INTS_ON  - enables interrupts (requires the cpsr value be available at run time)
// INTS_OFF - disables interrupts (requires the cpsr value be available at run time)

#ifdef __CPU_ARM_HAS_CPS

#define INTS_ALL_OFF	IDIF
#define INTS_IRQ_OFF	IDI
#define INTS_FIQ_ON	IEF
#define INTS_ALL_ON	IEIF

#define CONCAT2(a,b)	a##b
#define CONCAT3(a,b,c)	a##b##c

#define SET_MODE_1(rd, newMode, newInts)
#define SET_MODE_2(rd, newMode, newInts)	CONCAT3(CPS, newInts, M)(newMode)

#define SET_INTS_1(rd, currentMode, newInts)
#define SET_INTS_2(rd, currentMode, newInts)	CONCAT2(CPS, newInts)

#define INTS_ON_1(rd, rCpsr, newInts)
#define INTS_ON_2(rd, rCpsr, newInts)		CONCAT2(CPS, newInts)

#define INTS_OFF_1(rd, rCpsr, newInts)
#define INTS_OFF_2(rd, rCpsr, newInts)		CONCAT2(CPS, newInts)

#else // __CPU_ARM_HAS_CPS

#define INTS_ALL_OFF	0xc0
#define INTS_IRQ_OFF	0x80
#define INTS_FIQ_ON	0x80
#define INTS_ALL_ON	0x00

#define SET_MODE_1(rd, newMode, newInts)	asm("mov "#rd", #%a0" : : "i" (newMode | newInts))
#define SET_MODE_2(rd, newMode, newInts)	asm("msr cpsr_c, "#rd)

#define SET_INTS_1(rd, currentMode, newInts)	SET_MODE_1(rd, currentMode, newInts)
#define SET_INTS_2(rd, currentMode, newInts)	SET_MODE_2(rd, currentMode, newInts)

#define INTS_ON_1(rd, rCpsr, newInts)		asm("bic "#rd", "#rCpsr", #%a0" : : "i" (newInts ^ 0xc0))
#define INTS_ON_2(rd, rCpsr, newInts)		asm("msr cpsr_c, "#rd)

#define INTS_OFF_1(rd, rCpsr, newInts)		asm("orr "#rd", "#rCpsr", #%a0" : : "i" (newInts))
#define INTS_OFF_2(rd, rCpsr, newInts)		asm("msr cpsr_c, "#rd)

#endif // end of __CPU_ARM_HAS_CPS

#define SET_MODE(rd, newMode, newInts)		SET_MODE_1(rd, newMode, newInts); SET_MODE_2(rd, newMode, newInts)
#define SET_INTS(rd, currentMode, newInts)	SET_INTS_1(rd, currentMode, newInts); SET_INTS_2(rd, currentMode, newInts)
#define INTS_ON(rd, rCpsr, newInts)		INTS_ON_1(rd, rCpsr, newInts); INTS_ON_2(rd, rCpsr, newInts)
#define INTS_OFF(rd, rCpsr, newInts)		INTS_OFF_1(rd, rCpsr, newInts); INTS_OFF_2(rd, rCpsr, newInts)
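// Illustrative sketch (an assumption, not from the original source): masking
// interrupts around a short critical section when the current mode is known
// to be mode_svc at compile time:
//		SET_INTS(r12, MODE_SVC, INTS_ALL_OFF);	// IRQs and FIQs off
//		...critical section...
//		SET_INTS(r12, MODE_SVC, INTS_ALL_ON);	// IRQs and FIQs back on
//
// or, where the original CPSR value has been preserved in r11:
//		INTS_OFF(r12, r11, INTS_ALL_OFF);
//		...critical section...
//		asm("msr cpsr_c, r11 ");				// restore the saved state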
#define __chill()

#if defined(__SMP__) && !defined(__CPU_ARM_HAS_LDREX_STREX_V6K)
#error SMP not allowed without v6K
#endif
#if defined(__SMP__) && !defined(__CPU_HAS_CP15_THREAD_ID_REG)
#error SMP not allowed without thread ID registers
#endif

#endif // end of __CPU_ARM

#if defined(__CPU_X86) && defined(__EPOC32__)
#define __CPU_HAS_MMU
#define __CPU_HAS_CACHE
#define __CPU_SUPPORTS_FAST_PROCESS_SWITCH

// Page/directory tables are cached on X86.
#define __CPU_PAGE_TABLES_FULLY_CACHED

#if defined(__VC32__)
#define X86_PAUSE	_asm rep nop
#define __chill()	do { _asm rep nop } while(0)
#elif defined(__GCC32__)
#define X86_PAUSE	__asm__ __volatile__("pause ");
#define __chill()	__asm__ __volatile__("pause ")
#else
#error Unknown x86 compiler
#endif

#if defined(__cplusplus)
extern "C" {
#endif
#if defined(__VC32__)
extern int _inp(unsigned short);				// input byte (compiler intrinsic)
extern unsigned short _inpw(unsigned short);			// input word (compiler intrinsic)
extern unsigned long _inpd(unsigned short);			// input dword (compiler intrinsic)
extern int _outp(unsigned short, int);				// output byte (compiler intrinsic)
extern unsigned short _outpw(unsigned short, unsigned short);	// output word (compiler intrinsic)
extern unsigned long _outpd(unsigned short, unsigned long);	// output dword (compiler intrinsic)

#pragma intrinsic(_inp, _inpw, _inpd, _outp, _outpw, _outpd)

#define x86_in8(port)		((TUint8)_inp(port))
#define x86_in16(port)		((TUint16)_inpw(port))
#define x86_in32(port)		((TUint32)_inpd(port))
#define x86_out8(port,data)	((void)_outp((port),(TUint8)(data)))
#define x86_out16(port,data)	((void)_outpw((port),(TUint16)(data)))
#define x86_out32(port,data)	((void)_outpd((port),(TUint32)(data)))

#elif defined(__GCC32__) // end of (__VC32__)
inline TUint8 _inpb(TUint16 port)
	{
	TUint8 ret;
	__asm__ __volatile__("in al, dx" : "=a" (ret) : "d" (port));
	return ret;
	}

inline TUint16 _inpw(TUint16 port)
	{
	TUint16 ret;
	__asm__ __volatile__("in ax, dx" : "=a" (ret) : "d" (port));
	return ret;
	}

inline TUint32 _inpd(TUint16 port)
	{
	TUint32 ret;
	__asm__ __volatile__("in eax, dx" : "=a" (ret) : "d" (port));
	return ret;
	}

inline void _outpb(TUint16 port, TUint8 data)
	{
	__asm__ __volatile__("out dx, al" : : "d" (port), "a" (data));
	}

inline void _outpw(TUint16 port, TUint16 data)
	{
	__asm__ __volatile__("out dx, ax" : : "d" (port), "a" (data));
	}

inline void _outpd(TUint16 port, TUint32 data)
	{
	__asm__ __volatile__("out dx, eax" : : "d" (port), "a" (data));
	}

#define x86_in8(port)		(_inpb(port))
#define x86_in16(port)		(_inpw(port))
#define x86_in32(port)		(_inpd(port))
#define x86_out8(port,data)	(_outpb((port),(TUint8)(data)))
#define x86_out16(port,data)	(_outpw((port),(TUint16)(data)))
#define x86_out32(port,data)	(_outpd((port),(TUint32)(data)))

#else // end of elif (__GCC32__)
#error Unknown x86 compiler
#endif
#if defined(__cplusplus)
}
#endif // end of (__VC32__) elif __GCC32__ else

#endif //__CPU_X86 && __EPOC32__
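// Illustrative sketch (an assumption, not from the original source): the port
// I/O macros above in use, reading and updating the interrupt mask register
// of a PC-style master 8259 PIC at I/O port 0x21:
//		TUint8 mask = x86_in8(0x21);		// current IRQ mask
//		x86_out8(0x21, mask | 0x02);		// additionally mask IRQ1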
#undef __USER_MEMORY_GUARDS_ENABLED__
#if defined(_DEBUG) && !defined(__KERNEL_APIS_DISABLE_USER_MEMORY_GUARDS__)
#if defined(__MEMMODEL_MULTIPLE__) || defined(__MEMMODEL_FLEXIBLE__)
#if defined(__CPU_ARM)
#define __USER_MEMORY_GUARDS_ENABLED__
#endif
#endif
#endif // end of (_DEBUG) && !(__KERNEL_APIS_DISABLE_USER_MEMORY_GUARDS__)

#ifndef __USER_MEMORY_GUARDS_ENABLED__

#define USER_MEMORY_GUARD_SAVE_WORDS	0
#define USER_MEMORY_DOMAIN		0

#define USER_MEMORY_GUARD_SAVE(save)
#define USER_MEMORY_GUARD_RESTORE(save,temp)
#define USER_MEMORY_GUARD_ON(cc,save,temp)
#define USER_MEMORY_GUARD_OFF(cc,save,temp)
#define USER_MEMORY_GUARD_ON_IF_MODE_USR(temp)
#define USER_MEMORY_GUARD_OFF_IF_MODE_USR(temp)
#define USER_MEMORY_GUARD_ASSERT_ON(temp)
#define USER_MEMORY_GUARD_ASSERT_OFF_IF_MODE_USR(psr)

#else // __USER_MEMORY_GUARDS_ENABLED__

#define USER_MEMORY_GUARD_SAVE_WORDS	2
#define USER_MEMORY_DOMAIN		15
#define USER_MEMORY_DOMAIN_MASK		(3 << (2*USER_MEMORY_DOMAIN))
#define USER_MEMORY_DOMAIN_CLIENT	(1 << (2*USER_MEMORY_DOMAIN))

// Save the DACR in the named register
#define USER_MEMORY_GUARD_SAVE(save)						\
	asm("mrc p15, 0, "#save", c3, c0, 0");			/* save<-DACR */

// Restore access to domain 15 (user pages) to the state previously saved
// In this case, 'save' may not be the same register as 'temp'
#define USER_MEMORY_GUARD_RESTORE(save,temp)					\
	asm("mrc p15, 0, "#temp", c3, c0, 0");			/* temp<-DACR */ \
	asm("bic "#temp", "#temp", #%a0" : : "i" USER_MEMORY_DOMAIN_MASK);	\
	asm("and "#save", "#save", #%a0" : : "i" USER_MEMORY_DOMAIN_MASK);	\
	asm("orr "#temp", "#temp", "#save );					\
	asm("mcr p15, 0, "#temp", c3, c0, 0");			/* DACR<-temp */ \
	__INST_SYNC_BARRIER__(temp)

// Disable access to domain 15 (user pages)
// 'save' may be the same register as 'temp', but in that case the use as
// a temporary takes precedence and the value left in 'save' is undefined
#define USER_MEMORY_GUARD_ON(cc,save,temp)					\
	asm("mrc"#cc" p15, 0, "#save", c3, c0, 0");		/* save<-DACR */ \
	asm("bic"#cc" "#temp", "#save", #%a0" : : "i" USER_MEMORY_DOMAIN_MASK);	\
	asm("mcr"#cc" p15, 0, "#temp", c3, c0, 0");		/* DACR<-temp */ \
	__INST_SYNC_BARRIER__(temp)

// Enable access to domain 15 (user pages) as a client
// 'save' may be the same register as 'temp', but in that case the use as
// a temporary takes precedence and the value left in 'save' is undefined
#define USER_MEMORY_GUARD_OFF(cc,save,temp)					\
	asm("mrc"#cc" p15, 0, "#save", c3, c0, 0");		/* save<-DACR */ \
	asm("orr"#cc" "#temp", "#save", #%a0" : : "i" USER_MEMORY_DOMAIN_CLIENT); \
	asm("mcr"#cc" p15, 0, "#temp", c3, c0, 0");		/* DACR<-temp */ \
	__INST_SYNC_BARRIER__(temp)
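// Illustrative sketch (an assumption, not from the original source): kernel
// code that must touch user memory brackets the access with the macros above:
//		USER_MEMORY_GUARD_OFF(,r11,r12);	// r11 <- saved DACR, user access allowed
//		...access user memory...
//		USER_MEMORY_GUARD_RESTORE(r11,r12);	// put the DACR back as it was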
// Disable access to domain 15 (user pages) if SPSR indicates mode_usr
// The specified 'temp' register is left with an undefined value
#define USER_MEMORY_GUARD_ON_IF_MODE_USR(temp)		\
	asm("mrs "#temp", spsr");			\
	asm("tst "#temp", #0x0f");			\
	USER_MEMORY_GUARD_ON(eq,temp,temp)

// Enable access to domain 15 (user pages) if SPSR indicates mode_usr
// The specified 'temp' register is left with an undefined value
#define USER_MEMORY_GUARD_OFF_IF_MODE_USR(temp)		\
	asm("mrs "#temp", spsr");			\
	asm("tst "#temp", #0x0f");			\
	USER_MEMORY_GUARD_OFF(eq,temp,temp)

// Assert that access to domain 15 (user pages) is disabled
#define USER_MEMORY_GUARD_ASSERT_ON(temp)					\
	asm("mrc p15, 0, "#temp", c3, c0, 0");			/* temp<-DACR */ \
	asm("tst "#temp", #%a0" : : "i" USER_MEMORY_DOMAIN_MASK);		\
	asm("cdpne p15, 0, c0, c0, c0, 0");			/* fault if nonzero */

// Assert that access to domain 15 (user pages) is enabled if the value
// in 'psr' says we came from/are going back to user mode
#define USER_MEMORY_GUARD_ASSERT_OFF_IF_MODE_USR(psr)				\
	asm("tst "#psr", #0x0f");				/* check for mode_usr */ \
	asm("mrceq p15, 0, "#psr", c3, c0, 0");			/* psr<-DACR */	\
	asm("tsteq "#psr", #%a0" : : "i" USER_MEMORY_DOMAIN_MASK);		\
	asm("cdpeq p15, 0, c0, c0, c0, 0");			/* fault if no access */

#endif // end of else __USER_MEMORY_GUARDS_ENABLED__

#endif // __NK_CPU_H__