1.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
1.2 +++ b/os/kernelhwsrv/kernel/eka/include/nkern/nk_cpu.h Fri Jun 15 03:10:57 2012 +0200
1.3 @@ -0,0 +1,865 @@
1.4 +// Copyright (c) 1998-2009 Nokia Corporation and/or its subsidiary(-ies).
1.5 +// All rights reserved.
1.6 +// This component and the accompanying materials are made available
1.7 +// under the terms of the License "Eclipse Public License v1.0"
1.8 +// which accompanies this distribution, and is available
1.9 +// at the URL "http://www.eclipse.org/legal/epl-v10.html".
1.10 +//
1.11 +// Initial Contributors:
1.12 +// Nokia Corporation - initial contribution.
1.13 +//
1.14 +// Contributors:
1.15 +//
1.16 +// Description:
1.17 +// e32\include\nkern\nk_cpu.h
1.18 +//
1.19 +// WARNING: This file contains some APIs which are internal and are subject
1.20 +// to change without notice. Such APIs should therefore not be used
1.21 +// outside the Kernel and Hardware Services package.
1.22 +//
1.23 +
1.24 +/**
1.25 + @file
1.26 + @publishedPartner
1.27 + @released
1.28 +*/
1.29 +
1.30 +#ifndef __NK_CPU_H__
1.31 +#define __NK_CPU_H__
1.32 +
1.33 +#include <cpudefs.h>
1.34 +
1.35 +#ifdef __CPU_ARM
1.36 +#if defined(__CPU_GENERIC_ARM4__)
1.37 +	// no cache, no MMU
1.38 + #define __CPU_ARM_ABORT_MODEL_RESTORED
1.39 +#endif
1.40 +
1.41 +#if defined(__CPU_ARM710T__) || defined(__CPU_ARM720T__)
1.42 + #define __CPU_HAS_MMU
1.43 + #define __CPU_HAS_CACHE
1.44 + #define __CPU_ARM_ABORT_MODEL_UPDATED
1.45 + #define __CPU_WRITE_BUFFER
1.46 +#endif
1.47 +
1.48 +#ifdef __CPU_SA1__
1.49 + #define __CPU_HAS_MMU
1.50 + #define __CPU_HAS_CACHE
1.51 + #define __CPU_ARM_ABORT_MODEL_RESTORED
1.52 + #define __CPU_SPLIT_CACHE
1.53 + #define __CPU_SPLIT_TLB
1.54 + #define __CPU_WRITE_BUFFER
1.55 + #define __CPU_HAS_ALT_D_CACHE
1.56 + #define __CPU_WRITE_BACK_CACHE
1.57 + #define __CPU_CACHE_FLUSH_BY_DATA_READ
1.58 + #define __CPU_HAS_SINGLE_ENTRY_DCACHE_FLUSH
1.59 +#endif
1.60 +
1.61 +#if defined(__CPU_ARM920T__) || defined(__CPU_ARM925T__) || defined(__CPU_ARM926J__)
1.62 + #define __CPU_HAS_MMU
1.63 + #define __CPU_HAS_CACHE
1.64 + #define __CPU_ARM_ABORT_MODEL_RESTORED
1.65 + #define __CPU_SPLIT_CACHE
1.66 + #define __CPU_SPLIT_TLB
1.67 + #define __CPU_WRITE_BUFFER
1.68 + #define __CPU_WRITE_BACK_CACHE
1.69 + #define __CPU_CACHE_FLUSH_BY_WAY_SET_INDEX
1.70 + #define __CPU_CACHE_POLICY_IN_PTE
1.71 + #define __CPU_HAS_CACHE_TYPE_REGISTER
1.72 + #define __CPU_HAS_SINGLE_ENTRY_ITLB_FLUSH
1.73 + #define __CPU_HAS_SINGLE_ENTRY_ICACHE_FLUSH
1.74 + #define __CPU_HAS_SINGLE_ENTRY_DCACHE_FLUSH
1.75 +#endif
1.76 +
1.77 +#ifdef __CPU_XSCALE__
1.78 + #define __CPU_HAS_MMU
1.79 + #define __CPU_HAS_CACHE
1.80 + #define __CPU_ARM_ABORT_MODEL_RESTORED
1.81 + #define __CPU_SPLIT_CACHE
1.82 + #define __CPU_SPLIT_TLB
1.83 + #define __CPU_WRITE_BUFFER
1.84 +#ifndef __CPU_XSCALE_MANZANO__
1.85 + #define __CPU_HAS_ALT_D_CACHE
1.86 +#endif
1.87 + #define __CPU_WRITE_BACK_CACHE
1.88 + #define __CPU_CACHE_WRITE_ALLOCATE
1.89 +#ifdef __CPU_XSCALE_MANZANO__
1.90 + #define __CPU_CACHE_FLUSH_BY_WAY_SET_INDEX
1.91 +#else
1.92 + #define __CPU_CACHE_FLUSH_BY_LINE_ALLOC
1.93 +#endif
1.94 + #define __CPU_CACHE_POLICY_IN_PTE
1.95 + #define __CPU_HAS_CACHE_TYPE_REGISTER
1.96 + #define __CPU_HAS_SINGLE_ENTRY_ITLB_FLUSH
1.97 + #define __CPU_HAS_SINGLE_ENTRY_ICACHE_FLUSH
1.98 + #define __CPU_HAS_SINGLE_ENTRY_DCACHE_FLUSH
1.99 + #define __CPU_HAS_BTB
1.100 + #define __CPU_USE_MMU_TEX_FIELD
1.101 + #define __CPU_HAS_COPROCESSOR_ACCESS_REG
1.102 + #define __CPU_HAS_ACTLR
1.103 +#endif
1.104 +
1.105 +#if defined(__CPU_ARM1136__) || defined(__CPU_ARM11MP__) || defined(__CPU_ARM1176__) || defined(__CPU_CORTEX_A8__) || defined(__CPU_CORTEX_A9__)
1.106 + #define __CPU_HAS_MMU
1.107 + #define __CPU_HAS_CACHE
1.108 + #define __CPU_CACHE_PHYSICAL_TAG
1.109 + #define __CPU_SUPPORTS_FAST_PROCESS_SWITCH
1.110 + #define __CPU_ARM_ABORT_MODEL_RESTORED
1.111 + #define __CPU_SPLIT_CACHE
1.112 +
1.113 + #if defined(__CPU_CORTEX_A9__) || defined(__CPU_CORTEX_A8__) || defined(__CPU_ARM1136__)
1.114 + #define __CPU_SPLIT_TLB
1.115 + #endif
1.116 +
1.117 + #if defined(__CPU_CORTEX_A8__)
1.118 + /* Internal cache controller maintains both inner & outer caches.
1.119 + * @internalComponent
1.120 + */
1.121 + #define __CPU_OUTER_CACHE_IS_INTERNAL_CACHE
1.122 + #endif
1.123 +
1.124 +
1.125 +
1.126 + #if defined(__CPU_CORTEX_A9__) || defined(__CPU_ARM11MP__)
1.127 + #define __CPU_SUPPORTS_TLBIMVAA
1.128 + #endif
1.129 +
1.130 + #if defined(__CPU_CORTEX_A9__)
1.131 + #ifdef __SMP__
1.132 +// #define __CPU_SUPPORTS_PAGE_TABLE_WALK_TO_L1_CACHE
1.133 + #endif
1.134 + #endif
1.135 +
1.136 + #if (defined(__CPU_ARM1136__) && defined(__CPU_ARM1136_ERRATUM_399234_FIXED) && !defined(__MEMMODEL_FLEXIBLE__)) || (defined(__CPU_ARM11MP__) && defined (__SMP__) )
1.137 +	// Page tables on these platforms are either uncached or write-through cached.
1.138 + #else
1.139 + // Page/directory tables are fully cached (write-back) on these platforms.
1.140 + #define __CPU_PAGE_TABLES_FULLY_CACHED
1.141 + #endif
1.142 +
1.143 + #define __CPU_WRITE_BUFFER
1.144 + #define __CPU_WRITE_BACK_CACHE
1.145 + #define __CPU_CACHE_WRITE_ALLOCATE
1.146 + #define __CPU_CACHE_FLUSH_BY_WAY_SET_INDEX
1.147 + #define __CPU_CACHE_POLICY_IN_PTE
1.148 + #define __CPU_HAS_CACHE_TYPE_REGISTER
1.149 + #define __CPU_HAS_SINGLE_ENTRY_ITLB_FLUSH
1.150 + #define __CPU_HAS_SINGLE_ENTRY_ICACHE_FLUSH
1.151 + #define __CPU_HAS_SINGLE_ENTRY_DCACHE_FLUSH
1.152 + #define __CPU_HAS_BTB
1.153 + #define __CPU_HAS_COPROCESSOR_ACCESS_REG
1.154 + #define __CPU_HAS_PREFETCH_BUFFER
1.155 + #define __CPU_HAS_ACTLR
1.156 + #define __CPU_HAS_TTBR1
1.157 +
1.158 + #if !defined(__CPU_ARM1136__)
1.159 + #define __CPU_MEMORY_TYPE_REMAPPING
1.160 + #endif
1.161 +
1.162 + #if defined(__CPU_ARM11MP__) && defined(__SMP__)
1.163 + #define __BROADCAST_CACHE_MAINTENANCE__
1.164 + #endif
1.165 +
1.166 + #if defined(__CPU_ARM11MP__) || defined(__CPU_ARM1176__)
1.167 + #define __CPU_NEEDS_BTAC_FLUSH_AFTER_ASID_CHANGE
1.168 + #endif
1.169 +
1.170 + #define __CPU_CACHE_HAS_COLOUR
1.171 + #define __CPU_I_CACHE_HAS_COLOUR
1.172 +
1.173 + #if defined(__CPU_ARM1136__) || defined(__CPU_ARM1176__)
1.174 + #define __CPU_D_CACHE_HAS_COLOUR
1.175 + #elif defined(__CPU_ARM11MP__)
1.176 + // MPCore has physically indexed D cache, so no colour problems
1.177 + #else
1.178 +	// Assume other ARM cores have a virtually indexed D cache with broken alias avoidance hardware...
1.179 + #define __CPU_D_CACHE_HAS_COLOUR
1.180 + #endif
1.181 +
1.182 +
1.183 +#endif
1.184 +
1.185 +
1.186 +#ifdef __FIQ_RESERVED_FOR_SECURE_STATE__
1.187 +#define __FIQ_IS_UNCONTROLLED__
1.188 +#endif
1.189 +
1.190 +#if defined(__CPU_MEMORY_TYPE_REMAPPING) || defined(__MEMMODEL_FLEXIBLE__)
1.191 +#define __MMU_USE_SYMMETRIC_ACCESS_PERMISSIONS
1.192 +#endif
1.193 +
1.194 +
1.195 +#if defined(__CPU_ARM1136__) && !defined(__CPU_ARM1136_ERRATUM_353494_FIXED) && defined(__MMU_USE_SYMMETRIC_ACCESS_PERMISSIONS)
1.196 +#define ERRATUM_353494_MODE_CHANGE(cc,r) FLUSH_BTB(cc,r)
1.197 +#else
1.198 +#define ERRATUM_353494_MODE_CHANGE(cc,r)
1.199 +#endif
1.200 +
1.201 +#ifdef __CPU_HAS_MMU
1.202 +#define __CPU_ARM_USE_DOMAINS
1.203 +#endif
1.204 +
1.205 +#if defined(__ARM_L210_CACHE__) || defined(__ARM_L220_CACHE__)|| defined(__ARM_PL310_CACHE__)
1.206 +/**
1.207 +Indicates the presence of an external cache controller.
1.208 +@internalTechnology
1.209 +*/
1.210 +#define __HAS_EXTERNAL_CACHE__
1.211 +#endif
1.212 +
1.213 +#ifndef __CPU_HAS_MMU
1.214 +#define CPWAIT(cc,r) /**< @internalTechnology */
1.215 +#endif
1.216 +
1.217 +#include <arm_vfp.h>
1.218 +
1.219 +// CP15 definitions
1.220 +#if defined(__CPU_ARM710T__) || defined(__CPU_ARM720T__)
1.221 +#define FLUSH_DCACHE(cc,r) asm("mcr"#cc" p15, 0, "#r", c7, c7, 0 "); /**< @internalTechnology */
1.222 +#define FLUSH_ICACHE(cc,r) asm("mcr"#cc" p15, 0, "#r", c7, c7, 0 "); /**< @internalTechnology */
1.223 +#define FLUSH_IDCACHE(cc,r) asm("mcr"#cc" p15, 0, "#r", c7, c7, 0 "); /**< @internalTechnology */
1.224 +#define FLUSH_DTLB(cc,r) asm("mcr"#cc" p15, 0, "#r", c8, c7, 0 "); /**< @internalTechnology */
1.225 +#define FLUSH_ITLB(cc,r) asm("mcr"#cc" p15, 0, "#r", c8, c7, 0 "); /**< @internalTechnology */
1.226 +#define FLUSH_IDTLB(cc,r) asm("mcr"#cc" p15, 0, "#r", c8, c7, 0 "); /**< @internalTechnology */
1.227 +#define FLUSH_DTLB_ENTRY(cc,addr) asm("mcr"#cc" p15, 0, "#addr", c8, c7, 1 "); /**< @internalTechnology */
1.228 +#define FLUSH_ITLB_ENTRY(cc,addr) asm("mcr"#cc" p15, 0, "#addr", c8, c7, 1 "); /**< @internalTechnology */
1.229 +#define FLUSH_IDTLB_ENTRY(cc,addr) asm("mcr"#cc" p15, 0, "#addr", c8, c7, 1 "); /**< @internalTechnology */
1.230 +#define DRAIN_WRITE_BUFFER(cc,r,rd) // this seems dodgy on Windermere and it works without it
1.231 +#define CPWAIT(cc,r) /**< @internalTechnology */
1.232 +
1.233 +#elif defined(__CPU_SA1__)
1.234 +#define FLUSH_ICACHE(cc,r) asm("mcr"#cc" p15, 0, "#r", c7, c5, 0 "); /**< @internalTechnology */
1.235 +#define PURGE_DCACHE_LINE(cc,r) asm("mcr"#cc" p15, 0, "#r", c7, c6, 1 "); /**< @internalTechnology */
1.236 +#define CLEAN_DCACHE_LINE(cc,r) asm("mcr"#cc" p15, 0, "#r", c7, c10, 1 "); /**< @internalTechnology */
1.237 +#define FLUSH_DCACHE_LINE(cc,r) asm("mcr"#cc" p15, 0, "#r", c7, c10, 1 "); asm("mcr"#cc" p15, 0, "#r", c7, c6, 1 ");/**< @internalTechnology */
1.238 +#define FLUSH_DTLB(cc,r) asm("mcr"#cc" p15, 0, "#r", c8, c6, 0 "); /**< @internalTechnology */
1.239 +#define FLUSH_ITLB(cc,r) asm("mcr"#cc" p15, 0, "#r", c8, c5, 0 "); /**< @internalTechnology */
1.240 +#define FLUSH_IDTLB(cc,r) asm("mcr"#cc" p15, 0, "#r", c8, c7, 0 "); /**< @internalTechnology */
1.241 +#define FLUSH_DTLB_ENTRY(cc,addr) asm("mcr"#cc" p15, 0, "#addr", c8, c6, 1 ");/**< @internalTechnology */
1.242 +#define DRAIN_WRITE_BUFFER(cc,r,rd) asm("mcr"#cc" p15, 0, "#r", c7, c10, 4 ");
1.243 +#define CPWAIT(cc,r) /**< @internalTechnology */
1.244 +
1.245 +#elif defined(__CPU_ARM920T__) || defined(__CPU_ARM925T__) || defined(__CPU_ARM926J__)
1.246 +#define FLUSH_ICACHE(cc,r) asm("mcr"#cc" p15, 0, "#r", c7, c5, 0 "); /**< @internalTechnology */
1.247 +#define FLUSH_ICACHE_LINE(cc,r,tmp) asm("mcr"#cc" p15, 0, "#r", c7, c5, 1 "); /**< @internalTechnology */
1.248 +#define PURGE_DCACHE_LINE(cc,r) asm("mcr"#cc" p15, 0, "#r", c7, c6, 1 "); /**< @internalTechnology */
1.249 +#define CLEAN_DCACHE_LINE(cc,r) asm("mcr"#cc" p15, 0, "#r", c7, c10, 1 "); /**< @internalTechnology */
1.250 +#define CLEAN_DCACHE_INDEX(cc,r) asm("mcr"#cc" p15, 0, "#r", c7, c10, 2 "); /**< @internalTechnology */
1.251 +#define FLUSH_DCACHE_LINE(cc,r) asm("mcr"#cc" p15, 0, "#r", c7, c14, 1 "); /**< @internalTechnology */
1.252 +#define FLUSH_DCACHE_INDEX(cc,r) asm("mcr"#cc" p15, 0, "#r", c7, c14, 2 "); /**< @internalTechnology */
1.253 +#define FLUSH_DTLB(cc,r) asm("mcr"#cc" p15, 0, "#r", c8, c6, 0 "); /**< @internalTechnology */
1.254 +#define FLUSH_ITLB(cc,r) asm("mcr"#cc" p15, 0, "#r", c8, c5, 0 "); /**< @internalTechnology */
1.255 +#define FLUSH_IDTLB(cc,r) asm("mcr"#cc" p15, 0, "#r", c8, c7, 0 "); /**< @internalTechnology */
1.256 +#define FLUSH_DTLB_ENTRY(cc,addr) asm("mcr"#cc" p15, 0, "#addr", c8, c6, 1 ");/**< @internalTechnology */
1.257 +#define FLUSH_ITLB_ENTRY(cc,addr) asm("mcr"#cc" p15, 0, "#addr", c8, c5, 1 ");/**< @internalTechnology */
1.258 +#define DRAIN_WRITE_BUFFER(cc,r,rd) asm("mcr"#cc" p15, 0, "#r", c7, c10, 4 ");
1.259 +#define CPWAIT(cc,r) /**< @internalTechnology */
1.260 +#define CACHE_MAINTENANCE_PDE_PTE_UPDATED(r) DRAIN_WRITE_BUFFER(,r,r);
1.261 +
1.262 +#elif defined(__CPU_XSCALE__)
1.263 +//#define FLUSH_ICACHE(cc,r) asm("mcr"#cc" p15, 0, "#r", c7, c5, 0 ");
1.264 +#define FLUSH_ICACHE(cc,r) asm("mcr"#cc" p15, 0, "#r", c7, c5, 0; sub"#cc" pc, pc, #4 ");/**< @internalTechnology */ // A step hack
1.265 +#define FLUSH_ICACHE_LINE(cc,r,tmp) asm("mcr"#cc" p15, 0, "#r", c7, c5, 1 "); /**< @internalTechnology */
1.266 +#ifdef __CPU_XSCALE_MANZANO__
1.267 +#define PURGE_DCACHE_LINE(cc,r) asm("mcr"#cc" p15, 0, "#r", c7, c6, 1 "); /**< @internalTechnology */
1.268 +#define CLEAN_DCACHE_LINE(cc,r) asm("mcr"#cc" p15, 0, "#r", c7, c10, 1 "); /**< @internalTechnology */
1.269 +#define CLEAN_DCACHE_INDEX(cc,r) asm("mcr"#cc" p15, 0, "#r", c7, c10, 2 "); /**< @internalTechnology */
1.270 +#define FLUSH_DCACHE_LINE(cc,r) asm("mcr"#cc" p15, 0, "#r", c7, c14, 1 "); /**< @internalTechnology */
1.271 +#define FLUSH_DCACHE_INDEX(cc,r) asm("mcr"#cc" p15, 0, "#r", c7, c14, 2 "); /**< @internalTechnology */
1.272 +#else
1.273 +#define PURGE_DCACHE_LINE(cc,r) asm("mcr"#cc" p15, 0, "#r", c7, c6, 1 "); asm("nop "); /**< @internalTechnology */ // PXA250 ERRATUM 96
1.274 +#define CLEAN_DCACHE_LINE(cc,r) asm("mcr"#cc" p15, 0, "#r", c7, c10, 1 "); asm("nop ");/**< @internalTechnology */ // PXA250 ERRATUM 96
1.275 +#define FLUSH_DCACHE_LINE(cc,r) asm("mcr"#cc" p15, 0, "#r", c7, c10, 1 "); asm("mcr"#cc" p15, 0, "#r", c7, c6, 1 "); asm("nop "); /**< @internalTechnology */ // PXA250 ERRATUM 96
1.276 +#define ALLOC_DCACHE_LINE(cc,r) asm("mcr"#cc" p15, 0, "#r", c7, c2, 5 "); /**< @internalTechnology */
1.277 +#endif
1.278 +#define FLUSH_DTLB(cc,r) asm("mcr"#cc" p15, 0, "#r", c8, c6, 0 "); /**< @internalTechnology */
1.279 +#define FLUSH_ITLB(cc,r) asm("mcr"#cc" p15, 0, "#r", c8, c5, 0 "); /**< @internalTechnology */
1.280 +#define FLUSH_IDTLB(cc,r) asm("mcr"#cc" p15, 0, "#r", c8, c7, 0 "); /**< @internalTechnology */
1.281 +#define FLUSH_DTLB_ENTRY(cc,addr) asm("mcr"#cc" p15, 0, "#addr", c8, c6, 1 "); asm("nop "); asm ("nop "); /**< @internalTechnology */ // PXA250 ERRATUM 21
1.282 +#define FLUSH_ITLB_ENTRY(cc,addr) asm("mcr"#cc" p15, 0, "#addr", c8, c5, 1 "); /**< @internalTechnology */
1.283 +
1.284 +#ifdef __CPU_XSCALE_MANZANO__
1.285 +#define DRAIN_WRITE_BUFFER(cc,r,rd) asm("mcr"#cc" p15, 0, "#r", c7, c10, 4 ");
1.286 +#else //__CPU_XSCALE_MANZANO__
1.287 +// PXA250 ERRATUM 14
1.288 +#define DRAIN_WRITE_BUFFER(cc,r,rd) asm("mcr"#cc" p15, 0, "#r", c7, c10, 4 "); \
1.289 + asm("ldr"#cc" "#rd", [pc] "); \
1.290 + asm("add pc, pc, #0 "); \
1.291 + asm(".word %a0" : : "i" ((TInt)&SuperPageAddress)); \
1.292 + asm("ldr"#cc" "#rd", ["#rd"] "); \
1.293 + asm("ldr"#cc" "#rd", ["#rd", #%a0]" : : "i" _FOFF(TSuperPage,iUncachedAddress)); \
1.294 + asm("ldr"#cc" "#rd", ["#rd"] ");
1.295 +#endif //else __CPU_XSCALE_MANZANO__
1.296 +//#define FLUSH_BTB(cc,r) asm("mcr"#cc" p15, 0, "#r", c7, c5, 6 ");
1.297 +#define FLUSH_BTB(cc,r) asm("mcr"#cc" p15, 0, "#r", c7, c5, 6; sub"#cc" pc, pc, #4 "); /**< @internalTechnology */ // A step hack
1.298 +#define CPWAIT(cc,r) asm("mrc"#cc" p15, 0, "#r", c2, c0, 0; mov"#cc" "#r","#r"; sub"#cc" pc, pc, #4 "); /**< @internalTechnology */
1.299 +#define GET_CAR(cc,r) asm("mrc"#cc" p15, 0, "#r", c15, c1, 0 "); /**< @internalTechnology */
1.300 +#define SET_CAR(cc,r) asm("mcr"#cc" p15, 0, "#r", c15, c1, 0 "); /**< @internalTechnology */
1.301 +
1.302 +#elif defined(__CPU_ARMV6) // end of elif __CPU_XSCALE
1.303 +
1.304 +#if !defined(__CPU_ARM1136_ERRATUM_411920_FIXED) && (defined(__CPU_ARM1136__) || defined(__CPU_ARM1176__))
1.305 +/** @internalTechnology */
1.306 +#define FLUSH_ICACHE(cc,r,rt) asm("mrs "#rt", cpsr"); \
1.307 + CPSIDAIF; \
1.308 + asm("mcr"#cc" p15, 0, "#r", c7, c5, 0 "); \
1.309 + asm("mcr"#cc" p15, 0, "#r", c7, c5, 0 "); \
1.310 + asm("mcr"#cc" p15, 0, "#r", c7, c5, 0 "); \
1.311 + asm("mcr"#cc" p15, 0, "#r", c7, c5, 0 "); \
1.312 + asm("msr cpsr_c, "#rt); \
1.313 + asm("nop"); \
1.314 + asm("nop"); \
1.315 + asm("nop"); \
1.316 + asm("nop"); \
1.317 + asm("nop"); \
1.318 + asm("nop"); \
1.319 + asm("nop"); \
1.320 + asm("nop"); \
1.321 + asm("nop"); \
1.322 + asm("nop"); \
1.323 + asm("nop");
1.324 +
1.325 +#else
1.326 +#define FLUSH_ICACHE(cc,r) asm("mcr"#cc" p15, 0, "#r", c7, c5, 0 "); /**< @internalTechnology */
1.327 +#endif // else !(__CPU_ARM1136_ERRATUM_411920_FIXED) && (__CPU_ARM1136__ || __CPU_ARM1176__)
1.328 +#if defined(__CPU_ARM1136_ERRATUM_371025_FIXED) || !defined(__CPU_ARM1136__)
1.329 +
1.330 +#if !defined(__CPU_ARM1176_ERRATUM_720013_FIXED) && defined(__CPU_ARM1176__)
1.331 +#define FLUSH_ICACHE_LINE(cc,r,tmp) asm("mcr"#cc" p15, 0, "#r", c7, c5, 1 "); \
1.332 + asm("mcr"#cc" p15, 0, "#r", c7, c5, 1 "); /**< @internalTechnology */
1.333 +#else
1.334 +#define FLUSH_ICACHE_LINE(cc,r,tmp) asm("mcr"#cc" p15, 0, "#r", c7, c5, 1 "); /**< @internalTechnology */
1.335 +#endif // !defined(__CPU_ARM1176_ERRATUM_720013_FIXED) && defined(__CPU_ARM1176__)
1.336 +
1.337 +#else // workaround for erratum 371025 of 1136...
1.338 +/** @internalTechnology */
1.339 +#define FLUSH_ICACHE_LINE(cc,r,tmp) asm("orr"#cc" "#tmp", "#r", #0xC0000000 "); \
1.340 + asm("bic"#cc" "#tmp", "#tmp", #1 "); \
1.341 + asm("mcr"#cc" p15, 0, "#tmp", c7, c5, 2 "); \
1.342 + asm("sub"#cc" "#tmp", "#tmp", #0x40000000 "); \
1.343 + asm("mcr"#cc" p15, 0, "#tmp", c7, c5, 2 "); \
1.344 + asm("sub"#cc" "#tmp", "#tmp", #0x40000000 "); \
1.345 + asm("mcr"#cc" p15, 0, "#tmp", c7, c5, 2 "); \
1.346 + asm("sub"#cc" "#tmp", "#tmp", #0x40000000 "); \
1.347 + asm("mcr"#cc" p15, 0, "#tmp", c7, c5, 2 ");
1.348 +#endif //else (__CPU_ARM1136_ERRATUM_371025_FIXED) || !(__CPU_ARM1136__)
1.349 +
1.350 +#if !defined(__CPU_ARM1176_ERRATUM_720013_FIXED) && defined(__CPU_ARM1176__)
1.351 +// Deliberately commented out so that it cannot be used on 1176 cores affected by erratum 720013:
1.352 +// #define FLUSH_ICACHE_INDEX(cc,r) asm("mcr"#cc" p15, 0, "#r", c7, c5, 2 ");
1.353 +#else
1.354 +#define FLUSH_ICACHE_INDEX(cc,r) asm("mcr"#cc" p15, 0, "#r", c7, c5, 2 "); /**< @internalTechnology */
1.355 +#endif //!defined(__CPU_ARM1176_ERRATUM_720013_FIXED) && defined(__CPU_ARM1176__)
1.356 +#define PURGE_DCACHE_LINE(cc,r) asm("mcr"#cc" p15, 0, "#r", c7, c6, 1 "); /**< @internalTechnology */
1.357 +#define PURGE_DCACHE_INDEX(cc,r) asm("mcr"#cc" p15, 0, "#r", c7, c6, 2 "); /**< @internalTechnology */
1.358 +#define CLEAN_DCACHE_LINE(cc,r) asm("mcr"#cc" p15, 0, "#r", c7, c10, 1 "); /**< @internalTechnology */
1.359 +
1.360 +#define CLEAN_DCACHE_INDEX(cc,r) asm("mcr"#cc" p15, 0, "#r", c7, c10, 2 "); /**< @internalTechnology */
1.361 +#define FLUSH_DCACHE_LINE(cc,r) asm("mcr"#cc" p15, 0, "#r", c7, c14, 1 "); /**< @internalTechnology */
1.362 +#define FLUSH_DCACHE_INDEX(cc,r) asm("mcr"#cc" p15, 0, "#r", c7, c14, 2 "); /**< @internalTechnology */
1.363 +#define FLUSH_ITLB(cc,r) asm("mcr"#cc" p15, 0, "#r", c8, c5, 0 "); /**< @internalTechnology */
1.364 +#define FLUSH_DTLB(cc,r) asm("mcr"#cc" p15, 0, "#r", c8, c6, 0 "); /**< @internalTechnology */
1.365 +#define FLUSH_IDTLB(cc,r) asm("mcr"#cc" p15, 0, "#r", c8, c7, 0 "); /**< @internalTechnology */
1.366 +
1.367 +
1.368 +	// addr must include the ASID in bits [7:0] of the address (see the sketch below)
1.369 +#if defined (__CPU_ARM11MP__)
1.370 +#define FLUSH_ITLB_ENTRY(cc,addr) asm("mcr"#cc" p15, 0, "#addr", c8, c5, 3 "); /**< @internalTechnology */
1.371 +#define FLUSH_DTLB_ENTRY(cc,addr) asm("mcr"#cc" p15, 0, "#addr", c8, c6, 3 "); /**< @internalTechnology */
1.372 +#else //(__CPU_ARM11MP__)
1.373 +#define FLUSH_ITLB_ENTRY(cc,addr) asm("mcr"#cc" p15, 0, "#addr", c8, c5, 1 "); /**< @internalTechnology */
1.374 +#define FLUSH_DTLB_ENTRY(cc,addr) asm("mcr"#cc" p15, 0, "#addr", c8, c6, 1 "); /**< @internalTechnology */
1.375 +#endif // else (__CPU_ARM11MP__)
1.376 +#define FLUSH_ITLB_ASID(cc,asid) asm("mcr"#cc" p15, 0, "#asid", c8, c5, 2 "); /**< @internalTechnology */
1.377 +#define FLUSH_DTLB_ASID(cc,asid) asm("mcr"#cc" p15, 0, "#asid", c8, c6, 2 "); /**< @internalTechnology */
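// [Editor's sketch - illustration only, not part of the original change]
// How the "addr must include the ASID" requirement above is typically met: OR an 8-bit
// ASID into the page-aligned virtual address before invalidating the matching TLB
// entries. The register choices below are hypothetical.
#if 0
	asm("orr r0, r0, r1 ");			// r0 = MVA[31:12] | ASID[7:0] (r1 holds the ASID)
	FLUSH_ITLB_ENTRY(,r0);			// invalidate the instruction TLB entry
	FLUSH_DTLB_ENTRY(,r0);			// invalidate the data TLB entry
#endif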
1.378 +
1.379 +#define DRAIN_WRITE_BUFFER(cc,r,rd) asm("mcr"#cc" p15, 0, "#r", c7, c10, 4 ");
1.380 +#define DATA_MEMORY_BARRIER(cc,r) asm("mcr"#cc" p15, 0, "#r", c7, c10, 5 ");
1.381 +#define FLUSH_PREFETCH_BUFFER(cc,r) asm("mcr"#cc" p15, 0, "#r", c7, c5, 4 "); /**< @internalTechnology */
1.382 +#define FLUSH_BTB(cc,r) asm("mcr"#cc" p15, 0, "#r", c7, c5, 6 "); /**< @internalTechnology */
1.383 +#define CPWAIT(cc,r) /**< @internalTechnology */ // not sure about this
1.384 +#define GET_CAR(cc,r) asm("mrc"#cc" p15, 0, "#r", c1, c0, 2 "); /**< @internalTechnology */
1.385 +#define SET_CAR(cc,r) asm("mcr"#cc" p15, 0, "#r", c1, c0, 2 "); /**< @internalTechnology */
1.386 +
1.387 +#if defined(__CPU_PAGE_TABLES_FULLY_CACHED)
1.388 + #define CACHE_MAINTENANCE_PDE_PTE_UPDATED(r) CLEAN_DCACHE_LINE(,r);\
1.389 + DRAIN_WRITE_BUFFER(,r,r);
1.390 +#else
1.391 + #define CACHE_MAINTENANCE_PDE_PTE_UPDATED(r) DRAIN_WRITE_BUFFER(,r,r);
1.392 +#endif //end of __CPU_PAGE_TABLES_FULLY_CACHED
1.393 +
1.394 +#elif defined(__CPU_ARMV7) // end of elif (__CPU_ARMV6)
1.395 +
1.396 +// Define new-style cache/TLB maintenance instructions
1.397 +#if defined(__CPU_CORTEX_A9__) && !defined(__CPU_ARM_A9_ERRATUM_571618_FIXED)
1.398 +// ARM Cortex-A9 MPCore erratum 571618 workaround
1.399 +// Execute memory barrier before interruptible CP15 operations
1.400 +#define ICIALLU asm("mcr p15, 0, r0, c7, c10, 5 "); \
1.401 + asm("mcr p15, 0, r0, c7, c5, 0 "); /**< @internalTechnology */
1.402 +#else
1.403 +#define ICIALLU asm("mcr p15, 0, r0, c7, c5, 0 "); /**< @internalTechnology */
1.404 +#endif // end of else (__CPU_CORTEX_A9__) && !(__CPU_ARM_A9_ERRATUM_571618_FIXED)
1.405 +#define ICIMVAU(r) asm("mcr p15, 0, "#r", c7, c5, 1 "); /**< @internalTechnology */
1.406 +#define BPIALL asm("mcr p15, 0, r0, c7, c5, 6 "); /**< @internalTechnology */
1.407 +#define BPIMVA(r) asm("mcr p15, 0, "#r", c7, c5, 7 "); /**< @internalTechnology */
1.408 +#define DCIMVAC(r) asm("mcr p15, 0, "#r", c7, c6, 1 "); /**< @internalTechnology */
1.409 +#define DCISW(r) asm("mcr p15, 0, "#r", c7, c6, 2 "); /**< @internalTechnology */
1.410 +#define DCCMVAC(r) asm("mcr p15, 0, "#r", c7, c10, 1 "); /**< @internalTechnology */
1.411 +#define DCCSW(r) asm("mcr p15, 0, "#r", c7, c10, 2 "); /**< @internalTechnology */
1.412 +#define DCCMVAU(r) asm("mcr p15, 0, "#r", c7, c11, 1 "); /**< @internalTechnology */
1.413 +#define DCCIMVAC(r) asm("mcr p15, 0, "#r", c7, c14, 1 "); /**< @internalTechnology */
1.414 +#define DCCISW(r) asm("mcr p15, 0, "#r", c7, c14, 2 "); /**< @internalTechnology */
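// [Editor's sketch - illustration only, not part of the original change]
// A typical use of the new-style operations above after writing code to memory: clean the
// D-cache line to the point of unification, then invalidate the stale I-cache line and the
// branch predictor, with barriers between the steps. Assumes the usual e32 __NAKED__
// convention and the __DATA_SYNC_BARRIER__/__INST_SYNC_BARRIER__ macros from cpudefs.h;
// the function name and register usage are hypothetical.
#if 0
__NAKED__ void ExampleSyncCodeLine(TLinAddr /*aAddr*/)	// address arrives in r0
	{
	DCCMVAU(r0);				// clean D-cache line to PoU
	__DATA_SYNC_BARRIER__(r1);		// make the clean visible to instruction fetch
	ICIMVAU(r0);				// invalidate the stale I-cache line
	BPIALL;					// invalidate the branch predictor
	__DATA_SYNC_BARRIER__(r1);
	__INST_SYNC_BARRIER__(r1);		// resynchronize the fetch stream
	asm("bx lr ");
	}
#endif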
1.415 +
1.416 +#ifdef __SMP__
1.417 +#if defined(__CPU_CORTEX_A9__) && !defined(__CPU_ARM_A9_ERRATUM_571618_FIXED)
1.418 +// ARM Cortex-A9 MPCore erratum 571618 workaround
1.419 +// Execute memory barrier before interruptible CP15 operations
1.420 +#define ICIALLUIS asm("mcr p15, 0, r0, c7, c10, 5 "); \
1.421 + asm("mcr p15, 0, r0, c7, c1, 0 "); /**< @internalTechnology */
1.422 +#else
1.423 +#define ICIALLUIS asm("mcr p15, 0, r0, c7, c1, 0 "); /**< @internalTechnology */
1.424 +#endif //end of else (__CPU_CORTEX_A9__) && !(__CPU_ARM_A9_ERRATUM_571618_FIXED)
1.425 +#define BPIALLIS asm("mcr p15, 0, r0, c7, c1, 6 "); /**< @internalTechnology */
1.426 +#endif // end of __SMP__
1.427 +
1.428 +#ifdef __CPU_SPLIT_TLB
1.429 +#define ITLBIALL asm("mcr p15, 0, r0, c8, c5, 0 "); /**< @internalTechnology */
1.430 +#define ITLBIMVA(r) asm("mcr p15, 0, "#r", c8, c5, 1 "); /**< @internalTechnology */
1.431 +#define ITLBIASID(r) asm("mcr p15, 0, "#r", c8, c5, 2 "); /**< @internalTechnology */
1.432 +#define DTLBIALL asm("mcr p15, 0, r0, c8, c6, 0 "); /**< @internalTechnology */
1.433 +#define DTLBIMVA(r) asm("mcr p15, 0, "#r", c8, c6, 1 "); /**< @internalTechnology */
1.434 +#define DTLBIASID(r) asm("mcr p15, 0, "#r", c8, c6, 2 "); /**< @internalTechnology */
1.435 +#endif
1.436 +#define UTLBIALL asm("mcr p15, 0, r0, c8, c7, 0 "); /**< @internalTechnology */
1.437 +#define UTLBIMVA(r) asm("mcr p15, 0, "#r", c8, c7, 1 "); /**< @internalTechnology */
1.438 +#if defined(__CPU_CORTEX_A9__) && !defined(__CPU_ARM_A9_ERRATUM_571618_FIXED)
1.439 +// ARM Cortex-A9 MPCore erratum 571618 workaround
1.440 +// Execute memory barrier before interruptible CP15 operations
1.441 +#define UTLBIASID(r) asm("mcr p15, 0, r0, c7, c10, 5 "); \
1.442 + asm("mcr p15, 0, "#r", c8, c7, 2 "); /**< @internalTechnology */
1.443 +#else
1.444 +#define UTLBIASID(r) asm("mcr p15, 0, "#r", c8, c7, 2 "); /**< @internalTechnology */
1.445 +#endif // end of else (__CPU_CORTEX_A9__) && !(__CPU_ARM_A9_ERRATUM_571618_FIXED)
1.446 +
1.447 +#ifdef __CPU_SUPPORTS_TLBIMVAA
1.448 +#ifdef __CPU_SPLIT_TLB
1.449 +#define ITLBIMVAA(r) asm("mcr p15, 0, "#r", c8, c5, 3 "); /**< @internalTechnology */
1.450 +#define DTLBIMVAA(r) asm("mcr p15, 0, "#r", c8, c6, 3 "); /**< @internalTechnology */
1.451 +#endif // end of __CPU_SPLIT_TLB
1.452 +#define UTLBIMVAA(r) asm("mcr p15, 0, "#r", c8, c7, 3 "); /**< @internalTechnology */
1.453 +#endif // end of __CPU_SUPPORTS_TLBIMVAA
1.454 +
1.455 +#ifdef __SMP__
1.456 +#ifdef __CPU_SPLIT_TLB
1.457 +#define ITLBIALLIS asm("mcr p15, 0, r0, c8, c3, 0 "); /**< @internalTechnology */
1.458 +#define ITLBIMVAIS(r) asm("mcr p15, 0, "#r", c8, c3, 1 "); /**< @internalTechnology */
1.459 +#if defined(__CPU_CORTEX_A9__) && !defined(__CPU_ARM_A9_ERRATUM_571618_FIXED)
1.460 +// ARM Cortex-A9 MPCore erratum 571618 workaround
1.461 +// Execute memory barrier before interruptible CP15 operations
1.462 +#define ITLBIASIDIS(r) asm("mcr p15, 0, r0, c7, c10, 5 "); \
1.463 + asm("mcr p15, 0, "#r", c8, c3, 2 "); /**< @internalTechnology */
1.464 +#else
1.465 +#define ITLBIASIDIS(r) asm("mcr p15, 0, "#r", c8, c3, 2 "); /**< @internalTechnology */
1.466 +#endif // end of else (__CPU_CORTEX_A9__) && !(__CPU_ARM_A9_ERRATUM_571618_FIXED)
1.467 +#define DTLBIALLIS asm("mcr p15, 0, r0, c8, c3, 0 "); /**< @internalTechnology */
1.468 +#define DTLBIMVAIS(r) asm("mcr p15, 0, "#r", c8, c3, 1 "); /**< @internalTechnology */
1.469 +#if defined(__CPU_CORTEX_A9__) && !defined(__CPU_ARM_A9_ERRATUM_571618_FIXED)
1.470 +// ARM Cortex-A9 MPCore erratum 571618 workaround
1.471 +// Execute memory barrier before interruptible CP15 operations
1.472 +#define DTLBIASIDIS(r) asm("mcr p15, 0, r0, c7, c10, 5 "); \
1.473 + asm("mcr p15, 0, "#r", c8, c3, 2 "); /**< @internalTechnology */
1.474 +#else
1.475 +#define DTLBIASIDIS(r) asm("mcr p15, 0, "#r", c8, c3, 2 "); /**< @internalTechnology */
1.476 +#endif // end of else (__CPU_CORTEX_A9__) && !(__CPU_ARM_A9_ERRATUM_571618_FIXED)
1.477 +#endif // end of __CPU_SPLIT_TLB
1.478 +#define UTLBIALLIS asm("mcr p15, 0, r0, c8, c3, 0 "); /**< @internalTechnology */
1.479 +#define UTLBIMVAIS(r) asm("mcr p15, 0, "#r", c8, c3, 1 "); /**< @internalTechnology */
1.480 +#if defined(__CPU_CORTEX_A9__) && !defined(__CPU_ARM_A9_ERRATUM_571618_FIXED)
1.481 +// ARM Cortex-A9 MPCore erratum 571618 workaround
1.482 +// Execute memory barrier before interruptible CP15 operations
1.483 +#define UTLBIASIDIS(r) asm("mcr p15, 0, r0, c7, c10, 5 "); \
1.484 + asm("mcr p15, 0, "#r", c8, c3, 2 "); /**< @internalTechnology */
1.485 +#else
1.486 +#define UTLBIASIDIS(r) asm("mcr p15, 0, "#r", c8, c3, 2 "); /**< @internalTechnology */
1.487 +#endif // end of else (__CPU_CORTEX_A9__) && !(__CPU_ARM_A9_ERRATUM_571618_FIXED)
1.488 +
1.489 +#ifdef __CPU_SUPPORTS_TLBIMVAA
1.490 +#ifdef __CPU_SPLIT_TLB
1.491 +#define ITLBIMVAAIS(r) asm("mcr p15, 0, "#r", c8, c3, 3 "); /**< @internalTechnology */
1.492 +#define DTLBIMVAAIS(r) asm("mcr p15, 0, "#r", c8, c3, 3 "); /**< @internalTechnology */
1.493 +#endif // end of __CPU_SPLIT_TLB
1.494 +#define UTLBIMVAAIS(r) asm("mcr p15, 0, "#r", c8, c3, 3 "); /**< @internalTechnology */
1.495 +#endif // end of __CPU_SUPPORTS_TLBIMVAA
1.496 +#endif // end of __SMP__
1.497 +
1.498 +
1.499 +#define DRAIN_WRITE_BUFFER(cc,r,rd) __DATA_SYNC_BARRIER__(r)
1.500 +#define DATA_MEMORY_BARRIER(cc,r) __DATA_MEMORY_BARRIER__(r)
1.501 +#define FLUSH_PREFETCH_BUFFER(cc,r) __INST_SYNC_BARRIER__(r) /**< @internalTechnology */
1.502 +//#define FLUSH_BTB(cc,r) asm("mcr"#cc" p15, 0, "#r", c7, c5, 6 "); /**< @internalTechnology */
1.503 +
1.504 +#define CPWAIT(cc,r) /**< @internalTechnology */ // not sure about this
1.505 +#define GET_CAR(cc,r) asm("mrc"#cc" p15, 0, "#r", c1, c0, 2 "); /**< @internalTechnology */
1.506 +#define SET_CAR(cc,r) asm("mcr"#cc" p15, 0, "#r", c1, c0, 2 "); \
1.507 + __INST_SYNC_BARRIER__(r) /**< @internalTechnology */
1.508 +
1.509 +#if !defined(__CPU_SUPPORTS_PAGE_TABLE_WALK_TO_L1_CACHE) && defined(__CPU_PAGE_TABLES_FULLY_CACHED)
1.510 + #define CACHE_MAINTENANCE_PDE_PTE_UPDATED(r) DCCMVAU(r); \
1.511 + __DATA_SYNC_BARRIER__(r);
1.512 +#else
1.513 + #define CACHE_MAINTENANCE_PDE_PTE_UPDATED(r) __DATA_SYNC_BARRIER__(r);
1.514 +#endif // end of !(__CPU_SUPPORTS_PAGE_TABLE_WALK_TO_L1_CACHE) && (__CPU_PAGE_TABLES_FULLY_CACHED)
1.515 +
1.516 +#endif // end of elif (__CPU_ARMV7)
1.517 +
1.518 +
1.519 +/**
1.520 +CPU_ARM1136_ERRATUM_317041: Bits [4:3] of Translation Table Base address registers (TTBR0, TTBR1)
1.521 +do not read back correctly, but instead always return 0.
1.522 +@internalComponent
1.523 +@released
1.524 +*/
1.525 +#if defined(__CPU_ARM1136__) && defined(__HAS_EXTERNAL_CACHE__) && !defined(__CPU_ARM1136_ERRATUM_317041_FIXED)
1.526 +#define UPDATE_PW_CACHING_ATTRIBUTES(cc,r) asm("orr"#cc" "#r", "#r", #0x18")
1.527 +#else
1.528 +#define UPDATE_PW_CACHING_ATTRIBUTES(cc,r)
1.529 +#endif
1.530 +
1.531 +// Instruction macros
1.532 +
1.533 +#if defined(__CPU_ARMV6) || defined(__CPU_ARMV7)
1.534 +#define SRSgen(P,U,W,mode) asm(".word %a0" : : "i" ((TInt)(0xf84d0500|(P<<24)|(U<<23)|(W<<21)|(mode))));
1.535 +#define SRSIA(mode) SRSgen(0,1,0,mode)
1.536 +#define SRSIAW(mode) SRSgen(0,1,1,mode)
1.537 +#define SRSDB(mode) SRSgen(1,0,0,mode)
1.538 +#define SRSDBW(mode) SRSgen(1,0,1,mode)
1.539 +#define SRSIB(mode) SRSgen(1,1,0,mode)
1.540 +#define SRSIBW(mode) SRSgen(1,1,1,mode)
1.541 +#define SRSDA(mode) SRSgen(0,0,0,mode)
1.542 +#define SRSDAW(mode) SRSgen(0,0,1,mode)
1.543 +#define RFEgen(P,U,W,base) asm(".word %a0" : : "i" ((TInt)(0xf8100a00|(P<<24)|(U<<23)|(W<<21)|(base<<16))));
1.544 +#define RFEIA(base) RFEgen(0,1,0,base)
1.545 +#define RFEIAW(base) RFEgen(0,1,1,base)
1.546 +#define RFEDB(base) RFEgen(1,0,0,base)
1.547 +#define RFEDBW(base) RFEgen(1,0,1,base)
1.548 +#define RFEIB(base) RFEgen(1,1,0,base)
1.549 +#define RFEIBW(base) RFEgen(1,1,1,base)
1.550 +#define RFEDA(base) RFEgen(0,0,0,base)
1.551 +#define RFEDAW(base) RFEgen(0,0,1,base)
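// [Editor's sketch - illustration only, not part of the original change]
// The SRS/RFE encodings above form the usual ARMv6+ exception entry/exit pair: SRSDBW
// pushes lr_<mode> and spsr_<mode> onto the named mode's stack, and RFEIAW pops pc and
// cpsr again. The argument to RFEIAW is a base register number (13 = sp).
#if 0
	SRSDBW(MODE_SVC);		// store return state to the SVC stack (decrement before, writeback)
	// ... exception handler body ...
	RFEIAW(13);			// return: reload pc/cpsr from [sp] (increment after, writeback)
#endif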
1.552 +#elif defined(__CPU_XSCALE__) // end of (__CPU_ARMV6) || (__CPU_ARMV7)
1.553 +#define MAR(acc,RdLo,RdHi) MCRR(0,0,RdLo,RdHi,acc)
1.554 +#define MARcc(cc,acc,RdLo,RdHi) MCRR(cc,0,0,RdLo,RdHi,acc)
1.555 +#define MRA(acc,RdLo,RdHi) MRRC(0,0,RdLo,RdHi,acc)
1.556 +#define MRAcc(cc,acc,RdLo,RdHi) MRRC(cc,0,0,RdLo,RdHi,acc)
1.557 +#define MIAgen(cc,acc,Rm,Rs,opc3) asm(".word %a0" : : "i" ((TInt)0x0e200010|((cc)<<28)|((opc3)<<16)|((Rs)<<12)|((acc)<<5)|(Rm)));
1.558 +#define MIA(acc,Rm,Rs) MIAgen(CC_AL,acc,Rm,Rs,0)
1.559 +#define MIAPH(acc,Rm,Rs) MIAgen(CC_AL,acc,Rm,Rs,8)
1.560 +#define MIABB(acc,Rm,Rs) MIAgen(CC_AL,acc,Rm,Rs,12)
1.561 +#define MIATB(acc,Rm,Rs) MIAgen(CC_AL,acc,Rm,Rs,13)
1.562 +#define MIABT(acc,Rm,Rs) MIAgen(CC_AL,acc,Rm,Rs,14)
1.563 +#define MIATT(acc,Rm,Rs) MIAgen(CC_AL,acc,Rm,Rs,15)
1.564 +#define MIAcc(cc,acc,Rm,Rs) MIAgen(cc,acc,Rm,Rs,0)
1.565 +#define MIAPHcc(cc,acc,Rm,Rs) MIAgen(cc,acc,Rm,Rs,8)
1.566 +#define MIABBcc(cc,acc,Rm,Rs) MIAgen(cc,acc,Rm,Rs,12)
1.567 +#define MIATBcc(cc,acc,Rm,Rs) MIAgen(cc,acc,Rm,Rs,13)
1.568 +#define MIABTcc(cc,acc,Rm,Rs) MIAgen(cc,acc,Rm,Rs,14)
1.569 +#define MIATTcc(cc,acc,Rm,Rs) MIAgen(cc,acc,Rm,Rs,15)
1.570 +#endif // end of elif (__CPU_XSCALE__)
1.571 +
1.572 +#ifdef __CPU_ARM_HAS_CPS
1.573 +#define CPSgen(im,mm,f,mode) asm(".word %a0" : : "i" ((TInt)(0xf1000000|((im)<<18)|((mm)<<17)|((f)<<6)|(mode))))
1.574 +#if __ARM_ASSEMBLER_ISA__ >= 6
1.575 +#define CPSIDAIF asm("cpsidaif ")
1.576 +#define CPSIDAI asm("cpsidai ")
1.577 +#define CPSIDIF asm("cpsidif ")
1.578 +#define CPSIDI asm("cpsidi ")
1.579 +#define CPSIDF asm("cpsidf ")
1.580 +#define CPSIEAIF asm("cpsieaif ")
1.581 +#define CPSIEI asm("cpsiei ")
1.582 +#define CPSIEF asm("cpsief ")
1.583 +#define CPSIEIF asm("cpsieif ")
1.584 +#else
1.585 +#define CPSIDAIF CPSgen(3,0,7,0) // disable all interrupts, leave mode alone
1.586 +#define CPSIDAI		CPSgen(3,0,6,0)		// disable aborts and IRQs, leave mode alone
1.587 +#define CPSIDIF CPSgen(3,0,3,0) // disable IRQs and FIQs, leave mode alone
1.588 +#define CPSIDI CPSgen(3,0,2,0) // disable IRQs, leave mode alone
1.589 +#define CPSIDF CPSgen(3,0,1,0) // disable FIQs, leave mode alone
1.590 +#define CPSIEAIF CPSgen(2,0,7,0) // enable all interrupts, leave mode alone
1.591 +#define CPSIEI CPSgen(2,0,2,0) // enable IRQs, leave mode alone
1.592 +#define CPSIEF CPSgen(2,0,1,0) // enable FIQs, leave mode alone
1.593 +#define CPSIEIF CPSgen(2,0,3,0) // enable IRQs and FIQs, leave mode alone
1.594 +#endif // end of __ARM_ASSEMBLER_ISA__ >= 6
1.595 +#define CPSIDAIFM(mode) CPSgen(3,1,7,mode) // disable all interrupts and change mode
1.596 +#define CPSIDIFM(mode)	CPSgen(3,1,3,mode)	// disable IRQs and FIQs and change mode
1.597 +#define CPSIDAIM(mode)	CPSgen(3,1,6,mode)	// disable aborts and IRQs and change mode
1.598 +#define CPSIDIM(mode) CPSgen(3,1,2,mode) // disable IRQs and change mode
1.599 +#define CPSIDFM(mode) CPSgen(3,1,1,mode) // disable FIQs and change mode
1.600 +#define CPSIEAIFM(mode) CPSgen(2,1,7,mode) // enable all interrupts and change mode
1.601 +#define CPSIEIM(mode) CPSgen(2,1,2,mode) // enable IRQs and change mode
1.602 +#define CPSIEFM(mode) CPSgen(2,1,1,mode) // enable FIQs and change mode
1.603 +#define CPSIEIFM(mode) CPSgen(2,1,3,mode) // enable IRQs and FIQs, and change mode
1.604 +#define CPSCHM(mode) CPSgen(0,1,0,mode) // change mode, leave interrupt masks alone
1.605 +#endif // end of __CPU_ARM_HAS_CPS
1.606 +
1.607 +// Processor modes
1.608 +#define MODE_USR 0x10
1.609 +#define MODE_FIQ 0x11
1.610 +#define MODE_IRQ 0x12
1.611 +#define MODE_SVC 0x13
1.612 +#define MODE_ABT 0x17
1.613 +#define MODE_UND 0x1b
1.614 +#define MODE_SYS 0x1f
1.615 +
1.616 +// Macros for changing processor mode and interrupt status (a usage sketch follows the definitions below)
1.617 +//
1.618 +// Two instructions are necessary prior to ARMv6, and these may be interleaved.
1.619 +//
1.620 +// SET_MODE - sets mode and interrupt status
1.621 +// SET_INTS - sets interrupt status (requires knowing the current mode at compile time)
1.622 +// INTS_ON - enables interrupts (requires the CPSR value to be available at run time)
1.623 +// INTS_OFF - disables interrupts (requires the CPSR value to be available at run time)
1.624 +
1.625 +#ifdef __CPU_ARM_HAS_CPS
1.626 +
1.627 +#define INTS_ALL_OFF IDIF
1.628 +#define INTS_IRQ_OFF IDI
1.629 +#define INTS_FIQ_ON IEF
1.630 +#define INTS_ALL_ON IEIF
1.631 +
1.632 +#define CONCAT2(a,b) a##b
1.633 +#define CONCAT3(a,b,c) a##b##c
1.634 +
1.635 +#define SET_MODE_1(rd, newMode, newInts)
1.636 +#define SET_MODE_2(rd, newMode, newInts) CONCAT3(CPS, newInts, M)(newMode)
1.637 +
1.638 +#define SET_INTS_1(rd, currentMode, newInts)
1.639 +#define SET_INTS_2(rd, currentMode, newInts) CONCAT2(CPS, newInts)
1.640 +
1.641 +#define INTS_ON_1(rd, rCpsr, newInts)
1.642 +#define INTS_ON_2(rd, rCpsr, newInts) CONCAT2(CPS, newInts)
1.643 +
1.644 +#define INTS_OFF_1(rd, rCpsr, newInts)
1.645 +#define INTS_OFF_2(rd, rCpsr, newInts) CONCAT2(CPS, newInts)
1.646 +
1.647 +#else // __CPU_ARM_HAS_CPS
1.648 +
1.649 +#define INTS_ALL_OFF 0xc0
1.650 +#define INTS_IRQ_OFF 0x80
1.651 +#define INTS_FIQ_ON 0x80
1.652 +#define INTS_ALL_ON 0x00
1.653 +
1.654 +#define SET_MODE_1(rd, newMode, newInts) asm("mov "#rd", #%a0" : : "i" (newMode | newInts))
1.655 +#define SET_MODE_2(rd, newMode, newInts) asm("msr cpsr_c, "#rd)
1.656 +
1.657 +#define SET_INTS_1(rd, currentMode, newInts) SET_MODE_1(rd, currentMode, newInts)
1.658 +#define SET_INTS_2(rd, currentMode, newInts) SET_MODE_2(rd, currentMode, newInts)
1.659 +
1.660 +#define INTS_ON_1(rd, rCpsr, newInts) asm("bic "#rd", "#rCpsr", #%a0" : : "i" (newInts ^ 0xc0))
1.661 +#define INTS_ON_2(rd, rCpsr, newInts) asm("msr cpsr_c, "#rd)
1.662 +
1.663 +#define INTS_OFF_1(rd, rCpsr, newInts) asm("orr "#rd", "#rCpsr", #%a0" : : "i" (newInts))
1.664 +#define INTS_OFF_2(rd, rCpsr, newInts) asm("msr cpsr_c, "#rd)
1.665 +
1.666 +#endif // end of __CPU_ARM_HAS_CPS
1.667 +
1.668 +#define SET_MODE(rd, newMode, newInts) SET_MODE_1(rd, newMode, newInts); SET_MODE_2(rd, newMode, newInts)
1.669 +#define SET_INTS(rd, currentMode, newInts) SET_INTS_1(rd, currentMode, newInts); SET_INTS_2(rd, currentMode, newInts)
1.670 +#define INTS_ON(rd, rCpsr, newInts) INTS_ON_1(rd, rCpsr, newInts); INTS_ON_2(rd, rCpsr, newInts)
1.671 +#define INTS_OFF(rd, rCpsr, newInts) INTS_OFF_1(rd, rCpsr, newInts); INTS_OFF_2(rd, rCpsr, newInts)
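// [Editor's sketch - illustration only, not part of the original change]
// Typical use of SET_INTS in an assembler-coded routine running in mode_svc: mask IRQs
// and FIQs around a short critical region, then re-enable them. The function name is
// hypothetical and the usual e32 __NAKED__ convention is assumed; r0 is only needed as
// scratch on pre-ARMv6 CPUs (the CPS-based variants ignore it).
#if 0
__NAKED__ void ExampleCriticalOp()
	{
	SET_INTS(r0, MODE_SVC, INTS_ALL_OFF);	// disable IRQs and FIQs (two instructions pre-ARMv6)
	// ... work that must not be interrupted ...
	SET_INTS(r0, MODE_SVC, INTS_ALL_ON);	// re-enable IRQs and FIQs
	asm("bx lr ");
	}
#endif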
1.672 +
1.673 +#define __chill()
1.674 +
1.675 +#if defined(__SMP__) && !defined(__CPU_ARM_HAS_LDREX_STREX_V6K)
1.676 +#error SMP not allowed without v6K
1.677 +#endif
1.678 +#if defined(__SMP__) && !defined(__CPU_HAS_CP15_THREAD_ID_REG)
1.679 +#error SMP not allowed without thread ID registers
1.680 +#endif
1.681 +
1.682 +#endif // end of __CPU_ARM
1.683 +
1.684 +#if defined(__CPU_X86) && defined(__EPOC32__)
1.685 + #define __CPU_HAS_MMU
1.686 + #define __CPU_HAS_CACHE
1.687 + #define __CPU_SUPPORTS_FAST_PROCESS_SWITCH
1.688 +
1.689 + // Page/directory tables are cached on X86.
1.690 + #define __CPU_PAGE_TABLES_FULLY_CACHED
1.691 +
1.692 +#if defined(__VC32__)
1.693 + #define X86_PAUSE _asm rep nop
1.694 + #define __chill() do { _asm rep nop } while(0)
1.695 +#elif defined(__GCC32__)
1.696 + #define X86_PAUSE __asm__ __volatile__("pause ");
1.697 + #define __chill() __asm__ __volatile__("pause ")
1.698 +#else
1.699 +#error Unknown x86 compiler
1.700 +#endif
1.701 +
1.702 +#if defined(__cplusplus)
1.703 +extern "C" {
1.704 +#endif
1.705 +#if defined(__VC32__)
1.706 +extern int _inp(unsigned short); // input byte (compiler intrinsic)
1.707 +extern unsigned short _inpw(unsigned short); // input word (compiler intrinsic)
1.708 +extern unsigned long _inpd(unsigned short); // input dword (compiler intrinsic)
1.709 +extern int _outp(unsigned short, int); // output byte (compiler intrinsic)
1.710 +extern unsigned short _outpw(unsigned short, unsigned short); // output word (compiler intrinsic)
1.711 +extern unsigned long _outpd(unsigned short, unsigned long); // output dword (compiler intrinsic)
1.712 +
1.713 +#pragma intrinsic(_inp, _inpw, _inpd, _outp, _outpw, _outpd)
1.714 +
1.715 +#define x86_in8(port) ((TUint8)_inp(port))
1.716 +#define x86_in16(port) ((TUint16)_inpw(port))
1.717 +#define x86_in32(port) ((TUint32)_inpd(port))
1.718 +#define x86_out8(port,data) ((void)_outp((port),(TUint8)(data)))
1.719 +#define x86_out16(port,data) ((void)_outpw((port),(TUint16)(data)))
1.720 +#define x86_out32(port,data) ((void)_outpd((port),(TUint32)(data)))
1.721 +
1.722 +#elif defined(__GCC32__) // end of (__VC32__)
1.723 +inline TUint8 _inpb(TUint16 port)
1.724 + {
1.725 + TUint8 ret;
1.726 + __asm__ __volatile__("in al, dx" : "=a" (ret) : "d" (port));
1.727 + return ret;
1.728 + }
1.729 +
1.730 +inline TUint16 _inpw(TUint16 port)
1.731 + {
1.732 +	TUint16 ret;
1.733 + __asm__ __volatile__("in ax, dx" : "=a" (ret) : "d" (port));
1.734 + return ret;
1.735 + }
1.736 +
1.737 +inline TUint32 _inpd(TUint16 port)
1.738 + {
1.739 + TUint32 ret;
1.740 + __asm__ __volatile__("in eax, dx" : "=a" (ret) : "d" (port));
1.741 + return ret;
1.742 + }
1.743 +
1.744 +inline void _outpb(TUint16 port, TUint8 data)
1.745 + {
1.746 + __asm__ __volatile__("out dx, al" : : "d" (port), "a" (data));
1.747 + }
1.748 +
1.749 +inline void _outpw(TUint16 port, TUint16 data)
1.750 + {
1.751 + __asm__ __volatile__("out dx, ax" : : "d" (port), "a" (data));
1.752 + }
1.753 +
1.754 +inline void _outpd(TUint16 port, TUint32 data)
1.755 + {
1.756 + __asm__ __volatile__("out dx, eax" : : "d" (port), "a" (data));
1.757 + }
1.758 +
1.759 +#define x86_in8(port) (_inpb(port))
1.760 +#define x86_in16(port) (_inpw(port))
1.761 +#define x86_in32(port) (_inpd(port))
1.762 +#define x86_out8(port,data) (_outpb((port),(TUint8)(data)))
1.763 +#define x86_out16(port,data) (_outpw((port),(TUint16)(data)))
1.764 +#define x86_out32(port,data) (_outpd((port),(TUint32)(data)))
1.765 +
1.766 +#else // end of elif (__GCC32__)
1.767 +#error Unknown x86 compiler
1.768 +#endif // end of (__VC32__) elif (__GCC32__) else
1.769 +#if defined(__cplusplus)
1.770 +}
1.771 +#endif // end of defined(__cplusplus)
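// [Editor's sketch - illustration only, not part of the original change]
// The x86_in*/x86_out* macros above give the same byte/word/dword port-I/O interface on
// both supported compilers. A minimal use with an assumed (hypothetical) base port:
#if 0
const TUint16 KExampleBasePort = 0x3f8;			// assumed port number, for illustration only
inline TUint8 ExampleReadStatus()
	{ return x86_in8(KExampleBasePort + 5); }	// read an 8-bit status register
inline void ExampleWriteData(TUint8 aData)
	{ x86_out8(KExampleBasePort, aData); }		// write an 8-bit data register
#endif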
1.772 +
1.773 +#endif //__CPU_X86 && __EPOC32__
1.774 +
1.775 +
1.776 +#undef __USER_MEMORY_GUARDS_ENABLED__
1.777 +#if defined(_DEBUG) && !defined(__KERNEL_APIS_DISABLE_USER_MEMORY_GUARDS__)
1.778 +#if defined(__MEMMODEL_MULTIPLE__) || defined(__MEMMODEL_FLEXIBLE__)
1.779 +#if defined(__CPU_ARM)
1.780 +#define __USER_MEMORY_GUARDS_ENABLED__
1.781 +#endif
1.782 +#endif
1.783 +#endif // end of (_DEBUG) && !(__KERNEL_APIS_DISABLE_USER_MEMORY_GUARDS__)
1.784 +
1.785 +#ifndef __USER_MEMORY_GUARDS_ENABLED__
1.786 +
1.787 +#define USER_MEMORY_GUARD_SAVE_WORDS 0
1.788 +#define USER_MEMORY_DOMAIN 0
1.789 +
1.790 +#define USER_MEMORY_GUARD_SAVE(save)
1.791 +#define USER_MEMORY_GUARD_RESTORE(save,temp)
1.792 +#define USER_MEMORY_GUARD_ON(cc,save,temp)
1.793 +#define USER_MEMORY_GUARD_OFF(cc,save,temp)
1.794 +#define USER_MEMORY_GUARD_ON_IF_MODE_USR(temp)
1.795 +#define USER_MEMORY_GUARD_OFF_IF_MODE_USR(temp)
1.796 +#define USER_MEMORY_GUARD_ASSERT_ON(temp)
1.797 +#define USER_MEMORY_GUARD_ASSERT_OFF_IF_MODE_USR(psr)
1.798 +
1.799 +#else // __USER_MEMORY_GUARDS_ENABLED__
1.800 +
1.801 +#define USER_MEMORY_GUARD_SAVE_WORDS 2
1.802 +#define USER_MEMORY_DOMAIN 15
1.803 +#define USER_MEMORY_DOMAIN_MASK (3 << (2*USER_MEMORY_DOMAIN))
1.804 +#define USER_MEMORY_DOMAIN_CLIENT (1 << (2*USER_MEMORY_DOMAIN))
1.805 +
1.806 +// Save the DACR in the named register
1.807 +#define USER_MEMORY_GUARD_SAVE(save) \
1.808 + asm("mrc p15, 0, "#save", c3, c0, 0"); /* save<-DACR */
1.809 +
1.810 +// Restore access to domain 15 (user pages) to the state previously saved
1.811 +// In this case, 'save' must not be the same register as 'temp'
1.812 +#define USER_MEMORY_GUARD_RESTORE(save,temp) \
1.813 + asm("mrc p15, 0, "#temp", c3, c0, 0"); /* temp<-DACR */ \
1.814 + asm("bic "#temp", "#temp", #%a0" : : "i" USER_MEMORY_DOMAIN_MASK); \
1.815 + asm("and "#save", "#save", #%a0" : : "i" USER_MEMORY_DOMAIN_MASK); \
1.816 + asm("orr "#temp", "#temp", "#save ); \
1.817 + asm("mcr p15, 0, "#temp", c3, c0, 0"); /* DACR<-temp */ \
1.818 + __INST_SYNC_BARRIER__(temp)
1.819 +
1.820 +// Disable access to domain 15 (user pages)
1.821 +// 'save' may be the same register as 'temp', but in that case the use as
1.822 +// a temporary takes precedence and the value left in 'save' is undefined
1.823 +#define USER_MEMORY_GUARD_ON(cc,save,temp) \
1.824 + asm("mrc"#cc" p15, 0, "#save", c3, c0, 0"); /* save<-DACR */ \
1.825 + asm("bic"#cc" "#temp", "#save", #%a0" : : "i" USER_MEMORY_DOMAIN_MASK); \
1.826 + asm("mcr"#cc" p15, 0, "#temp", c3, c0, 0"); /* DACR<-temp */ \
1.827 + __INST_SYNC_BARRIER__(temp)
1.828 +
1.829 +// Enable access to domain 15 (user pages) as a client
1.830 +// 'save' may be the same register as 'temp', but in that case the use as
1.831 +// a temporary takes precedence and the value left in 'save' is undefined
1.832 +#define USER_MEMORY_GUARD_OFF(cc,save,temp) \
1.833 + asm("mrc"#cc" p15, 0, "#save", c3, c0, 0"); /* save<-DACR */ \
1.834 + asm("orr"#cc" "#temp", "#save", #%a0" : : "i" USER_MEMORY_DOMAIN_CLIENT); \
1.835 + asm("mcr"#cc" p15, 0, "#temp", c3, c0, 0"); /* DACR<-temp */ \
1.836 + __INST_SYNC_BARRIER__(temp)
1.837 +
1.838 +// Disable access to domain 15 (user pages) if SPSR indicates mode_usr
1.839 +// The specified 'temp' register is left with an undefined value
1.840 +#define USER_MEMORY_GUARD_ON_IF_MODE_USR(temp) \
1.841 + asm("mrs "#temp", spsr"); \
1.842 + asm("tst "#temp", #0x0f"); \
1.843 + USER_MEMORY_GUARD_ON(eq,temp,temp)
1.844 +
1.845 +// Enable access to domain 15 (user pages) if SPSR indicates mode_usr
1.846 +// The specified 'temp' register is left with an undefined value
1.847 +#define USER_MEMORY_GUARD_OFF_IF_MODE_USR(temp) \
1.848 + asm("mrs "#temp", spsr"); \
1.849 + asm("tst "#temp", #0x0f"); \
1.850 + USER_MEMORY_GUARD_OFF(eq,temp,temp)
1.851 +
1.852 +// Assert that access to domain 15 (user pages) is disabled
1.853 +#define USER_MEMORY_GUARD_ASSERT_ON(temp) \
1.854 + asm("mrc p15, 0, "#temp", c3, c0, 0"); /* temp<-DACR */ \
1.855 + asm("tst "#temp", #%a0" : : "i" USER_MEMORY_DOMAIN_MASK); \
1.856 + asm("cdpne p15, 0, c0, c0, c0, 0"); /* fault if nonzero */
1.857 +
1.858 +// Assert that access to domain 15 (user pages) is enabled if the value
1.859 +// in 'psr' says we came from/are going back to user mode
1.860 +#define USER_MEMORY_GUARD_ASSERT_OFF_IF_MODE_USR(psr) \
1.861 + asm("tst "#psr", #0x0f"); /* check for mode_usr */ \
1.862 + asm("mrceq p15, 0, "#psr", c3, c0, 0"); /* psr<-DACR */ \
1.863 + asm("tsteq "#psr", #%a0" : : "i" USER_MEMORY_DOMAIN_MASK); \
1.864 + asm("cdpeq p15, 0, c0, c0, c0, 0"); /* fault if no access */
1.865 +
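// [Editor's sketch - illustration only, not part of the original change]
// Typical pattern in kernel-side assembler that must touch user memory while the guards
// are enabled: open the guard around the (already validated) access and then restore the
// previous DACR state. Register choices are hypothetical; note that RESTORE requires
// 'save' and 'temp' to be different registers.
#if 0
	USER_MEMORY_GUARD_OFF(,r11,r12);	// allow client access to domain 15; old DACR -> r11
	asm("ldr r0, [r2] ");			// example access through a checked user pointer in r2
	USER_MEMORY_GUARD_RESTORE(r11,r12);	// put the DACR back exactly as it was
#endif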
1.866 +#endif // end of else __USER_MEMORY_GUARDS_ENABLED__
1.867 +
1.868 +#endif // __NK_CPU_H__