Update contrib.
// Copyright (c) 1998-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\include\nkern\nk_cpu.h
//
// WARNING: This file contains some APIs which are internal and are subject
//          to change without notice. Such APIs should therefore not be used
//          outside the Kernel and Hardware Services package.
//

#ifndef __NK_CPU_H__
#define __NK_CPU_H__

#ifdef __CPU_ARM
#if defined(__CPU_GENERIC_ARM4__)
#define __CPU_ARM_ABORT_MODEL_RESTORED
#endif

#if defined(__CPU_ARM710T__) || defined(__CPU_ARM720T__)
#define __CPU_HAS_CACHE
#define __CPU_ARM_ABORT_MODEL_UPDATED
#define __CPU_WRITE_BUFFER
#endif

#if defined(__CPU_SA1__)
#define __CPU_HAS_CACHE
#define __CPU_ARM_ABORT_MODEL_RESTORED
#define __CPU_SPLIT_CACHE
#define __CPU_SPLIT_TLB
#define __CPU_WRITE_BUFFER
#define __CPU_HAS_ALT_D_CACHE
#define __CPU_WRITE_BACK_CACHE
#define __CPU_CACHE_FLUSH_BY_DATA_READ
#define __CPU_HAS_SINGLE_ENTRY_DCACHE_FLUSH
#endif
#if defined(__CPU_ARM920T__) || defined(__CPU_ARM925T__) || defined(__CPU_ARM926J__)
#define __CPU_HAS_CACHE
#define __CPU_ARM_ABORT_MODEL_RESTORED
#define __CPU_SPLIT_CACHE
#define __CPU_SPLIT_TLB
#define __CPU_WRITE_BUFFER
#define __CPU_WRITE_BACK_CACHE
#define __CPU_CACHE_FLUSH_BY_WAY_SET_INDEX
#define __CPU_CACHE_POLICY_IN_PTE
#define __CPU_HAS_CACHE_TYPE_REGISTER
#define __CPU_HAS_SINGLE_ENTRY_ITLB_FLUSH
#define __CPU_HAS_SINGLE_ENTRY_ICACHE_FLUSH
#define __CPU_HAS_SINGLE_ENTRY_DCACHE_FLUSH
#endif

#if defined(__CPU_XSCALE__)
#define __CPU_HAS_CACHE
#define __CPU_ARM_ABORT_MODEL_RESTORED
#define __CPU_SPLIT_CACHE
#define __CPU_SPLIT_TLB
#define __CPU_WRITE_BUFFER
#ifndef __CPU_XSCALE_MANZANO__
#define __CPU_HAS_ALT_D_CACHE
#endif
#define __CPU_WRITE_BACK_CACHE
#define __CPU_CACHE_WRITE_ALLOCATE
#ifdef __CPU_XSCALE_MANZANO__
#define __CPU_CACHE_FLUSH_BY_WAY_SET_INDEX
#else
#define __CPU_CACHE_FLUSH_BY_LINE_ALLOC
#endif
#define __CPU_CACHE_POLICY_IN_PTE
#define __CPU_HAS_CACHE_TYPE_REGISTER
#define __CPU_HAS_SINGLE_ENTRY_ITLB_FLUSH
#define __CPU_HAS_SINGLE_ENTRY_ICACHE_FLUSH
#define __CPU_HAS_SINGLE_ENTRY_DCACHE_FLUSH

#define __CPU_USE_MMU_TEX_FIELD
#define __CPU_HAS_COPROCESSOR_ACCESS_REG
#define __CPU_HAS_ACTLR
#endif
#if defined(__CPU_ARM1136__) || defined(__CPU_ARM11MP__) || defined(__CPU_ARM1176__) || defined(__CPU_CORTEX_A8__) || defined(__CPU_CORTEX_A9__)
#define __CPU_HAS_MMU
#define __CPU_HAS_CACHE
#define __CPU_CACHE_PHYSICAL_TAG
#define __CPU_SUPPORTS_FAST_PROCESS_SWITCH
#define __CPU_ARM_ABORT_MODEL_RESTORED
#define __CPU_SPLIT_CACHE

#if defined(__CPU_CORTEX_A9__) || defined(__CPU_CORTEX_A8__) || defined(__CPU_ARM1136__)
#define __CPU_SPLIT_TLB
#endif

#if defined(__CPU_CORTEX_A8__)
/* Internal cache controller maintains both inner & outer caches. */
#define __CPU_OUTER_CACHE_IS_INTERNAL_CACHE
#endif

#if defined(__CPU_CORTEX_A9__) || defined(__CPU_ARM11MP__)
#define __CPU_SUPPORTS_TLBIMVAA
#endif

#if defined(__CPU_CORTEX_A9__)
// #define __CPU_SUPPORTS_PAGE_TABLE_WALK_TO_L1_CACHE
#endif

#if (defined(__CPU_ARM1136__) && defined(__CPU_ARM1136_ERRATUM_399234_FIXED) && !defined(__MEMMODEL_FLEXIBLE__)) || (defined(__CPU_ARM11MP__) && defined(__SMP__))
// Page tables on these platforms are either uncached or write through cached.
#else
// Page/directory tables are fully cached (write-back) on these platforms.
#define __CPU_PAGE_TABLES_FULLY_CACHED
#endif

#define __CPU_WRITE_BUFFER
#define __CPU_WRITE_BACK_CACHE
#define __CPU_CACHE_WRITE_ALLOCATE
#define __CPU_CACHE_FLUSH_BY_WAY_SET_INDEX
#define __CPU_CACHE_POLICY_IN_PTE
#define __CPU_HAS_CACHE_TYPE_REGISTER
#define __CPU_HAS_SINGLE_ENTRY_ITLB_FLUSH
#define __CPU_HAS_SINGLE_ENTRY_ICACHE_FLUSH
#define __CPU_HAS_SINGLE_ENTRY_DCACHE_FLUSH
#define __CPU_HAS_BTB
#define __CPU_HAS_COPROCESSOR_ACCESS_REG
#define __CPU_HAS_PREFETCH_BUFFER
#define __CPU_HAS_ACTLR
#define __CPU_HAS_TTBR1

#if !defined(__CPU_ARM1136__)
#define __CPU_MEMORY_TYPE_REMAPPING
#endif

#if defined(__CPU_ARM11MP__) && defined(__SMP__)
#define __BROADCAST_CACHE_MAINTENANCE__
#endif

#if defined(__CPU_ARM11MP__) || defined(__CPU_ARM1176__)
#define __CPU_NEEDS_BTAC_FLUSH_AFTER_ASID_CHANGE
#endif

#define __CPU_CACHE_HAS_COLOUR
#define __CPU_I_CACHE_HAS_COLOUR

#if defined(__CPU_ARM1136__) || defined(__CPU_ARM1176__)
#define __CPU_D_CACHE_HAS_COLOUR
#elif defined(__CPU_ARM11MP__)
// MPCore has physically indexed D cache, so no colour problems
#else
// Assume other ARM cores have virtually indexed D cache with broken alias avoidance hardware...
#define __CPU_D_CACHE_HAS_COLOUR
#endif
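// Illustrative sketch (not part of the original header): with 4KB pages and a
// virtually indexed cache whose way size exceeds the page size, the "colour"
// of a page is the part of the virtual index above the page offset. Assuming
// two colour bits, i.e. a hypothetical 8KB way size:
//
//	TUint colour = (aVirtAddr >> 12) & 3;	// VA bits [13:12] select the colour
//
// Two virtual mappings of one physical page must then share a colour, or the
// cache can hold stale aliases of the same line.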
#endif

#ifdef __FIQ_RESERVED_FOR_SECURE_STATE__
#define __FIQ_IS_UNCONTROLLED__
#endif

#if defined(__CPU_MEMORY_TYPE_REMAPPING) || defined(__MEMMODEL_FLEXIBLE__)
#define __MMU_USE_SYMMETRIC_ACCESS_PERMISSIONS
#endif

#if defined(__CPU_ARM1136__) && !defined(__CPU_ARM1136_ERRATUM_353494_FIXED) && defined(__MMU_USE_SYMMETRIC_ACCESS_PERMISSIONS)
#define ERRATUM_353494_MODE_CHANGE(cc,r) FLUSH_BTB(cc,r)
#else
#define ERRATUM_353494_MODE_CHANGE(cc,r)
#endif

#define __CPU_ARM_USE_DOMAINS
#if defined(__ARM_L210_CACHE__) || defined(__ARM_L220_CACHE__) || defined(__ARM_PL310_CACHE__)
/**
Indicates the presence of external cache controller.
@internalTechnology
*/
#define __HAS_EXTERNAL_CACHE__
#endif

#ifndef __CPU_HAS_MMU
#define CPWAIT(cc,r) /**< @internalTechnology */
#endif
#if defined(__CPU_ARM710T__) || defined(__CPU_ARM720T__)
#define FLUSH_DCACHE(cc,r) asm("mcr"#cc" p15, 0, "#r", c7, c7, 0 "); /**< @internalTechnology */
#define FLUSH_ICACHE(cc,r) asm("mcr"#cc" p15, 0, "#r", c7, c7, 0 "); /**< @internalTechnology */
#define FLUSH_IDCACHE(cc,r) asm("mcr"#cc" p15, 0, "#r", c7, c7, 0 "); /**< @internalTechnology */
#define FLUSH_DTLB(cc,r) asm("mcr"#cc" p15, 0, "#r", c8, c7, 0 "); /**< @internalTechnology */
#define FLUSH_ITLB(cc,r) asm("mcr"#cc" p15, 0, "#r", c8, c7, 0 "); /**< @internalTechnology */
#define FLUSH_IDTLB(cc,r) asm("mcr"#cc" p15, 0, "#r", c8, c7, 0 "); /**< @internalTechnology */
#define FLUSH_DTLB_ENTRY(cc,addr) asm("mcr"#cc" p15, 0, "#addr", c8, c7, 1 "); /**< @internalTechnology */
#define FLUSH_ITLB_ENTRY(cc,addr) asm("mcr"#cc" p15, 0, "#addr", c8, c7, 1 "); /**< @internalTechnology */
#define FLUSH_IDTLB_ENTRY(cc,addr) asm("mcr"#cc" p15, 0, "#addr", c8, c7, 1 "); /**< @internalTechnology */
#define DRAIN_WRITE_BUFFER(cc,r,rd) // this seems dodgy on Windermere and it works without it
#define CPWAIT(cc,r) /**< @internalTechnology */
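// Usage sketch (illustrative only): on these cores a single CP15 operation
// invalidates the whole unified cache or TLB, so a full purge from a naked
// kernel routine could look like this (register choice is arbitrary):
//
//	asm("mov r0, #0 ");
//	FLUSH_IDCACHE(,r0);		// invalidate instruction+data cache
//	FLUSH_IDTLB(,r0);		// invalidate instruction+data TLB
//	CPWAIT(,r0);			// no-op on this family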
#elif defined(__CPU_SA1__)
#define FLUSH_ICACHE(cc,r) asm("mcr"#cc" p15, 0, "#r", c7, c5, 0 "); /**< @internalTechnology */
#define PURGE_DCACHE_LINE(cc,r) asm("mcr"#cc" p15, 0, "#r", c7, c6, 1 "); /**< @internalTechnology */
#define CLEAN_DCACHE_LINE(cc,r) asm("mcr"#cc" p15, 0, "#r", c7, c10, 1 "); /**< @internalTechnology */
#define FLUSH_DCACHE_LINE(cc,r) asm("mcr"#cc" p15, 0, "#r", c7, c10, 1 "); asm("mcr"#cc" p15, 0, "#r", c7, c6, 1 ");/**< @internalTechnology */
#define FLUSH_DTLB(cc,r) asm("mcr"#cc" p15, 0, "#r", c8, c6, 0 "); /**< @internalTechnology */
#define FLUSH_ITLB(cc,r) asm("mcr"#cc" p15, 0, "#r", c8, c5, 0 "); /**< @internalTechnology */
#define FLUSH_IDTLB(cc,r) asm("mcr"#cc" p15, 0, "#r", c8, c7, 0 "); /**< @internalTechnology */
#define FLUSH_DTLB_ENTRY(cc,addr) asm("mcr"#cc" p15, 0, "#addr", c8, c6, 1 ");/**< @internalTechnology */
#define DRAIN_WRITE_BUFFER(cc,r,rd) asm("mcr"#cc" p15, 0, "#r", c7, c10, 4 ");
#define CPWAIT(cc,r) /**< @internalTechnology */
#elif defined(__CPU_ARM920T__) || defined(__CPU_ARM925T__) || defined(__CPU_ARM926J__)
#define FLUSH_ICACHE(cc,r) asm("mcr"#cc" p15, 0, "#r", c7, c5, 0 "); /**< @internalTechnology */
#define FLUSH_ICACHE_LINE(cc,r,tmp) asm("mcr"#cc" p15, 0, "#r", c7, c5, 1 "); /**< @internalTechnology */
#define PURGE_DCACHE_LINE(cc,r) asm("mcr"#cc" p15, 0, "#r", c7, c6, 1 "); /**< @internalTechnology */
#define CLEAN_DCACHE_LINE(cc,r) asm("mcr"#cc" p15, 0, "#r", c7, c10, 1 "); /**< @internalTechnology */
#define CLEAN_DCACHE_INDEX(cc,r) asm("mcr"#cc" p15, 0, "#r", c7, c10, 2 "); /**< @internalTechnology */
#define FLUSH_DCACHE_LINE(cc,r) asm("mcr"#cc" p15, 0, "#r", c7, c14, 1 "); /**< @internalTechnology */
#define FLUSH_DCACHE_INDEX(cc,r) asm("mcr"#cc" p15, 0, "#r", c7, c14, 2 "); /**< @internalTechnology */
#define FLUSH_DTLB(cc,r) asm("mcr"#cc" p15, 0, "#r", c8, c6, 0 "); /**< @internalTechnology */
#define FLUSH_ITLB(cc,r) asm("mcr"#cc" p15, 0, "#r", c8, c5, 0 "); /**< @internalTechnology */
#define FLUSH_IDTLB(cc,r) asm("mcr"#cc" p15, 0, "#r", c8, c7, 0 "); /**< @internalTechnology */
#define FLUSH_DTLB_ENTRY(cc,addr) asm("mcr"#cc" p15, 0, "#addr", c8, c6, 1 ");/**< @internalTechnology */
#define FLUSH_ITLB_ENTRY(cc,addr) asm("mcr"#cc" p15, 0, "#addr", c8, c5, 1 ");/**< @internalTechnology */
#define DRAIN_WRITE_BUFFER(cc,r,rd) asm("mcr"#cc" p15, 0, "#r", c7, c10, 4 ");
#define CPWAIT(cc,r) /**< @internalTechnology */
#define CACHE_MAINTENANCE_PDE_PTE_UPDATED(r) DRAIN_WRITE_BUFFER(,r,r);
#elif defined(__CPU_XSCALE__)
//#define FLUSH_ICACHE(cc,r) asm("mcr"#cc" p15, 0, "#r", c7, c5, 0 ");
#define FLUSH_ICACHE(cc,r) asm("mcr"#cc" p15, 0, "#r", c7, c5, 0; sub"#cc" pc, pc, #4 ");/**< @internalTechnology */ // A step hack
#define FLUSH_ICACHE_LINE(cc,r,tmp) asm("mcr"#cc" p15, 0, "#r", c7, c5, 1 "); /**< @internalTechnology */
#ifdef __CPU_XSCALE_MANZANO__
#define PURGE_DCACHE_LINE(cc,r) asm("mcr"#cc" p15, 0, "#r", c7, c6, 1 "); /**< @internalTechnology */
#define CLEAN_DCACHE_LINE(cc,r) asm("mcr"#cc" p15, 0, "#r", c7, c10, 1 "); /**< @internalTechnology */
#define CLEAN_DCACHE_INDEX(cc,r) asm("mcr"#cc" p15, 0, "#r", c7, c10, 2 "); /**< @internalTechnology */
#define FLUSH_DCACHE_LINE(cc,r) asm("mcr"#cc" p15, 0, "#r", c7, c14, 1 "); /**< @internalTechnology */
#define FLUSH_DCACHE_INDEX(cc,r) asm("mcr"#cc" p15, 0, "#r", c7, c14, 2 "); /**< @internalTechnology */
#else
#define PURGE_DCACHE_LINE(cc,r) asm("mcr"#cc" p15, 0, "#r", c7, c6, 1 "); asm("nop "); /**< @internalTechnology */ // PXA250 ERRATUM 96
#define CLEAN_DCACHE_LINE(cc,r) asm("mcr"#cc" p15, 0, "#r", c7, c10, 1 "); asm("nop ");/**< @internalTechnology */ // PXA250 ERRATUM 96
#define FLUSH_DCACHE_LINE(cc,r) asm("mcr"#cc" p15, 0, "#r", c7, c10, 1 "); asm("mcr"#cc" p15, 0, "#r", c7, c6, 1 "); asm("nop "); /**< @internalTechnology */ // PXA250 ERRATUM 96
#define ALLOC_DCACHE_LINE(cc,r) asm("mcr"#cc" p15, 0, "#r", c7, c2, 5 "); /**< @internalTechnology */
#endif
#define FLUSH_DTLB(cc,r) asm("mcr"#cc" p15, 0, "#r", c8, c6, 0 "); /**< @internalTechnology */
#define FLUSH_ITLB(cc,r) asm("mcr"#cc" p15, 0, "#r", c8, c5, 0 "); /**< @internalTechnology */
#define FLUSH_IDTLB(cc,r) asm("mcr"#cc" p15, 0, "#r", c8, c7, 0 "); /**< @internalTechnology */
#define FLUSH_DTLB_ENTRY(cc,addr) asm("mcr"#cc" p15, 0, "#addr", c8, c6, 1 "); asm("nop "); asm ("nop "); /**< @internalTechnology */ // PXA250 ERRATUM 21
#define FLUSH_ITLB_ENTRY(cc,addr) asm("mcr"#cc" p15, 0, "#addr", c8, c5, 1 "); /**< @internalTechnology */

#ifdef __CPU_XSCALE_MANZANO__
#define DRAIN_WRITE_BUFFER(cc,r,rd) asm("mcr"#cc" p15, 0, "#r", c7, c10, 4 ");
#else //__CPU_XSCALE_MANZANO__
#define DRAIN_WRITE_BUFFER(cc,r,rd) asm("mcr"#cc" p15, 0, "#r", c7, c10, 4 "); \
	asm("ldr"#cc" "#rd", [pc] "); \
	asm("add pc, pc, #0 "); \
	asm(".word %a0" : : "i" ((TInt)&SuperPageAddress)); \
	asm("ldr"#cc" "#rd", ["#rd"] "); \
	asm("ldr"#cc" "#rd", ["#rd", #%a0]" : : "i" _FOFF(TSuperPage,iUncachedAddress)); \
	asm("ldr"#cc" "#rd", ["#rd"] ");
#endif //else __CPU_XSCALE_MANZANO__
//#define FLUSH_BTB(cc,r) asm("mcr"#cc" p15, 0, "#r", c7, c5, 6 ");
#define FLUSH_BTB(cc,r) asm("mcr"#cc" p15, 0, "#r", c7, c5, 6; sub"#cc" pc, pc, #4 "); /**< @internalTechnology */ // A step hack
#define CPWAIT(cc,r) asm("mrc"#cc" p15, 0, "#r", c2, c0, 0; mov"#cc" "#r","#r"; sub"#cc" pc, pc, #4 "); /**< @internalTechnology */
#define GET_CAR(cc,r) asm("mrc"#cc" p15, 0, "#r", c15, c1, 0 "); /**< @internalTechnology */
#define SET_CAR(cc,r) asm("mcr"#cc" p15, 0, "#r", c15, c1, 0 "); /**< @internalTechnology */
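// CPWAIT above is the standard XScale idiom for waiting until a CP15 write
// has taken effect: read back a CP15 register, consume the result, then force
// a pipeline flush with the pc-relative sub. A hedged usage sketch:
//
//	SET_CAR(,r0);			// write the coprocessor access register
//	CPWAIT(,r1);			// stall until the CP15 update is guaranteed visible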
#elif defined(__CPU_ARMV6) // end of elif __CPU_XSCALE

#if !defined(__CPU_ARM1136_ERRATUM_411920_FIXED) && (defined(__CPU_ARM1136__) || defined(__CPU_ARM1176__))
/** @internalTechnology */
#define FLUSH_ICACHE(cc,r,rt) asm("mrs "#rt", cpsr"); \
	CPSIDAIF; \
	asm("mcr"#cc" p15, 0, "#r", c7, c5, 0 "); \
	asm("mcr"#cc" p15, 0, "#r", c7, c5, 0 "); \
	asm("mcr"#cc" p15, 0, "#r", c7, c5, 0 "); \
	asm("mcr"#cc" p15, 0, "#r", c7, c5, 0 "); \
	asm("msr cpsr_c, "#rt);
#else
#define FLUSH_ICACHE(cc,r) asm("mcr"#cc" p15, 0, "#r", c7, c5, 0 "); /**< @internalTechnology */
#endif // else !(__CPU_ARM1136_ERRATUM_411920_FIXED) && (__CPU_ARM1136__ || __CPU_ARM1176__)
#if defined(__CPU_ARM1136_ERRATUM_371025_FIXED) || !defined(__CPU_ARM1136__)
#if !defined(__CPU_ARM1176_ERRATUM_720013_FIXED) && defined(__CPU_ARM1176__)
#define FLUSH_ICACHE_LINE(cc,r,tmp) asm("mcr"#cc" p15, 0, "#r", c7, c5, 1 "); \
	asm("mcr"#cc" p15, 0, "#r", c7, c5, 1 "); /**< @internalTechnology */
#else
#define FLUSH_ICACHE_LINE(cc,r,tmp) asm("mcr"#cc" p15, 0, "#r", c7, c5, 1 "); /**< @internalTechnology */
#endif // !defined(__CPU_ARM1176_ERRATUM_720013_FIXED) && defined(__CPU_ARM1176__)
#else // workaround for erratum 371025 of 1136...
/** @internalTechnology */
#define FLUSH_ICACHE_LINE(cc,r,tmp) asm("orr"#cc" "#tmp", "#r", #0xC0000000 "); \
	asm("bic"#cc" "#tmp", "#tmp", #1 "); \
	asm("mcr"#cc" p15, 0, "#tmp", c7, c5, 2 "); \
	asm("sub"#cc" "#tmp", "#tmp", #0x40000000 "); \
	asm("mcr"#cc" p15, 0, "#tmp", c7, c5, 2 "); \
	asm("sub"#cc" "#tmp", "#tmp", #0x40000000 "); \
	asm("mcr"#cc" p15, 0, "#tmp", c7, c5, 2 "); \
	asm("sub"#cc" "#tmp", "#tmp", #0x40000000 "); \
	asm("mcr"#cc" p15, 0, "#tmp", c7, c5, 2 ");
#endif //else (__CPU_ARM1136_ERRATUM_371025_FIXED) || !(__CPU_ARM1136__)

#if !defined(__CPU_ARM1176_ERRATUM_720013_FIXED) && defined(__CPU_ARM1176__)
// It is commented out to ensure it is not used on 1176 cores with 720013 erratum
// #define FLUSH_ICACHE_INDEX(cc,r) asm("mcr"#cc" p15, 0, "#r", c7, c5, 2 ");
#else
#define FLUSH_ICACHE_INDEX(cc,r) asm("mcr"#cc" p15, 0, "#r", c7, c5, 2 "); /**< @internalTechnology */
#endif //!defined(__CPU_ARM1176_ERRATUM_720013_FIXED) && defined(__CPU_ARM1176__)
#define PURGE_DCACHE_LINE(cc,r) asm("mcr"#cc" p15, 0, "#r", c7, c6, 1 "); /**< @internalTechnology */
#define PURGE_DCACHE_INDEX(cc,r) asm("mcr"#cc" p15, 0, "#r", c7, c6, 2 "); /**< @internalTechnology */
#define CLEAN_DCACHE_LINE(cc,r) asm("mcr"#cc" p15, 0, "#r", c7, c10, 1 "); /**< @internalTechnology */
#define CLEAN_DCACHE_INDEX(cc,r) asm("mcr"#cc" p15, 0, "#r", c7, c10, 2 "); /**< @internalTechnology */
#define FLUSH_DCACHE_LINE(cc,r) asm("mcr"#cc" p15, 0, "#r", c7, c14, 1 "); /**< @internalTechnology */
#define FLUSH_DCACHE_INDEX(cc,r) asm("mcr"#cc" p15, 0, "#r", c7, c14, 2 "); /**< @internalTechnology */
#define FLUSH_ITLB(cc,r) asm("mcr"#cc" p15, 0, "#r", c8, c5, 0 "); /**< @internalTechnology */
#define FLUSH_DTLB(cc,r) asm("mcr"#cc" p15, 0, "#r", c8, c6, 0 "); /**< @internalTechnology */
#define FLUSH_IDTLB(cc,r) asm("mcr"#cc" p15, 0, "#r", c8, c7, 0 "); /**< @internalTechnology */

// addr must include ASID
#if defined (__CPU_ARM11MP__)
#define FLUSH_ITLB_ENTRY(cc,addr) asm("mcr"#cc" p15, 0, "#addr", c8, c5, 3 "); /**< @internalTechnology */
#define FLUSH_DTLB_ENTRY(cc,addr) asm("mcr"#cc" p15, 0, "#addr", c8, c6, 3 "); /**< @internalTechnology */
#else //(__CPU_ARM11MP__)
#define FLUSH_ITLB_ENTRY(cc,addr) asm("mcr"#cc" p15, 0, "#addr", c8, c5, 1 "); /**< @internalTechnology */
#define FLUSH_DTLB_ENTRY(cc,addr) asm("mcr"#cc" p15, 0, "#addr", c8, c6, 1 "); /**< @internalTechnology */
#endif // else (__CPU_ARM11MP__)
#define FLUSH_ITLB_ASID(cc,asid) asm("mcr"#cc" p15, 0, "#asid", c8, c5, 2 "); /**< @internalTechnology */
#define FLUSH_DTLB_ASID(cc,asid) asm("mcr"#cc" p15, 0, "#asid", c8, c6, 2 "); /**< @internalTechnology */

#define DRAIN_WRITE_BUFFER(cc,r,rd) asm("mcr"#cc" p15, 0, "#r", c7, c10, 4 ");
#define DATA_MEMORY_BARRIER(cc,r) asm("mcr"#cc" p15, 0, "#r", c7, c10, 5 ");
#define FLUSH_PREFETCH_BUFFER(cc,r) asm("mcr"#cc" p15, 0, "#r", c7, c5, 4 "); /**< @internalTechnology */
#define FLUSH_BTB(cc,r) asm("mcr"#cc" p15, 0, "#r", c7, c5, 6 "); /**< @internalTechnology */
#define CPWAIT(cc,r) /**< @internalTechnology */ // not sure about this
#define GET_CAR(cc,r) asm("mrc"#cc" p15, 0, "#r", c1, c0, 2 "); /**< @internalTechnology */
#define SET_CAR(cc,r) asm("mcr"#cc" p15, 0, "#r", c1, c0, 2 "); /**< @internalTechnology */

#if defined(__CPU_PAGE_TABLES_FULLY_CACHED)
#define CACHE_MAINTENANCE_PDE_PTE_UPDATED(r) CLEAN_DCACHE_LINE(,r);\
	DRAIN_WRITE_BUFFER(,r,r);
#else
#define CACHE_MAINTENANCE_PDE_PTE_UPDATED(r) DRAIN_WRITE_BUFFER(,r,r);
#endif //end of __CPU_PAGE_TABLES_FULLY_CACHED
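// Usage sketch (illustrative only): after the kernel stores a new PTE it must
// make the update visible to the hardware page-table walker. With fully
// cached page tables the line holding the entry is cleaned first:
//
//	asm("str r1, [r0] ");						// r0 = &PTE, r1 = new entry
//	CACHE_MAINTENANCE_PDE_PTE_UPDATED(r0);		// clean line (if cached) + drain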
#elif defined(__CPU_ARMV7) // end of elif (__CPU_ARMV6)

// Define new-style cache/TLB maintenance instructions
#if defined(__CPU_CORTEX_A9__) && !defined(__CPU_ARM_A9_ERRATUM_571618_FIXED)
// ARM Cortex-A9 MPCore erratum 571618 workaround
// Execute memory barrier before interruptible CP15 operations
#define ICIALLU asm("mcr p15, 0, r0, c7, c10, 5 "); \
	asm("mcr p15, 0, r0, c7, c5, 0 "); /**< @internalTechnology */
#else
#define ICIALLU asm("mcr p15, 0, r0, c7, c5, 0 "); /**< @internalTechnology */
#endif // end of else (__CPU_CORTEX_A9__) && !(__CPU_ARM_A9_ERRATUM_571618_FIXED)
#define ICIMVAU(r) asm("mcr p15, 0, "#r", c7, c5, 1 "); /**< @internalTechnology */
#define BPIALL asm("mcr p15, 0, r0, c7, c5, 6 "); /**< @internalTechnology */
#define BPIMVA(r) asm("mcr p15, 0, "#r", c7, c5, 7 "); /**< @internalTechnology */
#define DCIMVAC(r) asm("mcr p15, 0, "#r", c7, c6, 1 "); /**< @internalTechnology */
#define DCISW(r) asm("mcr p15, 0, "#r", c7, c6, 2 "); /**< @internalTechnology */
#define DCCMVAC(r) asm("mcr p15, 0, "#r", c7, c10, 1 "); /**< @internalTechnology */
#define DCCSW(r) asm("mcr p15, 0, "#r", c7, c10, 2 "); /**< @internalTechnology */
#define DCCMVAU(r) asm("mcr p15, 0, "#r", c7, c11, 1 "); /**< @internalTechnology */
#define DCCIMVAC(r) asm("mcr p15, 0, "#r", c7, c14, 1 "); /**< @internalTechnology */
#define DCCISW(r) asm("mcr p15, 0, "#r", c7, c14, 2 "); /**< @internalTechnology */
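// Usage sketch (illustrative only): the mnemonics above take a virtual
// address in the named register, so cleaning an address range to the point
// of coherency is a line-stepping loop (a 32-byte line size is assumed here;
// real code would derive it from the cache type register):
//
//	asm("1: ");
//	DCCMVAC(r0);					// clean data line by MVA to PoC
//	asm("add r0, r0, #32 ");		// step to the next line (size assumed)
//	asm("cmp r0, r1 ");				// r1 = end address
//	asm("blo 1b ");
//	__DATA_SYNC_BARRIER__(r2);		// complete the maintenance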
#ifdef __SMP__
#if defined(__CPU_CORTEX_A9__) && !defined(__CPU_ARM_A9_ERRATUM_571618_FIXED)
// ARM Cortex-A9 MPCore erratum 571618 workaround
// Execute memory barrier before interruptible CP15 operations
#define ICIALLUIS asm("mcr p15, 0, r0, c7, c10, 5 "); \
	asm("mcr p15, 0, r0, c7, c1, 0 "); /**< @internalTechnology */
#else
#define ICIALLUIS asm("mcr p15, 0, r0, c7, c1, 0 "); /**< @internalTechnology */
#endif //end of else (__CPU_CORTEX_A9__) && !(__CPU_ARM_A9_ERRATUM_571618_FIXED)
#define BPIALLIS asm("mcr p15, 0, r0, c7, c1, 6 "); /**< @internalTechnology */
#endif // end of __SMP__

#ifdef __CPU_SPLIT_TLB
#define ITLBIALL asm("mcr p15, 0, r0, c8, c5, 0 "); /**< @internalTechnology */
#define ITLBIMVA(r) asm("mcr p15, 0, "#r", c8, c5, 1 "); /**< @internalTechnology */
#define ITLBIASID(r) asm("mcr p15, 0, "#r", c8, c5, 2 "); /**< @internalTechnology */
#define DTLBIALL asm("mcr p15, 0, r0, c8, c6, 0 "); /**< @internalTechnology */
#define DTLBIMVA(r) asm("mcr p15, 0, "#r", c8, c6, 1 "); /**< @internalTechnology */
#define DTLBIASID(r) asm("mcr p15, 0, "#r", c8, c6, 2 "); /**< @internalTechnology */
#endif // end of __CPU_SPLIT_TLB
#define UTLBIALL asm("mcr p15, 0, r0, c8, c7, 0 "); /**< @internalTechnology */
#define UTLBIMVA(r) asm("mcr p15, 0, "#r", c8, c7, 1 "); /**< @internalTechnology */
#if defined(__CPU_CORTEX_A9__) && !defined(__CPU_ARM_A9_ERRATUM_571618_FIXED)
// ARM Cortex-A9 MPCore erratum 571618 workaround
// Execute memory barrier before interruptible CP15 operations
#define UTLBIASID(r) asm("mcr p15, 0, r0, c7, c10, 5 "); \
	asm("mcr p15, 0, "#r", c8, c7, 2 "); /**< @internalTechnology */
#else
#define UTLBIASID(r) asm("mcr p15, 0, "#r", c8, c7, 2 "); /**< @internalTechnology */
#endif // end of else (__CPU_CORTEX_A9__) && !(__CPU_ARM_A9_ERRATUM_571618_FIXED)

#ifdef __CPU_SUPPORTS_TLBIMVAA
#ifdef __CPU_SPLIT_TLB
#define ITLBIMVAA(r) asm("mcr p15, 0, "#r", c8, c5, 3 "); /**< @internalTechnology */
#define DTLBIMVAA(r) asm("mcr p15, 0, "#r", c8, c6, 3 "); /**< @internalTechnology */
#endif // end of __CPU_SPLIT_TLB
#define UTLBIMVAA(r) asm("mcr p15, 0, "#r", c8, c7, 3 "); /**< @internalTechnology */
#endif // end of __CPU_SUPPORTS_TLBIMVAA

#ifdef __SMP__
#ifdef __CPU_SPLIT_TLB
#define ITLBIALLIS asm("mcr p15, 0, r0, c8, c3, 0 "); /**< @internalTechnology */
#define ITLBIMVAIS(r) asm("mcr p15, 0, "#r", c8, c3, 1 "); /**< @internalTechnology */
#if defined(__CPU_CORTEX_A9__) && !defined(__CPU_ARM_A9_ERRATUM_571618_FIXED)
// ARM Cortex-A9 MPCore erratum 571618 workaround
// Execute memory barrier before interruptible CP15 operations
#define ITLBIASIDIS(r) asm("mcr p15, 0, r0, c7, c10, 5 "); \
	asm("mcr p15, 0, "#r", c8, c3, 2 "); /**< @internalTechnology */
#else
#define ITLBIASIDIS(r) asm("mcr p15, 0, "#r", c8, c3, 2 "); /**< @internalTechnology */
#endif // end of else (__CPU_CORTEX_A9__) && !(__CPU_ARM_A9_ERRATUM_571618_FIXED)
#define DTLBIALLIS asm("mcr p15, 0, r0, c8, c3, 0 "); /**< @internalTechnology */
#define DTLBIMVAIS(r) asm("mcr p15, 0, "#r", c8, c3, 1 "); /**< @internalTechnology */
#if defined(__CPU_CORTEX_A9__) && !defined(__CPU_ARM_A9_ERRATUM_571618_FIXED)
// ARM Cortex-A9 MPCore erratum 571618 workaround
// Execute memory barrier before interruptible CP15 operations
#define DTLBIASIDIS(r) asm("mcr p15, 0, r0, c7, c10, 5 "); \
	asm("mcr p15, 0, "#r", c8, c3, 2 "); /**< @internalTechnology */
#else
#define DTLBIASIDIS(r) asm("mcr p15, 0, "#r", c8, c3, 2 "); /**< @internalTechnology */
#endif // end of else (__CPU_CORTEX_A9__) && !(__CPU_ARM_A9_ERRATUM_571618_FIXED)
#endif // end of __CPU_SPLIT_TLB
#define UTLBIALLIS asm("mcr p15, 0, r0, c8, c3, 0 "); /**< @internalTechnology */
#define UTLBIMVAIS(r) asm("mcr p15, 0, "#r", c8, c3, 1 "); /**< @internalTechnology */
#if defined(__CPU_CORTEX_A9__) && !defined(__CPU_ARM_A9_ERRATUM_571618_FIXED)
// ARM Cortex-A9 MPCore erratum 571618 workaround
// Execute memory barrier before interruptible CP15 operations
#define UTLBIASIDIS(r) asm("mcr p15, 0, r0, c7, c10, 5 "); \
	asm("mcr p15, 0, "#r", c8, c3, 2 "); /**< @internalTechnology */
#else
#define UTLBIASIDIS(r) asm("mcr p15, 0, "#r", c8, c3, 2 "); /**< @internalTechnology */
#endif // end of else (__CPU_CORTEX_A9__) && !(__CPU_ARM_A9_ERRATUM_571618_FIXED)

#ifdef __CPU_SUPPORTS_TLBIMVAA
#ifdef __CPU_SPLIT_TLB
#define ITLBIMVAAIS(r) asm("mcr p15, 0, "#r", c8, c3, 3 "); /**< @internalTechnology */
#define DTLBIMVAAIS(r) asm("mcr p15, 0, "#r", c8, c3, 3 "); /**< @internalTechnology */
#endif // end of __CPU_SPLIT_TLB
#define UTLBIMVAAIS(r) asm("mcr p15, 0, "#r", c8, c3, 3 "); /**< @internalTechnology */
#endif // end of __CPU_SUPPORTS_TLBIMVAA
#endif // end of __SMP__
#define DRAIN_WRITE_BUFFER(cc,r,rd) __DATA_SYNC_BARRIER__(r)
#define DATA_MEMORY_BARRIER(cc,r) __DATA_MEMORY_BARRIER__(r)
#define FLUSH_PREFETCH_BUFFER(cc,r) __INST_SYNC_BARRIER__(r) /**< @internalTechnology */
//#define FLUSH_BTB(cc,r) asm("mcr"#cc" p15, 0, "#r", c7, c5, 6 "); /**< @internalTechnology */
#define CPWAIT(cc,r) /**< @internalTechnology */ // not sure about this
#define GET_CAR(cc,r) asm("mrc"#cc" p15, 0, "#r", c1, c0, 2 "); /**< @internalTechnology */
#define SET_CAR(cc,r) asm("mcr"#cc" p15, 0, "#r", c1, c0, 2 "); \
	__INST_SYNC_BARRIER__(r) /**< @internalTechnology */

#if !defined(__CPU_SUPPORTS_PAGE_TABLE_WALK_TO_L1_CACHE) && defined(__CPU_PAGE_TABLES_FULLY_CACHED)
#define CACHE_MAINTENANCE_PDE_PTE_UPDATED(r) DCCMVAU(r); \
	__DATA_SYNC_BARRIER__(r);
#else
#define CACHE_MAINTENANCE_PDE_PTE_UPDATED(r) __DATA_SYNC_BARRIER__(r);
#endif // end of !(__CPU_SUPPORTS_PAGE_TABLE_WALK_TO_L1_CACHE) && (__CPU_PAGE_TABLES_FULLY_CACHED)

#endif // end of elif (__CPU_ARMV7)
/**
CPU_ARM1136_ERRATUM_317041: Bits [4:3] of Translation Table Base address registers (TTBR0, TTBR1)
do not read back correctly, but instead always return 0.
*/
#if defined(__CPU_ARM1136__) && defined(__HAS_EXTERNAL_CACHE__) && !defined(__CPU_ARM1136_ERRATUM_317041_FIXED)
#define UPDATE_PW_CACHING_ATTRIBUTES(cc,r) asm("orr"#cc" "#r", "#r", #0x18")
#else
#define UPDATE_PW_CACHING_ATTRIBUTES(cc,r)
#endif
// Instruction macros

#if defined(__CPU_ARMV6) || defined(__CPU_ARMV7)
#define SRSgen(P,U,W,mode) asm(".word %a0" : : "i" ((TInt)(0xf84d0500|(P<<24)|(U<<23)|(W<<21)|(mode))));
#define SRSIA(mode) SRSgen(0,1,0,mode)
#define SRSIAW(mode) SRSgen(0,1,1,mode)
#define SRSDB(mode) SRSgen(1,0,0,mode)
#define SRSDBW(mode) SRSgen(1,0,1,mode)
#define SRSIB(mode) SRSgen(1,1,0,mode)
#define SRSIBW(mode) SRSgen(1,1,1,mode)
#define SRSDA(mode) SRSgen(0,0,0,mode)
#define SRSDAW(mode) SRSgen(0,0,1,mode)
#define RFEgen(P,U,W,base) asm(".word %a0" : : "i" ((TInt)(0xf8100a00|(P<<24)|(U<<23)|(W<<21)|(base<<16))));
#define RFEIA(base) RFEgen(0,1,0,base)
#define RFEIAW(base) RFEgen(0,1,1,base)
#define RFEDB(base) RFEgen(1,0,0,base)
#define RFEDBW(base) RFEgen(1,0,1,base)
#define RFEIB(base) RFEgen(1,1,0,base)
#define RFEIBW(base) RFEgen(1,1,1,base)
#define RFEDA(base) RFEgen(0,0,0,base)
#define RFEDAW(base) RFEgen(0,0,1,base)
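// As a worked example, SRSDBW(MODE_SVC) is SRSgen(1,0,1,0x13) and emits the
// word 0xf96d0513, i.e. "srsdb sp!, #0x13": push the current mode's R14 and
// SPSR onto the Supervisor-mode stack with writeback. The generators simply
// OR the P/U/W addressing-mode bits and the target mode (or base register)
// into the fixed opcodes:
//
//	SRSDBW(MODE_SVC);		// emits 0xf96d0513 == "srsdb sp!, #0x13"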
#elif defined(__CPU_XSCALE__) // end of (__CPU_ARMV6) || (__CPU_ARMV7)
#define MAR(acc,RdLo,RdHi) MCRR(0,0,RdLo,RdHi,acc)
#define MARcc(cc,acc,RdLo,RdHi) MCRR(cc,0,0,RdLo,RdHi,acc)
#define MRA(acc,RdLo,RdHi) MRRC(0,0,RdLo,RdHi,acc)
#define MRAcc(cc,acc,RdLo,RdHi) MRRC(cc,0,0,RdLo,RdHi,acc)
#define MIAgen(cc,acc,Rm,Rs,opc3) asm(".word %a0" : : "i" ((TInt)0x0e200010|((cc)<<28)|((opc3)<<16)|((Rs)<<12)|((acc)<<5)|(Rm)));
#define MIA(acc,Rm,Rs) MIAgen(CC_AL,acc,Rm,Rs,0)
#define MIAPH(acc,Rm,Rs) MIAgen(CC_AL,acc,Rm,Rs,8)
#define MIABB(acc,Rm,Rs) MIAgen(CC_AL,acc,Rm,Rs,12)
#define MIATB(acc,Rm,Rs) MIAgen(CC_AL,acc,Rm,Rs,13)
#define MIABT(acc,Rm,Rs) MIAgen(CC_AL,acc,Rm,Rs,14)
#define MIATT(acc,Rm,Rs) MIAgen(CC_AL,acc,Rm,Rs,15)
#define MIAcc(cc,acc,Rm,Rs) MIAgen(cc,acc,Rm,Rs,0)
#define MIAPHcc(cc,acc,Rm,Rs) MIAgen(cc,acc,Rm,Rs,8)
#define MIABBcc(cc,acc,Rm,Rs) MIAgen(cc,acc,Rm,Rs,12)
#define MIATBcc(cc,acc,Rm,Rs) MIAgen(cc,acc,Rm,Rs,13)
#define MIABTcc(cc,acc,Rm,Rs) MIAgen(cc,acc,Rm,Rs,14)
#define MIATTcc(cc,acc,Rm,Rs) MIAgen(cc,acc,Rm,Rs,15)
#endif // end of elif (__CPU_XSCALE__)
#ifdef __CPU_ARM_HAS_CPS
#define CPSgen(im,mm,f,mode) asm(".word %a0" : : "i" ((TInt)(0xf1000000|((im)<<18)|((mm)<<17)|((f)<<6)|(mode))))
#if __ARM_ASSEMBLER_ISA__ >= 6
#define CPSIDAIF asm("cpsidaif ")
#define CPSIDAI asm("cpsidai ")
#define CPSIDIF asm("cpsidif ")
#define CPSIDI asm("cpsidi ")
#define CPSIDF asm("cpsidf ")
#define CPSIEAIF asm("cpsieaif ")
#define CPSIEI asm("cpsiei ")
#define CPSIEF asm("cpsief ")
#define CPSIEIF asm("cpsieif ")
#else
#define CPSIDAIF CPSgen(3,0,7,0) // disable all interrupts, leave mode alone
#define CPSIDAI CPSgen(3,0,6,0) // disable aborts and IRQs, leave mode alone
#define CPSIDIF CPSgen(3,0,3,0) // disable IRQs and FIQs, leave mode alone
#define CPSIDI CPSgen(3,0,2,0) // disable IRQs, leave mode alone
#define CPSIDF CPSgen(3,0,1,0) // disable FIQs, leave mode alone
#define CPSIEAIF CPSgen(2,0,7,0) // enable all interrupts, leave mode alone
#define CPSIEI CPSgen(2,0,2,0) // enable IRQs, leave mode alone
#define CPSIEF CPSgen(2,0,1,0) // enable FIQs, leave mode alone
#define CPSIEIF CPSgen(2,0,3,0) // enable IRQs and FIQs, leave mode alone
#endif // end of __ARM_ASSEMBLER_ISA__ >= 6
#define CPSIDAIFM(mode) CPSgen(3,1,7,mode) // disable all interrupts and change mode
#define CPSIDIFM(mode) CPSgen(3,1,3,mode) // disable IRQs and FIQs, and change mode
#define CPSIDAIM(mode) CPSgen(3,1,6,mode) // disable aborts and IRQs, and change mode
#define CPSIDIM(mode) CPSgen(3,1,2,mode) // disable IRQs and change mode
#define CPSIDFM(mode) CPSgen(3,1,1,mode) // disable FIQs and change mode
#define CPSIEAIFM(mode) CPSgen(2,1,7,mode) // enable all interrupts and change mode
#define CPSIEIM(mode) CPSgen(2,1,2,mode) // enable IRQs and change mode
#define CPSIEFM(mode) CPSgen(2,1,1,mode) // enable FIQs and change mode
#define CPSIEIFM(mode) CPSgen(2,1,3,mode) // enable IRQs and FIQs, and change mode
#define CPSCHM(mode) CPSgen(0,1,0,mode) // change mode, leave interrupt masks alone
#endif // end of __CPU_ARM_HAS_CPS
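// As a worked example, CPSIDIF on a pre-v6 assembler expands to
// CPSgen(3,0,3,0), which emits the word 0xf10c00c0 - exactly the encoding a
// v6 assembler produces for "cpsid if". Typical (illustrative) uses:
//
//	CPSIDIF;				// mask IRQ and FIQ, stay in the current mode
//	CPSIDIFM(MODE_IRQ);		// mask IRQ and FIQ and switch to mode_irq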
#define MODE_USR 0x10
#define MODE_FIQ 0x11
#define MODE_IRQ 0x12
#define MODE_SVC 0x13
#define MODE_ABT 0x17
#define MODE_UND 0x1b
#define MODE_SYS 0x1f

// Macros for changing processor mode and interrupt status
//
// Two instructions are necessary prior to ARMv6, and these may be interleaved.
//
// SET_MODE - sets mode and interrupt status
// SET_INTS - sets interrupt status (requires knowing the current mode at compile time)
// INTS_ON - enables interrupts (requires the cpsr value be available at run time)
// INTS_OFF - disables interrupts (requires the cpsr value be available at run time)
#ifdef __CPU_ARM_HAS_CPS

#define INTS_ALL_OFF IDIF
#define INTS_IRQ_OFF IDI
#define INTS_FIQ_ON IEF
#define INTS_ALL_ON IEIF

#define CONCAT2(a,b) a##b
#define CONCAT3(a,b,c) a##b##c

#define SET_MODE_1(rd, newMode, newInts)
#define SET_MODE_2(rd, newMode, newInts) CONCAT3(CPS, newInts, M)(newMode)

#define SET_INTS_1(rd, currentMode, newInts)
#define SET_INTS_2(rd, currentMode, newInts) CONCAT2(CPS, newInts)

#define INTS_ON_1(rd, rCpsr, newInts)
#define INTS_ON_2(rd, rCpsr, newInts) CONCAT2(CPS, newInts)

#define INTS_OFF_1(rd, rCpsr, newInts)
#define INTS_OFF_2(rd, rCpsr, newInts) CONCAT2(CPS, newInts)

#else // __CPU_ARM_HAS_CPS

#define INTS_ALL_OFF 0xc0
#define INTS_IRQ_OFF 0x80
#define INTS_FIQ_ON 0x80
#define INTS_ALL_ON 0x00

#define SET_MODE_1(rd, newMode, newInts) asm("mov "#rd", #%a0" : : "i" (newMode | newInts))
#define SET_MODE_2(rd, newMode, newInts) asm("msr cpsr_c, "#rd)

#define SET_INTS_1(rd, currentMode, newInts) SET_MODE_1(rd, currentMode, newInts)
#define SET_INTS_2(rd, currentMode, newInts) SET_MODE_2(rd, currentMode, newInts)

#define INTS_ON_1(rd, rCpsr, newInts) asm("bic "#rd", "#rCpsr", #%a0" : : "i" (newInts ^ 0xc0))
#define INTS_ON_2(rd, rCpsr, newInts) asm("msr cpsr_c, "#rd)

#define INTS_OFF_1(rd, rCpsr, newInts) asm("orr "#rd", "#rCpsr", #%a0" : : "i" (newInts))
#define INTS_OFF_2(rd, rCpsr, newInts) asm("msr cpsr_c, "#rd)

#endif // end of __CPU_ARM_HAS_CPS

#define SET_MODE(rd, newMode, newInts) SET_MODE_1(rd, newMode, newInts); SET_MODE_2(rd, newMode, newInts)
#define SET_INTS(rd, currentMode, newInts) SET_INTS_1(rd, currentMode, newInts); SET_INTS_2(rd, currentMode, newInts)
#define INTS_ON(rd, rCpsr, newInts) INTS_ON_1(rd, rCpsr, newInts); INTS_ON_2(rd, rCpsr, newInts)
#define INTS_OFF(rd, rCpsr, newInts) INTS_OFF_1(rd, rCpsr, newInts); INTS_OFF_2(rd, rCpsr, newInts)
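// Usage sketch (illustrative only): the _1/_2 split exists so that, before
// ARMv6, the two instructions can be interleaved with unrelated ones. Used
// directly, a short critical section entered from mode_svc might read:
//
//	SET_INTS(r12, MODE_SVC, INTS_ALL_OFF);		// disable IRQ and FIQ
//	// ...protected work...
//	SET_INTS(r12, MODE_SVC, INTS_ALL_ON);		// re-enable interrupts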
#if defined(__SMP__) && !defined(__CPU_ARM_HAS_LDREX_STREX_V6K)
#error SMP not allowed without v6K
#endif
#if defined(__SMP__) && !defined(__CPU_HAS_CP15_THREAD_ID_REG)
#error SMP not allowed without thread ID registers
#endif

#endif // end of __CPU_ARM
#if defined(__CPU_X86) && defined(__EPOC32__)
#define __CPU_HAS_MMU
#define __CPU_HAS_CACHE
#define __CPU_SUPPORTS_FAST_PROCESS_SWITCH

// Page/directory tables are cached on X86.
#define __CPU_PAGE_TABLES_FULLY_CACHED

#if defined(__VC32__)
#define X86_PAUSE _asm rep nop
#define __chill() do { _asm rep nop } while(0)
#elif defined(__GCC32__)
#define X86_PAUSE __asm__ __volatile__("pause ");
#define __chill() __asm__ __volatile__("pause ")
#else
#error Unknown x86 compiler
#endif

#if defined(__cplusplus)
extern "C" {
#endif

#if defined(__VC32__)
extern int _inp(unsigned short); // input byte (compiler intrinsic)
extern unsigned short _inpw(unsigned short); // input word (compiler intrinsic)
extern unsigned long _inpd(unsigned short); // input dword (compiler intrinsic)
extern int _outp(unsigned short, int); // output byte (compiler intrinsic)
extern unsigned short _outpw(unsigned short, unsigned short); // output word (compiler intrinsic)
extern unsigned long _outpd(unsigned short, unsigned long); // output dword (compiler intrinsic)

#pragma intrinsic(_inp, _inpw, _inpd, _outp, _outpw, _outpd)

#define x86_in8(port) ((TUint8)_inp(port))
#define x86_in16(port) ((TUint16)_inpw(port))
#define x86_in32(port) ((TUint32)_inpd(port))
#define x86_out8(port,data) ((void)_outp((port),(TUint8)(data)))
#define x86_out16(port,data) ((void)_outpw((port),(TUint16)(data)))
#define x86_out32(port,data) ((void)_outpd((port),(TUint32)(data)))
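// Usage sketch (illustrative only): each macro compiles down to a single
// IN/OUT instruction. For example, reading the status register of the legacy
// PC keyboard controller (I/O port 0x64 on standard PC hardware):
//
//	TUint8 status = x86_in8(0x64);		// read controller status byte
//	x86_out8(0x80, 0xAB);				// write a diagnostic byte to the POST port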
#elif defined(__GCC32__) // end of (__VC32__)
inline TUint8 _inpb(TUint16 port)
	{
	TUint8 ret;
	__asm__ __volatile__("in al, dx" : "=a" (ret) : "d" (port));
	return ret;
	}

inline TUint16 _inpw(TUint16 port)
	{
	TUint16 ret;
	__asm__ __volatile__("in ax, dx" : "=a" (ret) : "d" (port));
	return ret;
	}

inline TUint32 _inpd(TUint16 port)
	{
	TUint32 ret;
	__asm__ __volatile__("in eax, dx" : "=a" (ret) : "d" (port));
	return ret;
	}

inline void _outpb(TUint16 port, TUint8 data)
	{
	__asm__ __volatile__("out dx, al" : : "d" (port), "a" (data));
	}

inline void _outpw(TUint16 port, TUint16 data)
	{
	__asm__ __volatile__("out dx, ax" : : "d" (port), "a" (data));
	}

inline void _outpd(TUint16 port, TUint32 data)
	{
	__asm__ __volatile__("out dx, eax" : : "d" (port), "a" (data));
	}

#define x86_in8(port) (_inpb(port))
#define x86_in16(port) (_inpw(port))
#define x86_in32(port) (_inpd(port))
#define x86_out8(port,data) (_outpb((port),(TUint8)(data)))
#define x86_out16(port,data) (_outpw((port),(TUint16)(data)))
#define x86_out32(port,data) (_outpd((port),(TUint32)(data)))

#else // end of elif (__GCC32__)
#error Unknown x86 compiler
#endif // end of (__VC32__) elif __GCC32__ else

#if defined(__cplusplus)
}
#endif

#endif //__CPU_X86 && __EPOC32__
#undef __USER_MEMORY_GUARDS_ENABLED__
#if defined(_DEBUG) && !defined(__KERNEL_APIS_DISABLE_USER_MEMORY_GUARDS__)
#if defined(__MEMMODEL_MULTIPLE__) || defined(__MEMMODEL_FLEXIBLE__)
#if defined(__CPU_ARM)
#define __USER_MEMORY_GUARDS_ENABLED__
#endif
#endif
#endif // end of (_DEBUG) && !(__KERNEL_APIS_DISABLE_USER_MEMORY_GUARDS__)
#ifndef __USER_MEMORY_GUARDS_ENABLED__

#define USER_MEMORY_GUARD_SAVE_WORDS 0
#define USER_MEMORY_DOMAIN 0

#define USER_MEMORY_GUARD_SAVE(save)
#define USER_MEMORY_GUARD_RESTORE(save,temp)
#define USER_MEMORY_GUARD_ON(cc,save,temp)
#define USER_MEMORY_GUARD_OFF(cc,save,temp)
#define USER_MEMORY_GUARD_ON_IF_MODE_USR(temp)
#define USER_MEMORY_GUARD_OFF_IF_MODE_USR(temp)
#define USER_MEMORY_GUARD_ASSERT_ON(temp)
#define USER_MEMORY_GUARD_ASSERT_OFF_IF_MODE_USR(psr)
#else // __USER_MEMORY_GUARDS_ENABLED__

#define USER_MEMORY_GUARD_SAVE_WORDS 2
#define USER_MEMORY_DOMAIN 15
#define USER_MEMORY_DOMAIN_MASK (3 << (2*USER_MEMORY_DOMAIN))
#define USER_MEMORY_DOMAIN_CLIENT (1 << (2*USER_MEMORY_DOMAIN))

// Save the DACR in the named register
#define USER_MEMORY_GUARD_SAVE(save) \
	asm("mrc p15, 0, "#save", c3, c0, 0"); /* save<-DACR */

// Restore access to domain 15 (user pages) to the state previously saved
// In this case, 'save' may not be the same register as 'temp'
#define USER_MEMORY_GUARD_RESTORE(save,temp) \
	asm("mrc p15, 0, "#temp", c3, c0, 0"); /* temp<-DACR */ \
	asm("bic "#temp", "#temp", #%a0" : : "i" USER_MEMORY_DOMAIN_MASK); \
	asm("and "#save", "#save", #%a0" : : "i" USER_MEMORY_DOMAIN_MASK); \
	asm("orr "#temp", "#temp", "#save ); \
	asm("mcr p15, 0, "#temp", c3, c0, 0"); /* DACR<-temp */ \
	__INST_SYNC_BARRIER__(temp)

// Disable access to domain 15 (user pages)
// 'save' may be the same register as 'temp', but in that case the use as
// a temporary takes precedence and the value left in 'save' is undefined
#define USER_MEMORY_GUARD_ON(cc,save,temp) \
	asm("mrc"#cc" p15, 0, "#save", c3, c0, 0"); /* save<-DACR */ \
	asm("bic"#cc" "#temp", "#save", #%a0" : : "i" USER_MEMORY_DOMAIN_MASK); \
	asm("mcr"#cc" p15, 0, "#temp", c3, c0, 0"); /* DACR<-temp */ \
	__INST_SYNC_BARRIER__(temp)

// Enable access to domain 15 (user pages) as a client
// 'save' may be the same register as 'temp', but in that case the use as
// a temporary takes precedence and the value left in 'save' is undefined
#define USER_MEMORY_GUARD_OFF(cc,save,temp) \
	asm("mrc"#cc" p15, 0, "#save", c3, c0, 0"); /* save<-DACR */ \
	asm("orr"#cc" "#temp", "#save", #%a0" : : "i" USER_MEMORY_DOMAIN_CLIENT); \
	asm("mcr"#cc" p15, 0, "#temp", c3, c0, 0"); /* DACR<-temp */ \
	__INST_SYNC_BARRIER__(temp)

// Disable access to domain 15 (user pages) if SPSR indicates mode_usr
// The specified 'temp' register is left with an undefined value
#define USER_MEMORY_GUARD_ON_IF_MODE_USR(temp) \
	asm("mrs "#temp", spsr"); \
	asm("tst "#temp", #0x0f"); \
	USER_MEMORY_GUARD_ON(eq,temp,temp)

// Enable access to domain 15 (user pages) if SPSR indicates mode_usr
// The specified 'temp' register is left with an undefined value
#define USER_MEMORY_GUARD_OFF_IF_MODE_USR(temp) \
	asm("mrs "#temp", spsr"); \
	asm("tst "#temp", #0x0f"); \
	USER_MEMORY_GUARD_OFF(eq,temp,temp)

// Assert that access to domain 15 (user pages) is disabled
#define USER_MEMORY_GUARD_ASSERT_ON(temp) \
	asm("mrc p15, 0, "#temp", c3, c0, 0"); /* temp<-DACR */ \
	asm("tst "#temp", #%a0" : : "i" USER_MEMORY_DOMAIN_MASK); \
	asm("cdpne p15, 0, c0, c0, c0, 0"); /* fault if nonzero */

// Assert that access to domain 15 (user pages) is enabled if the value
// in 'psr' says we came from/are going back to user mode
#define USER_MEMORY_GUARD_ASSERT_OFF_IF_MODE_USR(psr) \
	asm("tst "#psr", #0x0f"); /* check for mode_usr */ \
	asm("mrceq p15, 0, "#psr", c3, c0, 0"); /* psr<-DACR */ \
	asm("tsteq "#psr", #%a0" : : "i" USER_MEMORY_DOMAIN_MASK); \
	asm("cdpeq p15, 0, c0, c0, c0, 0"); /* fault if no access */
#endif // end of else __USER_MEMORY_GUARDS_ENABLED__

#endif // __NK_CPU_H__