--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/os/kernelhwsrv/kernel/eka/memmodel/epoc/flexible/mmu/arm/xmmu.h	Fri Jun 15 03:10:57 2012 +0200
@@ -0,0 +1,830 @@
+// Copyright (c) 2007-2009 Nokia Corporation and/or its subsidiary(-ies).
+// All rights reserved.
+// This component and the accompanying materials are made available
+// under the terms of the License "Eclipse Public License v1.0"
+// which accompanies this distribution, and is available
+// at the URL "http://www.eclipse.org/legal/epl-v10.html".
+//
+// Initial Contributors:
+// Nokia Corporation - initial contribution.
+//
+// Contributors:
+//
+// Description:
+//
+
+#include "cache_maintenance.inl"
+
+
+/**
+ @file
+ @internalComponent
+*/
+
+#if defined(__GNUC__) && !defined(__MARM_ARM4__)
+#define __VOLATILE__ volatile
+#else
+#define __VOLATILE__
+#endif
+
+#if defined(__SMP__) && defined(__CPU_ARM11MP__)
+#define COARSE_GRAINED_TLB_MAINTENANCE
+#define BROADCAST_TLB_MAINTENANCE
+#endif
+
+
+
+FORCE_INLINE void __arm_dmb()
+	{
+	#if defined(__CPU_ARMV6)
+		// dmb instruction...
+		#ifdef __GNUC__
+		TInt zero = 0;
+		asm __VOLATILE__ ("mcr p15, 0, %0, c7, c10, 5 " : : "r"(zero));
+		#elif defined(__ARMCC__)
+		TInt zero = 0;
+		asm("mcr p15, 0, zero, c7, c10, 5 ");
+		#elif defined(__GCCXML__)
+		// empty
+		#else
+		#error Unknown compiler
+		#endif
+	#elif defined(__CPU_ARMV7)
+		// deprecated CP15 version of DMB...
+		#ifdef __GNUC__
+		TInt zero = 0;
+		asm __VOLATILE__ ("mcr p15, 0, %0, c7, c10, 5 " : : "r"(zero));
+		#elif defined(__ARMCC__)
+		TInt zero = 0;
+		asm("mcr p15, 0, zero, c7, c10, 5 ");
+		#elif defined(__GCCXML__)
+		// empty
+		#else
+		#error Unknown compiler
+		#endif
+	#else
+		// non-inline version...
+		__e32_memory_barrier();
+	#endif
+	}
+
+
+FORCE_INLINE void __arm_dsb()
+	{
+	#if defined(__CPU_ARMV6)
+		// drain write buffer...
+		#ifdef __GNUC__
+		TInt zero = 0;
+		asm __VOLATILE__ ("mcr p15, 0, %0, c7, c10, 4 " : : "r"(zero));
+		#elif defined(__ARMCC__)
+		TInt zero = 0;
+		asm("mcr p15, 0, zero, c7, c10, 4 ");
+		#elif defined(__GCCXML__)
+		// empty
+		#else
+		#error Unknown compiler
+		#endif
+	#elif defined(__CPU_ARMV7)
+		// deprecated CP15 version of DSB...
+		#ifdef __GNUC__
+		TInt zero = 0;
+		asm __VOLATILE__ ("mcr p15, 0, %0, c7, c10, 4 " : : "r"(zero));
+		#elif defined(__ARMCC__)
+		TInt zero = 0;
+		asm("mcr p15, 0, zero, c7, c10, 4 ");
+		#elif defined(__GCCXML__)
+		// empty
+		#else
+		#error Unknown compiler
+		#endif
+	#else
+		// non-inline version...
+		__e32_io_completion_barrier();
+	#endif
+	}
+
+
+extern "C" void __e32_instruction_barrier();
+
+FORCE_INLINE void __arm_isb()
+	{
+	#if defined(__CPU_ARMV6)
+		// prefetch flush...
+		#ifdef __GNUC__
+		TInt zero = 0;
+		asm __VOLATILE__ ("mcr p15, 0, %0, c7, c5, 4 " : : "r"(zero));
+		#elif defined(__ARMCC__)
+		TInt zero = 0;
+		asm("mcr p15, 0, zero, c7, c5, 4 ");
+		#elif defined(__GCCXML__)
+		// empty
+		#else
+		#error Unknown compiler
+		#endif
+	#elif defined(__CPU_ARMV7)
+		// deprecated CP15 version of ISB...
+		#ifdef __GNUC__
+		TInt zero = 0;
+		asm __VOLATILE__ ("mcr p15, 0, %0, c7, c5, 4 " : : "r"(zero));
+		#elif defined(__ARMCC__)
+		TInt zero = 0;
+		asm("mcr p15, 0, zero, c7, c5, 4 ");
+		#elif defined(__GCCXML__)
+		// empty
+		#else
+		#error Unknown compiler
+		#endif
+	#else
+		// non-inline version...
+		__e32_instruction_barrier();
+	#endif
+	}
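+
+
+// The barriers above are typically paired around page-table updates: a DSB
+// ensures the table write has completed before any TLB maintenance is
+// issued, and an ISB resynchronises instruction fetch afterwards. A minimal
+// sketch of that ordering follows (illustrative only; the guard macro
+// __XMMU_USAGE_SKETCHES__ is hypothetical and never defined, and any cache
+// maintenance the update may also require is elided here).
+#ifdef __XMMU_USAGE_SKETCHES__
+FORCE_INLINE void SketchPublishPte(TPte* aPte, TPte aNewPte)
+	{
+	*aPte = aNewPte;	// update the page table entry
+	__arm_dsb();		// complete the write before TLB maintenance
+	// ...TLB invalidation for the affected address goes here...
+	__arm_isb();		// refetch instructions under the new mapping
+	}
+#endif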
+
+
+/**
+Branch predictor invalidate all
+*/
+FORCE_INLINE void __arm_bpiall()
+	{
+	#ifdef __GNUC__
+	TInt zero = 0;
+	asm __VOLATILE__ ("mcr p15, 0, %0, c7, c5, 6 " : : "r"(zero));
+	#elif defined(__ARMCC__)
+	TInt zero = 0;
+	asm("mcr p15, 0, zero, c7, c5, 6 ");
+	#elif defined(__GCCXML__)
+	// empty
+	#else
+	#error Unknown compiler
+	#endif
+	}
+
+
+#ifdef __SMP__
+
+/**
+Branch predictor invalidate all inner-shareable
+*/
+FORCE_INLINE void __arm_bpiallis()
+	{
+	// branch predictor invalidate all inner-shareable
+	#ifdef __GNUC__
+	TInt zero = 0;
+	asm __VOLATILE__ ("mcr p15, 0, %0, c7, c1, 6 " : : "r"(zero));
+	#elif defined(__ARMCC__)
+	TInt zero = 0;
+	asm("mcr p15, 0, zero, c7, c1, 6 ");
+	#elif defined(__GCCXML__)
+	// empty
+	#else
+	#error Unknown compiler
+	#endif
+	}
+
+#endif
+
+
+/**
+Ensure that a change to the page directory is visible to the hardware
+page-table walker. Call this after a single page directory entry has
+been changed.
+*/
+FORCE_INLINE void SinglePdeUpdated(TPde* aPde)
+	{
+	CacheMaintenance::SinglePdeUpdated((TLinAddr)aPde);
+	}
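+
+
+// Usage sketch (illustrative only; __XMMU_USAGE_SKETCHES__ is a hypothetical
+// guard and SketchWritePde is not part of this interface): write the entry
+// first, then publish it to the hardware table walker.
+#ifdef __XMMU_USAGE_SKETCHES__
+FORCE_INLINE void SketchWritePde(TPde* aPde, TPde aNewPde)
+	{
+	*aPde = aNewPde;		// update the page directory entry
+	SinglePdeUpdated(aPde);	// make the change visible to the table walker
+	}
+#endif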
+
+
+#ifdef BROADCAST_TLB_MAINTENANCE
+
+/**
+Signal other CPU cores to perform TLB maintenance.
+
+@param aLinAddrAndAsid If zero, invalidate the entire TLB (InvalidateTLB);
+                       if less than KMmuAsidCount, invalidate all entries for
+                       that ASID (InvalidateTLBForAsid); otherwise treat it as
+                       a page address ORed with an ASID and invalidate that
+                       single entry (InvalidateTLBForPage).
+*/
+extern void BroadcastInvalidateTLB(TLinAddr aLinAddrAndAsid=0);
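+
+// A single word can carry the whole request because page addresses are
+// page aligned: any value below KMmuAsidCount can only be a bare ASID,
+// zero selects a full invalidate, and anything else must be a page
+// address with the ASID packed into its low bits.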
+
+#endif
+
+
+/**
+Invalidate a single I+D TLB entry on this CPU core only.
+@param aLinAddrAndAsid Virtual address of a page of memory ORed with the ASID value.
+*/
+FORCE_INLINE void LocalInvalidateTLBForPage(TLinAddr aLinAddrAndAsid)
+	{
+	#ifdef __GNUC__
+		#if defined(__CPU_ARM11MP__) // why?...
+		asm __VOLATILE__ ("mcr p15, 0, %0, c8, c7, 3 " : : "r"(aLinAddrAndAsid));
+		#else
+		asm __VOLATILE__ ("mcr p15, 0, %0, c8, c7, 1 " : : "r"(aLinAddrAndAsid));
+		#endif
+	#elif defined(__ARMCC__)
+		#if defined(__CPU_ARM11MP__) // why?...
+		asm("mcr p15, 0, aLinAddrAndAsid, c8, c7, 3 ");
+		#else
+		asm("mcr p15, 0, aLinAddrAndAsid, c8, c7, 1 ");
+		#endif
+	#elif defined(__GCCXML__)
+	// empty
+	#else
+	#error Unknown compiler
+	#endif
+	__arm_bpiall();
+	__arm_dsb();
+	__arm_isb();
+	}
+
+
+/**
+Invalidate a single I+D TLB entry on all CPU cores.
+@param aLinAddrAndAsid Virtual address of a page of memory ORed with the ASID value.
+*/
+FORCE_INLINE void InvalidateTLBForPage(TLinAddr aLinAddrAndAsid)
+	{
+	#ifdef BROADCAST_TLB_MAINTENANCE
+	BroadcastInvalidateTLB(aLinAddrAndAsid);
+	#elif !defined(__SMP__)
+	LocalInvalidateTLBForPage(aLinAddrAndAsid);
+	#else // __SMP__
+	// inner-shareable invalidate...
+	#ifdef __GNUC__
+	asm __VOLATILE__ ("mcr p15, 0, %0, c8, c3, 1 " : : "r"(aLinAddrAndAsid));
+	#elif defined(__ARMCC__)
+	asm("mcr p15, 0, aLinAddrAndAsid, c8, c3, 1 ");
+	#elif defined(__GCCXML__)
+	// empty
+	#else
+	#error Unknown compiler
+	#endif
+	__arm_bpiallis();
+	__arm_dsb();
+	__arm_isb();
+	#endif
+	}
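+
+
+// Usage sketch (illustrative only; __XMMU_USAGE_SKETCHES__, aAddr and aAsid
+// are hypothetical): because page addresses are page aligned, the ASID can
+// simply be ORed into the low bits of the address.
+#ifdef __XMMU_USAGE_SKETCHES__
+FORCE_INLINE void SketchInvalidateOnePage(TLinAddr aAddr, TUint aAsid)
+	{
+	// aAddr must be page aligned; aAsid is an 8-bit ASID value...
+	InvalidateTLBForPage(aAddr|aAsid);
+	}
+#endif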
+
+
+/**
+Invalidate entire TLB on this CPU only
+*/
+FORCE_INLINE void LocalInvalidateTLB()
+	{
+	#ifdef __GNUC__
+	asm __VOLATILE__ ("mcr p15, 0, r0, c8, c7, 0 ");
+	#elif defined(__ARMCC__)
+	TInt dummy = 0; // damned RVCT
+	asm("mcr p15, 0, dummy, c8, c7, 0 ");
+	#elif defined(__GCCXML__)
+	// empty
+	#else
+	#error Unknown compiler
+	#endif
+	__arm_bpiall();
+	__arm_dsb();
+	__arm_isb();
+	}
+
+
+/**
+Invalidate entire TLB on all CPU cores.
+*/
+FORCE_INLINE void InvalidateTLB()
+	{
+	#ifdef BROADCAST_TLB_MAINTENANCE
+	BroadcastInvalidateTLB(0);
+	#elif !defined(__SMP__)
+	LocalInvalidateTLB();
+	#else // __SMP__
+	// inner-shareable invalidate...
+	#ifdef __GNUC__
+	TInt zero = 0;
+	asm __VOLATILE__ ("mcr p15, 0, %0, c8, c3, 0 " : : "r"(zero));
+	#elif defined(__ARMCC__)
+	TInt zero = 0;
+	asm("mcr p15, 0, zero, c8, c3, 0 ");
+	#elif defined(__GCCXML__)
+	// empty
+	#else
+	#error Unknown compiler
+	#endif
+	__arm_bpiallis();
+	__arm_dsb();
+	__arm_isb();
+	#endif
+	}
+
+
+#if defined(__CPU_ARM1136__) && !defined(__CPU_ARM1136_ERRATUM_424067_FIXED)
+#define INVALIDATE_TLB_BY_ASID_BROKEN
+#endif
+#if defined(__CPU_ARM1176__) && !defined(__CPU_ARM1176_ERRATUM_424692_FIXED)
+#define INVALIDATE_TLB_BY_ASID_BROKEN
+#endif
+
+
+__ASSERT_COMPILE(KKernelOsAsid==0); // InvalidateTLBForAsid assumes this
+
+
+/**
+Invalidate all TLB entries which match the given ASID value (current CPU only)
+*/
+FORCE_INLINE void LocalInvalidateTLBForAsid(TUint aAsid)
+	{
+#ifndef INVALIDATE_TLB_BY_ASID_BROKEN
+	if(aAsid&=0xff)
+		{
+#if defined(__CPU_CORTEX_A9__) && !defined(__CPU_ARM_A9_ERRATUM_571618_FIXED)
+		__arm_dmb();	// ARM Cortex-A9 MPCore erratum 571618 workaround
+						// Execute memory barrier before interruptible CP15 operations
+#endif
+		// invalidate all I+D TLB entries for ASID...
+		#ifdef __GNUC__
+		asm __VOLATILE__ ("mcr p15, 0, %0, c8, c7, 2 " : : "r"(aAsid));
+		#elif defined(__ARMCC__)
+		asm("mcr p15, 0, aAsid, c8, c7, 2 ");
+		#elif defined(__GCCXML__)
+		// empty
+		#else
+		#error Unknown compiler
+		#endif
+		}
+	else
+		// ASID==0 means 'kernel' memory. We have to invalidate the entire TLB here
+		// as this is the only way of getting rid of global entries...
+#endif
+		{
+		// invalidate entire TLB...
+		#ifdef __GNUC__
+		asm __VOLATILE__ ("mcr p15, 0, r0, c8, c7, 0 ");
+		#elif defined(__ARMCC__)
+		TInt dummy = 0; // damned RVCT
+		asm("mcr p15, 0, dummy, c8, c7, 0 ");
+		#elif defined(__GCCXML__)
+		// empty
+		#else
+		#error Unknown compiler
+		#endif
+		}
+	__arm_bpiall();
+	__arm_dsb();
+	__arm_isb();
+	}
+
+
+/**
+Invalidate all TLB entries which match the given ASID value on all CPU cores.
+*/
+FORCE_INLINE void InvalidateTLBForAsid(TUint aAsid)
+	{
+	aAsid &= 0xff;
+	#ifdef BROADCAST_TLB_MAINTENANCE
+	BroadcastInvalidateTLB(aAsid);
+	#elif !defined(__SMP__)
+	LocalInvalidateTLBForAsid(aAsid);
+	#else // __SMP__
+	if(aAsid!=0)
+		{
+#if defined(__CPU_CORTEX_A9__) && !defined(__CPU_ARM_A9_ERRATUM_571618_FIXED)
+		__arm_dmb();	// ARM Cortex-A9 MPCore erratum 571618 workaround
+						// Execute memory barrier before interruptible CP15 operations
+#endif
+		// invalidate all I+D TLB entries for ASID...
+		#ifdef __GNUC__
+		asm __VOLATILE__ ("mcr p15, 0, %0, c8, c3, 2 " : : "r"(aAsid));
+		#elif defined(__ARMCC__)
+		asm("mcr p15, 0, aAsid, c8, c3, 2 ");
+		#elif defined(__GCCXML__)
+		// empty
+		#else
+		#error Unknown compiler
+		#endif
+		}
+	else
+		{
+		// ASID==0 means 'kernel' memory. We have to invalidate the entire TLB here
+		// as this is the only way of getting rid of global entries...
+		#ifdef __GNUC__
+		asm __VOLATILE__ ("mcr p15, 0, %0, c8, c3, 0 " : : "r"(aAsid));
+		#elif defined(__ARMCC__)
+		asm("mcr p15, 0, aAsid, c8, c3, 0 ");
+		#elif defined(__GCCXML__)
+		// empty
+		#else
+		#error Unknown compiler
+		#endif
+		}
+	__arm_bpiallis();
+	__arm_dsb();
+	__arm_isb();
+	#endif
+	}
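+
+
+// Dispatch sketch (illustrative only; __XMMU_USAGE_SKETCHES__ is a
+// hypothetical guard, and this is not the actual IPI handler): shows how a
+// receiving CPU could decode the single-word request that
+// BroadcastInvalidateTLB() distributes, using the local operations above.
+#ifdef __XMMU_USAGE_SKETCHES__
+FORCE_INLINE void SketchHandleBroadcast(TLinAddr aLinAddrAndAsid)
+	{
+	if(aLinAddrAndAsid==0)
+		LocalInvalidateTLB();								// whole TLB
+	else if(aLinAddrAndAsid<(TLinAddr)KMmuAsidCount)
+		LocalInvalidateTLBForAsid((TUint)aLinAddrAndAsid);	// one address space
+	else
+		LocalInvalidateTLBForPage(aLinAddrAndAsid);			// one page + ASID
+	}
+#endif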
+
+
+/**
+Return the virtual address of the page directory used for address space
+\a aOsAsid. Note that the global page directory is mapped after each
+address-space-specific page directory in a way which makes the pair appear
+to be a single contiguous page directory mapping the entire 32-bit virtual
+address range. I.e. the returned page directory address can simply be
+indexed by any virtual address, regardless of whether it belongs to the
+given address space or lies in the global region.
+*/
+FORCE_INLINE TPde* Mmu::PageDirectory(TInt aOsAsid)
+	{
+	return (TPde*)(KPageDirectoryBase+(aOsAsid<<KPageDirectoryShift));
+	}
+
+
+/**
+Return the virtual address of the Page Directory Entry (PDE) used to map
+the region containing the virtual address \a aAddress in the address space
+\a aOsAsid.
+*/
+FORCE_INLINE TPde* Mmu::PageDirectoryEntry(TInt aOsAsid, TLinAddr aAddress)
+	{
+	return PageDirectory(aOsAsid) + (aAddress>>KChunkShift);
+	}
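+
+// Worked example (assuming KChunkShift is 20, i.e. each PDE maps a 1MB
+// section, the ARM value used throughout this memory model): for
+// aAddress==0x80123456 the PDE index is 0x80123456>>20 == 0x801, so
+// PageDirectoryEntry(aOsAsid,0x80123456) returns
+// PageDirectory(aOsAsid)+0x801, regardless of whether that index falls in
+// the per-ASID or the global part of the directory.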
+
+
+/**
+Return the physical address mapped by the section mapping contained
+in the given Page Directory Entry \a aPde. If \a aPde is not a
+section mapping, then KPhysAddrInvalid is returned.
+*/
+FORCE_INLINE TPhysAddr Mmu::PdePhysAddr(TPde aPde)
+	{
+	if((aPde&KPdePresentMask)==KArmV6PdeSection)
+		return aPde&KPdeSectionAddrMask;
+	return KPhysAddrInvalid;
+	}
+
+
+#ifdef __CPU_MEMORY_TYPE_REMAPPING
+
+/*
+Bits in a PTE which represent access permissions...
+
+AP2 AP1 AP0   usr  wr
+ 0   0   x     n   y
+ 0   1   x     y   y
+ 1   0   x     n   n
+ 1   1   x     y   n
+*/
+
+/**
+Modify a Page Table Entry (PTE) value so it restricts access to the memory it maps.
+The returned PTE value is the same as \a aPte but with its access permissions set
+to read-only if \a aReadOnly is true, and set to allow no access if \a aReadOnly is false.
+*/
+FORCE_INLINE TPte Mmu::MakePteInaccessible(TPte aPte, TBool aReadOnly)
+	{
+	__NK_ASSERT_DEBUG((aPte&KArmV6PteTypeMask)!=KArmV6PteLargePage);
+	if(aPte&KPtePresentMask)
+		{
+		__NK_ASSERT_DEBUG((bool)(aPte&KArmV6PteSmallTEX1)==(bool)(aPte&KArmV6PteSmallXN)); // TEX1 should be a copy of XN
+		if(aReadOnly)
+			aPte |= KArmV6PteAP2; // make read only
+		else
+			aPte &= ~KPtePresentMask; // make inaccessible
+		}
+	return aPte;
+	}
+
+
+/**
+Modify a Page Table Entry (PTE) value so it allows greater access to the memory it maps.
+The returned PTE value is the same as \a aPte but with its access permissions set
+to read/write if \a aWrite is true, and set to read-only if \a aWrite is false.
+*/
+FORCE_INLINE TPte Mmu::MakePteAccessible(TPte aPte, TBool aWrite)
+	{
+	__NK_ASSERT_DEBUG((aPte&KArmV6PteTypeMask)!=KArmV6PteLargePage);
+	if((aPte&KPtePresentMask)==0)
+		{
+		// wasn't accessible, make it so...
+		if(aPte&KArmV6PteSmallTEX1)
+			aPte |= KArmV6PteSmallXN; // restore XN by copying from TEX1
+		aPte |= KArmV6PteSmallPage;
+		aPte |= KArmV6PteAP2; // make read only
+		}
+	if(aWrite)
+		aPte &= ~KArmV6PteAP2; // make writable
+	return aPte;
+	}
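+
+
+// Round-trip sketch (illustrative only; __XMMU_USAGE_SKETCHES__ is a
+// hypothetical guard, and this assumes the two helpers above are callable
+// from free code): demote a present PTE to no-access and later restore it
+// read/write; the XN state survives the trip via its TEX1 copy.
+#ifdef __XMMU_USAGE_SKETCHES__
+FORCE_INLINE void SketchStealAndRestorePage(TPte& aPte)
+	{
+	aPte = Mmu::MakePteInaccessible(aPte,EFalse);	// no access; XN kept in TEX1
+	// ...page contents paged out and later paged back in...
+	aPte = Mmu::MakePteAccessible(aPte,ETrue);		// read/write; XN restored
+	}
+#endif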
+
+
+#else // not __CPU_MEMORY_TYPE_REMAPPING
+
+/*
+Bits in a PTE which represent access permissions...
+
+AP2 AP1 AP0   usr  wr
+ 0   0   0
+ 0   0   1     n   y
+ 0   1   0
+ 0   1   1     y   y
+ 1   0   0
+ 1   0   1     n   n
+ 1   1   0     y   n
+ 1   1   1
+*/
+
+FORCE_INLINE TPte Mmu::MakePteInaccessible(TPte aPte, TBool aReadOnly)
+	{
+	__NK_ASSERT_DEBUG((aPte&KArmV6PteTypeMask)!=KArmV6PteLargePage);
+	if(aPte&KPtePresentMask)
+		{
+		if(!aReadOnly)
+			{
+			// copy XN to AP0...
+			if(aPte&KArmV6PteSmallXN)
+				aPte |= KArmV6PteAP0;
+			else
+				aPte &= ~KArmV6PteAP0;
+
+			// make inaccessible...
+			aPte &= ~KPtePresentMask;
+			}
+		else
+			{
+			// make read only...
+			aPte |= KArmV6PteAP2; // make read only
+			if(aPte&KArmV6PteAP1)
+				aPte &= ~KArmV6PteAP0; // correct AP0
+			}
+		}
+	return aPte;
+	}
+
+
+FORCE_INLINE TPte Mmu::MakePteAccessible(TPte aPte, TBool aWrite)
+	{
+	__NK_ASSERT_DEBUG((aPte&KArmV6PteTypeMask)!=KArmV6PteLargePage);
+	if((aPte&KPtePresentMask)==0)
+		{
+		// wasn't accessible, make it so...
+		if(aPte&KArmV6PteAP0)
+			aPte |= KArmV6PteSmallXN; // restore XN by copying from AP0
+		aPte |= KArmV6PteAP0;
+		aPte |= KArmV6PteSmallPage;
+
+		// make read only...
+		aPte |= KArmV6PteAP2; // make read only
+		if(aPte&KArmV6PteAP1)
+			aPte &= ~KArmV6PteAP0; // correct AP0
+		}
+	if(aWrite)
+		{
+		// make writable...
+		aPte &= ~KArmV6PteAP2;
+		aPte |= KArmV6PteAP0;
+		}
+	return aPte;
+	}
+
+#endif // __CPU_MEMORY_TYPE_REMAPPING
+
+
+/**
+Return true if a Page Table Entry (PTE) only allows read-only access to memory.
+*/
+FORCE_INLINE TBool Mmu::IsPteReadOnly(TPte aPte)
+	{
+	__NK_ASSERT_DEBUG(aPte&KPtePresentMask); // read-only state is ambiguous if pte not present
+	return aPte&KArmV6PteAP2;
+	}
+
+
+/**
+Return true if a Page Table Entry (PTE) doesn't allow any access to the memory.
+*/
+FORCE_INLINE TBool Mmu::IsPteInaccessible(TPte aPte)
+	{
+	return !(aPte&KPtePresentMask);
+	}
+
+/**
+Return true if the Page Table Entry \a aNewPte allows greater access to
+memory than \a aOldPte. Only the permissions read/write, read-only and
+no-access are considered, not any execute or privileged access.
+*/
+FORCE_INLINE TBool Mmu::IsPteMoreAccessible(TPte aNewPte, TPte aOldPte)
+	{
+	if(aNewPte&aOldPte&KPtePresentMask)			// if PTEs both present
+		return (aOldPte&~aNewPte)&KArmV6PteAP2;	// check whether new PTE is more writable
+	else
+		return aNewPte&KPtePresentMask;			// otherwise, more accessible only if new PTE is present
+	}
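+
+// For example: if the old PTE is present read-only (AP2 set) and the new
+// PTE is present read/write (AP2 clear), (aOldPte&~aNewPte)&KArmV6PteAP2
+// is non-zero and the new PTE counts as more accessible; if the old PTE
+// was not present, any present new PTE counts as more accessible.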
+
+
+/**
+Bit flag values representing the memory mapping differences governed by
+MMU Page Directory Entries (PDEs). Memory which differs in #TPdeType
+cannot be mapped using the same page table, as it would share the same
+PDE entry.
+*/
+enum TPdeType
+	{
+	/**
+	Legacy (and little-used/unused?) ARM attribute.
+	This could potentially be removed (see DMemoryMapping::PdeType()).
+	*/
+	EPdeTypeECC = 1<<0,
+
+	/**
+	Total number of combinations of #TPdeType values.
+	*/
+	ENumPdeTypes = 2
+	};
+
+
+/**
+Bit flag values representing the memory mapping differences governed by
+MMU Page Table Entries (PTEs).
+*/
+enum TPteType
+	{
+	/**
+	PTE grants user mode access to memory.
+	*/
+	EPteTypeUserAccess = EUser,
+
+	/**
+	PTE grants write access to memory.
+	*/
+	EPteTypeWritable = EReadWrite,
+
+	/**
+	PTE grants execute access to memory.
+	*/
+	EPteTypeExecutable = EExecute,
+
+	/**
+	PTE is 'global', i.e. the memory it maps is intended to be accessible
+	in all process contexts (mappings at virtual addresses >= KGlobalMemoryBase).
+	The MMU uses this to tag TLB entries as valid for all ASIDs.
+	*/
+	EPteTypeGlobal = 1<<3,
+
+	/**
+	Total number of combinations of #TPteType values.
+	*/
+	ENumPteTypes = 16
+	};
+
+__ASSERT_COMPILE(EPteTypeUserAccess==(1<<0));
+__ASSERT_COMPILE(EPteTypeWritable==(1<<1));
+__ASSERT_COMPILE(EPteTypeExecutable==(1<<2));
+
+
+#define MMU_SUPPORTS_EXECUTE_NEVER
+
+
+/**
+Return the #TPdeType for memory with the given attributes value.
+*/
+FORCE_INLINE TUint Mmu::PdeType(TMemoryAttributes aAttributes)
+	{
+	return aAttributes&EMemoryAttributeUseECC ? EPdeTypeECC : 0;
+	}
+
+
+/**
+Return the #TPteType to use for memory mappings requiring the given access permissions
+and Global attribute. The global flag is true if #EPteTypeGlobal is to be set.
+*/
+FORCE_INLINE TUint Mmu::PteType(TMappingPermissions aPermissions, TBool aGlobal)
+	{
+	__NK_ASSERT_DEBUG(aPermissions&EUser || aGlobal); // can't have supervisor local memory
+
+	TUint pteType = (aPermissions&(EUser|EReadWrite|EExecute));
+	if(aGlobal)
+		pteType |= EPteTypeGlobal;
+
+	__NK_ASSERT_DEBUG(pteType<ENumPteTypes);
+
+	return pteType;
+	}
+
+
+/**
+Test if a memory access is allowed by a given mapping type.
+
+@param aPteType           #TPteType used for a mapping. E.g. TMemoryMappingBase::PteType()
+@param aAccessPermissions Flags from #TMappingPermissions indicating the memory access
+                          required.
+
+@return True if a memory access requested with permissions \a aAccessPermissions
+        is allowed on a mapping of the specified #TPteType.
+        False if the access is not allowed.
+*/
+FORCE_INLINE TBool Mmu::CheckPteTypePermissions(TUint aPteType, TUint aAccessPermissions)
+	{
+	aAccessPermissions &= EUser|EReadWrite|EExecute;
+	return (aPteType&aAccessPermissions)==aAccessPermissions;
+	}
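+
+
+// Worked example (illustrative only; __XMMU_USAGE_SKETCHES__ is a
+// hypothetical guard): a user read-only global mapping has the PTE type
+// EPteTypeUserAccess|EPteTypeGlobal, so a plain user read is allowed but a
+// user write is refused because EPteTypeWritable is not set.
+#ifdef __XMMU_USAGE_SKETCHES__
+FORCE_INLINE void SketchCheckPermissions()
+	{
+	TUint type = Mmu::PteType(EUser,ETrue);	// user read-only, global
+	__NK_ASSERT_DEBUG(Mmu::CheckPteTypePermissions(type,EUser));
+	__NK_ASSERT_DEBUG(!Mmu::CheckPteTypePermissions(type,EUser|EReadWrite));
+	}
+#endif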
+
+
+/**
+Extract the #TMappingPermissions corresponding to a given #TPteType.
+*/
+FORCE_INLINE TMappingPermissions Mmu::PermissionsFromPteType(TUint aPteType)
+	{
+	return (TMappingPermissions)(aPteType&(EPteTypeUserAccess|EPteTypeWritable|EPteTypeExecutable));
+	}
+
+extern void UserWriteFault(TLinAddr aAddr);
+extern void UserReadFault(TLinAddr aAddr);
+
+
+//
+// TODO: Move these to NKern
+//
+
+FORCE_INLINE void inline_DisableAllInterrupts()
+	{
+#ifdef __GNUC__
+	#ifdef __CPU_ARM_HAS_CPS
+	CPSIDIF;
+	#else
+	TInt reg;
+	asm __VOLATILE__ ("mrs %0, cpsr" : "=r"(reg));
+	asm __VOLATILE__ ("orr %0, %0, #0xc0" : "+r"(reg));
+	asm __VOLATILE__ ("msr cpsr_c, %0" : : "r"(reg));
+	#endif
+/*
+#elif defined(__ARMCC__)
+	#if defined(__CPU_ARM_HAS_CPS) && __ARMCC_VERSION>=300000
+	asm("cpsid if");
+	#else
+	TInt reg;
+	asm("mrs reg, cpsr");
+	asm("orr reg, reg, #0xc0");
+	asm("msr cpsr_c, reg");
+	#endif
+*/
+#else
+	NKern::DisableAllInterrupts();
+#endif
+	}
+
+FORCE_INLINE void inline_EnableAllInterrupts()
+	{
+#ifdef __GNUC__
+	#ifdef __CPU_ARM_HAS_CPS
+	CPSIEIF;
+	#else
+	TInt reg;
+	asm __VOLATILE__ ("mrs %0, cpsr" : "=r"(reg));
+	asm __VOLATILE__ ("bic %0, %0, #0xc0" : "+r"(reg));
+	asm __VOLATILE__ ("msr cpsr_c, %0" : : "r"(reg));
+	#endif
+/*
+#elif defined(__ARMCC__)
+	#if defined(__CPU_ARM_HAS_CPS) && __ARMCC_VERSION>=300000
+	asm("cpsie if");
+	#else
+	TInt reg;
+	asm("mrs reg, cpsr");
+	asm("bic reg, reg, #0xc0");
+	asm("msr cpsr_c, reg");
+	#endif
+*/
+#else
+	NKern::EnableAllInterrupts();
+#endif
+	}
+
+
+#ifndef __SMP__
+#undef __SPIN_LOCK_IRQ
+#define __SPIN_LOCK_IRQ(lock) (inline_DisableAllInterrupts())
+#undef __SPIN_UNLOCK_IRQ
+#define __SPIN_UNLOCK_IRQ(lock) (inline_EnableAllInterrupts())
+#undef __SPIN_FLASH_IRQ
+#define __SPIN_FLASH_IRQ(lock) (inline_EnableAllInterrupts(),inline_DisableAllInterrupts(),((TBool)TRUE))
+#endif
+
+
+/**
+Indicate whether a PDE entry maps a page table.
+
+@param aPde The PDE entry in question.
+*/
+FORCE_INLINE TBool Mmu::PdeMapsPageTable(TPde aPde)
+	{
+	return (aPde & KPdeTypeMask) == KArmV6PdePageTable;
+	}
+
+
+/**
+Indicate whether a PDE entry maps a section.
+
+@param aPde The PDE entry in question.
+*/
+FORCE_INLINE TBool Mmu::PdeMapsSection(TPde aPde)
+	{
+	return (aPde & KPdeTypeMask) == KArmV6PdeSection;
+	}
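+
+
+// Decode sketch (illustrative only; __XMMU_USAGE_SKETCHES__ is a
+// hypothetical guard): classify the first-level entry mapping a given
+// address and extract the physical base where it is a section mapping.
+#ifdef __XMMU_USAGE_SKETCHES__
+FORCE_INLINE TPhysAddr SketchSectionBase(TInt aOsAsid, TLinAddr aAddress)
+	{
+	TPde pde = *Mmu::PageDirectoryEntry(aOsAsid,aAddress);
+	if(Mmu::PdeMapsSection(pde))
+		return Mmu::PdePhysAddr(pde);	// physical base of the section
+	// a page-table mapping would need a second-level (PTE) lookup instead...
+	return KPhysAddrInvalid;
+	}
+#endif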