// Copyright (c) 2007-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
//

#include "cache_maintenance.inl"


/**
@file
@internalComponent
*/

#if defined(__GNUC__) && !defined(__MARM_ARM4__)
#define __VOLATILE__ volatile
#else
#define __VOLATILE__
#endif

#if defined(__SMP__) && defined(__CPU_ARM11MP__)
#define COARSE_GRAINED_TLB_MAINTENANCE
#define BROADCAST_TLB_MAINTENANCE
#endif



FORCE_INLINE void __arm_dmb()
    {
    #if defined(__CPU_ARMV6)
        // dmb instruction...
        #ifdef __GNUC__
            TInt zero = 0;
            asm __VOLATILE__ ("mcr p15, 0, %0, c7, c10, 5 " : : "r"(zero));
        #elif defined(__ARMCC__)
            TInt zero = 0;
            asm("mcr p15, 0, zero, c7, c10, 5 ");
        #elif defined(__GCCXML__)
            // empty
        #else
            #error Unknown compiler
        #endif
    #elif defined(__CPU_ARMV7)
        // deprecated CP15 version of DMB...
        #ifdef __GNUC__
            TInt zero = 0;
            asm __VOLATILE__ ("mcr p15, 0, %0, c7, c10, 5 " : : "r"(zero));
        #elif defined(__ARMCC__)
            TInt zero = 0;
            asm("mcr p15, 0, zero, c7, c10, 5 ");
        #elif defined(__GCCXML__)
            // empty
        #else
            #error Unknown compiler
        #endif
    #else
        // non inline version...
        __e32_memory_barrier();
    #endif
    }


FORCE_INLINE void __arm_dsb()
    {
    #if defined(__CPU_ARMV6)
        // drain write buffer...
        #ifdef __GNUC__
            TInt zero = 0;
            asm __VOLATILE__ ("mcr p15, 0, %0, c7, c10, 4 " : : "r"(zero));
        #elif defined(__ARMCC__)
            TInt zero = 0;
            asm("mcr p15, 0, zero, c7, c10, 4 ");
        #elif defined(__GCCXML__)
            // empty
        #else
            #error Unknown compiler
        #endif
    #elif defined(__CPU_ARMV7)
        // deprecated CP15 version of DSB...
        #ifdef __GNUC__
            TInt zero = 0;
            asm __VOLATILE__ ("mcr p15, 0, %0, c7, c10, 4 " : : "r"(zero));
        #elif defined(__ARMCC__)
            TInt zero = 0;
            asm("mcr p15, 0, zero, c7, c10, 4 ");
        #elif defined(__GCCXML__)
            // empty
        #else
            #error Unknown compiler
        #endif
    #else
        // non inline version...
        __e32_io_completion_barrier();
    #endif
    }


extern "C" void __e32_instruction_barrier();

FORCE_INLINE void __arm_isb()
    {
    #if defined(__CPU_ARMV6)
        // prefetch flush...
        #ifdef __GNUC__
            TInt zero = 0;
            asm __VOLATILE__ ("mcr p15, 0, %0, c7, c5, 4 " : : "r"(zero));
        #elif defined(__ARMCC__)
            TInt zero = 0;
            asm("mcr p15, 0, zero, c7, c5, 4 ");
        #elif defined(__GCCXML__)
            // empty
        #else
            #error Unknown compiler
        #endif
    #elif defined(__CPU_ARMV7)
        // deprecated CP15 version of ISB...
        #ifdef __GNUC__
            TInt zero = 0;
            asm __VOLATILE__ ("mcr p15, 0, %0, c7, c5, 4 " : : "r"(zero));
        #elif defined(__ARMCC__)
            TInt zero = 0;
            asm("mcr p15, 0, zero, c7, c5, 4 ");
        #elif defined(__GCCXML__)
            // empty
        #else
            #error Unknown compiler
        #endif
    #else
        // non inline version...
        __e32_instruction_barrier();
    #endif
    }


/**
Branch predictor invalidate all
*/
FORCE_INLINE void __arm_bpiall()
    {
    #ifdef __GNUC__
        TInt zero = 0;
        asm __VOLATILE__ ("mcr p15, 0, %0, c7, c5, 6 " : : "r"(zero));
    #elif defined(__ARMCC__)
        TInt zero = 0;
        asm("mcr p15, 0, zero, c7, c5, 6 ");
    #elif defined(__GCCXML__)
        // empty
    #else
        #error Unknown compiler
    #endif
    }


#ifdef __SMP__

/**
Branch predictor invalidate all inner-shareable
*/
FORCE_INLINE void __arm_bpiallis()
    {
    // branch predictor invalidate all inner-shareable
    #ifdef __GNUC__
        TInt zero = 0;
        asm __VOLATILE__ ("mcr p15, 0, %0, c7, c1, 6 " : : "r"(zero));
    #elif defined(__ARMCC__)
        TInt zero = 0;
        asm("mcr p15, 0, zero, c7, c1, 6 ");
    #elif defined(__GCCXML__)
        // empty
    #else
        #error Unknown compiler
    #endif
    }

#endif


/**
Make sure that a change to the page directory is visible to the hardware
page-table walker. Call this function when a single page directory entry
has been changed.
*/
FORCE_INLINE void SinglePdeUpdated(TPde* aPde)
    {
    CacheMaintenance::SinglePdeUpdated((TLinAddr)aPde);
    }


#ifdef BROADCAST_TLB_MAINTENANCE

/**
Signal other CPU cores to perform TLB maintenance.

@param aLinAddrAndAsid If == 0, then InvalidateTLB;
                       if < KMmuAsidCount, then InvalidateTLBForAsid;
                       else InvalidateTLBForPage.
*/
extern void BroadcastInvalidateTLB(TLinAddr aLinAddrAndAsid=0);

#endif


/**
Invalidate a single I+D TLB entry on this CPU core only.
@param aLinAddrAndAsid Virtual address of a page of memory ORed with the ASID value.
*/
FORCE_INLINE void LocalInvalidateTLBForPage(TLinAddr aLinAddrAndAsid)
    {
    #ifdef __GNUC__
        #if defined(__CPU_ARM11MP__) // why?...
            asm __VOLATILE__ ("mcr p15, 0, %0, c8, c7, 3 " : : "r"(aLinAddrAndAsid));
        #else
            asm __VOLATILE__ ("mcr p15, 0, %0, c8, c7, 1 " : : "r"(aLinAddrAndAsid));
        #endif
    #elif defined(__ARMCC__)
        #if defined(__CPU_ARM11MP__) // why?...
            asm("mcr p15, 0, aLinAddrAndAsid, c8, c7, 3 ");
        #else
            asm("mcr p15, 0, aLinAddrAndAsid, c8, c7, 1 ");
        #endif
    #elif defined(__GCCXML__)
        // empty
    #else
        #error Unknown compiler
    #endif
    __arm_bpiall();
    __arm_dsb();
    __arm_isb();
    }


/**
Invalidate a single I+D TLB entry on all CPU cores.
@param aLinAddrAndAsid Virtual address of a page of memory ORed with the ASID value.
*/
FORCE_INLINE void InvalidateTLBForPage(TLinAddr aLinAddrAndAsid)
    {
    #ifdef BROADCAST_TLB_MAINTENANCE
        BroadcastInvalidateTLB(aLinAddrAndAsid);
    #elif !defined(__SMP__)
        LocalInvalidateTLBForPage(aLinAddrAndAsid);
    #else // __SMP__
        // inner-shareable invalidate...
        #ifdef __GNUC__
            asm __VOLATILE__ ("mcr p15, 0, %0, c8, c3, 1 " : : "r"(aLinAddrAndAsid));
        #elif defined(__ARMCC__)
            asm("mcr p15, 0, aLinAddrAndAsid, c8, c3, 1 ");
        #elif defined(__GCCXML__)
            // empty
        #else
            #error Unknown compiler
        #endif
        __arm_bpiallis();
        __arm_dsb();
        __arm_isb();
    #endif
    }
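
/*
Illustrative sketch (not part of the original header): how a caller might form the
aLinAddrAndAsid argument used by the TLB-invalidation functions above. The helper
name and the use of KPageMask are assumptions for illustration only; the documented
contract is simply "page virtual address ORed with the ASID value", and ASIDs
occupy the bottom 8 bits (see the aAsid&0xff masking later in this file).

    FORCE_INLINE void ExampleInvalidatePage(TLinAddr aAddr, TUint aAsid)
        {
        // Clear the offset-within-page bits, then OR in the 8-bit ASID...
        TLinAddr pageAndAsid = (aAddr & ~KPageMask) | (aAsid & 0xff);
        InvalidateTLBForPage(pageAndAsid); // broadcast/inner-shareable on SMP, local otherwise
        }
*/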
/**
Invalidate entire TLB on this CPU only
*/
FORCE_INLINE void LocalInvalidateTLB()
    {
    #ifdef __GNUC__
        asm __VOLATILE__ ("mcr p15, 0, r0, c8, c7, 0 ");
    #elif defined(__ARMCC__)
        TInt dummy = 0; // damned RVCT
        asm("mcr p15, 0, dummy, c8, c7, 0 ");
    #elif defined(__GCCXML__)
        // empty
    #else
        #error Unknown compiler
    #endif
    __arm_bpiall();
    __arm_dsb();
    __arm_isb();
    }


/**
Invalidate entire TLB on all CPU cores.
*/
FORCE_INLINE void InvalidateTLB()
    {
    #ifdef BROADCAST_TLB_MAINTENANCE
        BroadcastInvalidateTLB(0);
    #elif !defined(__SMP__)
        LocalInvalidateTLB();
    #else // __SMP__
        // inner-shareable invalidate...
        #ifdef __GNUC__
            TInt zero = 0;
            asm __VOLATILE__ ("mcr p15, 0, %0, c8, c3, 0 " : : "r"(zero));
        #elif defined(__ARMCC__)
            TInt zero = 0;
            asm("mcr p15, 0, zero, c8, c3, 0 ");
        #elif defined(__GCCXML__)
            // empty
        #else
            #error Unknown compiler
        #endif
        __arm_bpiallis();
        __arm_dsb();
        __arm_isb();
    #endif
    }


#if defined(__CPU_ARM1136__) && !defined(__CPU_ARM1136_ERRATUM_424067_FIXED)
#define INVALIDATE_TLB_BY_ASID_BROKEN
#endif
#if defined(__CPU_ARM1176__) && !defined(__CPU_ARM1176_ERRATUM_424692_FIXED)
#define INVALIDATE_TLB_BY_ASID_BROKEN
#endif


__ASSERT_COMPILE(KKernelOsAsid==0); // InvalidateTLBForAsid assumes this


/**
Invalidate all TLB entries which match the given ASID value (current CPU only)
*/
FORCE_INLINE void LocalInvalidateTLBForAsid(TUint aAsid)
    {
#ifndef INVALIDATE_TLB_BY_ASID_BROKEN
    if(aAsid&=0xff)
        {
        #if defined(__CPU_CORTEX_A9__) && !defined(__CPU_ARM_A9_ERRATUM_571618_FIXED)
            __arm_dmb(); // ARM Cortex-A9 MPCore erratum 571618 workaround
                         // Execute memory barrier before interruptible CP15 operations
        #endif
        // invalidate all I+D TLB entries for ASID...
        #ifdef __GNUC__
            asm __VOLATILE__ ("mcr p15, 0, %0, c8, c7, 2 " : : "r"(aAsid));
        #elif defined(__ARMCC__)
            asm("mcr p15, 0, aAsid, c8, c7, 2 ");
        #elif defined(__GCCXML__)
            // empty
        #else
            #error Unknown compiler
        #endif
        }
    else
        // ASID==0 means 'kernel' memory. We have to invalidate the entire TLB here
        // as this is the only way of getting rid of global entries...
#endif
        {
        // invalidate entire TLB...
        #ifdef __GNUC__
            asm __VOLATILE__ ("mcr p15, 0, r0, c8, c7, 0 ");
        #elif defined(__ARMCC__)
            TInt dummy = 0; // damned RVCT
            asm("mcr p15, 0, dummy, c8, c7, 0 ");
        #elif defined(__GCCXML__)
            // empty
        #else
            #error Unknown compiler
        #endif
        }
    __arm_bpiall();
    __arm_dsb();
    __arm_isb();
    }


/**
Invalidate all TLB entries which match the given ASID value on all CPU cores.
*/
FORCE_INLINE void InvalidateTLBForAsid(TUint aAsid)
    {
    aAsid &= 0xff;
    #ifdef BROADCAST_TLB_MAINTENANCE
        BroadcastInvalidateTLB(aAsid);
    #elif !defined(__SMP__)
        LocalInvalidateTLBForAsid(aAsid);
    #else // __SMP__
        if(aAsid!=0)
            {
            #if defined(__CPU_CORTEX_A9__) && !defined(__CPU_ARM_A9_ERRATUM_571618_FIXED)
                __arm_dmb(); // ARM Cortex-A9 MPCore erratum 571618 workaround
                             // Execute memory barrier before interruptible CP15 operations
            #endif
            // invalidate all I+D TLB entries for ASID...
            #ifdef __GNUC__
                asm __VOLATILE__ ("mcr p15, 0, %0, c8, c3, 2 " : : "r"(aAsid));
            #elif defined(__ARMCC__)
                asm("mcr p15, 0, aAsid, c8, c3, 2 ");
            #elif defined(__GCCXML__)
                // empty
            #else
                #error Unknown compiler
            #endif
            }
        else
            {
            // ASID==0 means 'kernel' memory. We have to invalidate the entire TLB here
            // as this is the only way of getting rid of global entries...
            #ifdef __GNUC__
                asm __VOLATILE__ ("mcr p15, 0, %0, c8, c3, 0 " : : "r"(aAsid));
            #elif defined(__ARMCC__)
                asm("mcr p15, 0, aAsid, c8, c3, 0 ");
            #elif defined(__GCCXML__)
                // empty
            #else
                #error Unknown compiler
            #endif
            }
        __arm_bpiallis();
        __arm_dsb();
        __arm_isb();
    #endif
    }


/**
Return the virtual address of the page directory used for address space
\a aOsAsid. Note that the global page directory is mapped after each
address space specific page directory in a way which means that it
appears to be a single contiguous page directory which maps the
entire 32bit virtual address range. I.e. the returned page directory
address can be simply indexed by any virtual address without regard
to whether it belongs to the given address space or lies in the
global region.
*/
FORCE_INLINE TPde* Mmu::PageDirectory(TInt aOsAsid)
    {
    return (TPde*)(KPageDirectoryBase+(aOsAsid<<KPageDirectoryShift));
    }


/**
Return the address of the Page Directory Entry (PDE) which maps the region
containing the virtual address \a aAddress in the address space \a aOsAsid.
*/
FORCE_INLINE TPde* Mmu::PageDirectoryEntry(TInt aOsAsid, TLinAddr aAddress)
    {
    return PageDirectory(aOsAsid) + (aAddress>>KChunkShift);
    }


/**
Return the physical address mapped by the section mapping contained
in the given Page Directory Entry \a aPde. If \a aPde is not a
section mapping, then KPhysAddrInvalid is returned.
*/
FORCE_INLINE TPhysAddr Mmu::PdePhysAddr(TPde aPde)
    {
    if((aPde&KPdePresentMask)==KArmV6PdeSection)
        return aPde&KPdeSectionAddrMask;
    return KPhysAddrInvalid;
    }
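
/*
Illustrative sketch (not part of the original header): looking up the PDE which
covers a virtual address and, if it is a section mapping, recovering the physical
address of that location. The function name and the KChunkMask offset arithmetic
are assumptions for illustration; only PageDirectoryEntry(), PdeMapsSection() and
PdePhysAddr() come from this file.

    FORCE_INLINE TPhysAddr ExampleSectionPhysAddr(TInt aOsAsid, TLinAddr aAddr)
        {
        TPde pde = *Mmu::PageDirectoryEntry(aOsAsid, aAddr); // PDE covering aAddr
        if(Mmu::PdeMapsSection(pde))
            return Mmu::PdePhysAddr(pde) + (aAddr & KChunkMask); // add offset within the section
        return KPhysAddrInvalid;
        }
*/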
#ifdef __CPU_MEMORY_TYPE_REMAPPING

/*
Bits in a PTE which represent access permissions...

AP2 AP1 AP0     usr wr
0   0   x       n   y
0   1   x       y   y
1   0   x       n   n
1   1   x       y   n
*/

/**
Modify a Page Table Entry (PTE) value so it restricts access to the memory it maps.
The returned PTE value is the same as \a aPte but with its access permissions set
to read-only if \a aReadOnly is true, and set to allow no access if \a aReadOnly is false.
*/
FORCE_INLINE TPte Mmu::MakePteInaccessible(TPte aPte, TBool aReadOnly)
    {
    __NK_ASSERT_DEBUG((aPte&KArmV6PteTypeMask)!=KArmV6PteLargePage);
    if(aPte&KPtePresentMask)
        {
        __NK_ASSERT_DEBUG((bool)(aPte&KArmV6PteSmallTEX1)==(bool)(aPte&KArmV6PteSmallXN)); // TEX1 should be a copy of XN
        if(aReadOnly)
            aPte |= KArmV6PteAP2; // make read only
        else
            aPte &= ~KPtePresentMask; // make inaccessible
        }
    return aPte;
    }


/**
Modify a Page Table Entry (PTE) value so it allows greater access to the memory it maps.
The returned PTE value is the same as \a aPte but with its access permissions set
to read/write if \a aWrite is true, and set to read-only if \a aWrite is false.
*/
FORCE_INLINE TPte Mmu::MakePteAccessible(TPte aPte, TBool aWrite)
    {
    __NK_ASSERT_DEBUG((aPte&KArmV6PteTypeMask)!=KArmV6PteLargePage);
    if((aPte&KPtePresentMask)==0)
        {
        // wasn't accessible, make it so...
        if(aPte&KArmV6PteSmallTEX1)
            aPte |= KArmV6PteSmallXN; // restore XN by copying from TEX1
        aPte |= KArmV6PteSmallPage;
        aPte |= KArmV6PteAP2; // make read only
        }
    if(aWrite)
        aPte &= ~KArmV6PteAP2; // make writable
    return aPte;
    }


#else // not __CPU_MEMORY_TYPE_REMAPPING

/*
Bits in a PTE which represent access permissions...

AP2 AP1 AP0     usr wr
0   0   0
0   0   1       n   y
0   1   0
0   1   1       y   y
1   0   0
1   0   1       n   n
1   1   0       y   n
1   1   1
*/

FORCE_INLINE TPte Mmu::MakePteInaccessible(TPte aPte, TBool aReadOnly)
    {
    __NK_ASSERT_DEBUG((aPte&KArmV6PteTypeMask)!=KArmV6PteLargePage);
    if(aPte&KPtePresentMask)
        {
        if(!aReadOnly)
            {
            // copy XN to AP0...
            if(aPte&KArmV6PteSmallXN)
                aPte |= KArmV6PteAP0;
            else
                aPte &= ~KArmV6PteAP0;

            // make inaccessible...
            aPte &= ~KPtePresentMask;
            }
        else
            {
            // make read only...
            aPte |= KArmV6PteAP2; // make read only
            if(aPte&KArmV6PteAP1)
                aPte &= ~KArmV6PteAP0; // correct AP0
            }
        }
    return aPte;
    }


FORCE_INLINE TPte Mmu::MakePteAccessible(TPte aPte, TBool aWrite)
    {
    __NK_ASSERT_DEBUG((aPte&KArmV6PteTypeMask)!=KArmV6PteLargePage);
    if((aPte&KPtePresentMask)==0)
        {
        // wasn't accessible, make it so...
        if(aPte&KArmV6PteAP0)
            aPte |= KArmV6PteSmallXN; // restore XN by copying from AP0
        aPte |= KArmV6PteAP0;
        aPte |= KArmV6PteSmallPage;

        // make read only...
        aPte |= KArmV6PteAP2; // make read only
        if(aPte&KArmV6PteAP1)
            aPte &= ~KArmV6PteAP0; // correct AP0
        }
    if(aWrite)
        {
        // make writable...
        aPte &= ~KArmV6PteAP2;
        aPte |= KArmV6PteAP0;
        }
    return aPte;
    }

#endif // __CPU_MEMORY_TYPE_REMAPPING
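
/*
Illustrative sketch (not part of the original header): the intended round trip
through MakePteInaccessible()/MakePteAccessible(). The starting value is a
hypothetical present, writable small-page PTE; the point is that the read-only and
no-access transitions are reversible and are reported by IsPteReadOnly() and
IsPteInaccessible(), which are defined below.

    void ExamplePtePermissionTransitions(TPte aPte)
        {
        TPte readOnly = Mmu::MakePteInaccessible(aPte, ETrue);   // writable -> read-only
        TPte noAccess = Mmu::MakePteInaccessible(aPte, EFalse);  // writable -> no access
        TPte restored = Mmu::MakePteAccessible(noAccess, ETrue); // no access -> read/write again
        __NK_ASSERT_DEBUG(Mmu::IsPteReadOnly(readOnly));
        __NK_ASSERT_DEBUG(Mmu::IsPteInaccessible(noAccess));
        __NK_ASSERT_DEBUG(!Mmu::IsPteReadOnly(restored));
        }
*/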
/**
Return true if a Page Table Entry (PTE) only allows read-only access to memory.
*/
FORCE_INLINE TBool Mmu::IsPteReadOnly(TPte aPte)
    {
    __NK_ASSERT_DEBUG(aPte&KPtePresentMask); // read-only state is ambiguous if pte not present
    return aPte&KArmV6PteAP2;
    }


/**
Return true if a Page Table Entry (PTE) doesn't allow any access to the memory.
*/
FORCE_INLINE TBool Mmu::IsPteInaccessible(TPte aPte)
    {
    return !(aPte&KPtePresentMask);
    }

/**
Return true if the Page Table Entry \a aNewPte allows greater access to
memory than \a aOldPte. Only the permissions read/write, read-only and no-access
are considered, not any execute or privileged access.
*/
FORCE_INLINE TBool Mmu::IsPteMoreAccessible(TPte aNewPte, TPte aOldPte)
    {
    if(aNewPte&aOldPte&KPtePresentMask)         // if ptes both present
        return (aOldPte&~aNewPte)&KArmV6PteAP2; //   check for more writable
    else                                        // else
        return aNewPte&KPtePresentMask;         //   check for new pte being present
    }


/**
Bit flag values representing the memory mapping differences governed by
MMU Page Directory Entries (PDEs). Memory which differs in #TPdeType cannot
be mapped using the same Page Table, as it would share the same PDE entry.
*/
enum TPdeType
    {
    /**
    Legacy (and little-used/unused?) ARM attribute.
    This could potentially be removed (see DMemoryMapping::PdeType()).
    */
    EPdeTypeECC = 1<<0,

    /**
    Total number of combinations of #TPdeType values.
    */
    ENumPdeTypes = 2
    };


/**
Bit flag values representing the memory mapping differences governed by
MMU Page Table Entries (PTEs).
*/
enum TPteType
    {
    /**
    PTE grants user mode access to memory.
    */
    EPteTypeUserAccess = EUser,

    /**
    PTE grants write access to memory.
    */
    EPteTypeWritable = EReadWrite,

    /**
    PTE grants execute access to memory.
    */
    EPteTypeExecutable = EExecute,

    /**
    PTE is 'global'. I.e. the memory it maps is intended to be accessible
    in all process contexts, i.e. for mappings at virtual address >= KGlobalMemoryBase.
    The MMU uses this to tag TLB entries as valid for all ASIDs.
    */
    EPteTypeGlobal = 1<<3,

    /**
    Total number of combinations of #TPteType values.
    */
    ENumPteTypes = 16
    };

__ASSERT_COMPILE(EPteTypeUserAccess==(1<<0));
__ASSERT_COMPILE(EPteTypeWritable==(1<<1));
__ASSERT_COMPILE(EPteTypeExecutable==(1<<2));


#define MMU_SUPPORTS_EXECUTE_NEVER


/**
Return the #TPdeType for memory with the given attributes value.
*/
FORCE_INLINE TUint Mmu::PdeType(TMemoryAttributes aAttributes)
    {
    return aAttributes&EMemoryAttributeUseECC ? EPdeTypeECC : 0;
    }


/**
Return the #TPteType to use for memory mappings requiring the given access permissions
and Global attribute. The global flag is true if #EPteTypeGlobal is to be set.
*/
FORCE_INLINE TUint Mmu::PteType(TMappingPermissions aPermissions, TBool aGlobal)
    {
    __NK_ASSERT_DEBUG(aPermissions&EUser || aGlobal); // can't have supervisor local memory

    TUint pteType = (aPermissions&(EUser|EReadWrite|EExecute));
    if(aGlobal)
        pteType |= EPteTypeGlobal;

    __NK_ASSERT_DEBUG(pteType<ENumPteTypes);

    return pteType;
    }


FORCE_INLINE void inline_DisableAllInterrupts()
    {
#ifdef __GNUC__
    #ifdef __CPU_ARM_HAS_CPS
        CPSIDIF;
    #else
        TInt reg;
        asm __VOLATILE__ ("mrs %0, cpsr" : "=r"(reg));
        asm __VOLATILE__ ("orr %0, %0, #0xc0" : : "r"(reg));
        asm __VOLATILE__ ("msr cpsr_c, %0" : : "r"(reg));
    #endif
/*
#elif defined(__ARMCC__)
    #if defined(__CPU_ARM_HAS_CPS) && __ARMCC_VERSION>=300000
        asm("cpsid if");
    #else
        TInt reg;
        asm("mrs reg, cpsr");
        asm("orr reg, reg, #0xc0");
        asm("msr cpsr_c, reg");
    #endif
*/
#else
    NKern::DisableAllInterrupts();
#endif
    }

FORCE_INLINE void inline_EnableAllInterrupts()
    {
#ifdef __GNUC__
    #ifdef __CPU_ARM_HAS_CPS
        CPSIEIF;
    #else
        TInt reg;
        asm __VOLATILE__ ("mrs %0, cpsr" : "=r"(reg));
        asm __VOLATILE__ ("bic %0, %0, #0xc0" : : "r"(reg));
        asm __VOLATILE__ ("msr cpsr_c, %0" : : "r"(reg));
    #endif
/*
#elif defined(__ARMCC__)
    #if defined(__CPU_ARM_HAS_CPS) && __ARMCC_VERSION>=300000
        asm("cpsie if");
    #else
        TInt reg;
        asm("mrs reg, cpsr");
        asm("bic reg, reg, #0xc0");
        asm("msr cpsr_c, reg");
    #endif
*/
#else
    NKern::EnableAllInterrupts();
#endif
    }


#ifndef __SMP__
#undef __SPIN_LOCK_IRQ
#define __SPIN_LOCK_IRQ(lock)       (inline_DisableAllInterrupts())
#undef __SPIN_UNLOCK_IRQ
#define __SPIN_UNLOCK_IRQ(lock)     (inline_EnableAllInterrupts())
#undef __SPIN_FLASH_IRQ
#define __SPIN_FLASH_IRQ(lock)      (inline_EnableAllInterrupts(),inline_DisableAllInterrupts(),((TBool)TRUE))
#endif


/**
Indicate whether a PDE entry maps a page table.

@param aPde The PDE entry in question.
*/
FORCE_INLINE TBool Mmu::PdeMapsPageTable(TPde aPde)
    {
    return (aPde & KPdeTypeMask) == KArmV6PdePageTable;
    }


/**
Indicate whether a PDE entry maps a section.

@param aPde The PDE entry in question.
*/
FORCE_INLINE TBool Mmu::PdeMapsSection(TPde aPde)
    {
    return (aPde & KPdeTypeMask) == KArmV6PdeSection;
    }
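
/*
Illustrative sketch (not part of the original header): how Mmu::PteType() composes
a #TPteType index from mapping permissions. The permission combinations chosen here
are hypothetical; they simply show that user read/write memory yields
EPteTypeUserAccess|EPteTypeWritable, and that the same permissions for a global
mapping additionally set EPteTypeGlobal.

    void ExamplePteTypes()
        {
        TUint local  = Mmu::PteType((TMappingPermissions)(EUser|EReadWrite), EFalse);
        TUint global = Mmu::PteType((TMappingPermissions)(EUser|EReadWrite), ETrue);
        __NK_ASSERT_DEBUG(local == (TUint)(EPteTypeUserAccess|EPteTypeWritable));
        __NK_ASSERT_DEBUG(global == (TUint)(EPteTypeUserAccess|EPteTypeWritable|EPteTypeGlobal));
        }
*/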