Update contrib.
// Copyright (c) 2007-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
//

#include "cache_maintenance.inl"
#if defined(__GNUC__) && !defined(__MARM_ARM4__)
#define __VOLATILE__ volatile
#else
#define __VOLATILE__
#endif

#if defined(__SMP__) && defined(__CPU_ARM11MP__)
#define COARSE_GRAINED_TLB_MAINTENANCE
#define BROADCAST_TLB_MAINTENANCE
#endif
FORCE_INLINE void __arm_dmb()
	{
	#if defined(__CPU_ARMV6)
		// dmb instruction...
		#if defined(__GNUC__)
			TInt zero = 0;
			asm __VOLATILE__ ("mcr p15, 0, %0, c7, c10, 5 " : : "r"(zero));
		#elif defined(__ARMCC__)
			TInt zero = 0;
			asm("mcr p15, 0, zero, c7, c10, 5 ");
		#elif defined(__GCCXML__)
			// empty
		#else
			#error Unknown compiler
		#endif
	#elif defined(__CPU_ARMV7)
		// deprecated CP15 version of DMB...
		#if defined(__GNUC__)
			TInt zero = 0;
			asm __VOLATILE__ ("mcr p15, 0, %0, c7, c10, 5 " : : "r"(zero));
		#elif defined(__ARMCC__)
			TInt zero = 0;
			asm("mcr p15, 0, zero, c7, c10, 5 ");
		#elif defined(__GCCXML__)
			// empty
		#else
			#error Unknown compiler
		#endif
	#else
		// non inline version...
		__e32_memory_barrier();
	#endif
	}
FORCE_INLINE void __arm_dsb()
	{
	#if defined(__CPU_ARMV6)
		// drain write buffer...
		#if defined(__GNUC__)
			TInt zero = 0;
			asm __VOLATILE__ ("mcr p15, 0, %0, c7, c10, 4 " : : "r"(zero));
		#elif defined(__ARMCC__)
			TInt zero = 0;
			asm("mcr p15, 0, zero, c7, c10, 4 ");
		#elif defined(__GCCXML__)
			// empty
		#else
			#error Unknown compiler
		#endif
	#elif defined(__CPU_ARMV7)
		// deprecated CP15 version of DSB...
		#if defined(__GNUC__)
			TInt zero = 0;
			asm __VOLATILE__ ("mcr p15, 0, %0, c7, c10, 4 " : : "r"(zero));
		#elif defined(__ARMCC__)
			TInt zero = 0;
			asm("mcr p15, 0, zero, c7, c10, 4 ");
		#elif defined(__GCCXML__)
			// empty
		#else
			#error Unknown compiler
		#endif
	#else
		// non inline version...
		__e32_io_completion_barrier();
	#endif
	}
extern "C" void __e32_instruction_barrier();

FORCE_INLINE void __arm_isb()
	{
	#if defined(__CPU_ARMV6)
		// prefetch flush...
		#if defined(__GNUC__)
			TInt zero = 0;
			asm __VOLATILE__ ("mcr p15, 0, %0, c7, c5, 4 " : : "r"(zero));
		#elif defined(__ARMCC__)
			TInt zero = 0;
			asm("mcr p15, 0, zero, c7, c5, 4 ");
		#elif defined(__GCCXML__)
			// empty
		#else
			#error Unknown compiler
		#endif
	#elif defined(__CPU_ARMV7)
		// deprecated CP15 version of ISB...
		#if defined(__GNUC__)
			TInt zero = 0;
			asm __VOLATILE__ ("mcr p15, 0, %0, c7, c5, 4 " : : "r"(zero));
		#elif defined(__ARMCC__)
			TInt zero = 0;
			asm("mcr p15, 0, zero, c7, c5, 4 ");
		#elif defined(__GCCXML__)
			// empty
		#else
			#error Unknown compiler
		#endif
	#else
		// non inline version...
		__e32_instruction_barrier();
	#endif
	}
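
// Illustrative note (not from the original source): the TLB maintenance helpers
// below pair each CP15 invalidate with this barrier sequence, so the operation has
// completed and the pipeline has been flushed before execution continues:
//
//     __arm_bpiall();   // discard branch predictor state
//     __arm_dsb();      // wait for the TLB/branch predictor maintenance to complete
//     __arm_isb();      // refetch subsequent instructions with the new translations in effect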
/**
Branch predictor invalidate all
*/
FORCE_INLINE void __arm_bpiall()
	{
	#if defined(__GNUC__)
		TInt zero = 0;
		asm __VOLATILE__ ("mcr p15, 0, %0, c7, c5, 6 " : : "r"(zero));
	#elif defined(__ARMCC__)
		TInt zero = 0;
		asm("mcr p15, 0, zero, c7, c5, 6 ");
	#elif defined(__GCCXML__)
		// empty
	#else
		#error Unknown compiler
	#endif
	}
/**
Branch predictor invalidate all inner-shareable
*/
FORCE_INLINE void __arm_bpiallis()
	{
	// branch predictor invalidate all inner-shareable
	#if defined(__GNUC__)
		TInt zero = 0;
		asm __VOLATILE__ ("mcr p15, 0, %0, c7, c1, 6 " : : "r"(zero));
	#elif defined(__ARMCC__)
		TInt zero = 0;
		asm("mcr p15, 0, zero, c7, c1, 6 ");
	#elif defined(__GCCXML__)
		// empty
	#else
		#error Unknown compiler
	#endif
	}
/**
This makes sure that a change in the page directory is visible to the hardware
page-table walk. Call this function when a single page directory entry has been changed.
*/
FORCE_INLINE void SinglePdeUpdated(TPde* aPde)
	{
	CacheMaintenance::SinglePdeUpdated((TLinAddr)aPde);
	}
#ifdef BROADCAST_TLB_MAINTENANCE

/**
Signal other CPU cores to perform TLB maintenance.

@param aLinAddrAndAsid	If == 0, then InvalidateTLB;
						if < KMmuAsidCount, then InvalidateTLBForAsid;
						else InvalidateTLBForPage.
*/
extern void BroadcastInvalidateTLB(TLinAddr aLinAddrAndAsid=0);

#endif
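
// Minimal sketch (hypothetical helper, not the real implementation, which lives in
// the platform .cpp code) of the dispatch described by the comment above:
//
//     void DoLocalTLBInvalidate(TLinAddr aLinAddrAndAsid)
//         {
//         if(aLinAddrAndAsid==0)
//             LocalInvalidateTLB();                         // flush everything
//         else if(aLinAddrAndAsid<KMmuAsidCount)
//             LocalInvalidateTLBForAsid(aLinAddrAndAsid);   // flush one address space
//         else
//             LocalInvalidateTLBForPage(aLinAddrAndAsid);   // flush one page+ASID
//         }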
/**
Invalidate a single I+D TLB entry on this CPU core only.
@param aLinAddrAndAsid Virtual address of a page of memory ORed with the ASID value.
*/
FORCE_INLINE void LocalInvalidateTLBForPage(TLinAddr aLinAddrAndAsid)
	{
	#if defined(__GNUC__)
		#if defined(__CPU_ARM11MP__) // why?...
			asm __VOLATILE__ ("mcr p15, 0, %0, c8, c7, 3 " : : "r"(aLinAddrAndAsid));
		#else
			asm __VOLATILE__ ("mcr p15, 0, %0, c8, c7, 1 " : : "r"(aLinAddrAndAsid));
		#endif
	#elif defined(__ARMCC__)
		#if defined(__CPU_ARM11MP__) // why?...
			asm("mcr p15, 0, aLinAddrAndAsid, c8, c7, 3 ");
		#else
			asm("mcr p15, 0, aLinAddrAndAsid, c8, c7, 1 ");
		#endif
	#elif defined(__GCCXML__)
		// empty
	#else
		#error Unknown compiler
	#endif
	__arm_bpiall();
	__arm_dsb();
	__arm_isb();
	}
/**
Invalidate a single I+D TLB entry on all CPU cores.
@param aLinAddrAndAsid Virtual address of a page of memory ORed with the ASID value.
*/
FORCE_INLINE void InvalidateTLBForPage(TLinAddr aLinAddrAndAsid)
	{
	#ifdef BROADCAST_TLB_MAINTENANCE
		BroadcastInvalidateTLB(aLinAddrAndAsid);
	#elif !defined(__SMP__)
		LocalInvalidateTLBForPage(aLinAddrAndAsid);
	#else
		// inner-shareable invalidate...
		#if defined(__GNUC__)
			asm __VOLATILE__ ("mcr p15, 0, %0, c8, c3, 1 " : : "r"(aLinAddrAndAsid));
		#elif defined(__ARMCC__)
			asm("mcr p15, 0, aLinAddrAndAsid, c8, c3, 1 ");
		#elif defined(__GCCXML__)
			// empty
		#else
			#error Unknown compiler
		#endif
		__arm_bpiallis();
		__arm_dsb();
		__arm_isb();
	#endif
	}
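
// Usage sketch (hypothetical values): the argument packs a page's virtual address
// and the owning ASID into a single word, as the parameter description above says:
//
//     TLinAddr page = 0x40000000;        // page-aligned virtual address (example)
//     TUint asid = 5;                    // ASID of the owning address space (example)
//     InvalidateTLBForPage(page|asid);   // evict the stale translation on all cores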
/**
Invalidate entire TLB on this CPU only
*/
FORCE_INLINE void LocalInvalidateTLB()
	{
	#if defined(__GNUC__)
		asm __VOLATILE__ ("mcr p15, 0, r0, c8, c7, 0 ");
	#elif defined(__ARMCC__)
		TInt dummy = 0; // damned RVCT
		asm("mcr p15, 0, dummy, c8, c7, 0 ");
	#elif defined(__GCCXML__)
		// empty
	#else
		#error Unknown compiler
	#endif
	__arm_bpiall();
	__arm_dsb();
	__arm_isb();
	}
/**
Invalidate entire TLB on all CPU cores.
*/
FORCE_INLINE void InvalidateTLB()
	{
	#ifdef BROADCAST_TLB_MAINTENANCE
		BroadcastInvalidateTLB(0);
	#elif !defined(__SMP__)
		LocalInvalidateTLB();
	#else
		// inner-shareable invalidate...
		#if defined(__GNUC__)
			TInt zero = 0;
			asm __VOLATILE__ ("mcr p15, 0, %0, c8, c3, 0 " : : "r"(zero));
		#elif defined(__ARMCC__)
			TInt zero = 0;
			asm("mcr p15, 0, zero, c8, c3, 0 ");
		#elif defined(__GCCXML__)
			// empty
		#else
			#error Unknown compiler
		#endif
		__arm_bpiallis();
		__arm_dsb();
		__arm_isb();
	#endif
	}
#if defined(__CPU_ARM1136__) && !defined(__CPU_ARM1136_ERRATUM_424067_FIXED)
#define INVALIDATE_TLB_BY_ASID_BROKEN
#endif
#if defined(__CPU_ARM1176__) && !defined(__CPU_ARM1176_ERRATUM_424692_FIXED)
#define INVALIDATE_TLB_BY_ASID_BROKEN
#endif

__ASSERT_COMPILE(KKernelOsAsid==0); // InvalidateTLBForAsid assumes this
/**
Invalidate all TLB entries which match the given ASID value (current CPU only)
*/
FORCE_INLINE void LocalInvalidateTLBForAsid(TUint aAsid)
	{
#ifndef INVALIDATE_TLB_BY_ASID_BROKEN
	if(aAsid)
		{
#if defined(__CPU_CORTEX_A9__) && !defined(__CPU_ARM_A9_ERRATUM_571618_FIXED)
		__arm_dmb();	// ARM Cortex-A9 MPCore erratum 571618 workaround
						// Execute memory barrier before interruptible CP15 operations
#endif
		// invalidate all I+D TLB entries for ASID...
		#if defined(__GNUC__)
			asm __VOLATILE__ ("mcr p15, 0, %0, c8, c7, 2 " : : "r"(aAsid));
		#elif defined(__ARMCC__)
			asm("mcr p15, 0, aAsid, c8, c7, 2 ");
		#elif defined(__GCCXML__)
			// empty
		#else
			#error Unknown compiler
		#endif
		}
	else
		// ASID==0 means 'kernel' memory. We have to invalidate the entire TLB here
		// as this is the only way of getting rid of global entries...
#endif
		{
		// invalidate entire TLB...
		#if defined(__GNUC__)
			asm __VOLATILE__ ("mcr p15, 0, r0, c8, c7, 0 ");
		#elif defined(__ARMCC__)
			TInt dummy = 0; // damned RVCT
			asm("mcr p15, 0, dummy, c8, c7, 0 ");
		#elif defined(__GCCXML__)
			// empty
		#else
			#error Unknown compiler
		#endif
		}
	__arm_bpiall();
	__arm_dsb();
	__arm_isb();
	}
/**
Invalidate all TLB entries which match the given ASID value on all CPU cores.
*/
FORCE_INLINE void InvalidateTLBForAsid(TUint aAsid)
	{
	#ifdef BROADCAST_TLB_MAINTENANCE
		BroadcastInvalidateTLB(aAsid);
	#elif !defined(__SMP__)
		LocalInvalidateTLBForAsid(aAsid);
	#else
		if(aAsid)
			{
#if defined(__CPU_CORTEX_A9__) && !defined(__CPU_ARM_A9_ERRATUM_571618_FIXED)
			__arm_dmb();	// ARM Cortex-A9 MPCore erratum 571618 workaround
							// Execute memory barrier before interruptible CP15 operations
#endif
			// invalidate all I+D TLB entries for ASID...
			#if defined(__GNUC__)
				asm __VOLATILE__ ("mcr p15, 0, %0, c8, c3, 2 " : : "r"(aAsid));
			#elif defined(__ARMCC__)
				asm("mcr p15, 0, aAsid, c8, c3, 2 ");
			#elif defined(__GCCXML__)
				// empty
			#else
				#error Unknown compiler
			#endif
			}
		else
			{
			// ASID==0 means 'kernel' memory. We have to invalidate the entire TLB here
			// as this is the only way of getting rid of global entries...
			#if defined(__GNUC__)
				asm __VOLATILE__ ("mcr p15, 0, %0, c8, c3, 0 " : : "r"(aAsid));
			#elif defined(__ARMCC__)
				asm("mcr p15, 0, aAsid, c8, c3, 0 ");
			#elif defined(__GCCXML__)
				// empty
			#else
				#error Unknown compiler
			#endif
			}
		__arm_bpiallis();
		__arm_dsb();
		__arm_isb();
	#endif
	}
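
// Usage sketch (hypothetical): when an address space is torn down or its ASID is
// recycled, all translations tagged with that ASID must be discarded; passing 0
// (the kernel ASID) falls back to a full flush because global entries are not
// tagged with any ASID:
//
//     InvalidateTLBForAsid(deadAsid);   // deadAsid!=0: per-ASID invalidate on all cores
//     InvalidateTLBForAsid(0);          // kernel/global mappings changed: full TLB flush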
/**
Return the virtual address of the page directory used for address space
\a aOsAsid. Note, the global page directory is mapped after each
address space specific page directory in a way which means that it
appears to be a single contiguous page directory which maps the
entire 32bit virtual address range. I.e. the returned page directory
address can be simply indexed by any virtual address without regard
to whether it belongs to the given address space or lies in the
global region.
*/
FORCE_INLINE TPde* Mmu::PageDirectory(TInt aOsAsid)
	{
	return (TPde*)(KPageDirectoryBase+(aOsAsid<<KPageDirectoryShift));
	}
/**
Return the virtual address of the Page Directory Entry (PDE) used to map
the region containing the virtual address \a aAddress in the address space
\a aOsAsid.
*/
FORCE_INLINE TPde* Mmu::PageDirectoryEntry(TInt aOsAsid, TLinAddr aAddress)
	{
	return PageDirectory(aOsAsid) + (aAddress>>KChunkShift);
	}
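
// Worked example (hypothetical address): because the global page directory is mapped
// immediately after each per-ASID page directory, any virtual address can be used as
// an index without first checking whether it is process-local or global:
//
//     TLinAddr addr = 0x80100000;                         // example virtual address
//     TPde* pde = Mmu::PageDirectoryEntry(osAsid, addr);  // same as PageDirectory(osAsid)+(addr>>KChunkShift)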
/**
Return the physical address mapped by the section mapping contained
in the given Page Directory Entry \a aPde. If \a aPde is not a
section mapping, then KPhysAddrInvalid is returned.
*/
FORCE_INLINE TPhysAddr Mmu::PdePhysAddr(TPde aPde)
	{
	if((aPde&KPdePresentMask)==KArmV6PdeSection)
		return aPde&KPdeSectionAddrMask;
	return KPhysAddrInvalid;
	}
#ifdef __CPU_MEMORY_TYPE_REMAPPING

/*
Bits in a PTE which represent access permissions...
*/

/**
Modify a Page Table Entry (PTE) value so it restricts access to the memory it maps.
The returned PTE value is the same as \a aPte but with its access permissions set
to read-only if \a aReadOnly is true, and set to allow no access if \a aReadOnly is false.
*/
FORCE_INLINE TPte Mmu::MakePteInaccessible(TPte aPte, TBool aReadOnly)
	{
	__NK_ASSERT_DEBUG((aPte&KArmV6PteTypeMask)!=KArmV6PteLargePage);
	if(aPte&KPtePresentMask)
		{
		__NK_ASSERT_DEBUG((bool)(aPte&KArmV6PteSmallTEX1)==(bool)(aPte&KArmV6PteSmallXN)); // TEX1 should be a copy of XN
		if(aReadOnly)
			aPte |= KArmV6PteAP2; // make read only
		else
			aPte &= ~KPtePresentMask; // make inaccessible
		}
	return aPte;
	}
/**
Modify a Page Table Entry (PTE) value so it allows greater access to the memory it maps.
The returned PTE value is the same as \a aPte but with its access permissions set
to read/write if \a aWrite is true, and set to read-only if \a aWrite is false.
*/
FORCE_INLINE TPte Mmu::MakePteAccessible(TPte aPte, TBool aWrite)
	{
	__NK_ASSERT_DEBUG((aPte&KArmV6PteTypeMask)!=KArmV6PteLargePage);
	if((aPte&KPtePresentMask)==0)
		{
		// wasn't accessible, make it so...
		if(aPte&KArmV6PteSmallTEX1)
			aPte |= KArmV6PteSmallXN; // restore XN by copying from TEX1
		aPte |= KArmV6PteSmallPage;
		aPte |= KArmV6PteAP2; // make read only
		}
	if(aWrite)
		aPte &= ~KArmV6PteAP2; // make writable
	return aPte;
	}
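
// Illustrative sketch (not part of the original header): these helpers only compute
// a new PTE value; a caller is expected to write the value back to the page table
// and then do the TLB maintenance itself, roughly along these lines (ptePtr, pageAddr
// and asid are hypothetical):
//
//     TPte pte = *ptePtr;
//     *ptePtr = Mmu::MakePteInaccessible(pte,ETrue);   // restrict to read-only
//     InvalidateTLBForPage(pageAddr|asid);             // then evict any stale, writable TLB entry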
#else // not __CPU_MEMORY_TYPE_REMAPPING

/*
Bits in a PTE which represent access permissions...
*/

FORCE_INLINE TPte Mmu::MakePteInaccessible(TPte aPte, TBool aReadOnly)
	{
	__NK_ASSERT_DEBUG((aPte&KArmV6PteTypeMask)!=KArmV6PteLargePage);
	if(aPte&KPtePresentMask)
		{
		if(!aReadOnly)
			{
			// copy XN to AP0...
			if(aPte&KArmV6PteSmallXN)
				aPte |= KArmV6PteAP0;
			else
				aPte &= ~KArmV6PteAP0;

			// make inaccessible...
			aPte &= ~KPtePresentMask;
			}
		else
			{
			// make read only...
			aPte |= KArmV6PteAP2; // make read only
			if(aPte&KArmV6PteAP1)
				aPte &= ~KArmV6PteAP0; // correct AP0
			}
		}
	return aPte;
	}
FORCE_INLINE TPte Mmu::MakePteAccessible(TPte aPte, TBool aWrite)
	{
	__NK_ASSERT_DEBUG((aPte&KArmV6PteTypeMask)!=KArmV6PteLargePage);
	if((aPte&KPtePresentMask)==0)
		{
		// wasn't accessible, make it so...
		if(aPte&KArmV6PteAP0)
			aPte |= KArmV6PteSmallXN; // restore XN by copying from AP0
		aPte |= KArmV6PteAP0;
		aPte |= KArmV6PteSmallPage;

		// make read only...
		aPte |= KArmV6PteAP2; // make read only
		if(aPte&KArmV6PteAP1)
			aPte &= ~KArmV6PteAP0; // correct AP0
		}
	if(aWrite)
		{
		// make writable...
		aPte &= ~KArmV6PteAP2;
		aPte |= KArmV6PteAP0;
		}
	return aPte;
	}
#endif // __CPU_MEMORY_TYPE_REMAPPING
/**
Return true if a Page Table Entry (PTE) only allows read-only access to memory.
*/
FORCE_INLINE TBool Mmu::IsPteReadOnly(TPte aPte)
	{
	__NK_ASSERT_DEBUG(aPte&KPtePresentMask); // read-only state is ambiguous if pte not present
	return aPte&KArmV6PteAP2;
	}
/**
Return true if a Page Table Entry (PTE) doesn't allow any access to the memory.
*/
FORCE_INLINE TBool Mmu::IsPteInaccessible(TPte aPte)
	{
	return !(aPte&KPtePresentMask);
	}
/**
Return true if the Page Table Entry \a aNewPte allows greater access to
memory than \a aOldPte. Only the permissions read/write, read-only and no-access
are considered, not any execute or privileged access.
*/
FORCE_INLINE TBool Mmu::IsPteMoreAccessible(TPte aNewPte, TPte aOldPte)
	{
	if(aNewPte&aOldPte&KPtePresentMask)			// if ptes both present
		return (aOldPte&~aNewPte)&KArmV6PteAP2;	// check for more writable
	else
		return aNewPte&KPtePresentMask;			// check for new pte being present
	}
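
// Illustrative note: this predicate lets callers skip eager TLB maintenance when a
// PTE change only grants *more* access, because the worst a stale, more restrictive
// TLB entry can cause is a spurious permission fault rather than an illegal access.
// E.g. (hypothetical values):
//
//     TPte old = Mmu::MakePteInaccessible(pte,ETrue);   // read-only
//     TPte neu = Mmu::MakePteAccessible(pte,ETrue);     // read/write
//     // Mmu::IsPteMoreAccessible(neu,old) returns true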
/**
Bit flag values representing the memory mapping differences governed by
MMU Page Directory Entries (PDEs). Memory which differs in #TPdeType can
not be mapped using the same Page Table, as they would share the same PDE
entry.
*/
enum TPdeType
	{
	/**
	Legacy (and little-used/unused?) ARM attribute.
	This could potentially be removed (see DMemoryMapping::PdeType()).
	*/
	EPdeTypeECC = 1<<0,

	/**
	Total number of combinations of #TPdeType values.
	*/
	ENumPdeTypes = 2
	};
/**
Bit flag values representing the memory mapping differences governed by
MMU Page Table Entries (PTEs).
*/
enum TPteType
	{
	/**
	PTE grants user mode access to memory.
	*/
	EPteTypeUserAccess = EUser,

	/**
	PTE grants write access to memory.
	*/
	EPteTypeWritable = EReadWrite,

	/**
	PTE grants execute access to memory.
	*/
	EPteTypeExecutable = EExecute,

	/**
	PTE is 'global'. I.e. the memory it maps is intended to be accessible
	in all process contexts, i.e. for mappings at virtual address >= KGlobalMemoryBase.
	The MMU uses this to tag TLB entries as valid for all ASIDs.
	*/
	EPteTypeGlobal = 1<<3,

	/**
	Total number of combinations of #TPteType values.
	*/
	ENumPteTypes = 16
	};

__ASSERT_COMPILE(EPteTypeUserAccess==(1<<0));
__ASSERT_COMPILE(EPteTypeWritable==(1<<1));
__ASSERT_COMPILE(EPteTypeExecutable==(1<<2));
#define MMU_SUPPORTS_EXECUTE_NEVER
/**
Return the #TPdeType for memory with the given attributes value.
*/
FORCE_INLINE TUint Mmu::PdeType(TMemoryAttributes aAttributes)
	{
	return aAttributes&EMemoryAttributeUseECC ? EPdeTypeECC : 0;
	}
/**
Return the #TPteType to use for memory mappings requiring the given access permissions
and Global attribute. The global flag is true if #EPteTypeGlobal is to be set.
*/
FORCE_INLINE TUint Mmu::PteType(TMappingPermissions aPermissions, TBool aGlobal)
	{
	__NK_ASSERT_DEBUG(aPermissions&EUser || aGlobal); // can't have supervisor local memory

	TUint pteType = (aPermissions&(EUser|EReadWrite|EExecute));
	if(aGlobal)
		pteType |= EPteTypeGlobal;

	__NK_ASSERT_DEBUG(pteType<ENumPteTypes);

	return pteType;
	}
/**
Test if a memory access is allowed by a given mapping type.

@param aPteType				#TPteType used for a mapping. E.g. TMemoryMappingBase::PteType()
@param aAccessPermissions	Flags from #TMappingPermissions indicating the memory access
							required.

@return True if a memory access requested with permissions \a aAccessPermissions
		is allowed on a mapping of the specified #TPteType.
		False if the access is not allowed.
*/
FORCE_INLINE TBool Mmu::CheckPteTypePermissions(TUint aPteType, TUint aAccessPermissions)
	{
	aAccessPermissions &= EUser|EReadWrite|EExecute;
	return (aPteType&aAccessPermissions)==aAccessPermissions;
	}
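
// Usage sketch (hypothetical values): a user-mode write needs both the user-access
// and writable bits of the mapping's PTE type:
//
//     TUint pteType = Mmu::PteType((TMappingPermissions)(EUser|EReadWrite),EFalse);
//     Mmu::CheckPteTypePermissions(pteType,EUser|EReadWrite);   // true
//     Mmu::CheckPteTypePermissions(pteType,EUser|EExecute);     // false - no execute permission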
/**
Extract the #TMappingPermissions corresponding to a given #TPteType.
*/
FORCE_INLINE TMappingPermissions Mmu::PermissionsFromPteType(TUint aPteType)
	{
	return (TMappingPermissions)(aPteType&(EPteTypeUserAccess|EPteTypeWritable|EPteTypeExecutable));
	}
extern void UserWriteFault(TLinAddr aAddr);
extern void UserReadFault(TLinAddr aAddr);


// TODO: Move these to NKern
FORCE_INLINE void inline_DisableAllInterrupts()
	{
#ifdef __GNUC__
	#ifdef __CPU_ARM_HAS_CPS
		CPSIDAIF;	// disable all interrupts with a single CPS instruction
	#else
		TInt reg;
		asm __VOLATILE__ ("mrs %0, cpsr" : "=r"(reg));
		asm __VOLATILE__ ("orr %0, %0, #0xc0" : : "r"(reg));
		asm __VOLATILE__ ("msr cpsr_c, %0" : : "r"(reg));
	#endif
#elif defined(__ARMCC__)
	#if defined(__CPU_ARM_HAS_CPS) && __ARMCC_VERSION>=300000
		CPSIDAIF;	// disable all interrupts with a single CPS instruction
	#else
		TInt reg;
		asm("mrs reg, cpsr");
		asm("orr reg, reg, #0xc0");
		asm("msr cpsr_c, reg");
	#endif
#else
	NKern::DisableAllInterrupts();
#endif
	}
FORCE_INLINE void inline_EnableAllInterrupts()
	{
#ifdef __GNUC__
	#ifdef __CPU_ARM_HAS_CPS
		CPSIEAIF;	// enable all interrupts with a single CPS instruction
	#else
		TInt reg;
		asm __VOLATILE__ ("mrs %0, cpsr" : "=r"(reg));
		asm __VOLATILE__ ("bic %0, %0, #0xc0" : : "r"(reg));
		asm __VOLATILE__ ("msr cpsr_c, %0" : : "r"(reg));
	#endif
#elif defined(__ARMCC__)
	#if defined(__CPU_ARM_HAS_CPS) && __ARMCC_VERSION>=300000
		CPSIEAIF;	// enable all interrupts with a single CPS instruction
	#else
		TInt reg;
		asm("mrs reg, cpsr");
		asm("bic reg, reg, #0xc0");
		asm("msr cpsr_c, reg");
	#endif
#else
	NKern::EnableAllInterrupts();
#endif
	}
#undef __SPIN_LOCK_IRQ
#define __SPIN_LOCK_IRQ(lock)		(inline_DisableAllInterrupts())
#undef __SPIN_UNLOCK_IRQ
#define __SPIN_UNLOCK_IRQ(lock)		(inline_EnableAllInterrupts())
#undef __SPIN_FLASH_IRQ
#define __SPIN_FLASH_IRQ(lock)		(inline_EnableAllInterrupts(),inline_DisableAllInterrupts(),((TBool)TRUE))
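
// Illustrative note: with the redefinitions above (presumably for the uniprocessor
// configuration) an IRQ spinlock section keeps the same source shape as on SMP but
// reduces to plain interrupt masking:
//
//     __SPIN_LOCK_IRQ(lock);     // here: just inline_DisableAllInterrupts()
//     // ... touch data shared with an interrupt handler ...
//     __SPIN_FLASH_IRQ(lock);    // briefly re-enable interrupts to bound latency
//     __SPIN_UNLOCK_IRQ(lock);   // here: just inline_EnableAllInterrupts()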
/**
Indicate whether a PDE entry maps a page table.

@param aPde The PDE entry in question.
*/
FORCE_INLINE TBool Mmu::PdeMapsPageTable(TPde aPde)
	{
	return (aPde & KPdeTypeMask) == KArmV6PdePageTable;
	}
/**
Indicate whether a PDE entry maps a section.

@param aPde The PDE entry in question.
*/
FORCE_INLINE TBool Mmu::PdeMapsSection(TPde aPde)
	{
	return (aPde & KPdeTypeMask) == KArmV6PdeSection;
	}