First public contribution.
// Copyright (c) 1997-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
// Initial Contributors:
// Nokia Corporation - initial contribution.

#include "cache_maintenance.inl"

#ifdef BROADCAST_TLB_MAINTENANCE
class TTLBIPI : public TGenericIPI
static void InvalidateIsr(TGenericIPI*);
static void WaitAndInvalidateIsr(TGenericIPI*);
void AddArg(TLinAddr aArg);

void TTLBIPI::InvalidateIsr(TGenericIPI* aPtr)
TTLBIPI& a = *(TTLBIPI*)aPtr;
TLinAddr arg = a.iArg;
LocalInvalidateTLBForAsid(arg);
LocalInvalidateTLBForPage(arg);

void TTLBIPI::WaitAndInvalidateIsr(TGenericIPI* aPtr)
TTLBIPI& a = *(TTLBIPI*)aPtr;

void TTLBIPI::AddArg(TLinAddr aArg)
QueueAllOther(&InvalidateIsr);

void BroadcastInvalidateTLB(TLinAddr aLinAddrAndAsid)
ipi.AddArg(aLinAddrAndAsid);
#endif // BROADCAST_TLB_MAINTENANCE
// Functions for class Mmu

/**
Return the physical address of the memory mapped by a Page Table Entry (PTE).

@param aPte      The value contained in the PTE.
@param aPteIndex The index of the PTE within its page table.
*/
TPhysAddr Mmu::PtePhysAddr(TPte aPte, TUint aPteIndex)
if(aPte&KArmV6PteSmallPage)
return aPte & KPteSmallPageAddrMask;
if(aPte&KArmV6PteLargePage)
return (aPte & KPteLargePageAddrMask) + (TPhysAddr(aPteIndex << KPageShift) & KLargePageMask);
return KPhysAddrInvalid;
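
// Illustrative usage sketch (not part of the original source; 'addr', 'osAsid'
// and the surrounding locking are assumed). It shows how PtePhysAddr pairs with
// a page table walk: the PTE index is passed in so that, for a 64K large page,
// the correct 4K frame within the large page is returned.
//
//		TPte* pPte = TheMmu.SafePtePtrFromLinAddr(addr, osAsid);
//		if(pPte)
//			{
//			TUint pteIndex = (addr>>KPageShift)&(KChunkMask>>KPageShift);
//			TPhysAddr pa = TheMmu.PtePhysAddr(*pPte, pteIndex);
//			if(pa!=KPhysAddrInvalid)
//				{
//				// pa is the physical address of the 4K page mapping 'addr'
//				}
//			}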
/**
Return the virtual address of the page table referenced by the given
Page Directory Entry (PDE) \a aPde. If the PDE doesn't refer to a
page table then the null-pointer is returned.

If the page table was not one allocated by the kernel then the
results are unpredictable and may cause a system fault.
*/
TPte* Mmu::PageTableFromPde(TPde aPde)
if((aPde&KPdePresentMask)==KArmV6PdePageTable)
SPageInfo* pi = SPageInfo::FromPhysAddr(aPde);
return (TPte*)(KPageTableBase+(pi->Index()<<KPageShift)+(aPde&(KPageMask&~KPageTableMask)));

/**
Perform the action of #PageTableFromPde but without the possibility of
a system fault caused by the page table not being one allocated by the kernel.
*/
TPte* Mmu::SafePageTableFromPde(TPde aPde)
if((aPde&KPdeTypeMask)==KArmV6PdePageTable)
SPageInfo* pi = SPageInfo::SafeFromPhysAddr(aPde&~KPageMask);
return (TPte*)(KPageTableBase+(pi->Index()<<KPageShift)+(aPde&(KPageMask&~KPageTableMask)));
/**
Return the base physical address of the section referenced by the given
Page Directory Entry (PDE) \a aPde. If the PDE doesn't refer to a
section then KPhysAddrInvalid is returned.
*/
TPhysAddr Mmu::SectionBaseFromPde(TPde aPde)
if(PdeMapsSection(aPde))
return aPde&KPdeSectionAddrMask;
return KPhysAddrInvalid;
/**
Return a pointer to the Page Table Entry (PTE) which maps the
virtual address \a aAddress in the address space \a aOsAsid.

If no page table exists or it was not one allocated by the kernel
then the results are unpredictable and may cause a system fault.
*/
TPte* Mmu::PtePtrFromLinAddr(TLinAddr aAddress, TInt aOsAsid)
TPde pde = PageDirectory(aOsAsid)[aAddress>>KChunkShift];
SPageInfo* pi = SPageInfo::FromPhysAddr(pde);
TPte* pt = (TPte*)(KPageTableBase+(pi->Index()<<KPageShift)+(pde&(KPageMask&~KPageTableMask)));
pt += (aAddress>>KPageShift)&(KChunkMask>>KPageShift);
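
// Worked example (added commentary, not in the original source): with 4K pages
// (KPageShift==12) and 1MB chunks (KChunkShift==20), a virtual address such as
// 0x64304ABC decomposes as
//
//		PDE index   = 0x64304ABC >> 20            = 0x643  (selects the page table)
//		PTE index   = (0x64304ABC >> 12) & 0xFF   = 0x04   (entry within that table)
//		page offset = 0x64304ABC & KPageMask      = 0xABC
//
// so the pointer returned above is the base of the page table plus the PTE index.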
/**
Perform the action of #PtePtrFromLinAddr but without the possibility
of a system fault. If the page table is not present or not one
allocated by the kernel then the null-pointer is returned.
*/
TPte* Mmu::SafePtePtrFromLinAddr(TLinAddr aAddress, TInt aOsAsid)
TPde pde = PageDirectory(aOsAsid)[aAddress>>KChunkShift];
TPte* pt = SafePageTableFromPde(pde);
pt += (aAddress>>KPageShift)&(KChunkMask>>KPageShift);

/**
Return the physical address for the page table whose virtual
address is \a aPt.

If the page table was not one allocated by the kernel then the
results are unpredictable and may cause a system fault.
*/
TPhysAddr Mmu::PageTablePhysAddr(TPte* aPt)
__NK_ASSERT_DEBUG(MmuLock::IsHeld() || PageTablesLockIsHeld());
TInt pdeIndex = ((TLinAddr)aPt)>>KChunkShift;
TPde pde = PageDirectory(KKernelOsAsid)[pdeIndex];
__NK_ASSERT_DEBUG((pde&KPdePresentMask)==KArmV6PdePageTable);
SPageInfo* pi = SPageInfo::FromPhysAddr(pde);
TPte* pPte = (TPte*)(KPageTableBase+(pi->Index(true)<<KPageShift)+(pde&(KPageMask&~KPageTableMask)));
TPte pte = pPte[(((TLinAddr)aPt)&KChunkMask)>>KPageShift];
__NK_ASSERT_DEBUG(pte & KArmV6PteSmallPage);
return (pte&KPteSmallPageAddrMask)|(((TLinAddr)aPt)&(KPageMask&~KPageTableMask));
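
// Illustrative sketch (not part of the original source; 'pt' is an assumed
// input, a kernel-allocated page table mapped in the page table area). The
// result is the 1KB-aligned physical address of that table, which is what a
// page directory entry needs to reference it (see BlankPde below):
//
//		TPhysAddr ptPhys = TheMmu.PageTablePhysAddr(pt);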
/**
Perform a page table walk to return the physical address of
the memory mapped at virtual address \a aLinAddr in the
address space \a aOsAsid.

If the page table used was not one allocated by the kernel
then the results are unpredictable and may cause a system fault.

Use of this function should be avoided; use Mmu::LinearToPhysical instead,
which contains debug assertions for its preconditions.
*/
TPhysAddr Mmu::UncheckedLinearToPhysical(TLinAddr aLinAddr, TInt aOsAsid)
TRACE2(("Mmu::UncheckedLinearToPhysical(%08x,%d)",aLinAddr,aOsAsid));
TInt pdeIndex = aLinAddr>>KChunkShift;
TPde pde = PageDirectory(aOsAsid)[pdeIndex];
if ((pde&KPdePresentMask)==KArmV6PdePageTable)
SPageInfo* pi = SPageInfo::FromPhysAddr(pde);
TPte* pPte = (TPte*)(KPageTableBase+(pi->Index(true)<<KPageShift)+(pde&(KPageMask&~KPageTableMask)));
TPte pte = pPte[(aLinAddr&KChunkMask)>>KPageShift];
if (pte & KArmV6PteSmallPage)
TPhysAddr pa=(pte&KPteSmallPageAddrMask)|(aLinAddr&~KPteSmallPageAddrMask);
__KTRACE_OPT(KMMU,Kern::Printf("Mapped with small page - returning %08x",pa));
else if (pte & KArmV6PteLargePage)
TPhysAddr pa=(pte&KPteLargePageAddrMask)|(aLinAddr&~KPteLargePageAddrMask);
__KTRACE_OPT(KMMU,Kern::Printf("Mapped with large page - returning %08x",pa));
else if ((pde&KPdePresentMask)==KArmV6PdeSection)
TPhysAddr pa=(pde&KPdeSectionAddrMask)|(aLinAddr&~KPdeSectionAddrMask);
__KTRACE_OPT(KMMU,Kern::Printf("Mapped with section - returning %08x",pa));
return KPhysAddrInvalid;
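
// Worked example (added commentary, not in the original source): the physical
// address is the frame base from the PTE/PDE plus the untranslated low bits of
// the virtual address, whose width depends on the mapping size. For
// aLinAddr = 0x00401ABC:
//
//		4K small page with frame 0x87654000  ->  0x87654ABC   (low 12 bits kept)
//		64K large page with frame 0x87650000 ->  0x87651ABC   (low 16 bits kept)
//		1MB section with base 0x80100000     ->  0x80101ABC   (low 20 bits kept)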
extern TUint32 TTCR();
extern TUint32 CPUID(TInt /*aRegNum*/);

TRACEB(("Mmu::Init1"));

// check local/global page directory split is correct...
__NK_ASSERT_ALWAYS(TTCR()==1);

// check cache type is supported and consistent with compile time macros...
TInt iColourCount = 0;
TInt dColourCount = 0;
TUint32 ctr = InternalCache::TypeRegister();
TRACEB(("CacheTypeRegister = %08x",ctr));
__NK_ASSERT_ALWAYS((ctr>>29)==0); // check ARMv6 format
__NK_ASSERT_ALWAYS((ctr>>29)==4); // check ARMv7 format
TUint l1ip = (ctr>>14)&3; // L1 instruction cache indexing and tagging policy
__NK_ASSERT_ALWAYS(l1ip>=2); // check I cache is physically tagged

TUint32 clidr = InternalCache::LevelIDRegister();
TRACEB(("CacheLevelIDRegister = %08x",clidr));
TUint l1type = clidr&7;
if(l1type==2 || l1type==3 || l1type==4)
// we have an L1 data cache...
TUint32 csir = InternalCache::SizeIdRegister(0,0);
TUint sets = ((csir>>13)&0x7fff)+1;
TUint ways = ((csir>>3)&0x3ff)+1;
TUint lineSizeShift = (csir&7)+4;
// assume L1 data cache is VIPT and alias checks broken and so we need data cache colouring...
dColourCount = (sets<<lineSizeShift)>>KPageShift;
if(l1type==4) // unified cache, so set instruction cache colour as well...
iColourCount = (sets<<lineSizeShift)>>KPageShift;
TRACEB(("L1DCache = 0x%x,0x%x,%d colourCount=%d",sets,ways,lineSizeShift,(sets<<lineSizeShift)>>KPageShift));
if(l1type==1 || l1type==3)
// we have a separate L1 instruction cache...
TUint32 csir = InternalCache::SizeIdRegister(1,0);
TUint sets = ((csir>>13)&0x7fff)+1;
TUint ways = ((csir>>3)&0x3ff)+1;
TUint lineSizeShift = (csir&7)+4;
iColourCount = (sets<<lineSizeShift)>>KPageShift;
TRACEB(("L1ICache = 0x%x,0x%x,%d colourCount=%d",sets,ways,lineSizeShift,(sets<<lineSizeShift)>>KPageShift));
// PIPT cache, so no colouring restrictions...
TRACEB(("L1ICache is PIPT"));
TRACEB(("L1ICache is VIPT"));

TRACEB(("page colouring counts I=%d, D=%d",iColourCount,dColourCount));
__NK_ASSERT_ALWAYS(iColourCount<=KPageColourCount);
__NK_ASSERT_ALWAYS(dColourCount<=KPageColourCount);
#ifndef __CPU_I_CACHE_HAS_COLOUR
__NK_ASSERT_ALWAYS(iColourCount==0);
#ifndef __CPU_D_CACHE_HAS_COLOUR
__NK_ASSERT_ALWAYS(dColourCount==0);
#ifndef __CPU_CACHE_HAS_COLOUR
__NK_ASSERT_ALWAYS(iColourCount==0);
__NK_ASSERT_ALWAYS(dColourCount==0);
// check MMU attributes match our assumptions...
if(((CPUID(-1)>>16)&0xf)==0xf) // if we have the new CPUID format...
TUint mmfr1 = CPUID(5);
TRACEB(("mmfr1 = %08x",mmfr1));
#ifdef __CPU_NEEDS_BTAC_FLUSH_AFTER_ASID_CHANGE
__NK_ASSERT_ALWAYS(((mmfr1>>28)&0xf)==1); // Branch Predictor needs invalidating after ASID change
__NK_ASSERT_ALWAYS(((mmfr1>>28)&0xf)>=2); // Branch Predictor doesn't need invalidating after ASID change

TUint mmfr2 = CPUID(6);
TRACEB(("mmfr2 = %08x",mmfr2));
__NK_ASSERT_ALWAYS(((mmfr2>>20)&0xf)>=2); // check Mem Barrier instructions are supported in CP15

TUint mmfr3 = CPUID(7);
TRACEB(("mmfr3 = %08x",mmfr3));
#if defined(__SMP__) && !defined(__CPU_ARM11MP__)
__NK_ASSERT_ALWAYS(((mmfr3>>12)&0xf)>=2); // check Maintenance Broadcast is for all cache and TLB operations
#ifdef __CPU_SUPPORTS_PAGE_TABLE_WALK_TO_L1_CACHE
__NK_ASSERT_ALWAYS(((mmfr3>>20)&0xf)>=1); // check Coherent Walk for page tables

Arm::DefaultDomainAccess = KDefaultDomainAccess;

for (i=0; i<KMaxCpus; ++i)
TSubScheduler& ss = TheSubSchedulers[i];
TLinAddr a = KIPCAlias + (i<<KChunkShift);
ss.i_AliasLinAddr = (TAny*)a;
ss.i_AliasPdePtr = (TAny*)(KPageDirectoryBase + (a>>KChunkShift)*sizeof(TPde));
TRACEB(("Mmu::Init2"));

DMemoryObject* ExceptionStacks;

void Mmu::Init2Final()
TRACEB(("Mmu::Init2Final"));

// initialise memory object for exception stacks...
TMappingCreateFlags mapFlags = (TMappingCreateFlags)(EMappingCreateFixedVirtual|EMappingCreateReserveAllResources);
TMemoryAttributes memAttr = EMemoryAttributeStandard;
TUint size = 4*2*KPageSize; // 4 exception stacks each of one guard page and one mapped page
size |= 1; // lower bit of size is set if region to be claimed contains gaps
TInt r = MM::InitFixedKernelMemory(ExceptionStacks, KExcptStacksLinearBase, KExcptStacksLinearEnd, size, EMemoryObjectUnpaged, EMemoryCreateNoWipe, memAttr, mapFlags);
__NK_ASSERT_ALWAYS(r==KErrNone);
/**
Return the page directory entry (PDE) value to use when mapping page tables intended
to map memory with the given attributes.
The returned value has a zero physical address component, so a page table's physical
address can simply be ORed in.
*/
TPde Mmu::BlankPde(TMemoryAttributes aAttributes)
TPde pde = KArmV6PdePageTable;
if(aAttributes&EMemoryAttributeUseECC)
TRACE2(("Mmu::BlankPde(%x) returns 0x%x",aAttributes,pde));
/**
Return the page directory entry (PDE) value to use when creating a section mapping for memory
with the given attributes and #TPteType.
The returned value has a zero physical address component, so the section's physical address
can simply be ORed in.
*/
TPde Mmu::BlankSectionPde(TMemoryAttributes aAttributes, TUint aPteType)
// reuse existing functions rather than duplicating the logic
TPde pde = BlankPde(aAttributes);
TPte pte = BlankPte(aAttributes, aPteType);
return PageToSectionEntry(pte, pde);
/**
Return the page table entry (PTE) to use when mapping memory pages
with the given attributes and #TPteType.
The returned value has a zero physical address component, so a page's physical
address can simply be ORed in.
*/
TPte Mmu::BlankPte(TMemoryAttributes aAttributes, TUint aPteType)
TUint attr = CanonicalMemoryAttributes(aAttributes);

// common PTE setup...
TPte pte = KArmV6PteSmallPage|KArmV6PteAP0;
if(aPteType&EPteTypeUserAccess)
pte |= KArmV6PteAP1; // AP1 = user access
if((aPteType&EPteTypeWritable)==false)
pte |= KArmV6PteAP2; // AP2 = !writable
if(attr&EMemoryAttributeShareable)
if((aPteType&EPteTypeGlobal)==false)
if((aPteType&EPteTypeExecutable)==false)
pte |= KArmV6PteSmallXN;

#if defined(__CPU_MEMORY_TYPE_REMAPPING)
if(pte&KArmV6PteSmallXN)
pte |= KArmV6PteSmallTEX1; // TEX1 is a copy of the XN bit

// process memory type...
TUint type = attr&EMemoryAttributeTypeMask;
pte |= ((type&3)<<2) | ((type&4)<<4);

if((pte&(KArmV6PteAP2|KArmV6PteAP1))==(KArmV6PteAP2|KArmV6PteAP1))
pte &= ~KArmV6PteAP0; // clear AP0 if user r/o

// process memory type...
switch((TMemoryType)(attr&EMemoryAttributeTypeMask))
case EMemAttStronglyOrdered:
texcb = KArmV6MemAttSO;
if(attr&EMemoryAttributeShareable)
texcb = KArmV6MemAttSD;
texcb = KArmV6MemAttSD; // should be KArmV6MemAttNSD? (but this made H4 go bang)
case EMemAttNormalUncached:
texcb = KArmV6MemAttNCNC;
case EMemAttNormalCached:
texcb = KArmV6MemAttWBWAWBWA;
__NK_ASSERT_ALWAYS(0); // undefined memory type
texcb = KArmV6MemAttSO;

pte |= ((texcb&0x1c)<<4) | ((texcb&0x03)<<2);

TRACE2(("Mmu::BlankPte(%x,%x) returns 0x%x",aAttributes,aPteType,pte));
/**
Calculate PDE and PTE which represent a page table mapping for an existing
section mapping.

@param[in]  aPde The PDE for the existing section mapping.
@param[out] aPde A PDE for a page table mapping, with physical address == 0.

@return The PTE value for the first entry in the page table.
*/
TPte Mmu::SectionToPageEntry(TPde& aPde)
// calculate new PTE...
TPte pte = pde&0xc; // copy CB bits
if(pde&KArmV6PdeSectionXN)
pte |= KArmV6PteSmallXN; // copy XN bit
pte |= (pde&(0xff<<10))>>6; // copy NG, S, APX, TEX, AP bits
pte |= KArmV6PteSmallPage;

// calculate new PDE...
pde &= 0x3e0; // keep IMP and DOMAIN
pde |= KArmV6PdePageTable;
/**
Calculate a PDE entry which represents a section mapping for an existing
page table mapping.

@pre The existing page table contains mappings for a chunk sized and
aligned contiguous region.

@param aPte A PTE from the existing page table.
@param aPde The existing PDE for the page table mappings.
            (Physical address portion is ignored.)

@return A PDE entry value for a section mapping.
*/
TPde Mmu::PageToSectionEntry(TPte aPte, TPde aPde)
TPde pde = aPde&0x3e0; // keep IMP and DOMAIN
pde |= aPte&(KPdeSectionAddrMask|0xc); // copy address and CB bits
if(aPte&KArmV6PteSmallXN)
pde |= KArmV6PdeSectionXN; // copy XN bit
pde |= (aPte&(0xff<<4))<<6; // copy NG, S, APX, TEX, AP bits
pde |= KArmV6PdeSection;
/**
Transform the specified memory attributes into the canonical form relevant to
the platform the code is running on. This applies defaults and overrides to
the attributes to return what should be used with the MMU.
*/
TMemoryAttributes Mmu::CanonicalMemoryAttributes(TMemoryAttributes aAttr)
if(attr&EMemoryAttributeDefaultShareable)
// sharing not specified, use default...
#if defined (__CPU_USE_SHARED_MEMORY)
attr |= EMemoryAttributeShareable;
attr &= ~EMemoryAttributeShareable;

#if defined(FAULTY_NONSHARED_DEVICE_MEMORY)
if((attr&(EMemoryAttributeShareable|EMemoryAttributeTypeMask))==EMemoryAttributeDevice)
// make unshared device memory into shared strongly ordered memory...
attr ^= EMemoryAttributeShareable;
attr ^= EMemoryAttributeDevice^EMemoryAttributeStronglyOrdered;

#if defined(__SMP__) || defined(__CPU_FORCE_SHARED_MEMORY_IF_CACHED)
TMemoryType type = (TMemoryType)(attr&KMemoryTypeMask);
if(CacheMaintenance::IsCached(type))
// force cached memory to be shared memory on SMP systems...
attr |= EMemoryAttributeShareable;

return (TMemoryAttributes)(attr&EMemoryAttributeMask);
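
// Illustrative sketch (not part of the original source). Assuming
// EMemoryAttributeStandard denotes normal cached memory with default
// shareability, on an SMP build the canonical form comes back marked
// shareable, because cached memory is forced to be shared above:
//
//		TMemoryAttributes attr = TheMmu.CanonicalMemoryAttributes(EMemoryAttributeStandard);
//		__NK_ASSERT_DEBUG(attr&EMemoryAttributeShareable);   // holds on __SMP__ builds under the assumption above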
/**
Method called to initialise RAM pages when they are allocated for a new use.
This performs any cache synchronisation required to remove old entries
and also wipes the contents of the memory (if requested via \a aFlags).

@param aPageList   Pointer to a list of physical addresses for the RAM pages,
                   or, if the least significant bit of this value is set, then
                   the rest of the value is the physical address of a contiguous
                   region of RAM pages being allocated.

@param aCount      The number of pages.

@param aFlags      A set of flag values from #TRamAllocFlags which indicate
                   the memory type the pages will be used for and whether
                   the contents should be wiped.

@param aReallocate True, if the RAM pages have already been previously allocated
                   and are being reinitialised, e.g. by DMemoryManager::ReAllocDecommitted.
                   False, to indicate that these pages have been newly allocated (are in
                   the SPageInfo::EUnused state).

@pre #RamAllocLock held.
*/
void Mmu::PagesAllocated(TPhysAddr* aPageList, TUint aCount, TRamAllocFlags aFlags, TBool aReallocate)
TRACE2(("Mmu::PagesAllocated(0x%08x,%d,0x%x,%d)",aPageList, aCount, aFlags, (bool)aReallocate));
__NK_ASSERT_DEBUG(RamAllocLock::IsHeld());

TBool wipe = !(aFlags&EAllocNoWipe); // do we need to wipe page contents?
TUint8 wipeByte = (aFlags&EAllocUseCustomWipeByte) ? (aFlags>>EAllocWipeByteShift)&0xff : 0x03; // value to wipe memory with

// process each page in turn...
// get physical address of next page...
if((TPhysAddr)aPageList&1)
// aPageList is actually the physical address to use...
pagePhys = (TPhysAddr)aPageList&~1;
*(TPhysAddr*)&aPageList += KPageSize;
pagePhys = *aPageList++;
__NK_ASSERT_DEBUG((pagePhys&KPageMask)==0);

// get info about page...
SPageInfo* pi = SPageInfo::FromPhysAddr(pagePhys);
TMemoryType oldType = (TMemoryType)(pi->Flags(true)&KMemoryTypeMask);
TBool oldTypeNormal = CacheMaintenance::IsNormal(oldType);

TRACE2(("Mmu::PagesAllocated page=0x%08x, oldType=%d, wipe=%d, colour=%d",pagePhys,oldType,wipe,pi->Index(true)&KPageColourMask));
if(wipe || oldTypeNormal)
// work out temporary mapping values...
TUint colour = pi->Index(true)&KPageColourMask;
TLinAddr tempLinAddr = iTempMap[0].iLinAddr+colour*KPageSize;
TPte* tempPte = iTempMap[0].iPtePtr+colour;

// cache maintenance required. Prepare temporary mapping.
*tempPte = pagePhys | iTempPteCacheMaintenance;
CacheMaintenance::SinglePteUpdated((TLinAddr)tempPte);
InvalidateTLBForPage(tempLinAddr|KKernelOsAsid);

// will hold additional arguments for the CacheMaintenance::PageToReuse call
TInt pageToReuseMask = 0;

// check if old and new mappings are the same. (Wiping needs a temporary
// mapping which may not be the same as the old and new mapping.)
TMemoryType newType = (TMemoryType)(aFlags&KMemoryTypeMask); // memory type that pages will be used for
if (!wipe && (newType==oldType))
pageToReuseMask |= CacheMaintenance::EOldAndNewMappingMatch;

// decide whether to trigger maintenance of entire cache(s).
if(CacheMaintenance::IsPageToReuseThresholdReached(iCacheInvalidatePageCount))
// enough pages to make it worth triggering maintenance of entire cache(s)
pageToReuseMask |= CacheMaintenance::EThresholdReached;
++iCacheInvalidateCounter;
iCacheInvalidatePageCount = 0; // all pages will be partially synced

if(CacheMaintenance::IsCached(oldType) && !aReallocate)
if(pi->CacheInvalidateCounter()==(TUint32)iCacheInvalidateCounter)
// one less unused page in the L1 cache...
__NK_ASSERT_DEBUG(iCacheInvalidatePageCount);
--iCacheInvalidatePageCount;
// our page has already been partially maintained in the cache
// by a previous PageToReuse call.
pageToReuseMask |= CacheMaintenance::EPageHasBeenPartiallySynced;

TBool pageRemovedFromCache = CacheMaintenance::PageToReuse(tempLinAddr, oldType, pagePhys, pageToReuseMask);
if(pageRemovedFromCache && !aReallocate)

// We need an uncached normal temporary mapping in order to wipe. Change it if
// necessary; also, in the case of !oldTypeNormal, it is not configured yet.
if (!oldTypeNormal || (CacheMaintenance::TemporaryMapping()!=EMemAttNormalUncached))
*tempPte = pagePhys | iTempPteUncached;
CacheMaintenance::SinglePteUpdated((TLinAddr)tempPte);
InvalidateTLBForPage(tempLinAddr|KKernelOsAsid);

// wipe contents of memory...
memset((TAny*)tempLinAddr, wipeByte, KPageSize);
CacheMaintenance::PageToReuse(tempLinAddr, EMemAttNormalUncached, pagePhys);

// invalidate temporary mapping...
*tempPte = KPteUnallocatedEntry;
CacheMaintenance::SinglePteUpdated((TLinAddr)tempPte);
InvalidateTLBForPage(tempLinAddr|KKernelOsAsid);

// indicate page has been allocated...

// loop round for next page...
} // end of while(aCount--)
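
// Illustrative sketch (not part of the original source; 'pageList', 'count',
// 'flags' and 'physBase' are assumed inputs) of the two calling conventions
// described in the comment above: either a list of page addresses, or a
// contiguous run flagged by setting bit 0 of the list pointer.
//
//		TheMmu.PagesAllocated(pageList, count, flags, EFalse);                   // 'count' discontiguous pages
//		TheMmu.PagesAllocated((TPhysAddr*)(physBase|1), count, flags, EFalse);   // 'count' contiguous pages from physBase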
/**
Method called to update the state of a RAM page when it is freed.
This sets the page state to SPageInfo::EUnused.

@param aPageInfo The page information structure for the RAM page.
*/
void Mmu::PageFreed(SPageInfo* aPageInfo)
__NK_ASSERT_DEBUG(MmuLock::IsHeld());

if(aPageInfo->Type()==SPageInfo::EUnused)
aPageInfo->SetUnused();

TMemoryType type = (TMemoryType)(aPageInfo->Flags()&KMemoryTypeMask);
if(CacheMaintenance::IsCached(type))
// another unused page with L1 cache entries...
aPageInfo->SetCacheInvalidateCounter(iCacheInvalidateCounter);
++iCacheInvalidatePageCount;

TRACE2(("Mmu::PageFreed page=0x%08x type=%d colour=%d",aPageInfo->PhysAddr(),aPageInfo->Flags()&KMemoryTypeMask,aPageInfo->Index()&KPageColourMask));

/**
Remove the contents of RAM pages from any memory caches.

@param aPages      Pointer to a list of physical addresses for the RAM pages,
                   or, if the least significant bit of this value is set, then
                   the rest of the value is the physical address of a contiguous
                   region of RAM pages.

@param aCount      The number of pages.

@param aAttributes The memory attributes of the pages.

@param aColour     The colour for the first page;
                   consecutive pages will be coloured accordingly.
                   Only #KPageColourShift least significant bits are used,
                   therefore an index into a memory object's memory can be used.
*/
void Mmu::CleanAndInvalidatePages(TPhysAddr* aPages, TUint aCount, TMemoryAttributes aAttributes, TUint aColour)
TMemoryType type = (TMemoryType)(aAttributes&EMemoryAttributeTypeMask);
if(!CacheMaintenance::IsNormal(type))
TRACE2(("Mmu::CleanAndInvalidatePages - nothing to do"));

RamAllocLock::Lock();

TPhysAddr pagePhys = *aPages++;
TRACE2(("Mmu::CleanAndInvalidatePages 0x%08x",pagePhys));

// work out temporary mapping values...
aColour &= KPageColourMask;
TLinAddr tempLinAddr = iTempMap[0].iLinAddr+aColour*KPageSize;
TPte* tempPte = iTempMap[0].iPtePtr+aColour;

// temporarily map page...
*tempPte = pagePhys | iTempPteCacheMaintenance;
CacheMaintenance::SinglePteUpdated((TLinAddr)tempPte);
InvalidateTLBForPage(tempLinAddr|KKernelOsAsid);

// preserve memory content and remove from cache...
CacheMaintenance::PageToPreserveAndReuse(tempLinAddr, type, pagePhys);

// invalidate temporary mapping...
*tempPte = KPteUnallocatedEntry;
CacheMaintenance::SinglePteUpdated((TLinAddr)tempPte);
InvalidateTLBForPage(tempLinAddr|KKernelOsAsid);

RamAllocLock::Flash();
RamAllocLock::Unlock();
extern void UnlockIPCAlias();
extern void LockIPCAlias();

TInt DMemModelThread::Alias(TLinAddr aAddr, DMemModelProcess* aProcess, TInt aSize, TLinAddr& aAliasAddr, TUint& aAliasSize)
// Set up an alias mapping starting at address aAddr in the specified process.
// Note: the alias is removed if an exception is trapped by DThread::IpcExcHandler.
TRACE2(("Thread %O Alias %08x+%x Process %O",this,aAddr,aSize,aProcess));
__NK_ASSERT_DEBUG(this==TheCurrentThread); // many bad things can happen if false
// If there is an existing alias it should be on the same process, otherwise
// the os asid reference may be leaked.
__NK_ASSERT_DEBUG(!iAliasLinAddr || aProcess == iAliasProcess);

if(TUint(aAddr^KIPCAlias)<TUint(KIPCAliasAreaSize))
return KErrBadDescriptor; // prevent access to alias region

// Grab the mmu lock before opening a reference on the os asid so that this thread
// is in an implicit critical section and therefore can't leak the reference by
// dying before iAliasLinAddr is set.

{// There isn't any existing alias.
// Open a reference on aProcess's os asid so that it is not freed and/or reused
// while we are aliasing an address belonging to it.
osAsid = aProcess->TryOpenOsAsid();
{// Couldn't open the os asid so aProcess is no longer running.
return KErrBadDescriptor;

// Just read the os asid of the process being aliased; we already have a reference on it.
osAsid = aProcess->OsAsid();

// Now we have the os asid, check access to kernel memory.
if(aAddr >= KUserMemoryLimit && osAsid != (TUint)KKernelOsAsid)
NKern::ThreadEnterCS();
{// Close the new reference as RemoveAlias won't, because iAliasLinAddr is not set.
aProcess->AsyncCloseOsAsid(); // Asynchronous close as this method should be quick.
NKern::ThreadLeaveCS();
return KErrBadDescriptor; // prevent access to supervisor only memory

// Now we know all accesses to global memory are safe, so check if aAddr is global.
if(aAddr >= KGlobalMemoryBase)
// address is in the global section, don't bother aliasing it...
{// Close the new reference as it is not required.
NKern::ThreadEnterCS();
aProcess->AsyncCloseOsAsid(); // Asynchronous close as this method should be quick.
NKern::ThreadLeaveCS();
{// Remove the existing alias as it is not required.
DoRemoveAlias(iAliasLinAddr); // Releases mmulock.

TInt maxSize = KChunkSize-(aAddr&KChunkMask);
aAliasSize = aSize<maxSize ? aSize : maxSize;
TRACE2(("DMemModelThread::Alias() abandoned as memory is globally mapped"));

TPde* pd = Mmu::PageDirectory(osAsid);
TInt pdeIndex = aAddr>>KChunkShift;
TPde pde = pd[pdeIndex];
pde = PDE_IN_DOMAIN(pde, KIPCAliasDomain); // change domain for PDE
// Get the os asid; this is the current thread's process so no need for a reference.
TUint32 local_asid = ((DMemModelProcess*)iOwningProcess)->OsAsid();

TLinAddr aliasAddr = KIPCAlias+(aAddr&(KChunkMask & ~KPageMask));
if(pde==iAliasPde && iAliasLinAddr)
// pde already aliased, so just update the linear address...
__NK_ASSERT_DEBUG(iCpuRestoreCookie>=0);
aliasAddr = iAliasLinAddr & ~KChunkMask;
aliasAddr |= (aAddr & (KChunkMask & ~KPageMask));
iAliasLinAddr = aliasAddr;

// alias PDE changed...
TheMmu.iAliasList.Add(&iAliasLink); // add to list if not already aliased
__NK_ASSERT_DEBUG(iCpuRestoreCookie==-1);
iCpuRestoreCookie = NKern::FreezeCpu(); // temporarily lock current thread to this processor

iAliasProcess = aProcess;

TSubScheduler& ss = SubScheduler(); // OK since we are locked to this CPU
aliasAddr = TLinAddr(ss.i_AliasLinAddr) + (aAddr & (KChunkMask & ~KPageMask));
iAliasPdePtr = (TPde*)(TLinAddr(ss.i_AliasPdePtr) + (local_asid << KPageDirectoryShift));
iAliasLinAddr = aliasAddr;
SinglePdeUpdated(iAliasPdePtr);

TRACE2(("DMemModelThread::Alias() PDEntry=%x, iAliasLinAddr=%x",pde, aliasAddr));
LocalInvalidateTLBForPage(aliasAddr | local_asid);
TInt offset = aAddr&KPageMask;
aAliasAddr = aliasAddr | offset;
TInt maxSize = KPageSize - offset;
aAliasSize = aSize<maxSize ? aSize : maxSize;
iAliasTarget = aAddr & ~KPageMask;
void DMemModelThread::RemoveAlias()
// Remove alias mapping (if present)
TRACE2(("Thread %O RemoveAlias", this));
__NK_ASSERT_DEBUG(this==TheCurrentThread); // many bad things can happen if false

TLinAddr addr = iAliasLinAddr;
DoRemoveAlias(addr); // Unlocks mmulock.

/**
Remove the alias mapping.

@post MmuLock released.
*/
void DMemModelThread::DoRemoveAlias(TLinAddr aAddr)
iAliasPde = KPdeUnallocatedEntry;
*iAliasPdePtr = KPdeUnallocatedEntry;
SinglePdeUpdated(iAliasPdePtr);
__NK_ASSERT_DEBUG((aAddr&KPageMask)==0);
// Invalidate the tlb using the os asid; no need to open a reference as this
// is the current thread's process os asid.
LocalInvalidateTLBForPage(aAddr | ((DMemModelProcess*)iOwningProcess)->OsAsid());

__NK_ASSERT_DEBUG(iCpuRestoreCookie>=0);
NKern::EndFreezeCpu(iCpuRestoreCookie);
iCpuRestoreCookie = -1;

// Must close the os asid while in a critical section to prevent it being
// leaked. However, we can't hold the mmu lock, so we have to enter an
// explicit critical section. It is ok to release the mmu lock as the
// iAliasLinAddr and iAliasProcess members are only ever updated by the
// current thread.
NKern::ThreadEnterCS();
iAliasProcess->AsyncCloseOsAsid(); // Asynchronous close as this method should be quick.
NKern::ThreadLeaveCS();
TInt M::DemandPagingFault(TAny* aExceptionInfo)
TArmExcInfo& exc=*(TArmExcInfo*)aExceptionInfo;

// permissions required by faulting memory access...
TUint accessPermissions = EUser; // we only allow paging of user memory

// get faulting address...
TLinAddr faultAddress = exc.iFaultAddress;
if(exc.iExcCode==EArmExceptionPrefetchAbort)
// fault trying to read code to execute...
accessPermissions |= EExecute;
else if(exc.iExcCode!=EArmExceptionDataAbort)
return KErrUnknown; // not prefetch or data abort

// check fault type...
if((exc.iFaultStatus&0x405) != 5 && (exc.iFaultStatus&0x40f) != 4)
return KErrUnknown; // not translation, permission or instruction cache maintenance fault.
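// Added commentary (not in the original source): on ARMv6/v7 the fault status
// is FS[4] in bit 10 of the FSR plus FS[3:0] in bits 3:0. (iFaultStatus&0x405)==5
// therefore accepts FS values 0b00101/0b00111 (section/page translation fault)
// and 0b01101/0b01111 (section/page permission fault), while
// (iFaultStatus&0x40f)==4 accepts FS 0b00100, the instruction cache
// maintenance fault. Bit 11, tested below, is the DFSR WnR flag indicating a
// write access.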
// check access type...
if(exc.iFaultStatus&(1<<11))
accessPermissions |= EReadWrite;

// let TheMmu handle the fault...
return TheMmu.HandlePageFault(exc.iR15, faultAddress, accessPermissions, aExceptionInfo);