Update contrib.
// Copyright (c) 1997-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
#include "cache_maintenance.inl"

TPte PteGlobal;	// =0x100 on processors which support global pages, 0 on processors which don't
extern "C" void __DebugMsgFlushTLB()
	{
	__KTRACE_OPT(KMMU,Kern::Printf("FlushTLB"));
	}

extern "C" void __DebugMsgLocalFlushTLB()
	{
	__KTRACE_OPT(KMMU,Kern::Printf("LocalFlushTLB"));
	}

extern "C" void __DebugMsgINVLPG(int a)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("INVLPG(%08x)",a));
	}
extern void DoLocalInvalidateTLB();
extern void DoInvalidateTLB();
extern void DoInvalidateTLBForPage(TLinAddr aAddr);

#ifndef __SMP__

FORCE_INLINE void LocalInvalidateTLB()
	{
	DoLocalInvalidateTLB();
	}

#else // __SMP__

const TInt KMaxPages = 1;
class TTLBIPI : public TGenericIPI
	{
public:
	static void InvalidateForPagesIsr(TGenericIPI*);
	static void LocalInvalidateIsr(TGenericIPI*);
	static void InvalidateIsr(TGenericIPI*);
	static void WaitAndInvalidateIsr(TGenericIPI*);
	void AddAddress(TLinAddr aAddr);
	void InvalidateList();
public:
	TUint iCount;
	TLinAddr iAddr[KMaxPages];
	};
void TTLBIPI::LocalInvalidateIsr(TGenericIPI*)
	{
	TRACE2(("TLBLocInv"));
	DoLocalInvalidateTLB();
	}

void TTLBIPI::InvalidateIsr(TGenericIPI*)
	{
	TRACE2(("TLBInv"));
	DoInvalidateTLB();
	}
void TTLBIPI::WaitAndInvalidateIsr(TGenericIPI* aTLBIPI)
	{
	TTLBIPI& a = *(TTLBIPI*)aTLBIPI;
	if (a.iCount==1)
		DoInvalidateTLBForPage(a.iAddr[0]);
	else
		DoInvalidateTLB();
	}
void TTLBIPI::InvalidateForPagesIsr(TGenericIPI* aTLBIPI)
	{
	TTLBIPI& a = *(TTLBIPI*)aTLBIPI;
	TUint i;
	for (i=0; i<a.iCount; ++i)
		{
		TRACE2(("TLBInv %08x", a.iAddr[i]));
		DoInvalidateTLBForPage(a.iAddr[i]);
		}
	}
void TTLBIPI::AddAddress(TLinAddr aAddr)
	{
	iAddr[iCount] = aAddr;
	if (++iCount == KMaxPages)
		InvalidateList();	// list full, so flush now
	}
void TTLBIPI::InvalidateList()
	{
	NKern::Lock();
	InvalidateForPagesIsr(this);
	QueueAllOther(&InvalidateForPagesIsr);
	NKern::Unlock();
	WaitCompletion();
	iCount = 0;
	}
void LocalInvalidateTLB()
	{
	TTLBIPI ipi;
	NKern::Lock();
	DoLocalInvalidateTLB();
	ipi.QueueAllOther(&TTLBIPI::LocalInvalidateIsr);
	NKern::Unlock();
	ipi.WaitCompletion();
	}
void InvalidateTLB()
	{
	TTLBIPI ipi;
	NKern::Lock();
	DoInvalidateTLB();
	ipi.QueueAllOther(&TTLBIPI::InvalidateIsr);
	NKern::Unlock();
	ipi.WaitCompletion();
	}
void InvalidateTLBForPage(TLinAddr aAddr)
	{
	TTLBIPI ipi;
	ipi.AddAddress(aAddr);
	ipi.InvalidateList();
	}

#endif // __SMP__
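
// Illustrative summary of the SMP shootdown above: the initiating CPU first
// invalidates its own TLB entry (InvalidateForPagesIsr on itself), then
// QueueAllOther() interrupts every other CPU so each runs the same ISR, and
// WaitCompletion() blocks until all have done so. Only then can the caller
// safely reuse the page, as no CPU can still hold a stale translation.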
void InvalidateTLBForAsid(TUint aAsid)
	{
	if(aAsid==KKernelOsAsid)
		InvalidateTLB();
	else
		LocalInvalidateTLB();
	}
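
// The asymmetry in InvalidateTLBForAsid() reflects x86 TLB behaviour: kernel
// mappings are created with the global bit (see PteGlobal), and global TLB
// entries survive an ordinary address-space switch, so changes to kernel
// (KKernelOsAsid) mappings need the full InvalidateTLB(). User mappings are
// non-global, so the cheaper LocalInvalidateTLB() suffices.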
void SinglePdeUpdated(TPde* aPde)
	{
	CacheMaintenance::SinglePdeUpdated((TLinAddr)aPde);
	PageDirectories.GlobalPdeChanged(aPde);
	}

//
// Functions for class Mmu
//

TPhysAddr Mmu::PtePhysAddr(TPte aPte, TUint /*aPteIndex*/)
	{
	if(aPte&KPdePtePresent)
		return aPte & KPdePtePhysAddrMask;
	return KPhysAddrInvalid;
	}
TPte* Mmu::PageTableFromPde(TPde aPde)
	{
	if((aPde&(KPdeLargePage|KPdePtePresent)) == KPdePtePresent)
		{
		SPageInfo* pi = SPageInfo::FromPhysAddr(aPde);
		TInt id = (pi->Index()<<KPtClusterShift) | ((aPde>>KPageTableShift)&KPtClusterMask);
		return (TPte*)(KPageTableBase+(id<<KPageTableShift));
		}
	return 0;
	}
TPte* Mmu::SafePageTableFromPde(TPde aPde)
	{
	if((aPde&(KPdeLargePage|KPdePtePresent)) == KPdePtePresent)
		{
		SPageInfo* pi = SPageInfo::SafeFromPhysAddr(aPde&~KPageMask);
		if(pi)
			{
			TInt id = (pi->Index()<<KPtClusterShift) | ((aPde>>KPageTableShift)&KPtClusterMask);
			return (TPte*)(KPageTableBase+(id<<KPageTableShift));
			}
		}
	return 0;
	}
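
// How the lookup above finds a page table (illustrative): all page tables
// live in a contiguous region at KPageTableBase. pi->Index() identifies the
// physical page holding the table's cluster, KPtClusterShift scales that to
// the index of the first table in the page, and the low bits of the PDE
// select one table within the cluster, so:
//
//     table address = KPageTableBase + (id << KPageTableShift)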
/**
Return the base physical address of the section referenced by the given
Page Directory Entry (PDE) \a aPde. If the PDE doesn't refer to a
section then KPhysAddrInvalid is returned.
*/
TPhysAddr Mmu::SectionBaseFromPde(TPde aPde)
	{
	if(PdeMapsSection(aPde))
		return aPde&KPdeLargePagePhysAddrMask;
	return KPhysAddrInvalid;
	}
TPte* Mmu::PtePtrFromLinAddr(TLinAddr aAddress, TInt aOsAsid)
	{
	TPde pde = PageDirectory(aOsAsid)[aAddress>>KChunkShift];
	SPageInfo* pi = SPageInfo::FromPhysAddr(pde);
	TPte* pt = (TPte*)(KPageTableBase+(pi->Index()<<KPageShift)+(pde&(KPageMask&~KPageTableMask)));
	pt += (aAddress>>KPageShift)&(KChunkMask>>KPageShift);
	return pt;
	}
TPte* Mmu::SafePtePtrFromLinAddr(TLinAddr aAddress, TInt aOsAsid)
	{
	TPde pde = PageDirectory(aOsAsid)[aAddress>>KChunkShift];
	TPte* pt = SafePageTableFromPde(pde);
	if(pt)
		pt += (aAddress>>KPageShift)&(KChunkMask>>KPageShift);
	return pt;
	}
TPhysAddr Mmu::PageTablePhysAddr(TPte* aPt)
	{
	__NK_ASSERT_DEBUG(MmuLock::IsHeld() || PageTablesLockIsHeld());

	TInt pdeIndex = ((TLinAddr)aPt)>>KChunkShift;
	TPde pde = PageDirectory(KKernelOsAsid)[pdeIndex];
	__NK_ASSERT_DEBUG((pde&(KPdePtePresent|KPdeLargePage))==KPdePtePresent);

	SPageInfo* pi = SPageInfo::FromPhysAddr(pde);
	TPte* pPte = (TPte*)(KPageTableBase+(pi->Index(true)<<KPageShift));
	TPte pte = pPte[(((TLinAddr)aPt)&KChunkMask)>>KPageShift];
	__NK_ASSERT_DEBUG(pte & KPdePtePresent);

	return pte&KPdePtePhysAddrMask;
	}
TPhysAddr Mmu::UncheckedLinearToPhysical(TLinAddr aLinAddr, TInt aOsAsid)
	{
	TRACE2(("Mmu::UncheckedLinearToPhysical(%08x,%d)",aLinAddr,aOsAsid));
	TInt pdeIndex = aLinAddr>>KChunkShift;
	TPde pde = PageDirectory(aOsAsid)[pdeIndex];
	TPhysAddr pa = KPhysAddrInvalid;
	if (pde & KPdePtePresent)
		{
		if(pde&KPdeLargePage)
			{
			pa = (pde&KPdeLargePagePhysAddrMask)+(aLinAddr&~KPdeLargePagePhysAddrMask);
			__KTRACE_OPT(KMMU,Kern::Printf("Mapped with large page - returning %08x",pa));
			}
		else
			{
			SPageInfo* pi = SPageInfo::FromPhysAddr(pde);
			TInt id = (pi->Index(true)<<KPtClusterShift) | ((pde>>KPageTableShift)&KPtClusterMask);
			TPte* pPte = (TPte*)(KPageTableBase+(id<<KPageTableShift));
			TPte pte = pPte[(aLinAddr&KChunkMask)>>KPageShift];
			if (pte & KPdePtePresent)
				{
				pa = (pte&KPdePtePhysAddrMask)+(aLinAddr&KPageMask);
				__KTRACE_OPT(KMMU,Kern::Printf("Mapped with page table - returning %08x",pa));
				}
			}
		}
	return pa;
	}
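
// Worked example (assuming the usual x86 values KChunkShift==22,
// KPageShift==12): for aLinAddr==0x12345678, pdeIndex is 0x48. If that PDE
// maps a 4MB large page at physical 0x00800000, the result is
// 0x00800000+0x345678. If it maps a page table, the PTE at index 0x345
// ((0x12345678&KChunkMask)>>12) supplies the page frame and the low 12 bits
// (0x678) are kept as the offset within the page.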
void Mmu::Init1()
	{
	TRACEB(("Mmu::Init1"));

	TUint pge = TheSuperPage().iCpuId & EX86Feat_PGE;
	PteGlobal = pge ? KPdePteGlobal : 0;
	X86_UseGlobalPTEs = pge!=0;

#ifdef __SMP__
	ApTrampolinePage = KApTrampolinePageLin;

	TInt i;
	for (i=0; i<KMaxCpus; ++i)
		{
		TSubScheduler& ss = TheSubSchedulers[i];
		TLinAddr a = KIPCAlias + (i<<KChunkShift);
		ss.i_AliasLinAddr = (TAny*)a;
		ss.i_AliasPdePtr = (TAny*)(KPageDirectoryBase + (a>>KChunkShift)*sizeof(TPde));
		}
#endif

	Init1Common();
	}
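
// Each CPU thus gets its own KChunkSize-aligned IPC alias window at
// KIPCAlias + (cpu number << KChunkShift), with i_AliasPdePtr caching the
// address of the page directory entry that maps that window. This is what
// lets DMemModelThread::Alias() below map another process's page per-CPU
// without the CPUs interfering with one another.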
void Mmu::Init2()
	{
	TRACEB(("Mmu::Init2"));

	Init2Common();
	}

void Mmu::Init2Final()
	{
	TRACEB(("Mmu::Init2Final"));

	Init2FinalCommon();
	}
const TPde KPdeForBlankPageTable = KPdePtePresent|KPdePteWrite|KPdePteUser;
TPde Mmu::BlankPde(TMemoryAttributes aAttributes)
	{
	TPde pde = KPdeForBlankPageTable;
	TRACE2(("Mmu::BlankPde(%x) returns 0x%x",aAttributes,pde));
	return pde;
	}
TPde Mmu::BlankSectionPde(TMemoryAttributes aAttributes, TUint aPteType)
	{
	return PageToSectionEntry(BlankPte(aAttributes, aPteType), KPdeForBlankPageTable);
	}
TPte Mmu::BlankPte(TMemoryAttributes aAttributes, TUint aPteType)
	{
	TPte pte = KPdePtePresent;
	if(aPteType&EPteTypeUserAccess)
		pte |= KPdePteUser;
	if(aPteType&EPteTypeWritable)
		pte |= KPdePteWrite;
	if(aPteType&EPteTypeGlobal)
		pte |= PteGlobal;

	switch((TMemoryType)(aAttributes&EMemoryAttributeTypeMask))
		{
	case EMemAttStronglyOrdered:
	case EMemAttDevice:
	case EMemAttNormalUncached:
		pte |= KPdePteUncached;
		break;
	case EMemAttNormalCached:
		break;
	default:
		__NK_ASSERT_ALWAYS(0);
		break;
		}

	TRACE2(("Mmu::BlankPte(%x,%x) returns 0x%x",aAttributes,aPteType,pte));
	return pte;
	}
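
// Illustrative result, assuming the architectural x86 PTE bits (present=0x1,
// writeable=0x2, user=0x4, and global=0x100 as noted for PteGlobal above):
// a user-writable, global mapping of normal cached memory gives
//
//     BlankPte(EMemAttNormalCached, EPteTypeUserAccess|EPteTypeWritable|EPteTypeGlobal)
//     == 0x107 on a CPU supporting PGE (0x007 when PteGlobal is 0)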
TPte Mmu::SectionToPageEntry(TPde& aPde)
	{
	TPte pte = aPde&~(KPdePtePhysAddrMask|KPdeLargePage);
	aPde = KPdeForBlankPageTable;
	return pte;
	}
TPde Mmu::PageToSectionEntry(TPte aPte, TPde /*aPde*/)
	{
	TPte pde = aPte&~KPdeLargePagePhysAddrMask;
	pde |= KPdeLargePage;
	return pde;
	}
TMemoryAttributes Mmu::CanonicalMemoryAttributes(TMemoryAttributes aAttr)
	{
	TUint attr = aAttr;
	if(attr&EMemoryAttributeDefaultShareable)
		{
		// sharing not specified, use default...
#if defined(__CPU_USE_SHARED_MEMORY)
		attr |= EMemoryAttributeShareable;
#else
		attr &= ~EMemoryAttributeShareable;
#endif
		}

	// remove invalid attributes...
	attr &= ~(EMemoryAttributeUseECC);

	return (TMemoryAttributes)(attr&EMemoryAttributeMask);
	}
void Mmu::PagesAllocated(TPhysAddr* aPageList, TUint aCount, TRamAllocFlags aFlags, TBool aReallocate)
	{
	TRACE2(("Mmu::PagesAllocated(0x%08x,%d,0x%x,%d)",aPageList, aCount, aFlags, (bool)aReallocate));
	__NK_ASSERT_DEBUG(RamAllocLock::IsHeld());

	TBool wipe = !(aFlags&EAllocNoWipe); // do we need to wipe page contents?
	TMemoryType newType = (TMemoryType)(aFlags&KMemoryTypeMask); // memory type that pages will be used for
	TUint8 wipeByte = (aFlags&EAllocUseCustomWipeByte) ? (aFlags>>EAllocWipeByteShift)&0xff : 0x03; // value to wipe memory with
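
	// For example, a caller wanting pages pre-filled with 0xAA would pass
	// EAllocUseCustomWipeByte|(0xAA<<EAllocWipeByteShift) in aFlags; with
	// neither that flag nor EAllocNoWipe set, pages are wiped with the fixed
	// default byte 0x03.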

	// process each page in turn...
	while(aCount--)
		{
		// get physical address of next page...
		TPhysAddr pagePhys;
		if((TPhysAddr)aPageList&1)
			{
			// aPageList is actually the physical address to use...
			pagePhys = (TPhysAddr)aPageList&~1;
			*(TPhysAddr*)&aPageList += KPageSize;
			}
		else
			pagePhys = *aPageList++;
		__NK_ASSERT_DEBUG((pagePhys&KPageMask)==0);

		// get info about page...
		SPageInfo* pi = SPageInfo::FromPhysAddr(pagePhys);
		TMemoryType oldType = (TMemoryType)(pi->Flags(true)&KMemoryTypeMask);

		TRACE2(("Mmu::PagesAllocated page=0x%08x, oldType=%d, wipe=%d",pagePhys,oldType,wipe));
		if(wipe)
			{
			// work out temporary mapping values...
			TLinAddr tempLinAddr = iTempMap[0].iLinAddr;
			TPte* tempPte = iTempMap[0].iPtePtr;

			// temporarily map page...
			*tempPte = pagePhys | iTempPteCached;
			CacheMaintenance::SinglePteUpdated((TLinAddr)tempPte);
			InvalidateTLBForPage(tempLinAddr|KKernelOsAsid);

			// wipe contents of memory...
			memset((TAny*)tempLinAddr, wipeByte, KPageSize);
			__e32_io_completion_barrier();

			// invalidate temporary mapping...
			*tempPte = KPteUnallocatedEntry;
			CacheMaintenance::SinglePteUpdated((TLinAddr)tempPte);
			InvalidateTLBForPage(tempLinAddr|KKernelOsAsid);
			}

		// indicate page has been allocated...
		if(aReallocate==false)
			pi->SetAllocated();
		}
	}
void Mmu::PageFreed(SPageInfo* aPageInfo)
	{
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());

	if(aPageInfo->Type()==SPageInfo::EUnused)
		return;

	aPageInfo->SetUnused();

	TRACE2(("Mmu::PageFreed page=0x%08x type=%d colour=%d",aPageInfo->PhysAddr(),aPageInfo->Flags()&KMemoryTypeMask,aPageInfo->Index()&KPageColourMask));
	}
void Mmu::CleanAndInvalidatePages(TPhysAddr* aPages, TUint aCount, TMemoryAttributes aAttributes, TUint aColour)
	{
	TMemoryType type = (TMemoryType)(aAttributes&EMemoryAttributeTypeMask);
	if(!CacheMaintenance::IsCached(type))
		{
		TRACE2(("Mmu::CleanAndInvalidatePages - nothing to do"));
		return;
		}

	RamAllocLock::Lock();

	while(aCount--)
		{
		TPhysAddr pagePhys = *aPages++;
		TRACE2(("Mmu::CleanAndInvalidatePages 0x%08x",pagePhys));

		// work out temporary mapping values...
		TLinAddr tempLinAddr = iTempMap[0].iLinAddr;
		TPte* tempPte = iTempMap[0].iPtePtr;

		// temporarily map page...
		*tempPte = pagePhys | iTempPteCached;
		CacheMaintenance::SinglePteUpdated((TLinAddr)tempPte);
		InvalidateTLBForPage(tempLinAddr|KKernelOsAsid);

		// sort out cache for memory reuse...
		CacheMaintenance::PageToPreserveAndReuse(tempLinAddr, type, KPageSize);

		// invalidate temporary mapping...
		*tempPte = KPteUnallocatedEntry;
		CacheMaintenance::SinglePteUpdated((TLinAddr)tempPte);
		InvalidateTLBForPage(tempLinAddr|KKernelOsAsid);

		RamAllocLock::Flash();
		}

	RamAllocLock::Unlock();
	}
TInt DMemModelThread::Alias(TLinAddr aAddr, DMemModelProcess* aProcess, TInt aSize, TLinAddr& aAliasAddr, TUint& aAliasSize)
//
// Set up an alias mapping starting at address aAddr in specified process.
// Note: Alias is removed if an exception is trapped by DThread::IpcExcHandler.
//
	{
	TRACE2(("Thread %O Alias %08x+%x Process %O",this,aAddr,aSize,aProcess));
	__NK_ASSERT_DEBUG(this==TheCurrentThread); // many bad things can happen if false
	// If there is an existing alias it should be on the same process otherwise
	// the os asid reference may be leaked.
	__NK_ASSERT_DEBUG(!iAliasLinAddr || aProcess == iAliasProcess);

	if(TUint(aAddr^KIPCAlias)<TUint(KIPCAliasAreaSize))
		return KErrBadDescriptor; // prevent access to alias region
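
	// The XOR test above is a compact range check: provided KIPCAlias is
	// aligned to a power-of-two boundary at least as large as
	// KIPCAliasAreaSize, aAddr^KIPCAlias clears the common high bits, so the
	// single unsigned comparison is true exactly when aAddr lies within
	// [KIPCAlias, KIPCAlias+KIPCAliasAreaSize).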

	// Grab the mmu lock before opening a reference on os asid so that this thread
	// is in an implicit critical section and therefore can't leak the reference by
	// dying before iAliasLinAddr is set.
	MmuLock::Lock();

	TInt osAsid;
	if(!iAliasLinAddr)
		{// There isn't any existing alias.
		// Open a reference on the aProcess's os asid so that it is not freed and/or reused
		// while we are aliasing an address belonging to it.
		osAsid = aProcess->TryOpenOsAsid();
		if(osAsid < 0)
			{// Couldn't open os asid so aProcess is no longer running.
			MmuLock::Unlock();
			return KErrBadDescriptor;
			}
		}
	else
		{
		// Just read the os asid of the process being aliased; we already have a reference on it.
		osAsid = aProcess->OsAsid();
		}

	// Now we have the os asid, check access to kernel memory.
	if(aAddr >= KUserMemoryLimit && osAsid != (TUint)KKernelOsAsid)
		{
		NKern::ThreadEnterCS();
		MmuLock::Unlock();
		if(!iAliasLinAddr)
			{// Close the new reference as RemoveAlias() won't, because iAliasLinAddr is not set.
			aProcess->AsyncCloseOsAsid(); // Asynchronous close as this method should be quick.
			}
		NKern::ThreadLeaveCS();
		return KErrBadDescriptor; // prevent access to supervisor only memory
		}

	// Now we know all accesses to global memory are safe so check if aAddr is global.
	if(aAddr >= KGlobalMemoryBase)
		{
		// address is in global section, don't bother aliasing it...
		if(!iAliasLinAddr)
			{// Close the new reference as not required.
			NKern::ThreadEnterCS();
			MmuLock::Unlock();
			aProcess->AsyncCloseOsAsid(); // Asynchronous close as this method should be quick.
			NKern::ThreadLeaveCS();
			}
		else
			{// Remove the existing alias as it is not required.
			DoRemoveAlias(iAliasLinAddr); // Releases mmulock.
			}

		TInt maxSize = KChunkSize-(aAddr&KChunkMask);
		aAliasSize = aSize<maxSize ? aSize : maxSize;
		TRACE2(("DMemModelThread::Alias() abandoned as memory is globally mapped"));
		aAliasAddr = aAddr;
		return KErrNone;
		}

	TPde* pd = Mmu::PageDirectory(osAsid);
	TInt pdeIndex = aAddr>>KChunkShift;
	TPde pde = pd[pdeIndex];

	TLinAddr aliasAddr = KIPCAlias+(aAddr&(KChunkMask & ~KPageMask));
	if(pde==iAliasPde && iAliasLinAddr)
		{
		// pde already aliased, so just update linear address...
#ifdef __SMP__
		__NK_ASSERT_DEBUG(iCpuRestoreCookie>=0);
		aliasAddr = iAliasLinAddr & ~KChunkMask;
		aliasAddr |= (aAddr & (KChunkMask & ~KPageMask));
#endif
		iAliasLinAddr = aliasAddr;
		}
	else
		{
		// alias PDE changed...
		if(!iAliasLinAddr)
			{
			TheMmu.iAliasList.Add(&iAliasLink); // add to list if not already aliased
#ifdef __SMP__
			__NK_ASSERT_DEBUG(iCpuRestoreCookie==-1);
			iCpuRestoreCookie = NKern::FreezeCpu(); // temporarily lock current thread to this processor
#endif
			}
		iAliasPde = pde;
		iAliasProcess = aProcess;
#ifdef __SMP__
		TSubScheduler& ss = SubScheduler(); // OK since we are locked to this CPU
		aliasAddr = TLinAddr(ss.i_AliasLinAddr) + (aAddr & (KChunkMask & ~KPageMask));
		iAliasPdePtr = (TPde*)(TLinAddr(ss.i_AliasPdePtr) + (osAsid << KPageTableShift));
#endif
		iAliasLinAddr = aliasAddr;
		*iAliasPdePtr = pde;
		SinglePdeUpdated(iAliasPdePtr);
		}

	TRACE2(("DMemModelThread::Alias() PDEntry=%x, iAliasLinAddr=%x",pde, aliasAddr));
	LocalInvalidateTLBForPage(aliasAddr);
	TInt offset = aAddr&KPageMask;
	aAliasAddr = aliasAddr | offset;
	TInt maxSize = KPageSize - offset;
	aAliasSize = aSize<maxSize ? aSize : maxSize;
	iAliasTarget = aAddr & ~KPageMask;

	MmuLock::Unlock();
	return KErrNone;
	}
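
// Typical usage (a sketch, not code from this file; 'process', 'clientAddr',
// 'aSize' and 'dest' are hypothetical): IPC copy code aliases one page of the
// client's address space at a time and copies through the alias:
//
//     TLinAddr alias;
//     TUint aliasSize;
//     TInt r = thread->Alias(clientAddr, process, aSize, alias, aliasSize);
//     if(r==KErrNone)
//         {
//         memcpy(dest, (const TAny*)alias, aliasSize); // read client memory
//         thread->RemoveAlias();
//         }
//
// If the copy takes an exception, DThread::IpcExcHandler removes the alias
// instead (see the note at the top of Alias()).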

void DMemModelThread::RemoveAlias()
//
// Remove alias mapping (if present)
//
	{
	TRACE2(("Thread %O RemoveAlias", this));
	__NK_ASSERT_DEBUG(this==TheCurrentThread); // many bad things can happen if false

	TLinAddr addr = iAliasLinAddr;
	if(addr)
		{
		MmuLock::Lock();
		DoRemoveAlias(addr); // Unlocks mmulock.
		}
	}

/**
Remove the alias mapping.
*/
void DMemModelThread::DoRemoveAlias(TLinAddr aAddr)
	{
	iAliasLink.Deque();
	iAliasLinAddr = 0;
	iAliasPde = KPdeUnallocatedEntry;
	*iAliasPdePtr = KPdeUnallocatedEntry;
	SinglePdeUpdated(iAliasPdePtr);
	__NK_ASSERT_DEBUG((aAddr&KPageMask)==0);
	LocalInvalidateTLBForPage(aAddr);

#ifdef __SMP__
	__NK_ASSERT_DEBUG(iCpuRestoreCookie>=0);
	NKern::EndFreezeCpu(iCpuRestoreCookie);
	iCpuRestoreCookie = -1;
#endif

	// Must close the os asid while in critical section to prevent it being
	// leaked. However, we can't hold the mmu lock so we have to enter an
	// explicit critical section. It is ok to release the mmu lock as the
	// iAliasLinAddr and iAliasProcess members are only ever updated by the
	// thread itself.
	NKern::ThreadEnterCS();
	MmuLock::Unlock();
	iAliasProcess->AsyncCloseOsAsid(); // Asynchronous close as this method should be quick.
	NKern::ThreadLeaveCS();
	}

TInt M::DemandPagingFault(TAny* aExceptionInfo)
	{
	TX86ExcInfo& exc=*(TX86ExcInfo*)aExceptionInfo;
	if(exc.iExcId!=EX86VectorPageFault)
		return KErrAbort; // not a page fault

	/*
	Meanings of exc.iExcErrorCode when exception type is EX86VectorPageFault...

	Bit 0	0 The fault was caused by a non-present page.
			1 The fault was caused by a page-level protection violation.
	Bit 1	0 The access causing the fault was a read.
			1 The access causing the fault was a write.
	Bit 2	0 The access causing the fault originated when the processor was executing in supervisor mode.
			1 The access causing the fault originated when the processor was executing in user mode.
	Bit 3	0 The fault was not caused by reserved bit violation.
			1 The fault was caused by reserved bits set to 1 in a page directory.
	Bit 4	0 The fault was not caused by an instruction fetch.
			1 The fault was caused by an instruction fetch.
	*/
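
	// For example, an error code of 0x06 (bit 1 and bit 2 set, bit 0 clear)
	// means a user-mode write to a not-present page - the typical demand
	// paging case handled below.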

	// check access type...
	TUint accessPermissions = EUser; // we only allow paging of user memory
	if(exc.iExcErrorCode&(1<<1))
		accessPermissions |= EReadWrite;

	// let TheMmu handle the fault...
	return TheMmu.HandlePageFault(exc.iEip, exc.iFaultAddress, accessPermissions, aExceptionInfo);
	}