// Copyright (c) 1997-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
//

#include <x86_mem.h>
#include "cache_maintenance.inl"
#include "execs.h"
#include "mm.h"
#include "mmu.h"
#include "mpager.h"
#include "mpdalloc.h"


TPte PteGlobal;	// =0x100 on processors which support global pages, 0 on processors which don't

#if defined(KMMU)
extern "C" void __DebugMsgFlushTLB()
	{
	__KTRACE_OPT(KMMU,Kern::Printf("FlushTLB"));
	}

extern "C" void __DebugMsgLocalFlushTLB()
	{
	__KTRACE_OPT(KMMU,Kern::Printf("FlushTLB"));
	}

extern "C" void __DebugMsgINVLPG(int a)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("INVLPG(%08x)",a));
	}
#endif


extern void DoLocalInvalidateTLB();


#ifndef __SMP__


FORCE_INLINE void LocalInvalidateTLB()
	{
	DoLocalInvalidateTLB();
	}


#else // __SMP__


const TInt KMaxPages = 1;

class TTLBIPI : public TGenericIPI
	{
public:
	TTLBIPI();
	static void InvalidateForPagesIsr(TGenericIPI*);
	static void LocalInvalidateIsr(TGenericIPI*);
	static void InvalidateIsr(TGenericIPI*);
	static void WaitAndInvalidateIsr(TGenericIPI*);
	void AddAddress(TLinAddr aAddr);
	void InvalidateList();
public:
	volatile TInt iFlag;
	TInt iCount;
	TLinAddr iAddr[KMaxPages];
	};

TTLBIPI::TTLBIPI()
	: iFlag(0), iCount(0)
	{
	}

void TTLBIPI::LocalInvalidateIsr(TGenericIPI*)
	{
	TRACE2(("TLBLocInv"));
	DoLocalInvalidateTLB();
	}

void TTLBIPI::InvalidateIsr(TGenericIPI*)
	{
	TRACE2(("TLBInv"));
	DoInvalidateTLB();
	}

void TTLBIPI::WaitAndInvalidateIsr(TGenericIPI* aTLBIPI)
	{
	TRACE2(("TLBWtInv"));
	TTLBIPI& a = *(TTLBIPI*)aTLBIPI;
	while (!a.iFlag)
		{}
	if (a.iCount == 1)
		DoInvalidateTLBForPage(a.iAddr[0]);
	else
		DoInvalidateTLB();
	}

void TTLBIPI::InvalidateForPagesIsr(TGenericIPI* aTLBIPI)
	{
	TTLBIPI& a = *(TTLBIPI*)aTLBIPI;
	TInt i;
	for (i=0; i<a.iCount; ++i)
		{
		TRACE2(("TLBInv %08x",a.iAddr[i]));
		DoInvalidateTLBForPage(a.iAddr[i]);
		}
	}

void TTLBIPI::AddAddress(TLinAddr aAddr)
	{
	iAddr[iCount] = aAddr;
	if (++iCount == KMaxPages)
		InvalidateList();
	}

void TTLBIPI::InvalidateList()
	{
	NKern::Lock();
	InvalidateForPagesIsr(this);
	QueueAllOther(&InvalidateForPagesIsr);
	NKern::Unlock();
	WaitCompletion();
	iCount = 0;
	}

void LocalInvalidateTLB()
	{
	TTLBIPI ipi;
	NKern::Lock();
	DoLocalInvalidateTLB();
	ipi.QueueAllOther(&TTLBIPI::LocalInvalidateIsr);
	NKern::Unlock();
	ipi.WaitCompletion();
	}

void InvalidateTLB()
	{
	TTLBIPI ipi;
	NKern::Lock();
	DoInvalidateTLB();
	ipi.QueueAllOther(&TTLBIPI::InvalidateIsr);
	NKern::Unlock();
	ipi.WaitCompletion();
	}

void InvalidateTLBForPage(TLinAddr aAddr)
	{
	TTLBIPI ipi;
	ipi.iCount = 1;
	ipi.iAddr[0] = aAddr;
	NKern::Lock();
	ipi.QueueAllOther(&TTLBIPI::WaitAndInvalidateIsr);
	DoInvalidateTLBForPage(aAddr);
	ipi.iFlag = 1;
	NKern::Unlock();
	ipi.WaitCompletion();
	}


#endif // __SMP__


TPte* Mmu::PageTableFromPde(TPde aPde)
	{
	if((aPde&(KPdePtePresent|KPdeLargePage))==KPdePtePresent)
		{
		SPageInfo* pi = SPageInfo::FromPhysAddr(aPde);
		TInt id = (pi->Index()<<KPtClusterShift)|((aPde>>KPageTableShift)&KPtClusterMask);
		return (TPte*)(KPageTableBase+(id<<KPageTableShift));
		}
	return 0;
	}


TPte* Mmu::SafePageTableFromPde(TPde aPde)
	{
	if((aPde&(KPdePtePresent|KPdeLargePage))==KPdePtePresent)
		{
		SPageInfo* pi = SPageInfo::SafeFromPhysAddr(aPde&~KPageMask);
		if(pi)
			{
			TInt id = (pi->Index()<<KPtClusterShift)|((aPde>>KPageTableShift)&KPtClusterMask);
			return (TPte*)(KPageTableBase+(id<<KPageTableShift));
			}
		}
	return 0;
	}


TPte* Mmu::PtePtrFromLinAddr(TLinAddr aAddress, TInt aOsAsid)
	{
	TPde pde = PageDirectory(aOsAsid)[aAddress>>KChunkShift];
	SPageInfo* pi = SPageInfo::FromPhysAddr(pde);
	TPte* pt = (TPte*)(KPageTableBase+(pi->Index()<<KPageShift));
	pt += (aAddress>>KPageShift)&(KChunkMask>>KPageShift);
	return pt;
	}


TPte* Mmu::SafePtePtrFromLinAddr(TLinAddr aAddress, TInt aOsAsid)
	{
	TPde pde = PageDirectory(aOsAsid)[aAddress>>KChunkShift];
	TPte* pt = SafePageTableFromPde(pde);
	if(pt)
		pt += (aAddress>>KPageShift)&(KChunkMask>>KPageShift);
	return pt;
	}
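// Worked example of the lookup arithmetic used above (illustrative comment
// only; the concrete numbers assume the standard IA-32 4KB paging layout,
// i.e. KPageShift==12 and KChunkShift==22):
//
//   aAddress                                        = 0x6480A123
//   PDE index: aAddress>>KChunkShift                = 0x192  (selects the 4MB chunk)
//   PTE index: (aAddress>>KPageShift)&(KChunkMask>>KPageShift)
//                                                   = 0x00A  (selects the 4KB page)
//   byte offset: aAddress&KPageMask                 = 0x123
//
// PtePtrFromLinAddr() turns the PDE into the page table's linear address via
// the SPageInfo of the physical page holding that table, then adds the PTE
// index to reach the individual entry.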
TPhysAddr Mmu::PageTablePhysAddr(TPte* aPt)
	{
	__NK_ASSERT_DEBUG(MmuLock::IsHeld() || PageTablesLockIsHeld());

	TInt pdeIndex = ((TLinAddr)aPt)>>KChunkShift;
	TPde pde = PageDirectory(KKernelOsAsid)[pdeIndex];
	__NK_ASSERT_DEBUG((pde&(KPdePtePresent|KPdeLargePage))==KPdePtePresent);

	SPageInfo* pi = SPageInfo::FromPhysAddr(pde);
	TPte* pPte = (TPte*)(KPageTableBase+(pi->Index(true)<<KPageShift));
	TPte pte = pPte[(((TLinAddr)aPt)&KChunkMask)>>KPageShift];
	__NK_ASSERT_DEBUG(pte & KPdePtePresent);

	return pte&KPdePtePhysAddrMask;
	}


TPhysAddr Mmu::UncheckedLinearToPhysical(TLinAddr aLinAddr, TInt aOsAsid)
	{
	TRACE2(("Mmu::UncheckedLinearToPhysical(%08x,%d)",aLinAddr,aOsAsid));
	TInt pdeIndex = aLinAddr>>KChunkShift;
	TPde pde = PageDirectory(aOsAsid)[pdeIndex];
	TPhysAddr pa=KPhysAddrInvalid;
	if (pde & KPdePtePresent)
		{
		if(pde&KPdeLargePage)
			{
			pa=(pde&KPdeLargePagePhysAddrMask)+(aLinAddr&~KPdeLargePagePhysAddrMask);
			__KTRACE_OPT(KMMU,Kern::Printf("Mapped with large table - returning %08x",pa));
			}
		else
			{
			SPageInfo* pi = SPageInfo::FromPhysAddr(pde);
			TInt id = (pi->Index(true)<<KPtClusterShift)|((pde>>KPageTableShift)&KPtClusterMask);
			TPte* pPte = (TPte*)(KPageTableBase+(id<<KPageTableShift));
			TPte pte = pPte[(aLinAddr&KChunkMask)>>KPageShift];
			if (pte & KPdePtePresent)
				{
				pa=(pte&KPdePtePhysAddrMask)+(aLinAddr&KPageMask);
				__KTRACE_OPT(KMMU,Kern::Printf("Mapped with page table - returning %08x",pa));
				}
			}
		}
	return pa;
	}


void Mmu::Init1()
	{
	TRACEB(("Mmu::Init1"));

	TUint pge = TheSuperPage().iCpuId & EX86Feat_PGE;
	PteGlobal = pge ? KPdePteGlobal : 0;
	X86_UseGlobalPTEs = pge!=0;

#ifdef __SMP__
	ApTrampolinePage = KApTrampolinePageLin;

	TInt i;
	for (i=0; i<KMaxCpus; ++i)
		{
		TSubScheduler& ss = TheSubSchedulers[i];
		TLinAddr a = KIPCAlias+(i<<KChunkShift);
		ss.i_AliasLinAddr = (TAny*)a;
		ss.i_AliasPdePtr = (TAny*)(KPageDirectoryBase+(a>>KChunkShift)*sizeof(TPde));
		}
#endif

	Init1Common();
	}

void Mmu::Init2()
	{
	TRACEB(("Mmu::Init2"));

	Init2Common();
	}

void Mmu::Init2Final()
	{
	TRACEB(("Mmu::Init2Final"));

	Init2FinalCommon();
	}


const TPde KPdeForBlankPageTable = KPdePtePresent|KPdePteWrite|KPdePteUser;

TPde Mmu::BlankPde(TMemoryAttributes aAttributes)
	{
	(void)aAttributes;
	TPde pde = KPdeForBlankPageTable;
	TRACE2(("Mmu::BlankPde(%x) returns 0x%x",aAttributes,pde));
	return pde;
	}


TPde Mmu::BlankSectionPde(TMemoryAttributes aAttributes, TUint aPteType)
	{
	return PageToSectionEntry(BlankPte(aAttributes, aPteType), KPdeForBlankPageTable);
	}


TPte Mmu::BlankPte(TMemoryAttributes aAttributes, TUint aPteType)
	{
	TPte pte = KPdePtePresent;
	if(aPteType&EPteTypeUserAccess)
		pte |= KPdePteUser;
	if(aPteType&EPteTypeWritable)
		pte |= KPdePteWrite;
	if(aPteType&EPteTypeGlobal)
		pte |= PteGlobal;

	switch((TMemoryType)(aAttributes&EMemoryAttributeTypeMask))
		{
	case EMemAttStronglyOrdered:
	case EMemAttDevice:
	case EMemAttNormalUncached:
		pte |= KPdePteUncached;
		break;
	case EMemAttNormalCached:
		break;
	default:
		__NK_ASSERT_ALWAYS(0);
		break;
		}

	TRACE2(("Mmu::BlankPte(%x,%x) returns 0x%x",aAttributes,aPteType,pte));
	return pte;
	}
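// Worked example for BlankPte() (illustrative comment only; assumes the usual
// IA-32 encodings of these constants, KPdePtePresent==0x01, KPdePteWrite==0x02,
// KPdePteUser==0x04 and KPdePteGlobal==0x100, the last as noted for PteGlobal
// above):
//
//   BlankPte(EMemAttNormalCached, EPteTypeUserAccess|EPteTypeWritable|EPteTypeGlobal)
//     = KPdePtePresent|KPdePteUser|KPdePteWrite|PteGlobal
//     = 0x01|0x04|0x02|0x100
//     = 0x107
//
// i.e. a present, user-accessible, writable, global, fully cached page. On a
// CPU without the PGE feature PteGlobal is 0 and the same call yields 0x007.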
TPte Mmu::SectionToPageEntry(TPde& aPde)
	{
	TPte pte = aPde&~(KPdePtePhysAddrMask|KPdeLargePage);
	aPde = KPdeForBlankPageTable;
	return pte;
	}


TPde Mmu::PageToSectionEntry(TPte aPte, TPde /*aPde*/)
	{
	TPte pde = aPte&~KPdeLargePagePhysAddrMask;
	pde |= KPdeLargePage;
	return pde;
	}


TMemoryAttributes Mmu::CanonicalMemoryAttributes(TMemoryAttributes aAttr)
	{
	TUint attr = aAttr;
	if(attr&EMemoryAttributeDefaultShareable)
		{
		// sharing not specified, use default...
#if defined (__CPU_USE_SHARED_MEMORY)
		attr |= EMemoryAttributeShareable;
#else
		attr &= ~EMemoryAttributeShareable;
#endif
		}

	// remove invalid attributes...
	attr &= ~(EMemoryAttributeUseECC);

	return (TMemoryAttributes)(attr&EMemoryAttributeMask);
	}


void Mmu::PagesAllocated(TPhysAddr* aPageList, TUint aCount, TRamAllocFlags aFlags, TBool aReallocate)
	{
	TRACE2(("Mmu::PagesAllocated(0x%08x,%d,0x%x,%d)",aPageList, aCount, aFlags, (bool)aReallocate));
	__NK_ASSERT_DEBUG(RamAllocLock::IsHeld());

	TBool wipe = !(aFlags&EAllocNoWipe); // do we need to wipe page contents?
	TMemoryType newType = (TMemoryType)(aFlags&KMemoryTypeMask); // memory type that pages will be used for
	TUint8 wipeByte = (aFlags&EAllocUseCustomWipeByte) ? (aFlags>>EAllocWipeByteShift)&0xff : 0x03; // value to wipe memory with

	// process each page in turn...
	while(aCount--)
		{
		// get physical address of next page...
		TPhysAddr pagePhys;
		if((TPhysAddr)aPageList&1)
			{
			// aPageList is actually the physical address to use...
			pagePhys = (TPhysAddr)aPageList&~1;
			*(TPhysAddr*)&aPageList += KPageSize;
			}
		else
			pagePhys = *aPageList++;
		__NK_ASSERT_DEBUG((pagePhys&KPageMask)==0);

		// get info about page...
		SPageInfo* pi = SPageInfo::FromPhysAddr(pagePhys);
		TMemoryType oldType = (TMemoryType)(pi->Flags(true)&KMemoryTypeMask);

		TRACE2(("Mmu::PagesAllocated page=0x%08x, oldType=%d, wipe=%d",pagePhys,oldType,wipe));
		if(wipe)
			{
			// work out temporary mapping values...
			TLinAddr tempLinAddr = iTempMap[0].iLinAddr;
			TPte* tempPte = iTempMap[0].iPtePtr;

			// temporarily map page...
			*tempPte = pagePhys | iTempPteCached;
			CacheMaintenance::SinglePteUpdated((TLinAddr)tempPte);
			InvalidateTLBForPage(tempLinAddr|KKernelOsAsid);

			// wipe contents of memory...
			memset((TAny*)tempLinAddr, wipeByte, KPageSize);
			__e32_io_completion_barrier();

			// invalidate temporary mapping...
			*tempPte = KPteUnallocatedEntry;
			CacheMaintenance::SinglePteUpdated((TLinAddr)tempPte);
			InvalidateTLBForPage(tempLinAddr|KKernelOsAsid);
			}
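		// Note on the map/wipe/unmap sequence above: the first INVLPG ensures
		// no stale TLB entry from a previous use of the temporary slot can be
		// hit while wiping, and the second ensures no translation for
		// tempLinAddr outlives the mapping once the page is handed out.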
		// indicate page has been allocated...
		if(aReallocate==false)
			pi->SetAllocated();
		}
	}


void Mmu::PageFreed(SPageInfo* aPageInfo)
	{
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());

	if(aPageInfo->Type()==SPageInfo::EUnused)
		return;

	aPageInfo->SetUnused();

	TRACE2(("Mmu::PageFreed page=0x%08x type=%d colour=%d",aPageInfo->PhysAddr(),aPageInfo->Flags()&KMemoryTypeMask,aPageInfo->Index()&KPageColourMask));
	}


void Mmu::CleanAndInvalidatePages(TPhysAddr* aPages, TUint aCount, TMemoryAttributes aAttributes, TUint aColour)
	{
	TMemoryType type = (TMemoryType)(aAttributes&EMemoryAttributeTypeMask);
	if(!CacheMaintenance::IsCached(type))
		{
		TRACE2(("Mmu::CleanAndInvalidatePages - nothing to do"));
		return;
		}

	RamAllocLock::Lock();

	while(aCount--)
		{
		TPhysAddr pagePhys = *aPages++;
		TRACE2(("Mmu::CleanAndInvalidatePages 0x%08x",pagePhys));

		// work out temporary mapping values...
		TLinAddr tempLinAddr = iTempMap[0].iLinAddr;
		TPte* tempPte = iTempMap[0].iPtePtr;

		// temporarily map page...
		*tempPte = pagePhys | iTempPteCached;
		CacheMaintenance::SinglePteUpdated((TLinAddr)tempPte);
		InvalidateTLBForPage(tempLinAddr|KKernelOsAsid);

		// sort out cache for memory reuse...
		CacheMaintenance::PageToPreserveAndReuse(tempLinAddr, type, KPageSize);

		// invalidate temporary mapping...
		*tempPte = KPteUnallocatedEntry;
		CacheMaintenance::SinglePteUpdated((TLinAddr)tempPte);
		InvalidateTLBForPage(tempLinAddr|KKernelOsAsid);

		RamAllocLock::Flash();
		}
	RamAllocLock::Unlock();
	}
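// The IPC alias mechanism below works at page directory granularity: the PDE
// covering the remote address is copied into a reserved alias slot in the
// current address space, which makes the whole chunk containing aAddr (4MB on
// x86) temporarily visible at iAliasLinAddr. On SMP each CPU has its own alias
// slot (set up in Mmu::Init1), so the thread is frozen to its current CPU for
// the lifetime of the alias.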
TInt DMemModelThread::Alias(TLinAddr aAddr, DMemModelProcess* aProcess, TInt aSize, TLinAddr& aAliasAddr, TUint& aAliasSize)
//
// Set up an alias mapping starting at address aAddr in specified process.
// Note: Alias is removed if an exception is trapped by DThread::IpcExcHandler.
//
	{
	TRACE2(("Thread %O Alias %08x+%x Process %O",this,aAddr,aSize,aProcess));
	__NK_ASSERT_DEBUG(this==TheCurrentThread); // many bad things can happen if false
	// If there is an existing alias it should be on the same process otherwise
	// the os asid reference may be leaked.
	__NK_ASSERT_DEBUG(!iAliasLinAddr || aProcess == iAliasProcess);

	if(TUint(aAddr^KIPCAlias)<TUint(KIPCAliasAreaSize))
		return KErrBadDescriptor; // prevent access to alias region

	// Grab the mmu lock before opening a reference on the os asid so that this
	// thread is in an implicit critical section and therefore can't leak the
	// reference by dying before iAliasLinAddr is set.
	MmuLock::Lock();

	TInt osAsid;
	if (!iAliasLinAddr)
		{
		// There is no existing alias, so open a reference on the os asid of the
		// process being aliased to stop it being freed and/or reused.
		osAsid = aProcess->TryOpenOsAsid();
		if (osAsid < 0)
			{// Couldn't open os asid so aProcess is no longer running.
			MmuLock::Unlock();
			return KErrBadDescriptor;
			}
		}
	else
		{
		// Just read the os asid of the process being aliased; we already have a reference on it.
		osAsid = aProcess->OsAsid();
		}

	// Now that we have the os asid, check access to kernel memory.
	if(aAddr >= KUserMemoryLimit && osAsid != (TUint)KKernelOsAsid)
		{
		NKern::ThreadEnterCS();
		MmuLock::Unlock();
		if (!iAliasLinAddr)
			{// Close the new reference as RemoveAlias() won't, because iAliasLinAddr is not set.
			aProcess->AsyncCloseOsAsid(); // Asynchronous close as this method should be quick.
			}
		NKern::ThreadLeaveCS();
		return KErrBadDescriptor; // prevent access to supervisor only memory
		}

	// Now we know all accesses to global memory are safe, so check if aAddr is global.
	if(aAddr >= KGlobalMemoryBase)
		{
		// address is in global section, don't bother aliasing it...
		if (!iAliasLinAddr)
			{// Close the new reference as it is not required.
			NKern::ThreadEnterCS();
			MmuLock::Unlock();
			aProcess->AsyncCloseOsAsid(); // Asynchronous close as this method should be quick.
			NKern::ThreadLeaveCS();
			}
		else
			{// Remove the existing alias as it is not required.
			DoRemoveAlias(iAliasLinAddr); // Releases mmu lock.
			}
		aAliasAddr = aAddr;
		TInt maxSize = KChunkSize-(aAddr&KChunkMask);
		aAliasSize = aSize<maxSize ? aSize : maxSize;
		return KErrNone;
		}

	TPde* pd = Mmu::PageDirectory(osAsid);
	TInt pdeIndex = aAddr>>KChunkShift;
	TPde pde = pd[pdeIndex];
#ifdef __SMP__
	TLinAddr aliasAddr;
#else
	TLinAddr aliasAddr = KIPCAlias+(aAddr&(KChunkMask & ~KPageMask));
#endif
	if(pde==iAliasPde && iAliasLinAddr)
		{
		// pde already aliased, so just update linear address...
#ifdef __SMP__
		__NK_ASSERT_DEBUG(iCpuRestoreCookie>=0);
		aliasAddr = iAliasLinAddr & ~KChunkMask;
		aliasAddr |= (aAddr & (KChunkMask & ~KPageMask));
#endif
		iAliasLinAddr = aliasAddr;
		}
	else
		{
		// alias PDE changed...
		if(!iAliasLinAddr)
			{
			TheMmu.iAliasList.Add(&iAliasLink); // add to list if not already aliased
#ifdef __SMP__
			__NK_ASSERT_DEBUG(iCpuRestoreCookie==-1);
			iCpuRestoreCookie = NKern::FreezeCpu(); // temporarily lock current thread to this processor
#endif
			}
		iAliasPde = pde;
		iAliasProcess = aProcess;
#ifdef __SMP__
		TSubScheduler& ss = SubScheduler(); // OK since we are locked to this CPU
		aliasAddr = TLinAddr(ss.i_AliasLinAddr) + (aAddr & (KChunkMask & ~KPageMask));
		iAliasPdePtr = (TPde*)(TLinAddr(ss.i_AliasPdePtr) + (osAsid << KPageTableShift));
#endif
		iAliasLinAddr = aliasAddr;
		*iAliasPdePtr = pde;
		}
	TRACE2(("DMemModelThread::Alias() PDEntry=%x, iAliasLinAddr=%x",pde, aliasAddr));
	LocalInvalidateTLBForPage(aliasAddr);
	TInt offset = aAddr&KPageMask;
	aAliasAddr = aliasAddr | offset;
	TInt maxSize = KPageSize - offset;
	aAliasSize = aSize<maxSize ? aSize : maxSize;
	return KErrNone;
	}


void DMemModelThread::RemoveAlias()
//
// Remove the alias mapping (if present).
//
	{
	TLinAddr addr = iAliasLinAddr;
	if(addr)
		{
		MmuLock::Lock();
		DoRemoveAlias(addr); // Releases mmu lock.
		}
	}


void DMemModelThread::DoRemoveAlias(TLinAddr aAddr)
//
// Remove the alias mapping. Called with the mmu lock held; releases it.
//
	{
	iAliasLink.Deque();
	iAliasLinAddr = 0;
	iAliasPde = KPdeUnallocatedEntry;
	*iAliasPdePtr = KPdeUnallocatedEntry;
	LocalInvalidateTLBForPage(aAddr);
#ifdef __SMP__
	__NK_ASSERT_DEBUG(iCpuRestoreCookie>=0);
	NKern::EndFreezeCpu(iCpuRestoreCookie);
	iCpuRestoreCookie = -1;
#endif

	// Must close the os asid while in critical section to prevent it being
	// leaked. However, we can't hold the mmu lock so we have to enter an
	// explicit critical section. It is ok to release the mmu lock as the
	// iAliasLinAddr and iAliasProcess members are only ever updated by the
	// current thread.
	NKern::ThreadEnterCS();
	MmuLock::Unlock();
	iAliasProcess->AsyncCloseOsAsid(); // Asynchronous close as this method should be quick.
	NKern::ThreadLeaveCS();
	}
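// Worked example of the error-code test in M::DemandPagingFault() below
// (illustrative comment only): a user-mode write to a paged-out page arrives
// with exc.iExcErrorCode==0x06, i.e. bit 0==0 (page not present), bit 1==1
// (write) and bit 2==1 (user mode). The bit 1 test below then adds EReadWrite
// to the access permissions passed to Mmu::HandlePageFault().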
TInt M::DemandPagingFault(TAny* aExceptionInfo)
	{
	TX86ExcInfo& exc=*(TX86ExcInfo*)aExceptionInfo;
	if(exc.iExcId!=EX86VectorPageFault)
		return KErrAbort; // not a page fault

	/*
	Meanings of exc.iExcErrorCode when exception type is EX86VectorPageFault...

	Bit 0	0 The fault was caused by a non-present page.
			1 The fault was caused by a page-level protection violation.
	Bit 1	0 The access causing the fault was a read.
			1 The access causing the fault was a write.
	Bit 2	0 The access causing the fault originated when the processor was executing in supervisor mode.
			1 The access causing the fault originated when the processor was executing in user mode.
	Bit 3	0 The fault was not caused by a reserved bit violation.
			1 The fault was caused by reserved bits set to 1 in a page directory.
	Bit 4	0 The fault was not caused by an instruction fetch.
			1 The fault was caused by an instruction fetch.
	*/

	// check access type...
	TUint accessPermissions = EUser; // we only allow paging of user memory
	if(exc.iExcErrorCode&(1<<1))
		accessPermissions |= EReadWrite;

	// let TheMmu handle the fault...
	return TheMmu.HandlePageFault(exc.iEip, exc.iFaultAddress, accessPermissions, aExceptionInfo);
	}