// Copyright (c) 1997-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\memmodel\epoc\multiple\arm\xmmu.cpp
//
//

#include "arm_mem.h"
#include
#include
#include
#include "execs.h"
#include
#include "cache_maintenance.inl"

#undef __MMU_MACHINE_CODED__

// SECTION_PDE(perm, attr, domain, execute, global)
// PT_PDE(domain)
// LP_PTE(perm, attr, execute, global)
// SP_PTE(perm, attr, execute, global)

const TInt KPageColourShift=2;
const TInt KPageColourCount=(1<> KPageShift) & (KChunkMask >> KPageShift));
	}

inline TLinAddr PageDirectoryLinAddr(TInt aOsAsid)
	{
	return (KPageDirectoryBase+(aOsAsid<> KChunkShift);
	}

extern void InvalidateTLBForPage(TLinAddr /*aLinAddr*/, TInt /*aAsid*/);
extern void FlushTLBs();
extern TUint32 TTCR();

TPte* SafePageTableFromPde(TPde aPde)
	{
	if((aPde&KPdeTypeMask)==KArmV6PdePageTable)
		{
		SPageInfo* pi = SPageInfo::SafeFromPhysAddr(aPde);
		if(pi)
			{
			TInt id = (pi->Offset()<>KPageTableShift)&KPtClusterMask);
			return PageTable(id);
			}
		}
	return 0;
	}

TPte* SafePtePtrFromLinAddr(TLinAddr aAddress, TInt aOsAsid=0)
	{
	if ((TInt)(aAddress>>KChunkShift)>=(TheMmu.iLocalPdSize>>2))
		aOsAsid = 0;
	TPde pde = PageDirectory(aOsAsid)[aAddress>>KChunkShift];
	TPte* pt = SafePageTableFromPde(pde);
	if(pt)
		pt += (aAddress>>KPageShift)&(KChunkMask>>KPageShift);
	return pt;
	}

#ifndef _DEBUG
// inline in UREL builds...
#ifdef __ARMCC__
__forceinline /* RVCT ignores normal inline qualifier :-( */
#else
inline
#endif
#endif
TPte* PtePtrFromLinAddr(TLinAddr aAddress, TInt aOsAsid=0)
	{
	// this function only works for process local memory addresses, or for kernel memory (asid==0).
	__NK_ASSERT_DEBUG(aOsAsid==0 || (TInt)(aAddress>>KChunkShift)<(TheMmu.iLocalPdSize>>2));
	TPde pde = PageDirectory(aOsAsid)[aAddress>>KChunkShift];
	SPageInfo* pi = SPageInfo::FromPhysAddr(pde);
	TInt id = (pi->Offset()<>KPageTableShift)&KPtClusterMask);
	TPte* pt = PageTable(id);
	pt += (aAddress>>KPageShift)&(KChunkMask>>KPageShift);
	return pt;
	}


TInt ArmMmu::LinearToPhysical(TLinAddr aLinAddr, TInt aSize, TPhysAddr& aPhysicalAddress, TPhysAddr* aPhysicalPageList, TInt aOsAsid)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::LinearToPhysical %08x+%08x, asid=%d",aLinAddr,aSize,aOsAsid));
	TPhysAddr physStart = ArmMmu::LinearToPhysical(aLinAddr,aOsAsid);
	TPhysAddr nextPhys = physStart&~KPageMask;

	TUint32* pageList = aPhysicalPageList;

	TInt pageIndex = aLinAddr>>KPageShift;
	TInt pagesLeft = ((aLinAddr+aSize-1)>>KPageShift)+1 - pageIndex;
	TInt pdeIndex = aLinAddr>>KChunkShift;
	TPde* pdePtr = (pdeIndex<(iLocalPdSize>>2) || (iAsidInfo[aOsAsid]&1))
					? PageDirectory(aOsAsid)
					: ::InitPageDirectory;
	pdePtr += pdeIndex;
	while(pagesLeft)
		{
		pageIndex &= KChunkMask>>KPageShift;
		TInt pagesLeftInChunk = (1<<(KChunkShift-KPageShift))-pageIndex;
		if(pagesLeftInChunk>pagesLeft)
			pagesLeftInChunk = pagesLeft;
		pagesLeft -= pagesLeftInChunk;

		TPhysAddr phys;
		TPde pde = *pdePtr++;
		TUint pdeType = pde&KPdeTypeMask;
		if(pdeType==KArmV6PdeSection)
			{
			phys = (pde & KPdeSectionAddrMask) + (pageIndex*KPageSize);
			__KTRACE_OPT(KMMU2,Kern::Printf("ArmMmu::LinearToPhysical Section phys=%8x",phys));
			TInt n=pagesLeftInChunk;
			phys==nextPhys ? nextPhys+=n*KPageSize : nextPhys=KPhysAddrInvalid;
			if(pageList)
				{
				TUint32* pageEnd = pageList+n;
				do
					{
					*pageList++ = phys;
					phys+=KPageSize;
					}
				while(pageList= KArmV6PteSmallPage)
			{
			phys = (pte & KPteSmallPageAddrMask);
			__KTRACE_OPT(KMMU2,Kern::Printf("ArmMmu::LinearToPhysical Small Page phys=%8x",phys));
			phys==nextPhys ? nextPhys+=KPageSize : nextPhys=KPhysAddrInvalid;
			if(pageList)
				*pageList++ = phys;
			if(--pagesLeftInChunk)
				continue;
			break;
			}
		if (pte_type == KArmV6PteLargePage)
			{
			--pt; // back up ptr
			TUint pageOffset = ((TUint)pt>>2)&(KLargeSmallPageRatio-1);
			phys = (pte & KPteLargePageAddrMask) + pageOffset*KPageSize;
			__KTRACE_OPT(KMMU2,Kern::Printf("ArmMmu::LinearToPhysical Large Page phys=%8x",phys));
			TInt n=KLargeSmallPageRatio-pageOffset;
			if(n>pagesLeftInChunk)
				n = pagesLeftInChunk;
			phys==nextPhys ? nextPhys+=n*KPageSize : nextPhys=KPhysAddrInvalid;
			if(pageList)
				{
				TUint32* pageEnd = pageList+n;
				do
					{
					*pageList++ = phys;
					phys+=KPageSize;
					}
				while(pageList> KPageShift; // Index of the page within the section
	TInt pagesLeft = ((aLinAddr & KPageMask) + aSize + KPageMask) >> KPageShift;

	TInt pdeIndex = aLinAddr>>KChunkShift;


	MmuBase::Wait();	// RamAlloc mutex for accessing page/directory tables.
	NKern::LockSystem();	// System lock for accessing SPageInfo objects.

	TPde* pdePtr = (pdeIndex<(iLocalPdSize>>2) || (iAsidInfo[aOsAsid]&1)) ?
					PageDirectory(aOsAsid) : ::InitPageDirectory;
	pdePtr += pdeIndex;	// This points to the first pde

	while(pagesLeft)
		{
		TInt pagesLeftInChunk = (1<<(KChunkShift-KPageShift))-pageIndex;
		if(pagesLeftInChunk>pagesLeft)
			pagesLeftInChunk = pagesLeft;

		pagesLeft -= pagesLeftInChunk;

		TPte* pt = SafePageTableFromPde(*pdePtr++);
		if(!pt) { err = KErrNotFound; goto fail; }	// Cannot get page table.

		pt += pageIndex;

		for(;pagesLeftInChunk--;)
			{
			TPhysAddr phys = (*pt++ & KPteSmallPageAddrMask);
			pi = SPageInfo::SafeFromPhysAddr(phys);
			if(!pi) { err = KErrNotFound; goto fail; }	// Invalid address

			__KTRACE_OPT(KMMU2,Kern::Printf("PageInfo: PA:%x T:%x S:%x O:%x C:%x",phys, pi->Type(), pi->State(), pi->Owner(), pi->LockCount()));
			if (chunk==NULL)
				{ // This is the first page. Check the 'trusted' bit.
				if (pi->Type()!= SPageInfo::EChunk)
					{ err = KErrAccessDenied; goto fail; }	// The first page does not belong to a chunk.

				chunk = (DChunk*)pi->Owner();
				if ( (chunk == NULL) || ((chunk->iAttributes & DChunk::ETrustedChunk)== 0) )
					{ err = KErrAccessDenied; goto fail; }	// Not a trusted chunk
				}
			pi->Lock();

			*pageList++ = phys;
			if ( (++pagesInList&127) == 0)	// release the system lock temporarily every 512K
				NKern::FlashSystem();
			}
		pageIndex = 0;
		}

	if (pi->Type()!= SPageInfo::EChunk)
		{ err = KErrAccessDenied; goto fail; }	// The last page does not belong to a chunk.

	if (chunk && (chunk != (DChunk*)pi->Owner()))
		{ err = KErrArgument; goto fail; }	// The first and the last page do not belong to the same chunk.

	NKern::UnlockSystem();
	MmuBase::Signal();
	return KErrNone;

fail:
	__KTRACE_OPT(KMMU2,Kern::Printf("ArmMmu::PreparePagesForDMA failed"));
	NKern::UnlockSystem();
	MmuBase::Signal();
	ReleasePagesFromDMA(aPhysicalPageList, pagesInList);
	return err;
	}

TInt ArmMmu::ReleasePagesFromDMA(TPhysAddr* aPhysicalPageList, TInt aPageCount)
// Unlocks physical pages.
// @param aPhysicalPageList - points to the list of physical pages that should be released.
// @param aPageCount - the number of physical pages in the list.
	{
	NKern::LockSystem();
	__KTRACE_OPT(KMMU2,Kern::Printf("ArmMmu::ReleasePagesFromDMA count:%d",aPageCount));

	while (aPageCount--)
		{
		SPageInfo* pi = SPageInfo::SafeFromPhysAddr(*aPhysicalPageList++);
		if(!pi)
			{
			NKern::UnlockSystem();
			return KErrArgument;
			}
		__KTRACE_OPT(KMMU2,Kern::Printf("PageInfo: T:%x S:%x O:%x C:%x",pi->Type(), pi->State(), pi->Owner(), pi->LockCount()));
		pi->Unlock();
		}
	NKern::UnlockSystem();
	return KErrNone;
	}

TPhysAddr ArmMmu::LinearToPhysical(TLinAddr aLinAddr, TInt aOsAsid)
//
// Find the physical address corresponding to a given linear address in a specified OS
// address space. Call with system locked.
//
	{
	__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::LinearToPhysical(%08x,%d)",aLinAddr,aOsAsid));
	TInt pdeIndex=aLinAddr>>KChunkShift;
	TPde pde = (pdeIndex<(iLocalPdSize>>2) || (iAsidInfo[aOsAsid]&1)) ?
PageDirectory(aOsAsid)[pdeIndex] : ::InitPageDirectory[pdeIndex]; sl@0: TPhysAddr pa=KPhysAddrInvalid; sl@0: if ((pde&KPdePresentMask)==KArmV6PdePageTable) sl@0: { sl@0: SPageInfo* pi = SPageInfo::SafeFromPhysAddr(pde); sl@0: if (pi) sl@0: { sl@0: TInt id = (pi->Offset()<>KPageTableShift)&KPtClusterMask); sl@0: TPte* pPte=PageTable(id); sl@0: TPte pte=pPte[(aLinAddr&KChunkMask)>>KPageShift]; sl@0: if (pte & KArmV6PteSmallPage) sl@0: { sl@0: pa=(pte&KPteSmallPageAddrMask)+(aLinAddr&~KPteSmallPageAddrMask); sl@0: __KTRACE_OPT(KMMU,Kern::Printf("Mapped with small page - returning %08x",pa)); sl@0: } sl@0: else if ((pte & KArmV6PteTypeMask) == KArmV6PteLargePage) sl@0: { sl@0: pa=(pte&KPteLargePageAddrMask)+(aLinAddr&~KPteLargePageAddrMask); sl@0: __KTRACE_OPT(KMMU,Kern::Printf("Mapped with large page - returning %08x",pa)); sl@0: } sl@0: } sl@0: } sl@0: else if ((pde&KPdePresentMask)==KArmV6PdeSection) sl@0: { sl@0: pa=(pde&KPdeSectionAddrMask)|(aLinAddr&~KPdeSectionAddrMask); sl@0: __KTRACE_OPT(KMMU,Kern::Printf("Mapped with section - returning %08x",pa)); sl@0: } sl@0: return pa; sl@0: } sl@0: sl@0: // permission table indexed by XN:APX:AP1:AP0 sl@0: static const TInt PermissionLookup[16]= sl@0: { //XN:APX:AP1:AP0 sl@0: 0, //0 0 0 0 no access sl@0: EMapAttrWriteSup|EMapAttrReadSup|EMapAttrExecSup, //0 0 0 1 RW sup execute sl@0: EMapAttrWriteSup|EMapAttrReadUser|EMapAttrExecUser, //0 0 1 0 supRW usrR execute sl@0: EMapAttrWriteUser|EMapAttrReadUser|EMapAttrExecUser,//0 0 1 1 supRW usrRW execute sl@0: 0, //0 1 0 0 reserved sl@0: EMapAttrReadSup|EMapAttrExecSup, //0 1 0 1 supR execute sl@0: EMapAttrReadUser|EMapAttrExecUser, //0 1 1 0 supR usrR execute sl@0: 0, //0 1 1 1 reserved sl@0: 0, //1 0 0 0 no access sl@0: EMapAttrWriteSup|EMapAttrReadSup, //1 0 0 1 RW sup sl@0: EMapAttrWriteSup|EMapAttrReadUser, //1 0 1 0 supRW usrR sl@0: EMapAttrWriteUser|EMapAttrReadUser, //1 0 1 1 supRW usrRW sl@0: 0, //1 1 0 0 reserved sl@0: EMapAttrReadSup, //1 1 0 1 supR sl@0: EMapAttrReadUser, //1 1 1 0 supR usrR sl@0: EMapAttrReadUser, //1 1 1 1 supR usrR sl@0: }; sl@0: sl@0: TInt ArmMmu::PageTableId(TLinAddr aAddr, TInt aOsAsid) sl@0: { sl@0: TInt id=-1; sl@0: __KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::PageTableId(%08x,%d)",aAddr,aOsAsid)); sl@0: TInt pdeIndex=aAddr>>KChunkShift; sl@0: TPde pde = (pdeIndex<(iLocalPdSize>>2) || (iAsidInfo[aOsAsid]&1)) ? 
PageDirectory(aOsAsid)[pdeIndex] : ::InitPageDirectory[pdeIndex]; sl@0: if ((pde&KArmV6PdeTypeMask)==KArmV6PdePageTable) sl@0: { sl@0: SPageInfo* pi = SPageInfo::SafeFromPhysAddr(pde); sl@0: if (pi) sl@0: id = (pi->Offset()<>KPageTableShift)&KPtClusterMask); sl@0: } sl@0: __KTRACE_OPT(KMMU,Kern::Printf("ID=%d",id)); sl@0: return id; sl@0: } sl@0: sl@0: // Used only during boot for recovery of RAM drive sl@0: TInt ArmMmu::BootPageTableId(TLinAddr aAddr, TPhysAddr& aPtPhys) sl@0: { sl@0: TInt id=KErrNotFound; sl@0: __KTRACE_OPT(KMMU,Kern::Printf("ArmMmu:BootPageTableId(%08x,&)",aAddr)); sl@0: TPde* kpd=(TPde*)KPageDirectoryBase; // kernel page directory sl@0: TInt pdeIndex=aAddr>>KChunkShift; sl@0: TPde pde = kpd[pdeIndex]; sl@0: if ((pde & KArmV6PdeTypeMask) == KArmV6PdePageTable) sl@0: { sl@0: aPtPhys = pde & KPdePageTableAddrMask; sl@0: SPageInfo* pi = SPageInfo::SafeFromPhysAddr(pde); sl@0: if (pi) sl@0: { sl@0: SPageInfo::TType type = pi->Type(); sl@0: if (type == SPageInfo::EPageTable) sl@0: id = (pi->Offset()<>KPageTableShift)&KPtClusterMask); sl@0: else if (type == SPageInfo::EUnused) sl@0: id = KErrUnknown; sl@0: } sl@0: } sl@0: __KTRACE_OPT(KMMU,Kern::Printf("ID=%d",id)); sl@0: return id; sl@0: } sl@0: sl@0: TBool ArmMmu::PteIsPresent(TPte aPte) sl@0: { sl@0: return aPte & KArmV6PteTypeMask; sl@0: } sl@0: sl@0: TPhysAddr ArmMmu::PtePhysAddr(TPte aPte, TInt aPteIndex) sl@0: { sl@0: TUint32 pte_type = aPte & KArmV6PteTypeMask; sl@0: if (pte_type == KArmV6PteLargePage) sl@0: return (aPte & KPteLargePageAddrMask) + (TPhysAddr(aPteIndex << KPageShift) & KLargePageMask); sl@0: else if (pte_type != 0) sl@0: return aPte & KPteSmallPageAddrMask; sl@0: return KPhysAddrInvalid; sl@0: } sl@0: sl@0: TPhysAddr ArmMmu::PdePhysAddr(TLinAddr aAddr) sl@0: { sl@0: TPde* kpd = (TPde*)KPageDirectoryBase; // kernel page directory sl@0: TPde pde = kpd[aAddr>>KChunkShift]; sl@0: if ((pde & KPdePresentMask) == KArmV6PdeSection) sl@0: return pde & KPdeSectionAddrMask; sl@0: return KPhysAddrInvalid; sl@0: } sl@0: sl@0: void ArmMmu::Init1() sl@0: { sl@0: __KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("ArmMmu::Init1")); sl@0: sl@0: // MmuBase data sl@0: iPageSize=KPageSize; sl@0: iPageMask=KPageMask; sl@0: iPageShift=KPageShift; sl@0: iChunkSize=KChunkSize; sl@0: iChunkMask=KChunkMask; sl@0: iChunkShift=KChunkShift; sl@0: iPageTableSize=KPageTableSize; sl@0: iPageTableMask=KPageTableMask; sl@0: iPageTableShift=KPageTableShift; sl@0: iPtClusterSize=KPtClusterSize; sl@0: iPtClusterMask=KPtClusterMask; sl@0: iPtClusterShift=KPtClusterShift; sl@0: iPtBlockSize=KPtBlockSize; sl@0: iPtBlockMask=KPtBlockMask; sl@0: iPtBlockShift=KPtBlockShift; sl@0: iPtGroupSize=KChunkSize/KPageTableSize; sl@0: iPtGroupMask=iPtGroupSize-1; sl@0: iPtGroupShift=iChunkShift-iPageTableShift; sl@0: //TInt* iPtBlockCount; // dynamically allocated - Init2 sl@0: //TInt* iPtGroupCount; // dynamically allocated - Init2 sl@0: iPtInfo=(SPageTableInfo*)KPageTableInfoBase; sl@0: iPageTableLinBase=KPageTableBase; sl@0: //iRamPageAllocator; // dynamically allocated - Init2 sl@0: //iAsyncFreeList; // dynamically allocated - Init2 sl@0: //iPageTableAllocator; // dynamically allocated - Init2 sl@0: //iPageTableLinearAllocator;// dynamically allocated - Init2 sl@0: iPtInfoPtePerm=KPtInfoPtePerm; sl@0: iPtPtePerm=KPtPtePerm; sl@0: iPtPdePerm=KPtPdePerm; sl@0: iUserCodeLoadPtePerm=KUserCodeLoadPte; sl@0: iKernelCodePtePerm=KKernelCodeRunPte; sl@0: iTempAddr=KTempAddr; sl@0: iSecondTempAddr=KSecondTempAddr; sl@0: iMapSizes=KPageSize|KLargePageSize|KChunkSize; sl@0: 
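	// ROM shadowing parameters: the linear address range occupied by the ROM image, and the
	// PTE/PDE permissions applied when a ROM page is shadowed in RAM (see the shadow page code below).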
iRomLinearBase = ::RomHeaderAddress; sl@0: iRomLinearEnd = KRomLinearEnd; sl@0: iShadowPtePerm = KShadowPtePerm; sl@0: iShadowPdePerm = KShadowPdePerm; sl@0: sl@0: // Mmu data sl@0: TInt total_ram=TheSuperPage().iTotalRamSize; sl@0: sl@0: // Large or small configuration? sl@0: // This is determined by the bootstrap based on RAM size sl@0: TUint32 ttcr=TTCR(); sl@0: __NK_ASSERT_ALWAYS(ttcr==1 || ttcr==2); sl@0: TBool large = (ttcr==1); sl@0: sl@0: // calculate cache colouring... sl@0: TInt iColourCount = 0; sl@0: TInt dColourCount = 0; sl@0: TUint32 ctr = InternalCache::TypeRegister(); sl@0: __KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("CacheTypeRegister = %08x",ctr)); sl@0: #ifdef __CPU_ARMV6 sl@0: __NK_ASSERT_ALWAYS((ctr>>29)==0); // check ARMv6 format sl@0: if(ctr&0x800) sl@0: iColourCount = 4; sl@0: if(ctr&0x800000) sl@0: dColourCount = 4; sl@0: #else sl@0: __KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("CacheTypeRegister = %08x",ctr)); sl@0: __NK_ASSERT_ALWAYS((ctr>>29)==4); // check ARMv7 format sl@0: TUint l1ip = (ctr>>14)&3; // L1 instruction cache indexing and tagging policy sl@0: __NK_ASSERT_ALWAYS(l1ip>=2); // check I cache is physically tagged sl@0: sl@0: TUint32 clidr = InternalCache::LevelIDRegister(); sl@0: __KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("CacheLevelIDRegister = %08x",clidr)); sl@0: TUint l1type = clidr&7; sl@0: if(l1type) sl@0: { sl@0: if(l1type==2 || l1type==3 || l1type==4) sl@0: { sl@0: // we have an L1 data cache... sl@0: TUint32 csir = InternalCache::SizeIdRegister(0,0); sl@0: TUint sets = ((csir>>13)&0x7fff)+1; sl@0: TUint ways = ((csir>>3)&0x3ff)+1; sl@0: TUint lineSizeShift = (csir&7)+4; sl@0: // assume L1 data cache is VIPT and alias checks broken and so we need data cache colouring... sl@0: dColourCount = (sets<>KPageShift; sl@0: if(l1type==4) // unified cache, so set instruction cache colour as well... sl@0: iColourCount = (sets<>KPageShift; sl@0: __KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("L1DCache = 0x%x,0x%x,%d colourCount=%d",sets,ways,lineSizeShift,(sets<>KPageShift)); sl@0: } sl@0: sl@0: if(l1type==1 || l1type==3) sl@0: { sl@0: // we have a separate L1 instruction cache... sl@0: TUint32 csir = InternalCache::SizeIdRegister(1,0); sl@0: TUint sets = ((csir>>13)&0x7fff)+1; sl@0: TUint ways = ((csir>>3)&0x3ff)+1; sl@0: TUint lineSizeShift = (csir&7)+4; sl@0: iColourCount = (sets<>KPageShift; sl@0: __KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("L1ICache = 0x%x,0x%x,%d colourCount=%d",sets,ways,lineSizeShift,(sets<>KPageShift)); sl@0: } sl@0: } sl@0: if(l1ip==3) sl@0: { sl@0: // PIPT cache, so no colouring restrictions... sl@0: __KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("L1ICache is PIPT")); sl@0: iColourCount = 0; sl@0: } sl@0: else sl@0: { sl@0: // VIPT cache... 
sl@0: __KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("L1ICache is VIPT")); sl@0: } sl@0: #endif sl@0: TUint colourShift = 0; sl@0: for(TUint colourCount=Max(iColourCount,dColourCount); colourCount!=0; colourCount>>=1) sl@0: ++colourShift; sl@0: iAliasSize=KPageSize<>KPageShift); sl@0: iSecondTempPte=PageTable(PageTableId(iSecondTempAddr,0))+((iSecondTempAddr&KChunkMask)>>KPageShift); sl@0: __KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("iTempAddr=%08x, iTempPte=%08x, iSecondTempAddr=%08x, iSecondTempPte=%08x", sl@0: iTempAddr, iTempPte, iSecondTempAddr, iSecondTempPte)); sl@0: CreateKernelSection(KKernelSectionEnd, iAliasShift); sl@0: CreateUserGlobalSection(KUserGlobalDataBase, KUserGlobalDataEnd); sl@0: Mmu::DoInit2(); sl@0: } sl@0: sl@0: #ifndef __MMU_MACHINE_CODED__ sl@0: void ArmMmu::MapRamPages(TInt aId, SPageInfo::TType aType, TAny* aPtr, TUint32 aOffset, const TPhysAddr* aPageList, TInt aNumPages, TPte aPtePerm) sl@0: // sl@0: // Map a list of physical RAM pages into a specified page table with specified PTE permissions. sl@0: // Update the page information array. sl@0: // Call this with the system locked. sl@0: // sl@0: { sl@0: __KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::MapRamPages() id=%d type=%d ptr=%08x off=%08x n=%d perm=%08x", sl@0: aId, aType, aPtr, aOffset, aNumPages, aPtePerm)); sl@0: sl@0: SPageTableInfo& ptinfo=iPtInfo[aId]; sl@0: ptinfo.iCount+=aNumPages; sl@0: aOffset>>=KPageShift; sl@0: TInt ptOffset=aOffset & KPagesInPDEMask; // entry number in page table sl@0: TPte* pPte=PageTable(aId)+ptOffset; // address of first PTE sl@0: sl@0: TLinAddr firstPte = (TLinAddr)pPte; //Will need this to clean page table changes in cache. sl@0: sl@0: while(aNumPages--) sl@0: { sl@0: TPhysAddr pa = *aPageList++; sl@0: if(pa==KPhysAddrInvalid) sl@0: { sl@0: ++pPte; sl@0: __NK_ASSERT_DEBUG(aType==SPageInfo::EInvalid); sl@0: continue; sl@0: } sl@0: *pPte++ = pa | aPtePerm; // insert PTE sl@0: __KTRACE_OPT(KMMU,Kern::Printf("Writing PTE %08x to %08x",pPte[-1],pPte-1)); sl@0: if (aType!=SPageInfo::EInvalid) sl@0: { sl@0: SPageInfo* pi = SPageInfo::SafeFromPhysAddr(pa); sl@0: if(pi) sl@0: { sl@0: pi->Set(aType,aPtr,aOffset); sl@0: __KTRACE_OPT(KMMU,Kern::Printf("I: %d %08x %08x",aType,aPtr,aOffset)); sl@0: ++aOffset; // increment offset for next page sl@0: } sl@0: } sl@0: } sl@0: CacheMaintenance::MultiplePtesUpdated(firstPte, (TUint)pPte-firstPte); sl@0: } sl@0: sl@0: void ArmMmu::MapPhysicalPages(TInt aId, SPageInfo::TType aType, TAny* aPtr, TUint32 aOffset, TPhysAddr aPhysAddr, TInt aNumPages, TPte aPtePerm) sl@0: // sl@0: // Map consecutive physical pages into a specified page table with specified PTE permissions. sl@0: // Update the page information array if RAM pages are being mapped. sl@0: // Call this with the system locked. 
sl@0: // sl@0: { sl@0: __KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::MapPhysicalPages() id=%d type=%d ptr=%08x off=%08x phys=%08x n=%d perm=%08x", sl@0: aId, aType, aPtr, aOffset, aPhysAddr, aNumPages, aPtePerm)); sl@0: SPageTableInfo& ptinfo=iPtInfo[aId]; sl@0: ptinfo.iCount+=aNumPages; sl@0: aOffset>>=KPageShift; sl@0: TInt ptOffset=aOffset & KPagesInPDEMask; // entry number in page table sl@0: TPte* pPte=(TPte*)(PageTableLinAddr(aId))+ptOffset; // address of first PTE sl@0: sl@0: TLinAddr firstPte = (TLinAddr)pPte; //Will need this to clean page table changes in cache sl@0: sl@0: SPageInfo* pi; sl@0: if(aType==SPageInfo::EInvalid) sl@0: pi = NULL; sl@0: else sl@0: pi = SPageInfo::SafeFromPhysAddr(aPhysAddr); sl@0: while(aNumPages--) sl@0: { sl@0: *pPte++ = aPhysAddr|aPtePerm; // insert PTE sl@0: aPhysAddr+=KPageSize; sl@0: __KTRACE_OPT(KMMU,Kern::Printf("Writing PTE %08x to %08x",pPte[-1],pPte-1)); sl@0: if (pi) sl@0: { sl@0: pi->Set(aType,aPtr,aOffset); sl@0: __KTRACE_OPT(KMMU,Kern::Printf("I: %d %08x %08x",aType,aPtr,aOffset)); sl@0: ++aOffset; // increment offset for next page sl@0: ++pi; sl@0: } sl@0: } sl@0: sl@0: CacheMaintenance::MultiplePtesUpdated(firstPte, (TUint)pPte-firstPte); sl@0: } sl@0: sl@0: void ArmMmu::MapVirtual(TInt aId, TInt aNumPages) sl@0: // sl@0: // Called in place of MapRamPages or MapPhysicalPages to update mmu data structures when committing sl@0: // virtual address space to a chunk. No pages are mapped. sl@0: // Call this with the system locked. sl@0: // sl@0: { sl@0: SPageTableInfo& ptinfo=iPtInfo[aId]; sl@0: ptinfo.iCount+=aNumPages; sl@0: } sl@0: sl@0: void ArmMmu::RemapPage(TInt aId, TUint32 aAddr, TPhysAddr aOldAddr, TPhysAddr aNewAddr, TPte aPtePerm, DProcess* aProcess) sl@0: // sl@0: // Replace the mapping at address aAddr in page table aId. sl@0: // Update the page information array for both the old and new pages. sl@0: // Return physical address of old page if it is now ready to be freed. sl@0: // Call this with the system locked. sl@0: // May be called with interrupts disabled, do not enable/disable them. sl@0: // sl@0: { sl@0: TInt ptOffset=(aAddr&KChunkMask)>>KPageShift; // entry number in page table sl@0: TPte* pPte=PageTable(aId)+ptOffset; // address of PTE sl@0: TPte pte=*pPte; sl@0: TInt asid=aProcess ? ((DMemModelProcess*)aProcess)->iOsAsid : sl@0: (aAddrLockCount()==0,Panic(ERemapPageFailed)); sl@0: sl@0: // remap page sl@0: *pPte = aNewAddr | aPtePerm; // overwrite PTE sl@0: CacheMaintenance::SinglePteUpdated((TLinAddr)pPte); sl@0: InvalidateTLBForPage(aAddr,asid); // flush TLB entry sl@0: sl@0: // update new pageinfo, clear old sl@0: SPageInfo* pi = SPageInfo::FromPhysAddr(aNewAddr); sl@0: pi->Set(oldpi->Type(),oldpi->Owner(),oldpi->Offset()); sl@0: oldpi->SetUnused(); sl@0: } sl@0: else sl@0: { sl@0: Panic(ERemapPageFailed); sl@0: } sl@0: } sl@0: sl@0: void ArmMmu::RemapPageByAsid(TBitMapAllocator* aOsAsids, TLinAddr aLinAddr, TPhysAddr aOldAddr, TPhysAddr aNewAddr, TPte aPtePerm) sl@0: // sl@0: // Replace the mapping at address aLinAddr in the relevant page table for all sl@0: // ASIDs specified in aOsAsids, but only if the currently mapped address is sl@0: // aOldAddr. sl@0: // Update the page information array for both the old and new pages. sl@0: // Call this with the system unlocked. 
sl@0: // sl@0: { sl@0: __KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::RemapPageByAsid() linaddr=%08x oldaddr=%08x newaddr=%08x perm=%08x", aLinAddr, aOldAddr, aNewAddr, aPtePerm)); sl@0: sl@0: TInt asid = -1; sl@0: TInt lastAsid = KArmV6NumAsids - 1; sl@0: TUint32* ptr = aOsAsids->iMap; sl@0: NKern::LockSystem(); sl@0: do sl@0: { sl@0: TUint32 bits = *ptr++; sl@0: do sl@0: { sl@0: ++asid; sl@0: if(bits & 0x80000000u) sl@0: { sl@0: // mapped in this address space, so update PTE... sl@0: TPte* pPte = PtePtrFromLinAddr(aLinAddr, asid); sl@0: TPte pte = *pPte; sl@0: if ((pte&~KPageMask) == aOldAddr) sl@0: { sl@0: *pPte = aNewAddr | aPtePerm; sl@0: __KTRACE_OPT(KMMU,Kern::Printf("Writing PTE %08x to %08x in asid %d",*pPte,pPte,asid)); sl@0: CacheMaintenance::SinglePteUpdated((TLinAddr)pPte); sl@0: InvalidateTLBForPage(aLinAddr,asid); // flush TLB entry sl@0: } sl@0: } sl@0: } sl@0: while(bits<<=1); sl@0: NKern::FlashSystem(); sl@0: asid |= 31; sl@0: } sl@0: while(asidSet(oldpi->Type(),oldpi->Owner(),oldpi->Offset()); sl@0: oldpi->SetUnused(); sl@0: sl@0: NKern::UnlockSystem(); sl@0: } sl@0: sl@0: TInt ArmMmu::UnmapPages(TInt aId, TUint32 aAddr, TInt aNumPages, TPhysAddr* aPageList, TBool aSetPagesFree, TInt& aNumPtes, TInt& aNumFree, DProcess* aProcess) sl@0: // sl@0: // Unmap a specified area at address aAddr in page table aId. Place physical addresses of unmapped sl@0: // pages into aPageList, and count of unmapped pages into aNumPtes. sl@0: // Return number of pages still mapped using this page table. sl@0: // Call this with the system locked. sl@0: // On multiple memory model, do not call this method with aSetPagesFree false. Call UnmapUnownedPages instead. sl@0: { sl@0: __KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::UnmapPages() id=%d addr=%08x n=%d pl=%08x set-free=%d",aId,aAddr,aNumPages,aPageList,aSetPagesFree)); sl@0: TInt ptOffset=(aAddr&KChunkMask)>>KPageShift; // entry number in page table sl@0: TPte* pPte=PageTable(aId)+ptOffset; // address of first PTE sl@0: TInt np=0; sl@0: TInt nf=0; sl@0: TUint32 ng=0; sl@0: TInt asid=aProcess ? ((DMemModelProcess*)aProcess)->iOsAsid : sl@0: (aAddr= 0) //otherwise, KUnmapPagesTLBFlushDeferred will be returned. sl@0: InvalidateTLBForPage(aAddr,asid); // flush any corresponding TLB entry sl@0: #endif sl@0: TPhysAddr pa=pte & KPteSmallPageAddrMask; // physical address of unmapped page sl@0: if (aSetPagesFree) sl@0: { sl@0: SPageInfo* pi = SPageInfo::FromPhysAddr(pa); sl@0: if(iRamCache->PageUnmapped(pi)) sl@0: { sl@0: pi->SetUnused(); // mark page as unused sl@0: if (pi->LockCount()==0) sl@0: { sl@0: *aPageList++=pa; // store in page list sl@0: ++nf; // count free pages sl@0: } sl@0: } sl@0: } sl@0: else sl@0: *aPageList++=pa; // store in page list sl@0: } sl@0: aAddr+=KPageSize; sl@0: } sl@0: sl@0: aNumPtes=np; sl@0: aNumFree=nf; sl@0: SPageTableInfo& ptinfo=iPtInfo[aId]; sl@0: TInt r=(ptinfo.iCount-=np); sl@0: if (asid<0) sl@0: r|=KUnmapPagesTLBFlushDeferred; sl@0: sl@0: sl@0: #if defined(__CPU_ARM1136__) && !defined(__CPU_ARM1136_ERRATUM_353494_FIXED) sl@0: __FlushBtb(); sl@0: #endif sl@0: sl@0: __KTRACE_OPT(KMMU,Kern::Printf("Unmapped %d; Freed: %d; Return %08x",np,nf,r)); sl@0: return r; // return number of pages remaining in this page table sl@0: } sl@0: sl@0: TInt ArmMmu::UnmapVirtual(TInt aId, TUint32 aAddr, TInt aNumPages, TPhysAddr* aPageList, TBool aSetPagesFree, TInt& aNumPtes, TInt& aNumFree, DProcess* aProcess) sl@0: // sl@0: // Unmap a specified area at address aAddr in page table aId. 
// Place physical addresses of unmapped
// pages into aPageList, and count of unmapped pages into aNumPtes.
// Adjust the page table reference count as if aNumPages pages were unmapped.
// Return number of pages still mapped using this page table.
// Call this with the system locked.
// On multiple memory model, do not call this method with aSetPagesFree false. Call UnmapUnownedVirtual instead.
//
	{
	SPageTableInfo& ptinfo=iPtInfo[aId];
	TInt newCount = ptinfo.iCount - aNumPages;
	UnmapPages(aId, aAddr, aNumPages, aPageList, aSetPagesFree, aNumPtes, aNumFree, aProcess);
	ptinfo.iCount = newCount;
	aNumPtes = aNumPages;
	return newCount;
	}

TInt ArmMmu::UnmapUnownedPages(TInt aId, TUint32 aAddr, TInt aNumPages,
		TPhysAddr* aPageList, TLinAddr* aLAPageList,TInt& aNumPtes, TInt& aNumFree, DProcess* aProcess)
/*
 * Unmaps the specified area at address aAddr in page table aId.
 * Places physical addresses of not-demand-paged unmapped pages into aPageList.
 * Corresponding linear addresses are placed into aLAPageList.
 * 'Old' demand-paged pages (which hold an invalid PTE entry with the physical address) are neither unmapped nor
 * placed in aPageList, but are still counted in aNumPtes.
 *
 * This method should be called to decommit physical memory not owned by the chunk. As we do not know
 * the origin of such memory, PtInfo could be invalid (or doesn't exist) so cache maintenance may not be
 * able to obtain the mapping colour. For that reason, this also returns the former linear address of each page
 * in aPageList.
 *
 * @pre All pages are mapped within a single page table identified by aId.
 * @pre On entry, the system lock is held and is not released during the execution.
 *
 * @arg aId          Id of the page table that maps the pages.
 * @arg aAddr        Linear address of the start of the area.
 * @arg aNumPages    The number of pages to unmap.
 * @arg aProcess     The owning process of the memory area to unmap.
 * @arg aPageList    On exit, holds the list of unmapped pages.
 * @arg aLAPageList  On exit, holds the list of linear addresses of unmapped pages.
 * @arg aNumFree     On exit, holds the number of pages in aPageList.
 * @arg aNumPtes     On exit, holds the number of unmapped pages. This includes demand-paged 'old'
 *                   pages (with an invalid page table entry still holding the address of the physical page).
 *
 * @return The number of pages still mapped using this page table. It is ORed with
 *         KUnmapPagesTLBFlushDeferred if the TLB flush is not executed - which requires
 *         the caller to do a global TLB flush.
 */
	{
	__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::UnmapUnownedPages() id=%d addr=%08x n=%d pl=%08x",aId,aAddr,aNumPages,aPageList));
	TInt ptOffset=(aAddr&KChunkMask)>>KPageShift;	// entry number in page table
	TPte* pPte=PageTable(aId)+ptOffset;	// address of first PTE
	TInt np=0;
	TInt nf=0;
	TUint32 ng=0;
	TInt asid=aProcess ? ((DMemModelProcess*)aProcess)->iOsAsid :
		(aAddr= 0) //otherwise, KUnmapPagesTLBFlushDeferred will be returned.
		InvalidateTLBForPage(aAddr,asid);	// flush any corresponding TLB entry
#endif
			TPhysAddr pa=pte & KPteSmallPageAddrMask;	// physical address of unmapped page
			++nf;
			*aPageList++=pa;	// store physical address in page list
			*aLAPageList++=aAddr;	// store linear address in page list
			}
		aAddr+=KPageSize;
		}

	aNumPtes=np;
	aNumFree=nf;
	SPageTableInfo& ptinfo=iPtInfo[aId];
	TInt r=(ptinfo.iCount-=np);
	if (asid<0)
		r|=KUnmapPagesTLBFlushDeferred;


#if defined(__CPU_ARM1136__) && !defined(__CPU_ARM1136_ERRATUM_353494_FIXED)
	__FlushBtb();
#endif

	__KTRACE_OPT(KMMU,Kern::Printf("Unmapped %d; Freed: %d; Return %08x",np,nf,r));
	return r;	// return number of pages remaining in this page table
	}


TInt ArmMmu::UnmapUnownedVirtual(TInt aId, TUint32 aAddr, TInt aNumPages,
		TPhysAddr* aPageList, TLinAddr* aLAPageList,TInt& aNumPtes, TInt& aNumFree, DProcess* aProcess)
//
// Unmap a specified area at address aAddr in page table aId. Place physical addresses of unmapped
// pages into aPageList, and count of unmapped pages into aNumPtes.
// Adjust the page table reference count as if aNumPages pages were unmapped.
// Return number of pages still mapped using this page table.
// Call this with the system locked.
//
	{
	SPageTableInfo& ptinfo=iPtInfo[aId];
	TInt newCount = ptinfo.iCount - aNumPages;
	UnmapUnownedPages(aId, aAddr, aNumPages, aPageList, aLAPageList, aNumPtes, aNumFree, aProcess);
	ptinfo.iCount = newCount;
	aNumPtes = aNumPages;
	return newCount;
	}

void ArmMmu::DoAssignPageTable(TInt aId, TLinAddr aAddr, TPde aPdePerm, const TAny* aOsAsids)
//
// Assign an allocated page table to map a given linear address with specified permissions.
// This should be called with the system unlocked and the MMU mutex held.
//
	{
	__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::DoAssignPageTable %d to %08x perm %08x asid %08x",aId,aAddr,aPdePerm,aOsAsids));
	TLinAddr ptLin=PageTableLinAddr(aId);
	TPhysAddr ptPhys=LinearToPhysical(ptLin,0);
	TInt pdeIndex=TInt(aAddr>>KChunkShift);
	TBool gpd=(pdeIndex>=(iLocalPdSize>>2));
	TInt os_asid=(TInt)aOsAsids;
	if (TUint32(os_asid)iSize-pB->iAvail;
	for (os_asid=0; num_os_asids; ++os_asid)
		{
		if (pB->NotAllocated(os_asid,1))
			continue;	// os_asid is not needed
		TPde* pageDir=PageDirectory(os_asid);
		NKern::LockSystem();
		pageDir[pdeIndex]=ptPhys|aPdePerm;
		CacheMaintenance::SinglePteUpdated((TLinAddr)(pageDir+pdeIndex));
		NKern::UnlockSystem();

		__KTRACE_OPT(KMMU,Kern::Printf("Writing PDE %08x to %08x",ptPhys|aPdePerm,pageDir+pdeIndex));
		--num_os_asids;
		}
		}
	}

void ArmMmu::RemapPageTableSingle(TPhysAddr aOld, TPhysAddr aNew, TLinAddr aAddr, TInt aOsAsid)
//
// Replace a single page table mapping the specified linear address.
// This should be called with the system locked and the MMU mutex held.
sl@0: // sl@0: { sl@0: __KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::RemapPageTableSingle %08x to %08x at %08x asid %d",aOld,aNew,aAddr,aOsAsid)); sl@0: TPde* pageDir=PageDirectory(aOsAsid); sl@0: TInt pdeIndex=TInt(aAddr>>KChunkShift); sl@0: TPde pde=pageDir[pdeIndex]; sl@0: __ASSERT_ALWAYS((pde & KPdePageTableAddrMask) == aOld, Panic(ERemapPageTableFailed)); sl@0: TPde newPde=aNew|(pde&~KPdePageTableAddrMask); sl@0: pageDir[pdeIndex]=newPde; // will blow up here if address is in global region aOsAsid doesn't have a global PD sl@0: CacheMaintenance::SinglePteUpdated((TLinAddr)(pageDir+pdeIndex)); sl@0: sl@0: __KTRACE_OPT(KMMU,Kern::Printf("Writing PDE %08x to %08x",newPde,pageDir+pdeIndex)); sl@0: } sl@0: sl@0: void ArmMmu::RemapPageTableGlobal(TPhysAddr aOld, TPhysAddr aNew, TLinAddr aAddr) sl@0: // sl@0: // Replace a global page table mapping the specified linear address. sl@0: // This should be called with the system locked and the MMU mutex held. sl@0: // sl@0: { sl@0: __KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::RemapPageTableGlobal %08x to %08x at %08x",aOld,aNew,aAddr)); sl@0: TInt pdeIndex=TInt(aAddr>>KChunkShift); sl@0: TInt num_os_asids=iNumGlobalPageDirs; sl@0: const TBitMapAllocator& b=*(const TBitMapAllocator*)iOsAsidAllocator; sl@0: for (TInt os_asid=0; num_os_asids; ++os_asid) sl@0: { sl@0: if (!b.NotAllocated(os_asid,1) && (iAsidInfo[os_asid]&1)) sl@0: { sl@0: // this OS ASID exists and has a global page directory sl@0: TPde* pageDir=PageDirectory(os_asid); sl@0: TPde pde=pageDir[pdeIndex]; sl@0: if ((pde & KPdePageTableAddrMask) == aOld) sl@0: { sl@0: TPde newPde=aNew|(pde&~KPdePageTableAddrMask); sl@0: pageDir[pdeIndex]=newPde; sl@0: CacheMaintenance::SinglePteUpdated((TLinAddr)(pageDir+pdeIndex)); sl@0: sl@0: __KTRACE_OPT(KMMU,Kern::Printf("Writing PDE %08x to %08x",newPde,pageDir+pdeIndex)); sl@0: } sl@0: --num_os_asids; sl@0: } sl@0: if ((os_asid&31)==31) sl@0: NKern::FlashSystem(); sl@0: } sl@0: } sl@0: sl@0: void ArmMmu::RemapPageTableMultiple(TPhysAddr aOld, TPhysAddr aNew, TLinAddr aAddr, const TAny* aOsAsids) sl@0: // sl@0: // Replace multiple page table mappings of the specified linear address. sl@0: // This should be called with the system locked and the MMU mutex held. 
sl@0: // sl@0: { sl@0: __KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::RemapPageTableMultiple %08x to %08x at %08x asids %08x",aOld,aNew,aAddr,aOsAsids)); sl@0: TInt pdeIndex=TInt(aAddr>>KChunkShift); sl@0: const TBitMapAllocator* pB=(const TBitMapAllocator*)aOsAsids; sl@0: if ((TInt)aOsAsids==-1) sl@0: pB=iOsAsidAllocator; // 0's in positions which exist sl@0: sl@0: TInt asid = -1; sl@0: TInt lastAsid = KArmV6NumAsids - 1; sl@0: const TUint32* ptr = pB->iMap; sl@0: do sl@0: { sl@0: TUint32 bits = *ptr++; sl@0: do sl@0: { sl@0: ++asid; sl@0: if ((bits & 0x80000000u) == 0) sl@0: { sl@0: // mapped in this address space - bitmap is inverted sl@0: TPde* pageDir=PageDirectory(asid); sl@0: TPde pde=pageDir[pdeIndex]; sl@0: if ((pde & KPdePageTableAddrMask) == aOld) sl@0: { sl@0: TPde newPde=aNew|(pde&~KPdePageTableAddrMask); sl@0: pageDir[pdeIndex]=newPde; sl@0: CacheMaintenance::SinglePteUpdated((TLinAddr)(pageDir+pdeIndex)); sl@0: sl@0: __KTRACE_OPT(KMMU,Kern::Printf("Writing PDE %08x to %08x",newPde,pageDir+pdeIndex)); sl@0: } sl@0: } sl@0: } sl@0: while(bits<<=1); sl@0: NKern::FlashSystem(); sl@0: asid |= 31; sl@0: } sl@0: while(asidDeque(); sl@0: checkedList.Add(next); sl@0: DMemModelThread* thread = (DMemModelThread*)((TInt)next-_FOFF(DMemModelThread,iAliasLink)); sl@0: TPde pde = thread->iAliasPde; sl@0: if ((pde & ~KPageMask) == aOld) sl@0: { sl@0: // a page table in this page is being aliased by the thread, so update it... sl@0: thread->iAliasPde = (pde & KPageMask) | aNew; sl@0: } sl@0: NKern::FlashSystem(); sl@0: } sl@0: sl@0: // copy checkedList back to iAliasList sl@0: iAliasList.MoveFrom(&checkedList); sl@0: } sl@0: sl@0: void ArmMmu::DoUnassignPageTable(TLinAddr aAddr, const TAny* aOsAsids) sl@0: // sl@0: // Unassign a now-empty page table currently mapping the specified linear address. sl@0: // We assume that TLB and/or cache flushing has been done when any RAM pages were unmapped. sl@0: // This should be called with the system unlocked and the MMU mutex held. sl@0: // sl@0: { sl@0: __KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::DoUnassignPageTable at %08x a=%08x",aAddr,aOsAsids)); sl@0: TInt pdeIndex=TInt(aAddr>>KChunkShift); sl@0: TBool gpd=(pdeIndex>=(iLocalPdSize>>2)); sl@0: TInt os_asid=(TInt)aOsAsids; sl@0: TUint pde=0; sl@0: sl@0: SDblQue checkedList; sl@0: SDblQueLink* next; sl@0: sl@0: if (TUint32(os_asid)>KPageTableShift; sl@0: while(!iAliasList.IsEmpty()) sl@0: { sl@0: next = iAliasList.First()->Deque(); sl@0: checkedList.Add(next); sl@0: DMemModelThread* thread = (DMemModelThread*)((TInt)next-_FOFF(DMemModelThread,iAliasLink)); sl@0: if(thread->iAliasOsAsid==os_asid && (thread->iAliasPde>>KPageTableShift)==ptId) sl@0: { sl@0: // the page table is being aliased by the thread, so remove it... 
sl@0: thread->iAliasPde = 0; sl@0: } sl@0: NKern::FlashSystem(); sl@0: } sl@0: } sl@0: else if (os_asid==-1 && gpd) sl@0: { sl@0: // all OS ASIDs, address in global region sl@0: TInt num_os_asids=iNumGlobalPageDirs; sl@0: const TBitMapAllocator& b=*(const TBitMapAllocator*)iOsAsidAllocator; sl@0: for (os_asid=0; num_os_asids; ++os_asid) sl@0: { sl@0: if (!b.NotAllocated(os_asid,1) && (iAsidInfo[os_asid]&1)) sl@0: { sl@0: // this OS ASID exists and has a global page directory sl@0: TPde* pageDir=PageDirectory(os_asid); sl@0: NKern::LockSystem(); sl@0: pageDir[pdeIndex]=0; sl@0: CacheMaintenance::SinglePteUpdated((TLinAddr)(pageDir+pdeIndex)); sl@0: NKern::UnlockSystem(); sl@0: sl@0: __KTRACE_OPT(KMMU,Kern::Printf("Clearing PDE at %08x",pageDir+pdeIndex)); sl@0: --num_os_asids; sl@0: } sl@0: } sl@0: // we don't need to look for aliases in this case, because these aren't sl@0: // created for page tables in the global region. sl@0: NKern::LockSystem(); sl@0: } sl@0: else sl@0: { sl@0: // selection of OS ASIDs or all OS ASIDs sl@0: const TBitMapAllocator* pB=(const TBitMapAllocator*)aOsAsids; sl@0: if (os_asid==-1) sl@0: pB=iOsAsidAllocator; // 0's in positions which exist sl@0: TInt num_os_asids=pB->iSize-pB->iAvail; sl@0: for (os_asid=0; num_os_asids; ++os_asid) sl@0: { sl@0: if (pB->NotAllocated(os_asid,1)) sl@0: continue; // os_asid is not needed sl@0: TPde* pageDir=PageDirectory(os_asid); sl@0: NKern::LockSystem(); sl@0: pde = pageDir[pdeIndex]; sl@0: pageDir[pdeIndex]=0; sl@0: CacheMaintenance::SinglePteUpdated((TLinAddr)(pageDir+pdeIndex)); sl@0: NKern::UnlockSystem(); sl@0: sl@0: __KTRACE_OPT(KMMU,Kern::Printf("Clearing PDE at %08x",pageDir+pdeIndex)); sl@0: --num_os_asids; sl@0: } sl@0: sl@0: // remove any aliases of the page table... sl@0: TUint ptId = pde>>KPageTableShift; sl@0: NKern::LockSystem(); sl@0: while(!iAliasList.IsEmpty()) sl@0: { sl@0: next = iAliasList.First()->Deque(); sl@0: checkedList.Add(next); sl@0: DMemModelThread* thread = (DMemModelThread*)((TInt)next-_FOFF(DMemModelThread,iAliasLink)); sl@0: if((thread->iAliasPde>>KPageTableShift)==ptId && !pB->NotAllocated(thread->iAliasOsAsid,1)) sl@0: { sl@0: // the page table is being aliased by the thread, so remove it... sl@0: thread->iAliasPde = 0; sl@0: } sl@0: NKern::FlashSystem(); sl@0: } sl@0: } sl@0: sl@0: // copy checkedList back to iAliasList sl@0: iAliasList.MoveFrom(&checkedList); sl@0: sl@0: NKern::UnlockSystem(); sl@0: } sl@0: #endif sl@0: sl@0: // Initialise page table at physical address aXptPhys to be used as page table aXptId sl@0: // to expand the virtual address range used for mapping page tables. Map the page table sl@0: // at aPhysAddr as page table aId using the expanded range. sl@0: // Assign aXptPhys to kernel's Page Directory. sl@0: // Called with system unlocked and MMU mutex held. 
sl@0: void ArmMmu::BootstrapPageTable(TInt aXptId, TPhysAddr aXptPhys, TInt aId, TPhysAddr aPhysAddr) sl@0: { sl@0: __KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::BootstrapPageTable xptid=%04x, xptphys=%08x, id=%04x, phys=%08x", sl@0: aXptId, aXptPhys, aId, aPhysAddr)); sl@0: sl@0: // put in a temporary mapping for aXptPhys sl@0: // make it noncacheable sl@0: TPhysAddr pa=aXptPhys&~KPageMask; sl@0: *iTempPte = pa | SP_PTE(KArmV6PermRWNO, KNormalUncachedAttr, 0, 1); sl@0: CacheMaintenance::SinglePteUpdated((TLinAddr)iTempPte); sl@0: sl@0: // clear XPT sl@0: TPte* xpt=(TPte*)(iTempAddr+(aXptPhys&KPageMask)); sl@0: memclr(xpt, KPageTableSize); sl@0: sl@0: // must in fact have aXptPhys and aPhysAddr in same physical page sl@0: __ASSERT_ALWAYS( TUint32(aXptPhys^aPhysAddr)>KPtClusterShift)&KPagesInPDEMask] = pa | KPtPtePerm; sl@0: CacheMaintenance::MultiplePtesUpdated((TLinAddr)xpt, KPageTableSize); sl@0: sl@0: // remove temporary mapping sl@0: *iTempPte=0; sl@0: CacheMaintenance::SinglePteUpdated((TLinAddr)iTempPte); sl@0: sl@0: InvalidateTLBForPage(iTempAddr, KERNEL_MAPPING); sl@0: sl@0: // initialise PtInfo... sl@0: TLinAddr xptAddr = PageTableLinAddr(aXptId); sl@0: iPtInfo[aXptId].SetGlobal(xptAddr>>KChunkShift); sl@0: sl@0: // map xpt... sl@0: TInt pdeIndex=TInt(xptAddr>>KChunkShift); sl@0: TPde* pageDir=PageDirectory(0); sl@0: NKern::LockSystem(); sl@0: pageDir[pdeIndex]=aXptPhys|KPtPdePerm; sl@0: CacheMaintenance::SinglePteUpdated((TLinAddr)(pageDir+pdeIndex)); sl@0: sl@0: NKern::UnlockSystem(); sl@0: } sl@0: sl@0: // Edit the self-mapping entry in page table aId, mapped at aTempMap, to sl@0: // change the physical address from aOld to aNew. Used when moving page sl@0: // tables which were created by BootstrapPageTable. sl@0: // Called with system locked and MMU mutex held. sl@0: void ArmMmu::FixupXPageTable(TInt aId, TLinAddr aTempMap, TPhysAddr aOld, TPhysAddr aNew) sl@0: { sl@0: __KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::FixupXPageTable id=%04x, tempmap=%08x, old=%08x, new=%08x", sl@0: aId, aTempMap, aOld, aNew)); sl@0: sl@0: // find correct page table inside the page sl@0: TPte* xpt=(TPte*)(aTempMap + ((aId & KPtClusterMask) << KPageTableShift)); sl@0: // find the pte in that page table sl@0: xpt += (aId>>KPtClusterShift)&KPagesInPDEMask; sl@0: sl@0: // switch the mapping sl@0: __ASSERT_ALWAYS((*xpt&~KPageMask)==aOld, Panic(EFixupXPTFailed)); sl@0: *xpt = aNew | KPtPtePerm; sl@0: // mapped with MapTemp, and thus not mapped as a PTE - have to do real cache clean. sl@0: CacheMaintenance::SinglePteUpdated((TLinAddr)xpt); sl@0: } sl@0: sl@0: TInt ArmMmu::NewPageDirectory(TInt aOsAsid, TBool aSeparateGlobal, TPhysAddr& aPhysAddr, TInt& aNumPages) sl@0: { sl@0: __KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::NewPageDirectory(%d,%d)",aOsAsid,aSeparateGlobal)); sl@0: TInt r=0; sl@0: TInt nlocal=iLocalPdSize>>KPageShift; sl@0: aNumPages=aSeparateGlobal ? KPageDirectorySize/KPageSize : nlocal; sl@0: __KTRACE_OPT(KMMU,Kern::Printf("nlocal=%d, aNumPages=%d",nlocal,aNumPages)); sl@0: if (aNumPages>1) sl@0: { sl@0: TInt align=aSeparateGlobal ? 
KPageDirectoryShift : KPageDirectoryShift-1; sl@0: r=AllocContiguousRam(aNumPages<>KChunkShift), aSrc+(aBase>>KChunkShift), ((aEnd-aBase)>>KChunkShift)*sizeof(TPde)); sl@0: CacheMaintenance::MultiplePtesUpdated((TLinAddr)(aDest+(aBase>>KChunkShift)), ((aEnd-aBase)>>KChunkShift)*sizeof(TPde)); sl@0: } sl@0: sl@0: inline void ZeroPdes(TPde* aDest, TLinAddr aBase, TLinAddr aEnd) sl@0: { sl@0: memclr(aDest+(aBase>>KChunkShift), ((aEnd-aBase)>>KChunkShift)*sizeof(TPde)); sl@0: CacheMaintenance::MultiplePtesUpdated((TLinAddr)(aDest+(aBase>>KChunkShift)), ((aEnd-aBase)>>KChunkShift)*sizeof(TPde)); sl@0: } sl@0: sl@0: void ArmMmu::InitPageDirectory(TInt aOsAsid, TBool aSeparateGlobal) sl@0: { sl@0: __KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::InitPageDirectory(%d,%d)",aOsAsid,aSeparateGlobal)); sl@0: TPde* newpd=PageDirectory(aOsAsid); // new page directory sl@0: memclr(newpd, iLocalPdSize); // clear local page directory sl@0: CacheMaintenance::MultiplePtesUpdated((TLinAddr)newpd, iLocalPdSize); sl@0: if (aSeparateGlobal) sl@0: { sl@0: const TPde* kpd=(const TPde*)KPageDirectoryBase; // kernel page directory sl@0: if (iLocalPdSize==KPageSize) sl@0: ZeroPdes(newpd, KUserSharedDataEnd1GB, KUserSharedDataEnd2GB); sl@0: ZeroPdes(newpd, KRamDriveStartAddress, KRamDriveEndAddress); // don't copy RAM drive sl@0: CopyPdes(newpd, kpd, KRomLinearBase, KUserGlobalDataEnd); // copy ROM + user global sl@0: CopyPdes(newpd, kpd, KRamDriveEndAddress, 0x00000000); // copy kernel mappings sl@0: } sl@0: } sl@0: sl@0: void ArmMmu::ClearPageTable(TInt aId, TInt aFirstIndex) sl@0: { sl@0: __KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::ClearPageTable(%d,%d)",aId,aFirstIndex)); sl@0: TPte* pte=PageTable(aId); sl@0: memclr(pte+aFirstIndex, KPageTableSize-aFirstIndex*sizeof(TPte)); sl@0: CacheMaintenance::MultiplePtesUpdated((TLinAddr)(pte+aFirstIndex), KPageTableSize-aFirstIndex*sizeof(TPte)); sl@0: } sl@0: sl@0: void ArmMmu::ApplyTopLevelPermissions(TLinAddr aAddr, TInt aOsAsid, TInt aNumPdes, TPde aPdePerm) sl@0: { sl@0: __KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::ApplyTopLevelPermissions %04x:%08x->%08x count %d", sl@0: aOsAsid, aAddr, aPdePerm, aNumPdes)); sl@0: TInt ix=aAddr>>KChunkShift; sl@0: TPde* pPde=PageDirectory(aOsAsid)+ix; sl@0: TLinAddr firstPde = (TLinAddr)pPde; //Will need this to clean page table memory region in cache sl@0: sl@0: TPde* pPdeEnd=pPde+aNumPdes; sl@0: NKern::LockSystem(); sl@0: for (; pPde>KChunkShift); sl@0: TPde newpde = ptPhys | KShadowPdePerm; sl@0: __KTRACE_OPT(KMMU,Kern::Printf("Writing PDE %08x to %08x", newpde, ppde)); sl@0: TInt irq=NKern::DisableAllInterrupts(); sl@0: *ppde = newpde; // map in the page table sl@0: CacheMaintenance::SinglePteUpdated((TLinAddr)ppde); sl@0: sl@0: FlushTLBs(); // flush both TLBs (no need to flush cache yet) sl@0: NKern::RestoreInterrupts(irq); sl@0: } sl@0: sl@0: void ArmMmu::DoUnmapShadowPage(TInt aId, TLinAddr aRomAddr, TPhysAddr aOrigPhys) sl@0: { sl@0: __KTRACE_OPT(KMMU,Kern::Printf("ArmMmu:DoUnmapShadowPage, id=%04x lin=%08x origphys=%08x", aId, aRomAddr, aOrigPhys)); sl@0: TPte* ppte = PageTable(aId) + ((aRomAddr & KChunkMask)>>KPageShift); sl@0: TPte newpte = aOrigPhys | KRomPtePerm; sl@0: __KTRACE_OPT(KMMU,Kern::Printf("Writing PTE %08x to %08x", newpte, ppte)); sl@0: TInt irq=NKern::DisableAllInterrupts(); sl@0: *ppte = newpte; sl@0: CacheMaintenance::SinglePteUpdated((TLinAddr)ppte); sl@0: sl@0: InvalidateTLBForPage(aRomAddr, KERNEL_MAPPING); sl@0: #if defined(__CPU_ARM1136__) && !defined(__CPU_ARM1136_ERRATUM_353494_FIXED) sl@0: __FlushBtb(); sl@0: #endif 
sl@0: sl@0: CacheMaintenance::CodeChanged(aRomAddr, KPageSize, CacheMaintenance::EMemoryRemap); sl@0: CacheMaintenance::PageToReuse(aRomAddr, EMemAttNormalCached, KPhysAddrInvalid); sl@0: NKern::RestoreInterrupts(irq); sl@0: } sl@0: sl@0: TInt ArmMmu::UnassignShadowPageTable(TLinAddr aRomAddr, TPhysAddr aOrigPhys) sl@0: { sl@0: __KTRACE_OPT(KMMU,Kern::Printf("ArmMmu:UnassignShadowPageTable, lin=%08x origphys=%08x", aRomAddr, aOrigPhys)); sl@0: TPde* ppde = ::InitPageDirectory + (aRomAddr>>KChunkShift); sl@0: TPde newpde = (aOrigPhys &~ KChunkMask) | KRomSectionPermissions; sl@0: __KTRACE_OPT(KMMU,Kern::Printf("Writing PDE %08x to %08x", newpde, ppde)); sl@0: TInt irq=NKern::DisableAllInterrupts(); sl@0: *ppde = newpde; // revert to section mapping sl@0: CacheMaintenance::SinglePteUpdated((TLinAddr)ppde); sl@0: sl@0: FlushTLBs(); // flush both TLBs sl@0: NKern::RestoreInterrupts(irq); sl@0: return KErrNone; sl@0: } sl@0: sl@0: sl@0: #if defined(__CPU_MEMORY_TYPE_REMAPPING) // arm1176, arm11mcore, armv7, ... sl@0: /** sl@0: Shadow pages on platforms with remapping (mpcore, 1176, cortex...) are not writable. sl@0: This will map the region into writable memory first. sl@0: @pre No Fast Mutex held sl@0: */ sl@0: TInt ArmMmu::CopyToShadowMemory(TLinAddr aDest, TLinAddr aSrc, TUint32 aLength) sl@0: { sl@0: __KTRACE_OPT(KMMU, Kern::Printf("ArmMmu:CopyToShadowMemory aDest=%08x aSrc=%08x aLength=%08x", aDest, aSrc, aLength)); sl@0: sl@0: // Check that destination is ROM sl@0: if (aDest iRomLinearEnd) sl@0: { sl@0: __KTRACE_OPT(KMMU,Kern::Printf("ArmMmu:CopyToShadowMemory: Destination not entirely in ROM")); sl@0: return KErrArgument; sl@0: } sl@0: // do operation with RamAlloc mutex held (to prevent shadow pages from being released from under us) sl@0: MmuBase::Wait(); sl@0: sl@0: sl@0: TInt r = KErrNone; sl@0: while (aLength) sl@0: { sl@0: // Calculate memory size to copy in this loop. A single page region will be copied per loop sl@0: TInt copySize = Min(aLength, iPageSize - (aDest&iPageMask)); sl@0: sl@0: // Get physical address sl@0: TPhysAddr physAddr = LinearToPhysical(aDest&~iPageMask, 0); sl@0: if (KPhysAddrInvalid==physAddr) sl@0: { sl@0: r = KErrArgument; sl@0: break; sl@0: } sl@0: sl@0: //check whether it is shadowed rom sl@0: SPageInfo* pi = SPageInfo::SafeFromPhysAddr(physAddr); sl@0: if (pi==0 || pi->Type()!=SPageInfo::EShadow) sl@0: { sl@0: __KTRACE_OPT(KMMU,Kern::Printf("ArmMmu:CopyToShadowMemory: No shadow page at this address")); sl@0: r = KErrArgument; sl@0: break; sl@0: } sl@0: sl@0: //Temporarily map into writable memory and copy data. 
RamAllocator DMutex is required sl@0: TLinAddr tempAddr = MapTemp (physAddr, aDest&~iPageMask); sl@0: __KTRACE_OPT(KMMU, Kern::Printf("ArmMmu:CopyToShadowMemory Copy aDest=%08x aSrc=%08x aSize=%08x", tempAddr+(aDest&iPageMask), aSrc, copySize)); sl@0: memcpy ((TAny*)(tempAddr+(aDest&iPageMask)), (const TAny*)aSrc, copySize); //Kernel-to-Kernel copy is presumed sl@0: UnmapTemp(); sl@0: sl@0: //Update variables for the next loop/page sl@0: aDest+=copySize; sl@0: aSrc+=copySize; sl@0: aLength-=copySize; sl@0: } sl@0: MmuBase::Signal(); sl@0: return r; sl@0: } sl@0: #endif sl@0: sl@0: void ArmMmu::DoFreezeShadowPage(TInt aId, TLinAddr aRomAddr) sl@0: { sl@0: #if defined(__CPU_MEMORY_TYPE_REMAPPING) //arm1176, arm11mcore, armv7 and later sl@0: __KTRACE_OPT(KMMU, Kern::Printf("ArmMmu:DoFreezeShadowPage not required with MEMORY_TYPE_REMAPPING")); sl@0: #else sl@0: __KTRACE_OPT(KMMU, Kern::Printf("ArmMmu:DoFreezeShadowPage aId=%04x aRomAddr=%08x", sl@0: aId, aRomAddr)); sl@0: TPte* ppte = PageTable(aId) + ((aRomAddr & KChunkMask)>>KPageShift); sl@0: TPte newpte = (*ppte & KPteSmallPageAddrMask) | KRomPtePerm; sl@0: __KTRACE_OPT(KMMU,Kern::Printf("Writing PTE %08x to %08x", newpte, ppte)); sl@0: *ppte = newpte; sl@0: CacheMaintenance::SinglePteUpdated((TLinAddr)ppte); sl@0: InvalidateTLBForPage(aRomAddr, KERNEL_MAPPING); sl@0: #endif sl@0: } sl@0: sl@0: /** Replaces large page(64K) entry in page table with small page(4K) entries.*/ sl@0: void ArmMmu::Pagify(TInt aId, TLinAddr aLinAddr) sl@0: { sl@0: __KTRACE_OPT(KMMU, Kern::Printf("ArmMmu:Pagify aId=%04x aLinAddr=%08x", aId, aLinAddr)); sl@0: sl@0: TInt pteIndex = (aLinAddr & KChunkMask)>>KPageShift; sl@0: TPte* pte = PageTable(aId); sl@0: if ((pte[pteIndex] & KArmV6PteTypeMask) == KArmV6PteLargePage) sl@0: { sl@0: __KTRACE_OPT(KMMU,Kern::Printf("Converting 64K page to 4K pages")); sl@0: pteIndex &= ~0xf; sl@0: TPte source = pte[pteIndex]; sl@0: source = (source & KPteLargePageAddrMask) | SP_PTE_FROM_LP_PTE(source); sl@0: pte += pteIndex; sl@0: for (TInt entry=0; entry<16; entry++) sl@0: { sl@0: pte[entry] = source | (entry<<12); sl@0: } sl@0: CacheMaintenance::MultiplePtesUpdated((TLinAddr)pte, 16*sizeof(TPte)); sl@0: FlushTLBs(); sl@0: } sl@0: } sl@0: sl@0: void ArmMmu::FlushShadow(TLinAddr aRomAddr) sl@0: { sl@0: CacheMaintenance::CodeChanged(aRomAddr, KPageSize, CacheMaintenance::EMemoryRemap); sl@0: CacheMaintenance::PageToReuse(aRomAddr, EMemAttNormalCached, KPhysAddrInvalid); sl@0: InvalidateTLBForPage(aRomAddr, KERNEL_MAPPING); // remove all TLB references to original ROM page sl@0: } sl@0: sl@0: sl@0: #if defined(__CPU_MEMORY_TYPE_REMAPPING) //arm1176, arm11mcore, armv7 sl@0: /** sl@0: Calculates page directory/table entries for memory type described in aMapAttr. sl@0: Global, small page (4KB) mapping is assumed. sl@0: (All magic numbers come from ARM page table descriptions.) sl@0: @param aMapAttr On entry, holds description(memory type, access permisions,...) of the memory. sl@0: It is made up of TMappingAttributes constants or TMappingAttributes2 object. If TMappingAttributes, sl@0: may be altered on exit to hold the actual cache attributes & access permissions. sl@0: @param aPde On exit, holds page-table-entry for the 1st level descriptor sl@0: for given type of memory, with base address set to 0. sl@0: @param aPte On exit, holds small-page-entry (4K) for the 2nd level descriptor sl@0: for given type of memory, with base address set to 0. 
sl@0: @return KErrNotSupported If memory described in aMapAttr is not supported sl@0: KErrNone Otherwise sl@0: */ sl@0: TInt ArmMmu::PdePtePermissions(TUint& aMapAttr, TPde& aPde, TPte& aPte) sl@0: { sl@0: __KTRACE_OPT(KMMU,Kern::Printf(">ArmMmu::PdePtePermissions, mapattr=%08x",aMapAttr)); sl@0: sl@0: TMappingAttributes2& memory = (TMappingAttributes2&)aMapAttr; sl@0: sl@0: if(memory.ObjectType2()) sl@0: { sl@0: //---------Memory described by TMappingAttributes2 object----------------- sl@0: aPde = KArmV6PdePageTable | sl@0: (memory.Parity() ? KArmV6PdeECCEnable : 0); sl@0: #if defined(FAULTY_NONSHARED_DEVICE_MEMORY) sl@0: if(!memory.Shared() && (memory.Type() == EMemAttDevice )) sl@0: { sl@0: aMapAttr ^= EMapAttrBufferedNC; sl@0: aMapAttr |= EMapAttrFullyBlocking; sl@0: // Clear EMemAttDevice sl@0: aMapAttr ^= (EMemAttDevice << 26); sl@0: aMapAttr |= (EMemAttStronglyOrdered << 26); sl@0: } sl@0: #endif sl@0: aPte = KArmV6PteSmallPage | sl@0: KArmV6PteAP0 | // AP0 bit always 1 sl@0: ((memory.Type()&3)<<2) | ((memory.Type()&4)<<4) | // memory type sl@0: (memory.Executable() ? 0 : KArmV6PteSmallXN) | // eXecuteNever bit sl@0: #if defined (__CPU_USE_SHARED_MEMORY) sl@0: KArmV6PteS | // Memory is always shared. sl@0: #else sl@0: (memory.Shared() ? KArmV6PteS : 0) | // Shared bit sl@0: #endif sl@0: (memory.Writable() ? 0 : KArmV6PteAPX) | // APX = !Writable sl@0: (memory.UserAccess() ? KArmV6PteAP1: 0); // AP1 = UserAccess sl@0: // aMapAttr remains the same sl@0: } sl@0: else sl@0: { sl@0: //---------Memory described by TMappingAttributes bitmask----------------- sl@0: #if defined(FAULTY_NONSHARED_DEVICE_MEMORY) sl@0: if(((aMapAttr & EMapAttrL1CacheMask) == EMapAttrBufferedNC) && !(aMapAttr & EMapAttrShared)) sl@0: { sl@0: // Clear EMapAttrBufferedNC attribute sl@0: aMapAttr ^= EMapAttrBufferedNC; sl@0: aMapAttr |= EMapAttrFullyBlocking; sl@0: } sl@0: #endif sl@0: // 1. Calculate TEX0:C:B bits in page table and actual cache attributes. sl@0: // Only L1 cache attribute from aMapAttr matters. Outer (L2) cache policy will be the same as inner one. sl@0: TUint l1cache=aMapAttr & EMapAttrL1CacheMask; // Inner cache attributes. May change to actual value. sl@0: TUint l2cache; // Will hold actual L2 cache attributes (in terms of TMappingAttributes constants) sl@0: TUint tex0_c_b; // Will hold TEX[0]:C:B value in page table sl@0: sl@0: switch (l1cache) sl@0: { sl@0: case EMapAttrFullyBlocking: sl@0: tex0_c_b = EMemAttStronglyOrdered; sl@0: l2cache = EMapAttrL2Uncached; sl@0: break; sl@0: case EMapAttrBufferedNC: sl@0: tex0_c_b = EMemAttDevice; sl@0: l2cache = EMapAttrL2Uncached; sl@0: break; sl@0: case EMapAttrBufferedC: sl@0: case EMapAttrL1Uncached: sl@0: case EMapAttrCachedWTRA: sl@0: case EMapAttrCachedWTWA: sl@0: tex0_c_b = EMemAttNormalUncached; sl@0: l1cache = EMapAttrBufferedC; sl@0: l2cache = EMapAttrL2Uncached; sl@0: break; sl@0: case EMapAttrCachedWBRA: sl@0: case EMapAttrCachedWBWA: sl@0: case EMapAttrL1CachedMax: sl@0: tex0_c_b = EMemAttNormalCached; sl@0: l1cache = EMapAttrCachedWBWA; sl@0: l2cache = EMapAttrL2CachedWBWA; sl@0: break; sl@0: default: sl@0: return KErrNotSupported; sl@0: } sl@0: sl@0: // 2. Step 2 has been removed :) sl@0: sl@0: // 3. Calculate access permissions (apx:ap bits in page table + eXecute it) sl@0: TUint read=aMapAttr & EMapAttrReadMask; sl@0: TUint write=(aMapAttr & EMapAttrWriteMask)>>4; sl@0: TUint exec=(aMapAttr & EMapAttrExecMask)>>8; sl@0: sl@0: read|=exec; // User/Sup execute access requires User/Sup read access. 
sl@0: if (exec) exec = 1; // There is a single eXecute bit in page table. Set to one if User or Sup exec is required. sl@0: sl@0: TUint apxap=0; sl@0: if (write==0) // no write required sl@0: { sl@0: if (read>=4) apxap=KArmV6PermRORO; // user read required sl@0: else if (read==1) apxap=KArmV6PermRONO; // supervisor read required sl@0: else return KErrNotSupported; // no read required sl@0: } sl@0: else if (write<4) // supervisor write required sl@0: { sl@0: if (read<4) apxap=KArmV6PermRWNO; // user read not required sl@0: else return KErrNotSupported; // user read required sl@0: } sl@0: else // user & supervisor writes required sl@0: { sl@0: apxap=KArmV6PermRWRW; sl@0: } sl@0: sl@0: // 4. Calculate page-table-entry for the 1st level (aka page directory) descriptor sl@0: aPde=((aMapAttr&EMapAttrUseECC)>>8)|KArmV6PdePageTable; sl@0: sl@0: // 5. Calculate small-page-entry for the 2nd level (aka page table) descriptor sl@0: aPte=SP_PTE(apxap, tex0_c_b, exec, 1); // always global sl@0: if (aMapAttr&EMapAttrShared) sl@0: aPte |= KArmV6PteS; sl@0: sl@0: // 6. Fix aMapAttr to hold the actual values for access permission & cache attributes sl@0: TUint xnapxap=((aPte<<3)&8)|((aPte>>7)&4)|((aPte>>4)&3); sl@0: aMapAttr &= ~(EMapAttrAccessMask|EMapAttrL1CacheMask|EMapAttrL2CacheMask); sl@0: aMapAttr |= PermissionLookup[xnapxap]; // Set actual access permissions sl@0: aMapAttr |= l1cache; // Set actual inner cache attributes sl@0: aMapAttr |= l2cache; // Set actual outer cache attributes sl@0: } sl@0: sl@0: __KTRACE_OPT(KMMU,Kern::Printf(">12); sl@0: const TUint BFNC=(EMapAttrBufferedNC>>12); sl@0: //const TUint BUFC=(EMapAttrBufferedC>>12); sl@0: const TUint L1UN=(EMapAttrL1Uncached>>12); sl@0: const TUint WTRA=(EMapAttrCachedWTRA>>12); sl@0: //const TUint WTWA=(EMapAttrCachedWTWA>>12); sl@0: const TUint WBRA=(EMapAttrCachedWBRA>>12); sl@0: const TUint WBWA=(EMapAttrCachedWBWA>>12); sl@0: const TUint AWTR=(EMapAttrAltCacheWTRA>>12); sl@0: //const TUint AWTW=(EMapAttrAltCacheWTWA>>12); sl@0: //const TUint AWBR=(EMapAttrAltCacheWBRA>>12); sl@0: const TUint AWBW=(EMapAttrAltCacheWBWA>>12); sl@0: const TUint MAXC=(EMapAttrL1CachedMax>>12); sl@0: sl@0: const TUint L2UN=(EMapAttrL2Uncached>>16); sl@0: sl@0: const TUint8 UNS=0xffu; // Unsupported attribute sl@0: sl@0: //Maps L1 & L2 cache attributes into TEX[4:2]:CB[1:0] sl@0: //ARMv6 doesn't do WTWA so we use WTRA instead sl@0: sl@0: #if !defined(__CPU_ARM1136_ERRATUM_399234_FIXED) sl@0: // L1 Write-Through mode is outlawed, L1WT acts as L1UN. 
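sl@0: // How to read CBTEX below: it is indexed further down as CBTEX[(l2cache<<3)|l1cache],
sl@0: // so each row of eight entries corresponds to one L2 cache policy and each column to
sl@0: // one L1 policy, in the order given by the header comments. Each byte packs TEX[4:2]
sl@0: // in bits 4:2 and C:B in bits 1:0, e.g. 0x13 is TEX=0b100, C=1, B=1.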
sl@0: static const TUint8 CBTEX[40]= sl@0: { // L1CACHE: sl@0: // FBLK BFNC BUFC L1UN WTRA WTWA WBRA WBWA L2CACHE: sl@0: 0x00, 0x01, 0x01, 0x04, 0x04, 0x04, 0x13, 0x11, //NC sl@0: 0x00, 0x01, 0x01, 0x18, 0x18, 0x18, 0x1b, 0x19, //WTRA sl@0: 0x00, 0x01, 0x01, 0x18, 0x18, 0x18, 0x1b, 0x19, //WTWA sl@0: 0x00, 0x01, 0x01, 0x1c, 0x1c, 0x1c, 0x1f, 0x1d, //WBRA sl@0: 0x00, 0x01, 0x01, 0x14, 0x14, 0x14, 0x17, 0x15 //WBWA sl@0: }; sl@0: #else sl@0: static const TUint8 CBTEX[40]= sl@0: { // L1CACHE: sl@0: // FBLK BFNC BUFC L1UN WTRA WTWA WBRA WBWA L2CACHE: sl@0: 0x00, 0x01, 0x01, 0x04, 0x12, 0x12, 0x13, 0x11, //NC sl@0: 0x00, 0x01, 0x01, 0x18, 0x02, 0x02, 0x1b, 0x19, //WTRA sl@0: 0x00, 0x01, 0x01, 0x18, 0x02, 0x02, 0x1b, 0x19, //WTWA sl@0: 0x00, 0x01, 0x01, 0x1c, 0x1e, 0x1e, 0x1f, 0x1d, //WBRA sl@0: 0x00, 0x01, 0x01, 0x14, 0x16, 0x16, 0x17, 0x15 //WBWA sl@0: }; sl@0: #endif sl@0: sl@0: //Maps TEX[4:2]:CB[1:0] value into L1 cache attributes sl@0: static const TUint8 L1Actual[32]= sl@0: { sl@0: //CB 00 01 10 11 //TEX sl@0: FBLK, BFNC, WTRA, WBRA, //000 sl@0: L1UN, UNS, UNS, WBWA, //001 sl@0: BFNC, UNS, UNS, UNS, //010 sl@0: UNS, UNS, UNS, UNS, //011 sl@0: L1UN, WBWA, WTRA, WBRA, //100 sl@0: L1UN, WBWA, WTRA, WBRA, //101 sl@0: L1UN, WBWA, WTRA, WBRA, //110 sl@0: L1UN, WBWA, WTRA, WBRA //111 sl@0: }; sl@0: sl@0: //Maps TEX[4:2]:CB[1:0] value into L2 cache attributes sl@0: static const TUint8 L2Actual[32]= sl@0: { sl@0: //CB 00 01 10 11 //TEX sl@0: L2UN, L2UN, WTRA, WBRA, //000 sl@0: L2UN, UNS, UNS, WBWA, //001 sl@0: L2UN, UNS, UNS, UNS, //010 sl@0: UNS, UNS, UNS, UNS, //011 sl@0: L2UN, L2UN, L2UN, L2UN, //100 sl@0: WBWA, WBWA, WBWA, WBWA, //101 sl@0: WTRA, WTRA, WTRA, WTRA, //110 sl@0: WBRA, WBRA, WBRA, WBRA //111 sl@0: }; sl@0: sl@0: TInt ArmMmu::PdePtePermissions(TUint& aMapAttr, TPde& aPde, TPte& aPte) sl@0: { sl@0: __KTRACE_OPT(KMMU,Kern::Printf(">ArmMmu::PdePtePermissions, mapattr=%08x",aMapAttr)); sl@0: sl@0: TUint read=aMapAttr & EMapAttrReadMask; sl@0: TUint write=(aMapAttr & EMapAttrWriteMask)>>4; sl@0: TUint exec=(aMapAttr & EMapAttrExecMask)>>8; sl@0: TUint l1cache=(aMapAttr & EMapAttrL1CacheMask)>>12; sl@0: TUint l2cache=(aMapAttr & EMapAttrL2CacheMask)>>16; sl@0: if (l1cache==MAXC) l1cache=WBRA; // map max cache to WBRA sl@0: if (l1cache>AWBW) sl@0: return KErrNotSupported; // undefined attribute sl@0: if (l1cache>=AWTR) l1cache-=4; // no alternate cache, so use normal cache sl@0: if (l1cacheWBWA) sl@0: return KErrNotSupported; // undefined attribute sl@0: if (l2cache) l2cache-=(WTRA-1); // l2cache now in range 0-4 sl@0: aPde=((aMapAttr&EMapAttrUseECC)>>8)|KArmV6PdePageTable; sl@0: sl@0: #if defined(__CPU_ARM1136__) && !defined(__CPU_ARM1136_ERRATUM_353494_FIXED) sl@0: // if broken 1136, can't have supervisor only code sl@0: if (exec) sl@0: exec = TUint(EMapAttrExecUser>>8); sl@0: #endif sl@0: sl@0: // if any execute access, must have read=execute sl@0: if (exec) sl@0: (void)(read>=exec || (read=exec)!=0), exec=1; sl@0: sl@0: // l1cache between 0 and 7, l2cache between 0 and 4; look up CBTEX sl@0: TUint cbtex=CBTEX[(l2cache<<3)|l1cache]; sl@0: sl@0: // work out apx:ap sl@0: TUint apxap; sl@0: if (write==0) sl@0: apxap=(read>=4)?KArmV6PermRORO:(read?KArmV6PermRONO:KArmV6PermNONO); sl@0: else if (write<4) sl@0: apxap=(read>=4)?KArmV6PermRWRO:KArmV6PermRWNO; sl@0: else sl@0: apxap=KArmV6PermRWRW; sl@0: TPte pte=SP_PTE(apxap, cbtex, exec, 1); // always global sl@0: if (aMapAttr&EMapAttrShared) sl@0: pte |= KArmV6PteS; sl@0: sl@0: // Translate back to get actual map attributes sl@0: TUint 
xnapxap=((pte<<3)&8)|((pte>>7)&4)|((pte>>4)&3); sl@0: cbtex=((pte>>4)&0x1c)|((pte>>2)&3); // = TEX[4:2]::CB[1:0] sl@0: aMapAttr &= ~(EMapAttrAccessMask|EMapAttrL1CacheMask|EMapAttrL2CacheMask); sl@0: aMapAttr |= PermissionLookup[xnapxap]; sl@0: aMapAttr |= (L1Actual[cbtex]<<12); sl@0: aMapAttr |= (L2Actual[cbtex]<<16); sl@0: aPte=pte; sl@0: __KTRACE_OPT(KMMU,Kern::Printf("=KChunkShift && (la & KChunkMask)==0 && remain>=KChunkSize) sl@0: { sl@0: // use sections - ASSUMES ADDRESS IS IN GLOBAL REGION sl@0: TInt npdes=remain>>KChunkShift; sl@0: const TBitMapAllocator& b=*iOsAsidAllocator; sl@0: TInt num_os_asids=iNumGlobalPageDirs; sl@0: TInt os_asid=0; sl@0: for (; num_os_asids; ++os_asid) sl@0: { sl@0: if (b.NotAllocated(os_asid,1) || (iAsidInfo[os_asid]&1)==0) sl@0: continue; // os_asid is not needed sl@0: TPde* p_pde=PageDirectory(os_asid)+(la>>KChunkShift); sl@0: TPde* p_pde_E=p_pde+npdes; sl@0: TPde pde=pa|section_pde; sl@0: TLinAddr firstPde = (TLinAddr)p_pde; //Will need this to clean page table memory region from cache sl@0: sl@0: NKern::LockSystem(); sl@0: for (; p_pde < p_pde_E; pde+=KChunkSize) sl@0: { sl@0: __ASSERT_DEBUG(*p_pde==0, MM::Panic(MM::EPdeAlreadyInUse)); sl@0: __KTRACE_OPT(KMMU,Kern::Printf("Writing PDE %08x to %08x", pde, p_pde)); sl@0: *p_pde++=pde; sl@0: } sl@0: CacheMaintenance::MultiplePtesUpdated(firstPde, (TUint)p_pde-firstPde); sl@0: NKern::UnlockSystem(); sl@0: --num_os_asids; sl@0: } sl@0: npdes<<=KChunkShift; sl@0: la+=npdes, pa+=npdes, remain-=npdes; sl@0: continue; sl@0: } sl@0: TInt block_size = Min(remain, KChunkSize-(la&KChunkMask)); sl@0: TPte pa_mask=~KPageMask; sl@0: TPte pte_perm=sp_pte; sl@0: if (aMapShift>=KLargePageShift && block_size>=KLargePageSize) sl@0: { sl@0: if ((la & KLargePageMask)==0) sl@0: { sl@0: // use 64K large pages sl@0: pa_mask=~KLargePageMask; sl@0: pte_perm=lp_pte; sl@0: } sl@0: else sl@0: block_size = Min(remain, KLargePageSize-(la&KLargePageMask)); sl@0: } sl@0: block_size &= pa_mask; sl@0: sl@0: // use pages (large or small) sl@0: TInt id=PageTableId(la, 0); sl@0: __ASSERT_DEBUG(id>=0, MM::Panic(MM::EMmuMapNoPageTable)); sl@0: TPte* p_pte=PageTable(id)+((la&KChunkMask)>>KPageShift); sl@0: TPte* p_pte_E=p_pte + (block_size>>KPageShift); sl@0: SPageTableInfo& ptinfo=iPtInfo[id]; sl@0: TLinAddr firstPte = (TLinAddr)p_pte; //Will need this to clean page table memory region from cache sl@0: sl@0: NKern::LockSystem(); sl@0: for (; p_pte < p_pte_E; pa+=KPageSize) sl@0: { sl@0: __ASSERT_DEBUG(*p_pte==0, MM::Panic(MM::EPteAlreadyInUse)); sl@0: TPte pte = (pa & pa_mask) | pte_perm; sl@0: __KTRACE_OPT(KMMU,Kern::Printf("Writing PTE %08x to %08x", pte, p_pte)); sl@0: *p_pte++=pte; sl@0: ++ptinfo.iCount; sl@0: NKern::FlashSystem(); sl@0: } sl@0: CacheMaintenance::MultiplePtesUpdated(firstPte, (TUint)p_pte-firstPte); sl@0: NKern::UnlockSystem(); sl@0: la+=block_size, remain-=block_size; sl@0: } sl@0: } sl@0: sl@0: void ArmMmu::Unmap(TLinAddr aLinAddr, TInt aSize) sl@0: // sl@0: // Remove all mappings in the specified range of addresses. sl@0: // Assumes there are only global mappings involved. sl@0: // Don't free page tables. sl@0: // aLinAddr, aSize must be page-aligned. 
sl@0: //
sl@0: {
sl@0: __KTRACE_OPT(KMMU, Kern::Printf("ArmMmu::Unmap lin=%08x size=%08x", aLinAddr, aSize));
sl@0: TLinAddr a=aLinAddr;
sl@0: TLinAddr end=a+aSize;
sl@0: __KTRACE_OPT(KMMU,Kern::Printf("a=%08x end=%08x",a,end));
sl@0: NKern::LockSystem();
sl@0: while(a!=end)
sl@0: {
sl@0: TInt pdeIndex=a>>KChunkShift;
sl@0: TLinAddr next=(pdeIndex<>KPageShift;
sl@0: __KTRACE_OPT(KMMU,Kern::Printf("a=%08x next=%08x to_do=%d",a,next,to_do));
sl@0: TPde pde=::InitPageDirectory[pdeIndex];
sl@0: if ( (pde&KArmV6PdeTypeMask)==KArmV6PdeSection )
sl@0: {
sl@0: __ASSERT_DEBUG(!(a&KChunkMask), MM::Panic(MM::EUnmapBadAlignment));
sl@0: #if defined(__CPU_ARM1136__) && !defined(__CPU_ARM1136_ERRATUM_353494_FIXED)
sl@0: remove_and_invalidate_section(::InitPageDirectory + pdeIndex, a, KERNEL_MAPPING);
sl@0: #else
sl@0: ::InitPageDirectory[pdeIndex]=0;
sl@0: CacheMaintenance::SinglePteUpdated(TLinAddr(::InitPageDirectory + pdeIndex));
sl@0: InvalidateTLBForPage(a, KERNEL_MAPPING); // ASID irrelevant since global
sl@0: #endif
sl@0: a=next;
sl@0: NKern::FlashSystem();
sl@0: continue;
sl@0: }
sl@0: TInt ptid=PageTableId(a,0);
sl@0: SPageTableInfo& ptinfo=iPtInfo[ptid];
sl@0: if (ptid>=0)
sl@0: {
sl@0: TPte* ppte=PageTable(ptid)+((a&KChunkMask)>>KPageShift);
sl@0: TPte* ppte_End=ppte+to_do;
sl@0: for (; ppte= 0)
sl@0: {
sl@0: TPhysAddr pa;
sl@0: if((TInt)aPageList&1)
sl@0: {
sl@0: pa = (TPhysAddr)aPageList&~1;
sl@0: *(TPhysAddr*)&aPageList += iPageSize;
sl@0: }
sl@0: else
sl@0: pa = *aPageList++;
sl@0:
sl@0: *iTempPte = pa | SP_PTE(KArmV6PermRWNO, KNormalUncachedAttr, 0, 1);
sl@0: CacheMaintenance::SinglePteUpdated((TLinAddr)iTempPte);
sl@0: InvalidateTLBForPage(iTempAddr, KERNEL_MAPPING);
sl@0: memset((TAny*)iTempAddr, aClearByte, iPageSize);
sl@0: // This temporary mapping is noncached => No need to flush cache here.
sl@0: // Still, we have to make sure that write buffer(s) are drained.
sl@0: CacheMaintenance::MemoryToPreserveAndReuse((TLinAddr)iTempAddr, iPageSize, EMapAttrBufferedC);
sl@0: }
sl@0: *iTempPte=0;
sl@0: CacheMaintenance::SinglePteUpdated((TLinAddr)iTempPte);
sl@0: InvalidateTLBForPage(iTempAddr, KERNEL_MAPPING);
sl@0: }
sl@0:
sl@0:
sl@0: /**
sl@0: Create a temporary mapping of one or more contiguous physical pages.
sl@0: Fully cached memory attributes apply.
sl@0: The RamAllocatorMutex must be held before this function is called and not released
sl@0: until after UnmapTemp has been called.
sl@0:
sl@0: @param aPage The physical address of the pages to be mapped.
sl@0: @param aLinAddr The linear address of any existing location where the page is mapped.
sl@0: If the page isn't already mapped elsewhere as a cacheable page then
sl@0: this value is irrelevant. (It is used for page colouring.)
sl@0: @param aPages Number of pages to map.
sl@0:
sl@0: @return The linear address of where the pages have been mapped.
sl@0: */ sl@0: TLinAddr ArmMmu::MapTemp(TPhysAddr aPage,TLinAddr aLinAddr,TInt aPages) sl@0: { sl@0: __ASSERT_MUTEX(RamAllocatorMutex); sl@0: __ASSERT_DEBUG(!*iTempPte,MM::Panic(MM::ETempMappingAlreadyInUse)); sl@0: iTempMapColor = (aLinAddr>>KPageShift)&KPageColourMask; sl@0: iTempMapCount = aPages; sl@0: if (aPages==1) sl@0: { sl@0: iTempPte[iTempMapColor] = (aPage&~KPageMask) | SP_PTE(KArmV6PermRWNO, KNormalCachedAttr, 0, 1); sl@0: CacheMaintenance::SinglePteUpdated((TLinAddr)(iTempPte+iTempMapColor)); sl@0: } sl@0: else sl@0: { sl@0: __ASSERT_DEBUG(iTempMapColor+aPages<=KPageColourCount,MM::Panic(MM::ETempMappingNoRoom)); sl@0: for (TInt i=0; i>KPageShift)&KPageColourMask; sl@0: iTempMapCount = aPages; sl@0: TUint pte = SP_PTE(KArmV6PermRWNO, aMemType, 0, 1); sl@0: if (aPages==1) sl@0: { sl@0: iTempPte[iTempMapColor] = (aPage&~KPageMask) | SP_PTE(KArmV6PermRWNO, pte, 0, 1); sl@0: CacheMaintenance::SinglePteUpdated((TLinAddr)(iTempPte+iTempMapColor)); sl@0: } sl@0: else sl@0: { sl@0: __ASSERT_DEBUG(iTempMapColor+aPages<=KPageColourCount,MM::Panic(MM::ETempMappingNoRoom)); sl@0: for (TInt i=0; i>KPageShift)&KPageColourMask; sl@0: iSecondTempMapCount = aPages; sl@0: if (aPages==1) sl@0: { sl@0: iSecondTempPte[iSecondTempMapColor] = (aPage&~KPageMask) | SP_PTE(KArmV6PermRWNO, KNormalCachedAttr, 0, 1); sl@0: CacheMaintenance::SinglePteUpdated((TLinAddr)(iSecondTempPte+iSecondTempMapColor)); sl@0: } sl@0: else sl@0: { sl@0: __ASSERT_DEBUG(iSecondTempMapColor+aPages<=KPageColourCount,MM::Panic(MM::ETempMappingNoRoom)); sl@0: for (TInt i=0; iRemoveAlias(); sl@0: NKern::UnlockSystem(); sl@0: // access memory, which will cause an exception... sl@0: if(!(TUint(aAddr^KIPCAlias)iOwningProcess)->iOsAsid); sl@0: if(aWrite) sl@0: *(volatile TUint8*)aAddr = 0; sl@0: else sl@0: aWrite = *(volatile TUint8*)aAddr; sl@0: // can't get here sl@0: __NK_ASSERT_DEBUG(0); sl@0: } sl@0: sl@0: TUint32 local_mask; sl@0: DMemModelProcess* process=(DMemModelProcess*)TheCurrentThread->iOwningProcess; sl@0: if(aWrite) sl@0: local_mask = process->iAddressCheckMaskW; sl@0: else sl@0: local_mask = process->iAddressCheckMaskR; sl@0: TUint32 mask = 2<<(end>>27); sl@0: mask -= 1<<(aAddr>>27); sl@0: if((local_mask&mask)!=mask) sl@0: return EFalse; sl@0: sl@0: if(!aWrite) sl@0: return ETrue; // reads are ok sl@0: sl@0: // writes need further checking... sl@0: TLinAddr userCodeStart = iUserCodeBase; sl@0: TLinAddr userCodeEnd = userCodeStart+iMaxUserCodeSize; sl@0: if(end>=userCodeStart && aAddr>27; sl@0: if(!(aPerm&EMapAttrWriteUser)) sl@0: { sl@0: // reading with user permissions... sl@0: okForSupervisorAccess = (aProcess->iAddressCheckMaskR>>shift)&1; sl@0: } sl@0: else sl@0: { sl@0: // writing with user permissions... sl@0: okForSupervisorAccess = (aProcess->iAddressCheckMaskW>>shift)&1; sl@0: if(okForSupervisorAccess) sl@0: { sl@0: // check for user code, because this is supervisor r/w and so sl@0: // is not safe to write to access with supervisor permissions. sl@0: if(TUint(aAddr-m.iUserCodeBase)>KChunkShift; sl@0: if(pdeIndex>=(m.iLocalPdSize>>2)) sl@0: { sl@0: // address is in global section, don't bother aliasing it... 
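sl@0: // A sketch of the typical calling pattern for Alias()/RemoveAlias(), as used by
sl@0: // EnsurePagePresent() later in this file (illustrative only):
sl@0: //   TLinAddr src; TInt size;
sl@0: //   if (thread->Alias(addr, process, 1, EMapAttrReadUser, src, size) >= 0)
sl@0: //       ReadByte(src);   // touch the remote page through the alias window
sl@0: //   thread->RemoveAlias();
sl@0: // Global-region addresses take the early-return path here and are handed back unaliased.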
sl@0: if(iAliasLinAddr) sl@0: RemoveAlias(); sl@0: aAliasAddr = aAddr; sl@0: TInt maxSize = KChunkSize-(aAddr&KChunkMask); sl@0: aAliasSize = aSizeiOsAsid; sl@0: TPde* pd = PageDirectory(asid); sl@0: TPde pde = pd[pdeIndex]; sl@0: if ((TPhysAddr)(pde&~KPageMask) == AliasRemapOld) sl@0: pde = AliasRemapNew|(pde&KPageMask); sl@0: pde = PDE_IN_DOMAIN(pde, KIPCAliasDomain); sl@0: TLinAddr aliasAddr = KIPCAlias+(aAddr&(KChunkMask & ~KPageMask)); sl@0: if(pde==iAliasPde && iAliasLinAddr) sl@0: { sl@0: // pde already aliased, so just update linear address... sl@0: iAliasLinAddr = aliasAddr; sl@0: } sl@0: else sl@0: { sl@0: // alias PDE changed... sl@0: iAliasPde = pde; sl@0: iAliasOsAsid = asid; sl@0: if(!iAliasLinAddr) sl@0: { sl@0: ArmMmu::UnlockAlias(); sl@0: ::TheMmu.iAliasList.Add(&iAliasLink); // add to list if not already aliased sl@0: } sl@0: iAliasLinAddr = aliasAddr; sl@0: *iAliasPdePtr = pde; sl@0: CacheMaintenance::SinglePteUpdated((TLinAddr)iAliasPdePtr); sl@0: } sl@0: sl@0: __KTRACE_OPT(KMMU2,Kern::Printf("DMemModelThread::Alias() PDEntry=%x, iAliasLinAddr=%x",pde, aliasAddr)); sl@0: InvalidateTLBForPage(aliasAddr, ((DMemModelProcess*)iOwningProcess)->iOsAsid); sl@0: TInt offset = aAddr&KPageMask; sl@0: aAliasAddr = aliasAddr | offset; sl@0: TInt maxSize = KPageSize - offset; sl@0: aAliasSize = aSizeiOsAsid); sl@0: iAliasLink.Deque(); sl@0: } sl@0: } sl@0: sl@0: /* sl@0: * Performs cache maintenance for physical page that is going to be reused. sl@0: * Fully cached attributes are assumed. sl@0: */ sl@0: void ArmMmu::CacheMaintenanceOnDecommit(TPhysAddr a) sl@0: { sl@0: // purge a single page from the cache following decommit sl@0: ArmMmu& m=::TheMmu; sl@0: TInt colour = SPageInfo::FromPhysAddr(a)->Offset()&KPageColourMask; sl@0: TPte& pte=m.iTempPte[colour]; sl@0: TLinAddr va=m.iTempAddr+(colour<=0) sl@0: ArmMmu::CacheMaintenanceOnDecommit(*al++); sl@0: } sl@0: sl@0: /* sl@0: * Performs cache maintenance to preserve physical page that is going to be reused. sl@0: */ sl@0: void ArmMmu::CacheMaintenanceOnPreserve(TPhysAddr a, TUint aMapAttr) sl@0: { sl@0: // purge a single page from the cache following decommit sl@0: ArmMmu& m=::TheMmu; sl@0: TInt colour = SPageInfo::FromPhysAddr(a)->Offset()&KPageColourMask; sl@0: TPte& pte=m.iTempPte[colour]; sl@0: TLinAddr va=m.iTempAddr+(colour<=0) sl@0: ArmMmu::CacheMaintenanceOnPreserve(*al++, aMapAttr); sl@0: } sl@0: sl@0: /* sl@0: * Performs cache maintenance of physical memory that has been decommited and has to be preserved. sl@0: * Call this method for physical pages with no page info updated (or no page info at all). sl@0: * @arg aPhysAddr The address of contiguous physical memory to be preserved. sl@0: * @arg aSize The size of the region sl@0: * @arg aLinAddr Former linear address of the region. As said above, the physical memory is sl@0: * already remapped from this linear address. sl@0: * @arg aMapAttr Mapping attributes of the region when it was mapped in aLinAddr. sl@0: * @pre MMU mutex is held. 
sl@0: */ sl@0: void ArmMmu::CacheMaintenanceOnPreserve(TPhysAddr aPhysAddr, TInt aSize, TLinAddr aLinAddr, TUint aMapAttr ) sl@0: { sl@0: __NK_ASSERT_DEBUG((aPhysAddr&KPageMask)==0); sl@0: __NK_ASSERT_DEBUG((aSize&KPageMask)==0); sl@0: __NK_ASSERT_DEBUG((aLinAddr&KPageMask)==0); sl@0: sl@0: TPhysAddr pa = aPhysAddr; sl@0: TInt size = aSize; sl@0: TInt colour = (aLinAddr>>KPageShift)&KPageColourMask; sl@0: TPte* pte = &(iTempPte[colour]); sl@0: while (size) sl@0: { sl@0: pte=&(iTempPte[colour]); sl@0: TLinAddr va=iTempAddr+(colour<iOsAsid; sl@0: TInt page = aLinAddr>>KPageShift; sl@0: NKern::LockSystem(); sl@0: for(;;) sl@0: { sl@0: TPde* pd = PageDirectory(asid)+(page>>(KChunkShift-KPageShift)); sl@0: TPte* pt = SafePageTableFromPde(*pd++); sl@0: TInt pteIndex = page&(KChunkMask>>KPageShift); sl@0: if(!pt) sl@0: { sl@0: // whole page table has gone, so skip all pages in it... sl@0: TInt pagesInPt = (KChunkSize>>KPageShift)-pteIndex; sl@0: aNumPages -= pagesInPt; sl@0: page += pagesInPt; sl@0: if(aNumPages>0) sl@0: continue; sl@0: NKern::UnlockSystem(); sl@0: return KErrNone; sl@0: } sl@0: pt += pteIndex; sl@0: do sl@0: { sl@0: TInt pagesInPt = (KChunkSize>>KPageShift)-pteIndex; sl@0: if(pagesInPt>aNumPages) sl@0: pagesInPt = aNumPages; sl@0: if(pagesInPt>KMaxPages) sl@0: pagesInPt = KMaxPages; sl@0: sl@0: aNumPages -= pagesInPt; sl@0: page += pagesInPt; sl@0: sl@0: do sl@0: { sl@0: TPte pte = *pt++; sl@0: if(pte) // pte may be null if page has already been unlocked and reclaimed by system sl@0: iRamCache->DonateRamCachePage(SPageInfo::FromPhysAddr(pte)); sl@0: } sl@0: while(--pagesInPt); sl@0: sl@0: if(!aNumPages) sl@0: { sl@0: NKern::UnlockSystem(); sl@0: return KErrNone; sl@0: } sl@0: sl@0: pteIndex = page&(KChunkMask>>KPageShift); sl@0: } sl@0: while(!NKern::FlashSystem() && pteIndex); sl@0: } sl@0: } sl@0: sl@0: sl@0: TInt ArmMmu::LockRamCachePages(TLinAddr aLinAddr, TInt aNumPages, DProcess* aProcess) sl@0: { sl@0: TInt asid = ((DMemModelProcess*)aProcess)->iOsAsid; sl@0: TInt page = aLinAddr>>KPageShift; sl@0: NKern::LockSystem(); sl@0: for(;;) sl@0: { sl@0: TPde* pd = PageDirectory(asid)+(page>>(KChunkShift-KPageShift)); sl@0: TPte* pt = SafePageTableFromPde(*pd++); sl@0: TInt pteIndex = page&(KChunkMask>>KPageShift); sl@0: if(!pt) sl@0: goto not_found; sl@0: pt += pteIndex; sl@0: do sl@0: { sl@0: TInt pagesInPt = (KChunkSize>>KPageShift)-pteIndex; sl@0: if(pagesInPt>aNumPages) sl@0: pagesInPt = aNumPages; sl@0: if(pagesInPt>KMaxPages) sl@0: pagesInPt = KMaxPages; sl@0: sl@0: aNumPages -= pagesInPt; sl@0: page += pagesInPt; sl@0: sl@0: do sl@0: { sl@0: TPte pte = *pt++; sl@0: if(pte==0) sl@0: goto not_found; sl@0: if(!iRamCache->ReclaimRamCachePage(SPageInfo::FromPhysAddr(pte))) sl@0: goto not_found; sl@0: } sl@0: while(--pagesInPt); sl@0: sl@0: if(!aNumPages) sl@0: { sl@0: NKern::UnlockSystem(); sl@0: return KErrNone; sl@0: } sl@0: sl@0: pteIndex = page&(KChunkMask>>KPageShift); sl@0: } sl@0: while(!NKern::FlashSystem() && pteIndex); sl@0: } sl@0: not_found: sl@0: NKern::UnlockSystem(); sl@0: return KErrNotFound; sl@0: } sl@0: sl@0: sl@0: void RamCache::SetFree(SPageInfo* aPageInfo) sl@0: { sl@0: ArmMmu& m=::TheMmu; sl@0: // Make a page free sl@0: SPageInfo::TType type = aPageInfo->Type(); sl@0: if(type==SPageInfo::EPagedCache) sl@0: { sl@0: TInt offset = aPageInfo->Offset()<Owner(); sl@0: __NK_ASSERT_DEBUG(TUint(offset)iMaxSize)); sl@0: TLinAddr lin = ((TLinAddr)chunk->iBase)+offset; sl@0: TInt asid = ((DMemModelProcess*)chunk->iOwningProcess)->iOsAsid; sl@0: TPte* pt = 
PtePtrFromLinAddr(lin,asid);
sl@0: TPhysAddr phys = (*pt)&~KPageMask;
sl@0: *pt = KPteNotPresentEntry;
sl@0: CacheMaintenance::SinglePteUpdated((TLinAddr)pt);
sl@0: InvalidateTLBForPage(lin,asid);
sl@0: m.CacheMaintenanceOnDecommit(phys);
sl@0:
sl@0: // actually decommit it from chunk...
sl@0: TInt ptid = ((TLinAddr)pt-KPageTableBase)>>KPageTableShift;
sl@0: SPageTableInfo& ptinfo=((ArmMmu*)iMmu)->iPtInfo[ptid];
sl@0: if(!--ptinfo.iCount)
sl@0: {
sl@0: chunk->iPageTables[offset>>KChunkShift] = 0xffff;
sl@0: NKern::UnlockSystem();
sl@0: ((ArmMmu*)iMmu)->DoUnassignPageTable(lin, (TAny*)asid);
sl@0: ((ArmMmu*)iMmu)->FreePageTable(ptid);
sl@0: NKern::LockSystem();
sl@0: }
sl@0: }
sl@0: else
sl@0: {
sl@0: __KTRACE_OPT2(KPAGING,KPANIC,Kern::Printf("DP: SetFree() with bad page type = %d",aPageInfo->Type()));
sl@0: Panic(EUnexpectedPageType);
sl@0: }
sl@0: }
sl@0:
sl@0:
sl@0: //
sl@0: // MemModelDemandPaging
sl@0: //
sl@0:
sl@0: class MemModelDemandPaging : public DemandPaging
sl@0: {
sl@0: public:
sl@0: // From RamCacheBase
sl@0: virtual void Init2();
sl@0: virtual TInt Init3();
sl@0: virtual TBool PageUnmapped(SPageInfo* aPageInfo);
sl@0: // From DemandPaging
sl@0: virtual TInt Fault(TAny* aExceptionInfo);
sl@0: virtual void SetOld(SPageInfo* aPageInfo);
sl@0: virtual void SetFree(SPageInfo* aPageInfo);
sl@0: virtual void NotifyPageFree(TPhysAddr aPage);
sl@0: virtual TInt EnsurePagePresent(TLinAddr aPage, DProcess* aProcess);
sl@0: virtual TPhysAddr LinearToPhysical(TLinAddr aPage, DProcess* aProcess);
sl@0: virtual void AllocLoadAddress(DPagingRequest& aReq, TInt aDeviceId);
sl@0: virtual TInt PageState(TLinAddr aAddr);
sl@0: virtual TBool NeedsMutexOrderCheck(TLinAddr aStartAddr, TUint aLength);
sl@0: // New
sl@0: inline ArmMmu& Mmu() { return (ArmMmu&)*iMmu; }
sl@0: void InitRomPaging();
sl@0: void InitCodePaging();
sl@0: TInt HandleFault(TArmExcInfo& aExc, TLinAddr aFaultAddress, TInt aAsid);
sl@0: TInt PageIn(TLinAddr aAddress, TInt aAsid, DMemModelCodeSegMemory* aCodeSegMemory);
sl@0: public:
sl@0: // use of the following members is protected by the system lock..
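sl@0: // These work like ArmMmu::iTempAddr/iTempPte earlier in this file: SetFree() picks the
sl@0: // entry matching the page colour so the purge is done through a correctly coloured
sl@0: // virtual address, roughly (illustrative sketch, exact PTE attributes may differ):
sl@0: //   TInt colour = pageInfo->Offset() & KPageColourMask;
sl@0: //   iPurgePte[colour] = phys | SP_PTE(KArmV6PermRWNO, KNormalCachedAttr, 0, 1);
sl@0: //   ... cache maintenance on iPurgeAddr + (colour << KPageShift) ...
sl@0: //   iPurgePte[colour] = 0;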
sl@0: TPte* iPurgePte; // PTE used for temporary mappings during cache purge operations sl@0: TLinAddr iPurgeAddr; // address corresponding to iPurgePte sl@0: }; sl@0: sl@0: extern void MakeGlobalPTEInaccessible(TPte* aPtePtr, TPte aNewPte, TLinAddr aLinAddr); sl@0: extern void MakePTEInaccessible(TPte* aPtePtr, TPte aNewPte, TLinAddr aLinAddr, TInt aAsid); sl@0: sl@0: // sl@0: // MemModelDemandPaging sl@0: // sl@0: sl@0: sl@0: DemandPaging* DemandPaging::New() sl@0: { sl@0: return new MemModelDemandPaging(); sl@0: } sl@0: sl@0: sl@0: void MemModelDemandPaging::Init2() sl@0: { sl@0: __KTRACE_OPT2(KPAGING,KBOOT,Kern::Printf(">MemModelDemandPaging::Init2")); sl@0: DemandPaging::Init2(); sl@0: sl@0: iPurgeAddr = KDemandPagingTempAddr; sl@0: iPurgePte = PtePtrFromLinAddr(iPurgeAddr); sl@0: sl@0: __KTRACE_OPT2(KPAGING,KBOOT,Kern::Printf("iLinAddr + colourMask) & ~colourMask; sl@0: sl@0: if(RomPagingRequested()) sl@0: InitRomPaging(); sl@0: sl@0: if (CodePagingRequested()) sl@0: InitCodePaging(); sl@0: sl@0: __KTRACE_OPT2(KPAGING,KBOOT,Kern::Printf("=0); sl@0: Mmu().PtInfo(ptid).SetGlobal(lin >> KChunkShift); sl@0: } sl@0: sl@0: // Get new page table addresses sl@0: TPte* pt = PageTable(ptid); sl@0: TPhysAddr ptPhys=Mmu().LinearToPhysical((TLinAddr)pt,0); sl@0: sl@0: // Pointer to page directory entry sl@0: TPde* ppde = ::InitPageDirectory + (lin>>KChunkShift); sl@0: sl@0: // Fill in Page Table sl@0: TPte* ptEnd = pt+(1<<(KChunkShift-KPageShift)); sl@0: pt += (lin&KChunkMask)>>KPageShift; sl@0: TLinAddr firstPte = (TLinAddr)pt; // Will need this to clean page table memory region from cache sl@0: sl@0: do sl@0: { sl@0: if(linType(); sl@0: sl@0: // Only have to deal with cache pages - pages containg code don't get returned to the system sl@0: // when they are decommitted from an individual process, only when the code segment is destroyed sl@0: if(type!=SPageInfo::EPagedCache) sl@0: { sl@0: __NK_ASSERT_DEBUG(type!=SPageInfo::EPagedCode); // shouldn't happen sl@0: __NK_ASSERT_DEBUG(type!=SPageInfo::EPagedData); // not supported yet sl@0: return ETrue; sl@0: } sl@0: sl@0: RemovePage(aPageInfo); sl@0: AddAsFreePage(aPageInfo); sl@0: // Return false to stop DMemModelChunk::DoDecommit from freeing this page sl@0: return EFalse; sl@0: } sl@0: sl@0: sl@0: void DoSetCodeOld(SPageInfo* aPageInfo, DMemModelCodeSegMemory* aCodeSegMemory, TLinAddr aLinAddr) sl@0: { sl@0: NThread* currentThread = NKern::CurrentThread(); sl@0: aPageInfo->SetModifier(currentThread); sl@0: // scan all address spaces... sl@0: TInt asid = -1; sl@0: TInt lastAsid = KArmV6NumAsids-1; sl@0: TUint32* ptr = aCodeSegMemory->iOsAsids->iMap; sl@0: do sl@0: { sl@0: TUint32 bits = *ptr++; sl@0: do sl@0: { sl@0: ++asid; sl@0: if(bits&0x80000000u) sl@0: { sl@0: // codeseg is mapped in this address space, so update PTE... sl@0: TPte* pt = PtePtrFromLinAddr(aLinAddr,asid); sl@0: TPte pte = *pt; sl@0: if(pte&KPtePresentMask) sl@0: { sl@0: __NK_ASSERT_DEBUG((pte&~KPageMask) == aPageInfo->PhysAddr()); sl@0: MakePTEInaccessible(pt, pte&~KPtePresentMask, aLinAddr, asid); sl@0: } sl@0: } sl@0: } sl@0: while(bits<<=1); sl@0: if(NKern::FlashSystem() && aPageInfo->CheckModified(currentThread)) sl@0: return; // page was modified by another thread sl@0: asid |= 31; sl@0: } sl@0: while(asidState() == SPageInfo::EStatePagedOld); sl@0: sl@0: SPageInfo::TType type = aPageInfo->Type(); sl@0: sl@0: if(type==SPageInfo::EPagedROM) sl@0: { sl@0: // get linear address of page... 
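sl@0: // Background (sketch): an "old" page keeps its RAM but its PTE(s) are made
sl@0: // inaccessible; the next access takes a translation fault and HandleFault() merely
sl@0: // restores the small-page type bits and calls Rejuvenate(), without re-reading the
sl@0: // page from backing store.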
sl@0: TInt offset = aPageInfo->Offset()<Offset()<Owner(); sl@0: __NK_ASSERT_DEBUG(codeSegMemory && codeSegMemory->iPages && codeSegMemory->iIsDemandPaged); sl@0: sl@0: #ifdef _DEBUG sl@0: TInt pageNumber = (lin - codeSegMemory->iRamInfo.iCodeRunAddr) >> KPageShift; sl@0: __NK_ASSERT_DEBUG(codeSegMemory->iPages[pageNumber] == aPageInfo->PhysAddr()); sl@0: #endif sl@0: sl@0: // make page inaccessible... sl@0: DoSetCodeOld(aPageInfo,codeSegMemory,lin); sl@0: sl@0: END_PAGING_BENCHMARK(this, EPagingBmSetCodePageOld); sl@0: } sl@0: else if(type==SPageInfo::EPagedCache) sl@0: { sl@0: // leave page accessible sl@0: } sl@0: else if(type!=SPageInfo::EPagedFree) sl@0: { sl@0: __KTRACE_OPT2(KPAGING,KPANIC,Kern::Printf("DP: SetOld() with bad page type = %d",aPageInfo->Type())); sl@0: Panic(EUnexpectedPageType); sl@0: } sl@0: NKern::FlashSystem(); sl@0: } sl@0: sl@0: sl@0: void DoSetCodeFree(SPageInfo* aPageInfo, TPhysAddr aPhysAddr, DMemModelCodeSegMemory* aCodeSegMemory, TLinAddr aLinAddr) sl@0: { sl@0: NThread* currentThread = NKern::CurrentThread(); sl@0: aPageInfo->SetModifier(currentThread); sl@0: // scan all address spaces... sl@0: TInt asid = -1; sl@0: TInt lastAsid = KArmV6NumAsids-1; sl@0: TUint32* ptr = aCodeSegMemory->iOsAsids->iMap; sl@0: do sl@0: { sl@0: TUint32 bits = *ptr++; sl@0: do sl@0: { sl@0: ++asid; sl@0: if(bits&0x80000000u) sl@0: { sl@0: // codeseg is mapped in this address space, so update PTE... sl@0: TPte* pt = PtePtrFromLinAddr(aLinAddr,asid); sl@0: TPte pte = *pt; sl@0: if (pte!=KPteNotPresentEntry && (pte&~KPageMask) == aPhysAddr) sl@0: MakePTEInaccessible(pt, KPteNotPresentEntry, aLinAddr, asid); sl@0: } sl@0: } sl@0: while(bits<<=1); sl@0: if(NKern::FlashSystem()) sl@0: { sl@0: // nobody else should modify page! sl@0: __NK_ASSERT_DEBUG(!aPageInfo->CheckModified(currentThread)); sl@0: } sl@0: asid |= 31; sl@0: } sl@0: while(asidState() == SPageInfo::EStatePagedDead); sl@0: if(aPageInfo->LockCount()) sl@0: Panic(ERamPageLocked); sl@0: sl@0: SPageInfo::TType type = aPageInfo->Type(); sl@0: TPhysAddr phys = aPageInfo->PhysAddr(); sl@0: sl@0: if(type==SPageInfo::EPagedROM) sl@0: { sl@0: // get linear address of page... sl@0: TInt offset = aPageInfo->Offset()<Offset()<Owner(); sl@0: __NK_ASSERT_DEBUG(codeSegMemory && codeSegMemory->iPages && codeSegMemory->iIsDemandPaged); sl@0: sl@0: // remove page from CodeSegMemory (must come before System Lock is released)... sl@0: TInt pageNumber = (lin - codeSegMemory->iRamInfo.iCodeRunAddr) >> KPageShift; sl@0: __NK_ASSERT_DEBUG(codeSegMemory->iPages[pageNumber] == aPageInfo->PhysAddr()); sl@0: codeSegMemory->iPages[pageNumber] = KPhysAddrInvalid; sl@0: sl@0: // unmap page from all processes it's mapped into... sl@0: DoSetCodeFree(aPageInfo,phys,codeSegMemory,lin); sl@0: sl@0: END_PAGING_BENCHMARK(this, EPagingBmSetCodePageFree); sl@0: #ifdef BTRACE_PAGING sl@0: BTraceContext8(BTrace::EPaging,BTrace::EPagingPageOutCode,phys,lin); sl@0: #endif sl@0: } sl@0: else if(type==SPageInfo::EPagedCache) sl@0: { sl@0: // get linear address of page... sl@0: TInt offset = aPageInfo->Offset()<Owner(); sl@0: __NK_ASSERT_DEBUG(TUint(offset)iMaxSize)); sl@0: TLinAddr lin = ((TLinAddr)chunk->iBase)+offset; sl@0: sl@0: // unmap it... sl@0: TInt asid = ((DMemModelProcess*)chunk->iOwningProcess)->iOsAsid; sl@0: TPte* pt = PtePtrFromLinAddr(lin,asid); sl@0: *pt = KPteNotPresentEntry; sl@0: CacheMaintenance::SinglePteUpdated((TLinAddr)pt); sl@0: sl@0: InvalidateTLBForPage(lin,asid); sl@0: sl@0: // actually decommit it from chunk... 
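sl@0: // As in RamCache::SetFree() earlier in this file, releasing the last PTE of a page
sl@0: // table frees the table itself; the chunk records "no page table" by storing the
sl@0: // 0xffff sentinel in iPageTables[offset>>KChunkShift] before the table is unassigned
sl@0: // and freed.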
sl@0: TInt ptid = ((TLinAddr)pt-KPageTableBase)>>KPageTableShift; sl@0: SPageTableInfo& ptinfo=Mmu().iPtInfo[ptid]; sl@0: if(!--ptinfo.iCount) sl@0: { sl@0: chunk->iPageTables[offset>>KChunkShift] = 0xffff; sl@0: NKern::UnlockSystem(); sl@0: Mmu().DoUnassignPageTable(lin, (TAny*)asid); sl@0: Mmu().FreePageTable(ptid); sl@0: NKern::LockSystem(); sl@0: } sl@0: sl@0: #ifdef BTRACE_PAGING sl@0: BTraceContext8(BTrace::EPaging,BTrace::EPagingPageOutCache,phys,lin); sl@0: #endif sl@0: } sl@0: else if(type==SPageInfo::EPagedFree) sl@0: { sl@0: // already free... sl@0: #ifdef BTRACE_PAGING sl@0: BTraceContext4(BTrace::EPaging,BTrace::EPagingPageOutFree,phys); sl@0: #endif sl@0: // fall through to cache purge code because cache may not have been sl@0: // cleaned for this page if PageUnmapped called sl@0: } sl@0: else sl@0: { sl@0: __KTRACE_OPT2(KPAGING,KPANIC,Kern::Printf("DP: SetFree() with bad page type = %d",aPageInfo->Type())); sl@0: Panic(EUnexpectedPageType); sl@0: return; sl@0: } sl@0: sl@0: NKern::UnlockSystem(); sl@0: sl@0: // purge cache for page... sl@0: TInt colour = aPageInfo->Offset()&KPageColourMask; sl@0: TPte& pte=iPurgePte[colour]; sl@0: TLinAddr va=iPurgeAddr+(colour<Type()==SPageInfo::EPagedCode, MM::Panic(MM::EUnexpectedPageType)); sl@0: RemovePage(pageInfo); sl@0: SetFree(pageInfo); sl@0: AddAsFreePage(pageInfo); sl@0: } sl@0: sl@0: sl@0: TInt MemModelDemandPaging::Fault(TAny* aExceptionInfo) sl@0: { sl@0: TArmExcInfo& exc=*(TArmExcInfo*)aExceptionInfo; sl@0: sl@0: // Get faulting address sl@0: TLinAddr faultAddress = exc.iFaultAddress; sl@0: if(exc.iExcCode==EArmExceptionDataAbort) sl@0: { sl@0: // Let writes take an exception rather than page in any memory... sl@0: if(exc.iFaultStatus&(1<<11)) sl@0: return KErrUnknown; sl@0: } sl@0: else if (exc.iExcCode != EArmExceptionPrefetchAbort) sl@0: return KErrUnknown; // Not prefetch or data abort sl@0: sl@0: // Only handle page translation faults sl@0: if((exc.iFaultStatus & 0x40f) != 0x7) sl@0: return KErrUnknown; sl@0: sl@0: DMemModelThread* thread = (DMemModelThread*)TheCurrentThread; sl@0: sl@0: // check which ragion fault occured in... sl@0: TInt asid = 0; // asid != 0 => code paging fault sl@0: if(TUint(faultAddress-iRomPagedLinearBase)iOsAsid; sl@0: } sl@0: else if (thread->iAliasLinAddr && TUint(faultAddress - thread->iAliasLinAddr) < TUint(KPageSize)) sl@0: { sl@0: // in aliased memory sl@0: faultAddress = (faultAddress - thread->iAliasLinAddr) + thread->iAliasTarget; sl@0: if(TUint(faultAddress-iCodeLinearBase)>=iCodeSize) sl@0: return KErrUnknown; // not in alias of code sl@0: asid = thread->iAliasOsAsid; sl@0: __NK_ASSERT_DEBUG(asid != 0); sl@0: } sl@0: else sl@0: return KErrUnknown; // Not in pageable region sl@0: sl@0: // Check if thread holds fast mutex and claim system lock sl@0: NFastMutex* fm = NKern::HeldFastMutex(); sl@0: TPagingExcTrap* trap = thread->iPagingExcTrap; sl@0: if(!fm) sl@0: NKern::LockSystem(); sl@0: else sl@0: { sl@0: if(!trap || fm!=&TheScheduler.iLock) sl@0: { sl@0: __KTRACE_OPT2(KPAGING,KPANIC,Kern::Printf("DP: Fault with FM Held! %x (%O pc=%x)",faultAddress,&Kern::CurrentThread(),exc.iR15)); sl@0: Panic(EPageFaultWhilstFMHeld); // Not allowed to hold mutexes sl@0: } sl@0: // restore address space on multiple memory model (because the trap will sl@0: // bypass any code which would have done this.)... sl@0: DMemModelThread::RestoreAddressSpace(); sl@0: sl@0: // Current thread already has the system lock... sl@0: NKern::FlashSystem(); // Let someone else have a go with the system lock. 
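sl@0: // Note: when a paging trap is active, a successfully handled fault is reported via
sl@0: // trap->Exception(1) further down; that unwinds to the XTRAP_PAGING_START/END pair in
sl@0: // the caller (see EnsurePagePresent() below) so the faulting operation can be retried
sl@0: // once the page has been brought in.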
sl@0: } sl@0: sl@0: // System locked here sl@0: sl@0: TInt r = KErrNone; sl@0: if(thread->IsRealtime()) sl@0: r = CheckRealtimeThreadFault(thread, aExceptionInfo); sl@0: if (r == KErrNone) sl@0: r = HandleFault(exc, faultAddress, asid); sl@0: sl@0: // Restore system lock state sl@0: if (fm != NKern::HeldFastMutex()) sl@0: { sl@0: if (fm) sl@0: NKern::LockSystem(); sl@0: else sl@0: NKern::UnlockSystem(); sl@0: } sl@0: sl@0: // Deal with XTRAP_PAGING sl@0: if(r == KErrNone && trap) sl@0: { sl@0: trap->Exception(1); // Return from exception trap with result '1' (value>0) sl@0: // code doesn't continue beyond this point. sl@0: } sl@0: sl@0: return r; sl@0: } sl@0: sl@0: sl@0: sl@0: TInt MemModelDemandPaging::HandleFault(TArmExcInfo& aExc, TLinAddr aFaultAddress, TInt aAsid) sl@0: { sl@0: ++iEventInfo.iPageFaultCount; sl@0: sl@0: // get page table entry... sl@0: TPte* pt = SafePtePtrFromLinAddr(aFaultAddress, aAsid); sl@0: if(!pt) sl@0: return KErrNotFound; sl@0: TPte pte = *pt; sl@0: sl@0: // Do what is required to make page accessible... sl@0: sl@0: if(pte&KPtePresentMask) sl@0: { sl@0: // PTE is present, so assume it has already been dealt with sl@0: #ifdef BTRACE_PAGING sl@0: BTraceContext12(BTrace::EPaging,BTrace::EPagingPageNop,pte&~KPageMask,aFaultAddress,aExc.iR15); sl@0: #endif sl@0: return KErrNone; sl@0: } sl@0: sl@0: if(pte!=KPteNotPresentEntry) sl@0: { sl@0: // PTE alread has a page sl@0: SPageInfo* pageInfo = SPageInfo::FromPhysAddr(pte); sl@0: if(pageInfo->State()==SPageInfo::EStatePagedDead) sl@0: { sl@0: // page currently being unmapped, so do that here... sl@0: MakePTEInaccessible(pt, KPteNotPresentEntry, aFaultAddress, aAsid); sl@0: } sl@0: else sl@0: { sl@0: // page just needs making young again... sl@0: *pt = TPte(pte|KArmV6PteSmallPage); // Update page table sl@0: CacheMaintenance::SinglePteUpdated((TLinAddr)pt); sl@0: Rejuvenate(pageInfo); sl@0: #ifdef BTRACE_PAGING sl@0: BTraceContext12(BTrace::EPaging,BTrace::EPagingRejuvenate,pte&~KPageMask,aFaultAddress,aExc.iR15); sl@0: #endif sl@0: return KErrNone; sl@0: } sl@0: } sl@0: sl@0: // PTE not present, so page it in... sl@0: // check if fault in a CodeSeg... sl@0: DMemModelCodeSegMemory* codeSegMemory = NULL; sl@0: if (!aAsid) sl@0: NKern::ThreadEnterCS(); sl@0: else sl@0: { sl@0: // find CodeSeg... sl@0: DMemModelCodeSeg* codeSeg = (DMemModelCodeSeg*)DCodeSeg::CodeSegsByAddress.Find(aFaultAddress); sl@0: if (!codeSeg) sl@0: return KErrNotFound; sl@0: codeSegMemory = codeSeg->Memory(); sl@0: if (codeSegMemory==0 || !codeSegMemory->iIsDemandPaged || codeSegMemory->iOsAsids->NotFree(aAsid, 1)) sl@0: return KErrNotFound; sl@0: sl@0: // check if it's paged in but not yet mapped into this process... sl@0: TInt pageNumber = (aFaultAddress - codeSegMemory->iRamInfo.iCodeRunAddr) >> KPageShift; sl@0: TPhysAddr page = codeSegMemory->iPages[pageNumber]; sl@0: if (page != KPhysAddrInvalid) sl@0: { sl@0: // map it into this process... sl@0: SPageInfo* pageInfo = SPageInfo::FromPhysAddr(page); sl@0: __NK_ASSERT_DEBUG(pageInfo->State()!=SPageInfo::EStatePagedDead); sl@0: *pt = page | (codeSegMemory->iCreator ? 
KUserCodeLoadPte : KUserCodeRunPte); sl@0: CacheMaintenance::SinglePteUpdated((TLinAddr)pt); sl@0: Rejuvenate(pageInfo); sl@0: #ifdef BTRACE_PAGING sl@0: BTraceContext8(BTrace::EPaging,BTrace::EPagingMapCode,page,aFaultAddress); sl@0: #endif sl@0: return KErrNone; sl@0: } sl@0: sl@0: // open reference on CodeSegMemory sl@0: NKern::ThreadEnterCS(); sl@0: #ifdef _DEBUG sl@0: TInt r = sl@0: #endif sl@0: codeSegMemory->Open(); sl@0: __NK_ASSERT_DEBUG(r==KErrNone); sl@0: NKern::FlashSystem(); sl@0: } sl@0: sl@0: #ifdef BTRACE_PAGING sl@0: BTraceContext8(BTrace::EPaging,BTrace::EPagingPageInBegin,aFaultAddress,aExc.iR15); sl@0: #endif sl@0: TInt r = PageIn(aFaultAddress, aAsid, codeSegMemory); sl@0: sl@0: NKern::UnlockSystem(); sl@0: sl@0: if(codeSegMemory) sl@0: codeSegMemory->Close(); sl@0: sl@0: NKern::ThreadLeaveCS(); sl@0: sl@0: return r; sl@0: } sl@0: sl@0: sl@0: TInt MemModelDemandPaging::PageIn(TLinAddr aAddress, TInt aAsid, DMemModelCodeSegMemory* aCodeSegMemory) sl@0: { sl@0: // Get a request object - this may block until one is available sl@0: DPagingRequest* req = AcquireRequestObject(); sl@0: sl@0: // Get page table entry sl@0: TPte* pt = SafePtePtrFromLinAddr(aAddress, aAsid); sl@0: sl@0: // Check page is still required... sl@0: if(!pt || *pt!=KPteNotPresentEntry) sl@0: { sl@0: #ifdef BTRACE_PAGING sl@0: BTraceContext0(BTrace::EPaging,BTrace::EPagingPageInUnneeded); sl@0: #endif sl@0: ReleaseRequestObject(req); sl@0: return pt ? KErrNone : KErrNotFound; sl@0: } sl@0: sl@0: ++iEventInfo.iPageInReadCount; sl@0: sl@0: // Get a free page sl@0: SPageInfo* pageInfo = AllocateNewPage(); sl@0: __NK_ASSERT_DEBUG(pageInfo); sl@0: sl@0: // Get physical address of free page sl@0: TPhysAddr phys = pageInfo->PhysAddr(); sl@0: __NK_ASSERT_DEBUG(phys!=KPhysAddrInvalid); sl@0: sl@0: // Temporarily map free page sl@0: TInt colour = (aAddress>>KPageShift)&KPageColourMask; sl@0: __NK_ASSERT_DEBUG((req->iLoadAddr & (KPageColourMask << KPageShift)) == 0); sl@0: req->iLoadAddr |= colour << KPageShift; sl@0: TLinAddr loadAddr = req->iLoadAddr; sl@0: pt = req->iLoadPte+colour; sl@0: // *pt = phys | SP_PTE(KArmV6PermRWNO, KArmV6MemAttWTWAWTWA, 0, 1); sl@0: *pt = phys | SP_PTE(KArmV6PermRWNO, KNormalUncachedAttr, 0, 1); sl@0: CacheMaintenance::SinglePteUpdated((TLinAddr)pt); sl@0: sl@0: // Read page from backing store sl@0: aAddress &= ~KPageMask; sl@0: NKern::UnlockSystem(); sl@0: sl@0: TInt r; sl@0: if (!aCodeSegMemory) sl@0: r = ReadRomPage(req, aAddress); sl@0: else sl@0: { sl@0: r = ReadCodePage(req, aCodeSegMemory, aAddress); sl@0: if (r == KErrNone) sl@0: aCodeSegMemory->ApplyCodeFixups((TUint32*)loadAddr, aAddress); sl@0: } sl@0: if(r!=KErrNone) sl@0: Panic(EPageInFailed); sl@0: sl@0: // make caches consistant... 
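sl@0: // Note: the page was filled through an uncached temporary mapping (KNormalUncachedAttr
sl@0: // above), so no data-cache clean is needed for the writes themselves; remapping to
sl@0: // KNormalCachedAttr and the CodeChanged() call below make the freshly written
sl@0: // instructions visible before the page is mapped at its final address.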
sl@0: // Cache::IMB_Range(loadAddr, KPageSize); sl@0: *pt = phys | SP_PTE(KArmV6PermRWNO, KNormalCachedAttr, 0, 1); sl@0: CacheMaintenance::SinglePteUpdated((TLinAddr)pt); sl@0: InvalidateTLBForPage(loadAddr,KERNEL_MAPPING); sl@0: CacheMaintenance::CodeChanged(loadAddr, KPageSize, CacheMaintenance::ECPUUncached); sl@0: sl@0: NKern::LockSystem(); sl@0: sl@0: // Invalidate temporary mapping sl@0: MakeGlobalPTEInaccessible(pt, KPteNotPresentEntry, loadAddr); sl@0: sl@0: // Release request object now we're finished with it sl@0: req->iLoadAddr &= ~(KPageColourMask << KPageShift); sl@0: ReleaseRequestObject(req); sl@0: sl@0: // Get page table entry sl@0: pt = SafePtePtrFromLinAddr(aAddress, aAsid); sl@0: sl@0: // Check page still needs updating sl@0: TBool notNeeded = pt==0 || *pt!=KPteNotPresentEntry; sl@0: if(aCodeSegMemory) sl@0: notNeeded |= aCodeSegMemory->iOsAsids->NotFree(aAsid, 1); sl@0: if(notNeeded) sl@0: { sl@0: // We don't need the new page after all, so put it on the active list as a free page sl@0: __KTRACE_OPT(KPAGING,Kern::Printf("DP: PageIn (New page not used)")); sl@0: #ifdef BTRACE_PAGING sl@0: BTraceContext0(BTrace::EPaging,BTrace::EPagingPageInUnneeded); sl@0: #endif sl@0: AddAsFreePage(pageInfo); sl@0: return pt ? KErrNone : KErrNotFound; sl@0: } sl@0: sl@0: // Update page info sl@0: if (!aCodeSegMemory) sl@0: pageInfo->SetPagedROM((aAddress-iRomLinearBase)>>KPageShift); sl@0: else sl@0: { sl@0: // Check if page has been paged in and mapped into another process while we were waiting sl@0: TInt pageNumber = (aAddress - aCodeSegMemory->iRamInfo.iCodeRunAddr) >> KPageShift; sl@0: TPhysAddr page = aCodeSegMemory->iPages[pageNumber]; sl@0: if (page != KPhysAddrInvalid) sl@0: { sl@0: // don't need page we've just paged in... sl@0: AddAsFreePage(pageInfo); sl@0: sl@0: // map existing page into this process... sl@0: pageInfo = SPageInfo::FromPhysAddr(page); sl@0: __NK_ASSERT_DEBUG(pageInfo->State()!=SPageInfo::EStatePagedDead); sl@0: *pt = page | (aCodeSegMemory->iCreator ? KUserCodeLoadPte : KUserCodeRunPte); sl@0: CacheMaintenance::SinglePteUpdated((TLinAddr)pt); sl@0: #ifdef BTRACE_PAGING sl@0: BTraceContext0(BTrace::EPaging,BTrace::EPagingPageInUnneeded); sl@0: #endif sl@0: Rejuvenate(pageInfo); sl@0: return KErrNone; sl@0: } sl@0: aCodeSegMemory->iPages[pageNumber] = phys; sl@0: sl@0: pageInfo->SetPagedCode(aCodeSegMemory,(aAddress-Mmu().iUserCodeBase)>>KPageShift); sl@0: } sl@0: sl@0: // Map page into final location sl@0: *pt = phys | (aCodeSegMemory ? (aCodeSegMemory->iCreator ? KUserCodeLoadPte : KUserCodeRunPte) : KRomPtePerm); sl@0: CacheMaintenance::SinglePteUpdated((TLinAddr)pt); sl@0: #ifdef BTRACE_PAGING sl@0: TInt subCat = aCodeSegMemory ? 
BTrace::EPagingPageInCode : BTrace::EPagingPageInROM; sl@0: BTraceContext8(BTrace::EPaging,subCat,phys,aAddress); sl@0: #endif sl@0: sl@0: AddAsYoungest(pageInfo); sl@0: BalanceAges(); sl@0: sl@0: return KErrNone; sl@0: } sl@0: sl@0: sl@0: inline TUint8 ReadByte(TLinAddr aAddress) sl@0: { return *(volatile TUint8*)aAddress; } sl@0: sl@0: sl@0: TInt MemModelDemandPaging::EnsurePagePresent(TLinAddr aPage, DProcess* aProcess) sl@0: { sl@0: TInt r = KErrBadDescriptor; sl@0: XTRAPD(exc,XT_DEFAULT, sl@0: if (!aProcess) sl@0: { sl@0: XTRAP_PAGING_RETRY(CHECK_PAGING_SAFE; ReadByte(aPage);); sl@0: r = KErrNone; sl@0: } sl@0: else sl@0: { sl@0: DMemModelThread& t=*(DMemModelThread*)TheCurrentThread; sl@0: retry: sl@0: TInt pagingFault; sl@0: XTRAP_PAGING_START(pagingFault); sl@0: CHECK_PAGING_SAFE; sl@0: // make alias of page in this process sl@0: TLinAddr alias_src; sl@0: TInt alias_size; sl@0: TInt aliasResult = t.Alias(aPage, (DMemModelProcess*)aProcess, 1, EMapAttrReadUser, alias_src, alias_size); sl@0: if (aliasResult>=0) sl@0: { sl@0: // ensure page to be locked is mapped in, by reading from it... sl@0: ReadByte(alias_src); sl@0: r = KErrNone; sl@0: } sl@0: XTRAP_PAGING_END; sl@0: t.RemoveAlias(); sl@0: if(pagingFault>0) sl@0: goto retry; sl@0: } sl@0: ); // end of XTRAPD sl@0: if(exc) sl@0: return KErrBadDescriptor; sl@0: return r; sl@0: } sl@0: sl@0: sl@0: TPhysAddr MemModelDemandPaging::LinearToPhysical(TLinAddr aPage, DProcess* aProcess) sl@0: { sl@0: TInt asid = 0; sl@0: if (aProcess) sl@0: asid = ((DMemModelProcess*)aProcess)->iOsAsid; sl@0: return Mmu().LinearToPhysical(aPage, asid); sl@0: } sl@0: sl@0: sl@0: TInt MemModelDemandPaging::PageState(TLinAddr aAddr) sl@0: { sl@0: DMemModelProcess* process = (DMemModelProcess*)TheCurrentThread->iOwningProcess; sl@0: TInt asid = 0; sl@0: TPte* ptePtr = 0; sl@0: TPte pte = 0; sl@0: TInt r = 0; sl@0: SPageInfo* pageInfo = NULL; sl@0: sl@0: NKern::LockSystem(); sl@0: sl@0: DMemModelCodeSegMemory* codeSegMemory = 0; sl@0: if(TUint(aAddr-iRomPagedLinearBase)Memory(); sl@0: asid = process->iOsAsid; sl@0: if (codeSegMemory && codeSegMemory->iOsAsids->NotAllocated(asid, 1)) sl@0: { sl@0: r |= EPageStateInRamCode; sl@0: if (codeSegMemory->iIsDemandPaged) sl@0: r |= EPageStatePaged; sl@0: } sl@0: if(process->iCodeChunk) sl@0: r |= EPageStateCodeChunkPresent; sl@0: } sl@0: sl@0: ptePtr = SafePtePtrFromLinAddr(aAddr,asid); sl@0: if (!ptePtr) sl@0: goto done; sl@0: r |= EPageStatePageTablePresent; sl@0: pte = *ptePtr; sl@0: if (pte == KPteNotPresentEntry) sl@0: goto done; sl@0: r |= EPageStatePtePresent; sl@0: if (pte & KPtePresentMask) sl@0: r |= EPageStatePteValid; sl@0: sl@0: pageInfo = SPageInfo::FromPhysAddr(pte); sl@0: r |= pageInfo->Type(); sl@0: r |= pageInfo->State()<<8; sl@0: sl@0: if (codeSegMemory && codeSegMemory->iPages) sl@0: { sl@0: TPhysAddr phys = pte & ~KPageMask; sl@0: TInt pageNumber = (aAddr - codeSegMemory->iRamInfo.iCodeRunAddr) >> KPageShift; sl@0: if (codeSegMemory->iPages[pageNumber] == phys) sl@0: r |= EPageStatePhysAddrPresent; sl@0: } sl@0: sl@0: done: sl@0: NKern::UnlockSystem(); sl@0: return r; sl@0: } sl@0: sl@0: sl@0: TBool MemModelDemandPaging::NeedsMutexOrderCheck(TLinAddr aStartAddr, TUint aLength) sl@0: { sl@0: // Don't check mutex order for reads from global area, except for the paged part of rom sl@0: TBool rangeInGlobalArea = aStartAddr >= KRomLinearBase; sl@0: TBool rangeInPagedRom = iRomPagedLinearBase != 0 && aStartAddr < (iRomLinearBase + iRomSize) && (aStartAddr + aLength) > iRomPagedLinearBase; sl@0: return 
!rangeInGlobalArea || rangeInPagedRom; sl@0: } sl@0: sl@0: sl@0: EXPORT_C TBool DDemandPagingLock::Lock(DThread* aThread, TLinAddr aStart, TInt aSize) sl@0: { sl@0: MemModelDemandPaging* pager = (MemModelDemandPaging*)iThePager; sl@0: if(pager) sl@0: { sl@0: ArmMmu& m = pager->Mmu(); sl@0: TLinAddr end = aStart+aSize; sl@0: sl@0: if ((aStart < TUint(pager->iRomPagedLinearBase+pager->iRomPagedSize) && end > pager->iRomPagedLinearBase) || sl@0: (aStart < TUint(m.iUserCodeBase + m.iMaxUserCodeSize) && end > m.iUserCodeBase)) sl@0: return pager->ReserveLock(aThread,aStart,aSize,*this); sl@0: } sl@0: return EFalse; sl@0: } sl@0: sl@0: void ArmMmu::DisablePageModification(DMemModelChunk* aChunk, TInt aOffset) sl@0: // sl@0: // Mark the page at aOffset in aChunk read-only to prevent it being sl@0: // modified while defrag is in progress. Save the required information sl@0: // to allow the fault handler to deal with this. sl@0: // Call this with the system unlocked. sl@0: // sl@0: { sl@0: __KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::DisablePageModification() offset=%08x", aOffset)); sl@0: sl@0: TInt ptid = aChunk->iPageTables[aOffset>>KChunkShift]; sl@0: if(ptid == 0xffff) sl@0: Panic(EDefragDisablePageFailed); sl@0: sl@0: NKern::LockSystem(); sl@0: TPte* pPte = PageTable(ptid) + ((aOffset&KChunkMask)>>KPageShift); sl@0: TPte pte = *pPte; sl@0: if ((pte & KArmV6PteSmallPage) != KArmV6PteSmallPage sl@0: || SP_PTE_PERM_GET(pte) != (TUint)KArmV6PermRWRW) sl@0: Panic(EDefragDisablePageFailed); sl@0: sl@0: iDisabledAddr = (TLinAddr)(aChunk->iBase) + aOffset; sl@0: if (aChunk->iOwningProcess) sl@0: iDisabledAddrAsid = ((DMemModelProcess*)(aChunk->iOwningProcess))->iOsAsid; sl@0: else sl@0: iDisabledAddrAsid = iDisabledAddriOsAsid; sl@0: sl@0: TBool aliased = EFalse; sl@0: if (thread->iAliasLinAddr && TUint(faultAddress - thread->iAliasLinAddr) < TUint(KPageSize)) sl@0: { sl@0: // in aliased memory sl@0: aliased = ETrue; sl@0: faultAddress = (faultAddress - thread->iAliasLinAddr) + thread->iAliasTarget; sl@0: asid = thread->iAliasOsAsid; sl@0: __NK_ASSERT_DEBUG(asid != 0); sl@0: } sl@0: sl@0: // Take system lock if not already held sl@0: NFastMutex* fm = NKern::HeldFastMutex(); sl@0: if(!fm) sl@0: NKern::LockSystem(); sl@0: else if(fm!=&TheScheduler.iLock) sl@0: { sl@0: __KTRACE_OPT2(KMMU,KPANIC,Kern::Printf("Defrag: Fault with FM Held! 
%x (%O pc=%x)",faultAddress,&Kern::CurrentThread(),exc.iR15)); sl@0: Panic(EDefragFaultWhilstFMHeld); // Not allowed to hold mutexes sl@0: } sl@0: sl@0: TInt r = KErrUnknown; sl@0: sl@0: // check if write access to the page has already been restored and retry if so sl@0: TPte* pt = SafePtePtrFromLinAddr(faultAddress, asid); sl@0: if(!pt) sl@0: { sl@0: r = KErrNotFound; sl@0: goto leave; sl@0: } sl@0: if (SP_PTE_PERM_GET(*pt) == (TUint)KArmV6PermRWRW) sl@0: { sl@0: r = KErrNone; sl@0: goto leave; sl@0: } sl@0: sl@0: // check if the fault occurred in the page we are moving sl@0: if ( iDisabledPte sl@0: && TUint(faultAddress - iDisabledAddr) < TUint(KPageSize) sl@0: && (iDisabledAddrAsid < 0 || asid == iDisabledAddrAsid) ) sl@0: { sl@0: // restore access to the page sl@0: *iDisabledPte = iDisabledOldVal; sl@0: CacheMaintenance::SinglePteUpdated((TLinAddr)iDisabledPte); sl@0: InvalidateTLBForPage(iDisabledAddr, iDisabledAddrAsid); sl@0: if (aliased) sl@0: InvalidateTLBForPage(exc.iFaultAddress, ((DMemModelProcess*)TheScheduler.iAddressSpace)->iOsAsid); sl@0: iDisabledAddr = 0; sl@0: iDisabledAddrAsid = -1; sl@0: iDisabledPte = NULL; sl@0: iDisabledOldVal = 0; sl@0: r = KErrNone; sl@0: } sl@0: sl@0: leave: sl@0: // Restore system lock state sl@0: if (!fm) sl@0: NKern::UnlockSystem(); sl@0: sl@0: return r; sl@0: }
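sl@0: // Overview (sketch; the actual call sites live in the defragmentation code rather than
sl@0: // in this file): DisablePageModification() makes the page being moved read-only and
sl@0: // remembers the original PTE; the defrag code then copies the page contents; any write
sl@0: // that races with the copy faults into RamDefragFault(), which restores the saved,
sl@0: // writable PTE (iDisabledOldVal) so the interrupted write can be retried.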