sl@0: // Copyright (c) 2007-2009 Nokia Corporation and/or its subsidiary(-ies). sl@0: // All rights reserved. sl@0: // This component and the accompanying materials are made available sl@0: // under the terms of the License "Eclipse Public License v1.0" sl@0: // which accompanies this distribution, and is available sl@0: // at the URL "http://www.eclipse.org/legal/epl-v10.html". sl@0: // sl@0: // Initial Contributors: sl@0: // Nokia Corporation - initial contribution. sl@0: // sl@0: // Contributors: sl@0: // sl@0: // Description: sl@0: // sl@0: sl@0: #include "mlargemappings.h" sl@0: #include "cache_maintenance.inl" sl@0: sl@0: sl@0: // sl@0: // DLargeMappedMemory sl@0: // sl@0: sl@0: sl@0: DLargeMappedMemory::DLargeMappedMemory(DMemoryManager* aManager, TUint aSizeInPages, TMemoryAttributes aAttributes, TMemoryCreateFlags aCreateFlags) sl@0: : DCoarseMemory(aManager, aSizeInPages, aAttributes, aCreateFlags) sl@0: { sl@0: } sl@0: sl@0: sl@0: DLargeMappedMemory* DLargeMappedMemory::New(DMemoryManager* aManager, TUint aSizeInPages, TMemoryAttributes aAttributes, TMemoryCreateFlags aCreateFlags) sl@0: { sl@0: TRACE(("DLargeMappedMemory::New()")); sl@0: TUint chunkCount = (aSizeInPages + KPagesInPDE - 1) >> KPagesInPDEShift; sl@0: TUint wordCount = (chunkCount + 31) >> 5; sl@0: TUint size = sizeof(DLargeMappedMemory) + sizeof(TUint) * (wordCount - 1); sl@0: DLargeMappedMemory* self = (DLargeMappedMemory*)Kern::AllocZ(size); sl@0: if(self) sl@0: { sl@0: new (self) DLargeMappedMemory(aManager, aSizeInPages, aAttributes, aCreateFlags); sl@0: if(self->Construct()!=KErrNone) sl@0: { sl@0: self->Close(); sl@0: self = NULL; sl@0: } sl@0: } sl@0: TRACE(("DLargeMappedMemory::New() returns 0x%08x", self)); sl@0: return self; sl@0: } sl@0: sl@0: sl@0: DLargeMappedMemory::~DLargeMappedMemory() sl@0: { sl@0: TRACE2(("DLargeMappedMemory[0x%08x]::~DLargeMappedMemory()",this)); sl@0: } sl@0: sl@0: sl@0: DMemoryMapping* DLargeMappedMemory::CreateMapping(TUint aIndex, TUint aCount) sl@0: { sl@0: TRACE(("DLargeMappedMemory[0x%08x]::CreateMapping()",this)); sl@0: if (((aIndex|aCount)&(KChunkMask>>KPageShift))==0) sl@0: return new DLargeMapping(); sl@0: else sl@0: return new DFineMapping(); sl@0: } sl@0: sl@0: sl@0: TInt DLargeMappedMemory::ClaimInitialPages(TLinAddr aBase, sl@0: TUint aSize, sl@0: TMappingPermissions aPermissions, sl@0: TBool aAllowGaps, sl@0: TBool aAllowNonRamPages) sl@0: { sl@0: TRACE(("DLargeMappedMemory[0x%08x]::ClaimInitialPages(0x%08x,0x%08x,0x%08x,%d,%d)", sl@0: this,aBase,aSize,aPermissions,aAllowGaps,aAllowNonRamPages)); sl@0: TInt r = DCoarseMemory::ClaimInitialPages(aBase,aSize,aPermissions,aAllowGaps, sl@0: aAllowNonRamPages); sl@0: if (r != KErrNone) sl@0: return r; sl@0: sl@0: // set initial contiguous state by checking which pages were section mapped by the bootstrap sl@0: MmuLock::Lock(); sl@0: TPde* pPde = Mmu::PageDirectoryEntry(KKernelOsAsid,aBase); sl@0: TUint endChunk = aSize >> KChunkShift; sl@0: for (TUint chunk = 0 ; chunk < endChunk ; ++chunk) sl@0: { sl@0: SetChunkContiguous(chunk, Mmu::PdeMapsSection(*pPde++)); sl@0: TRACE((" chunk %d contiguous state is %d", chunk, IsChunkContiguous(chunk))); sl@0: } sl@0: MmuLock::Unlock(); sl@0: sl@0: return KErrNone; sl@0: } sl@0: sl@0: sl@0: TInt DLargeMappedMemory::MapPages(RPageArray::TIter aPages) sl@0: { sl@0: TRACE2(("DLargeMappedMemory[0x%08x]::MapPages(?) index=0x%x count=0x%x",this,aPages.Index(),aPages.Count())); sl@0: sl@0: // for now: assert pages do not overlapped a contiguous area sl@0: // todo: update contiguous state, update page tables and call MapPages on large mappings sl@0: #ifdef _DEBUG sl@0: for (TUint index = aPages.Index() ; index < aPages.IndexEnd() ; index += KPagesInPDE) sl@0: { sl@0: MmuLock::Lock(); sl@0: __NK_ASSERT_DEBUG(!IsChunkContiguous(index >> KPagesInPDEShift)); sl@0: MmuLock::Unlock(); sl@0: } sl@0: #endif sl@0: sl@0: // map pages in all page tables and fine mappings sl@0: return DCoarseMemory::MapPages(aPages); sl@0: } sl@0: sl@0: sl@0: void DLargeMappedMemory::RemapPage(TPhysAddr& aPageArray, TUint aIndex, TBool aInvalidateTLB) sl@0: { sl@0: TRACE2(("DLargeMappedMemory[0x%08x]::RemapPage() index=0x%x",this, aIndex)); sl@0: sl@0: // update contiguous state... sl@0: // todo: for now we will assume that remapping a page makes it non-contiguous sl@0: MmuLock::Lock(); sl@0: SetChunkContiguous(aIndex >> KPagesInPDEShift, EFalse); sl@0: MmuLock::Unlock(); sl@0: sl@0: // remap pages in all page tables and call RemapPage on large mappings... sl@0: MmuLock::Lock(); sl@0: TUint pteType = 0; sl@0: do sl@0: { sl@0: DPageTables* tables = iPageTables[pteType]; sl@0: if(tables) sl@0: { sl@0: tables->Open(); sl@0: MmuLock::Unlock(); sl@0: tables->RemapPage(aPageArray, aIndex, aInvalidateTLB); sl@0: tables->iMappings.RemapPage(aPageArray, aIndex, aInvalidateTLB); sl@0: tables->AsyncClose(); sl@0: MmuLock::Lock(); sl@0: } sl@0: } sl@0: while(++pteType> KPagesInPDEShift)); sl@0: MmuLock::Unlock(); sl@0: } sl@0: #endif sl@0: sl@0: // unmap pages in all page tables and fine mappings sl@0: DCoarseMemory::UnmapPages(aPages, aDecommitting); sl@0: } sl@0: sl@0: sl@0: void DLargeMappedMemory::RestrictPages(RPageArray::TIter aPages, TRestrictPagesType aRestriction) sl@0: { sl@0: TRACE2(("DLargeMappedMemory[0x%08x]::RestrictPages(?,%d) index=0x%x count=0x%x",this,aRestriction,aPages.Index(),aPages.Count())); sl@0: sl@0: // assert pages do not overlapped a contiguous area... sl@0: #ifdef _DEBUG sl@0: for (TUint index = aPages.Index() ; index < aPages.IndexEnd() ; index += KPagesInPDE) sl@0: { sl@0: MmuLock::Lock(); sl@0: __NK_ASSERT_DEBUG(!IsChunkContiguous(index >> KPagesInPDEShift)); sl@0: MmuLock::Unlock(); sl@0: } sl@0: #endif sl@0: sl@0: DCoarseMemory::RestrictPages(aPages, aRestriction); sl@0: } sl@0: sl@0: sl@0: TBool DLargeMappedMemory::IsChunkContiguous(TInt aChunkIndex) sl@0: { sl@0: __NK_ASSERT_DEBUG(MmuLock::IsHeld()); sl@0: TUint index = aChunkIndex >> 5; sl@0: TUint mask = 1 << (aChunkIndex & 31); sl@0: return (iContiguousState[index] & mask) != 0; sl@0: } sl@0: sl@0: sl@0: void DLargeMappedMemory::SetChunkContiguous(TInt aChunkIndex, TBool aIsContiguous) sl@0: { sl@0: __NK_ASSERT_DEBUG(MmuLock::IsHeld()); sl@0: TUint index = aChunkIndex >> 5; sl@0: TUint mask = 1 << (aChunkIndex & 31); sl@0: iContiguousState[index] = (iContiguousState[index] & ~mask) | (aIsContiguous ? mask : 0); sl@0: } sl@0: sl@0: sl@0: // sl@0: // DLargeMapping sl@0: // sl@0: sl@0: sl@0: DLargeMapping::DLargeMapping() : DCoarseMapping(ELargeMapping) sl@0: { sl@0: } sl@0: sl@0: sl@0: TInt DLargeMapping::DoMap() sl@0: { sl@0: TRACE(("DLargeMapping[0x%08x]::DoMap()", this)); sl@0: __NK_ASSERT_DEBUG(((iStartIndex|iSizeInPages)&(KChunkMask>>KPageShift))==0); // be extra paranoid about alignment sl@0: sl@0: MmuLock::Lock(); sl@0: sl@0: TPde* pPde = Mmu::PageDirectoryEntry(OsAsid(),Base()); sl@0: DLargeMappedMemory* memory = (DLargeMappedMemory*)Memory(ETrue); // safe because we're called from code which has added mapping to memory sl@0: sl@0: TUint flash = 0; sl@0: TUint chunk = iStartIndex >> KPagesInPDEShift; sl@0: TUint endChunk = (iStartIndex + iSizeInPages) >> KPagesInPDEShift; sl@0: sl@0: while(chunk < endChunk) sl@0: { sl@0: MmuLock::Flash(flash,KMaxPdesInOneGo*2); sl@0: TPde pde = KPdeUnallocatedEntry; sl@0: TPte* pt = memory->GetPageTable(PteType(), chunk); sl@0: if (memory->IsChunkContiguous(chunk)) sl@0: pde = Mmu::PageToSectionEntry(pt[0],iBlankPde); // todo: use get phys addr? sl@0: else if (pt) sl@0: pde = Mmu::PageTablePhysAddr(pt)|iBlankPde; sl@0: sl@0: if (pde == KPdeUnallocatedEntry) sl@0: { sl@0: TRACE2(("!PDE %x=%x (was %x)",pPde,KPdeUnallocatedEntry,*pPde)); sl@0: __NK_ASSERT_DEBUG(*pPde==KPdeUnallocatedEntry); sl@0: } sl@0: else sl@0: { sl@0: TRACE2(("!PDE %x=%x (was %x)",pPde,pde,*pPde)); sl@0: __NK_ASSERT_DEBUG(*pPde==KPdeUnallocatedEntry || ((*pPde^pde)&~KPdeMatchMask)==0); sl@0: *pPde = pde; sl@0: SinglePdeUpdated(pPde); sl@0: flash += 3; // increase flash rate because we've done quite a bit more work sl@0: } sl@0: sl@0: ++pPde; sl@0: ++chunk; sl@0: } sl@0: MmuLock::Unlock(); sl@0: sl@0: return KErrNone; sl@0: } sl@0: sl@0: sl@0: void DLargeMapping::RemapPage(TPhysAddr& aPageArray, TUint aIndex, TUint aMapInstanceCount, TBool aInvalidateTLB) sl@0: { sl@0: TRACE(("DLargeMapping[0x%08x]::RemapPage(%08x, %d, %d, %d)", this, aPageArray, aIndex, aMapInstanceCount, aInvalidateTLB)); sl@0: sl@0: TInt chunkIndex = aIndex >> KPagesInPDEShift; sl@0: sl@0: MmuLock::Lock(); sl@0: DLargeMappedMemory* memory = (DLargeMappedMemory*)Memory(); // safe because we're called from code which has reference on tables, which has reference on memory sl@0: TPte* pt = memory->GetPageTable(PteType(), chunkIndex); sl@0: sl@0: // check the page is still mapped and mapping isn't being detached sl@0: // or hasn't been reused for another purpose... sl@0: if(!pt || BeingDetached() || aMapInstanceCount != MapInstanceCount()) sl@0: { sl@0: // can't map pages to this mapping any more so just exit. sl@0: TRACE((" page no longer mapped")); sl@0: MmuLock::Unlock(); sl@0: return; sl@0: } sl@0: sl@0: TPde* pPde = Mmu::PageDirectoryEntry(OsAsid(),Base() + (chunkIndex << KChunkShift)); sl@0: TPde currentPde = *pPde; sl@0: sl@0: if (!memory->IsChunkContiguous(chunkIndex) && Mmu::PdeMapsSection(currentPde)) sl@0: { sl@0: // break section mapping and replace with page table... sl@0: TRACE2((" breaking section mapping")); sl@0: TPde pde = Mmu::PageTablePhysAddr(pt)|iBlankPde; sl@0: TRACE2(("!PDE %x=%x (was %x)",pPde,pde,*pPde)); sl@0: // can't assert old value if the first page has been remapped sl@0: __NK_ASSERT_DEBUG((aIndex & (KPagesInPDE - 1)) == 0 || sl@0: *pPde == Mmu::PageToSectionEntry(pt[0],iBlankPde)); sl@0: *pPde = pde; sl@0: SinglePdeUpdated(pPde); sl@0: MmuLock::Unlock(); sl@0: #ifndef COARSE_GRAINED_TLB_MAINTENANCE sl@0: if (aInvalidateTLB) sl@0: { sl@0: // invalidate chunk... sl@0: TUint start = (chunkIndex << KPagesInPDEShift) - iStartIndex; sl@0: TLinAddr addr = LinAddrAndOsAsid() + (start << KPageShift); sl@0: TLinAddr endAddr = addr + KChunkSize; sl@0: do InvalidateTLBForPage(addr); sl@0: while((addr+=KPageSize)IsChunkContiguous(chunkIndex) && Mmu::PdeMapsPageTable(currentPde)) sl@0: { sl@0: // reform section mapping... sl@0: TRACE2((" reforming section mapping")); sl@0: __NK_ASSERT_ALWAYS(0); // todo: not yet implemented sl@0: } sl@0: else sl@0: { sl@0: // remap already handled by page table update in DPageTables... sl@0: MmuLock::Unlock(); sl@0: #ifndef COARSE_GRAINED_TLB_MAINTENANCE sl@0: if (aInvalidateTLB) sl@0: { sl@0: // invalidate page... sl@0: TUint start = aIndex - iStartIndex; sl@0: TLinAddr addr = LinAddrAndOsAsid() + (start << KPageShift); sl@0: InvalidateTLBForPage(addr); sl@0: } sl@0: #endif sl@0: } sl@0: sl@0: } sl@0: sl@0: sl@0: TInt DLargeMapping::PageIn(RPageArray::TIter aPages, TPinArgs& aPinArgs, TUint aMapInstanceCount) sl@0: { sl@0: TRACE(("DLargeMapping[0x%08x]::PageIn(%d, %d, ?, %d)", this, aPages.Index(), aPages.Count(), aMapInstanceCount)); sl@0: #ifdef _DEBUG sl@0: // assert that we're not trying to page in any section mapped pages sl@0: TUint startIndex = aPages.Index(); sl@0: TUint endIndex = startIndex + aPages.Count(); sl@0: for (TUint index = startIndex ; index < endIndex ; index += KPagesInPDE) sl@0: { sl@0: TLinAddr addr = Base() + ((index - iStartIndex) << KPageShift); sl@0: TRACE2((" checking page %d at %08x", index, addr)); sl@0: TPde* pPde = Mmu::PageDirectoryEntry(OsAsid(),addr); sl@0: __NK_ASSERT_DEBUG(!Mmu::PdeMapsSection(*pPde)); sl@0: } sl@0: #endif sl@0: return DCoarseMapping::PageIn(aPages, aPinArgs, aMapInstanceCount); sl@0: } sl@0: sl@0: sl@0: TBool DLargeMapping::MovingPageIn(TPhysAddr& aPageArrayPtr, TUint aIndex) sl@0: { sl@0: // this shouldn't ever be called as it's only used by ram defrag sl@0: __NK_ASSERT_DEBUG(EFalse); sl@0: return EFalse; sl@0: } sl@0: sl@0: sl@0: TPte* DLargeMapping::FindPageTable(TLinAddr aLinAddr, TUint aMemoryIndex) sl@0: { sl@0: // this shouldn't ever be called as it's only used by ram defrag sl@0: __NK_ASSERT_DEBUG(EFalse); sl@0: return NULL; sl@0: }