Update contrib.
// Copyright (c) 2007-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//

#include "mlargemappings.h"
#include "cache_maintenance.inl"
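
//
// DLargeMappedMemory
//
// A specialisation of DCoarseMemory which additionally tracks, for each
// PDE-sized chunk, whether the chunk's pages are physically contiguous.
// Chunks marked contiguous can be mapped by DLargeMapping with a single
// section PDE instead of a page table.
//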
DLargeMappedMemory::DLargeMappedMemory(DMemoryManager* aManager, TUint aSizeInPages, TMemoryAttributes aAttributes, TMemoryCreateFlags aCreateFlags)
	: DCoarseMemory(aManager, aSizeInPages, aAttributes, aCreateFlags)
	{
	}

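// The object is allocated with enough additional space to hold the
// contiguous-state bitmap: one bit per PDE-sized chunk, rounded up to whole
// 32-bit words. The first word appears to be the iContiguousState member
// already counted in sizeof(DLargeMappedMemory), hence "wordCount - 1" below.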
DLargeMappedMemory* DLargeMappedMemory::New(DMemoryManager* aManager, TUint aSizeInPages, TMemoryAttributes aAttributes, TMemoryCreateFlags aCreateFlags)
	{
	TRACE(("DLargeMappedMemory::New()"));
	TUint chunkCount = (aSizeInPages + KPagesInPDE - 1) >> KPagesInPDEShift;
	TUint wordCount = (chunkCount + 31) >> 5;
	TUint size = sizeof(DLargeMappedMemory) + sizeof(TUint) * (wordCount - 1);
	DLargeMappedMemory* self = (DLargeMappedMemory*)Kern::AllocZ(size);
	if(self)
		{
		new (self) DLargeMappedMemory(aManager, aSizeInPages, aAttributes, aCreateFlags);
		if(self->Construct()!=KErrNone)
			{
			self->Close();
			self = NULL;
			}
		}
	TRACE(("DLargeMappedMemory::New() returns 0x%08x", self));
	return self;
	}

DLargeMappedMemory::~DLargeMappedMemory()
	{
	TRACE2(("DLargeMappedMemory[0x%08x]::~DLargeMappedMemory()",this));
	}

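// Use a DLargeMapping only when the requested region is chunk-aligned in both
// start index and page count; anything unaligned falls back to an ordinary
// fine mapping, which always maps through page tables.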
DMemoryMapping* DLargeMappedMemory::CreateMapping(TUint aIndex, TUint aCount)
	{
	TRACE(("DLargeMappedMemory[0x%08x]::CreateMapping()",this));
	if (((aIndex|aCount)&(KChunkMask>>KPageShift))==0)
		return new DLargeMapping();
	else
		return new DFineMapping();
	}

TInt DLargeMappedMemory::ClaimInitialPages(TLinAddr aBase,
										   TUint aSize,
										   TMappingPermissions aPermissions,
										   TBool aAllowGaps,
										   TBool aAllowNonRamPages)
	{
	TRACE(("DLargeMappedMemory[0x%08x]::ClaimInitialPages(0x%08x,0x%08x,0x%08x,%d,%d)",
		   this,aBase,aSize,aPermissions,aAllowGaps,aAllowNonRamPages));
	TInt r = DCoarseMemory::ClaimInitialPages(aBase,aSize,aPermissions,aAllowGaps,
											  aAllowNonRamPages);
	if (r == KErrNone)
		{
		// set initial contiguous state by checking which pages were section mapped by the bootstrap
		MmuLock::Lock();
		TPde* pPde = Mmu::PageDirectoryEntry(KKernelOsAsid,aBase);
		TUint endChunk = aSize >> KChunkShift;
		for (TUint chunk = 0 ; chunk < endChunk ; ++chunk)
			{
			SetChunkContiguous(chunk, Mmu::PdeMapsSection(*pPde++));
			TRACE((" chunk %d contiguous state is %d", chunk, IsChunkContiguous(chunk)));
			}
		MmuLock::Unlock();
		}
	return r;
	}

TInt DLargeMappedMemory::MapPages(RPageArray::TIter aPages)
	{
	TRACE2(("DLargeMappedMemory[0x%08x]::MapPages(?) index=0x%x count=0x%x",this,aPages.Index(),aPages.Count()));

	// for now: assert pages do not overlap a contiguous area
	// todo: update contiguous state, update page tables and call MapPages on large mappings
	MmuLock::Lock();
	for (TUint index = aPages.Index() ; index < aPages.IndexEnd() ; index += KPagesInPDE)
		{
		__NK_ASSERT_DEBUG(!IsChunkContiguous(index >> KPagesInPDEShift));
		}
	MmuLock::Unlock();

	// map pages in all page tables and fine mappings
	return DCoarseMemory::MapPages(aPages);
	}

void DLargeMappedMemory::RemapPage(TPhysAddr& aPageArray, TUint aIndex, TBool aInvalidateTLB)
	{
	TRACE2(("DLargeMappedMemory[0x%08x]::RemapPage() index=0x%x",this, aIndex));

	MmuLock::Lock();

	// update contiguous state...
	// todo: for now we assume that remapping a page makes its chunk non-contiguous
	SetChunkContiguous(aIndex >> KPagesInPDEShift, EFalse);

	// remap pages in all page tables and call RemapPage on large mappings...
	TUint pteType = 0;
	do
		{
		DPageTables* tables = iPageTables[pteType];
		if(tables)
			{
			tables->Open();
			MmuLock::Unlock();
			tables->RemapPage(aPageArray, aIndex, aInvalidateTLB);
			tables->iMappings.RemapPage(aPageArray, aIndex, aInvalidateTLB);
			tables->AsyncClose();
			MmuLock::Lock();
			}
		}
	while(++pteType<ENumPteTypes);
	MmuLock::Unlock();

	// remap page in all fine mappings...
	DMemoryObject::RemapPage(aPageArray, aIndex, aInvalidateTLB);
	}

void DLargeMappedMemory::UnmapPages(RPageArray::TIter aPages, TBool aDecommitting)
	{
	TRACE2(("DLargeMappedMemory[0x%08x]::UnmapPages(?,%d) index=0x%x count=0x%x",this,(bool)aDecommitting,aPages.Index(),aPages.Count()));

	// for now: assert pages do not overlap a contiguous area
	// todo: update contiguous state, update page tables and call UnmapPages on large mappings
	MmuLock::Lock();
	for (TUint index = aPages.Index() ; index < aPages.IndexEnd() ; index += KPagesInPDE)
		{
		__NK_ASSERT_DEBUG(!IsChunkContiguous(index >> KPagesInPDEShift));
		}
	MmuLock::Unlock();

	// unmap pages in all page tables and fine mappings
	DCoarseMemory::UnmapPages(aPages, aDecommitting);
	}

void DLargeMappedMemory::RestrictPages(RPageArray::TIter aPages, TRestrictPagesType aRestriction)
	{
	TRACE2(("DLargeMappedMemory[0x%08x]::RestrictPages(?,%d) index=0x%x count=0x%x",this,aRestriction,aPages.Index(),aPages.Count()));

	// assert pages do not overlap a contiguous area...
	MmuLock::Lock();
	for (TUint index = aPages.Index() ; index < aPages.IndexEnd() ; index += KPagesInPDE)
		{
		__NK_ASSERT_DEBUG(!IsChunkContiguous(index >> KPagesInPDEShift));
		}
	MmuLock::Unlock();

	DCoarseMemory::RestrictPages(aPages, aRestriction);
	}

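// The contiguous state is a bitmap with one bit per PDE-sized chunk: word
// aChunkIndex >> 5, bit aChunkIndex & 31. Both accessors require the MmuLock
// to be held so the bitmap and the page directory stay in sync.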
TBool DLargeMappedMemory::IsChunkContiguous(TInt aChunkIndex)
	{
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
	TUint index = aChunkIndex >> 5;
	TUint mask = 1 << (aChunkIndex & 31);
	return (iContiguousState[index] & mask) != 0;
	}

void DLargeMappedMemory::SetChunkContiguous(TInt aChunkIndex, TBool aIsContiguous)
	{
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
	TUint index = aChunkIndex >> 5;
	TUint mask = 1 << (aChunkIndex & 31);
	iContiguousState[index] = (iContiguousState[index] & ~mask) | (aIsContiguous ? mask : 0);
	}

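//
// DLargeMapping
//
// A coarse mapping which maps chunks marked contiguous with a single section
// PDE and everything else through the usual page tables.
//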
DLargeMapping::DLargeMapping() : DCoarseMapping(ELargeMapping)
	{
	}

TInt DLargeMapping::DoMap()
	{
	TRACE(("DLargeMapping[0x%08x]::DoMap()", this));
	__NK_ASSERT_DEBUG(((iStartIndex|iSizeInPages)&(KChunkMask>>KPageShift))==0); // be extra paranoid about alignment

	MmuLock::Lock();

	TPde* pPde = Mmu::PageDirectoryEntry(OsAsid(),Base());
	DLargeMappedMemory* memory = (DLargeMappedMemory*)Memory(ETrue); // safe because we're called from code which has added mapping to memory

	TUint flash = 0;
	TUint chunk = iStartIndex >> KPagesInPDEShift;
	TUint endChunk = (iStartIndex + iSizeInPages) >> KPagesInPDEShift;

	while(chunk < endChunk)
		{
		MmuLock::Flash(flash,KMaxPdesInOneGo*2);
		TPde pde = KPdeUnallocatedEntry;
		TPte* pt = memory->GetPageTable(PteType(), chunk);
		if (memory->IsChunkContiguous(chunk))
			pde = Mmu::PageToSectionEntry(pt[0],iBlankPde); // todo: use get phys addr?
		else if (pt)
			pde = Mmu::PageTablePhysAddr(pt)|iBlankPde;

		if (pde == KPdeUnallocatedEntry)
			{
			TRACE2(("!PDE %x=%x (was %x)",pPde,KPdeUnallocatedEntry,*pPde));
			__NK_ASSERT_DEBUG(*pPde==KPdeUnallocatedEntry);
			}
		else
			{
			TRACE2(("!PDE %x=%x (was %x)",pPde,pde,*pPde));
			__NK_ASSERT_DEBUG(*pPde==KPdeUnallocatedEntry || ((*pPde^pde)&~KPdeMatchMask)==0);
			*pPde = pde;
			SinglePdeUpdated(pPde);
			flash += 3; // increase flash rate because we've done quite a bit more work
			}

		++pPde;
		++chunk;
		}

	MmuLock::Unlock();
	return KErrNone;
	}

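// RemapPage has three cases to handle: the remapped page may break the
// contiguity of a section-mapped chunk (replace the section PDE with a page
// table mapping), it may restore contiguity (reforming a section mapping is
// not yet implemented), or it may leave the chunk's mapping type unchanged,
// in which case the page table update in DPageTables has already done the work.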
void DLargeMapping::RemapPage(TPhysAddr& aPageArray, TUint aIndex, TUint aMapInstanceCount, TBool aInvalidateTLB)
	{
	TRACE(("DLargeMapping[0x%08x]::RemapPage(%08x, %d, %d, %d)", this, aPageArray, aIndex, aMapInstanceCount, aInvalidateTLB));

	TInt chunkIndex = aIndex >> KPagesInPDEShift;

	MmuLock::Lock();
	DLargeMappedMemory* memory = (DLargeMappedMemory*)Memory(); // safe because we're called from code which has a reference on tables, which has a reference on memory
	TPte* pt = memory->GetPageTable(PteType(), chunkIndex);

	// check the page is still mapped and the mapping isn't being detached
	// or hasn't been reused for another purpose...
	if(!pt || BeingDetached() || aMapInstanceCount != MapInstanceCount())
		{
		// can't map pages to this mapping any more so just exit.
		TRACE((" page no longer mapped"));
		MmuLock::Unlock();
		return;
		}

	TPde* pPde = Mmu::PageDirectoryEntry(OsAsid(),Base() + (chunkIndex << KChunkShift));
	TPde currentPde = *pPde;

	if (!memory->IsChunkContiguous(chunkIndex) && Mmu::PdeMapsSection(currentPde))
		{
		// break section mapping and replace with page table...
		TRACE2((" breaking section mapping"));
		TPde pde = Mmu::PageTablePhysAddr(pt)|iBlankPde;
		TRACE2(("!PDE %x=%x (was %x)",pPde,pde,*pPde));
		// can't assert the old value if the first page has been remapped
		__NK_ASSERT_DEBUG((aIndex & (KPagesInPDE - 1)) == 0 ||
						  *pPde == Mmu::PageToSectionEntry(pt[0],iBlankPde));
		*pPde = pde;
		SinglePdeUpdated(pPde);
		MmuLock::Unlock();

#ifndef COARSE_GRAINED_TLB_MAINTENANCE
		if (aInvalidateTLB)
			{
			// invalidate the whole chunk...
			TUint start = (chunkIndex << KPagesInPDEShift) - iStartIndex;
			TLinAddr addr = LinAddrAndOsAsid() + (start << KPageShift);
			TLinAddr endAddr = addr + KChunkSize;
			do InvalidateTLBForPage(addr);
			while((addr+=KPageSize)<endAddr);
			}
#endif
		}
	else if (memory->IsChunkContiguous(chunkIndex) && Mmu::PdeMapsPageTable(currentPde))
		{
		// reform section mapping...
		TRACE2((" reforming section mapping"));
		__NK_ASSERT_ALWAYS(0); // todo: not yet implemented
		}
	else
		{
		// remap already handled by the page table update in DPageTables...
		MmuLock::Unlock();

#ifndef COARSE_GRAINED_TLB_MAINTENANCE
		if (aInvalidateTLB)
			{
			// invalidate the single page...
			TUint start = aIndex - iStartIndex;
			TLinAddr addr = LinAddrAndOsAsid() + (start << KPageShift);
			InvalidateTLBForPage(addr);
			}
#endif
		}
	}

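// Paging in goes through the page tables, so it is only valid for chunks that
// are not currently section mapped; the loop below asserts this before
// delegating to DCoarseMapping::PageIn.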
TInt DLargeMapping::PageIn(RPageArray::TIter aPages, TPinArgs& aPinArgs, TUint aMapInstanceCount)
	{
	TRACE(("DLargeMapping[0x%08x]::PageIn(%d, %d, ?, %d)", this, aPages.Index(), aPages.Count(), aMapInstanceCount));

	// assert that we're not trying to page in any section-mapped pages
	TUint startIndex = aPages.Index();
	TUint endIndex = startIndex + aPages.Count();
	for (TUint index = startIndex ; index < endIndex ; index += KPagesInPDE)
		{
		TLinAddr addr = Base() + ((index - iStartIndex) << KPageShift);
		TRACE2((" checking page %d at %08x", index, addr));
		TPde* pPde = Mmu::PageDirectoryEntry(OsAsid(),addr);
		__NK_ASSERT_DEBUG(!Mmu::PdeMapsSection(*pPde));
		}

	return DCoarseMapping::PageIn(aPages, aPinArgs, aMapInstanceCount);
	}

TBool DLargeMapping::MovingPageIn(TPhysAddr& aPageArrayPtr, TUint aIndex)
	{
	// this shouldn't ever be called as it's only used by ram defrag
	__NK_ASSERT_DEBUG(EFalse);
	return EFalse;
	}

TPte* DLargeMapping::FindPageTable(TLinAddr aLinAddr, TUint aMemoryIndex)
	{
	// this shouldn't ever be called as it's only used by ram defrag
	__NK_ASSERT_DEBUG(EFalse);
	return NULL;
	}