// Copyright (c) 2007-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
#include <plat_priv.h>
#include "cache_maintenance.inl"

const TUint KMaxMappingsInOneGo = KMaxPageInfoUpdatesInOneGo; // must be power-of-2
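// A sketch of why the power-of-2 requirement matters (an assumption based on
// how MmuLock::Flash(flash,KMaxMappingsInOneGo) is used below, not something
// this file states): with a power-of-2 bound, the periodic "flash the lock"
// test can be a cheap mask instead of a modulo, e.g.
//
//     TUint flash = 0;
//     // ... per-mapping work ...
//     if((++flash & (KMaxMappingsInOneGo-1)) == 0)
//         { /* briefly release and re-acquire MmuLock to bound hold time */ }
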
/**
The mutex pool used to assign locks to memory objects.
@see #MemoryObjectLock.
*/
DMutexPool MemoryObjectMutexPool;
void MemoryObjectLock::Lock(DMemoryObject* aMemory)
    {
    TRACE2(("MemoryObjectLock::Lock(0x%08x) try",aMemory));
    MemoryObjectMutexPool.Wait(aMemory->iLock);
    TRACE2(("MemoryObjectLock::Lock(0x%08x) acquired",aMemory));
    }

void MemoryObjectLock::Unlock(DMemoryObject* aMemory)
    {
    TRACE2(("MemoryObjectLock::Unlock(0x%08x)",aMemory));
    MemoryObjectMutexPool.Signal(aMemory->iLock);
    }

TBool MemoryObjectLock::IsHeld(DMemoryObject* aMemory)
    {
    return MemoryObjectMutexPool.IsHeld(aMemory->iLock);
    }
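// Illustrative usage sketch (not from this file): callers bracket operations
// which mutate a memory object with the pool-assigned lock, e.g.
//
//     MemoryObjectLock::Lock(memory);
//     // ... commit or decommit pages, change restrictions, etc ...
//     __NK_ASSERT_DEBUG(MemoryObjectLock::IsHeld(memory));
//     MemoryObjectLock::Unlock(memory);
//
// Because the mutexes come from DMutexPool, two unrelated memory objects may
// transiently share the same underlying DMutex.
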
DMemoryObject::DMemoryObject(DMemoryManager* aManager, TUint aFlags, TUint aSizeInPages, TMemoryAttributes aAttributes, TMemoryCreateFlags aCreateFlags)
    : iManager(aManager), iFlags(aFlags), iAttributes(Mmu::CanonicalMemoryAttributes(aAttributes)),
      iSizeInPages(aSizeInPages)
    {
    __ASSERT_COMPILE(EMemoryAttributeMask<0x100); // make sure aAttributes fits into a TUint8

    TMemoryType type = (TMemoryType)(aAttributes&EMemoryAttributeTypeMask);
    iRamAllocFlags = type;
    if(aCreateFlags&EMemoryCreateNoWipe)
        iRamAllocFlags |= Mmu::EAllocNoWipe;
    else if(aCreateFlags&EMemoryCreateUseCustomWipeByte)
        {
        TUint8 wipeByte = (aCreateFlags>>EMemoryCreateWipeByteShift)&0xff;
        iRamAllocFlags |= wipeByte<<Mmu::EAllocWipeByteShift;
        iRamAllocFlags |= Mmu::EAllocUseCustomWipeByte;
        }

    if(aCreateFlags&EMemoryCreateDemandPaged)
        iFlags |= EDemandPaged;
    if(aCreateFlags&EMemoryCreateReserveAllResources)
        iFlags |= EReserveResources;
    if(aCreateFlags&EMemoryCreateDisallowPinning)
        iFlags |= EDenyPinning;
    if(aCreateFlags&EMemoryCreateReadOnly)
        iFlags |= EDenyWriteMappings;
    if(!(aCreateFlags&EMemoryCreateAllowExecution))
        iFlags |= EDenyExecuteMappings;
    }
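// Worked example (illustrative): requesting a custom wipe byte of 0x5a packs
// the byte into the create flags, and the constructor above unpacks it into
// iRamAllocFlags for the RAM allocator:
//
//     TMemoryCreateFlags flags = (TMemoryCreateFlags)
//         (EMemoryCreateUseCustomWipeByte | (0x5a<<EMemoryCreateWipeByteShift));
//     // constructor then does:
//     //     iRamAllocFlags |= 0x5a<<Mmu::EAllocWipeByteShift;
//     //     iRamAllocFlags |= Mmu::EAllocUseCustomWipeByte;
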
TInt DMemoryObject::Construct()
    {
    TBool preAllocateMemory = iFlags&(EReserveResources|EDemandPaged);
    TInt r = iPages.Construct(iSizeInPages,preAllocateMemory);
    return r;
    }
DMemoryObject::~DMemoryObject()
    {
    TRACE(("DMemoryObject[0x%08x]::~DMemoryObject()",this));
    __NK_ASSERT_DEBUG(iMappings.IsEmpty());
    }
TBool DMemoryObject::CheckRegion(TUint aIndex, TUint aCount)
    {
    TUint end = aIndex+aCount;
    return end>=aIndex && end<=iSizeInPages;
    }
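// CheckRegion relies on unsigned wrap-around to reject overflowing regions:
// if aIndex+aCount wraps, 'end' is smaller than aIndex and the end>=aIndex
// test fails. E.g. with 32-bit TUint, aIndex=0xFFFFFFF0 and aCount=0x20 give
// end=0x10, so the region is rejected whatever iSizeInPages is.
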
void DMemoryObject::ClipRegion(TUint& aIndex, TUint& aCount)
    {
    TUint end = aIndex+aCount;
    if(end<aIndex) // overflow?
        end = ~0u;
    if(aIndex>iSizeInPages)
        aIndex = iSizeInPages;
    if(end>iSizeInPages)
        end = iSizeInPages;
    aCount = end-aIndex;
    }
void DMemoryObject::SetLock(DMutex* aLock)
    {
    __NK_ASSERT_DEBUG(!iLock);
    iLock = aLock;
    TRACE(("MemoryObject[0x%08x]::SetLock(0x%08x) \"%O\"",this,aLock,aLock));
    }
DMemoryMapping* DMemoryObject::CreateMapping(TUint, TUint)
    {
    return new DFineMapping();
    }
TInt DMemoryObject::MapPages(RPageArray::TIter aPages)
    TRACE2(("DMemoryObject[0x%08x]::MapPages(?) index=0x%x count=0x%x",this,aPages.Index(),aPages.Count()));

    TUint offset = aPages.Index();
    TUint offsetEnd = aPages.IndexEnd();

    TMappingListIter iter;
    DMemoryMappingBase* mapping = iter.Start(iMappings);
    if(mapping->IsPinned())
        // pinned mappings don't change, so nothing to do...

    // get region where pages overlap the mapping...
    TUint start = mapping->iStartIndex;
    TUint end = start+mapping->iSizeInPages;
    // the mapping doesn't contain the pages...

    // map pages in the mapping...
    TUint mapInstanceCount = mapping->MapInstanceCount();
    r = mapping->MapPages(aPages.Slice(start,end),mapInstanceCount);
    mapping->AsyncClose();
    mapping = iter.Next();

void DMemoryObject::RemapPage(TPhysAddr& aPageArray, TUint aIndex, TBool aInvalidateTLB)
    TRACE2(("DMemoryObject[0x%08x]::RemapPage(0x%x,%d,%d)",this,aPageArray,aIndex,aInvalidateTLB));
    iMappings.RemapPage(aPageArray, aIndex, aInvalidateTLB);
#ifdef COARSE_GRAINED_TLB_MAINTENANCE
void DMemoryObject::UnmapPages(RPageArray::TIter aPages, TBool aDecommitting)
    TRACE2(("DMemoryObject[0x%08x]::UnmapPages(?,%d) index=0x%x count=0x%x",this,(bool)aDecommitting,aPages.Index(),aPages.Count()));

    TUint offset = aPages.Index();
    TUint offsetEnd = aPages.IndexEnd();
    if(offset==offsetEnd)

    TMappingListIter iter;
    DMemoryMappingBase* mapping = iter.Start(iMappings);
    // get region where pages overlap the mapping...
    TUint start = mapping->iStartIndex;
    TUint end = start+mapping->iSizeInPages;
    // the mapping doesn't contain the pages...

    RPageArray::TIter pages = aPages.Slice(start,end);
    if(mapping->IsPinned())
        // pinned mappings veto page unmapping...
        __e32_atomic_ior_ord8(&mapping->Flags(), (TUint8)DMemoryMapping::EPageUnmapVetoed);
        TRACE2(("DFineMemoryMapping[0x%08x] veto UnmapPages, index=0x%x count=0x%x",mapping,pages.Index(),pages.Count()));

    // unmap pages in the mapping...
    TUint mapInstanceCount = mapping->MapInstanceCount();
    mapping->UnmapPages(pages,mapInstanceCount);
    mapping->AsyncClose();
    mapping = iter.Next();

#ifdef COARSE_GRAINED_TLB_MAINTENANCE
void DMemoryObject::RestrictPages(RPageArray::TIter aPages, TRestrictPagesType aRestriction)
    TRACE2(("DMemoryObject[0x%08x]::RestrictPages(?,%d) index=0x%x count=0x%x",this,aRestriction,aPages.Index(),aPages.Count()));

    TUint offset = aPages.Index();
    TUint offsetEnd = aPages.IndexEnd();
    if(offset==offsetEnd)

    TMappingListIter iter;
    DMemoryMappingBase* mapping = iter.Start(iMappings);
    // get region where pages overlap the mapping...
    TUint start = mapping->iStartIndex;
    TUint end = start+mapping->iSizeInPages;
    // the mapping doesn't contain the pages...

    RPageArray::TIter pages = aPages.Slice(start,end);
    if(mapping->IsPhysicalPinning() ||
       (!(aRestriction & ERestrictPagesForMovingFlag) && mapping->IsPinned()))
        // Pinned mappings veto page restrictions except for page moving
        // where only physically pinned mappings block page moving.
        TRACE2(("DFineMemoryMapping[0x%08x] veto RestrictPages, index=0x%x count=0x%x",mapping,pages.Index(),pages.Count()));
        pages.VetoRestrict(aRestriction & ERestrictPagesForMovingFlag);
        // Mappings lock required for iter.Finish() as iter will be removed from the mappings list.

    // pages not pinned so do they need restricting...
    if(aRestriction == ERestrictPagesForMovingFlag)
        // nothing to do when just checking for pinned mappings for
        // page moving purposes and not restricting to NA.

    // restrict pages in the mapping...
    TUint mapInstanceCount = mapping->MapInstanceCount();
    mapping->RestrictPagesNA(pages, mapInstanceCount);
    mapping->AsyncClose();
    mapping = iter.Next();

    if(aRestriction & ERestrictPagesForMovingFlag)
        {// Clear the mappings added flag so page moving can detect whether any
        // new mappings have been added.
        ClearMappingAddedFlag();

#ifdef COARSE_GRAINED_TLB_MAINTENANCE
    // Writable memory objects will have been restricted no access so invalidate TLB.
    if (aRestriction != ERestrictPagesForMovingFlag)
TInt DMemoryObject::CheckNewMapping(DMemoryMappingBase* aMapping)
    {
    if(iFlags&EDenyPinning && aMapping->IsPinned())
        return KErrAccessDenied;
    if(iFlags&EDenyMappings)
        return KErrAccessDenied;
    if(iFlags&EDenyWriteMappings && !aMapping->IsReadOnly())
        return KErrAccessDenied;
#ifdef MMU_SUPPORTS_EXECUTE_NEVER
    if((iFlags&EDenyExecuteMappings) && aMapping->IsExecutable())
        return KErrAccessDenied;
#endif
    return KErrNone;
    }
TInt DMemoryObject::AddMapping(DMemoryMappingBase* aMapping)
    __NK_ASSERT_DEBUG(!aMapping->IsCoarse());

    // check mapping allowed...
    TInt r = CheckNewMapping(aMapping);
    aMapping->LinkToMemory(this, iMappings);
    TRACE(("DMemoryObject[0x%08x]::AddMapping(0x%08x) returns %d", this, aMapping, r));

void DMemoryObject::RemoveMapping(DMemoryMappingBase* aMapping)
    {
    aMapping->UnlinkFromMemory(iMappings);
    }
TInt DMemoryObject::SetReadOnly()
    TRACE(("DMemoryObject[0x%08x]::SetReadOnly()",this));
    __NK_ASSERT_DEBUG(MemoryObjectLock::IsHeld(this));

    if (iFlags & EDenyWriteMappings)
        {// The object is already read only.

    TMappingListIter iter;
    DMemoryMappingBase* mapping = iter.Start(iMappings);
    if (!mapping->IsReadOnly())
    // This will flash iMappings.Lock to stop it being held too long.
    // This is safe as new mappings will be added to the end of the list so we
    // won't miss any.
    mapping = iter.Next();

    // Block any writable mapping from being added to this memory object.
    // Use atomic operation as iMappings.Lock protects EDenyWriteMappings
    // but not the whole word.
    __e32_atomic_ior_ord8(&iFlags, (TUint8)EDenyWriteMappings);
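// Sketch of the flag-update rule used above (illustrative): iFlags is one
// byte of flag bits whose individual bits are guarded by different locks, so
// a plain read-modify-write could lose a concurrent update to another bit.
// The atomic OR makes the byte update safe without owning every lock:
//
//     __e32_atomic_ior_ord8(&iFlags, (TUint8)EDenyWriteMappings);  // safe
//     // iFlags |= EDenyWriteMappings;                             // racy
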
void DMemoryObject::DenyMappings()
    TRACE(("DMemoryObject[0x%08x]::DenyMappings()",this));

    // Use atomic operation as MmuLock protects EDenyMappings
    // but not the whole word.
    __e32_atomic_ior_ord8(&iFlags, (TUint8)EDenyMappings);
TInt DMemoryObject::PhysAddr(TUint aIndex, TUint aCount, TPhysAddr& aPhysicalAddress, TPhysAddr* aPhysicalPageList)
    {
    TRACE2(("DMemoryObject[0x%08x]::PhysAddr(0x%x,0x%x,?,?)",this,aIndex,aCount));
    TInt r = iPages.PhysAddr(aIndex,aCount,aPhysicalAddress,aPhysicalPageList);
    TRACE2(("DMemoryObject[0x%08x]::PhysAddr(0x%x,0x%x,?,?) returns %d aPhysicalAddress=0x%08x",this,aIndex,aCount,r,aPhysicalAddress));
    return r;
    }
void DMemoryObject::BTraceCreate()
    {
    BTraceContext8(BTrace::EFlexibleMemModel,BTrace::EMemoryObjectCreate,this,iSizeInPages);
    }
TUint DMemoryObject::PagingManagerData(TUint aIndex)
    {
    TRACE2(("DMemoryObject[0x%08x]::PagingManagerData(0x%x)",this,aIndex));
    __NK_ASSERT_DEBUG(IsDemandPaged());
    TUint value = iPages.PagingManagerData(aIndex);
    TRACE2(("DMemoryObject[0x%08x]::PagingManagerData(0x%x) returns 0x%x",this,aIndex,value));
    return value;
    }
void DMemoryObject::SetPagingManagerData(TUint aIndex, TUint aValue)
    {
    TRACE(("DMemoryObject[0x%08x]::SetPagingManagerData(0x%x,0x%08x)",this,aIndex,aValue));
    __NK_ASSERT_DEBUG(IsDemandPaged());
    iPages.SetPagingManagerData(aIndex, aValue);
    __NK_ASSERT_DEBUG(iPages.PagingManagerData(aIndex)==aValue);
    }


//
// DCoarseMemory::DPageTables
//
DCoarseMemory::DPageTables::DPageTables(DCoarseMemory* aMemory, TInt aNumPts, TUint aPteType)
    : iMemory(aMemory), iPteType(aPteType), iPermanenceCount(0), iNumPageTables(aNumPts)
    {
    iBlankPte = Mmu::BlankPte(aMemory->Attributes(),aPteType);
    }
DCoarseMemory::DPageTables* DCoarseMemory::DPageTables::New(DCoarseMemory* aMemory, TUint aNumPages, TUint aPteType)
    {
    TRACE2(("DCoarseMemory::DPageTables::New(0x%08x,0x%x,0x%08x)",aMemory, aNumPages, aPteType));
    __NK_ASSERT_DEBUG(MemoryObjectLock::IsHeld(aMemory));
    __NK_ASSERT_DEBUG((aNumPages&(KChunkMask>>KPageShift))==0);
    TUint numPts = aNumPages>>(KChunkShift-KPageShift);
    DPageTables* self = (DPageTables*)Kern::AllocZ(sizeof(DPageTables)+(numPts-1)*sizeof(TPte*));
    if(self)
        {
        new (self) DPageTables(aMemory,numPts,aPteType);
        TInt r = self->Construct();
        if(r!=KErrNone)
            {
            self->Close();
            self = 0;
            }
        }
    TRACE2(("DCoarseMemory::DPageTables::New(0x%08x,0x%x,0x%08x) returns 0x%08x",aMemory, aNumPages, aPteType, self));
    return self;
    }
TInt DCoarseMemory::DPageTables::Construct()
    if(iMemory->IsDemandPaged())
        // do nothing, allow pages to be mapped on demand...

    RPageArray::TIter pageIter;
    iMemory->iPages.FindStart(0,iMemory->iSizeInPages,pageIter);

    // find some pages...
    RPageArray::TIter pageList;
    TUint n = pageIter.Find(pageList);

    r = MapPages(pageList);

    // done with pages...
    pageIter.FindRelease(n);

    iMemory->iPages.FindEnd(0,iMemory->iSizeInPages);
void DCoarseMemory::DPageTables::Close()
    __NK_ASSERT_DEBUG(CheckCloseIsSafe());
    if (__e32_atomic_tas_ord32(&iReferenceCount, 1, -1, 0) != 1)

    DCoarseMemory* memory = iMemory;
    iMemory->iPageTables[iPteType] = 0;

void DCoarseMemory::DPageTables::AsyncClose()
    __NK_ASSERT_DEBUG(CheckAsyncCloseIsSafe());
    if (__e32_atomic_tas_ord32(&iReferenceCount, 1, -1, 0) != 1)

    DCoarseMemory* memory = iMemory;
    iMemory->iPageTables[iPteType] = 0;
    memory->AsyncClose();
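// Reference-count sketch: __e32_atomic_tas_ord32(&iReferenceCount,1,-1,0)
// atomically performs "if(count>=1) count-=1" and returns the old value, so
// exactly one caller sees 1 returned; that caller dropped the last reference
// and must tear the object down, while everyone else returns early:
//
//     if (__e32_atomic_tas_ord32(&iReferenceCount, 1, -1, 0) != 1)
//         return; // another reference remains
//     // last reference: clear iMemory->iPageTables[iPteType] and delete
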
DCoarseMemory::DPageTables::~DPageTables()
    TRACE2(("DCoarseMemory::DPageTables[0x%08x]::~DPageTables()",this));
    __NK_ASSERT_DEBUG(!iMemory);
    __NK_ASSERT_DEBUG(iMappings.IsEmpty());
    while(i<iNumPageTables)
        TPte* pt = iTables[i];
        ::PageTables.Free(pt);
        ::PageTables.Unlock();
TPte* DCoarseMemory::DPageTables::GetOrAllocatePageTable(TUint aChunkIndex)
    {
    __NK_ASSERT_DEBUG(MmuLock::IsHeld());

    TPte* pt = GetPageTable(aChunkIndex);
    if(!pt)
        pt = AllocatePageTable(aChunkIndex, iMemory->IsDemandPaged());

    return pt;
    }
TPte* DCoarseMemory::DPageTables::GetOrAllocatePageTable(TUint aChunkIndex, TPinArgs& aPinArgs)
    __NK_ASSERT_DEBUG(aPinArgs.iPinnedPageTables);

    if(!aPinArgs.HaveSufficientPages(KNumPagesToPinOnePageTable))

    TPte* pt = GetOrAllocatePageTable(aChunkIndex);
    if(pinnedPt && pinnedPt!=pt)
        // previously pinned page table not needed...
        ::PageTables.UnpinPageTable(pinnedPt,aPinArgs);

        // make sure we have memory for next pin attempt...
        aPinArgs.AllocReplacementPages(KNumPagesToPinOnePageTable);
        if(!aPinArgs.HaveSufficientPages(KNumPagesToPinOnePageTable)) // if out of memory...
            // make sure we free any unneeded page table we allocated...
            FreePageTable(aChunkIndex);
            return 0; // out of memory

        // we got a page table and it was pinned...
        *aPinArgs.iPinnedPageTables++ = pt;
        ++aPinArgs.iNumPinnedPageTables;

    // don't pin page table if it's not paged (e.g. unpaged part of ROM)...
    SPageTableInfo* pti = SPageTableInfo::FromPtPtr(pt);
    if(!pti->IsDemandPaged())

    // pin the page table...
    if (::PageTables.PinPageTable(pt,aPinArgs) != KErrNone)
        // Couldn't pin the page table...
        // make sure we free any unneeded page table we allocated...
        FreePageTable(aChunkIndex);
TPte* DCoarseMemory::DPageTables::AllocatePageTable(TUint aChunkIndex, TBool aDemandPaged, TBool aPermanent)
    TRACE2(("DCoarseMemory::DPageTables[0x%08x]::AllocatePageTable(0x%08x,%d,%d)",this,aChunkIndex,aDemandPaged,aPermanent));

    // acquire page table lock...
    // see if we still need to allocate a page table...
    pt = iTables[aChunkIndex];
        // allocate page table...
        pt = ::PageTables.Alloc(aDemandPaged);
            ::PageTables.Unlock();
        AssignPageTable(aChunkIndex,pt);

    // release page table lock...
    ::PageTables.Unlock();
    pt = iTables[aChunkIndex];

    // we have a page table...
        __NK_ASSERT_ALWAYS(!aDemandPaged);
        SPageTableInfo* pti = SPageTableInfo::FromPtPtr(pt);
        pti->IncPermanenceCount();
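// Permanence sketch: each page table's SPageTableInfo carries two counts -
// PageCount (entries currently mapped) and PermanenceCount (holders needing
// the table to exist even when empty). FreePageTable below only frees when
// both are zero, so a permanent page table survives having all of its
// entries unmapped:
//
//     pti->IncPermanenceCount();  // table kept alive even at PageCount()==0
//     ...
//     pti->DecPermanenceCount();  // freeable again once PageCount()==0
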
void DCoarseMemory::DPageTables::AssignPageTable(TUint aChunkIndex, TPte* aPageTable)
    __NK_ASSERT_DEBUG(PageTablesLockIsHeld());

    // get physical address of page table now, this can't change whilst we have the page table allocator mutex...
    TPhysAddr ptPhys = Mmu::PageTablePhysAddr(aPageTable);

    // update mappings with new page table...
    TUint offset = aChunkIndex<<(KChunkShift-KPageShift);
    TMappingListIter iter;
    DMemoryMapping* mapping = (DMemoryMapping*)iter.Start(iMappings);
        TUint size = mapping->iSizeInPages;
        TUint start = offset-mapping->iStartIndex;
        if(start<size && !mapping->BeingDetached())
            // page table is used by this mapping, so set PDE...
            TLinAddr linAddrAndOsAsid = mapping->LinAddrAndOsAsid()+start*KPageSize;
            TPde* pPde = Mmu::PageDirectoryEntry(linAddrAndOsAsid&KPageMask,linAddrAndOsAsid);
            TPde pde = ptPhys|mapping->BlankPde();
#ifdef __USER_MEMORY_GUARDS_ENABLED__
            if (mapping->IsUserMapping())
                pde = PDE_IN_DOMAIN(pde, USER_MEMORY_DOMAIN);
#endif
            TRACE2(("!PDE %x=%x",pPde,pde));
            __NK_ASSERT_DEBUG(((*pPde^pde)&~KPdeMatchMask)==0 || *pPde==KPdeUnallocatedEntry);
            SinglePdeUpdated(pPde);
            ++flash; // increase flash rate because we've done quite a bit more work
        MmuLock::Flash(flash,KMaxMappingsInOneGo);
        mapping = (DMemoryMapping*)iter.Next();

    // next, assign page table to us...
    // NOTE: Must happen before MmuLock is released after reaching the end of the mapping list
    // otherwise it would be possible for a new mapping to be added and mapped before we manage
    // to update iTables with the page table it should use.
    SPageTableInfo* pti = SPageTableInfo::FromPtPtr(aPageTable);
    pti->SetCoarse(iMemory,aChunkIndex,iPteType);
    __NK_ASSERT_DEBUG(!iTables[aChunkIndex]);
    iTables[aChunkIndex] = aPageTable; // new mappings can now see the page table
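// Address arithmetic sketch for the PDE update above (illustrative): mapping
// base addresses are page aligned, so LinAddrAndOsAsid() can keep the OS
// ASID in the low page-offset bits and the linear address in the rest:
//
//     TLinAddr v = mapping->LinAddrAndOsAsid() + start*KPageSize;
//     TUint osAsid  = v & KPageMask;   // low bits: address space id
//     TLinAddr addr = v & ~KPageMask;  // high bits: linear address
//     // Mmu::PageDirectoryEntry(osAsid, addr) then locates the PDE to update
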
void DCoarseMemory::DPageTables::FreePageTable(TUint aChunkIndex)
    TRACE2(("DCoarseMemory::DPageTables[0x%08x]::FreePageTable(0x%08x)",this,aChunkIndex));

    // test if page table still needs freeing...
    TPte* pt = iTables[aChunkIndex];
    SPageTableInfo* pti = SPageTableInfo::FromPtPtr(pt);
    if(pti->PageCount()==0 && pti->PermanenceCount()==0)
        // page table needs freeing...
        UnassignPageTable(aChunkIndex);
        ::PageTables.Free(pt);
        ::PageTables.Unlock();

    // page table doesn't need freeing...
    ::PageTables.Unlock();

void DCoarseMemory::StealPageTable(TUint aChunkIndex, TUint aPteType)
    {
    __NK_ASSERT_DEBUG(PageTablesLockIsHeld());
    __NK_ASSERT_DEBUG(MmuLock::IsHeld());
    __NK_ASSERT_DEBUG(iPageTables[aPteType]);
    iPageTables[aPteType]->StealPageTable(aChunkIndex);
    }
void DCoarseMemory::DPageTables::StealPageTable(TUint aChunkIndex)
    __NK_ASSERT_DEBUG(PageTablesLockIsHeld());
    __NK_ASSERT_DEBUG(MmuLock::IsHeld());

    TPte* pt = iTables[aChunkIndex];
    __NK_ASSERT_DEBUG(pt);
    SPageTableInfo* pti = SPageTableInfo::FromPtPtr(pt);
    __NK_ASSERT_DEBUG(pti->PageCount()==0);
    __NK_ASSERT_DEBUG(pti->PermanenceCount()==0);

    UnassignPageTable(aChunkIndex);
void DCoarseMemory::DPageTables::UnassignPageTable(TUint aChunkIndex)
    __NK_ASSERT_DEBUG(PageTablesLockIsHeld());
    __NK_ASSERT_DEBUG(MmuLock::IsHeld());

    TPhysAddr ptPhys = Mmu::PageTablePhysAddr(iTables[aChunkIndex]);

    // zero page table pointer immediately so new mappings or memory commits will be forced to
    // create a new one (which will block until we've finished here because it also needs the
    // page table lock)...
    iTables[aChunkIndex] = 0;

    // remove page table from mappings...
    TUint offset = aChunkIndex<<(KChunkShift-KPageShift);
    TMappingListIter iter;
    DMemoryMapping* mapping = (DMemoryMapping*)iter.Start(iMappings);
        __NK_ASSERT_DEBUG(iTables[aChunkIndex]==0); // can't have been recreated because we hold PageTablesLock
        TUint size = mapping->iSizeInPages;
        TUint start = offset-mapping->iStartIndex;
            // page table is used by this mapping, so clear PDE...
            TLinAddr linAddrAndOsAsid = mapping->LinAddrAndOsAsid()+start*KPageSize;
            TPde* pPde = Mmu::PageDirectoryEntry(linAddrAndOsAsid&KPageMask,linAddrAndOsAsid);
            TPde pde = KPdeUnallocatedEntry;
            TRACE2(("!PDE %x=%x",pPde,pde));
            __NK_ASSERT_DEBUG(*pPde==pde || (*pPde&~KPageTableMask)==ptPhys);
            SinglePdeUpdated(pPde);
            ++flash; // increase flash rate because we've done quite a bit more work
        MmuLock::Flash(flash,KMaxMappingsInOneGo);
        mapping = (DMemoryMapping*)iter.Next();
TInt DCoarseMemory::DPageTables::AllocatePermanentPageTables()
    __NK_ASSERT_DEBUG(MemoryObjectLock::IsHeld(iMemory));
    __NK_ASSERT_ALWAYS(!iMemory->IsDemandPaged());

    if(iPermanenceCount++)
        // page tables already marked permanent, so end...

    // allocate all page tables...
    for(i=0; i<iNumPageTables; ++i)
        TPte* pt = iTables[i];
            // already have page table...
            SPageTableInfo* pti = SPageTableInfo::FromPtPtr(pt);
            pti->IncPermanenceCount();
            MmuLock::Flash(flash,KMaxPageInfoUpdatesInOneGo);
            // allocate new page table...
            pt = AllocatePageTable(i,EFalse,ETrue);
            FreePermanentPageTables(0,i);

void DCoarseMemory::DPageTables::FreePermanentPageTables(TUint aChunkIndex, TUint aChunkCount)
    for(i=aChunkIndex; i<aChunkIndex+aChunkCount; ++i)
        TPte* pt = iTables[i];
        SPageTableInfo* pti = SPageTableInfo::FromPtPtr(pt);
        if(pti->DecPermanenceCount() || pti->PageCount())
            MmuLock::Flash(flash,KMaxPageInfoUpdatesInOneGo);
        // page table no longer used for anything...

void DCoarseMemory::DPageTables::FreePermanentPageTables()
    __NK_ASSERT_DEBUG(MemoryObjectLock::IsHeld(iMemory));

    if(--iPermanenceCount)
        // page tables still permanent, so end...

    FreePermanentPageTables(0,iNumPageTables);
TInt DCoarseMemory::DPageTables::AddMapping(DCoarseMapping* aMapping)
    TRACE(("DCoarseMemory::DPageTables[0x%08x]::AddMapping(0x%08x)",this,aMapping));
    __NK_ASSERT_DEBUG(aMapping->IsCoarse());
    aMapping->LinkToMemory(iMemory,iMappings);

void DCoarseMemory::DPageTables::RemoveMapping(DCoarseMapping* aMapping)
    aMapping->UnlinkFromMemory(iMappings);
void DCoarseMemory::DPageTables::RemapPage(TPhysAddr& aPageArray, TUint aIndex, TBool aInvalidateTLB)
    TUint pteIndex = aIndex & (KChunkMask>>KPageShift);

    // get address of page table...
    TUint i = aIndex>>(KChunkShift-KPageShift);
    TPte* pPte = GetPageTable(i);
        {// This page has been unmapped so just return.

    // remap the page...
    Mmu::RemapPage(pPte, aPageArray, iBlankPte);

    FlushTLB(aIndex, aIndex + 1);
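// Index arithmetic sketch (illustrative, assuming 4KB pages and 1MB chunks,
// i.e. KPageShift=12, KChunkShift=20): each page table covers one chunk of
// 256 pages, so a page index splits into a chunk (page table) number and a
// PTE slot:
//
//     TUint chunk = aIndex >> (KChunkShift-KPageShift);  // aIndex / 256
//     TUint pte   = aIndex & (KChunkMask>>KPageShift);   // aIndex % 256
//
// e.g. aIndex=0x123 -> page table 1, PTE slot 0x23.
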
TInt DCoarseMemory::DPageTables::MapPages(RPageArray::TIter aPages)
    __NK_ASSERT_DEBUG(aPages.Count());

    TUint pteIndex = aPages.Index()&(KChunkMask>>KPageShift);

    // calculate max number of pages to do...
    TUint n = (KChunkSize>>KPageShift)-pteIndex; // pages left in page table
    if(n>KMaxPagesInOneGo)
        n = KMaxPagesInOneGo;

    // get some pages...
    n = aPages.Pages(pages,n);

    // get address of page table...
    TUint i = aPages.Index()>>(KChunkShift-KPageShift);
    TPte* pPte = GetOrAllocatePageTable(i);
        return KErrNoMemory;

    // map some pages...
    TBool keepPt = Mmu::MapPages(pPte, n, pages, iBlankPte);

    // free page table if no longer needed...

void DCoarseMemory::DPageTables::UnmapPages(RPageArray::TIter aPages, TBool aDecommitting)
    __NK_ASSERT_DEBUG(aPages.Count());

    TUint startIndex = aPages.Index();
    TUint pteIndex = aPages.Index()&(KChunkMask>>KPageShift);

    // calculate max number of pages to do...
    TUint n = (KChunkSize>>KPageShift)-pteIndex; // pages left in page table
    if(n>KMaxPagesInOneGo)
        n = KMaxPagesInOneGo;

    // get some pages...
    n = aPages.Pages(pages,n);

    // get address of PTE for pages...
    TUint i = aPages.Index()>>(KChunkShift-KPageShift);
    TPte* pPte = iTables[i];

    // unmap some pages...
    TBool keepPt = Mmu::UnmapPages(pPte,n,pages);

    // free page table if no longer needed...

    // no page table found...

    FlushTLB(startIndex,aPages.IndexEnd());

void DCoarseMemory::DPageTables::RestrictPagesNA(RPageArray::TIter aPages)
    __NK_ASSERT_DEBUG(aPages.Count());

    TUint startIndex = aPages.Index();
    TUint pteIndex = aPages.Index()&(KChunkMask>>KPageShift);

    // calculate max number of pages to do...
    TUint n = (KChunkSize>>KPageShift)-pteIndex; // pages left in page table
    if(n>KMaxPagesInOneGo)
        n = KMaxPagesInOneGo;

    // get some pages...
    n = aPages.Pages(pages,n);

    // get address of PTE for pages...
    TUint i = aPages.Index()>>(KChunkShift-KPageShift);
    TPte* pPte = iTables[i];

    // restrict some pages...
    Mmu::RestrictPagesNA(pPte,n,pages);

    FlushTLB(startIndex,aPages.IndexEnd());
TInt DCoarseMemory::DPageTables::PageIn(RPageArray::TIter aPages, TPinArgs& aPinArgs,
                                        DMemoryMappingBase* aMapping, TUint aMapInstanceCount)
    __NK_ASSERT_DEBUG(aPages.Count());

    TBool pinPageTable = aPinArgs.iPinnedPageTables!=0; // check if we need to pin the first page table
    TUint pteIndex = aPages.Index()&(KChunkMask>>KPageShift);
        pinPageTable = aPinArgs.iPinnedPageTables!=0; // started a new page table, check if we need to pin it

    // calculate max number of pages to do...
    TUint n = (KChunkSize>>KPageShift)-pteIndex; // pages left in page table
    if(n>KMaxPagesInOneGo)
        n = KMaxPagesInOneGo;

    // get some pages...
    n = aPages.Pages(pages,n);

    // make sure we have memory to pin the page table if required...
    aPinArgs.AllocReplacementPages(KNumPagesToPinOnePageTable);

    // get address of page table...
    TUint i = aPages.Index()>>(KChunkShift-KPageShift);
        pPte = GetOrAllocatePageTable(i,aPinArgs);
        pPte = GetOrAllocatePageTable(i);
        return KErrNoMemory;

    if (aMapInstanceCount != aMapping->MapInstanceCount())
        {// The mapping that took the page fault has been reused.
        FreePageTable(i); // This will only free if this is the only pt referencer.
        return KErrNotFound;

    // map some pages...
    TPte blankPte = iBlankPte;
    if(aPinArgs.iReadOnly)
        blankPte = Mmu::MakePteInaccessible(blankPte,true);
    TBool keepPt = Mmu::PageInPages(pPte, n, pages, blankPte);

    // free page table if no longer needed...

    pinPageTable = false;
TBool DCoarseMemory::DPageTables::MovingPageIn(TPhysAddr& aPageArrayPtr, TUint aIndex)
    __NK_ASSERT_DEBUG(MmuLock::IsHeld());

    TUint pteIndex = aIndex & (KChunkMask >> KPageShift);

    // get address of page table...
    TUint i = aIndex >> (KChunkShift - KPageShift);
    TPte* pPte = GetPageTable(i);

    // Check the page is still mapped..
    Mmu::RemapPage(pPte, aPageArrayPtr, iBlankPte);

void DCoarseMemory::DPageTables::FlushTLB(TUint aStartIndex, TUint aEndIndex)
#ifndef COARSE_GRAINED_TLB_MAINTENANCE
    TMappingListIter iter;
    DMemoryMapping* mapping = (DMemoryMapping*)iter.Start(iMappings);
        // get region which overlaps the mapping...
        TUint start = mapping->iStartIndex;
        TUint end = start+mapping->iSizeInPages;
        if(start<aStartIndex)
            start = aStartIndex;
        // the mapping doesn't contain the pages...

        // flush TLB for pages in the mapping...
        TUint size = end-start;
        start -= mapping->iStartIndex;
        TLinAddr addr = mapping->LinAddrAndOsAsid()+start*KPageSize;
        TLinAddr endAddr = addr+size*KPageSize;
        do InvalidateTLBForPage(addr);
        while((addr+=KPageSize)<endAddr);
        mapping = (DMemoryMapping*)iter.Next();
DCoarseMemory::DCoarseMemory(DMemoryManager* aManager, TUint aSizeInPages, TMemoryAttributes aAttributes, TMemoryCreateFlags aCreateFlags)
    : DMemoryObject(aManager,ECoarseObject,aSizeInPages,aAttributes,aCreateFlags)
    {
    }
DCoarseMemory* DCoarseMemory::New(DMemoryManager* aManager, TUint aSizeInPages, TMemoryAttributes aAttributes, TMemoryCreateFlags aCreateFlags)
    {
    DCoarseMemory* self = new DCoarseMemory(aManager, aSizeInPages, aAttributes, aCreateFlags);
    if(self)
        {
        if(self->Construct()==KErrNone)
            return self;
        self->Close();
        }
    return 0;
    }
DCoarseMemory::~DCoarseMemory()
    {
    TRACE2(("DCoarseMemory[0x%08x]::~DCoarseMemory()",this));
    for(TUint i=0; i<ENumPteTypes; i++)
        {
        __NK_ASSERT_DEBUG(!iPageTables[i]);
        }
    }


DMemoryMapping* DCoarseMemory::CreateMapping(TUint aIndex, TUint aCount)
    {
    if (((aIndex|aCount)&(KChunkMask>>KPageShift))==0)
        return new DCoarseMapping();
    return new DFineMapping();
    }
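// Alignment sketch: a coarse mapping can share this object's page tables
// only if the region starts and ends on chunk boundaries; OR-ing index and
// count lets one mask test check both at once. With 256 pages per chunk
// (KChunkMask>>KPageShift == 0xFF):
//
//     aIndex=0x100, aCount=0x200: (0x100|0x200)&0xFF == 0 -> DCoarseMapping
//     aIndex=0x100, aCount=0x180: (0x100|0x180)&0xFF != 0 -> DFineMapping
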
TInt DCoarseMemory::MapPages(RPageArray::TIter aPages)
    TRACE2(("DCoarseMemory[0x%08x]::MapPages(?) index=0x%x count=0x%x",this,aPages.Index(),aPages.Count()));

    // map pages in all page tables for coarse mapping...
    DPageTables* tables = iPageTables[pteType];
    TInt r = tables->MapPages(aPages);
    tables->AsyncClose();
    while(++pteType<ENumPteTypes);

    // map page in all fine mappings...
    return DMemoryObject::MapPages(aPages);

void DCoarseMemory::RemapPage(TPhysAddr& aPageArray, TUint aIndex, TBool aInvalidateTLB)
    TRACE2(("DCoarseMemory[0x%08x]::RemapPage() index=0x%x",this, aIndex));

    // remap pages in all page tables for coarse mapping...
    DPageTables* tables = iPageTables[pteType];
    tables->RemapPage(aPageArray, aIndex, aInvalidateTLB);
    tables->AsyncClose();
    while(++pteType<ENumPteTypes);

    // remap page in all fine mappings...
    DMemoryObject::RemapPage(aPageArray, aIndex, aInvalidateTLB);

void DCoarseMemory::UnmapPages(RPageArray::TIter aPages, TBool aDecommitting)
    TRACE2(("DCoarseMemory[0x%08x]::UnmapPages(?,%d) index=0x%x count=0x%x",this,(bool)aDecommitting,aPages.Index(),aPages.Count()));

    // unmap pages from all page tables for coarse mapping...
    DPageTables* tables = iPageTables[pteType];
    tables->UnmapPages(aPages,aDecommitting);
    tables->AsyncClose();
    while(++pteType<ENumPteTypes);

    // unmap pages from all fine mappings...
    DMemoryObject::UnmapPages(aPages,aDecommitting);

void DCoarseMemory::RestrictPages(RPageArray::TIter aPages, TRestrictPagesType aRestriction)
    TRACE2(("DCoarseMemory[0x%08x]::RestrictPages(?,%d) index=0x%x count=0x%x",this,aRestriction,aPages.Index(),aPages.Count()));
    __ASSERT_COMPILE(ERestrictPagesForMovingFlag != ERestrictPagesNoAccessForMoving);

    if (aRestriction != ERestrictPagesForMovingFlag)
        {// restrict pages in all the page tables for the coarse mapping...
        DPageTables* tables = iPageTables[pteType];
        tables->RestrictPagesNA(aPages);
        tables->AsyncClose();
        while(++pteType<ENumPteTypes);

    // restrict pages in all fine mappings, will also check for pinned mappings...
    DMemoryObject::RestrictPages(aPages,aRestriction);
TPte* DCoarseMemory::GetPageTable(TUint aPteType, TUint aChunkIndex)
    {
    __NK_ASSERT_DEBUG(aChunkIndex < (iSizeInPages >> KPagesInPDEShift));
    return iPageTables[aPteType]->GetPageTable(aChunkIndex);
    }
TInt DCoarseMemory::PageIn(DCoarseMapping* aMapping, RPageArray::TIter aPages, TPinArgs& aPinArgs, TUint aMapInstanceCount)
    __NK_ASSERT_DEBUG(MmuLock::IsHeld());

    DPageTables* tables = iPageTables[aMapping->PteType()];

#ifndef COARSE_GRAINED_TLB_MAINTENANCE
    TLinAddr startAddr = aMapping->Base()+(aPages.Index()-aMapping->iStartIndex)*KPageSize;
    TLinAddr endAddr = startAddr+aPages.Count()*KPageSize;
#endif

    TInt r = tables->PageIn(aPages, aPinArgs, aMapping, aMapInstanceCount);

#ifdef COARSE_GRAINED_TLB_MAINTENANCE
    InvalidateTLBForAsid(aMapping->OsAsid());
#else
    TLinAddr addr = startAddr+aMapping->OsAsid();
    do InvalidateTLBForPage(addr);
    while((addr+=KPageSize)<endAddr);
#endif

    tables->AsyncClose();
TBool DCoarseMemory::MovingPageIn(DCoarseMapping* aMapping, TPhysAddr& aPageArrayPtr, TUint aIndex)
    {
    DCoarseMemory::DPageTables* tables = iPageTables[aMapping->PteType()];
    return tables->MovingPageIn(aPageArrayPtr, aIndex);
    }


TPte* DCoarseMemory::FindPageTable(DCoarseMapping* aMapping, TLinAddr aLinAddr, TUint aMemoryIndex)
    {
    DCoarseMemory::DPageTables* tables = iPageTables[aMapping->PteType()];

    // get address of page table...
    TUint i = aMemoryIndex >> (KChunkShift - KPageShift);
    return tables->GetPageTable(i);
    }
TInt DCoarseMemory::ClaimInitialPages(TLinAddr aBase, TUint aSize, TMappingPermissions aPermissions, TBool aAllowGaps, TBool aAllowNonRamPages)
    TRACE(("DCoarseMemory[0x%08x]::ClaimInitialPages(0x%08x,0x%08x,0x%08x,%d,%d)",this,aBase,aSize,aPermissions,aAllowGaps,aAllowNonRamPages));

    // validate arguments...
    if(aBase&KChunkMask || aBase<KGlobalMemoryBase)
        return KErrArgument;
    if(aSize&KPageMask || aSize>iSizeInPages*KPageSize)
        return KErrArgument;

    // get DPageTables object...
    TUint pteType = Mmu::PteType(aPermissions,true);
    MemoryObjectLock::Lock(this);
    DPageTables* tables = GetOrAllocatePageTables(pteType);
    MemoryObjectLock::Unlock(this);
    __NK_ASSERT_DEBUG(tables);

    // check and allocate page array entries...
    RPageArray::TIter pageIter;
    TInt r = iPages.AddStart(0,aSize>>KPageShift,pageIter);
    __NK_ASSERT_ALWAYS(r==KErrNone);

    // hold MmuLock for long time, shouldn't matter as this is only done during boot
    ::PageTables.Lock();

    TPte blankPte = tables->iBlankPte;
    TPte** pPt = tables->iTables;
    TPde* pPde = Mmu::PageDirectoryEntry(KKernelOsAsid,aBase);

    TRACE(("DCoarseMemory::ClaimInitialPages: %08x: 0x%08x",aBase+offset,pde));
    SPageTableInfo* pti = NULL;

    if (Mmu::PdeMapsSection(pde))
        TPhysAddr sectionBase = Mmu::SectionBaseFromPde(pde);
        TRACE((" chunk is section mapped, base at %08x", sectionBase));
        __NK_ASSERT_DEBUG(sectionBase != KPhysAddrInvalid);

        TPde pde = sectionBase | Mmu::BlankSectionPde(Attributes(),pteType);
        __NK_ASSERT_DEBUG(((*pPde^pde)&~KPdeMatchMask)==0);
        SinglePdeUpdated(pPde);

        // We allocate and populate a page table for the section even though it won't be mapped
        // initially because the presence of the page table is used to check whether RAM is
        // mapped in a chunk, and because it makes it possible to break the section mapping
        // without allocating memory. This may change in the future.

        // Note these page tables are always unpaged here regardless of paged bit in iFlags
        // (e.g. ROM object is marked as paged despite initial pages being unpaged)
        pPte = tables->AllocatePageTable(offset >> KChunkShift, EFalse, EFalse);
            return KErrNoMemory;
        pti = SPageTableInfo::FromPtPtr(pPte);
    else if (Mmu::PdeMapsPageTable(pde))
        pPte = Mmu::PageTableFromPde(*pPde);
        TRACE((" page table found at %08x", pPte));
        __NK_ASSERT_DEBUG(pPte);
        pti = SPageTableInfo::FromPtPtr(pPte);
        pti->SetCoarse(this,offset>>KChunkShift,pteType);
    TPhysAddr pagePhys = Mmu::LinearToPhysical(aBase+offset);
    if(pagePhys==KPhysAddrInvalid)
        __NK_ASSERT_ALWAYS(aAllowGaps); // we have a gap, check this is allowed
        pte = KPteUnallocatedEntry;

        __NK_ASSERT_ALWAYS(size); // pages can't be mapped above aSize

        pageIter.Add(1,&pagePhys);

        SPageInfo* pi = SPageInfo::SafeFromPhysAddr(pagePhys);
        __NK_ASSERT_ALWAYS(pi || aAllowNonRamPages);
            __NK_ASSERT_ALWAYS(pi->Type()==SPageInfo::EFixed);
            pi->SetManaged(this,offset>>KPageShift,PageInfoFlags());

        pte = pagePhys|blankPte;

    TRACE2(("!PTE %x=%x (was %x)",pPte,pte,*pPte));
    __NK_ASSERT_DEBUG(((*pPte^pte)&~KPteMatchMask)==0 || *pPte==KPteUnallocatedEntry);
    CacheMaintenance::SinglePteUpdated((TLinAddr)pPte);

    offset += KPageSize;
    while(offset&(KChunkMask&~KPageMask));

    pti->IncPageCount(numPages);
    TRACE2(("pt %x page count=%d",TLinAddr(pPte)-KPageTableSize,numPages));
    __NK_ASSERT_DEBUG(pti->CheckPageCount());

    InvalidateTLBForAsid(KKernelOsAsid);

    ::PageTables.Unlock();

    // release page array entries...
    iPages.AddEnd(0,aSize>>KPageShift);
DCoarseMemory::DPageTables* DCoarseMemory::GetOrAllocatePageTables(TUint aPteType)
    __NK_ASSERT_DEBUG(MemoryObjectLock::IsHeld(this));

    DPageTables* tables = iPageTables[aPteType];

    // allocate a new one if required...
    tables = DPageTables::New(this, iSizeInPages, aPteType);
    __NK_ASSERT_DEBUG(!iPageTables[aPteType]);
    iPageTables[aPteType] = tables;

TInt DCoarseMemory::AddMapping(DMemoryMappingBase* aMapping)
    if(!aMapping->IsCoarse())
        // not coarse mapping...
        return DMemoryObject::AddMapping(aMapping);

    __NK_ASSERT_DEBUG(aMapping->IsPinned()==false); // coarse mappings can't pin

    // Check mapping allowed. Must hold memory object lock to prevent changes
    // to object's restrictions.
    MemoryObjectLock::Lock(this);
    TInt r = CheckNewMapping(aMapping);
    MemoryObjectLock::Unlock(this);

    // get DPageTable for mapping...
    DPageTables* tables = GetOrAllocatePageTables(aMapping->PteType());

    // Safe to release here as no restrictions to this type of mapping can be added as
    // we now have an iPageTables entry for this type of mapping.
    MemoryObjectLock::Unlock(this);
    return KErrNoMemory;

    // add mapping to DPageTable...
    r = tables->AddMapping((DCoarseMapping*)aMapping);

    // allocate permanent page tables if required...
    if(aMapping->Flags()&DMemoryMapping::EPermanentPageTables)
        MemoryObjectLock::Lock(this);
        r = tables->AllocatePermanentPageTables();
        MemoryObjectLock::Unlock(this);
        __e32_atomic_ior_ord8(&aMapping->Flags(), (TUint8)DMemoryMapping::EPageTablesAllocated);
        tables->RemoveMapping((DCoarseMapping*)aMapping);

void DCoarseMemory::RemoveMapping(DMemoryMappingBase* aMapping)
    if(!aMapping->IsCoarse())
        // not coarse mapping...
        DMemoryObject::RemoveMapping(aMapping);

    // need a temporary reference on self because we may be removing the last mapping
    // which will delete this...

    // get DPageTable the mapping is attached to...
    DPageTables* tables = iPageTables[aMapping->PteType()];
    __NK_ASSERT_DEBUG(tables); // must exist because aMapping has a reference on it

    // free permanent page tables if required...
    if(aMapping->Flags()&DMemoryMapping::EPageTablesAllocated)
        MemoryObjectLock::Lock(this);
        tables->FreePermanentPageTables();
        MemoryObjectLock::Unlock(this);

    // remove mapping from page tables object...
    tables->RemoveMapping((DCoarseMapping*)aMapping);

    Close(); // may delete this memory object

TInt DCoarseMemory::SetReadOnly()
    __NK_ASSERT_DEBUG(MemoryObjectLock::IsHeld(this));

    // Search for writable iPageTable entries.
    // We hold the MemoryObjectLock so iPageTable entries can't be added or removed.
    if((pteType & EPteTypeWritable) && iPageTables[pteType])
    while(++pteType < ENumPteTypes);

    // unmap pages from all fine mappings...
    return DMemoryObject::SetReadOnly();
DFineMemory::DFineMemory(DMemoryManager* aManager, TUint aSizeInPages, TMemoryAttributes aAttributes, TMemoryCreateFlags aCreateFlags)
    : DMemoryObject(aManager,0,aSizeInPages,aAttributes,aCreateFlags)
    {
    }
DFineMemory* DFineMemory::New(DMemoryManager* aManager, TUint aSizeInPages, TMemoryAttributes aAttributes, TMemoryCreateFlags aCreateFlags)
    {
    DFineMemory* self = new DFineMemory(aManager,aSizeInPages,aAttributes,aCreateFlags);
    if(self)
        {
        if(self->Construct()==KErrNone)
            return self;
        self->Close();
        }
    return 0;
    }
DFineMemory::~DFineMemory()
    {
    TRACE2(("DFineMemory[0x%08x]::~DFineMemory",this));
    }
TInt DFineMemory::ClaimInitialPages(TLinAddr aBase, TUint aSize, TMappingPermissions aPermissions, TBool aAllowGaps, TBool aAllowNonRamPages)
    TRACE(("DFineMemory[0x%08x]::ClaimInitialPages(0x%08x,0x%08x,0x%08x,%d,%d)",this,aBase,aSize,aPermissions,aAllowGaps,aAllowNonRamPages));

    // validate arguments...
    if(aBase&KPageMask || aBase<KGlobalMemoryBase)
        return KErrArgument;
    if(aSize&KPageMask || aSize>iSizeInPages*KPageSize)
        return KErrArgument;

    // calculate 'blankPte', the correct PTE value for pages in this memory object...
    TUint pteType = Mmu::PteType(aPermissions,true);
    TPte blankPte = Mmu::BlankPte(Attributes(),pteType);

    // get page table...
    TPde* pPde = Mmu::PageDirectoryEntry(KKernelOsAsid,aBase);
    TPte* pPte = Mmu::PageTableFromPde(*pPde);
    if(!pPte)
        return KErrNone; // no pages mapped

    // check and allocate page array entries...
    RPageArray::TIter pageIter;
    TInt r = iPages.AddStart(0,aSize>>KPageShift,pageIter);
    __NK_ASSERT_ALWAYS(r==KErrNone);

    // hold MmuLock for long time, shouldn't matter as this is only done during boot

    // setup page table for fine mappings...
    SPageTableInfo* pti = SPageTableInfo::FromPtPtr(pPte);
    __NK_ASSERT_DEBUG(pti->CheckPageCount());
    TBool pageTableOk = pti->ClaimFine(aBase&~KChunkMask,KKernelOsAsid);
    __NK_ASSERT_ALWAYS(pageTableOk);
    TRACE(("DFineMemory::ClaimInitialPages page table = 0x%08x",pPte));

    TUint pteIndex = (aBase>>KPageShift)&(KChunkMask>>KPageShift);
    TUint pageIndex = 0;
    while(pageIndex<iSizeInPages)
        TPhysAddr pagePhys = Mmu::PtePhysAddr(pPte[pteIndex],pteIndex);
        if(pagePhys==KPhysAddrInvalid)
            __NK_ASSERT_ALWAYS(aAllowGaps); // we have a gap, check this is allowed

            // check PTE is correct...
            __NK_ASSERT_DEBUG(pPte[pteIndex]==KPteUnallocatedEntry);

        __NK_ASSERT_ALWAYS(size); // pages can't be mapped above aSize

        pageIter.Add(1,&pagePhys);

        SPageInfo* pi = SPageInfo::SafeFromPhysAddr(pagePhys);
            __NK_ASSERT_ALWAYS(aAllowNonRamPages);
            __NK_ASSERT_ALWAYS(pi->Type()==SPageInfo::EFixed);
            pi->SetManaged(this,pageIndex,PageInfoFlags());

        // check PTE is correct...
        TPte pte = pagePhys|blankPte;
        __NK_ASSERT_DEBUG(((pPte[pteIndex]^pte)&~KPteMatchMask)==0);

        // move on to next page...
        __NK_ASSERT_ALWAYS(pteIndex<(KChunkSize>>KPageShift));

    // release page array entries...
    iPages.AddEnd(0,aSize>>KPageShift);