First public contribution.
1 // Copyright (c) 2007-2009 Nokia Corporation and/or its subsidiary(-ies).
2 // All rights reserved.
3 // This component and the accompanying materials are made available
4 // under the terms of the License "Eclipse Public License v1.0"
5 // which accompanies this distribution, and is available
6 // at the URL "http://www.eclipse.org/legal/epl-v10.html".
8 // Initial Contributors:
9 // Nokia Corporation - initial contribution.
16 #include <plat_priv.h>
17 #include "cache_maintenance.h"
18 #include "decompress.h" // include for the generic BytePairDecompress().
// Factory for memory objects belonging to this manager.
// @param aMemory      Receives the newly created object on success.
// @param aSizeInPages Size of the object, in pages.
// Chooses DFineMemory when the size has page bits set below the chunk
// granularity (aSizeInPages & (KChunkMask>>KPageShift)), otherwise the
// coarser DCoarseMemory representation.
// NOTE(review): interleaved lines (braces, the 'else' at original line 37,
// the trailing return) are omitted from this excerpt; code lines kept verbatim.
32 TInt DMemoryManager::New(DMemoryObject*& aMemory, TUint aSizeInPages, TMemoryAttributes aAttributes, TMemoryCreateFlags aCreateFlags)
34 DMemoryObject* memory;
35 if(aSizeInPages&(KChunkMask>>KPageShift))
36 memory = DFineMemory::New(this,aSizeInPages,aAttributes,aCreateFlags);
// presumably the 'else' branch (omitted line 37) selects the coarse variant — TODO confirm
38 memory = DCoarseMemory::New(this,aSizeInPages,aAttributes,aCreateFlags);
// Default implementations for operations the base DMemoryManager does not
// support: each fails with KErrNotSupported. The concrete managers defined
// later in this file (unpaged/movable/discardable/hardware) override the
// subset they actually implement.
46 TInt DMemoryManager::Alloc(DMemoryObject* /*aMemory*/, TUint /*aIndex*/, TUint /*aCount*/)
48 return KErrNotSupported;
52 TInt DMemoryManager::AllocContiguous(DMemoryObject* /*aMemory*/, TUint /*aIndex*/, TUint /*aCount*/, TUint /*aAlign*/, TPhysAddr& /*aPhysAddr*/)
54 return KErrNotSupported;
// Free: default body not visible in this excerpt (original lines 59-61 omitted);
// presumably a no-op — TODO confirm against full source.
58 void DMemoryManager::Free(DMemoryObject* /*aMemory*/, TUint /*aIndex*/, TUint /*aCount*/)
63 TInt DMemoryManager::Wipe(DMemoryObject* /*aMemory*/)
65 return KErrNotSupported;
69 TInt DMemoryManager::AddPages(DMemoryObject* /*aMemory*/, TUint /*aIndex*/, TUint /*aCount*/, TPhysAddr* /*aPages*/)
71 return KErrNotSupported;
75 TInt DMemoryManager::AddContiguous(DMemoryObject* /*aMemory*/, TUint /*aIndex*/, TUint /*aCount*/, TPhysAddr /*aPhysAddr*/)
77 return KErrNotSupported;
81 TInt DMemoryManager::RemovePages(DMemoryObject* /*aMemory*/, TUint /*aIndex*/, TUint /*aCount*/, TPhysAddr* /*aPages*/)
83 return KErrNotSupported;
87 TInt DMemoryManager::AllowDiscard(DMemoryObject* /*aMemory*/, TUint /*aIndex*/, TUint /*aCount*/)
89 return KErrNotSupported;
93 TInt DMemoryManager::DisallowDiscard(DMemoryObject* /*aMemory*/, TUint /*aIndex*/, TUint /*aCount*/)
95 return KErrNotSupported;
99 TInt DMemoryManager::StealPage(DMemoryObject* /*aMemory*/, SPageInfo* /*aPageInfo*/)
101 return KErrNotSupported;
105 TInt DMemoryManager::RestrictPage(DMemoryObject* /*aMemory*/, SPageInfo* /*aPageInfo*/, TRestrictPagesType /*aRestriction*/)
107 return KErrNotSupported;
// Default 'clean a dirty page' operation. A page that is already clean is
// acceptable (the success return on the omitted line 114 — presumably
// KErrNone, TODO confirm); a dirty page implies demand-paged backing store,
// which this base manager does not have, so debug-assert and fail.
111 TInt DMemoryManager::CleanPage(DMemoryObject* aMemory, SPageInfo* aPageInfo, TPhysAddr*& /*aPageArrayEntry*/)
113 if(aPageInfo->IsDirty()==false)
115 __NK_ASSERT_DEBUG(0);
116 return KErrNotSupported;
// Default page-fault handler. The (void) casts only silence unused-parameter
// warnings. The body's return path is on lines omitted from this excerpt;
// NOTE(review): base managers presumably treat any fault as an error — confirm
// against the full source.
120 TInt DMemoryManager::HandleFault(	DMemoryObject* aMemory, TUint aIndex, DMemoryMapping* aMapping,
121 TUint aMapInstanceCount, TUint aAccessPermissions)
126 (void)aMapInstanceCount;
127 (void)aAccessPermissions;
128 //	Kern::Printf("DMemoryManager::HandlePageFault(0x%08x,0x%x,0x%08x,%d)",aMemory,aIndex,aMapping,aAccessPermissions);
// Default: page moving (RAM defragmentation) is not supported; overridden by
// DMovableMemoryManager below.
133 TInt DMemoryManager::MovePage(	DMemoryObject* aMemory, SPageInfo* aOldPageInfo,
134 TPhysAddr& aNewPage, TUint aBlockZoneId, TBool aBlockRest)
136 return KErrNotSupported;
// Must be overridden by any manager that allocates/frees RAM via
// AllocPages()/FreePages(); reaching this base implementation is a bug,
// hence the debug assert. (The return statement is on a line omitted from
// this excerpt.)
139 TZonePageType DMemoryManager::PageType()
140 {// This should not be invoked on memory managers that do not use the methods
141 // AllocPages() and FreePages().
142 __NK_ASSERT_DEBUG(0);
// Shared cleanup machinery: a single TMemoryCleanup callback object, the head
// of the singly-linked queue of memory objects awaiting deferred cleanup
// (linked through iCleanupNext), and the IRQ-level spinlock that guards the
// queue and each object's iCleanupFlags.
146 static TMemoryCleanup Cleanup;
148 DMemoryObject* DMemoryManager::iCleanupHead = 0;
149 TSpinLock DMemoryManager::iCleanupLock(TSpinLock::EOrderGenericIrqHigh3);
// Deferred-cleanup worker, run via the Cleanup callback queued by
// QueueCleanup(). Repeatedly (the enclosing loop lines are omitted from this
// excerpt) pops work from the queue under iCleanupLock:
//  - if an object still has ECleanupDecommitted pending, clear that flag,
//    drop the lock and perform DoCleanupDecommitted() outside it;
//  - otherwise the object has no work left: assert only ECleanupIsQueued
//    remains, clear it, unlink the object from the queue and close the
//    reference that was taken when it was queued (close call on omitted lines).
151 void DMemoryManager::CleanupFunction(TAny*)
155 __SPIN_LOCK_IRQ(iCleanupLock);
157 // get an object from queue...
158 DMemoryObject* memory = iCleanupHead;
161 // none left, so end...
162 __SPIN_UNLOCK_IRQ(iCleanupLock);
166 if(memory->iCleanupFlags&ECleanupDecommitted)
168 // object requires cleanup of decommitted pages...
169 memory->iCleanupFlags &= ~ECleanupDecommitted;
170 __SPIN_UNLOCK_IRQ(iCleanupLock);
171 memory->iManager->DoCleanupDecommitted(memory);
175 // object has no more cleanup operations to perform,
176 // so remove it from the cleanup queue...
177 __NK_ASSERT_DEBUG(memory->iCleanupFlags==ECleanupIsQueued); // no operations left, just flag to say its in the cleanup queue
178 memory->iCleanupFlags &= ~ECleanupIsQueued;
179 iCleanupHead = memory->iCleanupNext;
180 memory->iCleanupNext = NULL;
181 __SPIN_UNLOCK_IRQ(iCleanupLock);
183 // close reference which was added when object was queued...
// Request deferred cleanup work aCleanupOp on aMemory. Records the operation
// (and ECleanupIsQueued) in iCleanupFlags under the spinlock; if the object
// was already queued (test on omitted lines, based on oldFlags) nothing more
// is needed. Otherwise a reference is opened on the object (omitted lines),
// it is pushed onto the front of the cleanup queue, and the shared Cleanup
// callback is scheduled to run CleanupFunction.
190 void DMemoryManager::QueueCleanup(DMemoryObject* aMemory, TCleanupOperationFlag aCleanupOp)
192 // add new cleanup operation...
193 __SPIN_LOCK_IRQ(iCleanupLock);
194 TUint32 oldFlags = aMemory->iCleanupFlags;
195 aMemory->iCleanupFlags = oldFlags|aCleanupOp|ECleanupIsQueued;
196 __SPIN_UNLOCK_IRQ(iCleanupLock);
198 // if cleanup was already requested...
200 return; // nothing more to do
202 // increase reference count...
205 // add object to cleanup queue...
206 __SPIN_LOCK_IRQ(iCleanupLock);
207 aMemory->iCleanupNext = iCleanupHead;
208 iCleanupHead = aMemory;
209 __SPIN_UNLOCK_IRQ(iCleanupLock);
211 // queue cleanup function to run...
212 Cleanup.Add((TMemoryCleanupCallback)CleanupFunction,0);
// Base hook invoked by CleanupFunction for ECleanupDecommitted work.
// Managers that queue that operation must override this; reaching the base
// implementation is a bug, hence the debug assert.
216 void DMemoryManager::DoCleanupDecommitted(DMemoryObject* aMemory)
218 TRACE2(("DMemoryManager::DoCleanupDecommitted(0x%08x)",aMemory));
219 __NK_ASSERT_DEBUG(0);
// Re-commit pages in [aIndex,aIndex+aCount) which the object still owns but
// which sit in the EDecommitted state (e.g. decommit was vetoed because the
// page was pinned). Iterates the page array in batches; for each decommitted
// entry, under the RamAllocLock, rewrites the entry as ECommitted and
// re-initialises the page via Mmu::PagesAllocated. Loop/brace lines between
// the visible ones are omitted from this excerpt; code kept verbatim.
223 void DMemoryManager::ReAllocDecommitted(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
225 __NK_ASSERT_DEBUG(MemoryObjectLock::IsHeld(aMemory));
227 // make iterator for region...
228 RPageArray::TIter pageIter;
229 aMemory->iPages.FindStart(aIndex,aCount,pageIter);
233 // find some pages...
234 RPageArray::TIter pageList;
235 TUint n = pageIter.Find(pageList);
239 // check each existing page...
240 RamAllocLock::Lock();
242 while(pageList.Pages(pages))
244 TPhysAddr page = *pages;
245 if(RPageArray::State(page)==RPageArray::EDecommitted)
247 // decommitted pages need re-initialising...
248 TPhysAddr pagePhys = page&~KPageMask;
249 *pages = pagePhys|RPageArray::ECommitted;
250 TheMmu.PagesAllocated(&pagePhys,1,aMemory->RamAllocFlags(),true);
254 RamAllocLock::Unlock();
257 pageIter.FindRelease(n);
260 aMemory->iPages.FindEnd(aIndex,aCount);
264 void DMemoryManager::FreeDecommitted(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
266 TRACE2(("DMemoryManager::FreeDecommitted(0x%08x,0x%x,0x%x)",aMemory, aIndex, aCount));
267 __NK_ASSERT_DEBUG(MemoryObjectLock::IsHeld(aMemory));
269 // make iterator for region...
270 RPageArray::TIter pageIter;
271 aMemory->iPages.FindStart(aIndex,aCount,pageIter);
275 // find some pages...
276 RPageArray::TIter pageList;
277 TUint findCount = pageIter.Find(pageList);
281 // search for decommitted pages...
282 RamAllocLock::Lock();
285 while((numPages=pageList.Pages(pages))!=0)
288 if(RPageArray::State(pages[n])!=RPageArray::EDecommitted)
290 // skip pages which aren't EDecommitted...
291 while(++n<numPages && RPageArray::State(pages[n])!=RPageArray::EDecommitted)
296 // find range of pages which are EDecommitted...
297 while(++n<numPages && RPageArray::State(pages[n])==RPageArray::EDecommitted)
299 RPageArray::TIter decommittedList(pageList.Left(n));
302 TUint freedCount = FreePages(aMemory,decommittedList);
304 TRACE2(("DMemoryManager::FreeDecommitted(0x%08x) freed %d in 0x%x..0x%x",aMemory,freedCount,decommittedList.Index(),decommittedList.IndexEnd()));
308 RamAllocLock::Unlock();
311 pageIter.FindRelease(findCount);
314 aMemory->iPages.FindEnd(aIndex,aCount);
// Common implementation of Free(): removes the page array entries for
// [aIndex,aIndex+aCount) in batches (RemoveFind) and releases the underlying
// RAM via FreePages(). Enclosing loop lines are omitted from this excerpt.
318 void DMemoryManager::DoFree(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
320 TRACE2(("DMemoryManager::DoFree(0x%08x,0x%x,0x%x)",aMemory, aIndex, aCount));
321 __NK_ASSERT_DEBUG(MemoryObjectLock::IsHeld(aMemory));
323 RPageArray::TIter pageIter;
324 aMemory->iPages.FindStart(aIndex,aCount,pageIter);
328 // find some pages...
329 RPageArray::TIter pageList;
330 TUint n = pageIter.RemoveFind(pageList);
335 FreePages(aMemory,pageList);
338 pageIter.FindRelease(n);
341 aMemory->iPages.FindEnd(aIndex,aCount);
// Unmap the pages covered by aPageList from all mappings, then, under the
// RamAllocLock, remove them from the page array in batches of at most
// KMaxPagesInOneGo and return the RAM to the system with the owning
// manager's page type. Returns TInt — judging by the caller in
// FreeDecommitted() it is the number of pages freed; the return statement
// itself is on lines omitted from this excerpt — TODO confirm.
345 TInt DMemoryManager::FreePages(DMemoryObject* aMemory, RPageArray::TIter aPageList)
347 // unmap the pages...
348 aMemory->UnmapPages(aPageList,true);
350 RamAllocLock::Lock();
352 // remove and free pages...
355 TPhysAddr pages[KMaxPagesInOneGo];
357 while((n=aPageList.Remove(KMaxPagesInOneGo,pages))!=0)
360 m.FreeRam(pages, n, aMemory->iManager->PageType());
363 RamAllocLock::Unlock();
371 Manager for memory objects containing normal unpaged program memory (RAM) which
372 is allocated from a system wide pool. The physical pages allocated to this
373 memory are fixed until explicitly freed.
375 This is normally used for kernel memory and any other situation where it
376 is not permissible for memory accesses to generate page faults of any kind.
// Manager for ordinary unpaged RAM allocated from the system pool: pages are
// fixed once committed. Singleton; exported through TheUnpagedMemoryManager.
378 class DUnpagedMemoryManager : public DMemoryManager
381 // from DMemoryManager...
382 virtual void Destruct(DMemoryObject* aMemory);
383 virtual TInt Alloc(DMemoryObject* aMemory, TUint aIndex, TUint aCount);
384 virtual TInt AllocContiguous(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TUint aAlign, TPhysAddr& aPhysAddr);
385 virtual void Free(DMemoryObject* aMemory, TUint aIndex, TUint aCount);
386 virtual TInt Pin(DMemoryObject* aMemory, DMemoryMappingBase* aMapping, TPinArgs& aPinArgs);
387 virtual void Unpin(DMemoryObject* aMemory, DMemoryMappingBase* aMapping, TPinArgs& aPinArgs);
388 virtual TInt Wipe(DMemoryObject* aMemory);
389 virtual TZonePageType PageType();
392 // from DMemoryManager...
393 virtual void DoCleanupDecommitted(DMemoryObject* aMemory);
396 Implementation factor for implementation of #Alloc.
398 static TInt AllocPages(DMemoryObject* aMemory, RPageArray::TIter aPageList);
401 Implementation factor for implementation of #AllocContiguous.
403 static TInt AllocContiguousPages(DMemoryObject* aMemory, RPageArray::TIter aPageList, TUint aAlign, TPhysAddr& aPhysAddr);
406 Implementation factor for implementation of #Wipe.
408 static void WipePages(DMemoryObject* aMemory, RPageArray::TIter aPageList);
412 The single instance of this manager class.
414 static DUnpagedMemoryManager TheManager;
// Out-of-class definition of the singleton and the globally visible pointer
// used by the rest of the memory model to reach it.
418 DUnpagedMemoryManager DUnpagedMemoryManager::TheManager;
419 DMemoryManager* TheUnpagedMemoryManager = &DUnpagedMemoryManager::TheManager;
// Destruction hook: free every page of the object under its memory-object
// lock. (Final teardown of the object itself happens on lines omitted here.)
422 void DUnpagedMemoryManager::Destruct(DMemoryObject* aMemory)
424 MemoryObjectLock::Lock(aMemory);
425 Free(aMemory,0,aMemory->iSizeInPages);
426 MemoryObjectLock::Unlock(aMemory);
// Commit aCount pages at aIndex: first revive any still-owned decommitted
// pages (ReAllocDecommitted), then reserve page array entries (AddStart with
// 'true' — presumably allowing existing entries, TODO confirm), allocate RAM
// into them (AllocPages), map them into all mappings, release the array
// entries, and roll everything back with Free() on error. Error-check and
// brace lines are omitted from this excerpt.
431 TInt DUnpagedMemoryManager::Alloc(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
433 TRACE2(("DUnpagedMemoryManager::Alloc(0x%08x,0x%x,0x%x)",aMemory, aIndex, aCount));
434 __NK_ASSERT_DEBUG(MemoryObjectLock::IsHeld(aMemory));
436 // re-initialise any decommitted pages which we may still own because they were pinned...
437 ReAllocDecommitted(aMemory,aIndex,aCount);
439 // check and allocate page array entries...
440 RPageArray::TIter pageList;
441 TInt r = aMemory->iPages.AddStart(aIndex,aCount,pageList,true);
445 // allocate RAM and add it to page array...
446 r = AllocPages(aMemory,pageList);
450 r = aMemory->MapPages(pageList);
452 // release page array entries...
453 aMemory->iPages.AddEnd(aIndex,aCount);
455 // revert if error...
457 Free(aMemory,aIndex,aCount);
// Commit aCount physically contiguous pages at aIndex with 2^aAlign byte
// alignment; on success aPhysAddr receives the base physical address
// (pre-set to KPhysAddrInvalid for the error paths). Mirrors Alloc(): reserve
// array entries, AllocContiguousPages, map, release, and Free() to revert on
// error. Error-check/brace lines are omitted from this excerpt.
463 TInt DUnpagedMemoryManager::AllocContiguous(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TUint aAlign, TPhysAddr& aPhysAddr)
465 TRACE2(("DUnpagedMemoryManager::AllocContiguous(0x%08x,0x%x,0x%x,%d,?)",aMemory, aIndex, aCount, aAlign));
466 __NK_ASSERT_DEBUG(MemoryObjectLock::IsHeld(aMemory));
468 // set invalid memory in case of error...
469 aPhysAddr = KPhysAddrInvalid;
471 // check and allocate page array entries...
472 RPageArray::TIter pageList;
473 TInt r = aMemory->iPages.AddStart(aIndex,aCount,pageList);
477 // allocate memory...
479 r = AllocContiguousPages(aMemory, pageList, aAlign, physAddr);
484 r = aMemory->MapPages(pageList);
486 aPhysAddr = physAddr;
489 // release page array entries...
490 aMemory->iPages.AddEnd(aIndex,aCount);
492 // revert if error...
494 Free(aMemory,aIndex,aCount);
// Decommit: simply delegates to the shared DMemoryManager::DoFree helper.
500 void DUnpagedMemoryManager::Free(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
502 DoFree(aMemory,aIndex,aCount);
// Worker for Alloc(): fill the empty entries of aPageList with freshly
// allocated RAM. Loops under the RamAllocLock: find the next empty run
// (AddFind), allocate up to KMaxPagesInOneGo pages (AllocRam with this
// object's flags and page type), stamp each page's SPageInfo as managed by
// aMemory (MmuLock flash lines around the loop are omitted from this
// excerpt), then publish the pages into the array; repeat until the run is
// exhausted. The return statement is on omitted lines.
506 TInt DUnpagedMemoryManager::AllocPages(DMemoryObject* aMemory, RPageArray::TIter aPageList)
509 RamAllocLock::Lock();
514 // find entries in page array to allocate...
515 RPageArray::TIter allocList;
516 TUint n = aPageList.AddFind(allocList);
523 TPhysAddr pages[KMaxPagesInOneGo];
524 if(n>KMaxPagesInOneGo)
525 n = KMaxPagesInOneGo;
526 r = m.AllocRam(pages, n, aMemory->RamAllocFlags(), aMemory->iManager->PageType());
530 // assign pages to memory object...
532 TUint index = allocList.Index();
533 TUint flags = aMemory->PageInfoFlags();
538 SPageInfo* pi = SPageInfo::FromPhysAddr(pages[i]);
539 pi->SetManaged(aMemory,index+i,flags);
545 // add pages to page array...
546 allocList.Add(n,pages);
548 while((n=allocList.Count())!=0);
551 RamAllocLock::Unlock();
// Worker for AllocContiguous(): allocate one physically contiguous run
// covering the whole of aPageList. Under the RamAllocLock: AllocContiguousRam,
// then walk the run's SPageInfo structures marking each page as managed by
// aMemory (flashing the MmuLock every KMaxPageInfoUpdatesInOneGo entries to
// bound hold time), publish the run into the page array with AddContiguous
// and report the base address via aPhysAddr. Error-check/brace/return lines
// are omitted from this excerpt.
556 TInt DUnpagedMemoryManager::AllocContiguousPages(DMemoryObject* aMemory, RPageArray::TIter aPageList, TUint aAlign, TPhysAddr& aPhysAddr)
558 TUint size = aPageList.Count();
559 RamAllocLock::Lock();
561 // allocate memory...
564 TInt r = m.AllocContiguousRam(physAddr, size, aAlign, aMemory->RamAllocFlags());
567 // assign pages to memory object...
568 TUint index = aPageList.Index();
569 TUint flags = aMemory->PageInfoFlags();
570 SPageInfo* pi = SPageInfo::FromPhysAddr(physAddr);
571 SPageInfo* piEnd = pi+size;
576 MmuLock::Flash(flash,KMaxPageInfoUpdatesInOneGo);
577 pi->SetManaged(aMemory,index++,flags);
582 // add pages to page array...
583 aPageList.AddContiguous(size,physAddr);
586 aPhysAddr = physAddr;
589 RamAllocLock::Unlock();
// Pin the pages covered by aMapping. Unpaged pages never move by themselves,
// so pinning only needs to verify every page is present and not being
// decommitted: walk the array in KMaxPageInfoUpdatesInOneGo batches, bail out
// (goto stop) if any entry's target state is decommitted, flashing the
// MmuLock between batches. Success iff the iterator was fully consumed:
// pageList.Count()==0 => KErrNone, else KErrNotFound. Brace/label lines are
// omitted from this excerpt.
594 TInt DUnpagedMemoryManager::Pin(DMemoryObject* aMemory, DMemoryMappingBase* aMapping, TPinArgs& aPinArgs)
596 RPageArray::TIter pageList;
597 aMemory->iPages.FindStart(aMapping->iStartIndex,aMapping->iSizeInPages,pageList);
604 while((n=pageList.Pages(pages,KMaxPageInfoUpdatesInOneGo))!=0)
606 TPhysAddr* p = pages;
607 TPhysAddr* pEnd = p+n;
610 TPhysAddr page = *p++;
611 if(RPageArray::TargetStateIsDecommitted(page))
612 goto stop; // page is being decommitted, so can't pin it
617 MmuLock::Flash(flash,KMaxPageInfoUpdatesInOneGo);
622 aMemory->iPages.FindEnd(aMapping->iStartIndex,aMapping->iSizeInPages);
624 return pageList.Count() ? KErrNotFound : KErrNone;
628 void DUnpagedMemoryManager::Unpin(DMemoryObject* aMemory, DMemoryMappingBase* aMapping, TPinArgs& aPinArgs)
// Deferred-cleanup override: free every page of the object that is still
// held in the EDecommitted state, under the memory-object lock.
633 void DUnpagedMemoryManager::DoCleanupDecommitted(DMemoryObject* aMemory)
635 MemoryObjectLock::Lock(aMemory);
636 FreeDecommitted(aMemory,0,aMemory->iSizeInPages);
637 MemoryObjectLock::Unlock(aMemory);
// Overwrite the entire object's committed pages with the 'wipe' fill value
// (see WipePages). Iterates the whole page array in batches; the enclosing
// loop and return lines are omitted from this excerpt.
641 TInt DUnpagedMemoryManager::Wipe(DMemoryObject* aMemory)
643 __NK_ASSERT_DEBUG(MemoryObjectLock::IsHeld(aMemory));
645 // make iterator for region...
646 RPageArray::TIter pageIter;
647 aMemory->iPages.FindStart(0,aMemory->iSizeInPages,pageIter);
651 // find some pages...
652 RPageArray::TIter pageList;
653 TUint n = pageIter.Find(pageList);
657 // wipe some pages...
658 WipePages(aMemory,pageList);
661 pageIter.FindRelease(n);
664 aMemory->iPages.FindEnd(0,aMemory->iSizeInPages);
// Worker for Wipe(): rewrite the contents of the pages in aPageList as if
// they had been freshly allocated, via Mmu::PagesAllocated. All pages must
// be present (asserted). Note the packing trick on line 692: when PhysAddr()
// reports a contiguous run (r==0) the physical base address is passed with
// its low bit set instead of a page list — presumably a 'contiguous' flag
// understood by PagesAllocated; confirm against Mmu::PagesAllocated.
// Enclosing loop lines are omitted from this excerpt.
670 void DUnpagedMemoryManager::WipePages(DMemoryObject* aMemory, RPageArray::TIter aPageList)
672 TUint index = aPageList.Index();
673 TUint count = aPageList.Count();
674 TRACE(("DUnpagedMemoryManager::WipePages(0x%08x,0x%x,0x%x)",aMemory,index,count));
676 __NK_ASSERT_ALWAYS(!aMemory->IsReadOnly()); // trap wiping read-only memory
678 RamAllocLock::Lock();
682 // get some physical page addresses...
683 TPhysAddr pages[KMaxPagesInOneGo];
686 if(n>KMaxPagesInOneGo)
687 n = KMaxPagesInOneGo;
688 TInt r = aMemory->iPages.PhysAddr(index,n,physAddr,pages);
689 __NK_ASSERT_ALWAYS(r>=0); // caller should have ensured all pages are present
691 // wipe some pages...
692 TPhysAddr* pagesToWipe = r!=0 ? pages : (TPhysAddr*)((TLinAddr)physAddr|1);
693 TheMmu.PagesAllocated(pagesToWipe,n,aMemory->RamAllocFlags(),true);
700 RamAllocLock::Unlock();
// RAM-zone classification for pages owned by this manager. Per the in-line
// comment, unpaged memory is fixed; the return statement is on a line
// omitted from this excerpt (presumably EPageFixed — TODO confirm).
704 TZonePageType DUnpagedMemoryManager::PageType()
705 {// Unpaged memory cannot be moved or discarded therefore it is fixed.
711 Manager for memory objects containing normal unpaged RAM, as
712 #DUnpagedMemoryManager, but which may be 'moved' by RAM
713 defragmentation. I.e. have the physical pages used to store its content
714 substituted for others.
716 Such memory may cause transient page faults if it is accessed whilst its
717 contents are being moved, this makes it unsuitable for most kernel-side
718 usage. This is the memory management scheme normally used for unpaged user
// Manager for unpaged RAM whose physical pages may be substituted by RAM
// defragmentation; overrides MovePage/HandleFault/PageType accordingly.
// Singleton; exported through TheMovableMemoryManager.
721 class DMovableMemoryManager : public DUnpagedMemoryManager
724 // from DMemoryManager...
725 virtual TInt MovePage(DMemoryObject* aMemory, SPageInfo* aOldPageInfo, TPhysAddr& aNewPage, TUint aBlockZoneId, TBool aBlockRest);
726 virtual TInt HandleFault(	DMemoryObject* aMemory, TUint aIndex, DMemoryMapping* aMapping,
727 TUint aMapInstanceCount, TUint aAccessPermissions);
728 virtual TZonePageType PageType();
731 The single instance of this manager class.
733 static DMovableMemoryManager TheManager;
// Out-of-class definition of the singleton and its globally visible pointer.
737 DMovableMemoryManager DMovableMemoryManager::TheManager;
738 DMemoryManager* TheMovableMemoryManager = &DMovableMemoryManager::TheManager;
// Move one page of aMemory to a newly allocated physical page (RAM defrag).
// Overall phases (several connecting lines — braces, 'r = ...' error paths,
// MmuLock acquire/release — are omitted from this excerpt):
//  1. allocate the destination page (honouring zone blocking arguments);
//  2. claim the page array entry via MovePageStart;
//  3. restrict existing mappings so modification during the copy is detected;
//  4. copy contents through two temporary kernel mappings;
//  5. if nothing modified/pinned/remapped the page meanwhile, publish the new
//     physical address, transfer the SPageInfo state and remap; otherwise
//     free the new page and leave the old one in place.
741 TInt DMovableMemoryManager::MovePage(	DMemoryObject* aMemory, SPageInfo* aOldPageInfo,
742 TPhysAddr& aNewPage, TUint aBlockZoneId, TBool aBlockRest)
744 __NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
746 // Allocate the new page to move to, ensuring that we use the page type of the
747 // manager assigned to this page.
750 TInt r = m.AllocRam(&newPage, 1, aMemory->RamAllocFlags(), aMemory->iManager->PageType(),
751 aBlockZoneId, aBlockRest);
753 {// Failed to allocate a new page to move the page to so can't continue.
760 TUint index = aOldPageInfo->Index();
761 TRACE(	("DMovableMemoryManager::MovePage(0x%08x,0x%08x,?,0x%08x,%d) index=0x%x",
762 aMemory,aOldPageInfo,aBlockZoneId,aBlockRest,index));
763 __NK_ASSERT_DEBUG(aMemory==aOldPageInfo->Owner());
765 // Mark the page as being moved and get a pointer to the page array entry.
766 RPageArray::TIter pageIter;
767 TPhysAddr* const movingPageArrayPtr = aMemory->iPages.MovePageStart(index, pageIter);
768 if (!movingPageArrayPtr)
769 {// Can't move the page another operation is being performed on it.
771 TheMmu.FreeRam(&newPage, 1, aMemory->iManager->PageType());
774 __NK_ASSERT_DEBUG(RPageArray::IsPresent(*movingPageArrayPtr));
775 TPhysAddr oldPageEntry = *movingPageArrayPtr;
776 TPhysAddr oldPage = oldPageEntry & ~KPageMask;
778 if (oldPage != aOldPageInfo->PhysAddr())
779 {// The address of page array entry and the page info should match except
780 // when the page is being shadowed.
781 __NK_ASSERT_DEBUG(SPageInfo::FromPhysAddr(oldPage)->Type() == SPageInfo::EShadow);
784 __NK_ASSERT_DEBUG((newPage & KPageMask) == 0);
785 __NK_ASSERT_DEBUG(newPage != oldPage);
787 // Set the modifier so we can detect if the page state is updated.
788 aOldPageInfo->SetModifier(&pageIter);
790 // Restrict the page ready for moving.
791 // Read only memory objects don't need to be restricted but we still need
792 // to discover any physically pinned mappings.
793 TBool pageRestrictedNA = !aMemory->IsReadOnly();
794 TRestrictPagesType restrictType = pageRestrictedNA ?
795 ERestrictPagesNoAccessForMoving :
796 ERestrictPagesForMovingFlag;
798 // This page's contents may be changed so restrict the page to no access
799 // so we can detect any access to it while we are moving it.
801 // This will clear the memory objects mapping added flag so we can detect any new mappings.
802 aMemory->RestrictPages(pageIter, restrictType);
804 const TUint KOldMappingSlot = 0;
805 const TUint KNewMappingSlot = 1;
806 const TAny* tmpPtrOld = NULL;
808 // Verify that page restricting wasn't interrupted, if it was then the page
809 // can't be moved so remap it.
810 // If the page array entry (*movingPageArrayPtr) has been modified then a pinning
811 // veto'd the preparation.
813 if (aOldPageInfo->CheckModified(&pageIter) || oldPageEntry != *movingPageArrayPtr)
814 {// Page is pinned or has been modified by another operation.
816 TheMmu.FreeRam(&newPage, 1, aMemory->iManager->PageType());
821 // Copy the contents of the page using some temporary mappings.
822 tmpPtrOld = (TAny*)TheMmu.MapTemp(oldPage, index, KOldMappingSlot);
823 tmpPtrNew = (TAny*)TheMmu.MapTemp(newPage, index, KNewMappingSlot);
824 pagecpy(tmpPtrNew, tmpPtrOld);
826 // Unmap and perform cache maintenance if the memory object is executable.
827 // Must do cache maintenance before we add any new mappings to the new page
828 // to ensure that any old instruction cache entries for the new page aren't
829 // picked up by any remapped executable mappings.
830 if (aMemory->IsExecutable())
831 CacheMaintenance::CodeChanged((TLinAddr)tmpPtrNew, KPageSize);
832 TheMmu.UnmapTemp(KNewMappingSlot);
834 TheMmu.UnmapTemp(KOldMappingSlot);
838 if (!aOldPageInfo->CheckModified(&pageIter) && oldPageEntry == *movingPageArrayPtr &&
839 !aMemory->MappingAddedFlag())
841 // The page has been copied without anyone modifying it so set the page
842 // array entry to new physical address and map the page.
843 RPageArray::PageMoveNewAddr(*movingPageArrayPtr, newPage);
845 // Copy across the page info data from the old page to the new.
846 SPageInfo& newPageInfo = *SPageInfo::FromPhysAddr(newPage);
847 newPageInfo = *aOldPageInfo;
848 if (aMemory->IsDemandPaged())
849 {// Let the pager deal with the live list links for this page if required.
850 ThePager.ReplacePage(*aOldPageInfo, newPageInfo);
// Failure path (connecting lines omitted): the freshly allocated page is
// returned to the system because the move was abandoned.
860 TheMmu.FreeRam(&newPage, 1, aMemory->iManager->PageType());
863 // Remap all mappings to the new physical address if the move was successful or
864 // back to the old page if the move failed.
865 // Invalidate the TLB for the page if old mappings still exist or new
866 // mappings were added but will be removed as the page can't be moved.
867 TBool invalidateTLB = !pageRestrictedNA || r != KErrNone;
868 aMemory->RemapPage(*movingPageArrayPtr, index, invalidateTLB);
871 {// Must wait until here as read only memory objects' mappings aren't
872 // all guaranteed to point to the new page until after RemapPage().
873 TheMmu.FreeRam(&oldPage, 1, aMemory->iManager->PageType());
875 // For testing purposes clear the old page to help detect any
876 // erroneous mappings to the old page.
877 memclr((TAny*)tmpPtrOld, KPageSize);
879 TheMmu.UnmapTemp(KOldMappingSlot); // Will invalidate the TLB entry for the mapping.
883 // indicate we've stopped moving memory now...
885 RPageArray::MovePageEnd(*movingPageArrayPtr);
// Page-fault handler for movable memory: a fault can legitimately occur while
// MovePage() has the page restricted. Under the MmuLock (acquire/release on
// omitted lines, tracked by the __UNLOCK_GUARD markers): validate the page
// array entry and the mapping instance, then let the mapping page the entry
// back in (MovingPageIn); clearing the page info's modifier tells the
// concurrent MovePage() that the page was touched so it must abandon the
// move. Defaults to KErrNotFound for invalid accesses.
892 TInt DMovableMemoryManager::HandleFault(DMemoryObject* aMemory, TUint aIndex, DMemoryMapping* aMapping,
893 TUint aMapInstanceCount, TUint aAccessPermissions)
895 TInt r = KErrNotFound;
898 __UNLOCK_GUARD_START(MmuLock);
899 TPhysAddr* const pageEntry = aMemory->iPages.PageEntry(aIndex);
900 if (!pageEntry || !RPageArray::IsPresent(*pageEntry) ||
901 aMapInstanceCount != aMapping->MapInstanceCount() || aMapping->BeingDetached())
902 {// The page isn't present or has been unmapped so invalid access.
906 if (aMapping->MovingPageIn(*pageEntry, aIndex))
907 {// The page was has been paged in as it was still mapped.
908 pageInfo = SPageInfo::FromPhysAddr(*pageEntry & ~KPageMask);
909 pageInfo->SetModifier(0); // Signal to MovePage() that the page has been paged in.
914 __UNLOCK_GUARD_END(MmuLock);
// RAM-zone classification: per the in-line comment these pages are movable;
// the return statement is on a line omitted from this excerpt (presumably
// EPageMovable — TODO confirm).
920 TZonePageType DMovableMemoryManager::PageType()
921 {// Movable memory object pages are movable.
927 Manager for memory objects containing normal unpaged RAM, which
928 as well as being 'movable', like #DMovableMemoryManager,
929 may also have regions marked as 'discardable'. Discardable pages may be
930 reclaimed (removed) by the system at any time; this state is controlled using
931 the functions #AllowDiscard and #DisallowDiscard.
933 This is used for the memory containing file system caches. Discardable memory
934 is managed using similar
// Manager for movable RAM that can additionally be donated to the pager as
// discardable (AllowDiscard) and reclaimed again (DisallowDiscard); also
// provides StealPage/RestrictPage for the pager. Singleton; exported through
// TheDiscardableMemoryManager.
936 class DDiscardableMemoryManager : public DMovableMemoryManager
939 // from DMemoryManager...
940 virtual TInt AllowDiscard(DMemoryObject* aMemory, TUint aIndex, TUint aCount);
941 virtual TInt DisallowDiscard(DMemoryObject* aMemory, TUint aIndex, TUint aCount);
942 virtual TInt StealPage(DMemoryObject* aMemory, SPageInfo* aPageInfo);
943 virtual TInt RestrictPage(DMemoryObject* aMemory, SPageInfo* aPageInfo, TRestrictPagesType aRestriction);
944 virtual TZonePageType PageType();
947 The single instance of this manager class.
949 static DDiscardableMemoryManager TheManager;
// Out-of-class definition of the singleton and its globally visible pointer.
953 DDiscardableMemoryManager DDiscardableMemoryManager::TheManager;
954 DMemoryManager* TheDiscardableMemoryManager = &DDiscardableMemoryManager::TheManager;
// Mark pages [aIndex,aIndex+aCount) as discardable: walk the region in
// KMaxPagesInOneGo batches and donate each batch to the pager's live list
// (ThePager.DonatePages), after which the system may reclaim them at any
// time. Enclosing loop/return lines are omitted from this excerpt.
957 TInt DDiscardableMemoryManager::AllowDiscard(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
959 TRACE2(("DDiscardableMemoryManager::AllowDiscard(0x%08x,0x%x,0x%x)",aMemory, aIndex, aCount));
960 __NK_ASSERT_DEBUG(MemoryObjectLock::IsHeld(aMemory));
962 // make iterator for region...
963 RPageArray::TIter pageIter;
964 aMemory->iPages.FindStart(aIndex,aCount,pageIter);
968 // find some pages...
969 RPageArray::TIter pageList;
970 TUint nFound = pageIter.Find(pageList);
977 while((n=pageList.Pages(pages,KMaxPagesInOneGo))!=0)
980 ThePager.DonatePages(n,pages);
984 pageIter.FindRelease(nFound);
988 aMemory->iPages.FindEnd(aIndex,aCount);
// Reverse of AllowDiscard: reclaim pages [aIndex,aIndex+aCount) from the
// pager so they become ordinary committed memory again. Fails (error-path
// lines omitted) when not all pages are still present — i.e. some were
// already discarded — otherwise reclaims in KMaxPagesInOneGo batches via
// ThePager.ReclaimPages. Enclosing loop/return lines are omitted from this
// excerpt.
994 TInt DDiscardableMemoryManager::DisallowDiscard(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
996 TRACE2(("DDiscardableMemoryManager::DisallowDiscard(0x%08x,0x%x,0x%x)",aMemory, aIndex, aCount));
997 __NK_ASSERT_DEBUG(MemoryObjectLock::IsHeld(aMemory));
1002 RPageArray::TIter pageIter;
1003 aMemory->iPages.FindStart(aIndex,aCount,pageIter);
1005 RPageArray::TIter pageList;
1006 TUint numPages = pageIter.Find(pageList);
1008 if(numPages!=aCount)
1010 // not all pages are present...
1017 while((n=pageList.Pages(pages,KMaxPagesInOneGo))!=0)
1020 r = ThePager.ReclaimPages(n,pages);
1026 // done with pages...
1028 pageIter.FindRelease(numPages);
1029 aMemory->iPages.FindEnd(aIndex,aCount);
// Steal a donated (discardable) page back from aMemory for reuse elsewhere,
// called by the pager with both the RamAllocLock and MmuLock held. Sequence
// (some connecting lines — lock flashes, braces, 'r = ...' assignments — are
// omitted from this excerpt):
//  1. StealPageStart claims the page array entry;
//  2. unmap the page from all mappings (MmuLock dropped for this, tracked by
//     the __UNLOCK_GUARD markers);
//  3. if anything modified the page state meanwhile (fault or pin), fail;
//  4. if the unmap was vetoed by a pin, restore the committed state and put
//     the page back on the pager's live list;
//  5. otherwise mark read-only, CleanPage(), and on success reset the page's
//     paging-manager data and hand it back via Mmu::PageFreed.
1035 TInt DDiscardableMemoryManager::StealPage(DMemoryObject* aMemory, SPageInfo* aPageInfo)
1037 TRACE2(("DDiscardableMemoryManager::StealPage(0x%08x,0x%08x)",aMemory,aPageInfo));
1038 __NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
1039 __NK_ASSERT_DEBUG(MmuLock::IsHeld());
1040 __UNLOCK_GUARD_START(MmuLock);
1042 TUint index = aPageInfo->Index();
1045 RPageArray::TIter pageList;
1046 TPhysAddr* p = aMemory->iPages.StealPageStart(index,pageList);
1047 __NK_ASSERT_DEBUG((*p&~KPageMask)==aPageInfo->PhysAddr()); // object should have our page
1049 aPageInfo->SetModifier(&pageList);
1051 __UNLOCK_GUARD_END(MmuLock);
1054 // unmap the page...
1055 aMemory->UnmapPages(pageList,false);
1059 __NK_ASSERT_DEBUG((*p&~KPageMask)==aPageInfo->PhysAddr()); // object should still have our page because freeing a page requires the RamAllocLock, which we hold
1061 if(aPageInfo->CheckModified(&pageList))
1063 // page state was changed, this can only happen if a page fault put this page
1064 // back into the committed state or if the page was pinned.
1065 // From either of these states it's possible to subsequently change
1066 // to any other state or use (so we can't assert anything here).
1071 // nobody else has modified page state, so we can...
1072 TPhysAddr page = *p;
1073 __NK_ASSERT_DEBUG(RPageArray::TargetStateIsDecommitted(page));
1074 if(page&RPageArray::EUnmapVetoed)
1076 // operation was vetoed, which means page had a pinned mapping but the pin
1077 // operation hadn't got around to removing the page from the live list,
1078 // we need to restore correct state...
1079 if(RPageArray::State(page)==RPageArray::EStealing)
1080 *p = (page&~(RPageArray::EStateMask|RPageArray::EUnmapVetoed))|RPageArray::ECommitted;
1082 // leave page in state it was before we attempted to steal it
1084 // put page back on live list so it doesn't get lost.
1085 // We put it at the start as if it were recently accessed because being pinned
1086 // counts as an access and we can't put it anywhere else otherwise when
1087 // page stealing retries it may get this same page again, potentially causing
1089 __NK_ASSERT_DEBUG(aPageInfo->PagedState()==SPageInfo::EUnpaged); // no one else has changed page since we removed it in DPager::StealPage
1090 ThePager.PagedIn(aPageInfo);
1096 // page successfully unmapped...
1097 aPageInfo->SetReadOnly(); // page not mapped, so must be read-only
1099 // if the page can be made clean...
1100 r = aMemory->iManager->CleanPage(aMemory,aPageInfo,p);
1104 // page successfully stolen...
1105 __NK_ASSERT_DEBUG((*p^page)<(TUint)KPageSize); // sanity check, page should still be allocated to us
1106 __NK_ASSERT_DEBUG(aPageInfo->IsDirty()==false);
1107 __NK_ASSERT_DEBUG(aPageInfo->IsWritable()==false);
1109 TPhysAddr pagerInfo = aPageInfo->PagingManagerData();
1111 __NK_ASSERT_ALWAYS((pagerInfo&(RPageArray::EFlagsMask|RPageArray::EStateMask)) == RPageArray::ENotPresent);
1113 TheMmu.PageFreed(aPageInfo);
1117 // only legitimate reason for failing the clean is if the page state was changed
1118 // by a page fault or by pinning, this should return KErrInUse...
1119 __NK_ASSERT_DEBUG(r==KErrInUse);
1124 aMemory->iPages.StealPageEnd(index,r==KErrNone ? 1 : 0);
1128 TRACE2(("DDiscardableMemoryManager::StealPage fail because preempted"));
1131 TRACE2(("DDiscardableMemoryManager::StealPage returns %d",r));
// Page-restriction hook: for ERestrictPagesNoAccessForOldPage the pager is
// deliberately told the restriction succeeded without doing anything (success
// return on omitted lines — presumably KErrNone, TODO confirm), for the
// reason given in the in-line comment; all other restriction types defer to
// the movable-memory implementation.
1136 TInt DDiscardableMemoryManager::RestrictPage(DMemoryObject* aMemory, SPageInfo* aPageInfo, TRestrictPagesType aRestriction)
1138 if(aRestriction==ERestrictPagesNoAccessForOldPage)
1140 // Lie to pager when it sets an old page inaccessible as we don't want to rejunvanate
1141 // the page if it is accessed as RChunk::Lock() should be used to remove the page from
1142 // the live list before accessing the page.
1145 return DMovableMemoryManager::RestrictPage(aMemory, aPageInfo, aRestriction);
// RAM-zone classification: discardable-object pages count as movable from the
// allocator's perspective (they only become 'discardable' once donated to the
// pager via AllowDiscard).
1149 TZonePageType DDiscardableMemoryManager::PageType()
1150 {// Discardable memory objects page are movable unless they are donated to the pager.
1151 return EPageMovable;
1157 Manager for memory objects containing memory mapped hardware devices or special
1158 purpose memory for which the physical addresses are fixed.
// Manager for memory objects whose pages are fixed physical addresses
// (memory-mapped hardware, special-purpose RAM). Pages are supplied by the
// caller via AddPages/AddContiguous rather than allocated by the manager.
// NOTE(review): this listing is elided — access specifiers, braces and some
// member lines of the original declaration are not shown here.
1160 class DHardwareMemoryManager : public DMemoryManager
1163 // from DMemoryManager...
1164 virtual void Destruct(DMemoryObject* aMemory);
1165 virtual TInt AddPages(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TPhysAddr* aPages);
1166 virtual TInt AddContiguous(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TPhysAddr aPhysAddr);
1167 virtual TInt RemovePages(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TPhysAddr* aPages);
1168 virtual TInt Pin(DMemoryObject* aMemory, DMemoryMappingBase* aMapping, TPinArgs& aPinArgs);
1169 virtual void Unpin(DMemoryObject* aMemory, DMemoryMappingBase* aMapping, TPinArgs& aPinArgs);
// Static helpers which keep each RAM page's SPageInfo consistent with the
// memory object(s) it is committed to.
1173 Update the page information structure for RAM added with #AddPages and #AddContiguous.
1175 This performs debug checks to ensure that any physical memory which is added to more than
1176 one memory object meets with the restriction imposed by the MMU and cache hardware.
1177 It also verifies that the RAM pages are of type SPageInfo::EPhysAlloc,
1178 i.e. were allocated with Epoc::AllocPhysicalRam or similar.
1180 This is only used when the physical addresses of the page being added to a memory
1181 object corresponds to RAM being managed by the kernel, i.e. physical addresses
1182 with an associated #SPageInfo structure.
1184 @param aMemory A memory object associated with this manager.
1185 @param aIndex Page index, within the memory, for the page.
1186 @param aPageInfo The page information structure of the RAM page.
1189 @post #MmuLock held.
1191 static void AssignPage(DMemoryObject* aMemory, TUint aIndex, SPageInfo* aPageInfo);
1194 Update the page information structure for RAM removed with #RemovePages.
1196 This is only used when the physical addresses of the page being removed from a memory
1197 object corresponds to RAM being managed by the kernel, i.e. physical addresses
1198 with an associated #SPageInfo structure.
1200 @param aMemory A memory object associated with this manager.
1201 @param aIndex Page index, within the memory, for the page.
1202 @param aPageInfo The page information structure of the RAM page.
1205 @post #MmuLock held.
1207 static void UnassignPage(DMemoryObject* aMemory, TUint aIndex, SPageInfo* aPageInfo);
1211 The single instance of this manager class.
1213 static DHardwareMemoryManager TheManager;
// Definition of the singleton instance, plus the pointer through which the
// rest of the kernel reaches this manager.
1217 DHardwareMemoryManager DHardwareMemoryManager::TheManager;
1218 DMemoryManager* TheHardwareMemoryManager = &DHardwareMemoryManager::TheManager;
// Destruct: remove every page from the memory object (undoing the SPageInfo
// bookkeeping via RemovePages) under the memory object lock.
// NOTE(review): elided listing — braces and any further teardown lines of the
// original are not shown here.
1221 void DHardwareMemoryManager::Destruct(DMemoryObject* aMemory)
1223 MemoryObjectLock::Lock(aMemory);
1224 RemovePages(aMemory,0,aMemory->iSizeInPages,0); // aPages==0: caller doesn't need the physical addresses returned
1225 MemoryObjectLock::Unlock(aMemory);
// AddPages: commit aCount pages with caller-supplied physical addresses
// (aPages) to the memory object at page index aIndex, then map them in all
// mappings. Requires the memory object lock. Returns KErrArgument if any
// address is not page aligned; on mapping failure the pages added so far are
// reverted via RemovePages.
1230 TInt DHardwareMemoryManager::AddPages(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TPhysAddr* aPages)
1232 TRACE2(("DHardwareMemoryManager::AddPages(0x%08x,0x%x,0x%x,?)",aMemory, aIndex, aCount));
1233 __NK_ASSERT_DEBUG(MemoryObjectLock::IsHeld(aMemory));
1235 // validate arguments...
1236 TPhysAddr* pages = aPages;
1237 TPhysAddr* pagesEnd = aPages+aCount;
1238 TPhysAddr checkMask = 0;
// OR all addresses together so a single mask test catches any unaligned page.
1239 do checkMask |= *pages++;
1240 while(pages<pagesEnd);
1241 if(checkMask&KPageMask)
1242 return KErrArgument;
1244 // check and allocate page array entries...
1245 RPageArray::TIter pageIter;
1246 TInt r = aMemory->iPages.AddStart(aIndex,aCount,pageIter);
// NOTE(review): lines 1247-1256 elided — error check on r, pointer reset,
// MmuLock acquisition and loop opening are not shown in this listing.
1252 TUint index = aIndex;
1257 MmuLock::Flash(flash,KMaxPageInfoUpdatesInOneGo/2); // flash twice as often because we're doing about twice the work as a simple page info update
1258 TPhysAddr pagePhys = *pages++;
// SafeFromPhysAddr returns 0 for physical addresses with no SPageInfo
// (e.g. pure hardware registers) — presumably guarded in an elided line.
1259 SPageInfo* pi = SPageInfo::SafeFromPhysAddr(pagePhys);
1261 AssignPage(aMemory,index,pi);
1264 while(pages<pagesEnd);
1268 RPageArray::TIter pageList = pageIter;
1269 pageIter.Add(aCount,aPages);
1270 r = aMemory->MapPages(pageList);
1272 // release page array entries...
1273 aMemory->iPages.AddEnd(aIndex,aCount);
1275 // revert if error...
1277 RemovePages(aMemory,aIndex,aCount,0);
// AddContiguous: commit aCount physically contiguous pages starting at
// aPhysAddr to the memory object at page index aIndex, then map them.
// Requires the memory object lock; KErrArgument if aPhysAddr is unaligned.
// On mapping failure the added pages are reverted via RemovePages.
1283 TInt DHardwareMemoryManager::AddContiguous(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TPhysAddr aPhysAddr)
1285 TRACE2(("DHardwareMemoryManager::AddContiguous(0x%08x,0x%x,0x%x,0x%08x)",aMemory, aIndex, aCount, aPhysAddr));
1286 __NK_ASSERT_DEBUG(MemoryObjectLock::IsHeld(aMemory));
1288 // validate arguments...
1289 if(aPhysAddr&KPageMask)
1290 return KErrArgument;
1292 // check and allocate page array entries...
1293 RPageArray::TIter pageIter;
1294 TInt r = aMemory->iPages.AddStart(aIndex,aCount,pageIter);
1298 RPageArray::TIter pageList = pageIter;
// Contiguous RAM has contiguous SPageInfo entries, so the range can be
// walked as [piStart,piEnd). SafeFromPhysAddr is 0 for non-RAM addresses —
// presumably guarded in an elided line.
1301 SPageInfo* piStart = SPageInfo::SafeFromPhysAddr(aPhysAddr);
1302 SPageInfo* piEnd = piStart+aCount;
1305 SPageInfo* pi = piStart;
1306 TUint index = aIndex;
// NOTE(review): loop opening and increments (lines 1307-1310, 1313-1319)
// are elided from this listing.
1311 MmuLock::Flash(flash,KMaxPageInfoUpdatesInOneGo/2); // flash twice as often because we're doing about twice the work as a simple page info update
1312 AssignPage(aMemory,index,pi);
1320 pageIter.AddContiguous(aCount,aPhysAddr);
1321 r = aMemory->MapPages(pageList);
1323 // release page array entries...
1324 aMemory->iPages.AddEnd(aIndex,aCount);
1326 // revert if error...
1328 RemovePages(aMemory,aIndex,aCount,0);
// RemovePages: decommit up to aCount pages starting at aIndex. For each page
// removed: unmap it, purge it from the cache, undo its SPageInfo bookkeeping
// (UnassignPage), and — when aPages is non-null — return its physical
// address to the caller (Destruct and the AddPages/AddContiguous revert
// paths pass 0). Requires the memory object lock.
1334 TInt DHardwareMemoryManager::RemovePages(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TPhysAddr* aPages)
1336 TRACE2(("DHardwareMemoryManager::RemovePages(0x%08x,0x%x,0x%x,?)",aMemory, aIndex, aCount));
1337 __NK_ASSERT_DEBUG(MemoryObjectLock::IsHeld(aMemory));
1339 RPageArray::TIter pageIter;
1340 aMemory->iPages.FindStart(aIndex,aCount,pageIter);
1345 // find some pages...
1346 RPageArray::TIter pageList;
1347 TUint n = pageIter.RemoveFind(pageList);
1351 // unmap some pages...
1352 aMemory->UnmapPages(pageList,true);
// Remove pages one at a time so each can be cleaned and unassigned.
1356 while(pageList.Remove(1,&pagePhys))
// NOTE(review): the aPages guard (lines 1357-1358) is elided — this store
// is presumably conditional on aPages being non-null.
1359 *aPages++ = pagePhys;
1362 __NK_ASSERT_DEBUG((pagePhys&KPageMask)==0);
// Index()-1 because Remove() has already advanced past this page.
1364 TUint index = pageList.Index()-1;
1365 SPageInfo* pi = SPageInfo::SafeFromPhysAddr(pagePhys);
// Purge any cached contents before the page leaves this memory object.
1367 TheMmu.CleanAndInvalidatePages(&pagePhys,1,aMemory->Attributes(),index);
1371 UnassignPage(aMemory,index,pi);
1377 pageIter.FindRelease(n);
1380 aMemory->iPages.FindEnd(aIndex,aCount);
// AssignPage: record that the RAM page described by aPageInfo is now used at
// page index aIndex of aMemory, and bump its use count. Must hold MmuLock.
// The page must have been allocated with Epoc::AllocPhysicalRam or similar
// (SPageInfo::EPhysAlloc). When the page is already mapped by another memory
// object, the MMU/cache constraints are enforced: matching page colour for
// L1-cached memory, and matching memory attributes — violations are fatal.
1386 void DHardwareMemoryManager::AssignPage(DMemoryObject* aMemory, TUint aIndex, SPageInfo* aPageInfo)
1388 TRACE2(("DHardwareMemoryManager::AssignPage(0x%08x,0x%x,phys=0x%08x)",aMemory, aIndex, aPageInfo->PhysAddr()));
1389 __NK_ASSERT_DEBUG(MmuLock::IsHeld());
1390 __NK_ASSERT_DEBUG(aPageInfo->Type()==SPageInfo::EPhysAlloc);
1391 TUint flags = aMemory->PageInfoFlags();
1392 if(aPageInfo->UseCount()==0)
1394 // not mapped yet...
1395 aPageInfo->SetMapped(aIndex,flags);
1399 // already mapped somewhere...
1400 TMemoryType type = (TMemoryType)(flags&KMemoryTypeMask);
1401 if(CacheMaintenance::IsCached(type))
1403 // memory is cached at L1, check colour matches existing mapping...
// XOR of the two indices exposes any page-colour bit difference.
1404 if( (aPageInfo->Index()^aIndex) & KPageColourMask )
1407 Kern::Printf("DHardwareMemoryManager::AssignPage BAD COLOUR");
1410 __NK_ASSERT_ALWAYS(0);
1413 // check memory type matches existing mapping...
1414 if( (aPageInfo->Flags()^flags) & EMemoryAttributeMask )
1417 Kern::Printf("DHardwareMemoryManager::AssignPage BAD MEMORY TYPE");
1420 __NK_ASSERT_ALWAYS(0);
1423 aPageInfo->IncUseCount();
1424 TRACE2(("DHardwareMemoryManager::AssignPage iUseCount=%d",aPageInfo->UseCount()));
// UnassignPage: drop one use of the RAM page described by aPageInfo (the
// inverse of AssignPage). Must hold MmuLock, and the use count must be
// non-zero. When the last use is dropped the page's contents are purged from
// the cache; the SetModifier/CheckModified pair detects whether another
// thread reused the page while the purge ran (MmuLock is presumably released
// around the cache operation in elided lines).
1428 void DHardwareMemoryManager::UnassignPage(DMemoryObject* aMemory, TUint aIndex, SPageInfo* aPageInfo)
1430 TRACE2(("DHardwareMemoryManager::UnassignPage(0x%08x,0x%x,phys=0x%08x)",aMemory, aIndex, aPageInfo->PhysAddr()));
1431 __NK_ASSERT_DEBUG(MmuLock::IsHeld());
1432 TRACE2(("DHardwareMemoryManager::UnassignPage iUseCount=%d",aPageInfo->UseCount()));
1433 __NK_ASSERT_DEBUG(aPageInfo->UseCount());
1434 if(!aPageInfo->DecUseCount())
1436 // page no longer being used by any memory object, make sure it's contents
1437 // are purged from the cache...
1438 TPhysAddr pagePhys = aPageInfo->PhysAddr();
1439 aPageInfo->SetModifier(&pagePhys);
1441 TheMmu.CleanAndInvalidatePages(&pagePhys,1,aMemory->Attributes(),aIndex);
1443 if(!aPageInfo->CheckModified(&pagePhys)) // if page has not been reused...
1444 aPageInfo->SetUncached(); // we know the memory is not in the cache
// Pin: hardware memory is never paged out, so pinning is delegated to the
// unpaged manager's implementation (explicitly qualified: non-virtual call).
1449 TInt DHardwareMemoryManager::Pin(DMemoryObject* aMemory, DMemoryMappingBase* aMapping, TPinArgs& aPinArgs)
1451 return ((DUnpagedMemoryManager*)this)->DUnpagedMemoryManager::Pin(aMemory,aMapping,aPinArgs);
// Unpin: counterpart of Pin above; delegates to the unpaged manager's
// implementation via an explicitly qualified (non-virtual) call.
1455 void DHardwareMemoryManager::Unpin(DMemoryObject* aMemory, DMemoryMappingBase* aMapping, TPinArgs& aPinArgs)
1457 ((DUnpagedMemoryManager*)this)->DUnpagedMemoryManager::Unpin(aMemory,aMapping,aPinArgs);
1463 // DPagedMemoryManager
// New: create a memory object as the base class does, but force the
// EMemoryCreateDemandPaged flag so the object is demand paged.
1466 TInt DPagedMemoryManager::New(DMemoryObject*& aMemory, TUint aSizeInPages, TMemoryAttributes aAttributes, TMemoryCreateFlags aCreateFlags)
1468 return DMemoryManager::New(aMemory, aSizeInPages, aAttributes, (TMemoryCreateFlags)(aCreateFlags | EMemoryCreateDemandPaged));
// Destruct: delegate to the unpaged manager's implementation (explicitly
// qualified, non-virtual call).
1472 void DPagedMemoryManager::Destruct(DMemoryObject* aMemory)
1474 ((DUnpagedMemoryManager*)this)->DUnpagedMemoryManager::Destruct(aMemory);
// StealPage: delegate to the discardable manager's implementation
// (explicitly qualified, non-virtual call).
1478 TInt DPagedMemoryManager::StealPage(DMemoryObject* aMemory, SPageInfo* aPageInfo)
1480 return ((DDiscardableMemoryManager*)this)->DDiscardableMemoryManager::StealPage(aMemory,aPageInfo);
// MovePage: delegate page relocation (e.g. for RAM defragmentation) to the
// movable memory manager instance.
1484 TInt DPagedMemoryManager::MovePage( DMemoryObject* aMemory, SPageInfo* aOldPageInfo,
1485 TPhysAddr& aNewPage, TUint aBlockZoneId, TBool aBlockRest)
1487 return TheMovableMemoryManager->MovePage(aMemory, aOldPageInfo, aNewPage, aBlockZoneId, aBlockRest);
// RestrictPage: apply an access restriction (aRestriction) to the page
// described by aPageInfo, e.g. making it read-only. Must hold MmuLock.
// Uses the modifier/compare protocol: if another thread changes the page
// state while the restriction is being applied, the attempt is abandoned
// and the caller sees failure rather than a half-applied restriction.
1491 TInt DPagedMemoryManager::RestrictPage(DMemoryObject* aMemory, SPageInfo* aPageInfo, TRestrictPagesType aRestriction)
1493 TRACE2(("DPagedMemoryManager::RestrictPage(0x%08x,0x%08x,%d)",aMemory,aPageInfo,aRestriction));
1494 __NK_ASSERT_DEBUG(MmuLock::IsHeld());
1496 TUint index = aPageInfo->Index();
1497 TInt r = KErrNotFound;
1500 TPhysAddr originalPage;
1501 RPageArray::TIter pageList;
// RestrictPageNAStart returns a pointer into the page array (0 on failure).
1502 TPhysAddr* p = aMemory->iPages.RestrictPageNAStart(index,pageList);
1506 __NK_ASSERT_DEBUG((originalPage&~KPageMask)==aPageInfo->PhysAddr());
// Mark the page so a concurrent modification can be detected below.
1508 aPageInfo->SetModifier(&pageList);
1513 aMemory->RestrictPages(pageList,aRestriction);
1518 if(aPageInfo->CheckModified(&pageList) || page!=originalPage/*page state changed*/)
1520 // page state was changed by someone else...
1525 // nobody else has modified page state, so restrictions successfully applied...
1526 *p = (page&~RPageArray::EStateMask)|RPageArray::ECommitted; // restore state
1527 aPageInfo->SetReadOnly();
1531 aMemory->iPages.RestrictPageNAEnd(index);
1535 TRACE2(("DPagedMemoryManager::RestrictPage fail because preempted or vetoed"));
1539 TRACE2(("DPagedMemoryManager::RestrictPage returns %d",r));
// HandleFault: service a paging fault on page aIndex of aMemory. If the page
// is already present it is simply used; otherwise a page read request object
// is acquired, a fresh page is allocated from the pager, its contents are
// read from backing store, and the page is committed (PageInDone) and mapped
// into aMapping. No path is allowed to fail with KErrNoMemory — running out
// of memory while handling a paging fault would be unrecoverable.
1544 TInt DPagedMemoryManager::HandleFault( DMemoryObject* aMemory, TUint aIndex, DMemoryMapping* aMapping,
1545 TUint aMapInstanceCount, TUint aAccessPermissions)
1548 pinArgs.iReadOnly = !(aAccessPermissions&EReadWrite);
1552 RPageArray::TIter pageList;
1553 TPhysAddr* p = aMemory->iPages.AddPageStart(aIndex,pageList);
1554 __NK_ASSERT_ALWAYS(p); // we should never run out of memory handling a paging fault
1556 TInt r = 1; // positive value to indicate nothing done
1558 // if memory object already has page, then we can use it...
1560 if(RPageArray::IsPresent(*p))
1562 r = PageInDone(aMemory,aIndex,0,p);
1563 __NK_ASSERT_DEBUG(r<=0); // can't return >0 as we didn't supply a new page
1569 // need to read page from backing store...
1571 // get paging request object...
1572 DPageReadRequest* req;
1575 r = AcquirePageReadRequest(req,aMemory,aIndex,1);
1576 __NK_ASSERT_DEBUG(r!=KErrNoMemory); // not allowed to allocate memory, therefore can't fail with KErrNoMemory
1579 // if someone else has since read our page, then we can use it...
// Re-check after acquiring the request object: another thread may have
// paged this page in while we waited.
1582 if(RPageArray::IsPresent(*p))
1584 r = PageInDone(aMemory,aIndex,0,p);
1585 __NK_ASSERT_DEBUG(r<=0); // can't return >0 as we didn't supply a new page
1590 while(r>0 && !req); // while not paged in && don't have a request object
1594 // still need to read page from backing store...
1598 r = ThePager.PageInAllocPages(&pagePhys,1,aMemory->RamAllocFlags());
1599 __NK_ASSERT_DEBUG(r!=KErrNoMemory);
1602 // read data for page...
1603 r = ReadPages(aMemory,aIndex,1,&pagePhys,req);
1604 __NK_ASSERT_DEBUG(r!=KErrNoMemory); // not allowed to allocate memory, therefore can't fail with KErrNoMemory
1607 // error, so free unused pages...
1608 ThePager.PageInFreePages(&pagePhys,1);
// Commit the newly read page; PageInDone reports whether our page or a
// concurrently supplied one was actually used.
1614 r = PageInDone(aMemory,aIndex,SPageInfo::FromPhysAddr(pagePhys),p);
1618 // new page actually used...
1626 // done with paging request object...
1632 if(r==KErrNone && aMapping)
1634 r = aMapping->PageIn(pageList, pinArgs, aMapInstanceCount);
1635 __NK_ASSERT_ALWAYS(r!=KErrNoMemory); // we should never run out of memory handling a paging fault
1636 #ifdef COARSE_GRAINED_TLB_MAINTENANCE
1641 // finished with this page...
1642 aMemory->iPages.AddPageEnd(aIndex,usedNew);
1644 __NK_ASSERT_ALWAYS(r!=KErrNoMemory); // we should never run out of memory handling a paging fault
// Pin: pin the whole region covered by the mapping by delegating to DoPin.
1649 TInt DPagedMemoryManager::Pin(DMemoryObject* aMemory, DMemoryMappingBase* aMapping, TPinArgs& aPinArgs)
1652 return DoPin(aMemory,aMapping->iStartIndex,aMapping->iSizeInPages,aMapping,aPinArgs);
// DoPin: pin aCount pages of aMemory starting at aIndex for aMapping.
// Pages already present are pinned directly (PageInPinnedDone); runs of
// absent pages are read from backing store in batches of up to
// DPageReadRequest::EMaxPages, using pages allocated from the pager.
// On success the mapping's EPagesPinned flag is set; on failure everything
// pinned so far is undone via DoUnpin. aPinArgs must already hold
// sufficient replacement pages (asserted on entry).
1656 TInt DPagedMemoryManager::DoPin(DMemoryObject* aMemory, TUint aIndex, TUint aCount, DMemoryMappingBase* aMapping, TPinArgs& aPinArgs)
1658 TRACE(("DPagedMemoryManager::DoPin(0x%08x,0x%08x,0x%08x,0x%08x)",aMemory, aIndex, aCount, aMapping));
1660 __NK_ASSERT_DEBUG(aPinArgs.HaveSufficientPages(aCount));
1662 // check and allocate page array entries...
1663 RPageArray::TIter pageList;
1664 TInt r = aMemory->iPages.AddStart(aIndex,aCount,pageList,true);
1668 RPageArray::TIter pageIter = pageList;
// Walk the range in chunks; 'pages' points at the page array entries for
// the current chunk.
1671 while((n=pageIter.Pages(pages,DPageReadRequest::EMaxPages))!=0)
1675 if(RPageArray::IsPresent(*pages))
1677 // pin page which is already committed to memory object...
1678 r = PageInPinnedDone(aMemory,pageIter.Index(),0,pages,aPinArgs);
1679 __NK_ASSERT_DEBUG(r<=0); // can't return >0 as we didn't supply a new page
1683 // count consecutive pages which need to be read...
1686 if(RPageArray::IsPresent(pages[i]))
1689 r = 1; // positive value to indicate nothing done
1696 // successfully pinned one page, so move on to next one...
1706 // need to read pages from backing store...
1708 // get paging request object...
1709 DPageReadRequest* req;
1714 r = AcquirePageReadRequest(req,aMemory,pageIter.Index(),n);
1717 // see if someone else has since read any of our pages...
// Re-scan: a concurrent fault may have brought some of the run in already.
1720 if(RPageArray::IsPresent(pages[i]))
1725 while(i==n && !req); // while still need all pages && don't have a request object
1727 // if don't need all pages any more...
1736 // keep count of number of pages actually added to memory object...
1740 TPhysAddr newPages[DPageReadRequest::EMaxPages];
1741 __NK_ASSERT_DEBUG(n<=DPageReadRequest::EMaxPages);
1742 r = ThePager.PageInAllocPages(newPages,n,aMemory->RamAllocFlags());
1745 // read data for pages...
1746 r = ReadPages(aMemory,pageIter.Index(),n,newPages,req);
1749 // error, so free unused pages...
1750 ThePager.PageInFreePages(newPages,n);
// Commit and pin each newly read page in turn.
1758 r = PageInPinnedDone(aMemory,
1760 SPageInfo::FromPhysAddr(newPages[i]),
1767 // new page actually used...
1773 // error, so free remaining unused pages...
1774 ThePager.PageInFreePages(newPages+(i+1),n-(i+1));
1775 // and update array for any pages already added...
1777 pageIter.Added(i,usedNew);
1784 // done with paging request object...
1789 break; // error, so give up
1791 // move on to next set of pages...
1792 pageIter.Added(n,usedNew);
1797 {// Page in the page with the pinning mapping, OK to get the instance count here
1798 // without any locking as the pinned mapping can't be reused for another purpose
1799 // during this method.
1800 r = aMapping->PageIn(pageList, aPinArgs, aMapping->MapInstanceCount());
1801 #ifdef COARSE_GRAINED_TLB_MAINTENANCE
1806 // release page array entries...
1807 aMemory->iPages.AddEnd(aIndex,aCount);
1811 // set EPagesPinned flag to indicate success...
1812 __NK_ASSERT_DEBUG((aMapping->Flags()&DMemoryMapping::EPagesPinned)==0);
1813 __e32_atomic_ior_ord8(&aMapping->Flags(), (TUint8)DMemoryMapping::EPagesPinned);
1817 // cleanup on error...
1818 TUint pinnedCount = pageIter.Index()-aIndex; // number of pages actually pinned
1819 DoUnpin(aMemory,aIndex,pinnedCount,aMapping,aPinArgs);
// Unpin: undo a successful Pin of the mapping's whole region. The
// EPagesPinned flag guards against unpinning a mapping whose Pin failed
// (DoPin has already cleaned up in that case).
1826 void DPagedMemoryManager::Unpin(DMemoryObject* aMemory, DMemoryMappingBase* aMapping, TPinArgs& aPinArgs)
1829 // if mapping successfully pinned...
1830 if(aMapping->Flags()&DMemoryMapping::EPagesPinned)
1832 // then undo pinning...
1833 DoUnpin(aMemory,aMapping->iStartIndex,aMapping->iSizeInPages,aMapping,aPinArgs);
// DoUnpin: release the pin on aCount pages starting at aIndex, handing each
// page back to the pager, then clear the mapping's EPagesPinned flag.
// Every page in the range must still be present (asserted).
1838 void DPagedMemoryManager::DoUnpin(DMemoryObject* aMemory, TUint aIndex, TUint aCount, DMemoryMappingBase* aMapping, TPinArgs& aPinArgs)
1840 TRACE(("DPagedMemoryManager::DoUnpin(0x%08x,0x%08x,0x%08x,0x%08x,?)",aMemory, aIndex, aCount, aMapping));
1844 TUint endIndex = aIndex+aCount;
1845 for(TUint i=aIndex; i<endIndex; ++i)
1847 TPhysAddr page = aMemory->iPages.Page(i);
1848 __NK_ASSERT_DEBUG(RPageArray::IsPresent(page));
1849 __NK_ASSERT_DEBUG(SPageInfo::SafeFromPhysAddr(page&~KPageMask));
1850 ThePager.Unpin(SPageInfo::FromPhysAddr(page),aPinArgs);
1855 // clear EPagesPinned flag...
1856 __e32_atomic_and_ord8(&aMapping->Flags(), TUint8(~DMemoryMapping::EPagesPinned));
// DoCleanupDecommitted: free any decommitted pages across the whole memory
// object, under the memory object lock.
1860 void DPagedMemoryManager::DoCleanupDecommitted(DMemoryObject* aMemory)
1862 MemoryObjectLock::Lock(aMemory);
1863 FreeDecommitted(aMemory,0,aMemory->iSizeInPages);
1864 MemoryObjectLock::Unlock(aMemory);
// PageInDone: complete paging-in of one page (non-pinned variant).
// Delegates the commit to DoPageInDone, then notifies the pager of the page
// that ended up being used; debug code re-checks the page array entry's
// ownership and index.
1868 TInt DPagedMemoryManager::PageInDone(DMemoryObject* aMemory, TUint aIndex, SPageInfo* aPageInfo, TPhysAddr* aPageArrayEntry)
1870 TInt r = DoPageInDone(aMemory,aIndex,aPageInfo,aPageArrayEntry,false);
1873 ThePager.PagedIn(aPageInfo);
1875 // check page assigned correctly...
1877 if(RPageArray::IsPresent(*aPageArrayEntry))
1879 SPageInfo* pi = SPageInfo::FromPhysAddr(*aPageArrayEntry);
1880 __NK_ASSERT_DEBUG(pi->Owner()==aMemory);
1881 __NK_ASSERT_DEBUG(pi->Index()==aIndex);
// PageInPinnedDone: as PageInDone but for the pinning path — the pager is
// told the page is pinned (PagedInPinned), and debug code additionally
// verifies the page's paged state is EPagedPinned.
1889 TInt DPagedMemoryManager::PageInPinnedDone(DMemoryObject* aMemory, TUint aIndex, SPageInfo* aPageInfo, TPhysAddr* aPageArrayEntry, TPinArgs& aPinArgs)
1891 TInt r = DoPageInDone(aMemory,aIndex,aPageInfo,aPageArrayEntry,true);
1894 ThePager.PagedInPinned(aPageInfo,aPinArgs);
1896 // check page assigned correctly...
1898 if(RPageArray::IsPresent(*aPageArrayEntry))
1900 SPageInfo* pi = SPageInfo::FromPhysAddr(*aPageArrayEntry);
1901 __NK_ASSERT_DEBUG(pi->Owner()==aMemory);
1902 __NK_ASSERT_DEBUG(pi->Index()==aIndex);
1904 __NK_ASSERT_DEBUG(pi->PagedState()==SPageInfo::EPagedPinned);
// DoPageInDone: core of PageInDone/PageInPinnedDone. Decide whether the
// newly read page (aPageInfo, may be 0) or a page concurrently committed by
// someone else ends up in the page array entry, assign it to the memory
// object, and write the entry as ECommitted. aPageInfo is an in/out
// parameter: on return it names the page actually used. Must hold MmuLock.
// Returns KErrNotFound if the page was decommitted meanwhile or no usable
// page exists; the elided lines presumably set the positive/zero result
// convention asserted by the callers.
1912 TInt DPagedMemoryManager::DoPageInDone(DMemoryObject* aMemory, TUint aIndex, SPageInfo*& aPageInfo, TPhysAddr* aPageArrayEntry, TBool aPinning)
1914 TRACE(("DPagedMemoryManager::DoPageInDone(0x%08x,0x%08x,0x%08x,?,%d)",aMemory,aIndex,aPageInfo,aPinning));
1915 __NK_ASSERT_DEBUG(MmuLock::IsHeld());
1917 __UNLOCK_GUARD_START(MmuLock);
1919 SPageInfo* pi = aPageInfo;
1921 if(!IsAllocated(aMemory,aIndex,1))
1923 // memory has been decommitted from memory object...
1925 ThePager.PagedInUnneeded(pi);
1926 __UNLOCK_GUARD_END(MmuLock);
1928 return KErrNotFound;
1931 TPhysAddr oldPage = *aPageArrayEntry;
1932 TBool useNew = (bool)!RPageArray::IsPresent(oldPage);
1937 __UNLOCK_GUARD_END(MmuLock);
1938 // aPageInfo = 0; // this is already set to zero
1939 return KErrNotFound; // no new page to use
1942 // assign page to memory object...
1943 pi->SetManaged(aMemory,aIndex,aMemory->PageInfoFlags());
1945 ThePager.Event(DPager::EEventPageInNew,pi);
1947 // save any paging manager data stored in page array before we overwrite it...
1948 pi->SetPagingManagerData(*aPageArrayEntry);
1952 __NK_ASSERT_DEBUG(!pi); // should only have read new page if none present
1954 // discard new page...
1956 ThePager.PagedInUnneeded(pi);
1958 // check existing page can be committed...
1959 if(RPageArray::State(oldPage)<=RPageArray::EDecommitting)
1961 __UNLOCK_GUARD_END(MmuLock);
1963 return KErrNotFound;
1966 // and use one we already have...
1967 SPageInfo* newPage = SPageInfo::FromPhysAddr(oldPage);
1969 if(!pi && !aPinning)
1970 ThePager.Event(DPager::EEventPageInAgain,newPage);
1973 pi->SetModifier(0); // so observers see page state has changed
1976 // set page array entry...
1977 TPhysAddr pagePhys = pi->PhysAddr();
1978 *aPageArrayEntry = pagePhys|RPageArray::ECommitted;
1980 // return the page we actually used...
1983 __UNLOCK_GUARD_END(MmuLock);
// Decompress: expand aSrcBytes of data at aSrc into the buffer at aDst
// according to aCompressionType. Supports uncompressed data (plain memcpy;
// sizes must match) and byte-pair compression (SRomPageInfo::EBytePair /
// KUidCompressionBytePair) via the generic BytePairDecompress(). Any other
// type yields KErrNotSupported. Emits BTrace paging events when enabled.
1988 TInt DPagedMemoryManager::Decompress(TUint32 aCompressionType, TLinAddr aDst, TUint aDstBytes, TLinAddr aSrc, TUint aSrcBytes)
1990 #ifdef BTRACE_PAGING_VERBOSE
1991 BTraceContext4(BTrace::EPaging, BTrace::EPagingDecompressStart, aCompressionType);
1994 switch(aCompressionType)
// NOTE(review): the case label for the uncompressed branch (around line
// 1995-1996) is elided from this listing.
1997 __NK_ASSERT_DEBUG(aSrcBytes == aDstBytes);
1998 memcpy((void*)aDst, (void*)aSrc, aSrcBytes);
2002 case SRomPageInfo::EBytePair:
2003 case KUidCompressionBytePair:
2005 TUint8* srcNext = 0;
2006 START_PAGING_BENCHMARK;
2007 r = BytePairDecompress((TUint8*)aDst, aDstBytes, (TUint8*)aSrc, aSrcBytes, srcNext);
2008 END_PAGING_BENCHMARK(EPagingBmDecompress);
2011 // decompression successful so check srcNext points to the end of the compressed data...
2012 __NK_ASSERT_ALWAYS((TLinAddr)srcNext == aSrc + aSrcBytes);
2018 r = KErrNotSupported;
2021 #ifdef BTRACE_PAGING_VERBOSE
2022 BTraceContext0(BTrace::EPaging, BTrace::EPagingDecompressEnd);
// AcquirePageWriteRequest: not supported by this manager — writing pages
// out is not implemented here, and reaching this is a programming error
// (fatal assert in all builds).
2028 TInt DPagedMemoryManager::AcquirePageWriteRequest(DPageWriteRequest*& aRequest, DMemoryObject* aMemory, TUint aIndex, TUint aCount)
2030 __NK_ASSERT_ALWAYS(0);
2031 return KErrNotSupported;
// WritePages: not supported by this manager (see AcquirePageWriteRequest);
// reaching this is a programming error (fatal assert in all builds).
2035 TInt DPagedMemoryManager::WritePages(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TPhysAddr* aPages, DPageWriteRequest* aRequest)
2037 __NK_ASSERT_ALWAYS(0);
2038 return KErrNotSupported;
// PageType: classification of this manager's pages for the RAM allocator.
2041 TZonePageType DPagedMemoryManager::PageType()
2042 {// Paged manager's pages should be discardable and will actually be freed by
2043 // the pager so this value won't be used.
2044 return EPageDiscard;