// Copyright (c) 2005-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
#include "maddressspace.h"
#include "mpagearray.h"
#include "cache_maintenance.inl"
const TUint16 KDefaultYoungOldRatio = 3;
const TUint16 KDefaultMinPages = 256;
#ifdef _USE_OLDEST_LISTS
const TUint16 KDefaultOldOldestRatio = 3;
#endif

const TUint KMinOldPages = 1;

/* On a 32 bit system without PAE we can't have more than 2^(32-KPageShift) pages.
 * Subtract 1 so it doesn't overflow when converted to bytes.
 */
const TUint KAbsoluteMaxPageCount = (1u<<(32-KPageShift))-1u;
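// For example, with 4KB pages (KPageShift==12) this allows up to 2^20-1 pages,
// i.e. just under 4GB of RAM.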
DPager::DPager()
: iMinimumPageCount(0), iMaximumPageCount(0), iYoungOldRatio(0),
iYoungCount(0),iOldCount(0),iNumberOfFreePages(0)
{
}
void DPager::Init2()
{
TRACEB(("DPager::Init2()"));

#if defined(__CPU_ARM)

/** Minimum number of young pages the demand paging live list may have.
Need at least 4 mapped pages to guarantee to be able to execute all ARM instructions,
plus enough pages for 4 page tables to map those pages, plus enough pages for the
page table info structures of those page tables.
(Worst case is a Thumb-2 STM instruction with both instruction and data straddling chunk
boundaries.)
*/
iMinYoungPages = 4 // pages
+(4+KPtClusterSize-1)/KPtClusterSize // page table pages
+(4+KPageTableInfosPerPage-1)/KPageTableInfosPerPage; // page table info pages
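// E.g. if a page holds 4 page tables and 512 page table info structures (illustrative
// values; both constants are platform-defined), this evaluates to 4+1+1 = 6 pages.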
#elif defined(__CPU_X86)

/* Need at least 6 mapped pages to guarantee to be able to execute all X86 instructions,
plus enough pages for 6 page tables to map those pages, plus enough pages for the
page table info structures of those page tables.
(Worst case is (?) a MOV [X],[Y] instruction with instruction, 'X' and 'Y' all
straddling chunk boundaries.)
*/
iMinYoungPages = 6 // pages
+(6+KPtClusterSize-1)/KPtClusterSize // page table pages
+(6+KPageTableInfosPerPage-1)/KPageTableInfosPerPage; // page table info pages

#endif
// Adjust min page count so that all CPUs are guaranteed to make progress.
// NOTE: Can't use NKern::NumberOfCpus here because we haven't finished booting yet and will
// always have only one CPU running at this point...

// TODO: Before we can enable this the base test configuration needs
// updating to have a sufficient minimum page size...
// iMinYoungPages *= KMaxCpus;

// A minimum young/old ratio of 1 means that we need at least twice iMinYoungPages pages...
iAbsoluteMinPageCount = 2*iMinYoungPages;

__NK_ASSERT_DEBUG(KMinOldPages<=iAbsoluteMinPageCount/2);
// initialise live list...
TUint minimumPageCount = 0;
TUint maximumPageCount = 0;

SDemandPagingConfig config = TheRomHeader().iDemandPagingConfig;

iMinimumPageCount = KDefaultMinPages;
if(minimumPageCount)
iMinimumPageCount = minimumPageCount;
if(config.iMinPages)
iMinimumPageCount = config.iMinPages;
if(iMinimumPageCount<iAbsoluteMinPageCount)
iMinimumPageCount = iAbsoluteMinPageCount;
iInitMinimumPageCount = iMinimumPageCount;

iMaximumPageCount = KMaxTInt;
if(maximumPageCount)
iMaximumPageCount = maximumPageCount;
if(config.iMaxPages)
iMaximumPageCount = config.iMaxPages;
if (iMaximumPageCount > KAbsoluteMaxPageCount)
iMaximumPageCount = KAbsoluteMaxPageCount;
iInitMaximumPageCount = iMaximumPageCount;
iYoungOldRatio = KDefaultYoungOldRatio;
if(config.iYoungOldRatio)
iYoungOldRatio = config.iYoungOldRatio;
TInt ratioLimit = (iMinimumPageCount-KMinOldPages)/KMinOldPages;
if(iYoungOldRatio>ratioLimit)
iYoungOldRatio = ratioLimit;
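// The limit follows from requiring at least KMinOldPages old pages: with a young:old
// split of R:1 the old pages number iMinimumPageCount/(R+1), so requiring
// iMinimumPageCount/(R+1) >= KMinOldPages gives R <= (iMinimumPageCount-KMinOldPages)/KMinOldPages.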
#ifdef _USE_OLDEST_LISTS
iOldOldestRatio = KDefaultOldOldestRatio;
if(config.iSpare[2])
iOldOldestRatio = config.iSpare[2];
#endif

iMinimumPageLimit = (iMinYoungPages * (1 + iYoungOldRatio)) / iYoungOldRatio;
if(iMinimumPageLimit<iAbsoluteMinPageCount)
iMinimumPageLimit = iAbsoluteMinPageCount;
TRACEB(("DPager::Init2() live list min=%d max=%d ratio=%d",iMinimumPageCount,iMaximumPageCount,iYoungOldRatio));

if(iMaximumPageCount<iMinimumPageCount)
__NK_ASSERT_ALWAYS(0);
//
// This routine doesn't acquire any mutexes because it should be called before the system
// is fully up and running, i.e. before another thread can preempt this one.
//

// Calculate page counts
TUint minOldAndOldest = iMinimumPageCount / (1 + iYoungOldRatio);
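// E.g. with the defaults (min=256 pages, ratio=3) this gives 64 old/oldest pages,
// leaving 192 young pages.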
if(minOldAndOldest < KMinOldPages)
__NK_ASSERT_ALWAYS(0);
if (iMinimumPageCount < minOldAndOldest)
__NK_ASSERT_ALWAYS(0);

TUint minYoung = iMinimumPageCount - minOldAndOldest;
if(minYoung < iMinYoungPages)
__NK_ASSERT_ALWAYS(0); // Need at least iMinYoungPages pages mapped to execute worst case CPU instruction

#ifdef _USE_OLDEST_LISTS
// There should always be enough old pages to allow the oldest lists ratio.
TUint oldestCount = minOldAndOldest / (1 + iOldOldestRatio);
if(oldestCount < KMinOldPages)
__NK_ASSERT_ALWAYS(0);
#endif
iNumberOfFreePages = 0;
iNumberOfDirtyPages = 0;

// Allocate RAM pages and put them all on the old list
RamAllocLock::Lock();
iYoungCount = 0;
iOldCount = 0;
#ifdef _USE_OLDEST_LISTS
iOldestCleanCount = 0;
iOldestDirtyCount = 0;
#endif
Mmu& m = TheMmu;
for(TUint i=0; i<iMinimumPageCount; i++)
{
// Allocate a single page
TPhysAddr pagePhys;
TInt r = m.AllocRam(&pagePhys, 1,
(Mmu::TRamAllocFlags)(EMemAttNormalCached|Mmu::EAllocNoWipe|Mmu::EAllocNoPagerReclaim),
EPageDiscard);
if(r!=KErrNone)
__NK_ASSERT_ALWAYS(0);
MmuLock::Lock();
AddAsFreePage(SPageInfo::FromPhysAddr(pagePhys));
MmuLock::Unlock();
}
RamAllocLock::Unlock();
#ifdef _USE_OLDEST_LISTS
TRACEB(("DPager::Init2() end with young=%d old=%d oldClean=%d oldDirty=%d min=%d free=%d max=%d",iYoungCount,iOldCount,iOldestCleanCount,iOldestDirtyCount,iMinimumPageCount,iNumberOfFreePages,iMaximumPageCount));
#else
TRACEB(("DPager::Init2() end with young=%d old=%d min=%d free=%d max=%d",iYoungCount,iOldCount,iMinimumPageCount,iNumberOfFreePages,iMaximumPageCount));
#endif
}
TBool DPager::CheckLists()
{
__NK_ASSERT_DEBUG(MmuLock::IsHeld());
SDblQueLink* head = &iOldList.iA;
SDblQueLink* link = head;
head = &iYoungList.iA;
link = head;
// TRACEP(("DP: y=%d o=%d f=%d",iYoungCount,iOldCount,iNumberOfFreePages));
return true;
}
void DPager::TraceCounts()
{
TRACEP(("DP: y=%d o=%d f=%d min=%d max=%d ml=%d res=%d",
iYoungCount,iOldCount,iNumberOfFreePages,iMinimumPageCount,
iMaximumPageCount,iMinimumPageLimit,iReservePageCount));
}
TBool DPager::HaveTooManyPages()
{
__NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
__NK_ASSERT_DEBUG(MmuLock::IsHeld());
return iMinimumPageCount+iNumberOfFreePages > iMaximumPageCount;
}

TBool DPager::HaveMaximumPages()
{
__NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
__NK_ASSERT_DEBUG(MmuLock::IsHeld());
return iMinimumPageCount+iNumberOfFreePages >= iMaximumPageCount;
}
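// Note: as the two checks above show, the current live list size is accounted as
// iMinimumPageCount+iNumberOfFreePages; iNumberOfFreePages counts the pages held
// above the minimum, which can be handed back to the system.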
void DPager::AddAsYoungestPage(SPageInfo* aPageInfo)
{
__NK_ASSERT_DEBUG(MmuLock::IsHeld());
__NK_ASSERT_DEBUG(CheckLists());
__NK_ASSERT_DEBUG(aPageInfo->PagedState()==SPageInfo::EUnpaged);

aPageInfo->SetPagedState(SPageInfo::EPagedYoung);
iYoungList.AddHead(&aPageInfo->iLink);
++iYoungCount;
}
void DPager::AddAsFreePage(SPageInfo* aPageInfo)
{
__NK_ASSERT_DEBUG(MmuLock::IsHeld());
__NK_ASSERT_DEBUG(CheckLists());

__NK_ASSERT_DEBUG(aPageInfo->PagedState()==SPageInfo::EUnpaged);
TheMmu.PageFreed(aPageInfo);
__NK_ASSERT_DEBUG(aPageInfo->PagedState()==SPageInfo::EUnpaged);

// add as oldest page...
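// (A freed page is added at the oldest end rather than the youngest: it is then the
// first candidate for stealing, so pages which still hold live content stay resident longer.)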
#ifdef _USE_OLDEST_LISTS
aPageInfo->SetPagedState(SPageInfo::EPagedOldestClean);
iOldestCleanList.Add(&aPageInfo->iLink);
++iOldestCleanCount;
#else
aPageInfo->SetPagedState(SPageInfo::EPagedOld);
iOldList.Add(&aPageInfo->iLink);
++iOldCount;
#endif

Event(EEventPageInFree,aPageInfo);
}
TInt DPager::PageFreed(SPageInfo* aPageInfo)
{
__NK_ASSERT_DEBUG(MmuLock::IsHeld());
__NK_ASSERT_DEBUG(CheckLists());

switch(aPageInfo->PagedState())
{
case SPageInfo::EUnpaged:
return KErrNotFound;

case SPageInfo::EPagedYoung:
__NK_ASSERT_DEBUG(iYoungCount);
aPageInfo->iLink.Deque();
--iYoungCount;
break;

case SPageInfo::EPagedOld:
__NK_ASSERT_DEBUG(iOldCount);
aPageInfo->iLink.Deque();
--iOldCount;
break;

#ifdef _USE_OLDEST_LISTS
case SPageInfo::EPagedOldestClean:
__NK_ASSERT_DEBUG(iOldestCleanCount);
aPageInfo->iLink.Deque();
--iOldestCleanCount;
break;

case SPageInfo::EPagedOldestDirty:
__NK_ASSERT_DEBUG(iOldestDirtyCount);
aPageInfo->iLink.Deque();
--iOldestDirtyCount;
break;
#endif

case SPageInfo::EPagedPinned:
// this can occur if a pinned mapping is being unmapped when memory is decommitted.
// the decommit will have succeeded because the mapping no longer vetoes this,
// however the unpinning hasn't yet got around to changing the page state.
// When the state change happens the page will be put back on the live list so
// we don't have to do anything now...
return KErrNone;

case SPageInfo::EPagedPinnedMoved:
// This page was pinned when it was moved but it has not been returned
// to the free pool yet so make sure it is...
aPageInfo->SetPagedState(SPageInfo::EUnpaged); // Must be unpaged before returned to free pool.
return KErrNotFound;

default:
__NK_ASSERT_DEBUG(0);
return KErrNotFound;
}

// Update the dirty page count as required...
if (aPageInfo->IsDirty())
SetClean(*aPageInfo);

// add as oldest page...
#ifdef _USE_OLDEST_LISTS
aPageInfo->SetPagedState(SPageInfo::EPagedOldestClean);
iOldestCleanList.Add(&aPageInfo->iLink);
++iOldestCleanCount;
#else
aPageInfo->SetPagedState(SPageInfo::EPagedOld);
iOldList.Add(&aPageInfo->iLink);
++iOldCount;
#endif

return KErrNone;
}
extern TBool IsPageTableUnpagedRemoveAllowed(SPageInfo* aPageInfo);

void DPager::RemovePage(SPageInfo* aPageInfo)
{
__NK_ASSERT_DEBUG(MmuLock::IsHeld());
__NK_ASSERT_DEBUG(CheckLists());

switch(aPageInfo->PagedState())
{
case SPageInfo::EPagedYoung:
__NK_ASSERT_DEBUG(iYoungCount);
aPageInfo->iLink.Deque();
--iYoungCount;
break;

case SPageInfo::EPagedOld:
__NK_ASSERT_DEBUG(iOldCount);
aPageInfo->iLink.Deque();
--iOldCount;
break;

#ifdef _USE_OLDEST_LISTS
case SPageInfo::EPagedOldestClean:
__NK_ASSERT_DEBUG(iOldestCleanCount);
aPageInfo->iLink.Deque();
--iOldestCleanCount;
break;

case SPageInfo::EPagedOldestDirty:
__NK_ASSERT_DEBUG(iOldestDirtyCount);
aPageInfo->iLink.Deque();
--iOldestDirtyCount;
break;
#endif

case SPageInfo::EPagedPinned:
__NK_ASSERT_DEBUG(0);
case SPageInfo::EUnpaged:
#ifdef _DEBUG
if (!IsPageTableUnpagedRemoveAllowed(aPageInfo))
__NK_ASSERT_DEBUG(0);
break;
#endif
default:
__NK_ASSERT_DEBUG(0);
return;
}

aPageInfo->SetPagedState(SPageInfo::EUnpaged);
}
void DPager::ReplacePage(SPageInfo& aOldPageInfo, SPageInfo& aNewPageInfo)
{
__NK_ASSERT_DEBUG(MmuLock::IsHeld());
__NK_ASSERT_DEBUG(CheckLists());

__NK_ASSERT_DEBUG(aOldPageInfo.PagedState() == aNewPageInfo.PagedState());
switch(aOldPageInfo.PagedState())
{
case SPageInfo::EPagedYoung:
case SPageInfo::EPagedOld:
case SPageInfo::EPagedOldestClean:
case SPageInfo::EPagedOldestDirty:
{// Update the list links to point to the new page.
__NK_ASSERT_DEBUG(iYoungCount);
SDblQueLink* prevLink = aOldPageInfo.iLink.iPrev;
SDblQueLink* nextLink = aOldPageInfo.iLink.iNext;
__NK_ASSERT_DEBUG(prevLink == aOldPageInfo.iLink.iPrev);
__NK_ASSERT_DEBUG(prevLink->iNext == &aOldPageInfo.iLink);
__NK_ASSERT_DEBUG(nextLink == aOldPageInfo.iLink.iNext);
__NK_ASSERT_DEBUG(nextLink->iPrev == &aOldPageInfo.iLink);

aOldPageInfo.iLink.Deque();
aNewPageInfo.iLink.InsertAfter(prevLink);
aNewPageInfo.SetPagedState(aOldPageInfo.PagedState());
aOldPageInfo.SetPagedState(SPageInfo::EUnpaged);

__NK_ASSERT_DEBUG(prevLink == aNewPageInfo.iLink.iPrev);
__NK_ASSERT_DEBUG(prevLink->iNext == &aNewPageInfo.iLink);
__NK_ASSERT_DEBUG(nextLink == aNewPageInfo.iLink.iNext);
__NK_ASSERT_DEBUG(nextLink->iPrev == &aNewPageInfo.iLink);
}
break;

case SPageInfo::EPagedPinned:
// Mark the page as 'pinned moved' so that when the page moving invokes
// Mmu::FreeRam() it returns this page to the free pool.
aOldPageInfo.ClearPinCount();
aOldPageInfo.SetPagedState(SPageInfo::EPagedPinnedMoved);
break;

case SPageInfo::EPagedPinnedMoved:
// Shouldn't happen as the ram alloc mutex will be held for the
// entire time the page's paged state is EPagedPinnedMoved.
case SPageInfo::EUnpaged:
// Shouldn't happen as we only move pinned memory and unpinning will
// atomically add the page to the live list and it can't be removed
// from the live list without the ram alloc mutex.
__NK_ASSERT_DEBUG(0);
break;
}
}
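// StealOldestPage() below loops because StealPage() may release and re-acquire the
// MmuLock, after which the list ends can have changed and the steal must be retried
// on a fresh candidate.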
SPageInfo* DPager::StealOldestPage()
{
__NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
__NK_ASSERT_DEBUG(MmuLock::IsHeld());

for(;;)
{
// find oldest page in list...
SDblQueLink* link;
#ifdef _USE_OLDEST_LISTS
if (iOldestCleanCount)
{
__NK_ASSERT_DEBUG(!iOldestCleanList.IsEmpty());
link = iOldestCleanList.Last();
}
else if (iOldestDirtyCount)
{
__NK_ASSERT_DEBUG(!iOldestDirtyList.IsEmpty());
link = iOldestDirtyList.Last();
}
else if (iOldCount)
#else
if (iOldCount)
#endif
{
__NK_ASSERT_DEBUG(!iOldList.IsEmpty());
link = iOldList.Last();
}
else
{
__NK_ASSERT_DEBUG(iYoungCount);
__NK_ASSERT_ALWAYS(!iYoungList.IsEmpty());
link = iYoungList.Last();
}
SPageInfo* pageInfo = SPageInfo::FromLink(link);

// steal it from owning object...
TInt r = StealPage(pageInfo);
if(r==KErrNone)
return pageInfo; // done

// loop back and try again
}
}
TInt DPager::RestrictPage(SPageInfo* aPageInfo, TRestrictPagesType aRestriction)
{
TRACE(("DPager::RestrictPage(0x%08x,%d)",aPageInfo,aRestriction));
__NK_ASSERT_DEBUG(MmuLock::IsHeld());

TInt r;
if(aPageInfo->Type()==SPageInfo::EUnused)
{
// page was unused, so nothing to do...
r = KErrNone;
}
else
{
// get memory object which owns the page...
__NK_ASSERT_DEBUG(aPageInfo->Type()==SPageInfo::EManaged);
DMemoryObject* memory = aPageInfo->Owner();
memory->Open();

// try restricting access to page...
r = memory->iManager->RestrictPage(memory,aPageInfo,aRestriction);
__NK_ASSERT_DEBUG(r!=KErrNotSupported);

// close memory object...
MmuLock::Unlock();
memory->AsyncClose();
MmuLock::Lock();
}

TRACE(("DPager::RestrictPage returns %d",r));
return r;
}
TInt DPager::StealPage(SPageInfo* aPageInfo)
{
TRACE(("DPager::StealPage(0x%08x)",aPageInfo));
__NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
__NK_ASSERT_DEBUG(MmuLock::IsHeld());

__UNLOCK_GUARD_START(MmuLock);
RemovePage(aPageInfo);

TInt r;
if(aPageInfo->Type()==SPageInfo::EUnused)
{
// page was unused, so nothing to do...
r = KErrNone;
__UNLOCK_GUARD_END(MmuLock);
}
else
{
// get memory object which owns the page...
__NK_ASSERT_DEBUG(aPageInfo->Type()==SPageInfo::EManaged);
DMemoryObject* memory = aPageInfo->Owner();
memory->Open();

// try and steal page from memory object...
__UNLOCK_GUARD_END(MmuLock); // StealPage must be called without releasing the MmuLock
r = memory->iManager->StealPage(memory,aPageInfo);
__NK_ASSERT_DEBUG(r!=KErrNotSupported);

// close memory object...
MmuLock::Unlock();
memory->AsyncClose();
MmuLock::Lock();
}

if(r==KErrNone)
Event(EEventPageOut,aPageInfo);

TRACE(("DPager::StealPage returns %d",r));
return r;
}
TInt DPager::DiscardPage(SPageInfo* aOldPageInfo, TUint aBlockZoneId, TBool aBlockRest)
{
__NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
__NK_ASSERT_DEBUG(MmuLock::IsHeld());

TInt r;
// If the page is pinned or if the page is dirty and a general defrag is being
// performed then don't attempt to steal it.
if (aOldPageInfo->Type() != SPageInfo::EUnused &&
(aOldPageInfo->PagedState() == SPageInfo::EPagedPinned ||
(aBlockRest && aOldPageInfo->IsDirty())))
{// The page is pinned or is dirty and this is a general defrag so move the page.
DMemoryObject* memory = aOldPageInfo->Owner();
// Page must be managed if it is pinned or dirty.
__NK_ASSERT_DEBUG(aOldPageInfo->Type()==SPageInfo::EManaged);
__NK_ASSERT_DEBUG(memory);
MmuLock::Unlock();
TPhysAddr newAddr;
return memory->iManager->MovePage(memory, aOldPageInfo, newAddr, aBlockZoneId, aBlockRest);
}
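// If the live list is at its minimum size, a replacement page must be obtained from
// the system before the old page can be stolen, otherwise the list would drop below
// the minimum; the two branches below handle the two cases.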
if (!iNumberOfFreePages)
{
// Allocate a new page for the live list as it has reached its minimum size.
MmuLock::Unlock();
SPageInfo* newPageInfo = GetPageFromSystem((Mmu::TRamAllocFlags)(EMemAttNormalCached|Mmu::EAllocNoWipe),
aBlockZoneId, aBlockRest);
if (!newPageInfo)
return KErrNoMemory;

// Re-acquire the mmulock and re-check that the page is not pinned or dirty.
MmuLock::Lock();
if (aOldPageInfo->Type() != SPageInfo::EUnused &&
(aOldPageInfo->PagedState() == SPageInfo::EPagedPinned ||
(aBlockRest && aOldPageInfo->IsDirty())))
{// Page is now pinned or dirty so give up as it is in use.
ReturnPageToSystem(*newPageInfo);
MmuLock::Unlock();
return KErrInUse;
}

// Attempt to steal the page
r = StealPage(aOldPageInfo);
__NK_ASSERT_DEBUG(MmuLock::IsHeld());

if (r == KErrCompletion)
{// This was a page table that has been freed but added to the
// live list as a free page. Remove from live list and continue.
__NK_ASSERT_DEBUG(!aOldPageInfo->IsDirty());
RemovePage(aOldPageInfo);
r = KErrNone;
}

if (r == KErrNone)
{// Add the new page to the live list as discarding the old page
// will reduce the live list below the minimum.
AddAsFreePage(newPageInfo);
// We've successfully discarded the page so return it to the free pool.
ReturnPageToSystem(*aOldPageInfo);
}
else
{
// New page not required so just return it to the system. This is safe as
// iNumberOfFreePages will have this page counted but as it is not on the live list
// no one else can touch it.
ReturnPageToSystem(*newPageInfo);
}
}
else
{
// Attempt to steal the page
r = StealPage(aOldPageInfo);

__NK_ASSERT_DEBUG(MmuLock::IsHeld());

if (r == KErrCompletion)
{// This was a page table that has been freed but added to the
// live list as a free page. Remove from live list.
__NK_ASSERT_DEBUG(!aOldPageInfo->IsDirty());
RemovePage(aOldPageInfo);
r = KErrNone;
}

if (r == KErrNone)
{// We've successfully discarded the page so return it to the free pool.
ReturnPageToSystem(*aOldPageInfo);
}
}
MmuLock::Unlock();
return r;
}
TBool DPager::TryGrowLiveList()
{
__NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
__NK_ASSERT_DEBUG(MmuLock::IsHeld());

MmuLock::Unlock();
SPageInfo* sparePage = GetPageFromSystem((Mmu::TRamAllocFlags)(EMemAttNormalCached|Mmu::EAllocNoWipe));
MmuLock::Lock();

if(!sparePage)
return false;

// add page to live list...
AddAsFreePage(sparePage);
return true;
}
SPageInfo* DPager::GetPageFromSystem(Mmu::TRamAllocFlags aAllocFlags, TUint aBlockZoneId, TBool aBlockRest)
{
__NK_ASSERT_DEBUG(RamAllocLock::IsHeld());

TPhysAddr pagePhys;
TInt r = TheMmu.AllocRam(&pagePhys, 1,
(Mmu::TRamAllocFlags)(aAllocFlags|Mmu::EAllocNoPagerReclaim),
EPageDiscard, aBlockZoneId, aBlockRest);
if(r!=KErrNone)
return NULL;

MmuLock::Lock();
++iNumberOfFreePages;
MmuLock::Unlock();

return SPageInfo::FromPhysAddr(pagePhys);
}
void DPager::ReturnPageToSystem()
{
__NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
__NK_ASSERT_DEBUG(MmuLock::IsHeld());

ReturnPageToSystem(*StealOldestPage());
}

void DPager::ReturnPageToSystem(SPageInfo& aPageInfo)
{
__NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
__NK_ASSERT_DEBUG(MmuLock::IsHeld());

__NK_ASSERT_DEBUG(iNumberOfFreePages>0);
--iNumberOfFreePages;

MmuLock::Unlock();

TPhysAddr pagePhys = aPageInfo.PhysAddr();
TheMmu.FreeRam(&pagePhys, 1, EPageDiscard);

MmuLock::Lock();
}
SPageInfo* DPager::PageInAllocPage(Mmu::TRamAllocFlags aAllocFlags)
{
SPageInfo* pageInfo;
TPhysAddr pagePhys;

RamAllocLock::Lock();
MmuLock::Lock();
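// Allocation strategy: first try a free page already on the live list, then a page
// from the system pool, and only as a last resort steal the oldest live page.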
// try getting a free page from our live list...
#ifdef _USE_OLDEST_LISTS
if (iOldestCleanCount)
{
pageInfo = SPageInfo::FromLink(iOldestCleanList.Last());
if(pageInfo->Type()==SPageInfo::EUnused)
goto get_oldest;
}
#else
if (iOldCount)
{
pageInfo = SPageInfo::FromLink(iOldList.Last());
if(pageInfo->Type()==SPageInfo::EUnused)
goto get_oldest;
}
#endif

// try getting a free page from the system pool...
if(!HaveMaximumPages())
{
MmuLock::Unlock();
pageInfo = GetPageFromSystem(aAllocFlags);
if(pageInfo)
goto done;
MmuLock::Lock();
}

// as a last resort, steal a page from the live list...
get_oldest:
#ifdef _USE_OLDEST_LISTS
__NK_ASSERT_ALWAYS(iOldestCleanCount|iOldestDirtyCount|iOldCount|iYoungCount);
#else
__NK_ASSERT_ALWAYS(iOldCount|iYoungCount);
#endif
pageInfo = StealOldestPage();
MmuLock::Unlock();

// make page state same as a freshly allocated page...
pagePhys = pageInfo->PhysAddr();
TheMmu.PagesAllocated(&pagePhys,1,aAllocFlags);

done:
RamAllocLock::Unlock();
return pageInfo;
}
TBool DPager::GetFreePages(TInt aNumPages)
{
TRACE(("DPager::GetFreePages(%d)",aNumPages));

__NK_ASSERT_DEBUG(RamAllocLock::IsHeld());

MmuLock::Lock();
while(aNumPages>0 && (TInt)NumberOfFreePages()>=aNumPages)
{
ReturnPageToSystem();
--aNumPages;
}
MmuLock::Unlock();

TRACE(("DPager::GetFreePages returns %d",!aNumPages));
return !aNumPages;
}
void DPager::DonatePages(TUint aCount, TPhysAddr* aPages)
{
TRACE(("DPager::DonatePages(%d,?)",aCount));
RamAllocLock::Lock();
MmuLock::Lock();

TPhysAddr* end = aPages+aCount;
while(aPages<end)
{
TPhysAddr pagePhys = *aPages++;
if(RPageArray::State(pagePhys)!=RPageArray::ECommitted)
continue; // page is not present

#ifdef _DEBUG
SPageInfo* pi = SPageInfo::SafeFromPhysAddr(pagePhys&~KPageMask);
__NK_ASSERT_DEBUG(pi);
#else
SPageInfo* pi = SPageInfo::FromPhysAddr(pagePhys);
#endif

switch(pi->PagedState())
{
case SPageInfo::EUnpaged:
// Change the type of this page to discardable and
// then add it to live list.
// Only the DDiscardableMemoryManager should be invoking this and
// its pages will be movable before they are donated.
__NK_ASSERT_DEBUG(pi->Owner()->iManager->PageType() == EPageMovable);
TheMmu.ChangePageType(pi, EPageMovable, EPageDiscard);
break;

case SPageInfo::EPagedYoung:
case SPageInfo::EPagedOld:
#ifdef _USE_OLDEST_LISTS
case SPageInfo::EPagedOldestDirty:
case SPageInfo::EPagedOldestClean:
#endif
continue; // discard has already been allowed

case SPageInfo::EPagedPinned:
__NK_ASSERT_DEBUG(0);
default:
__NK_ASSERT_DEBUG(0);
continue;
}

// put page on live list...
AddAsYoungestPage(pi);
++iNumberOfFreePages;

Event(EEventPageDonate,pi);

// re-balance live list...
RemoveExcessPages();
BalanceAges();
}

MmuLock::Unlock();
RamAllocLock::Unlock();
}
TInt DPager::ReclaimPages(TUint aCount, TPhysAddr* aPages)
{
TRACE(("DPager::ReclaimPages(%d,?)",aCount));
RamAllocLock::Lock();
MmuLock::Lock();

TInt r = KErrNone;
TPhysAddr* end = aPages+aCount;
while(aPages<end)
{
TPhysAddr pagePhys = *aPages++;
TBool changeType = EFalse;

if(RPageArray::State(pagePhys)!=RPageArray::ECommitted)
{
r = KErrNotFound; // too late, page has gone
continue;
}

#ifdef _DEBUG
SPageInfo* pi = SPageInfo::SafeFromPhysAddr(pagePhys&~KPageMask);
__NK_ASSERT_DEBUG(pi);
#else
SPageInfo* pi = SPageInfo::FromPhysAddr(pagePhys);
#endif

switch(pi->PagedState())
{
case SPageInfo::EUnpaged:
continue; // discard has already been disallowed

case SPageInfo::EPagedYoung:
case SPageInfo::EPagedOld:
#ifdef _USE_OLDEST_LISTS
case SPageInfo::EPagedOldestClean:
case SPageInfo::EPagedOldestDirty:
#endif
changeType = ETrue;
break; // remove from live list

case SPageInfo::EPagedPinned:
__NK_ASSERT_DEBUG(0);
default:
__NK_ASSERT_DEBUG(0);
break;
}

// check paging list has enough pages before we remove one...
if(iNumberOfFreePages<1)
{
// need more pages so get a page from the system...
if(!TryGrowLiveList())
{
// out of memory...
r = KErrNoMemory;
break;
}
// retry the page reclaim...
--aPages;
continue;
}

if (changeType)
{// Change the type of this page to movable, wait until any retries
// have been attempted as we can't change a page's type twice.
// Only the DDiscardableMemoryManager should be invoking this and
// its pages should be movable once they are reclaimed.
__NK_ASSERT_DEBUG(pi->Owner()->iManager->PageType() == EPageMovable);
TheMmu.ChangePageType(pi, EPageDiscard, EPageMovable);
}

// remove page from paging list...
__NK_ASSERT_DEBUG(iNumberOfFreePages>0);
--iNumberOfFreePages;
RemovePage(pi);

Event(EEventPageReclaim,pi);

// re-balance live list...
BalanceAges();
}

// we may have added a spare free page to the live list without removing one,
// this could cause us to have too many pages, so deal with this...
RemoveExcessPages();

MmuLock::Unlock();
RamAllocLock::Unlock();
return r;
}
TInt VMHalFunction(TAny*, TInt aFunction, TAny* a1, TAny* a2);

void DPager::Init3()
{
TRACEB(("DPager::Init3()"));
TheRomMemoryManager->Init3();
TheDataPagedMemoryManager->Init3();
TheCodePagedMemoryManager->Init3();
TInt r = Kern::AddHalEntry(EHalGroupVM, VMHalFunction, 0);
__NK_ASSERT_ALWAYS(r==KErrNone);
}
void DPager::Fault(TFault aFault)
{
Kern::Fault("DPager",aFault);
}
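// BalanceAges() moves pages between the young and old lists to maintain the configured
// ratios. Old pages are made inaccessible, so a later access to one faults and
// rejuvenates it; this is what gives the live list its (approximately LRU) ordering.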
void DPager::BalanceAges()
{
__NK_ASSERT_DEBUG(MmuLock::IsHeld());
TBool restrictPage = EFalse;
SPageInfo* pageInfo = NULL;
#ifdef _USE_OLDEST_LISTS
TUint oldestCount = iOldestCleanCount + iOldestDirtyCount;
if((iOldCount + oldestCount) * iYoungOldRatio < iYoungCount)
#else
if (iOldCount * iYoungOldRatio < iYoungCount)
#endif
{
// Need more old pages so make one young page into an old page...
__NK_ASSERT_DEBUG(!iYoungList.IsEmpty());
__NK_ASSERT_DEBUG(iYoungCount);
SDblQueLink* link = iYoungList.Last()->Deque();
--iYoungCount;

pageInfo = SPageInfo::FromLink(link);
pageInfo->SetPagedState(SPageInfo::EPagedOld);

iOldList.AddHead(link);
++iOldCount;

Event(EEventPageAged,pageInfo);
// Delay restricting the page until it is safe to release the MmuLock.
restrictPage = ETrue;
}

#ifdef _USE_OLDEST_LISTS
// Check we have enough oldest pages.
if (oldestCount * iOldOldestRatio < iOldCount)
{
__NK_ASSERT_DEBUG(!iOldList.IsEmpty());
__NK_ASSERT_DEBUG(iOldCount);
SDblQueLink* link = iOldList.Last()->Deque();
--iOldCount;

SPageInfo* oldestPageInfo = SPageInfo::FromLink(link);
if (oldestPageInfo->IsDirty())
{
oldestPageInfo->SetPagedState(SPageInfo::EPagedOldestDirty);
iOldestDirtyList.AddHead(link);
++iOldestDirtyCount;
Event(EEventPageAgedDirty,oldestPageInfo);
}
else
{
oldestPageInfo->SetPagedState(SPageInfo::EPagedOldestClean);
iOldestCleanList.AddHead(link);
++iOldestCleanCount;
Event(EEventPageAgedClean,oldestPageInfo);
}
}
#endif

if (restrictPage)
{
// Make the recently aged old page inaccessible. This is done last as it
// will release the MmuLock and therefore the page counts may otherwise change.
RestrictPage(pageInfo,ERestrictPagesNoAccessForOldPage);
}
}
void DPager::RemoveExcessPages()
{
__NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
__NK_ASSERT_DEBUG(MmuLock::IsHeld());
while(HaveTooManyPages())
ReturnPageToSystem();
}
void DPager::RejuvenatePageTable(TPte* aPt)
{
SPageInfo* pi = SPageInfo::FromPhysAddr(Mmu::PageTablePhysAddr(aPt));

SPageTableInfo* pti = SPageTableInfo::FromPtPtr(aPt);
if(!pti->IsDemandPaged())
{
__NK_ASSERT_DEBUG(pi->PagedState()==SPageInfo::EUnpaged);
return;
}

TRACE2(("DP: %O Rejuvenate PT 0x%08x 0x%08x",TheCurrentThread,pi->PhysAddr(),aPt));
switch(pi->PagedState())
{
case SPageInfo::EPagedYoung:
case SPageInfo::EPagedOld:
#ifdef _USE_OLDEST_LISTS
case SPageInfo::EPagedOldestClean:
case SPageInfo::EPagedOldestDirty:
#endif
RemovePage(pi);
AddAsYoungestPage(pi);
BalanceAges();
break;

case SPageInfo::EUnpaged:
AddAsYoungestPage(pi);
BalanceAges();
break;

case SPageInfo::EPagedPinned:
break;

default:
__NK_ASSERT_DEBUG(0);
break;
}
}
TInt DPager::PteAndInfoFromLinAddr( TInt aOsAsid, TLinAddr aAddress, DMemoryMappingBase* aMapping,
TUint aMapInstanceCount, TPte*& aPte, SPageInfo*& aPageInfo)
{
__NK_ASSERT_DEBUG(MmuLock::IsHeld());

// Verify the mapping is still mapped and has not been reused.
if (aMapInstanceCount != aMapping->MapInstanceCount() || aMapping->BeingDetached())
return KErrNotFound;

aPte = Mmu::SafePtePtrFromLinAddr(aAddress,aOsAsid);
if(!aPte)
return KErrNotFound;

TPte pte = *aPte;
if(pte==KPteUnallocatedEntry)
return KErrNotFound;

SPageInfo* pi = SPageInfo::SafeFromPhysAddr(pte & ~KPageMask);
if(!pi)
return KErrNotFound;
aPageInfo = pi;

return KErrNone;
}
TInt DPager::TryRejuvenate( TInt aOsAsid, TLinAddr aAddress, TUint aAccessPermissions, TLinAddr aPc,
DMemoryMappingBase* aMapping, TUint aMapInstanceCount, DThread* aThread,
TAny* aExceptionInfo)
{
__NK_ASSERT_DEBUG(MmuLock::IsHeld());

SPageInfo* pi;
TPte* pPte;
TInt r = PteAndInfoFromLinAddr(aOsAsid, aAddress, aMapping, aMapInstanceCount, pPte, pi);
if (r != KErrNone)
{
if (aThread->IsRealtime())
{// This thread is real time so it shouldn't be accessing paged out paged memory
// unless there is a paging trap.

// Ensure that we abort when the thread is not allowed to access paged out pages.
if (CheckRealtimeThreadFault(aThread, aExceptionInfo) != KErrNone)
r = KErrAbort;
}
return r;
}

SPageInfo::TType type = pi->Type();
SPageInfo::TPagedState state = pi->PagedState();

if (aThread->IsRealtime() &&
state != SPageInfo::EPagedPinned &&
state != SPageInfo::EPagedPinnedMoved)
{// This thread is real time so it shouldn't be accessing unpinned paged memory
// unless there is a paging trap.

r = CheckRealtimeThreadFault(aThread, aExceptionInfo);
if (r != KErrNone)
return r;
// We had to release the MmuLock, so we have to reverify the status of the page and mappings.
r = PteAndInfoFromLinAddr(aOsAsid, aAddress, aMapping, aMapInstanceCount, pPte, pi);
if (r != KErrNone)
return r;
type = pi->Type();
state = pi->PagedState();
}

if (type != SPageInfo::EManaged)
return KErrNotFound;

if(state==SPageInfo::EUnpaged)
return KErrNotFound;

DMemoryObject* memory = pi->Owner();
TUint index = pi->Index();

TPte pte = *pPte;
TPhysAddr page = memory->iPages.Page(index);
if(!RPageArray::IsPresent(page))
return KErrNotFound;

TPhysAddr physAddr = pi->PhysAddr();
if ((page^physAddr) >= (TPhysAddr)KPageSize)
{// Page array entry should contain same physical address as PTE unless the
// page has or is being moved and this mapping accessed the page.
// Get the page info for the page that we should be using.
physAddr = page & ~KPageMask;
pi = SPageInfo::SafeFromPhysAddr(physAddr);
if(!pi)
return KErrNotFound;

type = pi->Type();
if (type!=SPageInfo::EManaged)
return KErrNotFound;

state = pi->PagedState();
if(state==SPageInfo::EUnpaged)
return KErrNotFound;

memory = pi->Owner();
index = pi->Index();

// Update pte to point to the correct physical address for this memory object's page.
pte = (pte & KPageMask) | physAddr;
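// (The low KPageMask bits of the PTE hold permission/attribute flags, so they are
// preserved while the address bits are replaced.)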
}

if(aAccessPermissions&EReadWrite)
{// The mapping that took the fault permits writes and is still attached
// to the memory object therefore the object can't be read only.
__NK_ASSERT_DEBUG(!memory->IsReadOnly());
SetWritable(*pi);
}

pte = Mmu::MakePteAccessible(pte,aAccessPermissions&EReadWrite);
TRACE2(("!PTE %x=%x",pPte,pte));
*pPte = pte;
CacheMaintenance::SinglePteUpdated((TLinAddr)pPte);
InvalidateTLBForPage((aAddress&~KPageMask)|aOsAsid);

Event(EEventPageRejuvenate,pi,aPc,aAddress,aAccessPermissions);

TBool balance = false;
#ifdef _USE_OLDEST_LISTS
if( state==SPageInfo::EPagedYoung || state==SPageInfo::EPagedOld ||
state==SPageInfo::EPagedOldestClean || state==SPageInfo::EPagedOldestDirty)
#else
if(state==SPageInfo::EPagedYoung || state==SPageInfo::EPagedOld)
#endif
{
RemovePage(pi);
AddAsYoungestPage(pi);
// delay BalanceAges because we don't want to release MmuLock until after
// RejuvenatePageTable has chance to look at the page table page...
balance = true;
}
else
{// Clear the modifier so that if this page is being moved then this
// access is detected. For non-pinned pages the modifier is cleared
// by RemovePage().
__NK_ASSERT_DEBUG(state==SPageInfo::EPagedPinned);
pi->SetModifier(0);
}

RejuvenatePageTable(pPte);

if(balance)
BalanceAges();

return KErrNone;
}
TInt DPager::PageInAllocPages(TPhysAddr* aPages, TUint aCount, Mmu::TRamAllocFlags aAllocFlags)
{
TUint n = 0;
while(n<aCount)
{
SPageInfo* pi = PageInAllocPage(aAllocFlags);
if(!pi)
goto fail;
aPages[n++] = pi->PhysAddr();
}
return KErrNone;
fail:
PageInFreePages(aPages,n);
return KErrNoMemory;
}
void DPager::PageInFreePages(TPhysAddr* aPages, TUint aCount)
{
while(aCount--)
{
MmuLock::Lock();
SPageInfo* pi = SPageInfo::FromPhysAddr(aPages[aCount]);
switch(pi->PagedState())
{
case SPageInfo::EPagedYoung:
case SPageInfo::EPagedOld:
#ifdef _USE_OLDEST_LISTS
case SPageInfo::EPagedOldestClean:
case SPageInfo::EPagedOldestDirty:
#endif
RemovePage(pi);
// fall through...
case SPageInfo::EUnpaged:
AddAsFreePage(pi);
break;

case SPageInfo::EPagedPinned:
__NK_ASSERT_DEBUG(0);
default:
__NK_ASSERT_DEBUG(0);
break;
}
MmuLock::Unlock();
}
}
void DPager::PagedInUnneeded(SPageInfo* aPageInfo)
{
__NK_ASSERT_DEBUG(MmuLock::IsHeld());
Event(EEventPageInUnneeded,aPageInfo);
AddAsFreePage(aPageInfo);
}
void DPager::PagedIn(SPageInfo* aPageInfo)
{
__NK_ASSERT_DEBUG(MmuLock::IsHeld());
switch(aPageInfo->PagedState())
{
case SPageInfo::EPagedYoung:
case SPageInfo::EPagedOld:
#ifdef _USE_OLDEST_LISTS
case SPageInfo::EPagedOldestClean:
case SPageInfo::EPagedOldestDirty:
#endif
RemovePage(aPageInfo);
AddAsYoungestPage(aPageInfo);
BalanceAges();
break;

case SPageInfo::EUnpaged:
AddAsYoungestPage(aPageInfo);
BalanceAges();
break;

case SPageInfo::EPagedPinned:
// Clear the modifier so that if this page is being moved then this
// access is detected. For non-pinned pages the modifier is cleared by RemovePage().
aPageInfo->SetModifier(0);
break;

default:
__NK_ASSERT_DEBUG(0);
break;
}
}
void DPager::PagedInPinned(SPageInfo* aPageInfo, TPinArgs& aPinArgs)
{
__NK_ASSERT_DEBUG(MmuLock::IsHeld());
Pin(aPageInfo,aPinArgs);
}
void DPager::Pin(SPageInfo* aPageInfo, TPinArgs& aPinArgs)
{
__NK_ASSERT_DEBUG(MmuLock::IsHeld());
__NK_ASSERT_DEBUG(aPinArgs.HaveSufficientPages(1));

aPageInfo->IncPinCount();
Event(EEventPagePin,aPageInfo);

// remove page from live list...
switch(aPageInfo->PagedState())
{
case SPageInfo::EPagedYoung:
__NK_ASSERT_DEBUG(iYoungCount);
aPageInfo->iLink.Deque();
--iYoungCount;
__NK_ASSERT_DEBUG(aPageInfo->PinCount()==1);
break;

case SPageInfo::EPagedOld:
__NK_ASSERT_DEBUG(iOldCount);
aPageInfo->iLink.Deque();
--iOldCount;
__NK_ASSERT_DEBUG(aPageInfo->PinCount()==1);
break;

#ifdef _USE_OLDEST_LISTS
case SPageInfo::EPagedOldestClean:
__NK_ASSERT_DEBUG(iOldestCleanCount);
aPageInfo->iLink.Deque();
--iOldestCleanCount;
__NK_ASSERT_DEBUG(aPageInfo->PinCount()==1);
break;

case SPageInfo::EPagedOldestDirty:
__NK_ASSERT_DEBUG(iOldestDirtyCount);
aPageInfo->iLink.Deque();
--iOldestDirtyCount;
__NK_ASSERT_DEBUG(aPageInfo->PinCount()==1);
break;
#endif

case SPageInfo::EPagedPinned:
// nothing more to do...
__NK_ASSERT_DEBUG(aPageInfo->PinCount()>1);
return;

case SPageInfo::EUnpaged:
__NK_ASSERT_DEBUG(aPageInfo->PinCount()==1);
TRACE2(("DPager::PinPage page was unpaged"));
// This could be a page in the process of being stolen.
// Could also be page for storing page table infos, which aren't necessarily
// on the live list.
break;

default:
__NK_ASSERT_DEBUG(0);
return;
}

// page has now been removed from the live list and is pinned...
aPageInfo->SetPagedState(SPageInfo::EPagedPinned);

if(aPinArgs.iReplacementPages==TPinArgs::EUseReserveForPinReplacementPages)
{
// pinned page counts as coming from reserve pool...
aPageInfo->SetPinnedReserve();
}
else
{
// we used up a replacement page...
--aPinArgs.iReplacementPages;
}

BalanceAges();
}
void DPager::Unpin(SPageInfo* aPageInfo, TPinArgs& aPinArgs)
{
__NK_ASSERT_DEBUG(MmuLock::IsHeld());
__NK_ASSERT_DEBUG(aPageInfo->PagedState()==SPageInfo::EPagedPinned);
__NK_ASSERT_DEBUG(aPageInfo->PinCount()>0);

TUint pinCount = aPageInfo->DecPinCount();
Event(EEventPageUnpin,aPageInfo);

if(pinCount)
return;

aPageInfo->SetPagedState(SPageInfo::EUnpaged);

if(!aPageInfo->ClearPinnedReserve())
{
// was not a pinned reserve page, so we now have a spare replacement page,
// which can be used again or freed later ...
__NK_ASSERT_DEBUG(aPinArgs.iReplacementPages!=TPinArgs::EUseReserveForPinReplacementPages);
++aPinArgs.iReplacementPages;
}

AddAsYoungestPage(aPageInfo);
BalanceAges();
}
TInt TPinArgs::AllocReplacementPages(TUint aNumPages)
{
if(iUseReserve)
{
__NK_ASSERT_DEBUG(iReplacementPages==0 || iReplacementPages==EUseReserveForPinReplacementPages);
iReplacementPages = EUseReserveForPinReplacementPages;
}
else
{
if(aNumPages>iReplacementPages)
{
if(!ThePager.AllocPinReplacementPages(aNumPages-iReplacementPages))
return KErrNoMemory;
iReplacementPages = aNumPages;
}
}
return KErrNone;
}
void TPinArgs::FreeReplacementPages()
{
if(iReplacementPages!=0 && iReplacementPages!=EUseReserveForPinReplacementPages)
ThePager.FreePinReplacementPages(iReplacementPages);
iReplacementPages = 0;
}
TBool DPager::AllocPinReplacementPages(TUint aNumPages)
{
TRACE2(("DPager::AllocPinReplacementPages(0x%x)",aNumPages));
RamAllocLock::Lock();
MmuLock::Lock();

TBool ok = false;
do
{
if(iNumberOfFreePages>=aNumPages)
{
iNumberOfFreePages -= aNumPages;
ok = true;
break;
}
}
while(TryGrowLiveList());

MmuLock::Unlock();
RamAllocLock::Unlock();
return ok;
}
void DPager::FreePinReplacementPages(TUint aNumPages)
{
TRACE2(("DPager::FreePinReplacementPage(0x%x)",aNumPages));

RamAllocLock::Lock();
MmuLock::Lock();

iNumberOfFreePages += aNumPages;
RemoveExcessPages();

MmuLock::Unlock();
RamAllocLock::Unlock();
}
TBool DPager::ReservePage()
{
__NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
__NK_ASSERT_DEBUG(MmuLock::IsHeld());

__NK_ASSERT_DEBUG(iMinimumPageCount >= iMinimumPageLimit+iReservePageCount);
while(iMinimumPageCount==iMinimumPageLimit+iReservePageCount && iNumberOfFreePages==0)
{
if(!TryGrowLiveList())
return false;
}
if(iMinimumPageCount==iMinimumPageLimit+iReservePageCount)
{
++iMinimumPageCount;
--iNumberOfFreePages;
if(iMinimumPageCount>iMaximumPageCount)
iMaximumPageCount = iMinimumPageCount;
}
++iReservePageCount;
__NK_ASSERT_DEBUG(iMinimumPageCount >= iMinimumPageLimit+iReservePageCount);
__NK_ASSERT_DEBUG(iMinimumPageCount+iNumberOfFreePages <= iMaximumPageCount);
return true;
}
TBool DPager::ReservePages(TUint aRequiredCount, TUint& aCount)
{
RamAllocLock::Lock();
MmuLock::Lock();
while(aCount<aRequiredCount)
{
if(!ReservePage())
break;
++aCount;
MmuLock::Flash();
}
TBool enoughPages = aCount==aRequiredCount;
MmuLock::Unlock();
RamAllocLock::Unlock();

if(!enoughPages)
UnreservePages(aCount);

return enoughPages;
}
void DPager::UnreservePages(TUint& aCount)
{
MmuLock::Lock();
iReservePageCount -= aCount;
aCount = 0;
MmuLock::Unlock();
}
TInt DPager::CheckRealtimeThreadFault(DThread* aThread, TAny* aExceptionInfo)
{
// realtime threads shouldn't take paging faults...
DThread* client = aThread->iIpcClient;

// If iIpcClient is set then we are accessing the address space of a remote thread. If we are
// in an IPC trap, this will contain information about the local and remote addresses being accessed.
// If this is not set then we assume that any fault must be the fault of a bad remote address.
TIpcExcTrap* ipcTrap = (TIpcExcTrap*)aThread->iExcTrap;
if (ipcTrap && !ipcTrap->IsTIpcExcTrap())
ipcTrap = 0;
if (client && (!ipcTrap || ipcTrap->ExcLocation(aThread, aExceptionInfo) == TIpcExcTrap::EExcRemote))
{
// kill client thread...
if(K::IllegalFunctionForRealtimeThread(client,"Access to Paged Memory (by other thread)"))
{
// treat memory access as bad...
return KErrAbort;
}
// else thread is in 'warning only' state so allow paging...
}
else
{
// kill current thread...
if(K::IllegalFunctionForRealtimeThread(NULL,"Access to Paged Memory"))
{
// if current thread is in critical section, then the above kill will be deferred
// and we will continue executing. We will handle this by returning an error
// which means that the thread will take an exception (which hopefully is XTRAPed!)
return KErrAbort;
}
// else thread is in 'warning only' state so allow paging...
}

return KErrNone;
}
TInt DPager::HandlePageFault( TLinAddr aPc, TLinAddr aFaultAddress, TUint aFaultAsid, TUint aFaultIndex,
TUint aAccessPermissions, DMemoryObject* aMemory, DMemoryMapping* aMapping,
TUint aMapInstanceCount, DThread* aThread, TAny* aExceptionInfo)
{
MmuLock::Lock();
TInt r = TryRejuvenate( aFaultAsid, aFaultAddress, aAccessPermissions, aPc, aMapping, aMapInstanceCount,
aThread, aExceptionInfo);
if(r == KErrNone || r == KErrAbort)
{
MmuLock::Unlock();
}
else
{
// rejuvenate failed, call memory manager to page in memory...
Event(EEventPageInStart, 0, aPc, aFaultAddress, aAccessPermissions);
MmuLock::Unlock();
TheThrashMonitor.NotifyStartPaging();

DMemoryManager* manager = aMemory->iManager;
r = manager->HandleFault(aMemory, aFaultIndex, aMapping, aMapInstanceCount, aAccessPermissions);

TheThrashMonitor.NotifyEndPaging();
}
return r;
}
TInt DPager::ResizeLiveList()
{
MmuLock::Lock();
TUint min = iMinimumPageCount;
TUint max = iMaximumPageCount;
MmuLock::Unlock();
return ResizeLiveList(min,max);
}
TInt DPager::ResizeLiveList(TUint aMinimumPageCount, TUint aMaximumPageCount)
{
TRACE(("DPager::ResizeLiveList(%d,%d) current young=%d old=%d min=%d free=%d max=%d",aMinimumPageCount,aMaximumPageCount,iYoungCount,iOldCount,iMinimumPageCount,iNumberOfFreePages,iMaximumPageCount));
if(!aMaximumPageCount)
{
aMinimumPageCount = iInitMinimumPageCount;
aMaximumPageCount = iInitMaximumPageCount;
}
if (aMaximumPageCount > KAbsoluteMaxPageCount)
aMaximumPageCount = KAbsoluteMaxPageCount;

// Min must not be greater than max...
if(aMinimumPageCount>aMaximumPageCount)
return KErrArgument;

NKern::ThreadEnterCS();
RamAllocLock::Lock();
MmuLock::Lock();

// Make sure aMinimumPageCount is not less than absolute minimum we can cope with...
iMinimumPageLimit = iMinYoungPages * (1 + iYoungOldRatio) / iYoungOldRatio
+ DPageReadRequest::ReservedPagesRequired();
if(iMinimumPageLimit<iAbsoluteMinPageCount)
iMinimumPageLimit = iAbsoluteMinPageCount;
if(aMinimumPageCount<iMinimumPageLimit+iReservePageCount)
aMinimumPageCount = iMinimumPageLimit+iReservePageCount;
if(aMaximumPageCount<aMinimumPageCount)
aMaximumPageCount=aMinimumPageCount;
// Increase iMaximumPageCount?
TInt extra = aMaximumPageCount-iMaximumPageCount;
if(extra>0)
iMaximumPageCount += extra;

// Reduce iMinimumPageCount?
TInt spare = iMinimumPageCount-aMinimumPageCount;
if(spare>0)
{
iMinimumPageCount -= spare;
iNumberOfFreePages += spare;
}

// Increase iMinimumPageCount?
TInt r = KErrNone;
while(iMinimumPageCount<aMinimumPageCount)
{
TUint newMin = aMinimumPageCount;
TUint maxMin = iMinimumPageCount+iNumberOfFreePages;
if(newMin>maxMin)
newMin = maxMin;

TUint delta = newMin-iMinimumPageCount;
if(delta)
{
iMinimumPageCount = newMin;
iNumberOfFreePages -= delta;
continue;
}

if(!TryGrowLiveList())
{
r = KErrNoMemory;
break;
}
}
// Reduce iMaximumPageCount?
while(iMaximumPageCount>aMaximumPageCount)
{
TUint newMax = aMaximumPageCount;
TUint minMax = iMinimumPageCount+iNumberOfFreePages;
if(newMax<minMax)
newMax = minMax;

TUint delta = iMaximumPageCount-newMax;
if(delta)
{
iMaximumPageCount = newMax;
continue;
}

ReturnPageToSystem();
}

TRACE(("DPager::ResizeLiveList end with young=%d old=%d min=%d free=%d max=%d",iYoungCount,iOldCount,iMinimumPageCount,iNumberOfFreePages,iMaximumPageCount));

#ifdef BTRACE_KERNEL_MEMORY
BTrace4(BTrace::EKernelMemory,BTrace::EKernelMemoryDemandPagingCache,iMinimumPageCount << KPageShift);
#endif

MmuLock::Unlock();
RamAllocLock::Unlock();
NKern::ThreadLeaveCS();
return r;
}
void DPager::FlushAll()
{
NKern::ThreadEnterCS();
RamAllocLock::Lock();

TRACE(("DPager::FlushAll() live list young=%d old=%d min=%d free=%d max=%d",iYoungCount,iOldCount,iMinimumPageCount,iNumberOfFreePages,iMaximumPageCount));

// look at all RAM pages in the system, and unmap all those used for paging
const TUint32* piMap = (TUint32*)KPageInfoMap;
const TUint32* piMapEnd = piMap+(KNumPageInfoPages>>5);
SPageInfo* pi = (SPageInfo*)KPageInfoLinearBase;
MmuLock::Lock();
do
{
SPageInfo* piNext = pi+(KPageInfosPerPage<<5);
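// (Each bit in the piMap bitmap covers one page's worth of SPageInfo structures; the
// <<5 and >>5 shifts arise because each TUint32 map entry holds 32 such bits.)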
for(TUint32 piFlags=*piMap++; piFlags; piFlags>>=1)
{
if(!(piFlags&1)) { pi += KPageInfosPerPage; continue; }
SPageInfo* piEnd = pi+KPageInfosPerPage;
do {
SPageInfo::TPagedState state = pi->PagedState();
#ifdef _USE_OLDEST_LISTS
if (state==SPageInfo::EPagedYoung || state==SPageInfo::EPagedOld ||
state==SPageInfo::EPagedOldestClean || state==SPageInfo::EPagedOldestDirty)
#else
if(state==SPageInfo::EPagedYoung || state==SPageInfo::EPagedOld)
#endif
{
if (pi->Type() != SPageInfo::EUnused)
{
TInt r = StealPage(pi);
if(r==KErrNone)
AddAsFreePage(pi);
MmuLock::Flash();
}
}
++pi;
if(((TUint)pi&(0xf<<KPageInfoShift))==0)
MmuLock::Flash(); // every 16 page infos
} while(pi<piEnd);
pi = piNext;
}
} while(piMap<piMapEnd);
MmuLock::Unlock();

// reduce live page list to a minimum
while(GetFreePages(1)) {};

TRACE(("DPager::FlushAll() end with young=%d old=%d min=%d free=%d max=%d",iYoungCount,iOldCount,iMinimumPageCount,iNumberOfFreePages,iMaximumPageCount));

RamAllocLock::Unlock();
NKern::ThreadLeaveCS();
}
void DPager::GetLiveListInfo(SVMCacheInfo& aInfo)
{
MmuLock::Lock(); // ensure consistent set of values are read...
aInfo.iMinSize = iMinimumPageCount<<KPageShift;
aInfo.iMaxSize = iMaximumPageCount<<KPageShift;
aInfo.iCurrentSize = (iMinimumPageCount+iNumberOfFreePages)<<KPageShift;
aInfo.iMaxFreeSize = iNumberOfFreePages<<KPageShift;
MmuLock::Unlock();
}

void DPager::GetEventInfo(SVMEventInfo& aInfoOut)
{
MmuLock::Lock(); // ensure consistent set of values are read...
aInfoOut = iEventInfo;
MmuLock::Unlock();
}

void DPager::ResetEventInfo()
{
MmuLock::Lock();
memclr(&iEventInfo, sizeof(iEventInfo));
MmuLock::Unlock();
}
TInt TestPageState(TLinAddr aAddr)
{
DMemModelProcess* process = (DMemModelProcess*)TheCurrentThread->iOwningProcess;
// Get the OS ASID of the current thread's process, so there is no need to open a reference on it.
TInt osAsid = process->OsAsid();
TPte* ptePtr = 0;
TPte pte = 0;
TInt r = 0;
SPageInfo* pageInfo = NULL;

NKern::ThreadEnterCS();

TUint offsetInMapping;
TUint mapInstanceCount;
DMemoryMapping* mapping = MM::FindMappingInAddressSpace(osAsid, aAddr, 1, offsetInMapping, mapInstanceCount);

MmuLock::Lock();

if(mapping)
{
DMemoryObject* memory = mapping->Memory();
if(mapInstanceCount == mapping->MapInstanceCount() && memory)
{
DMemoryManager* manager = memory->iManager;
if(manager==TheCodePagedMemoryManager)
r |= EPageStateInRamCode|EPageStatePaged;
}
}

ptePtr = Mmu::SafePtePtrFromLinAddr(aAddr,osAsid);
if (!ptePtr)
goto done;
pte = *ptePtr;
if (pte == KPteUnallocatedEntry)
goto done;
r |= EPageStatePtePresent;
if (pte!=Mmu::MakePteInaccessible(pte,0))
r |= EPageStatePteValid;

pageInfo = SPageInfo::SafeFromPhysAddr(pte&~KPageMask);
if(pageInfo)
{
r |= pageInfo->Type();
r |= pageInfo->PagedState()<<8;
}
done:
MmuLock::Unlock();
if(mapping)
mapping->Close();
NKern::ThreadLeaveCS();
return r;
}
TInt VMHalFunction(TAny*, TInt aFunction, TAny* a1, TAny* a2)
{
switch(aFunction)
{
case EVMHalFlushCache:
if(!TheCurrentThread->HasCapability(ECapabilityWriteDeviceData,__PLATSEC_DIAGNOSTIC_STRING("Checked by VMHalFunction(EVMHalFlushCache)")))
K::UnlockedPlatformSecurityPanic();
ThePager.FlushAll();
return KErrNone;

case EVMHalSetCacheSize:
{
if(!TheCurrentThread->HasCapability(ECapabilityWriteDeviceData,__PLATSEC_DIAGNOSTIC_STRING("Checked by VMHalFunction(EVMHalSetCacheSize)")))
K::UnlockedPlatformSecurityPanic();
TUint min = TUint(a1)>>KPageShift;
if(TUint(a1)&KPageMask)
++min;
TUint max = TUint(a2)>>KPageShift;
if(TUint(a2)&KPageMask)
++max;
return ThePager.ResizeLiveList(min,max);
}

case EVMHalGetCacheSize:
{
SVMCacheInfo info;
ThePager.GetLiveListInfo(info);
kumemput32(a1,&info,sizeof(info));
}
return KErrNone;

case EVMHalGetEventInfo:
{
SVMEventInfo info;
ThePager.GetEventInfo(info);
Kern::InfoCopy(*(TDes8*)a1,(TUint8*)&info,sizeof(info));
}
return KErrNone;

case EVMHalResetEventInfo:
ThePager.ResetEventInfo();
return KErrNone;

#ifdef __SUPPORT_DEMAND_PAGING_EMULATION__
case EVMHalGetOriginalRomPages:
RomOriginalPages(*((TPhysAddr**)a1), *((TUint*)a2));
return KErrNone;
#endif

case EVMHalPageState:
return TestPageState((TLinAddr)a1);

case EVMHalGetSwapInfo:
{
if ((K::MemModelAttributes & EMemModelAttrDataPaging) == 0)
return KErrNotSupported;
SVMSwapInfo info;
GetSwapInfo(info);
kumemput32(a1,&info,sizeof(info));
}
return KErrNone;

case EVMHalGetThrashLevel:
return TheThrashMonitor.ThrashLevel();

case EVMHalSetSwapThresholds:
{
if(!TheCurrentThread->HasCapability(ECapabilityWriteDeviceData,__PLATSEC_DIAGNOSTIC_STRING("Checked by VMHalFunction(EVMHalSetSwapThresholds)")))
K::UnlockedPlatformSecurityPanic();
if ((K::MemModelAttributes & EMemModelAttrDataPaging) == 0)
return KErrNotSupported;
SVMSwapThresholds thresholds;
kumemget32(&thresholds,a1,sizeof(thresholds));
return SetSwapThresholds(thresholds);
}

case EVMHalSetThrashThresholds:
if(!TheCurrentThread->HasCapability(ECapabilityWriteDeviceData,__PLATSEC_DIAGNOSTIC_STRING("Checked by VMHalFunction(EVMHalSetThrashThresholds)")))
K::UnlockedPlatformSecurityPanic();
return TheThrashMonitor.SetThresholds((TUint)a1, (TUint)a2);

#ifdef __DEMAND_PAGING_BENCHMARKS__
case EVMHalGetPagingBenchmark:
{
TUint index = (TInt) a1;
if (index >= EMaxPagingBm)
return KErrNotFound;
NKern::LockSystem();
SPagingBenchmarkInfo info = ThePager.iBenchmarkInfo[index];
NKern::UnlockSystem();
kumemput32(a2,&info,sizeof(info));
}
return KErrNone;

case EVMHalResetPagingBenchmark:
{
TUint index = (TInt) a1;
if (index >= EMaxPagingBm)
return KErrNotFound;
NKern::LockSystem();
ThePager.ResetBenchmarkData((TPagingBenchmark)index);
NKern::UnlockSystem();
}
return KErrNone;
#endif

default:
return KErrNotSupported;
}
}
#ifdef __DEMAND_PAGING_BENCHMARKS__

void DPager::ResetBenchmarkData(TPagingBenchmark aBm)
{
SPagingBenchmarkInfo& info = iBenchmarkInfo[aBm];
info.iCount = 0;
info.iTotalTime = 0;
info.iMaxTime = 0;
info.iMinTime = KMaxTInt;
}

void DPager::RecordBenchmarkData(TPagingBenchmark aBm, TUint32 aStartTime, TUint32 aEndTime)
{
SPagingBenchmarkInfo& info = iBenchmarkInfo[aBm];
++info.iCount;
#if !defined(HIGH_RES_TIMER) || defined(HIGH_RES_TIMER_COUNTS_UP)
TInt64 elapsed = aEndTime - aStartTime;
#else
TInt64 elapsed = aStartTime - aEndTime;
#endif
info.iTotalTime += elapsed;
if (elapsed > info.iMaxTime)
info.iMaxTime = elapsed;
if (elapsed < info.iMinTime)
info.iMinTime = elapsed;
}

#endif //__DEMAND_PAGING_BENCHMARKS__
//
// Paging request management...
//
DPagingRequest::DPagingRequest(DPagingRequestPool::TGroup& aPoolGroup)
: iPoolGroup(aPoolGroup), iUseRegionMemory(0), iUseRegionIndex(0), iUseRegionCount(0)
{
}

FORCE_INLINE void DPagingRequest::SetUse(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
{
__ASSERT_SYSTEM_LOCK;
iUseRegionMemory = aMemory;
iUseRegionIndex = aIndex;
iUseRegionCount = aCount;
}
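// CheckUse() below returns true when pages [aIndex,aIndex+aCount) cover the remainder
// of the in-use region: the unsigned subtraction folds the lower and upper bound
// checks on aIndex into a single comparison.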
TBool DPagingRequest::CheckUse(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
{
return aMemory==iUseRegionMemory
&& TUint(aIndex-iUseRegionIndex) < iUseRegionCount
&& TUint(iUseRegionCount-TUint(aIndex-iUseRegionIndex)) <= aCount;
}
void DPagingRequest::Release()
{
NKern::LockSystem();
SetUse(0,0,0);
Signal();
}

void DPagingRequest::Wait()
{
__ASSERT_SYSTEM_LOCK;
++iUsageCount;
TInt r = iMutex->Wait();
__NK_ASSERT_ALWAYS(r == KErrNone);
}

void DPagingRequest::Signal()
{
__ASSERT_SYSTEM_LOCK;
iPoolGroup.Signal(this);
}
FORCE_INLINE TBool DPagingRequest::IsCollision(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
{
__ASSERT_SYSTEM_LOCK;
DMemoryObject* memory = iUseRegionMemory;
TUint index = iUseRegionIndex;
TUint count = iUseRegionCount;
// note, this comparison would fail if either region includes page number KMaxTUint,
// but it isn't possible to create a memory object which is > KMaxTUint pages...
return memory == aMemory && index+count > aIndex && index < aIndex+aCount;
}
TLinAddr DPagingRequest::MapPages(TUint aColour, TUint aCount, TPhysAddr* aPages)
{
__NK_ASSERT_DEBUG(iMutex->iCleanup.iThread == &Kern::CurrentThread());
return iTempMapping.Map(aPages,aCount,aColour);
}

void DPagingRequest::UnmapPages(TBool aIMBRequired)
{
__NK_ASSERT_DEBUG(iMutex->iCleanup.iThread == &Kern::CurrentThread());
iTempMapping.Unmap(aIMBRequired);
}
//
// DPageReadRequest
//

TInt DPageReadRequest::iAllocNext = 0;

TInt DPageReadRequest::Construct()
{
// allocate id and mutex...
TUint id = (TUint)__e32_atomic_add_ord32(&iAllocNext, 1);
_LIT(KLitPagingRequest,"PageReadRequest-");
TBuf<sizeof("PageReadRequest-")+10> mutexName(KLitPagingRequest);
mutexName.AppendNum(id);
TInt r = K::MutexCreate(iMutex, mutexName, NULL, EFalse, KMutexOrdPageIn);
if(r!=KErrNone)
return r;

// allocate space for mapping pages whilst they're being loaded...
iTempMapping.Alloc(EMaxPages);

// create memory buffer...
TUint bufferSize = EMaxPages+1;
DMemoryObject* bufferMemory;
r = MM::MemoryNew(bufferMemory,EMemoryObjectUnpaged,bufferSize,EMemoryCreateNoWipe);
if(r!=KErrNone)
return r;
MM::MemorySetLock(bufferMemory,iMutex);
TPhysAddr physAddr;
r = MM::MemoryAllocContiguous(bufferMemory,0,bufferSize,0,physAddr);
if(r!=KErrNone)
return r;
DMemoryMapping* bufferMapping;
r = MM::MappingNew(bufferMapping,bufferMemory,ESupervisorReadWrite,KKernelOsAsid);
if(r!=KErrNone)
return r;
iBuffer = MM::MappingBase(bufferMapping);

// ensure there are enough young pages to cope with new request object...
r = ThePager.ResizeLiveList();
if(r!=KErrNone)
return r;

return KErrNone;
}
//
// DPageWriteRequest
//

TInt DPageWriteRequest::iAllocNext = 0;

TInt DPageWriteRequest::Construct()
{
// allocate id and mutex...
TUint id = (TUint)__e32_atomic_add_ord32(&iAllocNext, 1);
_LIT(KLitPagingRequest,"PageWriteRequest-");
TBuf<sizeof("PageWriteRequest-")+10> mutexName(KLitPagingRequest);
mutexName.AppendNum(id);
TInt r = K::MutexCreate(iMutex, mutexName, NULL, EFalse, KMutexOrdPageOut);
if(r!=KErrNone)
return r;

// allocate space for mapping pages whilst they're being loaded...
iTempMapping.Alloc(EMaxPages);

return KErrNone;
}
2227 // DPagingRequestPool
2230 DPagingRequestPool::DPagingRequestPool(TUint aNumPageReadRequest,TUint aNumPageWriteRequest)
2231 : iPageReadRequests(aNumPageReadRequest), iPageWriteRequests(aNumPageWriteRequest)
2235 for(i=0; i<aNumPageReadRequest; ++i)
2237 DPageReadRequest* req = new DPageReadRequest(iPageReadRequests);
2238 __NK_ASSERT_ALWAYS(req);
2239 TInt r = req->Construct();
2240 __NK_ASSERT_ALWAYS(r==KErrNone);
2241 iPageReadRequests.iRequests[i] = req;
2242 iPageReadRequests.iFreeList.Add(req);
2245 for(i=0; i<aNumPageWriteRequest; ++i)
2247 DPageWriteRequest* req = new DPageWriteRequest(iPageWriteRequests);
2248 __NK_ASSERT_ALWAYS(req);
2249 TInt r = req->Construct();
2250 __NK_ASSERT_ALWAYS(r==KErrNone);
2251 iPageWriteRequests.iRequests[i] = req;
2252 iPageWriteRequests.iFreeList.Add(req);
2257 DPagingRequestPool::~DPagingRequestPool()
2259 __NK_ASSERT_ALWAYS(0); // deletion not implemented
DPageReadRequest* DPagingRequestPool::AcquirePageReadRequest(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
{
NKern::LockSystem();

DPagingRequest* req;

// if we collide with page write operation...
req = iPageWriteRequests.FindCollision(aMemory,aIndex,aCount);
if(req)
{
// wait until write completes...
req->Wait();
req->Signal();
return 0; // caller expected to retry if needed
}

// get a request object to use...
req = iPageReadRequests.GetRequest(aMemory,aIndex,aCount);

// check no new requests collide with us...
if(iPageWriteRequests.FindCollision(aMemory,aIndex,aCount)
|| iPageReadRequests.FindCollision(aMemory,aIndex,aCount))
{
// another operation is colliding with this region, give up and retry...
req->Signal();
return 0; // caller expected to retry if needed
}

// we have a request object which we can use...
req->SetUse(aMemory,aIndex,aCount);

NKern::UnlockSystem();
return (DPageReadRequest*)req;
}
DPageWriteRequest* DPagingRequestPool::AcquirePageWriteRequest(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
{
NKern::LockSystem();

DPagingRequest* req;

for(;;)
{
// get a request object to use...
req = iPageWriteRequests.GetRequest(aMemory,aIndex,aCount);

if(iPageWriteRequests.FindCollision(aMemory,aIndex,aCount))
{
// another write operation is colliding with this region, give up and retry...
req->Signal();
// Reacquire the system lock as Signal() above will release it.
NKern::LockSystem();
}
else
break;
}

// we have a request object which we can use...
req->SetUse(aMemory,aIndex,aCount);

NKern::UnlockSystem();
return (DPageWriteRequest*)req;
}
DPagingRequestPool::TGroup::TGroup(TUint aNumRequests)
{
iNumRequests = aNumRequests;
iRequests = new DPagingRequest*[aNumRequests];
__NK_ASSERT_ALWAYS(iRequests);
}
DPagingRequest* DPagingRequestPool::TGroup::FindCollision(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
{
__ASSERT_SYSTEM_LOCK;
DPagingRequest** ptr = iRequests;
DPagingRequest** ptrEnd = ptr+iNumRequests;
while(ptr<ptrEnd)
{
DPagingRequest* req = *ptr++;
if(req->IsCollision(aMemory,aIndex,aCount))
return req;
}
return 0;
}
static TUint32 RandomSeed = 33333;

DPagingRequest* DPagingRequestPool::TGroup::GetRequest(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
{
__NK_ASSERT_DEBUG(iNumRequests > 0);

// try using an existing request which collides with this region...
DPagingRequest* req = FindCollision(aMemory,aIndex,aCount);
if(!req)
{
// use a free request...
req = (DPagingRequest*)iFreeList.GetFirst();
if(req)
{
// free requests aren't being used...
__NK_ASSERT_DEBUG(req->iUsageCount == 0);
}
else
{
// pick a random request...
RandomSeed = RandomSeed*69069+1; // next 'random' number
TUint index = (TUint64(RandomSeed) * TUint64(iNumRequests)) >> 32;
req = iRequests[index];
__NK_ASSERT_DEBUG(req->iUsageCount > 0); // we only pick random when none are free
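// (RandomSeed*69069+1 is a linear congruential generator; multiplying the 32-bit
// result by iNumRequests as a 64-bit value and shifting right by 32 maps it
// uniformly onto the range [0,iNumRequests).)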
}
}

// wait for chosen request object...
req->Wait();

return req;
}
void DPagingRequestPool::TGroup::Signal(DPagingRequest* aRequest)
{
// if there are no threads waiting on the mutex then return it to the free pool...
__NK_ASSERT_DEBUG(aRequest->iUsageCount > 0);
if (--aRequest->iUsageCount==0)
iFreeList.AddHead(aRequest);

aRequest->iMutex->Signal();
}
/**
Register the specified paging device with the kernel.

@param aDevice A pointer to the paging device to install.

@return KErrNone on success.
*/
EXPORT_C TInt Kern::InstallPagingDevice(DPagingDevice* aDevice)
{
TRACEB(("Kern::InstallPagingDevice(0x%08x) name='%s' type=%d",aDevice,aDevice->iName,aDevice->iType));

__NK_ASSERT_ALWAYS(aDevice->iReadUnitShift <= KPageShift);

TInt r = KErrNotSupported; // Will return this if unsupported device type is installed

// create the pools of page out and page in requests...
const TInt writeReqs = (aDevice->iType & DPagingDevice::EData) ? KPagingRequestsPerDevice : 0;
aDevice->iRequestPool = new DPagingRequestPool(KPagingRequestsPerDevice,writeReqs);
if(!aDevice->iRequestPool)
{
r = KErrNoMemory;
goto exit;
}

if(aDevice->iType & DPagingDevice::ERom)
{
r = TheRomMemoryManager->InstallPagingDevice(aDevice);
if(r!=KErrNone)
goto exit;
}

if(aDevice->iType & DPagingDevice::ECode)
{
r = TheCodePagedMemoryManager->InstallPagingDevice(aDevice);
if(r!=KErrNone)
goto exit;
}

if(aDevice->iType & DPagingDevice::EData)
{
r = TheDataPagedMemoryManager->InstallPagingDevice(aDevice);
if(r!=KErrNone)
goto exit;
}

if (K::MemModelAttributes & (EMemModelAttrRomPaging | EMemModelAttrCodePaging | EMemModelAttrDataPaging))
TheThrashMonitor.Start();

exit:
TRACEB(("Kern::InstallPagingDevice returns %d",r));
return r;
}
//
// DDemandPagingLock
//

EXPORT_C DDemandPagingLock::DDemandPagingLock()
: iReservedPageCount(0), iLockedPageCount(0), iPinMapping(0)
{
}
EXPORT_C TInt DDemandPagingLock::Alloc(TInt aSize)
{
TRACEP(("DDemandPagingLock[0x%08x]::Alloc(0x%x)",this,aSize));
iMaxPageCount = ((aSize-1+KPageMask)>>KPageShift)+1;
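// (This is the most pages an aSize-byte region can touch: one per whole page of
// length, plus one in case the region straddles a page boundary. E.g. aSize==KPageSize
// can touch two pages if unaligned.)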
TInt r = KErrNoMemory;

NKern::ThreadEnterCS();

TUint maxPt = DVirtualPinMapping::MaxPageTables(iMaxPageCount);
// Note, we need to reserve whole pages even for page tables which are smaller
// because pinning can remove the page from live list...
TUint reserve = iMaxPageCount+maxPt*KNumPagesToPinOnePageTable;
if(ThePager.ReservePages(reserve,(TUint&)iReservedPageCount))
{
iPinMapping = DVirtualPinMapping::New(iMaxPageCount);
if(iPinMapping)
r = KErrNone;
else
ThePager.UnreservePages((TUint&)iReservedPageCount);
}

NKern::ThreadLeaveCS();
TRACEP(("DDemandPagingLock[0x%08x]::Alloc returns %d, iMaxPageCount=%d, iReservedPageCount=%d",this,r,iMaxPageCount,iReservedPageCount));
return r;
}
EXPORT_C void DDemandPagingLock::Free()
{
TRACEP(("DDemandPagingLock[0x%08x]::Free()",this));

NKern::ThreadEnterCS();
DVirtualPinMapping* pinMapping = (DVirtualPinMapping*)__e32_atomic_swp_ord_ptr(&iPinMapping, 0);
if (pinMapping)
pinMapping->Close();
NKern::ThreadLeaveCS();
ThePager.UnreservePages((TUint&)iReservedPageCount);
}
EXPORT_C TInt DDemandPagingLock::Lock(DThread* aThread, TLinAddr aStart, TInt aSize)
{
// TRACEP(("DDemandPagingLock[0x%08x]::Lock(0x%08x,0x%08x,0x%08x)",this,aThread,aStart,aSize));
if(iLockedPageCount)
__NK_ASSERT_ALWAYS(0); // lock already used

// calculate the number of pages that need to be locked...
TUint mask=KPageMask;
TUint offset=aStart&mask;
TInt numPages = (aSize+offset+mask)>>KPageShift;
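// (I.e. the number of pages spanned by [aStart,aStart+aSize) once the start is
// rounded down, and the end up, to a page boundary.)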
if(numPages>iMaxPageCount)
__NK_ASSERT_ALWAYS(0);

NKern::ThreadEnterCS();

// find mapping which covers the specified region...
TUint offsetInMapping;
TUint mapInstanceCount;
DMemoryMapping* mapping = MM::FindMappingInThread((DMemModelThread*)aThread, aStart, aSize, offsetInMapping, mapInstanceCount);
if(!mapping)
{
NKern::ThreadLeaveCS();
return KErrBadDescriptor;
}

MmuLock::Lock();
DMemoryObject* memory = mapping->Memory();
if(mapInstanceCount != mapping->MapInstanceCount() || !memory)
{// Mapping has been reused or no memory.
MmuLock::Unlock();
mapping->Close();
NKern::ThreadLeaveCS();
return KErrBadDescriptor;
}

if(!memory->IsDemandPaged())
{
// memory not demand paged, so we have nothing to do...
MmuLock::Unlock();
mapping->Close();
NKern::ThreadLeaveCS();
return KErrNone;
}

// Open a reference on the memory so it doesn't get deleted.
memory->Open();
MmuLock::Unlock();

// pin memory...
TUint index = (offsetInMapping>>KPageShift)+mapping->iStartIndex;
TUint count = ((offsetInMapping&KPageMask)+aSize+KPageMask)>>KPageShift;
TInt r = ((DVirtualPinMapping*)iPinMapping)->Pin( memory,index,count,mapping->Permissions(),
mapping, mapInstanceCount);

if(r==KErrNotFound)
{
// some memory wasn't present, so treat this as an error...
memory->Close();
mapping->Close();
NKern::ThreadLeaveCS();
return KErrBadDescriptor;
}

// we can't fail to pin otherwise...
__NK_ASSERT_DEBUG(r!=KErrNoMemory); // separate OOM assert to aid debugging
__NK_ASSERT_ALWAYS(r==KErrNone);

// indicate that we have actually pinned...
__NK_ASSERT_DEBUG(iLockedPageCount==0);
iLockedPageCount = count;

// cleanup...
memory->Close();
mapping->Close();
NKern::ThreadLeaveCS();

return r;
}
EXPORT_C void DDemandPagingLock::DoUnlock()
{
NKern::ThreadEnterCS();
((DVirtualPinMapping*)iPinMapping)->Unpin();
__NK_ASSERT_DEBUG(iLockedPageCount);
iLockedPageCount = 0;
NKern::ThreadLeaveCS();
}