// Copyright (c) 2007-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
// Initial Contributors:
// Nokia Corporation - initial contribution.
#include <plat_priv.h>
#include "cache_maintenance.inl"
@class PageTableAllocator
Page tables are mapped into a sparse array in the virtual address range
#KPageTableBase..#KPageTableEnd. For each present page table there is a
corresponding #SPageTableInfo object mapped from #KPageTableInfoBase upwards.
Page tables for demand paged content are kept separate from other page tables;
this enables their memory to be freed when the page tables no longer map
any memory, i.e. when it has all been paged out. Pages containing these 'paged'
page tables are stored in the demand paging live list, so they participate in
the page aging process.
The 'unpaged' page tables are allocated from the bottom of the array upwards,
via TPtPageAllocator::iLowerAllocator; the 'paged' page tables are allocated
from the top of the array downwards, via TPtPageAllocator::iUpperAllocator.
These two regions are prevented from overlapping, or from coming close enough
together that the #SPageTableInfo structs for paged and unpaged page tables
lie in the same page. This means that the SPageTableInfo memory for paged page
tables can be discarded when its page tables are discarded.
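
As an illustrative sketch of the address arithmetic this layout implies (the
helper names below are hypothetical, not part of this source; the real code
uses SPageTableInfo::FromPtPtr() and SPageTableInfo::PageTable()), the page
table at array index i and its info object could be located like this:

@code
// hypothetical helpers, assuming the constants described above
inline TPte* PageTableFromIndex(TUint aIndex)
	{ return (TPte*)(KPageTableBase + aIndex*KPageTableSize); }
inline SPageTableInfo* PageTableInfoFromIndex(TUint aIndex)
	{ return (SPageTableInfo*)KPageTableInfoBase + aIndex; }
@endcode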
Memory for page tables and page table info objects is managed by
#ThePageTableMemoryManager. When allocating memory for demand paged use, this
uses memory from #ThePager, which will reclaim paged memory if necessary.
Provided the live list always holds at least #DPager::iMinYoungPages pages,
this guarantees that handling page faults can never fail by running out of
memory.
TODO: In really pathological situations page table allocation can fail due to
running out of virtual address space to map the table; this needs to be
prevented from happening when handling demand paging faults.
PageTableAllocator PageTables;

TBool PageTablesLockIsHeld()
return ::PageTables.LockIsHeld();

Minimum number of page tables to keep in reserve.
const TUint KNumReservedPageTables = 0; // none needed - page tables for mapping page tables and infos are permanently allocated

Manager for the memory object used to store all the MMU page tables.
class DPageTableMemoryManager : public DMemoryManager

Not implemented - page table memory is never destroyed.
virtual void Destruct(DMemoryObject* aMemory)

virtual TInt StealPage(DMemoryObject* aMemory, SPageInfo* aPageInfo)
{ return PageTables.StealPage(aPageInfo); }
Does nothing, returns KErrNone.
The RAM containing page tables does not need access restrictions applied for demand paging
purposes. A page table's life-time is implicitly managed through the pages it maps.
virtual TInt RestrictPage(DMemoryObject* aMemory, SPageInfo* aPageInfo, TRestrictPagesType aRestriction)

Does nothing, returns KErrNone.
Page tables never need saving as their contents are dynamically generated.
virtual TInt CleanPage(DMemoryObject* aMemory, SPageInfo* aPageInfo, TPhysAddr*& aPageArrayEntry)

Not implemented, returns KErrNotSupported.
virtual TInt Pin(DMemoryObject* aMemory, DMemoryMappingBase* aMapping, TPinArgs& aPinArgs)
{ return KErrNotSupported; }
virtual void Unpin(DMemoryObject* aMemory, DMemoryMappingBase* aMapping, TPinArgs& aPinArgs)

virtual TInt MovePage( DMemoryObject* aMemory, SPageInfo* aOldPageInfo,
TPhysAddr& aNewPage, TUint aBlockZoneId, TBool aBlockRest);

Allocate a page of RAM for storing page tables in.

@param aMemory A memory object associated with this manager.
@param aIndex Page index, within the memory, to allocate the page at.
@param aDemandPaged True if the memory is to be used for page tables mapping
demand paged content.

@return KErrNone if successful, otherwise one of the system wide error codes.

TInt Alloc(DMemoryObject* aMemory, TUint aIndex, TBool aDemandPaged);
Free a page of RAM which was being used for storing page tables.

@param aMemory A memory object associated with this manager.
@param aIndex Page index, within the memory, to free the page from.
@param aDemandPaged True if the memory is being used for page tables mapping
demand paged content.

@return The number of pages freed, otherwise one of the system wide error codes.

TInt Free(DMemoryObject* aMemory, TUint aIndex, TBool aDemandPaged);
The single instance of the #DPageTableMemoryManager class.
DPageTableMemoryManager ThePageTableMemoryManager;

TInt DPageTableMemoryManager::Alloc(DMemoryObject* aMemory, TUint aIndex, TBool aDemandPaged)
TRACE2(("DPageTableMemoryManager::Alloc(0x%08x,0x%x,%d)",aMemory, aIndex, aDemandPaged));
__NK_ASSERT_DEBUG(MemoryObjectLock::IsHeld(aMemory));

// allocate page array entry...
RPageArray::TIter pageList;
TPhysAddr* p = aMemory->iPages.AddPageStart(aIndex,pageList);

RamAllocLock::Lock();

r = ThePager.PageInAllocPages(&pagePhys,1,aMemory->RamAllocFlags());
__NK_ASSERT_DEBUG(r!=KErrNoMemory);
{// Allocate fixed pages as page tables aren't movable.
r = TheMmu.AllocRam(&pagePhys, 1, aMemory->RamAllocFlags(), EPageFixed);
RamAllocLock::Unlock();

// add RAM to page array...
ThePager.Event(DPager::EEventPagePageTableAlloc,SPageInfo::FromPhysAddr(pagePhys));
SPageInfo* pi = SPageInfo::FromPhysAddr(pagePhys);
pi->SetManaged(aMemory,aIndex,aMemory->PageInfoFlags());
RPageArray::AddPage(p,pagePhys);

r = aMemory->MapPages(pageList);

// release page array entry...
aMemory->iPages.AddPageEnd(aIndex,usedNew);

// revert if error...
Free(aMemory,aIndex,aDemandPaged);
TInt DPageTableMemoryManager::Free(DMemoryObject* aMemory, TUint aIndex, TBool aDemandPaged)
TRACE2(("DPageTableMemoryManager::Free(0x%08x,0x%x,%d)",aMemory, aIndex, aDemandPaged));
__NK_ASSERT_DEBUG(MemoryObjectLock::IsHeld(aMemory));

// find page array entry...
RPageArray::TIter pageList;
TPhysAddr* p = aMemory->iPages.RemovePageStart(aIndex,pageList);

aMemory->UnmapPages(pageList,true);

RamAllocLock::Lock();

TPhysAddr pagePhys = RPageArray::RemovePage(p);

if(pagePhys==KPhysAddrInvalid)

// no page removed...

// free the removed page...
ThePager.PageInFreePages(&pagePhys,1);
TheMmu.FreeRam(&pagePhys, 1, EPageFixed);

RamAllocLock::Unlock();

aMemory->iPages.RemovePageEnd(aIndex,r);

TInt DPageTableMemoryManager::MovePage( DMemoryObject* aMemory, SPageInfo* aOldPageInfo,
TPhysAddr& aNewPage, TUint aBlockZoneId, TBool aBlockRest)
// This could be a demand paged page table info which can be discarded
// but let the PageTableAllocator handle that.
return ::PageTables.MovePage(aMemory, aOldPageInfo, aBlockZoneId, aBlockRest);
// PageTableAllocator

void PageTableAllocator::Init2(DMutex* aLock)
TRACEB(("PageTableAllocator::Init2(0x%x)",aLock));
__NK_ASSERT_DEBUG(iUnpagedAllocator.CheckFreeList());

// scan for already allocated page tables
// (assumes the first page table is used to map page tables)...
SPageTableInfo* pti = (SPageTableInfo*)KPageTableInfoBase;

TPte pte = ((TPte*)KPageTableBase)[pages];
if(pte==KPteUnallocatedEntry)
break; // end (assumes no gaps in page table allocation)

// process free page tables in this page...
pti->PtClusterAlloc();
iUnpagedAllocator.iFreeList.Add(&pti->FreeLink());

__NK_ASSERT_DEBUG(pti->IsPtClusterAllocated());

while(!(++pti)->IsFirstInPage());
iUnpagedAllocator.iFreeCount += freeCount;
__NK_ASSERT_DEBUG(iUnpagedAllocator.CheckFreeList());
TRACE2(("PT page 0x%08x has %d free tables",pti[-KPtClusterSize].PageTable(),freeCount));

// count page, and move on to next one...
__NK_ASSERT_DEBUG(pages<KChunkSize/KPageSize); // we've assumed less than one page table of page tables

// construct allocator for page table pages...
iPtPageAllocator.Init2(pages);

// initialise allocator page table infos...
iPageTableGroupCounts[0] = pages;
__NK_ASSERT_DEBUG(pages/KPageTableGroupSize==0); // we've assumed less than 1 page of page table infos

// FOLLOWING CODE WILL USE THIS OBJECT TO ALLOCATE SOME PAGE TABLES,
// SO ALLOCATOR MUST BE INITIALISED TO A FIT STATE BEFORE THIS POINT!

// construct memory object for page tables...
TMappingCreateFlags mapFlags = (TMappingCreateFlags)(EMappingCreateFixedVirtual|EMappingCreateReserveAllResources);
#if defined(__CPU_PAGE_TABLES_FULLY_CACHED)
TMemoryAttributes memAttr = EMemoryAttributeStandard;
#else
TMemoryAttributes memAttr = (TMemoryAttributes)(EMemoryAttributeNormalUncached|EMemoryAttributeDefaultShareable);
#endif
TMemoryCreateFlags createFlags = (TMemoryCreateFlags)(EMemoryCreateNoWipe|EMemoryCreateCustomManager);
TInt r = MM::InitFixedKernelMemory(iPageTableMemory, KPageTableBase, KPageTableEnd, pages<<KPageShift, (TMemoryObjectType)(T_UintPtr)&ThePageTableMemoryManager, createFlags, memAttr, mapFlags);
__NK_ASSERT_ALWAYS(r==KErrNone);
MM::MemorySetLock(iPageTableMemory,aLock);

// construct memory object for page table infos...
memAttr = EMemoryAttributeStandard;
TUint size = pages*KPtClusterSize*sizeof(SPageTableInfo);
size = (size+KPageMask)&~KPageMask;
r = MM::InitFixedKernelMemory(iPageTableInfoMemory, KPageTableInfoBase, KPageTableInfoEnd, size, (TMemoryObjectType)(T_UintPtr)&ThePageTableMemoryManager, createFlags, memAttr, mapFlags);
__NK_ASSERT_ALWAYS(r==KErrNone);
MM::MemorySetLock(iPageTableInfoMemory,aLock);

// make sure we have enough reserve page tables...
iUnpagedAllocator.Init2(this,KNumReservedPageTables,false);
iPagedAllocator.Init2(this,0,true);

TRACEB(("PageTableAllocator::Init2 done"));
void PageTableAllocator::Init2B()
TRACEB(("PageTableAllocator::Init2B()"));
TInt r = iPageTableMemory->iPages.PreallocateMemory();
__NK_ASSERT_ALWAYS(r==KErrNone);
r = iPageTableInfoMemory->iPages.PreallocateMemory();
__NK_ASSERT_ALWAYS(r==KErrNone);
TRACEB(("PageTableAllocator::Init2B done"));
void PageTableAllocator::TSubAllocator::Init2(PageTableAllocator* aAllocator, TUint aReserveCount, TBool aDemandPaged)
iReserveCount = aReserveCount;
iDemandPaged = aDemandPaged;
while(iFreeCount<aReserveCount)
if(!aAllocator->AllocReserve(*this))
__NK_ASSERT_ALWAYS(0);
void PageTableAllocator::TPtPageAllocator::Init2(TUint aNumInitPages)
iLowerAllocator = TBitMapAllocator::New(KMaxPageTablePages,ETrue);
__NK_ASSERT_ALWAYS(iLowerAllocator);
iLowerAllocator->Alloc(0,aNumInitPages);
iLowerWaterMark = aNumInitPages-1;

iUpperAllocator = TBitMapAllocator::New(KMaxPageTablePages,ETrue);
__NK_ASSERT_ALWAYS(iUpperAllocator);

__ASSERT_COMPILE(KMaxPageTablePages > (TUint)KMinUnpinnedPagedPtPages); // Unlikely to be untrue.
iUpperWaterMark = KMaxPageTablePages - KMinUnpinnedPagedPtPages;
iPinnedPageTablePages = 0; // OK to clear this without MmuLock as only one thread running so far.
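
// Layout note (summarising the code above and in Alloc()/Free() below): both
// bitmaps cover all KMaxPageTablePages page-table pages, but bit b of
// iUpperAllocator stands for page index KMaxPageTablePages-1-b. The 'unpaged'
// region therefore grows upwards from index 0 while the 'paged' region grows
// downwards from the top, with the two watermarks tracking how far each has spread.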
static TUint32 RandomSeed = 33333;

TUint PageTableAllocator::TPtPageAllocator::RandomPagedPtPage()
__NK_ASSERT_DEBUG(PageTablesLockIsHeld());

// Pick an allocated page at random from the range iUpperWaterMark..KMaxPageTablePages-1.
RandomSeed = RandomSeed * 69069 + 1; // next 'random' number
TUint allocRange = KMaxPageTablePages - iUpperWaterMark - 1;
TUint bit = (TUint64(RandomSeed) * TUint64(allocRange)) >> 32;

// All page table pages should be allocated or we shouldn't be stealing one at random.
__NK_ASSERT_DEBUG(iUpperAllocator->NotFree(bit, 1));

return KMaxPageTablePages - 1 - bit;
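
// A minimal sketch of the scaling trick above (the numbers here are
// illustrative only): RandomSeed*69069+1 is a classic linear congruential
// generator step, and multiplying its 32-bit output by allocRange then taking
// the top 32 bits maps the value into [0..allocRange) without a division, e.g.
//   RandomSeed = 0x80000000, allocRange = 100
//   bit = (TUint64(0x80000000) * 100) >> 32 = 50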
TInt PageTableAllocator::TPtPageAllocator::Alloc(TBool aDemandPaged)
__NK_ASSERT_DEBUG(PageTablesLockIsHeld());

TInt bit = iUpperAllocator->Alloc();
// There are always unpaged page tables so iUpperAllocator will always have
// at least one free bit.
__NK_ASSERT_DEBUG(bit >= 0);

pageIndex = KMaxPageTablePages - 1 - bit;

if(pageIndex < iUpperWaterMark)

// new upper watermark...
if((pageIndex & ~(KPageTableGroupSize - 1)) <= iLowerWaterMark)

// clashes with other bitmap allocator, so fail..
iUpperAllocator->Free(bit);
TRACE(("TPtPageAllocator::Alloc too low iUpperWaterMark %d ",iUpperWaterMark));

// Hold mmu lock so iUpperWaterMark isn't read by pinning before we've updated it.
iUpperWaterMark = pageIndex;

TRACE(("TPtPageAllocator::Alloc new iUpperWaterMark=%d",pageIndex));

TInt bit = iLowerAllocator->Alloc();

if(pageIndex > iLowerWaterMark)
{// iLowerAllocator->Alloc() should only pick the next bit after iLowerWaterMark.
__NK_ASSERT_DEBUG(pageIndex == iLowerWaterMark + 1);

// new lower watermark...
if( pageIndex >= (iUpperWaterMark & ~(KPageTableGroupSize - 1)) ||
AtPinnedPagedPtsLimit(iUpperWaterMark, pageIndex, iPinnedPageTablePages))

// clashes with other bitmap allocator or it would reduce the amount
// of available unpinned paged page tables too far, so fail..
iLowerAllocator->Free(bit);
TRACE(("TPtPageAllocator::Alloc iLowerWaterMark=%d",iLowerWaterMark));

iLowerWaterMark = pageIndex;

TRACE(("TPtPageAllocator::Alloc new iLowerWaterMark=%d", iLowerWaterMark));
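
// Worked example of the clash tests above (constants are illustrative): with
// KPageTableGroupSize==8, a prospective upper watermark of page index 21
// rounds down to group base 16, so if iLowerWaterMark has already reached 16
// the SPageTableInfos of the two regions would share a page and the
// allocation is refused; the lower-allocator test is the mirror image.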
void PageTableAllocator::TPtPageAllocator::Free(TUint aPageIndex, TBool aDemandPaged)
__NK_ASSERT_DEBUG(PageTablesLockIsHeld());

iUpperAllocator->Free(KMaxPageTablePages-1-aPageIndex);

iLowerAllocator->Free(aPageIndex);

void PageTableAllocator::Lock()
Kern::MutexWait(*iLock);

void PageTableAllocator::Unlock()
Kern::MutexSignal(*iLock);

TBool PageTableAllocator::LockIsHeld()
return iLock->iCleanup.iThread == &Kern::CurrentThread();
TBool PageTableAllocator::AllocReserve(TSubAllocator& aSubAllocator)
__NK_ASSERT_DEBUG(LockIsHeld());

TBool demandPaged = aSubAllocator.iDemandPaged;

TInt ptPageIndex = iPtPageAllocator.Alloc(demandPaged);

// Can't fail to find a demand paged page table, otherwise a page fault
// could fail with KErrNoMemory. Therefore, keep attempting to steal a
// demand paged page table page until successful.
TUint index = iPtPageAllocator.RandomPagedPtPage();

TLinAddr pageTableLin = KPageTableBase + (index << (KPtClusterShift + KPageTableShift));
TPhysAddr ptPhysAddr = Mmu::LinearToPhysical(pageTableLin);
// Page tables must be allocated otherwise we shouldn't be stealing the page.
__NK_ASSERT_DEBUG(ptPhysAddr != KPhysAddrInvalid);
SPageInfo* ptSPageInfo = SPageInfo::FromPhysAddr(ptPhysAddr);
r = StealPage(ptSPageInfo);

while(r != KErrCompletion);
// Retry the allocation now that we've stolen a page table page.
ptPageIndex = iPtPageAllocator.Alloc(demandPaged);
__NK_ASSERT_DEBUG(ptPageIndex >= 0);
// commit memory for page...
__NK_ASSERT_DEBUG(iPageTableMemory); // check we've initialised iPageTableMemory
TInt r = ThePageTableMemoryManager.Alloc(iPageTableMemory,ptPageIndex,aSubAllocator.iDemandPaged);

iPtPageAllocator.Free(ptPageIndex,aSubAllocator.iDemandPaged);

__NK_ASSERT_DEBUG(r==KErrNone);

// allocate page table info...
TUint ptgIndex = ptPageIndex/KPageTableGroupSize;
if(!iPageTableGroupCounts[ptgIndex])

__NK_ASSERT_DEBUG(iPageTableInfoMemory); // check we've initialised iPageTableInfoMemory
r = ThePageTableMemoryManager.Alloc(iPageTableInfoMemory,ptgIndex,aSubAllocator.iDemandPaged);

r = ThePageTableMemoryManager.Free(iPageTableMemory,ptPageIndex,aSubAllocator.iDemandPaged);
__NK_ASSERT_DEBUG(r==1);
iPtPageAllocator.Free(ptPageIndex,aSubAllocator.iDemandPaged);

__NK_ASSERT_DEBUG(r==KErrNone);
// For paged page tables set all the page table infos in this page as unused
// and their page table clusters as not allocated.
if (aSubAllocator.iDemandPaged)

SPageTableInfo* ptiBase = (SPageTableInfo*)KPageTableInfoBase + (ptgIndex*KPageTableInfosPerPage);
memclr(ptiBase, KPageSize);

++iPageTableGroupCounts[ptgIndex];

SPageTableInfo* pti = (SPageTableInfo*)KPageTableInfoBase+ptPageIndex*KPtClusterSize;
aSubAllocator.AllocPage(pti);
void PageTableAllocator::TSubAllocator::AllocPage(SPageTableInfo* aPageTableInfo)
SPageTableInfo* pti = aPageTableInfo;
__NK_ASSERT_DEBUG(pti->IsFirstInPage());

TRACE2(("Alloc PT page (%d) 0x%08x",iDemandPaged,pti->PageTable()));

// initialise page table infos...
do pti->New(iDemandPaged);
while(!(++pti)->IsFirstInPage());
pti -= KPtClusterSize;

// all page tables initially unused, so start them off on iCleanupList...
pti->AddToCleanupList(iCleanupList);
iFreeCount += KPtClusterSize;
__NK_ASSERT_DEBUG(CheckFreeList());
SPageTableInfo* PageTableAllocator::TSubAllocator::FreePage()
if(!IsCleanupRequired())

// get a completely free page...
SDblQueLink* link = iCleanupList.Last();
__NK_ASSERT_DEBUG(link);
SPageTableInfo* pti = SPageTableInfo::FromFreeLink(link);
__NK_ASSERT_DEBUG(pti->IsFirstInPage());
pti->RemoveFromCleanupList();
iFreeCount -= KPtClusterSize;
__NK_ASSERT_DEBUG(CheckFreeList());

TRACE2(("Free PT page (%d) 0x%08x",iDemandPaged,pti->PageTable()));

// Mark each page table info as no longer having its page table cluster allocated.
{// make sure all page tables in page are unused...
__NK_ASSERT_DEBUG(pti->IsUnused());
pti->PtClusterFreed();

while(!(++pti)->IsFirstInPage());
pti -= KPtClusterSize;
TBool PageTableAllocator::FreeReserve(TSubAllocator& aSubAllocator)
__NK_ASSERT_DEBUG(LockIsHeld());

// get a page which needs freeing...
SPageTableInfo* pti = aSubAllocator.FreePage();

TUint ptPageIndex = ((TLinAddr)pti-KPageTableInfoBase)>>(KPageTableInfoShift+KPtClusterShift);
iPtPageAllocator.Free(ptPageIndex,aSubAllocator.iDemandPaged);
TInt r = ThePageTableMemoryManager.Free(iPageTableMemory,ptPageIndex,aSubAllocator.iDemandPaged);

__NK_ASSERT_DEBUG(r==1);

// free page table info...
TUint ptgIndex = ptPageIndex/KPageTableGroupSize;
TUint groupCount = iPageTableGroupCounts[ptgIndex]; // compiler handles half-word values stupidly, so give it a hand
--groupCount; // one fewer allocated page table page in this group
iPageTableGroupCounts[ptgIndex] = groupCount;

r = ThePageTableMemoryManager.Free(iPageTableInfoMemory,ptgIndex,aSubAllocator.iDemandPaged);
TPte* PageTableAllocator::Alloc(TBool aDemandPaged)
TRACE(("PageTableAllocator::Alloc(%d)",(bool)aDemandPaged));
TPte* pt = DoAlloc(aDemandPaged);
TRACE(("PageTableAllocator::Alloc() returns 0x%08x phys=0x%08x",pt,pt?Mmu::PageTablePhysAddr(pt):KPhysAddrInvalid));

TPte* PageTableAllocator::DoAlloc(TBool aDemandPaged)
__NK_ASSERT_DEBUG(LockIsHeld());

// simulated OOM, but not if demand paged as this can't fail under normal circumstances...
RamAllocLock::Lock();
TBool fail = K::CheckForSimulatedAllocFail();
RamAllocLock::Unlock();

TSubAllocator& allocator = aDemandPaged ? iPagedAllocator : iUnpagedAllocator;

__NK_ASSERT_DEBUG(!iAllocating || !aDemandPaged); // can't recursively allocate demand paged tables

__NK_ASSERT_DEBUG(iAllocating<=allocator.iReserveCount); // can't recursively allocate more than the reserve

// keep up enough spare page tables...
if(!iAllocating++) // if we haven't gone recursive...

// make sure we have a page table to allocate...
while(allocator.iFreeCount<=allocator.iReserveCount)
if(!AllocReserve(allocator))

return 0; // out of memory

TRACE(("PageTableAllocator::DoAlloc recurse=%d",iAllocating));

// allocate a page table...
SPageTableInfo* pti = allocator.Alloc();

// initialise page table info...

// initialise page table...
TPte* pt = pti->PageTable();
memclr(pt,KPageTableSize);
CacheMaintenance::MultiplePtesUpdated((TLinAddr)pt,KPageTableSize);
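// (the CacheMaintenance::MultiplePtesUpdated() call above makes the freshly
// cleared entries visible to the MMU's page table walker before the table is
// handed out for use)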
SPageTableInfo* PageTableAllocator::TSubAllocator::Alloc()
__NK_ASSERT_DEBUG(PageTablesLockIsHeld());
__NK_ASSERT_DEBUG(iFreeCount);
__NK_ASSERT_DEBUG(CheckFreeList());

// get next free page table...
SDblQueLink* link = iFreeList.GetFirst();

pti = SPageTableInfo::FromFreeLink(link);

// need to get one back from the cleanup list...
link = iCleanupList.First();
__NK_ASSERT_DEBUG(link); // we can't be out of page tables
pti = SPageTableInfo::FromFreeLink(link);
__NK_ASSERT_DEBUG(pti->IsFirstInPage());
pti->RemoveFromCleanupList();

// add other page tables in the page to the free list...
SPageTableInfo* free = pti+1;
while(!free->IsFirstInPage())
__NK_ASSERT_DEBUG(free->IsUnused());
iFreeList.Add(&free->FreeLink());

// count page as allocated...
__NK_ASSERT_DEBUG(pti->IsUnused());
__NK_ASSERT_DEBUG(CheckFreeList());
void PageTableAllocator::Free(TPte* aPageTable)
TRACE(("PageTableAllocator::Free(0x%08x)",aPageTable));

void PageTableAllocator::DoFree(TPte* aPageTable)
__NK_ASSERT_DEBUG(LockIsHeld());

// make sure page table isn't being aliased...
TPhysAddr pagePhys = Mmu::PageTablePhysAddr(aPageTable);
__NK_ASSERT_DEBUG(pagePhys!=KPhysAddrInvalid);
TheMmu.RemoveAliasesForPageTable(pagePhys);

// free page table...
SPageTableInfo* pti = SPageTableInfo::FromPtPtr(aPageTable);
TSubAllocator& allocator = pti->IsDemandPaged() ? iPagedAllocator : iUnpagedAllocator;

// check for surplus pages...
if(allocator.IsCleanupRequired())
iCleanup.Add(CleanupTrampoline,this);
void PageTableAllocator::TSubAllocator::Free(SPageTableInfo* aPageTableInfo)
__NK_ASSERT_DEBUG(PageTablesLockIsHeld());
__NK_ASSERT_DEBUG(CheckFreeList());

SPageTableInfo* pti = aPageTableInfo;

// clear the page table info...
__NK_ASSERT_DEBUG(!pti->PermanenceCount());

// scan other page tables in same page...
SPageTableInfo* first = pti->FirstInPage();
SPageTableInfo* last = pti->LastInPage();
SPageTableInfo* prev;
SPageTableInfo* next;

// try insert page table after previous free page table in same page...
pti->FreeLink().InsertAfter(&prev->FreeLink());

// try insert page table before next free page table in same page...
pti->FreeLink().InsertBefore(&next->FreeLink());

// only free page table in page, so link into start of free list...
pti->FreeLink().InsertAfter(&iFreeList.iA);

__NK_ASSERT_DEBUG(CheckFreeList());

// see if all page tables in page are empty...

return; // some page tables still in use, so end

while(!(++pti)->IsFirstInPage());
pti -= KPtClusterSize;

// check if page with page table in is pinned...
TPte* pt = pti->PageTable();
TPhysAddr pagePhys = Mmu::PageTablePhysAddr(pt);
SPageInfo* pi = SPageInfo::FromPhysAddr(pagePhys);
TBool pinned = pi->PagedState()==SPageInfo::EPagedPinned;

// Note, the pinned state can't change even though we've now released the MmuLock.
// This is because all page tables in the page are unused and we don't pin unused
// page tables. Also, the page table(s) can't become used again whilst this function
// executes as we hold the page table allocator lock.

// return now and leave page table(s) in free list if their page is pinned...
// Note, when page is unpinned it will end up in the paging live list and
// eventually be reclaimed for other use (if the page tables in the page
// don't get reallocated before then).
__NK_ASSERT_DEBUG(pti->IsDemandPaged()); // only paged page tables should have been pinned

// the page with our page table in it is no longer in use...
void PageTableAllocator::TSubAllocator::MoveToCleanup(SPageTableInfo* aPageTableInfo)
__NK_ASSERT_DEBUG(PageTablesLockIsHeld());
__NK_ASSERT_DEBUG(CheckFreeList());

SPageTableInfo* pti = aPageTableInfo;
__NK_ASSERT_DEBUG(pti->IsFirstInPage());

TRACE2(("Cleanup PT page (%d) 0x%08x",iDemandPaged,pti->PageTable()));

// make sure all page tables in page are unused...
do __NK_ASSERT_DEBUG(pti->IsUnused());
while(!(++pti)->IsFirstInPage());
pti -= KPtClusterSize;

// unlink all page tables in page...
SDblQueLink* prev = pti->FreeLink().iPrev;
SDblQueLink* next = pti->LastInPage()->FreeLink().iNext;

// add page tables to cleanup list...
__NK_ASSERT_DEBUG(!pti->IsOnCleanupList());
pti->AddToCleanupList(iCleanupList);
__NK_ASSERT_DEBUG(CheckFreeList());
TBool PageTableAllocator::TSubAllocator::IsCleanupRequired()
return iFreeCount>=iReserveCount+KPtClusterSize && !iCleanupList.IsEmpty();
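// i.e. cleanup is only worthwhile once at least one whole cluster of
// KPtClusterSize page tables is free over and above the reserve, and the
// cleanup list actually holds a completely free page to give back.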
TBool PageTableAllocator::TSubAllocator::CheckFreeList()
TUint count = iFreeCount;

// count page tables in iCleanupList...
SDblQueLink* head = &iCleanupList.iA;
SDblQueLink* link = head;

SPageTableInfo* pti = SPageTableInfo::FromFreeLink(link);
__NK_ASSERT_DEBUG(pti->IsFirstInPage());
__NK_ASSERT_DEBUG(pti->IsOnCleanupList());
if(count<(TUint)KPtClusterSize)
count -= KPtClusterSize;

// count page tables in iFreeList...
head = &iFreeList.iA;

// check next free page table in page is linked in correct order...
SPageTableInfo* pti = SPageTableInfo::FromFreeLink(link);
SPageTableInfo* last = pti->LastInPage();
SPageTableInfo* next = pti;

__NK_ASSERT_DEBUG(pti->FreeLink().iNext==&next->FreeLink());
__NK_ASSERT_DEBUG(next->FreeLink().iPrev==&pti->FreeLink());

return link->iNext==head;
// Paged page table handling

TInt SPageTableInfo::ForcedFree()
__NK_ASSERT_DEBUG(PageTablesLockIsHeld());
__NK_ASSERT_DEBUG(MmuLock::IsHeld());
__NK_ASSERT_DEBUG(IsDemandPaged());

__NK_ASSERT_DEBUG(iPermanenceCount==0);

// clear all PTEs in page table...
TPte* pt = PageTable();
memclr(pt,KPageTableSize);
__e32_memory_barrier(); // make sure all CPUs read zeros from pt so forcing a page-in (rather than a rejuvenate) if accessed

if(type==ECoarseMapping)

TRACE2(("SPageTableInfo::ForcedFree() coarse 0x%08x 0x%x %d",iCoarse.iMemoryObject,iCoarse.iChunkIndex,iCoarse.iPteType));
// mustn't release MmuLock between clearing page table and calling this
// (otherwise page table may get updated before it's actually removed from
// the memory object)...
iCoarse.iMemoryObject->StealPageTable(iCoarse.iChunkIndex,iCoarse.iPteType);

else if(type==EFineMapping)

// need to remove page table from address space's page directory...
TLinAddr addr = iFine.iLinAddrAndOsAsid;
TUint osAsid = addr&KPageMask;
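// (iLinAddrAndOsAsid packs the OS ASID into the page-offset bits of the
// mapping's linear address, so the address and ASID can be unpacked with
// KPageMask as above and below)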
TPde* pPde = Mmu::PageDirectoryEntry(osAsid,addr);

TRACE2(("SPageTableInfo::ForcedFree() fine %d 0x%08x",osAsid,addr&~KPageMask));

TPde pde = KPdeUnallocatedEntry;
TRACE2(("!PDE %x=%x",pPde,pde));

SinglePdeUpdated(pPde);

__NK_ASSERT_DEBUG(0);
return KErrNotSupported;

// make sure page table updates visible to MMU...
CacheMaintenance::MultiplePtesUpdated((TLinAddr)pt,KPageTableSize);

// free the page table back to the allocator,
// this will also remove any IPC alias using it...
__NK_ASSERT_DEBUG(iPageCount==0); // should still be unused
::PageTables.Free(pt);
TInt PageTableAllocator::StealPage(SPageInfo* aPageInfo)
TRACE2(("PageTableAllocator::StealPage(0x%08x)",aPageInfo));
__NK_ASSERT_DEBUG(LockIsHeld()); // only works if PageTableAllocator lock is the RamAllocLock
__NK_ASSERT_DEBUG(MmuLock::IsHeld());

if (aPageInfo->Owner() == iPageTableInfoMemory)
return StealPageTableInfo(aPageInfo);

__UNLOCK_GUARD_START(MmuLock);

// This must be a page table page so steal it.
__NK_ASSERT_ALWAYS(aPageInfo->Owner()==iPageTableMemory);
TUint ptPageIndex = aPageInfo->Index();
SPageTableInfo* pti = (SPageTableInfo*)KPageTableInfoBase+ptPageIndex*KPtClusterSize;

aPageInfo->SetModifier(&pti);
__UNLOCK_GUARD_END(MmuLock);

// forcibly free each page table in the page...
{// Check for pinning; ForcedFree() releases MmuLock, so re-check pinning each time round the loop.
if (aPageInfo->PagedState() == SPageInfo::EPagedPinned)
{// The page table page is pinned so can't steal it.

r = pti->ForcedFree();

if(aPageInfo->CheckModified(&pti))

while(!(++pti)->IsFirstInPage());
pti -= KPtClusterSize; // restore pti back to first page table

if(!pti->IsOnCleanupList())

// the page might not already be on the cleanup list in the case where
// it was previously freed whilst it was pinned.
// In this case, a later unpinning would have put it back into the paging live
// list from where it is now subsequently being stolen...
iPagedAllocator.MoveToCleanup(pti);

// free the page from allocator so it ends up back in the paging pool as a free page...
while(FreeReserve(iPagedAllocator))

// return an 'error' to indicate page has not been stolen.
// We have however achieved the main aim of making the page 'free' and
// it will be available if page stealing attempts to steal the page again...

__NK_ASSERT_DEBUG(MmuLock::IsHeld());
TRACE2(("PageTableAllocator::StealPage returns %d",r));
TInt PageTableAllocator::StealPageTableInfo(SPageInfo* aPageInfo)
// Need to steal every page table for every page table info in this page.
// This page can't be modified or removed as we hold the lock, however
// the page table pages being freed may be rejuvenated and therefore their
// SPageInfos may be marked as modified.

TUint ptiOffset = aPageInfo->Index() * KPageTableInfosPerPage;
SPageTableInfo* ptiBase = (SPageTableInfo*)KPageTableInfoBase + ptiOffset;
SPageTableInfo* ptiEnd = ptiBase + KPageTableInfosPerPage;

for (SPageTableInfo* pti = ptiBase; pti < ptiEnd;)
{// Free each page table cluster that is allocated.
if (pti->IsPtClusterAllocated())

TPhysAddr ptPhysAddr = Mmu::LinearToPhysical((TLinAddr)pti->PageTable());
SPageInfo* ptSPageInfo = SPageInfo::FromPhysAddr(ptPhysAddr);
ptSPageInfo->SetModifier(&flash);

__NK_ASSERT_DEBUG(pti->IsPtClusterAllocated());
if (aPageInfo->PagedState() == SPageInfo::EPagedPinned ||
ptSPageInfo->PagedState() == SPageInfo::EPagedPinned)
{// The page table or page table info is pinned so can't steal info page.

r = pti->ForcedFree();

if(ptSPageInfo->CheckModified(&flash))
{// The page table page has been rejuvenated so can't steal it.
while (!(++pti)->IsFirstInPage());

SPageTableInfo* ptiTmp = pti - KPtClusterSize;

if(!ptiTmp->IsOnCleanupList())

// the page might not already be on the cleanup list in the case where
// it was previously freed whilst it was pinned.
// In this case, a later unpinning would have put it back into the paging live
// list from where it is now subsequently being stolen...
iPagedAllocator.MoveToCleanup(ptiTmp);

flash = 0; // The MmuLock has been flashed at least once.

{// Move onto the next cluster this page of page table infos refers to.
__NK_ASSERT_DEBUG(pti->IsFirstInPage());
pti += KPtClusterSize;
MmuLock::Flash(flash,KMaxPageInfoUpdatesInOneGo);

// free the pages discarded from allocator so they end up back in the paging pool as free pages...
while(FreeReserve(iPagedAllocator))

r = KErrCompletion; // The pager needs to remove the page from the live list.
TInt PageTableAllocator::MovePage(DMemoryObject* aMemory, SPageInfo* aOldPageInfo,
TUint aBlockZoneId, TBool aBlockRest)
// We don't move page table or page table info pages; however, if this page
// is demand paged then we may be able to discard it.

if (aOldPageInfo->Owner() == iPageTableInfoMemory)
if (!(iPtPageAllocator.IsDemandPagedPtInfo(aOldPageInfo)))
return KErrNotSupported;

__NK_ASSERT_DEBUG(aOldPageInfo->Owner() == iPageTableMemory);
if (!(iPtPageAllocator.IsDemandPagedPt(aOldPageInfo)))
return KErrNotSupported;

if (aOldPageInfo->PagedState() == SPageInfo::EPagedPinned)
{// The page is pinned so don't attempt to discard it as pinned pages
// can't be discarded. Also, the pager will invoke this method again.

// Let the pager discard the page as it controls the size of the live list.
// If the size of the live list allows then eventually
// PageTableAllocator::StealPage() will be invoked on this page.
return ThePager.DiscardPage(aOldPageInfo, aBlockZoneId, aBlockRest);
TInt PageTableAllocator::PinPageTable(TPte* aPageTable, TPinArgs& aPinArgs)
__NK_ASSERT_DEBUG(MmuLock::IsHeld());
__NK_ASSERT_DEBUG(SPageTableInfo::FromPtPtr(aPageTable)->IsDemandPaged());
__NK_ASSERT_DEBUG(!SPageTableInfo::FromPtPtr(aPageTable)->IsUnused());
__NK_ASSERT_DEBUG(aPinArgs.HaveSufficientPages(KNumPagesToPinOnePageTable));

// pin page with page table in...
TPhysAddr pagePhys = Mmu::PageTablePhysAddr(aPageTable);
SPageInfo* pi = SPageInfo::FromPhysAddr(pagePhys);
if (!pi->PinCount())
{// Page is being pinned having previously been unpinned.
TInt r = iPtPageAllocator.PtPagePinCountInc();

ThePager.Pin(pi,aPinArgs);

// pin page with page table info in...
SPageTableInfo* pti = SPageTableInfo::FromPtPtr(aPageTable);
pagePhys = Mmu::UncheckedLinearToPhysical((TLinAddr)pti,KKernelOsAsid);
pi = SPageInfo::FromPhysAddr(pagePhys);
ThePager.Pin(pi,aPinArgs);
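
// Note: pinning one page table therefore pins two physical pages - the page
// holding the page table itself and the page holding its SPageTableInfo -
// which is what the KNumPagesToPinOnePageTable requirement asserted above
// accounts for.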
void PageTableAllocator::UnpinPageTable(TPte* aPageTable, TPinArgs& aPinArgs)
// unpin page with page table info in...
SPageTableInfo* pti = SPageTableInfo::FromPtPtr(aPageTable);
TPhysAddr pagePhys = Mmu::UncheckedLinearToPhysical((TLinAddr)pti,KKernelOsAsid);
SPageInfo* pi = SPageInfo::FromPhysAddr(pagePhys);
ThePager.Unpin(pi,aPinArgs);

// unpin page with page table in...
pagePhys = Mmu::PageTablePhysAddr(aPageTable);
pi = SPageInfo::FromPhysAddr(pagePhys);
ThePager.Unpin(pi,aPinArgs);

if (!pi->PinCount())
{// This page table page is no longer pinned.
iPtPageAllocator.PtPagePinCountDec();

TBool IsPageTableUnpagedRemoveAllowed(SPageInfo* aPageInfo)
{ return ::PageTables.IsPageTableUnpagedRemoveAllowed(aPageInfo); }

TBool PageTableAllocator::IsPageTableUnpagedRemoveAllowed(SPageInfo* aPageInfo)
if (aPageInfo->Owner() == iPageTableInfoMemory)
{// Page table info pages are never added to the live list but can be
// stolen via DPager::StealPage()

if (aPageInfo->Owner() == iPageTableMemory)
{// Page table pages are added to the live list, but only after the page they
// map has been paged in. Therefore a PDE can reference a page table before that
// page table's page is on the live list; allow this, but only for uninitialised
// page table pages.
TUint ptPageIndex = aPageInfo->Index();
SPageTableInfo* pti = (SPageTableInfo*)KPageTableInfoBase+ptPageIndex*KPtClusterSize;

if (!pti->IsUnused())

TPte* pte = pti->PageTable();
TPte* pteEnd = pte + (KPageTableSize/sizeof(TPte));
while (pte < pteEnd)
if (*pte++ != KPteUnallocatedEntry)

while(!(++pti)->IsFirstInPage());

void PageTableAllocator::CleanupTrampoline(TAny* aSelf)
((PageTableAllocator*)aSelf)->Cleanup();

void PageTableAllocator::Cleanup()
// free any surplus pages...
while(FreeReserve(iPagedAllocator) || FreeReserve(iUnpagedAllocator))