os/kernelhwsrv/kernel/eka/memmodel/epoc/flexible/mmu/mptalloc.cpp
     1 // Copyright (c) 2007-2009 Nokia Corporation and/or its subsidiary(-ies).
     2 // All rights reserved.
     3 // This component and the accompanying materials are made available
     4 // under the terms of the License "Eclipse Public License v1.0"
     5 // which accompanies this distribution, and is available
     6 // at the URL "http://www.eclipse.org/legal/epl-v10.html".
     7 //
     8 // Initial Contributors:
     9 // Nokia Corporation - initial contribution.
    10 //
    11 // Contributors:
    12 //
    13 // Description:
    14 //
    15 
    16 #include <plat_priv.h>
    17 #include "mm.h"
    18 #include "mmu.h"
    19 #include "mpager.h"
    20 
    21 #include "mmanager.h"
    22 #include "mmapping.h"
    23 #include "mobject.h"
    24 
    25 #include "mptalloc.h"
    26 #include "cache_maintenance.inl"
    27 
    28 /**
    29 @class PageTableAllocator
    30 @details
    31 
    32 NOTES
    33 
    34 Page tables are mapped into a sparse array in the virtual address range
    35 #KPageTableBase..#KPageTableEnd. For each present page table there is a
    36 corresponding #SPageTableInfo object mapped from #KPageTableInfoBase upwards.
    37 
    38 Page tables for demand paged content are kept separate from other page tables;
    39 this enables the memory for these to be freed when the page tables no longer map
    40 any memory, i.e. when it has all been paged out. Pages containing these 'paged' page
    41 tables are stored in the demand paging live list, so they participate in the page
    42 aging process.
    43 
    44 The 'unpaged' page tables are allocated from the bottom of the array upwards,
    45 via TPtPageAllocator::iLowerAllocator; the 'paged' page tables are allocated
    46 from the top of the array downwards, via TPtPageAllocator::iUpperAllocator.
    47 These two regions are prevented from overlapping, or from coming close enough
    48 together that the #SPageTableInfo structs for paged and unpaged page tables would
    49 lie in the same page. This means that the SPageTableInfo memory for paged page
    50 tables can be discarded when its page tables are discarded.
    51 
    52 Memory for page tables and page table info objects is managed by
    53 #ThePageTableMemoryManager. When allocating memory for demand paged use, this
    54 uses memory from #ThePager which will reclaim paged memory if necessary.
    55 Provided the live list always holds at least #DPager::iMinYoungPages pages, this
    56 guarantees that handling page faults can never fail by running out of memory.
    57 
    58 TODO: In really pathological situations page table allocation can fail due to
    59 running out of virtual address space to map the table; this needs to be prevented
    60 from happening when handling demand paging faults.
    61 */
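       // A sketch, for orientation, of the index/address arithmetic used later in this file;
       // these are the expressions that appear in AllocReserve(), FreeReserve() and StealPage():
       //
       //   page of page tables:  KPageTableBase + (ptPageIndex << (KPtClusterShift + KPageTableShift))
       //   its SPageTableInfos:  (SPageTableInfo*)KPageTableInfoBase + ptPageIndex * KPtClusterSize
       //   back to the index:    ((TLinAddr)pti - KPageTableInfoBase) >> (KPageTableInfoShift + KPtClusterShift)
       //
       // i.e. each page of page tables holds KPtClusterSize page tables, and KPageTableGroupSize
       // such pages share a single page of SPageTableInfo objects.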
    62 
    63 
    64 PageTableAllocator PageTables;
    65 
    66 
    67 
    68 TBool PageTablesLockIsHeld()
    69 	{
    70 	return ::PageTables.LockIsHeld();
    71 	}
    72 
    73 
    74 /**
    75 Minimum number of page tables to keep in reserve.
    76 */
    77 const TUint KNumReservedPageTables = 0; // none needed - page tables for mapping page tables and infos are permanently allocated
    78 
    79 
    80 /**
    81 Manager for the memory object used to store all the MMU page tables.
    82 */
    83 class DPageTableMemoryManager : public DMemoryManager
    84 	{
    85 public:
    86 	/**
    87 	Not implemented - page table memory is never destroyed.
    88 	*/
    89 	virtual void Destruct(DMemoryObject* aMemory)
    90 		{}
    91 
    92 	virtual TInt StealPage(DMemoryObject* aMemory, SPageInfo* aPageInfo)
    93 		{ return PageTables.StealPage(aPageInfo); }
    94 
    95 	/**
    96 	Does nothing, returns KErrNone.
    97 	The RAM containing page tables does not need access restrictions applied for demand paging
    98 	purposes. A page table's lifetime is implicitly managed through the pages it maps.
    99 	*/
   100 	virtual TInt RestrictPage(DMemoryObject* aMemory, SPageInfo* aPageInfo, TRestrictPagesType aRestriction)
   101 		{ return KErrNone; }
   102 
   103 	/**
   104 	Does nothing, returns KErrNone.
   105 	The contents of page tables never need saving as they are dynamically generated.
   106 	*/
   107 	virtual TInt CleanPage(DMemoryObject* aMemory, SPageInfo* aPageInfo, TPhysAddr*& aPageArrayEntry)
   108 		{ return KErrNone; }
   109 
   110 	/**
   111 	Not implemented, returns KErrNotSupported.
   112 	*/
   113 	virtual TInt Pin(DMemoryObject* aMemory, DMemoryMappingBase* aMapping, TPinArgs& aPinArgs)
   114 		{ return KErrNotSupported; }
   115 
   116 	/**
   117 	Not implemented.
   118 	*/
   119 	virtual void Unpin(DMemoryObject* aMemory, DMemoryMappingBase* aMapping, TPinArgs& aPinArgs)
   120 		{ }
   121 
   122 
   123 	virtual TInt MovePage(	DMemoryObject* aMemory, SPageInfo* aOldPageInfo, 
   124 							TPhysAddr& aNewPage, TUint aBlockZoneId, TBool aBlockRest);
   125 public:
   126 	/**
   127 	Allocate a page of RAM for storing page tables in.
   128 
   129 	@param aMemory		A memory object associated with this manager.
   130 	@param aIndex		Page index, within the memory, to allocate the page at.
   131 	@param aDemandPaged	True if the memory is to be used for page tables mapping
   132 						demand paged content.
   133 
   134 	@return KErrNone if successful, otherwise one of the system wide error codes.
   135 	*/
   136 	TInt Alloc(DMemoryObject* aMemory, TUint aIndex, TBool aDemandPaged);
   137 
   138 	/**
   139 	Free a page of RAM that was being used for storing page tables.
   140 
   141 	@param aMemory		A memory object associated with this manager.
   142 	@param aIndex		Page index, within the memory, to free the page from.
   143 	@param aDemandPaged	True if the memory is being used for page tables mapping
   144 						demand paged content.
   145 
   146 	@return The number of pages freed (0 or 1) if successful, otherwise one of the system wide error codes.
   147 	*/
   148 	TInt Free(DMemoryObject* aMemory, TUint aIndex, TBool aDemandPaged);
   149 	};
   150 
   151 /**
   152 The single instance of the #DPageTableMemoryManager class.
   153 */
   154 DPageTableMemoryManager ThePageTableMemoryManager;
   155 
   156 
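       // Outline of the allocation protocol implemented by Alloc() below: AddPageStart()
       // reserves the page array entry, the physical page is allocated under the RamAllocLock,
       // AddPage() publishes it under the MmuLock, MapPages() maps it, and AddPageEnd()
       // balances the AddPageStart(); if any step fails, Free() reverts the partial allocation.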
   157 TInt DPageTableMemoryManager::Alloc(DMemoryObject* aMemory, TUint aIndex, TBool aDemandPaged)
   158 	{
   159 	TRACE2(("DPageTableMemoryManager::Alloc(0x%08x,0x%x,%d)",aMemory, aIndex, aDemandPaged));
   160 	__NK_ASSERT_DEBUG(MemoryObjectLock::IsHeld(aMemory));
   161 
   162 	// allocate page array entry...
   163 	RPageArray::TIter pageList;
   164 	TPhysAddr* p = aMemory->iPages.AddPageStart(aIndex,pageList);
   165 	if(!p)
   166 		return KErrNoMemory;
   167 
   168 	// allocate RAM...
   169 	RamAllocLock::Lock();
   170 	TPhysAddr pagePhys;
   171 	TInt r;
   172 	if(aDemandPaged)
   173 		{
   174 		r = ThePager.PageInAllocPages(&pagePhys,1,aMemory->RamAllocFlags());
   175 		__NK_ASSERT_DEBUG(r!=KErrNoMemory);
   176 		}
   177 	else
   178 		{// Allocate a fixed page as page tables aren't movable.
   179 		r = TheMmu.AllocRam(&pagePhys, 1, aMemory->RamAllocFlags(), EPageFixed);
   180 		}
   181 	RamAllocLock::Unlock();
   182 
   183 	TUint usedNew = 0;
   184 	if(r==KErrNone)
   185 		{
   186 		// add RAM to page array...
   187 		MmuLock::Lock();
   188 		if(aDemandPaged)
   189 			ThePager.Event(DPager::EEventPagePageTableAlloc,SPageInfo::FromPhysAddr(pagePhys));
   190 		SPageInfo* pi = SPageInfo::FromPhysAddr(pagePhys);
   191 		pi->SetManaged(aMemory,aIndex,aMemory->PageInfoFlags());
   192 		RPageArray::AddPage(p,pagePhys);
   193 		MmuLock::Unlock();
   194 		usedNew = 1;
   195 
   196 		// map page...
   197 		r = aMemory->MapPages(pageList);
   198 		}
   199 
   200 	// release page array entry...
   201 	aMemory->iPages.AddPageEnd(aIndex,usedNew);
   202 
   203 	// revert if error...
   204 	if(r!=KErrNone)
   205 		Free(aMemory,aIndex,aDemandPaged);
   206 
   207 	return r;
   208 	}
   209 
   210 
   211 TInt DPageTableMemoryManager::Free(DMemoryObject* aMemory, TUint aIndex, TBool aDemandPaged)
   212 	{
   213 	TRACE2(("DPageTableMemoryManager::Free(0x%08x,0x%x,%d)",aMemory, aIndex, aDemandPaged));
   214 	__NK_ASSERT_DEBUG(MemoryObjectLock::IsHeld(aMemory));
   215 
   216 	// find page array entry...
   217 	RPageArray::TIter pageList;
   218 	TPhysAddr* p = aMemory->iPages.RemovePageStart(aIndex,pageList);
   219 	if(!p)
   220 		return KErrNoMemory;
   221 
   222 	// unmap page...
   223 	aMemory->UnmapPages(pageList,true);
   224 
   225 	RamAllocLock::Lock();
   226 
   227 	// remove page...
   228 	MmuLock::Lock();
   229 	TPhysAddr pagePhys = RPageArray::RemovePage(p);
   230 	MmuLock::Unlock();
   231 
   232 	TInt r;
   233 	if(pagePhys==KPhysAddrInvalid)
   234 		{
   235 		// no page removed...
   236 		r = 0;
   237 		}
   238 	else
   239 		{
   240 		// free the removed page...
   241 		if(aDemandPaged)
   242 			ThePager.PageInFreePages(&pagePhys,1);
   243 		else
   244 			TheMmu.FreeRam(&pagePhys, 1, EPageFixed);
   245 		r = 1;
   246 		}
   247 
   248 	RamAllocLock::Unlock();
   249 
   250 	// cleanup...
   251 	aMemory->iPages.RemovePageEnd(aIndex,r);
   252 	return r;
   253 	}
   254 
   255 TInt DPageTableMemoryManager::MovePage(	DMemoryObject* aMemory, SPageInfo* aOldPageInfo, 
   256 										TPhysAddr& aNewPage, TUint aBlockZoneId, TBool aBlockRest)
   257 		{
   258 		// This could be a demand paged page table info which can be discarded 
   259 		// but let the PageTableAllocator handle that.
   260 		return ::PageTables.MovePage(aMemory, aOldPageInfo, aBlockZoneId, aBlockRest);
   261 		}
   262 
   263 
   264 //
   265 // PageTableAllocator
   266 //
   267 
   268 void PageTableAllocator::Init2(DMutex* aLock)
   269 	{
   270 	TRACEB(("PageTableAllocator::Init2(0x%x)",aLock));
   271 	iLock = aLock;
   272 
   273 	__NK_ASSERT_DEBUG(iUnpagedAllocator.CheckFreeList());
   274 
   275 	// scan for already allocated page tables
   276 	// (assumes the first page table is used to map page tables)...
   277 	SPageTableInfo* pti = (SPageTableInfo*)KPageTableInfoBase;
   278 	TUint pages = 0;
   279 	for(;;)
   280 		{
   281 		TPte pte = ((TPte*)KPageTableBase)[pages];
   282 		if(pte==KPteUnallocatedEntry)
   283 			break; // end (assumes no gaps in page table allocation) 
   284 
   285 		// process free page tables in this page...
   286 		TUint freeCount = 0;
   287 		do
   288 			{
   289 			if(pti->IsUnused())
   290 				{
   291 				pti->PtClusterAlloc();
   292 				iUnpagedAllocator.iFreeList.Add(&pti->FreeLink());
   293 				++freeCount;
   294 				}
   295 #ifdef _DEBUG
   296 			else
   297 				__NK_ASSERT_DEBUG(pti->IsPtClusterAllocated());
   298 #endif
   299 			}
   300 		while(!(++pti)->IsFirstInPage());
   301 		iUnpagedAllocator.iFreeCount += freeCount;
   302 		__NK_ASSERT_DEBUG(iUnpagedAllocator.CheckFreeList());
   303 		TRACE2(("PT page 0x%08x has %d free tables",pti[-KPtClusterSize].PageTable(),freeCount));
   304 
   305 		// count page, and move on to next one...
   306 		++pages;
   307 		__NK_ASSERT_DEBUG(pages<KChunkSize/KPageSize); // we've assumed less than one page table of page tables
   308 		}
   309 
   310 	// construct allocator for page table pages...
   311 	iPtPageAllocator.Init2(pages);
   312 
   313 	// initialise allocator page table infos...
   314 	iPageTableGroupCounts[0] = pages;
   315 	__NK_ASSERT_DEBUG(pages/KPageTableGroupSize==0); // we've assumed less than 1 page of page table infos
   316 
   317 	// FOLLOWING CODE WILL USE THIS OBJECT TO ALLOCATE SOME PAGE TABLES,
   318 	// SO ALLOCATOR MUST BE INITIALISED TO A FIT STATE BEFORE THIS POINT!
   319 
   320 	// construct memory object for page tables...
   321 	TMappingCreateFlags mapFlags = (TMappingCreateFlags)(EMappingCreateFixedVirtual|EMappingCreateReserveAllResources);
   322 #if defined(__CPU_PAGE_TABLES_FULLY_CACHED)
   323 	TMemoryAttributes memAttr = EMemoryAttributeStandard;
   324 #else
   325 	TMemoryAttributes memAttr = (TMemoryAttributes)(EMemoryAttributeNormalUncached|EMemoryAttributeDefaultShareable);
   326 #endif
   327 	TMemoryCreateFlags createFlags = (TMemoryCreateFlags)(EMemoryCreateNoWipe|EMemoryCreateCustomManager);
   328 	TInt r = MM::InitFixedKernelMemory(iPageTableMemory, KPageTableBase, KPageTableEnd, pages<<KPageShift, (TMemoryObjectType)(T_UintPtr)&ThePageTableMemoryManager, createFlags, memAttr, mapFlags);
   329 	__NK_ASSERT_ALWAYS(r==KErrNone);
   330 	MM::MemorySetLock(iPageTableMemory,aLock);
   331 
   332 	// construct memory object for page table infos...
   333 	memAttr = EMemoryAttributeStandard;
   334 	TUint size = pages*KPtClusterSize*sizeof(SPageTableInfo);
   335 	size = (size+KPageMask)&~KPageMask;
   336 	r = MM::InitFixedKernelMemory(iPageTableInfoMemory, KPageTableInfoBase, KPageTableInfoEnd, size, (TMemoryObjectType)(T_UintPtr)&ThePageTableMemoryManager, createFlags, memAttr, mapFlags);
   337 	__NK_ASSERT_ALWAYS(r==KErrNone);
   338 	MM::MemorySetLock(iPageTableInfoMemory,aLock);
   339 
   340 	// make sure we have enough reserve page tables...
   341 	Lock();
   342 	iUnpagedAllocator.Init2(this,KNumReservedPageTables,false);
   343 	iPagedAllocator.Init2(this,0,true);
   344 	Unlock();
   345 
   346 	TRACEB(("PageTableAllocator::Init2 done"));
   347 	}
   348 
   349 
   350 void PageTableAllocator::Init2B()
   351 	{
   352 	TRACEB(("PageTableAllocator::Init2B()"));
   353 	TInt r = iPageTableMemory->iPages.PreallocateMemory();
   354 	__NK_ASSERT_ALWAYS(r==KErrNone);
   355 	r = iPageTableInfoMemory->iPages.PreallocateMemory();
   356 	__NK_ASSERT_ALWAYS(r==KErrNone);
   357 	TRACEB(("PageTableAllocator::Init2B done"));
   358 	}
   359 
   360 
   361 void PageTableAllocator::TSubAllocator::Init2(PageTableAllocator* aAllocator, TUint aReserveCount, TBool aDemandPaged)
   362 	{
   363 	iReserveCount = aReserveCount;
   364 	iDemandPaged = aDemandPaged;
   365 	while(iFreeCount<aReserveCount)
   366 		if(!aAllocator->AllocReserve(*this))
   367 			{
   368 			__NK_ASSERT_ALWAYS(0);
   369 			}
   370 	}
   371 
   372 
   373 void PageTableAllocator::TPtPageAllocator::Init2(TUint aNumInitPages)
   374 	{
   375 	iLowerAllocator = TBitMapAllocator::New(KMaxPageTablePages,ETrue);
   376 	__NK_ASSERT_ALWAYS(iLowerAllocator);
   377 	iLowerAllocator->Alloc(0,aNumInitPages);
   378 	iLowerWaterMark = aNumInitPages-1;
   379 
   380 	iUpperAllocator = TBitMapAllocator::New(KMaxPageTablePages,ETrue);
   381 	__NK_ASSERT_ALWAYS(iUpperAllocator);
   382 	
   383 	__ASSERT_COMPILE(KMaxPageTablePages > (TUint)KMinUnpinnedPagedPtPages);	// Unlikely to be untrue.
   384 	iUpperWaterMark = KMaxPageTablePages - KMinUnpinnedPagedPtPages;
   385 	iPinnedPageTablePages = 0;	// OK to clear this without MmuLock as only one thread running so far.
   386 	}
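       // The resulting arrangement of page-table pages (a sketch):
       //
       //   [0 .. iLowerWaterMark]                      region used for unpaged page tables (iLowerAllocator, grows up)
       //   [iUpperWaterMark .. KMaxPageTablePages-1]   region used for demand paged page tables (iUpperAllocator, grows down)
       //
       // The gap between the watermarks is unused. Alloc() below moves the watermarks towards
       // each other but refuses any allocation that would put paged and unpaged pages into the
       // same group of KPageTableGroupSize pages, since those share a page of SPageTableInfos.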
   387 
   388 
   389 static TUint32 RandomSeed = 33333;
   390 
   391 TUint PageTableAllocator::TPtPageAllocator::RandomPagedPtPage()
   392 	{
   393 	__NK_ASSERT_DEBUG(PageTablesLockIsHeld());
   394 
   395 	// Pick an allocated page at random from the range iUpperWaterMark to KMaxPageTablePages - 1.
   396 	RandomSeed = RandomSeed * 69069 + 1; // next 'random' number
   397 	TUint allocRange = KMaxPageTablePages - iUpperWaterMark - 1;
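       	// Scale the 32-bit pseudo-random value into [0, allocRange) using a multiply and
       	// shift (equivalent to RandomSeed * allocRange / 2^32), which avoids a division.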
   398 	TUint bit = (TUint64(RandomSeed) * TUint64(allocRange)) >> 32;
   399 
   400 	// All page table pages should be allocated or we shouldn't be stealing one at random.
   401 	__NK_ASSERT_DEBUG(iUpperAllocator->NotFree(bit, 1));
   402 
   403 	return KMaxPageTablePages - 1 - bit;
   404 	}
   405 
   406 
   407 TInt PageTableAllocator::TPtPageAllocator::Alloc(TBool aDemandPaged)
   408 	{
   409 	__NK_ASSERT_DEBUG(PageTablesLockIsHeld());
   410 	TUint pageIndex;
   411 	if(aDemandPaged)
   412 		{
   413 		TInt bit = iUpperAllocator->Alloc();
   414 		// There are always unpaged page tables so iUpperAllocator will always have 
   415 		// at least one free bit.
   416 		__NK_ASSERT_DEBUG(bit >= 0);
   417 
   418 		pageIndex = KMaxPageTablePages - 1 - bit;
   419 
   420 		if(pageIndex < iUpperWaterMark)
   421 			{
   422 			// new upper watermark...
   423 			if((pageIndex & ~(KPageTableGroupSize - 1)) <= iLowerWaterMark)
   424 				{
   425 				// clashes with other bitmap allocator, so fail..
   426 				iUpperAllocator->Free(bit);
   427 				TRACE(("TPtPageAllocator::Alloc too low iUpperWaterMark %d ",iUpperWaterMark));
   428 				return -1;
   429 				}
   430 			// Hold mmu lock so iUpperWaterMark isn't read by pinning before we've updated it.
   431 			MmuLock::Lock();
   432 			iUpperWaterMark = pageIndex;
   433 			MmuLock::Unlock();
   434 			TRACE(("TPtPageAllocator::Alloc new iUpperWaterMark=%d",pageIndex));
   435 			}
   436 		}
   437 	else
   438 		{
   439 		TInt bit = iLowerAllocator->Alloc();
   440 		if(bit < 0)
   441 			return bit;
   442 		pageIndex = bit;
   443 		if(pageIndex > iLowerWaterMark)
   444 			{// iLowerAllocator->Alloc() should only pick the next bit after iLowerWaterMark.
   445 			__NK_ASSERT_DEBUG(pageIndex == iLowerWaterMark + 1);
   446 			MmuLock::Lock();
   447 			// new lower watermark...
   448 			if(	pageIndex >= (iUpperWaterMark & ~(KPageTableGroupSize - 1)) ||
   449 				AtPinnedPagedPtsLimit(iUpperWaterMark, pageIndex, iPinnedPageTablePages))
   450 				{
   451 				// clashes with other bitmap allocator or it would reduce the amount 
   452 				// of available unpinned paged page tables too far, so fail..
   453 				MmuLock::Unlock();
   454 				iLowerAllocator->Free(bit);
   455 				TRACE(("TPtPageAllocator::Alloc iLowerWaterMark=%d",iLowerWaterMark));
   456 				return -1;
   457 				}
   458 			iLowerWaterMark = pageIndex;
   459 			MmuLock::Unlock();
   460 			TRACE(("TPtPageAllocator::Alloc new iLowerWaterMark=%d", iLowerWaterMark));
   461 			}
   462 		}
   463 	return pageIndex;
   464 	}
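       // Worked example of the clash guards above, assuming purely for illustration that
       // KPageTableGroupSize is 16: if iLowerWaterMark is 20, the unpaged region has reached
       // group 1 (pages 16..31), so the paged region is not allowed to extend down to any page
       // whose group start (pageIndex & ~15) is <= 20; the lowest acceptable paged page is 32.
       // The symmetric check in the unpaged branch stops the lower region entering the group
       // containing iUpperWaterMark, and AtPinnedPagedPtsLimit() additionally refuses growth
       // that would leave too few unpinned paged page-table pages. Together these preserve the
       // rule that paged and unpaged page tables never share a page of SPageTableInfo objects.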
   465 
   466 
   467 void PageTableAllocator::TPtPageAllocator::Free(TUint aPageIndex, TBool aDemandPaged)
   468 	{
   469 	__NK_ASSERT_DEBUG(PageTablesLockIsHeld());
   470 	if(aDemandPaged)
   471 		iUpperAllocator->Free(KMaxPageTablePages-1-aPageIndex);
   472 	else
   473 		iLowerAllocator->Free(aPageIndex);
   474 	}
   475 
   476 
   477 void PageTableAllocator::Lock()
   478 	{
   479 	Kern::MutexWait(*iLock);
   480 	}
   481 
   482 
   483 void PageTableAllocator::Unlock()
   484 	{
   485 	Kern::MutexSignal(*iLock);
   486 	}
   487 
   488 
   489 TBool PageTableAllocator::LockIsHeld()
   490 	{
   491 	return iLock->iCleanup.iThread == &Kern::CurrentThread();
   492 	}
   493 
   494 
   495 TBool PageTableAllocator::AllocReserve(TSubAllocator& aSubAllocator)
   496 	{
   497 	__NK_ASSERT_DEBUG(LockIsHeld());
   498 
   499 	TBool demandPaged = aSubAllocator.iDemandPaged;
   500 
   501 	// allocate page...
   502 	TInt ptPageIndex = iPtPageAllocator.Alloc(demandPaged);
   503 	if (ptPageIndex < 0) 
   504 		{
   505 		if (demandPaged)
   506 			{
   507 			TInt r;
   508 			do
   509 				{
   510 				// Can't fail to find a demand paged page table, otherwise a page fault 
   511 				// could fail with KErrNoMemory.  Therefore, keep attempting to steal a 
   512 				// demand paged page table page until successful.
   513 				TUint index = iPtPageAllocator.RandomPagedPtPage();
   514 				MmuLock::Lock();
   515 				TLinAddr pageTableLin = KPageTableBase + (index << (KPtClusterShift + KPageTableShift));
   516 				TPhysAddr ptPhysAddr = Mmu::LinearToPhysical(pageTableLin);
   517 				// Page tables must be allocated otherwise we shouldn't be stealing the page.
   518 				__NK_ASSERT_DEBUG(ptPhysAddr != KPhysAddrInvalid);
   519 				SPageInfo* ptSPageInfo = SPageInfo::FromPhysAddr(ptPhysAddr);
   520 				r = StealPage(ptSPageInfo);
   521 				MmuLock::Unlock();
   522 				}
   523 			while(r != KErrCompletion);
   524 			// Retry the allocation now that we've stolen a page table page.
   525 			ptPageIndex = iPtPageAllocator.Alloc(demandPaged);
   526 			__NK_ASSERT_DEBUG(ptPageIndex >= 0);
   527 			}		
   528 		else
   529 			{
   530 			return EFalse;
   531 			}
   532 		}
   533 
   534 	// commit memory for page...
   535 	__NK_ASSERT_DEBUG(iPageTableMemory); // check we've initialised iPageTableMemory
   536 	TInt r = ThePageTableMemoryManager.Alloc(iPageTableMemory,ptPageIndex,aSubAllocator.iDemandPaged);
   537 	if(r==KErrNoMemory)
   538 		{
   539 		iPtPageAllocator.Free(ptPageIndex,aSubAllocator.iDemandPaged);
   540 		return false;
   541 		}
   542 	__NK_ASSERT_DEBUG(r==KErrNone);
   543 
   544 	// allocate page table info...
   545 	TUint ptgIndex = ptPageIndex/KPageTableGroupSize;
   546 	if(!iPageTableGroupCounts[ptgIndex])
   547 		{
   548 		__NK_ASSERT_DEBUG(iPageTableInfoMemory); // check we've initialised iPageTableInfoMemory
   549 		r = ThePageTableMemoryManager.Alloc(iPageTableInfoMemory,ptgIndex,aSubAllocator.iDemandPaged);
   550 
   551 		if(r==KErrNoMemory)
   552 			{
   553 			r = ThePageTableMemoryManager.Free(iPageTableMemory,ptPageIndex,aSubAllocator.iDemandPaged);
   554 			__NK_ASSERT_DEBUG(r==1);
   555 			iPtPageAllocator.Free(ptPageIndex,aSubAllocator.iDemandPaged);
   556 			return false;
   557 			}
   558 		__NK_ASSERT_DEBUG(r==KErrNone);
   559 		// For paged page tables set all the page table infos in this page as unused 
   560 		// and their page table clusters as not allocated.
   561 		if (aSubAllocator.iDemandPaged)
   562 			{
   563 			SPageTableInfo* ptiBase = (SPageTableInfo*)KPageTableInfoBase + (ptgIndex*KPageTableInfosPerPage);
   564 			memclr(ptiBase, KPageSize);
   565 			}
   566 		}
   567 	++iPageTableGroupCounts[ptgIndex];
   568 
   569 	SPageTableInfo* pti = (SPageTableInfo*)KPageTableInfoBase+ptPageIndex*KPtClusterSize;
   570 	aSubAllocator.AllocPage(pti);
   571 	return true;
   572 	}
   573 
   574 
   575 void PageTableAllocator::TSubAllocator::AllocPage(SPageTableInfo* aPageTableInfo)
   576 	{
   577 	SPageTableInfo* pti = aPageTableInfo;
   578 	__NK_ASSERT_DEBUG(pti->IsFirstInPage());
   579 
   580 	TRACE2(("Alloc PT page (%d) 0x%08x",iDemandPaged,pti->PageTable()));
   581 
   582 	// initialise page table infos...
   583 	do pti->New(iDemandPaged);
   584 	while(!(++pti)->IsFirstInPage());
   585 	pti -= KPtClusterSize;
   586 
   587 	// all page tables initially unused, so start them off on iCleanupList...
   588 	pti->AddToCleanupList(iCleanupList);
   589 	iFreeCount += KPtClusterSize;
   590 	__NK_ASSERT_DEBUG(CheckFreeList());
   591 	}
   592 
   593 
   594 SPageTableInfo* PageTableAllocator::TSubAllocator::FreePage()
   595 	{
   596 	if(!IsCleanupRequired())
   597 		return 0;
   598 
   599 	// get a completely free page...
   600 	SDblQueLink* link = iCleanupList.Last();
   601 	__NK_ASSERT_DEBUG(link);
   602 	SPageTableInfo* pti = SPageTableInfo::FromFreeLink(link);
   603 	__NK_ASSERT_DEBUG(pti->IsFirstInPage());
   604 	pti->RemoveFromCleanupList();
   605 	iFreeCount -= KPtClusterSize;
   606 	__NK_ASSERT_DEBUG(CheckFreeList());
   607 
   608 	TRACE2(("Free PT page (%d) 0x%08x",iDemandPaged,pti->PageTable()));
   609 
   610 	// Mark each page table info as no longer having its page table cluster allocated.
   611 	do 
   612 		{// make sure all page tables in page are unused...
   613 		__NK_ASSERT_DEBUG(pti->IsUnused());
   614 		pti->PtClusterFreed();
   615 		}
   616 	while(!(++pti)->IsFirstInPage());
   617 	pti -= KPtClusterSize;
   618 
   619 	return pti;
   620 	}
   621 
   622 
   623 TBool PageTableAllocator::FreeReserve(TSubAllocator& aSubAllocator)
   624 	{
   625 	__NK_ASSERT_DEBUG(LockIsHeld());
   626 
   627 	// get a page which needs freeing...
   628 	SPageTableInfo* pti = aSubAllocator.FreePage();
   629 	if(!pti)
   630 		return false;
   631 
   632 	// free the page...
   633 	TUint ptPageIndex = ((TLinAddr)pti-KPageTableInfoBase)>>(KPageTableInfoShift+KPtClusterShift);
   634 	iPtPageAllocator.Free(ptPageIndex,aSubAllocator.iDemandPaged);
   635 	TInt r = ThePageTableMemoryManager.Free(iPageTableMemory,ptPageIndex,aSubAllocator.iDemandPaged);
   636 	(void)r;
   637 	__NK_ASSERT_DEBUG(r==1);
   638 
   639 	// free page table info...
   640 	TUint ptgIndex = ptPageIndex/KPageTableGroupSize;
   641 	TUint groupCount = iPageTableGroupCounts[ptgIndex]; // compiler handles half-word values stupidly, so give it a hand
   642 	--groupCount;
   643 	iPageTableGroupCounts[ptgIndex] = groupCount;
   644 	if(!groupCount)
   645 		r = ThePageTableMemoryManager.Free(iPageTableInfoMemory,ptgIndex,aSubAllocator.iDemandPaged);
   646 
   647 	return true;
   648 	}
   649 
   650 
   651 TPte* PageTableAllocator::Alloc(TBool aDemandPaged)
   652 	{
   653 	TRACE(("PageTableAllocator::Alloc(%d)",(bool)aDemandPaged));
   654 	TPte* pt = DoAlloc(aDemandPaged);
   655 	TRACE(("PageTableAllocator::Alloc() returns 0x%08x phys=0x%08x",pt,pt?Mmu::PageTablePhysAddr(pt):KPhysAddrInvalid));
   656 	return pt;
   657 	}
   658 
   659 
   660 TPte* PageTableAllocator::DoAlloc(TBool aDemandPaged)
   661 	{
   662 	__NK_ASSERT_DEBUG(LockIsHeld());
   663 
   664 #ifdef _DEBUG
   665 	// simulated OOM, but not if demand paged as this can't fail under normal circumstances...
   666 	if(!aDemandPaged)
   667 		{
   668 		RamAllocLock::Lock();
   669 		TBool fail = K::CheckForSimulatedAllocFail();
   670 		RamAllocLock::Unlock();
   671 		if(fail)
   672 			return 0;
   673 		}
   674 #endif
   675 
   676 	TSubAllocator& allocator = aDemandPaged ? iPagedAllocator : iUnpagedAllocator;
   677 
   678 	__NK_ASSERT_DEBUG(!iAllocating || !aDemandPaged); // can't recursively allocate demand paged tables
   679 
   680 	__NK_ASSERT_DEBUG(iAllocating<=allocator.iReserveCount); // can't recursively allocate more than the reserve
   681 
   682 	// keep up enough spare page tables...
   683 	if(!iAllocating++) // if we haven't gone recursive...
   684 		{
   685 		// make sure we have a page table to allocate...
   686 		while(allocator.iFreeCount<=allocator.iReserveCount)
   687 			if(!AllocReserve(allocator))
   688 				{
   689 				--iAllocating;
   690 				return 0; // out of memory
   691 				}
   692 		}
   693 	else
   694 		{
   695 		TRACE(("PageTableAllocator::DoAlloc recurse=%d",iAllocating));
   696 		}
   697 
   698 	// allocate a page table...
   699 	SPageTableInfo* pti = allocator.Alloc();
   700 
   701 	// initialise page table info...
   702 	pti->Init();
   703 
   704 	// initialise page table...
   705 	TPte* pt = pti->PageTable();
   706 	memclr(pt,KPageTableSize);
   707 	CacheMaintenance::MultiplePtesUpdated((TLinAddr)pt,KPageTableSize);
   708 
   709 	// done...
   710 	--iAllocating;
   711 	return pt;
   712 	}
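       // Note on iAllocating: AllocReserve() commits memory through ThePageTableMemoryManager
       // and could in principle re-enter this function if page tables were needed to map the
       // new page of page tables or infos; the asserts above bound that re-entrancy by the
       // sub-allocator's reserve count. With KNumReservedPageTables set to zero this relies on
       // the page tables mapping iPageTableMemory and iPageTableInfoMemory being permanently
       // allocated (see the comment where KNumReservedPageTables is defined).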
   713 
   714 
   715 SPageTableInfo* PageTableAllocator::TSubAllocator::Alloc()
   716 	{
   717 	__NK_ASSERT_DEBUG(PageTablesLockIsHeld());
   718 	__NK_ASSERT_DEBUG(iFreeCount);
   719 	__NK_ASSERT_DEBUG(CheckFreeList());
   720 
   721 	// get next free page table...
   722 	SDblQueLink* link = iFreeList.GetFirst();
   723 	SPageTableInfo* pti; 
   724 	if(link)
   725 		pti = SPageTableInfo::FromFreeLink(link);
   726 	else
   727 		{
   728 		// need to get one back from the cleanup list...
   729 		link = iCleanupList.First();
   730 		__NK_ASSERT_DEBUG(link); // we can't be out of page tables
   731 		pti = SPageTableInfo::FromFreeLink(link);
   732 		__NK_ASSERT_DEBUG(pti->IsFirstInPage());
   733 		pti->RemoveFromCleanupList();
   734 
   735 		// add other page tables in the page to the free list...
   736 		SPageTableInfo* free = pti+1;
   737 		while(!free->IsFirstInPage())
   738 			{
   739 			__NK_ASSERT_DEBUG(free->IsUnused());
   740 			iFreeList.Add(&free->FreeLink());
   741 			++free;
   742 			}
   743 		}
   744 
   745 	// count page as allocated...
   746 	--iFreeCount;
   747 	__NK_ASSERT_DEBUG(pti->IsUnused());
   748 	__NK_ASSERT_DEBUG(CheckFreeList());
   749 
   750 	return pti;
   751 	}
   752 
   753 
   754 void PageTableAllocator::Free(TPte* aPageTable)
   755 	{
   756 	TRACE(("PageTableAllocator::Free(0x%08x)",aPageTable));
   757 	DoFree(aPageTable);
   758 	}
   759 
   760 
   761 void PageTableAllocator::DoFree(TPte* aPageTable)
   762 	{
   763 	__NK_ASSERT_DEBUG(LockIsHeld());
   764 
   765 	// make sure page table isn't being aliased...
   766 	TPhysAddr pagePhys = Mmu::PageTablePhysAddr(aPageTable);
   767 	__NK_ASSERT_DEBUG(pagePhys!=KPhysAddrInvalid);
   768 	TheMmu.RemoveAliasesForPageTable(pagePhys);
   769 
   770 	// free page table...
   771 	SPageTableInfo* pti = SPageTableInfo::FromPtPtr(aPageTable);
   772 	TSubAllocator& allocator = pti->IsDemandPaged() ? iPagedAllocator : iUnpagedAllocator;
   773 	allocator.Free(pti);
   774 
   775 	// check for surplus pages...
   776 	if(allocator.IsCleanupRequired())
   777 		{
   778 		iCleanup.Add(CleanupTrampoline,this);
   779 		}
   780 	}
   781 
   782 
   783 void PageTableAllocator::TSubAllocator::Free(SPageTableInfo* aPageTableInfo)
   784 	{
   785 	__NK_ASSERT_DEBUG(PageTablesLockIsHeld());
   786 	__NK_ASSERT_DEBUG(CheckFreeList());
   787 
   788 	SPageTableInfo* pti = aPageTableInfo;
   789 
   790 	// clear the page table info...
   791 	MmuLock::Lock();
   792 	__NK_ASSERT_DEBUG(!pti->PermanenceCount());
   793 	pti->SetUnused();
   794 	MmuLock::Unlock();
   795 
   796 	// scan other page tables in same page...
   797 	SPageTableInfo* first = pti->FirstInPage();
   798 	SPageTableInfo* last = pti->LastInPage();
   799 	SPageTableInfo* prev;
   800 	SPageTableInfo* next;
   801 
   802 	// try insert page table after previous free page table in same page...
   803 	prev = pti;
   804 	while(prev>first)
   805 		{
   806 		--prev;
   807 		if(prev->IsUnused())
   808 			{
   809 			pti->FreeLink().InsertAfter(&prev->FreeLink());
   810 			goto inserted;
   811 			}
   812 		}
   813 
   814 	// try insert page table before next free page table in same page...
   815 	next = pti;
   816 	while(next<last)
   817 		{
   818 		++next;
   819 		if(next->IsUnused())
   820 			{
   821 			pti->FreeLink().InsertBefore(&next->FreeLink());
   822 			goto inserted;
   823 			}
   824 		}
   825 
   826 	// only free page table in page, so link into start of free list...
   827 	pti->FreeLink().InsertAfter(&iFreeList.iA);
   828 
   829 inserted:
   830 	++iFreeCount;
   831 	__NK_ASSERT_DEBUG(CheckFreeList());
   832 
   833 	// see if all page tables in page are empty...
   834 	pti = first;
   835 	do
   836 		{
   837 		if(!pti->IsUnused())
   838 			return; // some page tables still in use, so end
   839 		}
   840 	while(!(++pti)->IsFirstInPage());
   841 	pti -= KPtClusterSize;
   842 
   843 	// check if page with page table in is pinned...
   844 	MmuLock::Lock();
   845 	TPte* pt = pti->PageTable();
   846 	TPhysAddr pagePhys = Mmu::PageTablePhysAddr(pt);
   847 	SPageInfo* pi = SPageInfo::FromPhysAddr(pagePhys);
   848 	TBool pinned = pi->PagedState()==SPageInfo::EPagedPinned;
   849 	MmuLock::Unlock();
   850 	// Note, the pinned state can't change even though we've now released the MmuLock.
   851 	// This is because all page tables in the page are unused and we don't pin unused
   852 	// page tables. Also, the page table(s) can't become used again whilst this function
   853 	// executes as we hold the page table allocator lock.
   854 	if(pinned)
   855 		{
   856 		// return now and leave page table(s) in free list if their page is pinned...
   857 		// Note, when page is unpinned it will end up in the paging live list and
   858 		// eventually be reclaimed for other use (if the page tables in the page
   859 		// don't get reallocated before then).
   860 		__NK_ASSERT_DEBUG(pti->IsDemandPaged()); // only paged page tables should have been pinned
   861 		return; 
   862 		}
   863 
   864 	// the page with our page table in it is no longer in use...
   865 	MoveToCleanup(pti);
   866 	}
   867 
   868 
   869 void PageTableAllocator::TSubAllocator::MoveToCleanup(SPageTableInfo* aPageTableInfo)
   870 	{
   871 	__NK_ASSERT_DEBUG(PageTablesLockIsHeld());
   872 	__NK_ASSERT_DEBUG(CheckFreeList());
   873 
   874 	SPageTableInfo* pti = aPageTableInfo;
   875 	__NK_ASSERT_DEBUG(pti->IsFirstInPage());
   876 
   877 	TRACE2(("Cleanup PT page (%d) 0x%08x",iDemandPaged,pti->PageTable()));
   878 
   879 	// make sure all page tables in page are unused...
   880 #ifdef _DEBUG
   881 	do __NK_ASSERT_DEBUG(pti->IsUnused());
   882 	while(!(++pti)->IsFirstInPage());
   883 	pti -= KPtClusterSize;
   884 #endif
   885 
   886 	// unlink all page tables in page...
   887 	SDblQueLink* prev = pti->FreeLink().iPrev;
   888 	SDblQueLink* next = pti->LastInPage()->FreeLink().iNext;
   889 	prev->iNext = next;
   890 	next->iPrev = prev;
   891 
   892 	// add page tables to cleanup list...
   893 	__NK_ASSERT_DEBUG(!pti->IsOnCleanupList());
   894 	pti->AddToCleanupList(iCleanupList);
   895 	__NK_ASSERT_DEBUG(CheckFreeList());
   896 	}
   897 
   898 
   899 
   900 TBool PageTableAllocator::TSubAllocator::IsCleanupRequired()
   901 	{
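       	// Cleanup is only worthwhile once at least one whole cluster (KPtClusterSize page
       	// tables) is free beyond the reserve and a fully free page is actually sitting on
       	// the cleanup list waiting to be handed back.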
   902 	return iFreeCount>=iReserveCount+KPtClusterSize && !iCleanupList.IsEmpty();
   903 	}
   904 
   905 
   906 #ifdef _DEBUG
   907 
   908 TBool PageTableAllocator::TSubAllocator::CheckFreeList()
   909 	{
   910 	TUint count = iFreeCount;
   911 
   912 	// count page tables in iCleanupList...
   913 	SDblQueLink* head = &iCleanupList.iA;
   914 	SDblQueLink* link = head;
   915 	for(;;)
   916 		{
   917 		link = link->iNext;
   918 		if(link==head)
   919 			break;
   920 		SPageTableInfo* pti = SPageTableInfo::FromFreeLink(link);
   921 		__NK_ASSERT_DEBUG(pti->IsFirstInPage());
   922 		__NK_ASSERT_DEBUG(pti->IsOnCleanupList());
   923 		if(count<(TUint)KPtClusterSize)
   924 			return false;
   925 		count -= KPtClusterSize;
   926 		}
   927 
   928 	// count page tables in iFreeList...	
   929 	head = &iFreeList.iA;
   930 	link = head;
   931 	while(count)
   932 		{
   933 		link = link->iNext;
   934 		if(link==head)
   935 			return false;
   936 
   937 		// check next free page table in page is linked in correct order...
   938 		SPageTableInfo* pti = SPageTableInfo::FromFreeLink(link);
   939 		SPageTableInfo* last = pti->LastInPage();
   940 		SPageTableInfo* next = pti;
   941 		while(next<last)
   942 			{
   943 			++next;
   944 			if(next->IsUnused())
   945 				{
   946 				__NK_ASSERT_DEBUG(pti->FreeLink().iNext==&next->FreeLink());
   947 				__NK_ASSERT_DEBUG(next->FreeLink().iPrev==&pti->FreeLink());
   948 				break;
   949 				}
   950 			}
   951 
   952 		--count;
   953 		}
   954 
   955 	return link->iNext==head;
   956 	}
   957 
   958 #endif
   959 
   960 
   961 
   962 //
   963 // Paged page table handling
   964 //
   965 
   966 TInt SPageTableInfo::ForcedFree()
   967 	{
   968 	__NK_ASSERT_DEBUG(PageTablesLockIsHeld());
   969 	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
   970 	__NK_ASSERT_DEBUG(IsDemandPaged());
   971 
   972 	TUint type = iType;
   973 
   974 	if(type==EUnused)
   975 		return KErrNone;
   976 
   977 	__NK_ASSERT_DEBUG(iPermanenceCount==0);
   978 
   979 	// clear all PTEs in page table...
   980 	TPte* pt = PageTable();
   981 	memclr(pt,KPageTableSize);
   982 	__e32_memory_barrier(); // make sure all CPUs read zeros from pt so forcing a page-in (rather than a rejuvenate) if accessed
   983 	iPageCount = 0;
   984 
   985 	if(type==ECoarseMapping)
   986 		{
   987 		TRACE2(("SPageTableInfo::ForcedFree() coarse 0x%08x 0x%x %d",iCoarse.iMemoryObject,iCoarse.iChunkIndex,iCoarse.iPteType));
   988 		// mustn't release MmuLock between clearing page table and calling this
   989 		// (otherwise page table may get updated before its actually removed from
   990 		// (otherwise page table may get updated before it's actually removed from
   991 		iCoarse.iMemoryObject->StealPageTable(iCoarse.iChunkIndex,iCoarse.iPteType);
   992 		}
   993 	else if(type==EFineMapping)
   994 		{
   995 		// need to remove page table from address space's page directory...
   996 		TLinAddr addr = iFine.iLinAddrAndOsAsid;
   997 		TUint osAsid = addr&KPageMask;
   998 		TPde* pPde = Mmu::PageDirectoryEntry(osAsid,addr);
   999 
  1000 		TRACE2(("SPageTableInfo::ForcedFree() fine %d 0x%08x",osAsid,addr&~KPageMask));
  1001 
  1002 		TPde pde = KPdeUnallocatedEntry;
  1003 		TRACE2(("!PDE %x=%x",pPde,pde));
  1004 		*pPde = pde;
  1005 		SinglePdeUpdated(pPde);
  1006 		}
  1007 	else
  1008 		{
  1009 		// invalid type...
  1010 		__NK_ASSERT_DEBUG(0);
  1011 		return KErrNotSupported;
  1012 		}
  1013 
  1014 	MmuLock::Unlock();
  1015 
  1016 	// make sure page table updates visible to MMU...
  1017 	CacheMaintenance::MultiplePtesUpdated((TLinAddr)pt,KPageTableSize);
  1018 	InvalidateTLB();
  1019 
  1020 	// free the page table back to the allocator,
  1021 	// this will also remove any IPC alias using it...
  1022 	__NK_ASSERT_DEBUG(iPageCount==0); // should still be unused
  1023 	::PageTables.Free(pt);
  1024 
  1025 	MmuLock::Lock();
  1026 
  1027 	return KErrNone;
  1028 	}
  1029 
  1030 
  1031 TInt PageTableAllocator::StealPage(SPageInfo* aPageInfo)
  1032 	{
  1033 	TRACE2(("PageTableAllocator::StealPage(0x%08x)",aPageInfo));
  1034 	__NK_ASSERT_DEBUG(LockIsHeld()); // only works if PageTableAllocator lock is the RamAllocLock
  1035 	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
  1036 
  1037 	if (aPageInfo->Owner() == iPageTableInfoMemory)
  1038 		return StealPageTableInfo(aPageInfo);
  1039 
  1040 	__UNLOCK_GUARD_START(MmuLock);
  1041 
  1042 	// This must be a page table page so steal it.
  1043 	__NK_ASSERT_ALWAYS(aPageInfo->Owner()==iPageTableMemory);
  1044 	TUint ptPageIndex = aPageInfo->Index();
  1045 	SPageTableInfo* pti = (SPageTableInfo*)KPageTableInfoBase+ptPageIndex*KPtClusterSize;
  1046 
  1047 	aPageInfo->SetModifier(&pti);
  1048 	__UNLOCK_GUARD_END(MmuLock);
  1049 
  1050 	// forcibly free each page table in the page...
  1051 	TInt r;
  1052 	do
  1053 		{// Check for pinning, ForcedFree() releases MmuLock so must check for
  1054 		// each page table.
  1055 		if (aPageInfo->PagedState() == SPageInfo::EPagedPinned)
  1056 			{// The page table page is pinned so can't steal it.
  1057 			r = KErrInUse;
  1058 			break;
  1059 			}
  1060 		r = pti->ForcedFree();
  1061 		if(r!=KErrNone)
  1062 			break;
  1063 		if(aPageInfo->CheckModified(&pti))
  1064 			{
  1065 			r = KErrInUse;
  1066 			break;
  1067 			}
  1068 		}
  1069 	while(!(++pti)->IsFirstInPage());
  1070 	pti -= KPtClusterSize; // restore pti back to first page table
  1071 
  1072 	if(r==KErrNone)
  1073 		{
  1074 		MmuLock::Unlock();
  1075 		if(!pti->IsOnCleanupList())
  1076 			{
  1077 			// the page might not already be on the cleanup list in the case where
  1078 			// it was previously freed whilst it was pinned.
  1079 			// In this case, a later unpinning would have put it back into the paging live
  1080 			// list from where it is now subsequently being stolen...
  1081 			iPagedAllocator.MoveToCleanup(pti);
  1082 			}
  1083 		// free the page from allocator so it ends up back in the paging pool as a free page...
  1084 		while(FreeReserve(iPagedAllocator))
  1085 			{}
  1086 		// return an 'error' to indicate page has not been stolen.
  1087 		// We have however achieved the main aim of making the page 'free' and
  1088 		// it will be available if page stealing attempts to steal the page again...
  1089 		r = KErrCompletion;
  1090 		MmuLock::Lock();
  1091 		}
  1092 
  1093 	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
  1094 	TRACE2(("PageTableAllocator::StealPage returns %d",r));
  1095 	return r;
  1096 	}
  1097 
  1098 
  1099 TInt PageTableAllocator::StealPageTableInfo(SPageInfo* aPageInfo)
  1100 	{
  1101 	// Need to steal every page table for every page table info in this page.
  1102 	// This page can't be modified or removed as we hold the lock; however,
  1103 	// the page table pages being freed may be rejuvenated and therefore their 
  1104 	// SPageInfos may be marked as modified.
  1105 	TInt r = KErrNone;
  1106 	TUint ptiOffset = aPageInfo->Index() * KPageTableInfosPerPage;
  1107 	SPageTableInfo* ptiBase = (SPageTableInfo*)KPageTableInfoBase + ptiOffset;
  1108 	SPageTableInfo* ptiEnd = ptiBase + KPageTableInfosPerPage;
  1109 	TUint flash = 0;
  1110 	for (SPageTableInfo* pti = ptiBase; pti < ptiEnd;)
  1111 		{// Free each page table cluster that is allocated.
  1112 		if (pti->IsPtClusterAllocated())
  1113 			{
  1114 			TPhysAddr ptPhysAddr = Mmu::LinearToPhysical((TLinAddr)pti->PageTable());
  1115 			SPageInfo* ptSPageInfo = SPageInfo::FromPhysAddr(ptPhysAddr);
  1116 			ptSPageInfo->SetModifier(&flash);
  1117 			do 
  1118 				{
  1119 				__NK_ASSERT_DEBUG(pti->IsPtClusterAllocated());
  1120 				if (aPageInfo->PagedState() == SPageInfo::EPagedPinned || 
  1121 					ptSPageInfo->PagedState() == SPageInfo::EPagedPinned)
  1122 					{// The page table or page table info is pinned so can't steal info page.
  1123 					r = KErrInUse;
  1124 					break;
  1125 					}
  1126 				r = pti->ForcedFree();
  1127 				if(r!=KErrNone)
  1128 					break;
  1129 				if(ptSPageInfo->CheckModified(&flash))
  1130 					{// The page table page has been rejuvenated so can't steal it.
  1131 					r = KErrInUse;
  1132 					break;
  1133 					}
  1134 				}
  1135 			while (!(++pti)->IsFirstInPage());
  1136 			if (r != KErrNone)
  1137 				break;
  1138 			SPageTableInfo* ptiTmp = pti - KPtClusterSize;
  1139 			MmuLock::Unlock();
  1140 			if(!ptiTmp->IsOnCleanupList())
  1141 				{
  1142 				// the page might not already be on the cleanup list in the case where
  1143 				// it was previously freed whilst it was pinned.
  1144 				// In this case, a later unpinning would have put it back into the paging live
  1145 				// list from where it is now subsequently being stolen...
  1146 				iPagedAllocator.MoveToCleanup(ptiTmp);
  1147 				}
  1148 			MmuLock::Lock();
  1149 			flash = 0;		// The MmuLock has been flashed at least once.
  1150 			}
  1151 		else
  1152 			{// Move onto the next cluster this page of page table infos refers to.
  1153 			__NK_ASSERT_DEBUG(pti->IsFirstInPage());
  1154 			pti += KPtClusterSize;
  1155 			MmuLock::Flash(flash,KMaxPageInfoUpdatesInOneGo);
  1156 			}
  1157 		}
  1158 	// free the pages discarded from allocator so they end up back in the paging pool as free pages...
  1159 	MmuLock::Unlock();
  1160 	while(FreeReserve(iPagedAllocator))
  1161 		{}
  1162 	if (r == KErrNone)
  1163 		r = KErrCompletion;	// The pager needs to remove the page from the live list.
  1164 	MmuLock::Lock();
  1165 	return r;
  1166 	}
  1167 
  1168 
  1169 TInt PageTableAllocator::MovePage(DMemoryObject* aMemory, SPageInfo* aOldPageInfo, 
  1170 									TUint aBlockZoneId, TBool aBlockRest)
  1171 	{
  1172 	// We don't move page table or page table info pages, however, if this page 
  1173 	// is demand paged then we may be able to discard it.
  1174 	MmuLock::Lock();
  1175 	if (aOldPageInfo->Owner() == iPageTableInfoMemory)
  1176 		{
  1177 		if (!(iPtPageAllocator.IsDemandPagedPtInfo(aOldPageInfo)))
  1178 			{
  1179 			MmuLock::Unlock();
  1180 			return KErrNotSupported;
  1181 			}
  1182 		}
  1183 	else
  1184 		{
  1185 		__NK_ASSERT_DEBUG(aOldPageInfo->Owner() == iPageTableMemory);
  1186 		if (!(iPtPageAllocator.IsDemandPagedPt(aOldPageInfo)))
  1187 			{
  1188 			MmuLock::Unlock();
  1189 			return KErrNotSupported;
  1190 			}
  1191 		}
  1192 	if (aOldPageInfo->PagedState() == SPageInfo::EPagedPinned)
  1193 		{// The page is pinned so don't attempt to discard it as pinned pages 
  1194 		// can't be discarded.  Also, the pager will invoke this method again.
  1195 		MmuLock::Unlock();
  1196 		return KErrInUse;
  1197 		}
  1198 	// Let the pager discard the page as it controls the size of the live list.
  1199 	// If the size of the live list allows then eventually 
  1200 	// PageTableAllocator::StealPage() will be invoked on this page.
  1201 	return ThePager.DiscardPage(aOldPageInfo, aBlockZoneId, aBlockRest);
  1202 	}
  1203 
  1204 
  1205 TInt PageTableAllocator::PinPageTable(TPte* aPageTable, TPinArgs& aPinArgs)
  1206 	{
  1207 	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
  1208 	__NK_ASSERT_DEBUG(SPageTableInfo::FromPtPtr(aPageTable)->IsDemandPaged());
  1209 	__NK_ASSERT_DEBUG(!SPageTableInfo::FromPtPtr(aPageTable)->IsUnused());
  1210 	__NK_ASSERT_DEBUG(aPinArgs.HaveSufficientPages(KNumPagesToPinOnePageTable));
  1211 
  1212 	// pin page with page table in...
  1213 	TPhysAddr pagePhys = Mmu::PageTablePhysAddr(aPageTable);
  1214 	SPageInfo* pi = SPageInfo::FromPhysAddr(pagePhys);
  1215 	if (!pi->PinCount())
  1216 		{// Page is being pinned having previously been unpinned.
  1217 		TInt r = iPtPageAllocator.PtPagePinCountInc();
  1218 		if (r != KErrNone)
  1219 			return r;
  1220 		}
  1221 	ThePager.Pin(pi,aPinArgs);
  1222 
  1223 	// pin page with page table info in...
  1224 	SPageTableInfo* pti = SPageTableInfo::FromPtPtr(aPageTable);
  1225 	pagePhys = Mmu::UncheckedLinearToPhysical((TLinAddr)pti,KKernelOsAsid);
  1226 	pi = SPageInfo::FromPhysAddr(pagePhys);
  1227 	ThePager.Pin(pi,aPinArgs);
  1228 	return KErrNone;
  1229 	}
  1230 
  1231 
  1232 void PageTableAllocator::UnpinPageTable(TPte* aPageTable, TPinArgs& aPinArgs)
  1233 	{
  1234 	// unpin page with page table info in...
  1235 	SPageTableInfo* pti = SPageTableInfo::FromPtPtr(aPageTable);
  1236 	TPhysAddr pagePhys = Mmu::UncheckedLinearToPhysical((TLinAddr)pti,KKernelOsAsid);
  1237 	SPageInfo* pi = SPageInfo::FromPhysAddr(pagePhys);
  1238 	ThePager.Unpin(pi,aPinArgs);
  1239 
  1240 	// unpin page with page table in...
  1241 	pagePhys = Mmu::PageTablePhysAddr(aPageTable);
  1242 	pi = SPageInfo::FromPhysAddr(pagePhys);
  1243 	ThePager.Unpin(pi,aPinArgs);
  1244 
  1245 	if (!pi->PinCount())
  1246 		{// This page table page is no longer pinned.
  1247 		iPtPageAllocator.PtPagePinCountDec();
  1248 		}
  1249 	}
  1250 
  1251 
  1252 #ifdef _DEBUG
  1253 TBool IsPageTableUnpagedRemoveAllowed(SPageInfo* aPageInfo)
  1254 		{ return ::PageTables.IsPageTableUnpagedRemoveAllowed(aPageInfo); }
  1255 
  1256 TBool PageTableAllocator::IsPageTableUnpagedRemoveAllowed(SPageInfo* aPageInfo)
  1257 	{
  1258 	if (aPageInfo->Owner() == iPageTableInfoMemory)
  1259 		{// Page table info pages are never added to the live list but can be
  1260 		// stolen via DPager::StealPage()
  1261 		return ETrue;
  1262 		}
  1263 
  1264 	if (aPageInfo->Owner() == iPageTableMemory)
  1265 		{// Page table pages are added to the live list but only after the page they 
  1266 		// map has been paged in. Therefore, a pde can reference a pte before its page has
  1267 		// been added to the live list; allow this, but only for uninitialised page table pages.
  1268 		TUint ptPageIndex = aPageInfo->Index();
  1269 		SPageTableInfo* pti = (SPageTableInfo*)KPageTableInfoBase+ptPageIndex*KPtClusterSize;
  1270 		do
  1271 			{
  1272 			if (!pti->IsUnused())
  1273 				{
  1274 				TPte* pte = pti->PageTable();
  1275 				TPte* pteEnd = pte + (KPageTableSize/sizeof(TPte));
  1276 				while (pte < pteEnd)
  1277 					if (*pte++ != KPteUnallocatedEntry)
  1278 						return EFalse;
  1279 				}
  1280 			}
  1281 		while(!(++pti)->IsFirstInPage());
  1282 		return ETrue;
  1283 		}
  1284 	return EFalse;
  1285 	}
  1286 #endif
  1287 
  1288 
  1289 //
  1290 // Cleanup
  1291 //
  1292 
  1293 void PageTableAllocator::CleanupTrampoline(TAny* aSelf)
  1294 	{
  1295 	((PageTableAllocator*)aSelf)->Cleanup();
  1296 	}
  1297 
  1298 
  1299 void PageTableAllocator::Cleanup()
  1300 	{
  1301 	// free any surplus pages...
  1302 	Lock();
  1303 	while(FreeReserve(iPagedAllocator) || FreeReserve(iUnpagedAllocator))
  1304 		{}
  1305 	Unlock();
  1306 	}