os/kernelhwsrv/kernel/eka/memmodel/epoc/flexible/mmu/mpager.cpp
author sl@SLION-WIN7.fritz.box
Fri, 15 Jun 2012 03:10:57 +0200
changeset 0 bde4ae8d615e
permissions -rw-r--r--
First public contribution.
// Copyright (c) 2005-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
//

#include "memmodel.h"
#include "mm.h"
#include "mmu.h"

#include "mpager.h"
#include "mrom.h"
#include "mobject.h"
#include "mmapping.h"
#include "maddressspace.h"
#include "mmanager.h"
#include "mptalloc.h"
#include "mpagearray.h"
#include "mswap.h"
#include "mthrash.h"
#include "cache_maintenance.inl"


const TUint16 KDefaultYoungOldRatio = 3;
const TUint16 KDefaultMinPages = 256;
#ifdef _USE_OLDEST_LISTS
const TUint16 KDefaultOldOldestRatio = 3;
#endif

const TUint KMinOldPages = 1;

/*	On a 32 bit system without PAE there can't be more than 2^(32-KPageShift) pages.
 *	Subtract 1 so it doesn't overflow when converted to bytes.
*/
const TUint	KAbsoluteMaxPageCount = (1u<<(32-KPageShift))-1u;
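
// For example, with the usual 4K pages (KPageShift==12) this evaluates to
// 2^20-1 == 0xFFFFF pages, i.e. one page short of a full 4GB, so the count
// still fits in a TUint32 once converted back up into bytes.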



DPager ThePager;


DPager::DPager()
	: iMinimumPageCount(0), iMaximumPageCount(0), iYoungOldRatio(0),
	  iYoungCount(0),iOldCount(0),iNumberOfFreePages(0)
	{
	}


void DPager::Init2()
	{
	TRACEB(("DPager::Init2()"));

#if defined(__CPU_ARM)

/** Minimum number of young pages the demand paging live list may have.
	Need at least 4 mapped pages to guarantee that any ARM instruction can execute,
	plus enough pages for 4 page tables to map those pages, plus enough pages for the
	page table info structures of those page tables.
	(Worst case is a Thumb-2 STM instruction with both instruction and data straddling chunk
	boundaries.)
*/
	iMinYoungPages = 4											// pages
					+(4+KPtClusterSize-1)/KPtClusterSize		// page table pages
					+(4+KPageTableInfosPerPage-1)/KPageTableInfosPerPage;	// page table info pages
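
	// Since KPtClusterSize and KPageTableInfosPerPage are both at least 4 on
	// typical configurations, each ceiling division above contributes a single
	// page, giving 4 + 1 + 1 = 6 pages in total here.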

#elif defined(__CPU_X86)

/*	Need at least 6 mapped pages to guarantee that any x86 instruction can execute,
	plus enough pages for 6 page tables to map those pages, plus enough pages for the
	page table info structures of those page tables.
	(Worst case is (?) a MOV [X],[Y] instruction with instruction, 'X' and 'Y' all
	straddling chunk boundaries.)
*/
	iMinYoungPages = 6											// pages
					+(6+KPtClusterSize-1)/KPtClusterSize		// page table pages
					+(6+KPageTableInfosPerPage-1)/KPageTableInfosPerPage;	// page table info pages

#else
#error Unknown CPU
#endif

#ifdef __SMP__
	// Adjust min page count so that all CPUs are guaranteed to make progress.
	// NOTE: Can't use NKern::NumberOfCpus here because we haven't finished booting yet and will
	// always have only one CPU running at this point...

	// TODO: Before we can enable this the base test configuration needs
	// updating to have a sufficient minimum page size...
	//
	// iMinYoungPages *= KMaxCpus;
#endif

	// A minimum young/old ratio of 1 means that we need at least twice iMinYoungPages pages...
	iAbsoluteMinPageCount = 2*iMinYoungPages;

	__NK_ASSERT_DEBUG(KMinOldPages<=iAbsoluteMinPageCount/2);

	// initialise live list...
	TUint minimumPageCount = 0;
	TUint maximumPageCount = 0;

	SDemandPagingConfig config = TheRomHeader().iDemandPagingConfig;

	iMinimumPageCount = KDefaultMinPages;
	if(minimumPageCount)
		iMinimumPageCount = minimumPageCount;
	if(config.iMinPages)
		iMinimumPageCount = config.iMinPages;
	if(iMinimumPageCount<iAbsoluteMinPageCount)
		iMinimumPageCount = iAbsoluteMinPageCount;
	iInitMinimumPageCount = iMinimumPageCount;

	iMaximumPageCount = KMaxTInt;
	if(maximumPageCount)
		iMaximumPageCount = maximumPageCount;
	if(config.iMaxPages)
		iMaximumPageCount = config.iMaxPages;
	if (iMaximumPageCount > KAbsoluteMaxPageCount)
		iMaximumPageCount = KAbsoluteMaxPageCount;
	iInitMaximumPageCount = iMaximumPageCount;

	iYoungOldRatio = KDefaultYoungOldRatio;
	if(config.iYoungOldRatio)
		iYoungOldRatio = config.iYoungOldRatio;
	TInt ratioLimit = (iMinimumPageCount-KMinOldPages)/KMinOldPages;
	if(iYoungOldRatio>ratioLimit)
		iYoungOldRatio = ratioLimit;

#ifdef _USE_OLDEST_LISTS
	iOldOldestRatio = KDefaultOldOldestRatio;
	if(config.iSpare[2])
		iOldOldestRatio = config.iSpare[2];
#endif

	iMinimumPageLimit = (iMinYoungPages * (1 + iYoungOldRatio)) / iYoungOldRatio;
	if(iMinimumPageLimit<iAbsoluteMinPageCount)
		iMinimumPageLimit = iAbsoluteMinPageCount;
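
	// For example, taking the ARM figure of iMinYoungPages==6 from above and
	// the default young/old ratio of 3, the ratio term works out as
	// (6*4)/3 == 8 pages, which the test above then raises to
	// iAbsoluteMinPageCount (2*6 == 12 pages).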

	TRACEB(("DPager::Init2() live list min=%d max=%d ratio=%d",iMinimumPageCount,iMaximumPageCount,iYoungOldRatio));

	if(iMaximumPageCount<iMinimumPageCount)
		__NK_ASSERT_ALWAYS(0);

	//
	// This routine doesn't acquire any mutexes because it should be called before the system
	// is fully up and running. I.e. called before another thread can preempt this.
	//

	// Calculate page counts
	TUint minOldAndOldest = iMinimumPageCount / (1 + iYoungOldRatio);
	if(minOldAndOldest < KMinOldPages)
		__NK_ASSERT_ALWAYS(0);
	if (iMinimumPageCount < minOldAndOldest)
		__NK_ASSERT_ALWAYS(0);
	TUint minYoung = iMinimumPageCount - minOldAndOldest;
	if(minYoung < iMinYoungPages)
		__NK_ASSERT_ALWAYS(0); // Need at least iMinYoungPages pages mapped to execute worst case CPU instruction
#ifdef _USE_OLDEST_LISTS
	// There should always be enough old pages to satisfy the oldest lists ratio.
	TUint oldestCount = minOldAndOldest / (1 + iOldOldestRatio);
	if (!oldestCount)
		__NK_ASSERT_ALWAYS(0);
#endif
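
	// With the defaults (min==256 pages, young/old ratio==3, old/oldest
	// ratio==3) this partitions the minimum live list into 192 young pages and
	// 64 old pages, of which 64/(1+3) == 16 may sit on the oldest lists.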
	iNumberOfFreePages = 0;
	iNumberOfDirtyPages = 0;

	// Allocate RAM pages and put them all on the old list
	RamAllocLock::Lock();
	iYoungCount = 0;
	iOldCount = 0;
#ifdef _USE_OLDEST_LISTS
	iOldestCleanCount = 0;
	iOldestDirtyCount = 0;
#endif
	Mmu& m = TheMmu;
	for(TUint i=0; i<iMinimumPageCount; i++)
		{
		// Allocate a single page
		TPhysAddr pagePhys;
		TInt r = m.AllocRam(&pagePhys, 1,
							(Mmu::TRamAllocFlags)(EMemAttNormalCached|Mmu::EAllocNoWipe|Mmu::EAllocNoPagerReclaim),
							EPageDiscard);
		if(r!=KErrNone)
			__NK_ASSERT_ALWAYS(0);
		MmuLock::Lock();
		AddAsFreePage(SPageInfo::FromPhysAddr(pagePhys));
		MmuLock::Unlock();
		}
	RamAllocLock::Unlock();

#ifdef _USE_OLDEST_LISTS
	TRACEB(("DPager::Init2() end with young=%d old=%d oldClean=%d oldDirty=%d min=%d free=%d max=%d",iYoungCount,iOldCount,iOldestCleanCount,iOldestDirtyCount,iMinimumPageCount,iNumberOfFreePages,iMaximumPageCount));
#else
	TRACEB(("DPager::Init2() end with young=%d old=%d min=%d free=%d max=%d",iYoungCount,iOldCount,iMinimumPageCount,iNumberOfFreePages,iMaximumPageCount));
#endif
	}


#ifdef _DEBUG
TBool DPager::CheckLists()
	{
#if 0
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
	SDblQueLink* head = &iOldList.iA;
	TInt n = iOldCount;
	SDblQueLink* link = head;
	while(n--)
		{
		link = link->iNext;
		if(link==head)
			return false;
		}
	link = link->iNext;
	if(link!=head)
		return false;

	head = &iYoungList.iA;
	n = iYoungCount;
	link = head;
	while(n--)
		{
		link = link->iNext;
		if(link==head)
			return false;
		}
	link = link->iNext;
	if(link!=head)
		return false;

//	TRACEP(("DP: y=%d o=%d f=%d",iYoungCount,iOldCount,iNumberOfFreePages));
#endif
//	TraceCounts();
	return true;
	}

void DPager::TraceCounts()
	{
	TRACEP(("DP: y=%d o=%d f=%d min=%d max=%d ml=%d res=%d",
		iYoungCount,iOldCount,iNumberOfFreePages,iMinimumPageCount,
		iMaximumPageCount,iMinimumPageLimit,iReservePageCount));
	}

#endif

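// A note on the accounting used below: the pager owns iMinimumPageCount +
// iNumberOfFreePages pages in total; pages above the minimum are counted in
// iNumberOfFreePages even while they sit on the live list, so that sum is
// what gets compared against iMaximumPageCount.
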
TBool DPager::HaveTooManyPages()
	{
	__NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
	return iMinimumPageCount+iNumberOfFreePages > iMaximumPageCount;
	}


TBool DPager::HaveMaximumPages()
	{
	__NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
	return iMinimumPageCount+iNumberOfFreePages >= iMaximumPageCount;
	}


void DPager::AddAsYoungestPage(SPageInfo* aPageInfo)
	{
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
	__NK_ASSERT_DEBUG(CheckLists());
	__NK_ASSERT_DEBUG(aPageInfo->PagedState()==SPageInfo::EUnpaged);

	aPageInfo->SetPagedState(SPageInfo::EPagedYoung);
	iYoungList.AddHead(&aPageInfo->iLink);
	++iYoungCount;
	}


void DPager::AddAsFreePage(SPageInfo* aPageInfo)
	{
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
	__NK_ASSERT_DEBUG(CheckLists());

	__NK_ASSERT_DEBUG(aPageInfo->PagedState()==SPageInfo::EUnpaged);
	TheMmu.PageFreed(aPageInfo);
	__NK_ASSERT_DEBUG(aPageInfo->PagedState()==SPageInfo::EUnpaged);

	// add as oldest page...
#ifdef _USE_OLDEST_LISTS
	aPageInfo->SetPagedState(SPageInfo::EPagedOldestClean);
	iOldestCleanList.Add(&aPageInfo->iLink);
	++iOldestCleanCount;
#else
	aPageInfo->SetPagedState(SPageInfo::EPagedOld);
	iOldList.Add(&aPageInfo->iLink);
	++iOldCount;
#endif

	Event(EEventPageInFree,aPageInfo);
	}


TInt DPager::PageFreed(SPageInfo* aPageInfo)
	{
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
	__NK_ASSERT_DEBUG(CheckLists());

	switch(aPageInfo->PagedState())
		{
	case SPageInfo::EUnpaged:
		return KErrNotFound;

	case SPageInfo::EPagedYoung:
		__NK_ASSERT_DEBUG(iYoungCount);
		aPageInfo->iLink.Deque();
		--iYoungCount;
		break;

	case SPageInfo::EPagedOld:
		__NK_ASSERT_DEBUG(iOldCount);
		aPageInfo->iLink.Deque();
		--iOldCount;
		break;

#ifdef _USE_OLDEST_LISTS
	case SPageInfo::EPagedOldestClean:
		__NK_ASSERT_DEBUG(iOldestCleanCount);
		aPageInfo->iLink.Deque();
		--iOldestCleanCount;
		break;

	case SPageInfo::EPagedOldestDirty:
		__NK_ASSERT_DEBUG(iOldestDirtyCount);
		aPageInfo->iLink.Deque();
		--iOldestDirtyCount;
		break;
#endif

	case SPageInfo::EPagedPinned:
		// this can occur if a pinned mapping is being unmapped when memory is decommitted.
		// the decommit will have succeeded because the mapping no longer vetoes this,
		// however the unpinning hasn't yet got around to changing the page state.
		// When the state change happens the page will be put back on the live list so
		// we don't have to do anything now...
		return KErrNone;

	case SPageInfo::EPagedPinnedMoved:
		// This page was pinned when it was moved but it has not been returned
		// to the free pool yet so make sure it is...
		aPageInfo->SetPagedState(SPageInfo::EUnpaged);	// Must be unpaged before returned to free pool.
		return KErrNotFound;

	default:
		__NK_ASSERT_DEBUG(0);
		return KErrNotFound;
		}

	// Update the dirty page count as required...
	if (aPageInfo->IsDirty())
		SetClean(*aPageInfo);

	// add as oldest page...
#ifdef _USE_OLDEST_LISTS
	aPageInfo->SetPagedState(SPageInfo::EPagedOldestClean);
	iOldestCleanList.Add(&aPageInfo->iLink);
	++iOldestCleanCount;
#else
	aPageInfo->SetPagedState(SPageInfo::EPagedOld);
	iOldList.Add(&aPageInfo->iLink);
	++iOldCount;
#endif

	return KErrNone;
	}


extern TBool IsPageTableUnpagedRemoveAllowed(SPageInfo* aPageInfo);

void DPager::RemovePage(SPageInfo* aPageInfo)
	{
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
	__NK_ASSERT_DEBUG(CheckLists());

	switch(aPageInfo->PagedState())
		{
	case SPageInfo::EPagedYoung:
		__NK_ASSERT_DEBUG(iYoungCount);
		aPageInfo->iLink.Deque();
		--iYoungCount;
		break;

	case SPageInfo::EPagedOld:
		__NK_ASSERT_DEBUG(iOldCount);
		aPageInfo->iLink.Deque();
		--iOldCount;
		break;

#ifdef _USE_OLDEST_LISTS
	case SPageInfo::EPagedOldestClean:
		__NK_ASSERT_DEBUG(iOldestCleanCount);
		aPageInfo->iLink.Deque();
		--iOldestCleanCount;
		break;

	case SPageInfo::EPagedOldestDirty:
		__NK_ASSERT_DEBUG(iOldestDirtyCount);
		aPageInfo->iLink.Deque();
		--iOldestDirtyCount;
		break;
#endif

	case SPageInfo::EPagedPinned:
		__NK_ASSERT_DEBUG(0);
	case SPageInfo::EUnpaged:
#ifdef _DEBUG
		if (!IsPageTableUnpagedRemoveAllowed(aPageInfo))
			__NK_ASSERT_DEBUG(0);
		break;
#endif
	default:
		__NK_ASSERT_DEBUG(0);
		return;
		}

	aPageInfo->SetPagedState(SPageInfo::EUnpaged);
	}


void DPager::ReplacePage(SPageInfo& aOldPageInfo, SPageInfo& aNewPageInfo)
	{
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
	__NK_ASSERT_DEBUG(CheckLists());

	__NK_ASSERT_DEBUG(aOldPageInfo.PagedState() == aNewPageInfo.PagedState());
	switch(aOldPageInfo.PagedState())
		{
		case SPageInfo::EPagedYoung:
		case SPageInfo::EPagedOld:
		case SPageInfo::EPagedOldestClean:
		case SPageInfo::EPagedOldestDirty:
			{// Update the list links to point to the new page.
			__NK_ASSERT_DEBUG(iYoungCount);
			SDblQueLink* prevLink = aOldPageInfo.iLink.iPrev;
#ifdef _DEBUG
			SDblQueLink* nextLink = aOldPageInfo.iLink.iNext;
			__NK_ASSERT_DEBUG(prevLink == aOldPageInfo.iLink.iPrev);
			__NK_ASSERT_DEBUG(prevLink->iNext == &aOldPageInfo.iLink);
			__NK_ASSERT_DEBUG(nextLink == aOldPageInfo.iLink.iNext);
			__NK_ASSERT_DEBUG(nextLink->iPrev == &aOldPageInfo.iLink);
#endif
			aOldPageInfo.iLink.Deque();
			aNewPageInfo.iLink.InsertAfter(prevLink);
			aOldPageInfo.SetPagedState(SPageInfo::EUnpaged);
#ifdef _DEBUG
			__NK_ASSERT_DEBUG(prevLink == aNewPageInfo.iLink.iPrev);
			__NK_ASSERT_DEBUG(prevLink->iNext == &aNewPageInfo.iLink);
			__NK_ASSERT_DEBUG(nextLink == aNewPageInfo.iLink.iNext);
			__NK_ASSERT_DEBUG(nextLink->iPrev == &aNewPageInfo.iLink);
#endif
			}
			break;
		case SPageInfo::EPagedPinned:
			// Mark the page as 'pinned moved' so that when the page moving invokes
			// Mmu::FreeRam() it returns this page to the free pool.
			aOldPageInfo.ClearPinCount();
			aOldPageInfo.SetPagedState(SPageInfo::EPagedPinnedMoved);
			break;
		case SPageInfo::EPagedPinnedMoved:
			// Shouldn't happen as the ram alloc mutex will be held for the
			// entire time the page's paged state == EPagedPinnedMoved.
		case SPageInfo::EUnpaged:
			// Shouldn't happen as we only move pinned memory and unpinning will
			// atomically add the page to the live list and it can't be removed
			// from the live list without the ram alloc mutex.
			__NK_ASSERT_DEBUG(0);
			break;
		}
	}


SPageInfo* DPager::StealOldestPage()
	{
	__NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());

	for(;;)
		{
		// find oldest page in list...
		SDblQueLink* link;
#ifdef _USE_OLDEST_LISTS
		if (iOldestCleanCount)
			{
			__NK_ASSERT_DEBUG(!iOldestCleanList.IsEmpty());
			link = iOldestCleanList.Last();
			}
		else if (iOldestDirtyCount)
			{
			__NK_ASSERT_DEBUG(!iOldestDirtyList.IsEmpty());
			link = iOldestDirtyList.Last();
			}
		else if (iOldCount)
#else
		if (iOldCount)
#endif
			{
			__NK_ASSERT_DEBUG(!iOldList.IsEmpty());
			link = iOldList.Last();
			}
		else
			{
			__NK_ASSERT_DEBUG(iYoungCount);
			__NK_ASSERT_ALWAYS(!iYoungList.IsEmpty());
			link = iYoungList.Last();
			}
		SPageInfo* pageInfo = SPageInfo::FromLink(link);

		// steal it from owning object...
		TInt r = StealPage(pageInfo);

		BalanceAges();

		if(r==KErrNone)
			return pageInfo; // done

		// loop back and try again
		}
	}
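
// Note the eviction order above: with _USE_OLDEST_LISTS, oldest clean pages
// are stolen first (they need no write-back), then oldest dirty, then old,
// with young pages raided only when nothing older is left.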


TInt DPager::RestrictPage(SPageInfo* aPageInfo, TRestrictPagesType aRestriction)
	{
	TRACE(("DPager::RestrictPage(0x%08x,%d)",aPageInfo,aRestriction));
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());

	TInt r;
	if(aPageInfo->Type()==SPageInfo::EUnused)
		{
		// page was unused, so nothing to do...
		r = KErrNone;
		}
	else
		{
		// get memory object which owns the page...
		__NK_ASSERT_DEBUG(aPageInfo->Type()==SPageInfo::EManaged);
		DMemoryObject* memory = aPageInfo->Owner();
		memory->Open();

		// try restricting access to page...
		r = memory->iManager->RestrictPage(memory,aPageInfo,aRestriction);
		__NK_ASSERT_DEBUG(r!=KErrNotSupported);

		// close memory object...
		MmuLock::Unlock();
		memory->AsyncClose();
		MmuLock::Lock();
		}

	TRACE(("DPager::RestrictPage returns %d",r));
	return r;
	}


TInt DPager::StealPage(SPageInfo* aPageInfo)
	{
	TRACE(("DPager::StealPage(0x%08x)",aPageInfo));
	__NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());

	__UNLOCK_GUARD_START(MmuLock);
	RemovePage(aPageInfo);

	TInt r;
	if(aPageInfo->Type()==SPageInfo::EUnused)
		{
		// page was unused, so nothing to do...
		r = KErrNone;
		__UNLOCK_GUARD_END(MmuLock);
		MmuLock::Unlock();
		}
	else
		{
		// get memory object which owns the page...
		__NK_ASSERT_DEBUG(aPageInfo->Type()==SPageInfo::EManaged);
		DMemoryObject* memory = aPageInfo->Owner();
		memory->Open();

		// try and steal page from memory object...
		__UNLOCK_GUARD_END(MmuLock); // StealPage must be called without releasing the MmuLock
		r = memory->iManager->StealPage(memory,aPageInfo);
		__NK_ASSERT_DEBUG(r!=KErrNotSupported);

		// close memory object...
		MmuLock::Unlock();
		memory->AsyncClose();
		}

	MmuLock::Lock();

	if(r==KErrNone)
		Event(EEventPageOut,aPageInfo);

	TRACE(("DPager::StealPage returns %d",r));
	return r;
	}


TInt DPager::DiscardPage(SPageInfo* aOldPageInfo, TUint aBlockZoneId, TBool aBlockRest)
	{
	__NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());

	TInt r;
	// If the page is pinned or if the page is dirty and a general defrag is being
	// performed then don't attempt to steal it.
	if (aOldPageInfo->Type() != SPageInfo::EUnused &&
		(aOldPageInfo->PagedState() == SPageInfo::EPagedPinned ||
		(aBlockRest && aOldPageInfo->IsDirty())))
		{// The page is pinned or is dirty and this is a general defrag so move the page.
		DMemoryObject* memory = aOldPageInfo->Owner();
		// Page must be managed if it is pinned or dirty.
		__NK_ASSERT_DEBUG(aOldPageInfo->Type()==SPageInfo::EManaged);
		__NK_ASSERT_DEBUG(memory);
		MmuLock::Unlock();
		TPhysAddr newAddr;
		return memory->iManager->MovePage(memory, aOldPageInfo, newAddr, aBlockZoneId, aBlockRest);
		}

	if (!iNumberOfFreePages)
		{
		// Allocate a new page for the live list as it has reached its minimum size.
		MmuLock::Unlock();
		SPageInfo* newPageInfo = GetPageFromSystem((Mmu::TRamAllocFlags)(EMemAttNormalCached|Mmu::EAllocNoWipe),
													aBlockZoneId, aBlockRest);
		if (!newPageInfo)
			return KErrNoMemory;

		// Re-acquire the mmulock and re-check that the page is not pinned or dirty.
		MmuLock::Lock();
		if (aOldPageInfo->Type() != SPageInfo::EUnused &&
			(aOldPageInfo->PagedState() == SPageInfo::EPagedPinned ||
			(aBlockRest && aOldPageInfo->IsDirty())))
			{// Page is now pinned or dirty so give up as it is in use.
			ReturnPageToSystem(*newPageInfo);
			MmuLock::Unlock();
			return KErrInUse;
			}

		// Attempt to steal the page
		r = StealPage(aOldPageInfo);
		__NK_ASSERT_DEBUG(MmuLock::IsHeld());

		if (r == KErrCompletion)
			{// This was a page table that has been freed but added to the
			// live list as a free page.  Remove from live list and continue.
			__NK_ASSERT_DEBUG(!aOldPageInfo->IsDirty());
			RemovePage(aOldPageInfo);
			r = KErrNone;
			}

		if (r == KErrNone)
			{// Add the new page to the live list as discarding the old page
			// will reduce the live list below the minimum.
			AddAsFreePage(newPageInfo);
			// We've successfully discarded the page so return it to the free pool.
			ReturnPageToSystem(*aOldPageInfo);
			BalanceAges();
			}
		else
			{
			// New page not required so just return it to the system.  This is safe as
			// iNumberOfFreePages will have this page counted but as it is not on the live list
			// no one else can touch it.
			ReturnPageToSystem(*newPageInfo);
			}
		}
	else
		{
		// Attempt to steal the page
		r = StealPage(aOldPageInfo);

		__NK_ASSERT_DEBUG(MmuLock::IsHeld());

		if (r == KErrCompletion)
			{// This was a page table that has been freed but added to the
			// live list as a free page.  Remove from live list.
			__NK_ASSERT_DEBUG(!aOldPageInfo->IsDirty());
			RemovePage(aOldPageInfo);
			r = KErrNone;
			}

		if (r == KErrNone)
			{// We've successfully discarded the page so return it to the free pool.
			ReturnPageToSystem(*aOldPageInfo);
			BalanceAges();
			}
		}
	MmuLock::Unlock();
	return r;
	}


TBool DPager::TryGrowLiveList()
	{
	__NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());

	MmuLock::Unlock();
	SPageInfo* sparePage = GetPageFromSystem((Mmu::TRamAllocFlags)(EMemAttNormalCached|Mmu::EAllocNoWipe));
	MmuLock::Lock();

	if(!sparePage)
		return false;

	// add page to live list...
	AddAsFreePage(sparePage);
	return true;
	}


SPageInfo* DPager::GetPageFromSystem(Mmu::TRamAllocFlags aAllocFlags, TUint aBlockZoneId, TBool aBlockRest)
	{
	__NK_ASSERT_DEBUG(RamAllocLock::IsHeld());

	TPhysAddr pagePhys;
	TInt r = TheMmu.AllocRam(&pagePhys, 1,
							(Mmu::TRamAllocFlags)(aAllocFlags|Mmu::EAllocNoPagerReclaim),
							EPageDiscard, aBlockZoneId, aBlockRest);
	if(r!=KErrNone)
		return NULL;

	MmuLock::Lock();
	++iNumberOfFreePages;
	MmuLock::Unlock();

	return SPageInfo::FromPhysAddr(pagePhys);
	}
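
// Note that the new page is counted in iNumberOfFreePages before it is linked
// onto any list; as the comment in DiscardPage() explains, this is safe
// because a page which is counted but not yet on the live list cannot be
// touched by anyone else.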


void DPager::ReturnPageToSystem()
	{
	__NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());

	ReturnPageToSystem(*StealOldestPage());
	}


void DPager::ReturnPageToSystem(SPageInfo& aPageInfo)
	{
	__NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());

	__NK_ASSERT_DEBUG(iNumberOfFreePages>0);
	--iNumberOfFreePages;

	MmuLock::Unlock();

	TPhysAddr pagePhys = aPageInfo.PhysAddr();
	TheMmu.FreeRam(&pagePhys, 1, EPageDiscard);

	MmuLock::Lock();
	}


SPageInfo* DPager::PageInAllocPage(Mmu::TRamAllocFlags aAllocFlags)
	{
	SPageInfo* pageInfo;
	TPhysAddr pagePhys;

	RamAllocLock::Lock();
	MmuLock::Lock();

	// try getting a free page from our live list...
#ifdef _USE_OLDEST_LISTS
	if (iOldestCleanCount)
		{
		pageInfo = SPageInfo::FromLink(iOldestCleanList.Last());
		if(pageInfo->Type()==SPageInfo::EUnused)
			goto get_oldest;
		}
#else
	if(iOldCount)
		{
		pageInfo = SPageInfo::FromLink(iOldList.Last());
		if(pageInfo->Type()==SPageInfo::EUnused)
			goto get_oldest;
		}
#endif

	// try getting a free page from the system pool...
	if(!HaveMaximumPages())
		{
		MmuLock::Unlock();
		pageInfo = GetPageFromSystem(aAllocFlags);
		if(pageInfo)
			goto done;
		MmuLock::Lock();
		}

	// as a last resort, steal a page from the live list...
get_oldest:
#ifdef _USE_OLDEST_LISTS
	__NK_ASSERT_ALWAYS(iOldestCleanCount|iOldestDirtyCount|iOldCount|iYoungCount);
#else
	__NK_ASSERT_ALWAYS(iOldCount|iYoungCount);
#endif
	pageInfo = StealOldestPage();
	MmuLock::Unlock();

	// make page state same as a freshly allocated page...
	pagePhys = pageInfo->PhysAddr();
	TheMmu.PagesAllocated(&pagePhys,1,aAllocFlags);

done:
	RamAllocLock::Unlock();
	return pageInfo;
	}


TBool DPager::GetFreePages(TInt aNumPages)
	{
	TRACE(("DPager::GetFreePages(%d)",aNumPages));

	__NK_ASSERT_DEBUG(RamAllocLock::IsHeld());

	MmuLock::Lock();
	while(aNumPages>0 && (TInt)NumberOfFreePages()>=aNumPages)
		{
		ReturnPageToSystem();
		--aNumPages;
		}
	MmuLock::Unlock();

	TRACE(("DPager::GetFreePages returns %d",!aNumPages));
	return !aNumPages;
	}


void DPager::DonatePages(TUint aCount, TPhysAddr* aPages)
	{
	TRACE(("DPager::DonatePages(%d,?)",aCount));
	__ASSERT_CRITICAL;
	RamAllocLock::Lock();
	MmuLock::Lock();

	TPhysAddr* end = aPages+aCount;
	while(aPages<end)
		{
		TPhysAddr pagePhys = *aPages++;
		if(RPageArray::State(pagePhys)!=RPageArray::ECommitted)
			continue; // page is not present

#ifdef _DEBUG
		SPageInfo* pi = SPageInfo::SafeFromPhysAddr(pagePhys&~KPageMask);
		__NK_ASSERT_DEBUG(pi);
#else
		SPageInfo* pi = SPageInfo::FromPhysAddr(pagePhys);
#endif
		switch(pi->PagedState())
			{
		case SPageInfo::EUnpaged:
			// Change the type of this page to discardable and
			// then add it to live list.
			// Only the DDiscardableMemoryManager should be invoking this and
			// its pages will be movable before they are donated.
			__NK_ASSERT_DEBUG(pi->Owner()->iManager->PageType() == EPageMovable);
			TheMmu.ChangePageType(pi, EPageMovable, EPageDiscard);
			break;

		case SPageInfo::EPagedYoung:
		case SPageInfo::EPagedOld:
#ifdef _USE_OLDEST_LISTS
		case SPageInfo::EPagedOldestDirty:
		case SPageInfo::EPagedOldestClean:
#endif
			continue; // discard has already been allowed

		case SPageInfo::EPagedPinned:
			__NK_ASSERT_DEBUG(0);
		default:
			__NK_ASSERT_DEBUG(0);
			continue;
			}

		// put page on live list...
		AddAsYoungestPage(pi);
		++iNumberOfFreePages;

		Event(EEventPageDonate,pi);

		// re-balance live list...
		RemoveExcessPages();
		BalanceAges();
		}

	MmuLock::Unlock();
	RamAllocLock::Unlock();
	}


TInt DPager::ReclaimPages(TUint aCount, TPhysAddr* aPages)
	{
	TRACE(("DPager::ReclaimPages(%d,?)",aCount));
	__ASSERT_CRITICAL;
	RamAllocLock::Lock();
	MmuLock::Lock();

	TInt r = KErrNone;
	TPhysAddr* end = aPages+aCount;
	while(aPages<end)
		{
		TPhysAddr pagePhys = *aPages++;
		TBool changeType = EFalse;

		if(RPageArray::State(pagePhys)!=RPageArray::ECommitted)
			{
			r = KErrNotFound; // too late, page has gone
			continue;
			}

#ifdef _DEBUG
		SPageInfo* pi = SPageInfo::SafeFromPhysAddr(pagePhys&~KPageMask);
		__NK_ASSERT_DEBUG(pi);
#else
		SPageInfo* pi = SPageInfo::FromPhysAddr(pagePhys);
#endif
		switch(pi->PagedState())
			{
		case SPageInfo::EUnpaged:
			continue; // discard has already been disallowed

		case SPageInfo::EPagedYoung:
		case SPageInfo::EPagedOld:
#ifdef _USE_OLDEST_LISTS
		case SPageInfo::EPagedOldestClean:
		case SPageInfo::EPagedOldestDirty:
#endif
			changeType = ETrue;
			break; // remove from live list

		case SPageInfo::EPagedPinned:
			__NK_ASSERT_DEBUG(0);
		default:
			__NK_ASSERT_DEBUG(0);
			break;
			}

		// check paging list has enough pages before we remove one...
		if(iNumberOfFreePages<1)
			{
			// need more pages so get a page from the system...
			if(!TryGrowLiveList())
				{
				// out of memory...
				r = KErrNoMemory;
				break;
				}
			// retry the page reclaim...
			--aPages;
			continue;
			}

		if (changeType)
			{// Change the type of this page to movable; wait until any retries
			// have been attempted as we can't change a page's type twice.
			// Only the DDiscardableMemoryManager should be invoking this and
			// its pages should be movable once they are reclaimed.
			__NK_ASSERT_DEBUG(pi->Owner()->iManager->PageType() == EPageMovable);
			TheMmu.ChangePageType(pi, EPageDiscard, EPageMovable);
			}

		// remove page from paging list...
		__NK_ASSERT_DEBUG(iNumberOfFreePages>0);
		--iNumberOfFreePages;
		RemovePage(pi);

		Event(EEventPageReclaim,pi);

		// re-balance live list...
		BalanceAges();
		}

	// we may have added a spare free page to the live list without removing one,
	// this could cause us to have too many pages, so deal with this...
	RemoveExcessPages();

	MmuLock::Unlock();
	RamAllocLock::Unlock();
	return r;
	}


TInt VMHalFunction(TAny*, TInt aFunction, TAny* a1, TAny* a2);

void DPager::Init3()
	{
	TRACEB(("DPager::Init3()"));
	TheRomMemoryManager->Init3();
	TheDataPagedMemoryManager->Init3();
	TheCodePagedMemoryManager->Init3();
	TInt r = Kern::AddHalEntry(EHalGroupVM, VMHalFunction, 0);
	__NK_ASSERT_ALWAYS(r==KErrNone);
	}


void DPager::Fault(TFault aFault)
	{
	Kern::Fault("DPager",aFault);
	}


void DPager::BalanceAges()
	{
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
	TBool restrictPage = EFalse;
	SPageInfo* pageInfo = NULL;
#ifdef _USE_OLDEST_LISTS
	TUint oldestCount = iOldestCleanCount + iOldestDirtyCount;
	if((iOldCount + oldestCount) * iYoungOldRatio < iYoungCount)
#else
	if (iOldCount * iYoungOldRatio < iYoungCount)
#endif
		{
		// Need more old pages so make one young page into an old page...
		__NK_ASSERT_DEBUG(!iYoungList.IsEmpty());
		__NK_ASSERT_DEBUG(iYoungCount);
		SDblQueLink* link = iYoungList.Last()->Deque();
		--iYoungCount;

		pageInfo = SPageInfo::FromLink(link);
		pageInfo->SetPagedState(SPageInfo::EPagedOld);

		iOldList.AddHead(link);
		++iOldCount;

		Event(EEventPageAged,pageInfo);
		// Delay restricting the page until it is safe to release the MmuLock.
		restrictPage = ETrue;
		}

#ifdef _USE_OLDEST_LISTS
	// Check we have enough oldest pages.
	if (oldestCount * iOldOldestRatio < iOldCount)
		{
		__NK_ASSERT_DEBUG(!iOldList.IsEmpty());
		__NK_ASSERT_DEBUG(iOldCount);
		SDblQueLink* link = iOldList.Last()->Deque();
		--iOldCount;

		SPageInfo* oldestPageInfo = SPageInfo::FromLink(link);
		if (oldestPageInfo->IsDirty())
			{
			oldestPageInfo->SetPagedState(SPageInfo::EPagedOldestDirty);
			iOldestDirtyList.AddHead(link);
			++iOldestDirtyCount;
			Event(EEventPageAgedDirty,oldestPageInfo);
			}
		else
			{
			oldestPageInfo->SetPagedState(SPageInfo::EPagedOldestClean);
			iOldestCleanList.AddHead(link);
			++iOldestCleanCount;
			Event(EEventPageAgedClean,oldestPageInfo);
			}
		}
#endif
	if (restrictPage)
		{
		// Make the recently aged old page inaccessible.  This is done last as it
		// will release the MmuLock and therefore the page counts may otherwise change.
		RestrictPage(pageInfo,ERestrictPagesNoAccessForOldPage);
		}
	}
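
// For example, with the default young/old ratio of 3, a live list holding 31
// young and 10 old pages fails the test 10*3 < 31, so the least recently used
// young page is aged onto the old list and, once the MmuLock can safely be
// released, restricted to no-access so that a later touch rejuvenates it.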


void DPager::RemoveExcessPages()
	{
	__NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
	while(HaveTooManyPages())
		ReturnPageToSystem();
	}


void DPager::RejuvenatePageTable(TPte* aPt)
	{
	SPageInfo* pi = SPageInfo::FromPhysAddr(Mmu::PageTablePhysAddr(aPt));

	SPageTableInfo* pti = SPageTableInfo::FromPtPtr(aPt);
	if(!pti->IsDemandPaged())
		{
		__NK_ASSERT_DEBUG(pi->PagedState()==SPageInfo::EUnpaged);
		return;
		}

	TRACE2(("DP: %O Rejuvenate PT 0x%08x 0x%08x",TheCurrentThread,pi->PhysAddr(),aPt));
	switch(pi->PagedState())
		{
	case SPageInfo::EPagedYoung:
	case SPageInfo::EPagedOld:
#ifdef _USE_OLDEST_LISTS
	case SPageInfo::EPagedOldestClean:
	case SPageInfo::EPagedOldestDirty:
#endif
		RemovePage(pi);
		AddAsYoungestPage(pi);
		BalanceAges();
		break;

	case SPageInfo::EUnpaged:
		AddAsYoungestPage(pi);
		BalanceAges();
		break;

	case SPageInfo::EPagedPinned:
		break;

	default:
		__NK_ASSERT_DEBUG(0);
		break;
		}
	}

TInt DPager::PteAndInfoFromLinAddr(	TInt aOsAsid, TLinAddr aAddress, DMemoryMappingBase* aMapping,
									TUint aMapInstanceCount, TPte*& aPte, SPageInfo*& aPageInfo)
	{
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());

	// Verify the mapping is still mapped and has not been reused.
	if (aMapInstanceCount != aMapping->MapInstanceCount() || aMapping->BeingDetached())
		return KErrAbort;

	aPte = Mmu::SafePtePtrFromLinAddr(aAddress,aOsAsid);
	if(!aPte)
		return KErrNotFound;

	TPte pte = *aPte;
	if(pte==KPteUnallocatedEntry)
		return KErrNotFound;

	SPageInfo* pi = SPageInfo::SafeFromPhysAddr(pte & ~KPageMask);
	if(!pi)
		return KErrNotFound;
	aPageInfo = pi;

	return KErrNone;
	}

TInt DPager::TryRejuvenate(	TInt aOsAsid, TLinAddr aAddress, TUint aAccessPermissions, TLinAddr aPc,
							DMemoryMappingBase* aMapping, TUint aMapInstanceCount, DThread* aThread,
							TAny* aExceptionInfo)
	{
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());

	SPageInfo* pi;
	TPte* pPte;
	TPte pte;
	TInt r = PteAndInfoFromLinAddr(aOsAsid, aAddress, aMapping, aMapInstanceCount, pPte, pi);
	if (r != KErrNone)
		{
		if (aThread->IsRealtime())
			{// This thread is real time so it shouldn't be accessing paged-out paged memory
			// unless there is a paging trap.
			MmuLock::Unlock();
			// Ensure that we abort when the thread is not allowed to access paged out pages.
			if (CheckRealtimeThreadFault(aThread, aExceptionInfo) != KErrNone)
				r = KErrAbort;
			MmuLock::Lock();
			}
		return r;
		}
	pte = *pPte;
	SPageInfo::TType type = pi->Type();
	SPageInfo::TPagedState state = pi->PagedState();

	if (aThread->IsRealtime() &&
		state != SPageInfo::EPagedPinned &&
		state != SPageInfo::EPagedPinnedMoved)
		{// This thread is real time so it shouldn't be accessing unpinned paged memory
		// unless there is a paging trap.
		MmuLock::Unlock();
		r = CheckRealtimeThreadFault(aThread, aExceptionInfo);
		MmuLock::Lock();
		if (r != KErrNone)
			return r;
		// We had to release the MmuLock so we have to reverify the status of the page and mappings.
		r = PteAndInfoFromLinAddr(aOsAsid, aAddress, aMapping, aMapInstanceCount, pPte, pi);
		if (r != KErrNone)
			return r;
		pte = *pPte;
		type = pi->Type();
		state = pi->PagedState();
		}

	if (type != SPageInfo::EManaged)
		return KErrNotFound;

	if(state==SPageInfo::EUnpaged)
		return KErrNotFound;

	DMemoryObject* memory = pi->Owner();
	TUint index = pi->Index();

	TPhysAddr page = memory->iPages.Page(index);
	if(!RPageArray::IsPresent(page))
		return KErrNotFound;

	TPhysAddr physAddr = pi->PhysAddr();
	if ((page^physAddr) >= (TPhysAddr)KPageSize)
		{// The page array entry should contain the same physical address as the PTE unless the
		// page has been or is being moved and this mapping accessed the page.
		// Get the page info for the page that we should be using.
		physAddr = page & ~KPageMask;
		pi = SPageInfo::SafeFromPhysAddr(physAddr);
		if(!pi)
			return KErrNotFound;

		type = pi->Type();
		if (type!=SPageInfo::EManaged)
			return KErrNotFound;

		state = pi->PagedState();
		if(state==SPageInfo::EUnpaged)
			return KErrNotFound;

		memory = pi->Owner();
		index = pi->Index();

		// Update pte to point to the correct physical address for this memory object's page.
		pte = (pte & KPageMask) | physAddr;
		}

	if(aAccessPermissions&EReadWrite)
		{// The mapping that took the fault permits writes and is still attached
		// to the memory object therefore the object can't be read only.
		__NK_ASSERT_DEBUG(!memory->IsReadOnly());
		SetWritable(*pi);
		}

	pte = Mmu::MakePteAccessible(pte,aAccessPermissions&EReadWrite);
	TRACE2(("!PTE %x=%x",pPte,pte));
	*pPte = pte;
	CacheMaintenance::SinglePteUpdated((TLinAddr)pPte);
	InvalidateTLBForPage((aAddress&~KPageMask)|aOsAsid);

	Event(EEventPageRejuvenate,pi,aPc,aAddress,aAccessPermissions);

	TBool balance = false;
#ifdef _USE_OLDEST_LISTS
	if(	state==SPageInfo::EPagedYoung || state==SPageInfo::EPagedOld ||
		state==SPageInfo::EPagedOldestClean || state==SPageInfo::EPagedOldestDirty)
#else
	if(state==SPageInfo::EPagedYoung || state==SPageInfo::EPagedOld)
#endif
		{
		RemovePage(pi);
		AddAsYoungestPage(pi);
		// delay BalanceAges because we don't want to release MmuLock until after
		// RejuvenatePageTable has a chance to look at the page table page...
		balance = true;
		}
	else
		{// Clear the modifier so that if this page is being moved then this
		// access is detected. For non-pinned pages the modifier is cleared
		// by RemovePage().
		__NK_ASSERT_DEBUG(state==SPageInfo::EPagedPinned);
		pi->SetModifier(0);
		}

	RejuvenatePageTable(pPte);

	if(balance)
		BalanceAges();

	return KErrNone;
	}


TInt DPager::PageInAllocPages(TPhysAddr* aPages, TUint aCount, Mmu::TRamAllocFlags aAllocFlags)
	{
	TUint n = 0;
	while(n<aCount)
		{
		SPageInfo* pi = PageInAllocPage(aAllocFlags);
		if(!pi)
			goto fail;
		aPages[n++] = pi->PhysAddr();
		}
	return KErrNone;
fail:
	PageInFreePages(aPages,n);
	return KErrNoMemory;
	}


void DPager::PageInFreePages(TPhysAddr* aPages, TUint aCount)
	{
	while(aCount--)
		{
		MmuLock::Lock();
		SPageInfo* pi = SPageInfo::FromPhysAddr(aPages[aCount]);
		switch(pi->PagedState())
			{
		case SPageInfo::EPagedYoung:
		case SPageInfo::EPagedOld:
#ifdef _USE_OLDEST_LISTS
		case SPageInfo::EPagedOldestClean:
		case SPageInfo::EPagedOldestDirty:
#endif
			RemovePage(pi);
			// fall through...
		case SPageInfo::EUnpaged:
			AddAsFreePage(pi);
			break;

		case SPageInfo::EPagedPinned:
			__NK_ASSERT_DEBUG(0);
			break;
		default:
			__NK_ASSERT_DEBUG(0);
			break;
			}
		MmuLock::Unlock();
		}
	}


void DPager::PagedInUnneeded(SPageInfo* aPageInfo)
	{
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
	Event(EEventPageInUnneeded,aPageInfo);
	AddAsFreePage(aPageInfo);
	}


void DPager::PagedIn(SPageInfo* aPageInfo)
	{
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
	switch(aPageInfo->PagedState())
		{
	case SPageInfo::EPagedYoung:
	case SPageInfo::EPagedOld:
#ifdef _USE_OLDEST_LISTS
	case SPageInfo::EPagedOldestClean:
	case SPageInfo::EPagedOldestDirty:
#endif
		RemovePage(aPageInfo);
		AddAsYoungestPage(aPageInfo);
		BalanceAges();
		break;

	case SPageInfo::EUnpaged:
		AddAsYoungestPage(aPageInfo);
		BalanceAges();
		break;

	case SPageInfo::EPagedPinned:
		// Clear the modifier so that if this page is being moved then this
		// access is detected. For non-pinned pages the modifier is cleared by RemovePage().
		aPageInfo->SetModifier(0);
		break;

	default:
		__NK_ASSERT_DEBUG(0);
		break;
		}
	}


void DPager::PagedInPinned(SPageInfo* aPageInfo, TPinArgs& aPinArgs)
	{
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
	Pin(aPageInfo,aPinArgs);
	}


void DPager::Pin(SPageInfo* aPageInfo, TPinArgs& aPinArgs)
	{
	__ASSERT_CRITICAL;
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
	__NK_ASSERT_DEBUG(aPinArgs.HaveSufficientPages(1));

	aPageInfo->IncPinCount();
	Event(EEventPagePin,aPageInfo);

	// remove page from live list...
	switch(aPageInfo->PagedState())
		{
	case SPageInfo::EPagedYoung:
		__NK_ASSERT_DEBUG(iYoungCount);
		aPageInfo->iLink.Deque();
		--iYoungCount;
		__NK_ASSERT_DEBUG(aPageInfo->PinCount()==1);
		break;

	case SPageInfo::EPagedOld:
		__NK_ASSERT_DEBUG(iOldCount);
		aPageInfo->iLink.Deque();
		--iOldCount;
		__NK_ASSERT_DEBUG(aPageInfo->PinCount()==1);
		break;

#ifdef _USE_OLDEST_LISTS
	case SPageInfo::EPagedOldestClean:
		__NK_ASSERT_DEBUG(iOldestCleanCount);
		aPageInfo->iLink.Deque();
		--iOldestCleanCount;
		__NK_ASSERT_DEBUG(aPageInfo->PinCount()==1);
		break;

	case SPageInfo::EPagedOldestDirty:
		__NK_ASSERT_DEBUG(iOldestDirtyCount);
		aPageInfo->iLink.Deque();
		--iOldestDirtyCount;
		__NK_ASSERT_DEBUG(aPageInfo->PinCount()==1);
		break;
#endif

	case SPageInfo::EPagedPinned:
		// nothing more to do...
		__NK_ASSERT_DEBUG(aPageInfo->PinCount()>1);
		return;

	case SPageInfo::EUnpaged:
		__NK_ASSERT_DEBUG(aPageInfo->PinCount()==1);
		TRACE2(("DPager::PinPage page was unpaged"));
		// This could be a page in the process of being stolen.
		// Could also be a page for storing page table infos, which aren't necessarily
		// on the live list.
		break;

	default:
		__NK_ASSERT_DEBUG(0);
		return;
		}

	// page has now been removed from the live list and is pinned...
	aPageInfo->SetPagedState(SPageInfo::EPagedPinned);

	if(aPinArgs.iReplacementPages==TPinArgs::EUseReserveForPinReplacementPages)
		{
		// pinned page counts as coming from the reserve pool...
		aPageInfo->SetPinnedReserve();
		}
	else
		{
		// we used up a replacement page...
		--aPinArgs.iReplacementPages;
		}

	BalanceAges();
	}


void DPager::Unpin(SPageInfo* aPageInfo, TPinArgs& aPinArgs)
	{
	__ASSERT_CRITICAL;
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
	__NK_ASSERT_DEBUG(aPageInfo->PagedState()==SPageInfo::EPagedPinned);
	__NK_ASSERT_DEBUG(aPageInfo->PinCount()>0);

	TUint pinCount = aPageInfo->DecPinCount();
	Event(EEventPageUnpin,aPageInfo);

	if(pinCount)
		return;

	aPageInfo->SetPagedState(SPageInfo::EUnpaged);

	if(!aPageInfo->ClearPinnedReserve())
		{
		// was not a pinned reserve page, so we now have a spare replacement page,
		// which can be used again or freed later ...
		__NK_ASSERT_DEBUG(aPinArgs.iReplacementPages!=TPinArgs::EUseReserveForPinReplacementPages);
		++aPinArgs.iReplacementPages;
		}

	AddAsYoungestPage(aPageInfo);
	BalanceAges();
	}


TInt TPinArgs::AllocReplacementPages(TUint aNumPages)
	{
	if(iUseReserve)
		{
		__NK_ASSERT_DEBUG(iReplacementPages==0 || iReplacementPages==EUseReserveForPinReplacementPages);
		iReplacementPages = EUseReserveForPinReplacementPages;
		}
	else
		{
		if(aNumPages>iReplacementPages)
			{
			if(!ThePager.AllocPinReplacementPages(aNumPages-iReplacementPages))
				return KErrNoMemory;
			iReplacementPages = aNumPages;
			}
		}
	return KErrNone;
	}


void TPinArgs::FreeReplacementPages()
	{
	if(iReplacementPages!=0 && iReplacementPages!=EUseReserveForPinReplacementPages)
		ThePager.FreePinReplacementPages(iReplacementPages);
	iReplacementPages = 0;
	}


TBool DPager::AllocPinReplacementPages(TUint aNumPages)
	{
	TRACE2(("DPager::AllocPinReplacementPages(0x%x)",aNumPages));
	__ASSERT_CRITICAL;
	RamAllocLock::Lock();
	MmuLock::Lock();

	TBool ok = false;
	do
		{
		if(iNumberOfFreePages>=aNumPages)
			{
			iNumberOfFreePages -= aNumPages;
			ok = true;
			break;
			}
		}
	while(TryGrowLiveList());

	MmuLock::Unlock();
	RamAllocLock::Unlock();
	return ok;
	}


void DPager::FreePinReplacementPages(TUint aNumPages)
	{
	TRACE2(("DPager::FreePinReplacementPage(0x%x)",aNumPages));
	__ASSERT_CRITICAL;

	RamAllocLock::Lock();
	MmuLock::Lock();

	iNumberOfFreePages += aNumPages;
	RemoveExcessPages();

	MmuLock::Unlock();
	RamAllocLock::Unlock();
	}


TBool DPager::ReservePage()
	{
	__NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
	__ASSERT_CRITICAL;
	__NK_ASSERT_DEBUG(iMinimumPageCount >= iMinimumPageLimit+iReservePageCount);
	while(iMinimumPageCount==iMinimumPageLimit+iReservePageCount && iNumberOfFreePages==0)
		{
		if(!TryGrowLiveList())
			return false;
		}
	if(iMinimumPageCount==iMinimumPageLimit+iReservePageCount)
		{
		++iMinimumPageCount;
		--iNumberOfFreePages;
		if(iMinimumPageCount>iMaximumPageCount)
			iMaximumPageCount = iMinimumPageCount;
		}
	++iReservePageCount;
	__NK_ASSERT_DEBUG(iMinimumPageCount >= iMinimumPageLimit+iReservePageCount);
	__NK_ASSERT_DEBUG(iMinimumPageCount+iNumberOfFreePages <= iMaximumPageCount);
	return ETrue;
	}


TBool DPager::ReservePages(TUint aRequiredCount, TUint& aCount)
	{
	__ASSERT_CRITICAL;

	RamAllocLock::Lock();
	MmuLock::Lock();
	while(aCount<aRequiredCount)
		{
		if(!ReservePage())
			break;
		++aCount;
		MmuLock::Flash();
		}
	TBool enoughPages = aCount==aRequiredCount;
	MmuLock::Unlock();
	RamAllocLock::Unlock();

	if(!enoughPages)
		UnreservePages(aCount);

	return enoughPages;
	}


void DPager::UnreservePages(TUint& aCount)
	{
	MmuLock::Lock();
	iReservePageCount -= aCount;
	aCount = 0;
	MmuLock::Unlock();
	}


TInt DPager::CheckRealtimeThreadFault(DThread* aThread, TAny* aExceptionInfo)
	{
	// realtime threads shouldn't take paging faults...
	DThread* client = aThread->iIpcClient;

	// If iIpcClient is set then we are accessing the address space of a remote thread.  If we are
	// in an IPC trap, this will contain information about the local and remote addresses being accessed.
	// If this is not set then we assume that any fault must be the fault of a bad remote address.
	TIpcExcTrap* ipcTrap = (TIpcExcTrap*)aThread->iExcTrap;
	if (ipcTrap && !ipcTrap->IsTIpcExcTrap())
		ipcTrap = 0;
	if (client && (!ipcTrap || ipcTrap->ExcLocation(aThread, aExceptionInfo) == TIpcExcTrap::EExcRemote))
		{
		// kill client thread...
		if(K::IllegalFunctionForRealtimeThread(client,"Access to Paged Memory (by other thread)"))
			{
			// treat memory access as bad...
			return KErrAbort;
			}
		// else thread is in 'warning only' state so allow paging...
		}
	else
		{
		// kill current thread...
		if(K::IllegalFunctionForRealtimeThread(NULL,"Access to Paged Memory"))
			{
			// if current thread is in critical section, then the above kill will be deferred
			// and we will continue executing. We will handle this by returning an error
			// which means that the thread will take an exception (which hopefully is XTRAPed!)
			return KErrAbort;
			}
		// else thread is in 'warning only' state so allow paging...
		}
	return KErrNone;
	}
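
// To summarise the policy above: a fault on a remote address during IPC is
// blamed on the realtime client thread, any other fault on the current
// thread; in both cases the access is aborted and the offending thread
// killed, unless it is in 'warning only' mode, in which case the paging
// request is allowed to proceed.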


TInt DPager::HandlePageFault(	TLinAddr aPc, TLinAddr aFaultAddress, TUint aFaultAsid, TUint aFaultIndex,
								TUint aAccessPermissions, DMemoryObject* aMemory, DMemoryMapping* aMapping,
								TUint aMapInstanceCount, DThread* aThread, TAny* aExceptionInfo)
	{
	MmuLock::Lock();
	TInt r = TryRejuvenate(	aFaultAsid, aFaultAddress, aAccessPermissions, aPc, aMapping, aMapInstanceCount,
							aThread, aExceptionInfo);
	if(r == KErrNone || r == KErrAbort)
		{
		MmuLock::Unlock();
		}
	else
		{
		// rejuvenate failed, call memory manager to page in memory...
		Event(EEventPageInStart, 0, aPc, aFaultAddress, aAccessPermissions);
		MmuLock::Unlock();
		TheThrashMonitor.NotifyStartPaging();

		DMemoryManager* manager = aMemory->iManager;
		r = manager->HandleFault(aMemory, aFaultIndex, aMapping, aMapInstanceCount, aAccessPermissions);

		TheThrashMonitor.NotifyEndPaging();
		}
	return r;
	}


TInt DPager::ResizeLiveList()
	{
	MmuLock::Lock();
	TUint min = iMinimumPageCount;
	TUint max = iMaximumPageCount;
	MmuLock::Unlock();
	return ResizeLiveList(min,max);
	}


TInt DPager::ResizeLiveList(TUint aMinimumPageCount, TUint aMaximumPageCount)
	{
	TRACE(("DPager::ResizeLiveList(%d,%d) current young=%d old=%d min=%d free=%d max=%d",aMinimumPageCount,aMaximumPageCount,iYoungCount,iOldCount,iMinimumPageCount,iNumberOfFreePages,iMaximumPageCount));
	if(!aMaximumPageCount)
		{
		aMinimumPageCount = iInitMinimumPageCount;
		aMaximumPageCount = iInitMaximumPageCount;
		}
	if (aMaximumPageCount > KAbsoluteMaxPageCount)
		aMaximumPageCount = KAbsoluteMaxPageCount;

	// Min must not be greater than max...
	if(aMinimumPageCount>aMaximumPageCount)
		return KErrArgument;

	NKern::ThreadEnterCS();
	RamAllocLock::Lock();

	MmuLock::Lock();

	// Make sure aMinimumPageCount is not less than the absolute minimum we can cope with...
  1709 	iMinimumPageLimit = iMinYoungPages * (1 + iYoungOldRatio) / iYoungOldRatio
  1710 						+ DPageReadRequest::ReservedPagesRequired();
  1711 	if(iMinimumPageLimit<iAbsoluteMinPageCount)
  1712 		iMinimumPageLimit = iAbsoluteMinPageCount;
  1713 	if(aMinimumPageCount<iMinimumPageLimit+iReservePageCount)
  1714 		aMinimumPageCount = iMinimumPageLimit+iReservePageCount;
  1715 	if(aMaximumPageCount<aMinimumPageCount)
  1716 		aMaximumPageCount=aMinimumPageCount;
  1717 
  1718 	// Increase iMaximumPageCount?
  1719 	TInt extra = aMaximumPageCount-iMaximumPageCount;
  1720 	if(extra>0)
  1721 		iMaximumPageCount += extra;
  1722 
  1723 	// Reduce iMinimumPageCount?
  1724 	TInt spare = iMinimumPageCount-aMinimumPageCount;
  1725 	if(spare>0)
  1726 		{
  1727 		iMinimumPageCount -= spare;
  1728 		iNumberOfFreePages += spare;
  1729 		}
  1730 
  1731 	// Increase iMinimumPageCount?
  1732 	TInt r=KErrNone;
  1733 	while(iMinimumPageCount<aMinimumPageCount)
  1734 		{
  1735 		TUint newMin = aMinimumPageCount;
  1736 		TUint maxMin = iMinimumPageCount+iNumberOfFreePages;
  1737 		if(newMin>maxMin)
  1738 			newMin = maxMin;
  1739 
  1740 		TUint delta = newMin-iMinimumPageCount;
  1741 		if(delta)
  1742 			{
  1743 			iMinimumPageCount = newMin;
  1744 			iNumberOfFreePages -= delta;
  1745 			continue;
  1746 			}
  1747 
  1748 		if(!TryGrowLiveList())
  1749 			{
  1750 			r=KErrNoMemory;
  1751 			break;
  1752 			}
  1753 		}
  1754 
  1755 	// Reduce iMaximumPageCount?
  1756 	while(iMaximumPageCount>aMaximumPageCount)
  1757 		{
  1758 		TUint newMax = aMaximumPageCount;
  1759 		TUint minMax = iMinimumPageCount+iNumberOfFreePages;
  1760 		if(newMax<minMax)
  1761 			newMax = minMax;
  1762 
  1763 		TUint delta = iMaximumPageCount-newMax;
  1764 		if(delta)
  1765 			{
  1766 			iMaximumPageCount = newMax;
  1767 			continue;
  1768 			}
  1769 
  1770 		ReturnPageToSystem();
  1771 		}
  1772 
  1773 	TRACE(("DPager::ResizeLiveList end with young=%d old=%d min=%d free=%d max=%d",iYoungCount,iOldCount,iMinimumPageCount,iNumberOfFreePages,iMaximumPageCount));
  1774 
  1775 #ifdef BTRACE_KERNEL_MEMORY
  1776 	BTrace4(BTrace::EKernelMemory,BTrace::EKernelMemoryDemandPagingCache,iMinimumPageCount << KPageShift);
  1777 #endif
  1778 
  1779 	MmuLock::Unlock();
  1780 
  1781 	RamAllocLock::Unlock();
  1782 	NKern::ThreadLeaveCS();
  1783 
  1784 	return r;
  1785 	}
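
// Worked example of the iMinimumPageLimit calculation above, assuming
// illustrative values of iMinYoungPages = 6 and iYoungOldRatio = 3 (the real
// values are platform dependent and are set up during boot):
//
//		iMinimumPageLimit = 6 * (1 + 3) / 3		// = 8 pages
//						  + DPageReadRequest::ReservedPagesRequired();
//
// i.e. a live list of at least 8 pages so that, once split young:old at the
// 3:1 ratio, the young list still holds the required 6 pages.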
  1786 
  1787 
  1788 void DPager::FlushAll()
  1789 	{
  1790 	NKern::ThreadEnterCS();
  1791 	RamAllocLock::Lock();
  1792 
  1793 	TRACE(("DPager::FlushAll() live list young=%d old=%d min=%d free=%d max=%d",iYoungCount,iOldCount,iMinimumPageCount,iNumberOfFreePages,iMaximumPageCount));
  1794 
  1795 	// look at all RAM pages in the system, and unmap all those used for paging
  1796 	const TUint32* piMap = (TUint32*)KPageInfoMap;
  1797 	const TUint32* piMapEnd = piMap+(KNumPageInfoPages>>5);
  1798 	SPageInfo* pi = (SPageInfo*)KPageInfoLinearBase;
  1799 	MmuLock::Lock();
  1800 	do
  1801 		{
  1802 		SPageInfo* piNext = pi+(KPageInfosPerPage<<5);
  1803 		for(TUint32 piFlags=*piMap++; piFlags; piFlags>>=1)
  1804 			{
  1805 			if(!(piFlags&1))
  1806 				{
  1807 				pi += KPageInfosPerPage;
  1808 				continue;
  1809 				}
  1810 			SPageInfo* piEnd = pi+KPageInfosPerPage;
  1811 			do
  1812 				{
  1813 				SPageInfo::TPagedState state = pi->PagedState();
  1814 #ifdef _USE_OLDEST_LISTS
  1815 				if (state==SPageInfo::EPagedYoung || state==SPageInfo::EPagedOld ||
  1816 					state==SPageInfo::EPagedOldestClean || state==SPageInfo::EPagedOldestDirty)
  1817 #else
  1818 				if(state==SPageInfo::EPagedYoung || state==SPageInfo::EPagedOld)
  1819 #endif
  1820 					{
  1821 					if (pi->Type() != SPageInfo::EUnused)
  1822 						{
  1823 						TInt r = StealPage(pi);
  1824 						if(r==KErrNone)
  1825 							AddAsFreePage(pi);
  1826 						MmuLock::Flash();
  1827 						}
  1828 					}
  1829 				++pi;
  1830 				if(((TUint)pi&(0xf<<KPageInfoShift))==0)
  1831 					MmuLock::Flash(); // every 16 page infos
  1832 				}
  1833 			while(pi<piEnd);
  1834 			}
  1835 		pi = piNext;
  1836 		}
  1837 	while(piMap<piMapEnd);
  1838 	MmuLock::Unlock();
  1839 
  1840 	// reduce live page list to a minimum
  1841 	while(GetFreePages(1)) {}
  1842 
  1843 	TRACE(("DPager::FlushAll() end with young=%d old=%d min=%d free=%d max=%d",iYoungCount,iOldCount,iMinimumPageCount,iNumberOfFreePages,iMaximumPageCount));
  1844 
  1845 	RamAllocLock::Unlock();
  1846 	NKern::ThreadLeaveCS();
  1847 	}
  1848 
  1849 
  1850 void DPager::GetLiveListInfo(SVMCacheInfo& aInfo)
  1851 	{
  1852 	MmuLock::Lock(); // ensure a consistent set of values is read...
  1853 	aInfo.iMinSize = iMinimumPageCount<<KPageShift;
  1854 	aInfo.iMaxSize = iMaximumPageCount<<KPageShift;
  1855 	aInfo.iCurrentSize = (iMinimumPageCount+iNumberOfFreePages)<<KPageShift;
  1856 	aInfo.iMaxFreeSize = iNumberOfFreePages<<KPageShift;
  1857 	MmuLock::Unlock();
  1858 	}
  1859 
  1860 
  1861 void DPager::GetEventInfo(SVMEventInfo& aInfoOut)
  1862 	{
  1863 	MmuLock::Lock(); // ensure a consistent set of values is read...
  1864 	aInfoOut = iEventInfo;
  1865 	MmuLock::Unlock();
  1866 	}
  1867 
  1868 
  1869 void DPager::ResetEventInfo()
  1870 	{
  1871 	MmuLock::Lock();
  1872 	memclr(&iEventInfo, sizeof(iEventInfo));
  1873 	MmuLock::Unlock();
  1874 	}
  1875 
  1876 
  1877 TInt TestPageState(TLinAddr aAddr)
  1878 	{
  1879 	DMemModelProcess* process = (DMemModelProcess*)TheCurrentThread->iOwningProcess;
  1880 	// Get the OS ASID of the current thread's process; as it is the current process, there is no need to open a reference on it.
  1881 	TInt osAsid = process->OsAsid();
  1882 	TPte* ptePtr = 0;
  1883 	TPte pte = 0;
  1884 	TInt r = 0;
  1885 	SPageInfo* pageInfo = NULL;
  1886 
  1887 	NKern::ThreadEnterCS();
  1888 
  1889 	TUint offsetInMapping;
  1890 	TUint mapInstanceCount;
  1891 	DMemoryMapping* mapping = MM::FindMappingInAddressSpace(osAsid, aAddr, 1, offsetInMapping, mapInstanceCount);
  1892 
  1893 	MmuLock::Lock();
  1894 
  1895 	if(mapping)
  1896 		{
  1897 		DMemoryObject* memory = mapping->Memory();
  1898 		if(mapInstanceCount == mapping->MapInstanceCount() && memory)
  1899 			{
  1900 			DMemoryManager* manager = memory->iManager;
  1901 			if(manager==TheCodePagedMemoryManager)
  1902 				r |= EPageStateInRamCode|EPageStatePaged;
  1903 			}
  1904 		}
  1905 
  1906 	ptePtr = Mmu::SafePtePtrFromLinAddr(aAddr,osAsid);
  1907 	if (!ptePtr)
  1908 		goto done;
  1909 	pte = *ptePtr;
  1910 	if (pte == KPteUnallocatedEntry)
  1911 		goto done;		
  1912 	r |= EPageStatePtePresent;
  1913 	if (pte!=Mmu::MakePteInaccessible(pte,0))
  1914 		r |= EPageStatePteValid;
  1915 	
  1916 	pageInfo = SPageInfo::SafeFromPhysAddr(pte&~KPageMask);
  1917 	if(pageInfo)
  1918 		{
  1919 		r |= pageInfo->Type();
  1920 		r |= pageInfo->PagedState()<<8;
  1921 		}
  1922 done:
  1923 	MmuLock::Unlock();
  1924 	if(mapping)
  1925 		mapping->Close();
  1926 	NKern::ThreadLeaveCS();
  1927 	return r;
  1928 	}
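
// The value returned by TestPageState() is a bitfield: the EPageState...
// flags and the SPageInfo type in the low byte, with the paged state shifted
// up by 8. A sketch of how a test might decode it (illustrative only; the
// shift of 8 comes from the code above, the 0xff mask is an assumption):
//
//		TInt state = TestPageState(addr);
//		TBool paged = state & EPageStatePaged;
//		SPageInfo::TPagedState ps = (SPageInfo::TPagedState)((state>>8)&0xff);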
  1929 
  1930 
  1931 
  1932 TInt VMHalFunction(TAny*, TInt aFunction, TAny* a1, TAny* a2)
  1933 	{
  1934 	switch(aFunction)
  1935 		{
  1936 	case EVMHalFlushCache:
  1937 		if(!TheCurrentThread->HasCapability(ECapabilityWriteDeviceData,__PLATSEC_DIAGNOSTIC_STRING("Checked by VMHalFunction(EVMHalFlushCache)")))
  1938 			K::UnlockedPlatformSecurityPanic();
  1939 		ThePager.FlushAll();
  1940 		return KErrNone;
  1941 
  1942 	case EVMHalSetCacheSize:
  1943 		{
  1944 		if(!TheCurrentThread->HasCapability(ECapabilityWriteDeviceData,__PLATSEC_DIAGNOSTIC_STRING("Checked by VMHalFunction(EVMHalSetCacheSize)")))
  1945 			K::UnlockedPlatformSecurityPanic();
  1946 		TUint min = TUint(a1)>>KPageShift;
  1947 		if(TUint(a1)&KPageMask)
  1948 			++min;
  1949 		TUint max = TUint(a2)>>KPageShift;
  1950 		if(TUint(a2)&KPageMask)
  1951 			++max;
  1952 		return ThePager.ResizeLiveList(min,max);
  1953 		}
  1954 
  1955 	case EVMHalGetCacheSize:
  1956 		{
  1957 		SVMCacheInfo info;
  1958 		ThePager.GetLiveListInfo(info);
  1959 		kumemput32(a1,&info,sizeof(info));
  1960 		}
  1961 		return KErrNone;
  1962 
  1963 	case EVMHalGetEventInfo:
  1964 		{
  1965 		SVMEventInfo info;
  1966 		ThePager.GetEventInfo(info);
  1967 		Kern::InfoCopy(*(TDes8*)a1,(TUint8*)&info,sizeof(info));
  1968 		}
  1969 		return KErrNone;
  1970 
  1971 	case EVMHalResetEventInfo:
  1972 		ThePager.ResetEventInfo();
  1973 		return KErrNone;
  1974 
  1975 #ifdef __SUPPORT_DEMAND_PAGING_EMULATION__
  1976 	case EVMHalGetOriginalRomPages:
  1977 		RomOriginalPages(*((TPhysAddr**)a1), *((TUint*)a2));
  1978 		return KErrNone;
  1979 #endif
  1980 
  1981 	case EVMPageState:
  1982 		return TestPageState((TLinAddr)a1);
  1983 
  1984 	case EVMHalGetSwapInfo:
  1985 		{
  1986 		if ((K::MemModelAttributes & EMemModelAttrDataPaging) == 0)
  1987 			return KErrNotSupported;
  1988 		SVMSwapInfo info;
  1989 		GetSwapInfo(info);
  1990 		kumemput32(a1,&info,sizeof(info));
  1991 		}
  1992 		return KErrNone;
  1993 
  1994 	case EVMHalGetThrashLevel:
  1995 		return TheThrashMonitor.ThrashLevel();
  1996 
  1997 	case EVMHalSetSwapThresholds:
  1998 		{
  1999 		if(!TheCurrentThread->HasCapability(ECapabilityWriteDeviceData,__PLATSEC_DIAGNOSTIC_STRING("Checked by VMHalFunction(EVMHalSetSwapThresholds)")))
  2000 			K::UnlockedPlatformSecurityPanic();
  2001 		if ((K::MemModelAttributes & EMemModelAttrDataPaging) == 0)
  2002 			return KErrNotSupported;
  2003 		SVMSwapThresholds thresholds;
  2004 		kumemget32(&thresholds,a1,sizeof(thresholds));
  2005 		return SetSwapThresholds(thresholds);
  2006 		}
  2007 
  2008 	case EVMHalSetThrashThresholds:
  2009 		if(!TheCurrentThread->HasCapability(ECapabilityWriteDeviceData,__PLATSEC_DIAGNOSTIC_STRING("Checked by VMHalFunction(EVMHalSetThrashThresholds)")))
  2010 			K::UnlockedPlatformSecurityPanic();
  2011 		return TheThrashMonitor.SetThresholds((TUint)a1, (TUint)a2);
  2012 
  2013 #ifdef __DEMAND_PAGING_BENCHMARKS__
  2014 	case EVMHalGetPagingBenchmark:
  2015 		{
  2016 		TUint index = (TUint)a1;
  2017 		if (index >= EMaxPagingBm)
  2018 			return KErrNotFound;
  2019 		NKern::LockSystem();
  2020 		SPagingBenchmarkInfo info = ThePager.iBenchmarkInfo[index];
  2021 		NKern::UnlockSystem();
  2022 		kumemput32(a2,&info,sizeof(info));
  2023 		}		
  2024 		return KErrNone;
  2025 		
  2026 	case EVMHalResetPagingBenchmark:
  2027 		{
  2028 		TUint index = (TUint)a1;
  2029 		if (index >= EMaxPagingBm)
  2030 			return KErrNotFound;
  2031 		NKern::LockSystem();
  2032 		ThePager.ResetBenchmarkData((TPagingBenchmark)index);
  2033 		NKern::UnlockSystem();
  2034 		}
  2035 		return KErrNone;
  2036 #endif
  2037 
  2038 	default:
  2039 		return KErrNotSupported;
  2040 		}
  2041 	}
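
// User-side code reaches the cases above through the VM HAL group. A minimal
// sketch (illustrative only; assumes the caller holds the WriteDeviceData
// capability checked above, and relies on EVMHalSetCacheSize rounding its
// byte arguments up to whole pages):
//
//		// flush the whole paging cache...
//		UserSvr::HalFunction(EHalGroupVM, EVMHalFlushCache, 0, 0);
//		// resize the live list to lie between 1MB and 4MB...
//		UserSvr::HalFunction(EHalGroupVM, EVMHalSetCacheSize,
//							 (TAny*)0x100000, (TAny*)0x400000);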
  2042 
  2043 
  2044 #ifdef __DEMAND_PAGING_BENCHMARKS__
  2045 
  2046 void DPager::ResetBenchmarkData(TPagingBenchmark aBm)
  2047     {
  2048     SPagingBenchmarkInfo& info = iBenchmarkInfo[aBm];
  2049     info.iCount = 0;
  2050     info.iTotalTime = 0;
  2051     info.iMaxTime = 0;
  2052     info.iMinTime = KMaxTInt;
  2053     }
  2054  
  2055 void DPager::RecordBenchmarkData(TPagingBenchmark aBm, TUint32 aStartTime, TUint32 aEndTime)
  2056     {
  2057     SPagingBenchmarkInfo& info = iBenchmarkInfo[aBm];
  2058     ++info.iCount;
  2059 #if !defined(HIGH_RES_TIMER) || defined(HIGH_RES_TIMER_COUNTS_UP)
  2060     TInt64 elapsed = aEndTime - aStartTime;	// timer counts up: elapsed = end - start
  2061 #else
  2062     TInt64 elapsed = aStartTime - aEndTime;	// high resolution timer counts down: elapsed = start - end
  2063 #endif
  2064     info.iTotalTime += elapsed;
  2065     if (elapsed > info.iMaxTime)
  2066         info.iMaxTime = elapsed;
  2067     if (elapsed < info.iMinTime)
  2068         info.iMinTime = elapsed;
  2069     }
  2070 
  2071 #endif //__DEMAND_PAGING_BENCHMARKS__
  2072 
  2073 
  2074 //
  2075 // Paging request management...
  2076 //
  2077 
  2078 //
  2079 // DPagingRequest
  2080 //
  2081 
  2082 DPagingRequest::DPagingRequest(DPagingRequestPool::TGroup& aPoolGroup)
  2083 	: iPoolGroup(aPoolGroup), iUseRegionMemory(0), iUseRegionIndex(0), iUseRegionCount(0)
  2084 	{
  2085 	}
  2086 
  2087 
  2088 FORCE_INLINE void DPagingRequest::SetUse(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
  2089 	{
  2090 	__ASSERT_SYSTEM_LOCK;
  2091 	iUseRegionMemory = aMemory;
  2092 	iUseRegionIndex = aIndex;
  2093 	iUseRegionCount = aCount;
  2094 	}
  2095 
  2096 
  2097 TBool DPagingRequest::CheckUse(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
  2098 	{
  2099 	return aMemory==iUseRegionMemory
  2100 		&& TUint(aIndex-iUseRegionIndex) < iUseRegionCount
  2101 		&& TUint(iUseRegionCount-TUint(aIndex-iUseRegionIndex)) <= aCount;
  2102 	}
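
// CheckUse() relies on unsigned wrap-around: when aIndex is below
// iUseRegionIndex the subtraction wraps to a huge value, so the first
// comparison rejects it without a separate lower-bound test. Worked example
// (values are illustrative):
//
//		// use region is pages [100,110), i.e. index 100, count 10
//		CheckUse(m, 104, 6);	// 104-100 = 4 < 10, and 10-4 = 6 <= 6 -> true
//		CheckUse(m,  99, 20);	// 99-100 wraps to 0xffffffff -> false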
  2103 
  2104 
  2105 void DPagingRequest::Release()
  2106 	{
  2107 	NKern::LockSystem();
  2108 	SetUse(0,0,0);
  2109 	Signal();
  2110 	}
  2111 
  2112 
  2113 void DPagingRequest::Wait()
  2114 	{
  2115 	__ASSERT_SYSTEM_LOCK;
  2116 	++iUsageCount;
  2117 	TInt r = iMutex->Wait();
  2118 	__NK_ASSERT_ALWAYS(r == KErrNone);
  2119 	}
  2120 
  2121 
  2122 void DPagingRequest::Signal()
  2123 	{
  2124 	__ASSERT_SYSTEM_LOCK;
  2125 	iPoolGroup.Signal(this);
  2126 	}
  2127 
  2128 
  2129 FORCE_INLINE TBool DPagingRequest::IsCollision(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
  2130 	{
  2131 	__ASSERT_SYSTEM_LOCK;
  2132 	DMemoryObject* memory = iUseRegionMemory;
  2133 	TUint index = iUseRegionIndex;
  2134 	TUint count = iUseRegionCount;
  2135 	// note, this comparison would fail if either region includes page number KMaxTUint,
  2136 	// but it isn't possible to create a memory object which is > KMaxTUint pages...
  2137 	return memory == aMemory && index+count > aIndex && index < aIndex+aCount;
  2138 	}
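
// The return expression above is the standard half-open interval overlap
// test: [index,index+count) and [aIndex,aIndex+aCount) intersect exactly
// when index+count > aIndex and index < aIndex+aCount. For example, with
// illustrative values, a use region of pages [8,12) collides with a request
// for [11,13) but not with one for [12,14).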
  2139 
  2140 
  2141 TLinAddr DPagingRequest::MapPages(TUint aColour, TUint aCount, TPhysAddr* aPages)
  2142 	{
  2143 	__NK_ASSERT_DEBUG(iMutex->iCleanup.iThread == &Kern::CurrentThread());
  2144 	return iTempMapping.Map(aPages,aCount,aColour);
  2145 	}
  2146 
  2147 
  2148 void DPagingRequest::UnmapPages(TBool aIMBRequired)
  2149 	{
  2150 	__NK_ASSERT_DEBUG(iMutex->iCleanup.iThread == &Kern::CurrentThread());
  2151 	iTempMapping.Unmap(aIMBRequired);
  2152 	}
  2153 
  2154 
  2155 //
  2156 // DPageReadRequest
  2157 //
  2158 
  2159 TInt DPageReadRequest::iAllocNext = 0;
  2160 
  2161 TInt DPageReadRequest::Construct()
  2162 	{
  2163 	// allocate id and mutex...
  2164 	TUint id = (TUint)__e32_atomic_add_ord32(&iAllocNext, 1);
  2165 	_LIT(KLitPagingRequest,"PageReadRequest-");
  2166 	TBuf<sizeof("PageReadRequest-")+10> mutexName(KLitPagingRequest);
  2167 	mutexName.AppendNum(id);
  2168 	TInt r = K::MutexCreate(iMutex, mutexName, NULL, EFalse, KMutexOrdPageIn);
  2169 	if(r!=KErrNone)
  2170 		return r;
  2171 
  2172 	// allocate space for mapping pages whilst they're being loaded...
  2173 	iTempMapping.Alloc(EMaxPages);
  2174 
  2175 	// create memory buffer...
  2176 	TUint bufferSize = EMaxPages+1;
  2177 	DMemoryObject* bufferMemory;
  2178 	r = MM::MemoryNew(bufferMemory,EMemoryObjectUnpaged,bufferSize,EMemoryCreateNoWipe);
  2179 	if(r!=KErrNone)
  2180 		return r;
  2181 	MM::MemorySetLock(bufferMemory,iMutex);
  2182 	TPhysAddr physAddr;
  2183 	r = MM::MemoryAllocContiguous(bufferMemory,0,bufferSize,0,physAddr);
  2184 	(void)physAddr;
  2185 	if(r!=KErrNone)
  2186 		return r;
  2187 	DMemoryMapping* bufferMapping;
  2188 	r = MM::MappingNew(bufferMapping,bufferMemory,ESupervisorReadWrite,KKernelOsAsid);
  2189 	if(r!=KErrNone)
  2190 		return r;
  2191 	iBuffer = MM::MappingBase(bufferMapping);
  2192 
  2193 	// ensure there are enough young pages to cope with the new request object...
  2194 	r = ThePager.ResizeLiveList();
  2195 	if(r!=KErrNone)
  2196 		return r;
  2197 
  2198 	return r;
  2199 	}
  2200 
  2201 
  2202 //
  2203 // DPageWriteRequest
  2204 //
  2205 
  2206 TInt DPageWriteRequest::iAllocNext = 0;
  2207 
  2208 TInt DPageWriteRequest::Construct()
  2209 	{
  2210 	// allocate id and mutex...
  2211 	TUint id = (TUint)__e32_atomic_add_ord32(&iAllocNext, 1);
  2212 	_LIT(KLitPagingRequest,"PageWriteRequest-");
  2213 	TBuf<sizeof("PageWriteRequest-")+10> mutexName(KLitPagingRequest);
  2214 	mutexName.AppendNum(id);
  2215 	TInt r = K::MutexCreate(iMutex, mutexName, NULL, EFalse, KMutexOrdPageOut);
  2216 	if(r!=KErrNone)
  2217 		return r;
  2218 
  2219 	// allocate space for mapping pages whilst they're being written out...
  2220 	iTempMapping.Alloc(EMaxPages);
  2221 
  2222 	return r;
  2223 	}
  2224 
  2225 
  2226 //
  2227 // DPagingRequestPool
  2228 //
  2229 
  2230 DPagingRequestPool::DPagingRequestPool(TUint aNumPageReadRequest,TUint aNumPageWriteRequest)
  2231 	: iPageReadRequests(aNumPageReadRequest), iPageWriteRequests(aNumPageWriteRequest)
  2232 	{
  2233 	TUint i;
  2234 
  2235 	for(i=0; i<aNumPageReadRequest; ++i)
  2236 		{
  2237 		DPageReadRequest* req = new DPageReadRequest(iPageReadRequests);
  2238 		__NK_ASSERT_ALWAYS(req);
  2239 		TInt r = req->Construct();
  2240 		__NK_ASSERT_ALWAYS(r==KErrNone);
  2241 		iPageReadRequests.iRequests[i] = req;
  2242 		iPageReadRequests.iFreeList.Add(req);
  2243 		}
  2244 
  2245 	for(i=0; i<aNumPageWriteRequest; ++i)
  2246 		{
  2247 		DPageWriteRequest* req = new DPageWriteRequest(iPageWriteRequests);
  2248 		__NK_ASSERT_ALWAYS(req);
  2249 		TInt r = req->Construct();
  2250 		__NK_ASSERT_ALWAYS(r==KErrNone);
  2251 		iPageWriteRequests.iRequests[i] = req;
  2252 		iPageWriteRequests.iFreeList.Add(req);
  2253 		}
  2254 	}
  2255 
  2256 
  2257 DPagingRequestPool::~DPagingRequestPool()
  2258 	{
  2259 	__NK_ASSERT_ALWAYS(0); // deletion not implemented
  2260 	}
  2261 
  2262 
  2263 DPageReadRequest* DPagingRequestPool::AcquirePageReadRequest(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
  2264 	{
  2265 	NKern::LockSystem();
  2266 
  2267 	DPagingRequest* req;
  2268 
  2269 	// if we collide with page write operation...
  2270 	req = iPageWriteRequests.FindCollision(aMemory,aIndex,aCount);
  2271 	if(req)
  2272 		{
  2273 		// wait until write completes...
  2274 		req->Wait();
  2275 		req->Signal();
  2276 		return 0; // caller expected to retry if needed
  2277 		}
  2278 
  2279 	// get a request object to use...
  2280 	req = iPageReadRequests.GetRequest(aMemory,aIndex,aCount);
  2281 
  2282 	// check no new requests collide with us...
  2283 	if(iPageWriteRequests.FindCollision(aMemory,aIndex,aCount)
  2284 		|| iPageReadRequests.FindCollision(aMemory,aIndex,aCount))
  2285 		{
  2286 		// another operation is colliding with this region, give up and retry...
  2287 		req->Signal();
  2288 		return 0; // caller expected to retry if needed
  2289 		}
  2290 
  2291 	// we have a request object which we can use...
  2292 	req->SetUse(aMemory,aIndex,aCount);
  2293 
  2294 	NKern::UnlockSystem();
  2295 	return (DPageReadRequest*)req;
  2296 	}
  2297 
  2298 
  2299 DPageWriteRequest* DPagingRequestPool::AcquirePageWriteRequest(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
  2300 	{
  2301 	NKern::LockSystem();
  2302 
  2303 	DPagingRequest* req;
  2304 
  2305 	for(;;)
  2306 		{
  2307 		// get a request object to use...
  2308 		req = iPageWriteRequests.GetRequest(aMemory,aIndex,aCount);
  2309 
  2310 		if(iPageWriteRequests.FindCollision(aMemory,aIndex,aCount))
  2311 			{
  2312 			// another write operation is colliding with this region, give up and retry...
  2313 			req->Signal();
  2314 			// Reacquire the system lock as Signal() above will release it.
  2315 			NKern::LockSystem();
  2316 			continue;
  2317 			}
  2318 
  2319 		break;
  2320 		}
  2321 
  2322 	// we have a request object which we can use...
  2323 	req->SetUse(aMemory,aIndex,aCount);
  2324 
  2325 	NKern::UnlockSystem();
  2326 	return (DPageWriteRequest*)req;
  2327 	}
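
// Note the asymmetry between the two acquire paths above: the read path
// returns 0 on a collision and leaves its caller to retry, whereas the write
// path loops internally until it owns a non-colliding request; presumably
// because a page-out, unlike a page-in, cannot be satisfied by another
// request completing the same work.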
  2328 
  2329 
  2330 DPagingRequestPool::TGroup::TGroup(TUint aNumRequests)
  2331 	{
  2332 	iNumRequests = aNumRequests;
  2333 	iRequests = new DPagingRequest*[aNumRequests];
  2334 	__NK_ASSERT_ALWAYS(iRequests);
  2335 	}
  2336 
  2337 
  2338 DPagingRequest* DPagingRequestPool::TGroup::FindCollision(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
  2339 	{
  2340 	__ASSERT_SYSTEM_LOCK;
  2341 	DPagingRequest** ptr = iRequests;
  2342 	DPagingRequest** ptrEnd = ptr+iNumRequests;
  2343 	while(ptr<ptrEnd)
  2344 		{
  2345 		DPagingRequest* req = *ptr++;
  2346 		if(req->IsCollision(aMemory,aIndex,aCount))
  2347 			return req;
  2348 		}
  2349 	return 0;
  2350 	}
  2351 
  2352 
  2353 static TUint32 RandomSeed = 33333;
  2354 
  2355 DPagingRequest* DPagingRequestPool::TGroup::GetRequest(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
  2356 	{
  2357 	__NK_ASSERT_DEBUG(iNumRequests > 0);
  2358 
  2359 	// try using an existing request which collides with this region...
  2360 	DPagingRequest* req  = FindCollision(aMemory,aIndex,aCount);
  2361 	if(!req)
  2362 		{
  2363 		// use a free request...
  2364 		req = (DPagingRequest*)iFreeList.GetFirst();
  2365 		if(req)
  2366 			{
  2367 			// free requests aren't being used...
  2368 			__NK_ASSERT_DEBUG(req->iUsageCount == 0);
  2369 			}
  2370 		else
  2371 			{
  2372 			// pick a random request...
  2373 			RandomSeed = RandomSeed*69069+1; // next 'random' number
  2374 			TUint index = (TUint64(RandomSeed) * TUint64(iNumRequests)) >> 32;
  2375 			req = iRequests[index];
  2376 			__NK_ASSERT_DEBUG(req->iUsageCount > 0); // we only pick random when none are free
  2377 			}
  2378 		}
  2379 
  2380 	// wait for chosen request object...
  2381 	req->Wait();
  2382 
  2383 	return req;
  2384 	}
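
// The random pick above is a 32-bit linear congruential step
// (seed = seed*69069 + 1) followed by a multiply-shift reduction:
// (TUint64(seed) * n) >> 32 maps a uniform 32-bit value into [0,n) without
// needing a modulo. Worked example (illustrative): with seed == 0x80000000
// and iNumRequests == 4, (0x80000000ull * 4) >> 32 == 2, picking iRequests[2].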
  2385 
  2386 
  2387 void DPagingRequestPool::TGroup::Signal(DPagingRequest* aRequest)
  2388 	{
  2389 	// if no threads are still using this request then return it to the free pool...
  2390 	__NK_ASSERT_DEBUG(aRequest->iUsageCount > 0);
  2391 	if (--aRequest->iUsageCount==0)
  2392 		iFreeList.AddHead(aRequest);
  2393 
  2394 	aRequest->iMutex->Signal();
  2395 	}
  2396 
  2397 
  2398 /**
  2399 Register the specified paging device with the kernel.
  2400 
  2401 @param aDevice	A pointer to the paging device to install
  2402 
  2403 @return KErrNone on success, otherwise one of the other system wide error codes.
  2404 */
  2405 EXPORT_C TInt Kern::InstallPagingDevice(DPagingDevice* aDevice)
  2406 	{
  2407 	TRACEB(("Kern::InstallPagingDevice(0x%08x) name='%s' type=%d",aDevice,aDevice->iName,aDevice->iType));
  2408 
  2409 	__NK_ASSERT_ALWAYS(aDevice->iReadUnitShift <= KPageShift);
  2410 
  2411 	TInt r = KErrNotSupported;	// Will return this if an unsupported device type is installed
  2412 
  2413 	// create the pools of page out and page in requests...
  2414 	const TInt writeReqs = (aDevice->iType & DPagingDevice::EData) ? KPagingRequestsPerDevice : 0;
  2415 	aDevice->iRequestPool = new DPagingRequestPool(KPagingRequestsPerDevice,writeReqs);
  2416 	if(!aDevice->iRequestPool)
  2417 		{
  2418 		r = KErrNoMemory;
  2419 		goto exit;
  2420 		}
  2421 
  2422 	if(aDevice->iType & DPagingDevice::ERom)
  2423 		{
  2424 		r = TheRomMemoryManager->InstallPagingDevice(aDevice);
  2425 		if(r!=KErrNone)
  2426 			goto exit;
  2427 		}
  2428 
  2429 	if(aDevice->iType & DPagingDevice::ECode)
  2430 		{
  2431 		r = TheCodePagedMemoryManager->InstallPagingDevice(aDevice);
  2432 		if(r!=KErrNone)
  2433 			goto exit;
  2434 		}
  2435 
  2436 	if(aDevice->iType & DPagingDevice::EData)
  2437 		{
  2438 		r = TheDataPagedMemoryManager->InstallPagingDevice(aDevice);
  2439 		if(r!=KErrNone)
  2440 			goto exit;
  2441 		}
  2442 
  2443  	if (K::MemModelAttributes & (EMemModelAttrRomPaging | EMemModelAttrCodePaging | EMemModelAttrDataPaging))
  2444 		TheThrashMonitor.Start();
  2445 
  2446 exit:
  2447 	TRACEB(("Kern::InstallPagingDevice returns %d",r));
  2448 	return r;
  2449 	}
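
// A media driver typically installs its paging device during boot, after
// filling in the DPagingDevice fields used above. A minimal sketch
// (illustrative only; DMyPagingDevice and the field values are assumptions,
// and a real driver must also implement the device's read/write interface):
//
//		DPagingDevice* dev = new DMyPagingDevice;
//		dev->iType = DPagingDevice::ERom | DPagingDevice::ECode;
//		dev->iReadUnitShift = 9;		// 512 byte read units (<= KPageShift)
//		dev->iName = "MyMedia";
//		TInt r = Kern::InstallPagingDevice(dev);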
  2450 
  2451 
  2452 
  2453 //
  2454 // DDemandPagingLock
  2455 //
  2456 
  2457 EXPORT_C DDemandPagingLock::DDemandPagingLock()
  2458 	: iReservedPageCount(0), iLockedPageCount(0), iPinMapping(0)
  2459 	{
  2460 	}
  2461 
  2462 
  2463 EXPORT_C TInt DDemandPagingLock::Alloc(TInt aSize)
  2464 	{
  2465 	TRACEP(("DDemandPagingLock[0x%08x]::Alloc(0x%x)",this,aSize));
  2466 	iMaxPageCount = ((aSize-1+KPageMask)>>KPageShift)+1;
  2467 
  2468 	TInt r = KErrNoMemory;
  2469 
  2470 	NKern::ThreadEnterCS();
  2471 
  2472 	TUint maxPt = DVirtualPinMapping::MaxPageTables(iMaxPageCount);
  2473 	// Note: we need to reserve whole pages even for page tables (which are smaller)
  2474 	// because pinning can remove the page from the live list...
  2475 	TUint reserve = iMaxPageCount+maxPt*KNumPagesToPinOnePageTable;
  2476 	if(ThePager.ReservePages(reserve,(TUint&)iReservedPageCount))
  2477 		{
  2478 		iPinMapping = DVirtualPinMapping::New(iMaxPageCount);
  2479 		if(iPinMapping)
  2480 			r = KErrNone;
  2481 		else
  2482 			ThePager.UnreservePages((TUint&)iReservedPageCount);
  2483 		}
  2484 
  2485 	NKern::ThreadLeaveCS();
  2486 	TRACEP(("DDemandPagingLock[0x%08x]::Alloc returns %d, iMaxPageCount=%d, iReservedPageCount=%d",this,r,iMaxPageCount,iReservedPageCount));
  2487 	return r;
  2488 	}
  2489 
  2490 
  2491 EXPORT_C void DDemandPagingLock::Free()
  2492 	{
  2493 	TRACEP(("DDemandPagingLock[0x%08x]::Free()",this));
  2494 	Unlock();
  2495 	NKern::ThreadEnterCS();
  2496 	DVirtualPinMapping* pinMapping = (DVirtualPinMapping*)__e32_atomic_swp_ord_ptr(&iPinMapping, 0);
  2497 	if (pinMapping)
  2498 		pinMapping->Close();
  2499 	NKern::ThreadLeaveCS();
  2500 	ThePager.UnreservePages((TUint&)iReservedPageCount);
  2501 	}
  2502 
  2503 
  2504 EXPORT_C TInt DDemandPagingLock::Lock(DThread* aThread, TLinAddr aStart, TInt aSize)
  2505 	{
  2506 //	TRACEP(("DDemandPagingLock[0x%08x]::Lock(0x%08x,0x%08x,0x%08x)",this,aThread,aStart,aSize));
  2507 	if(iLockedPageCount)
  2508 		__NK_ASSERT_ALWAYS(0); // lock already used
  2509 
  2510 	// calculate the number of pages that need to be locked...
  2511 	TUint mask=KPageMask;
  2512 	TUint offset=aStart&mask;
  2513 	TInt numPages = (aSize+offset+mask)>>KPageShift;
  2514 	if(numPages>iMaxPageCount)
  2515 		__NK_ASSERT_ALWAYS(0);
  2516 
  2517 	NKern::ThreadEnterCS();
  2518 
  2519 	// find mapping which covers the specified region...
  2520 	TUint offsetInMapping;
  2521 	TUint mapInstanceCount;
  2522 	DMemoryMapping* mapping = MM::FindMappingInThread((DMemModelThread*)aThread, aStart, aSize, offsetInMapping, mapInstanceCount);
  2523 	if(!mapping)
  2524 		{
  2525 		NKern::ThreadLeaveCS();
  2526 		return KErrBadDescriptor;
  2527 		}
  2528 
  2529 	MmuLock::Lock(); 
  2530 	DMemoryObject* memory = mapping->Memory();
  2531 	if(mapInstanceCount != mapping->MapInstanceCount() || !memory)
  2532 		{// Mapping has been reused or no memory.
  2533 		MmuLock::Unlock();
  2534 		mapping->Close();
  2535 		NKern::ThreadLeaveCS();
  2536 		return KErrBadDescriptor;
  2537 		}
  2538 
  2539 	if(!memory->IsDemandPaged())
  2540 		{
  2541 		// memory not demand paged, so we have nothing to do...
  2542 		MmuLock::Unlock();
  2543 		mapping->Close();
  2544 		NKern::ThreadLeaveCS();
  2545 		return KErrNone;
  2546 		}
  2547 
  2548 	// Open a reference on the memory so it doesn't get deleted.
  2549 	memory->Open();
  2550 	MmuLock::Unlock();
  2551 
  2552 	// pin memory...
  2553 	TUint index = (offsetInMapping>>KPageShift)+mapping->iStartIndex;
  2554 	TUint count = ((offsetInMapping&KPageMask)+aSize+KPageMask)>>KPageShift;
  2555 	TInt r = ((DVirtualPinMapping*)iPinMapping)->Pin(	memory,index,count,mapping->Permissions(),
  2556 														mapping, mapInstanceCount);
  2557 
  2558 	if(r==KErrNotFound)
  2559 		{
  2560 		// some memory wasn't present, so treat this as an error...
  2561 		memory->Close();
  2562 		mapping->Close();
  2563 		NKern::ThreadLeaveCS();
  2564 		return KErrBadDescriptor;
  2565 		}
  2566 
  2567 	// we can't fail to pin otherwise...
  2568 	__NK_ASSERT_DEBUG(r!=KErrNoMemory); // separate OOM assert to aid debugging
  2569 	__NK_ASSERT_ALWAYS(r==KErrNone);
  2570 
  2571 	// indicate that we have actually pinned...
  2572 	__NK_ASSERT_DEBUG(iLockedPageCount==0);
  2573 	iLockedPageCount = count;
  2574 
  2575 	// cleanup...
  2576 	memory->Close();
  2577 	mapping->Close();
  2578 	NKern::ThreadLeaveCS();
  2579 
  2580 	return 1; // >0 indicates pages were actually locked (cf. KErrNone above when the memory is not demand paged)
  2581 	}
  2582 
  2583 
  2584 EXPORT_C void DDemandPagingLock::DoUnlock()
  2585 	{
  2586 	NKern::ThreadEnterCS();
  2587 	((DVirtualPinMapping*)iPinMapping)->Unpin();
  2588 	__NK_ASSERT_DEBUG(iLockedPageCount);
  2589 	iLockedPageCount = 0;
  2590 	NKern::ThreadLeaveCS();
  2591 	}
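
// A typical client, such as a driver that must access paged user memory with
// the system locked, uses DDemandPagingLock as follows. A minimal sketch
// (illustrative only; error handling is elided and DoTransfer is a
// hypothetical helper):
//
//		DDemandPagingLock lock;
//		TInt r = lock.Alloc(maxTransferSize);	// reserve pages up front
//		if(r == KErrNone)
//			{
//			TInt locked = lock.Lock(aThread, aUserAddr, aLength);
//			if(locked >= 0)
//				{
//				DoTransfer(aUserAddr, aLength);	// pinned if locked > 0
//				if(locked > 0)
//					lock.Unlock();
//				}
//			}
//		lock.Free();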
  2592 
  2593