// os/kernelhwsrv/kernel/eka/memmodel/epoc/flexible/mmu/mptalloc.cpp
// Copyright (c) 2007-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
//

#include <plat_priv.h>
#include "mm.h"
#include "mmu.h"
#include "mpager.h"

#include "mmanager.h"
#include "mmapping.h"
#include "mobject.h"

#include "mptalloc.h"
#include "cache_maintenance.inl"

/**
@class PageTableAllocator
@details

NOTES

Page tables are mapped into a sparse array in the virtual address range
#KPageTableBase..#KPageTableEnd. For each present page table there is a
corresponding #SPageTableInfo object mapped from #KPageTableInfoBase upwards.

Page tables for demand paged content are kept separate from other page tables;
this enables the memory for these to be freed when the page tables no longer map
any memory, i.e. when it has all been paged-out. Pages with these 'paged' page
tables are stored in the demand paging live list, so they participate in the page
aging process.

The 'unpaged' page tables are allocated from the bottom of the array upwards,
via TPtPageAllocator::iLowerAllocator; the 'paged' page tables are allocated
from the top of the array downwards, via TPtPageAllocator::iUpperAllocator.
These two regions are prevented from overlapping, or from coming close enough
together that the #SPageTableInfo structs for paged and unpaged page tables
lie in the same page. This means that the SPageTableInfo memory for paged page
tables can be discarded when its page tables are discarded.
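
For illustration, the virtual layout is roughly as follows (not to scale):

    KPageTableBase                                         KPageTableEnd
    | unpaged tables --> iLowerWaterMark ... iUpperWaterMark <-- paged tables |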

Memory for page tables and page table info objects is managed by
#ThePageTableMemoryManager. When allocating memory for demand paged use, this
uses memory from #ThePager which will reclaim paged memory if necessary.
Provided the live list always has #DPager::iMinYoungPages, this guarantees that
handling page faults can never fail by running out of memory.

TODO: In really pathological situations page table allocation can fail due to
being out of virtual address space to map the table; this needs to be prevented
from happening when handling demand paging faults.
*/


PageTableAllocator PageTables;



TBool PageTablesLockIsHeld()
	{
	return ::PageTables.LockIsHeld();
	}


/**
Minimum number of page tables to keep in reserve.
*/
const TUint KNumReservedPageTables = 0; // none needed - page tables for mapping page tables and infos are permanently allocated


/**
Manager for the memory object used to store all the MMU page tables.
*/
class DPageTableMemoryManager : public DMemoryManager
	{
public:
	/**
	Not implemented - page table memory is never destroyed.
	*/
	virtual void Destruct(DMemoryObject* aMemory)
		{}

	virtual TInt StealPage(DMemoryObject* aMemory, SPageInfo* aPageInfo)
		{ return PageTables.StealPage(aPageInfo); }

	/**
	Does nothing, returns KErrNone.
	The RAM containing page tables does not need access restrictions applied for demand paging
	purposes. Page table life-time is implicitly managed through the pages it maps.
	*/
	virtual TInt RestrictPage(DMemoryObject* aMemory, SPageInfo* aPageInfo, TRestrictPagesType aRestriction)
		{ return KErrNone; }

	/**
	Does nothing, returns KErrNone.
	The contents of page tables never need saving as their contents are dynamically generated.
	*/
	virtual TInt CleanPage(DMemoryObject* aMemory, SPageInfo* aPageInfo, TPhysAddr*& aPageArrayEntry)
		{ return KErrNone; }

	/**
	Not implemented, returns KErrNotSupported.
	*/
	virtual TInt Pin(DMemoryObject* aMemory, DMemoryMappingBase* aMapping, TPinArgs& aPinArgs)
		{ return KErrNotSupported; }

	/**
	Not implemented.
	*/
	virtual void Unpin(DMemoryObject* aMemory, DMemoryMappingBase* aMapping, TPinArgs& aPinArgs)
		{ }


	virtual TInt MovePage(	DMemoryObject* aMemory, SPageInfo* aOldPageInfo, 
							TPhysAddr& aNewPage, TUint aBlockZoneId, TBool aBlockRest);
public:
	/**
	Allocate a page of RAM for storing page tables in.

	@param aMemory		A memory object associated with this manager.
	@param aIndex		Page index, within the memory, to allocate the page at.
	@param aDemandPaged	True if the memory is to be used for page tables mapping
						demand paged content.

	@return KErrNone if successful, otherwise one of the system wide error codes.
	*/
	TInt Alloc(DMemoryObject* aMemory, TUint aIndex, TBool aDemandPaged);

	/**
	Free a page of RAM being used for storing page tables.

	@param aMemory		A memory object associated with this manager.
	@param aIndex		Page index, within the memory, to free the page from.
	@param aDemandPaged	True if the memory is being used for page tables mapping
						demand paged content.

	@return KErrNone if successful, otherwise one of the system wide error codes.
	*/
	TInt Free(DMemoryObject* aMemory, TUint aIndex, TBool aDemandPaged);
	};


/**
The single instance of the #DPageTableMemoryManager class.
*/
DPageTableMemoryManager ThePageTableMemoryManager;


TInt DPageTableMemoryManager::Alloc(DMemoryObject* aMemory, TUint aIndex, TBool aDemandPaged)
	{
	TRACE2(("DPageTableMemoryManager::Alloc(0x%08x,0x%x,%d)",aMemory, aIndex, aDemandPaged));
	__NK_ASSERT_DEBUG(MemoryObjectLock::IsHeld(aMemory));

	// allocate page array entry...
	RPageArray::TIter pageList;
	TPhysAddr* p = aMemory->iPages.AddPageStart(aIndex,pageList);
	if(!p)
		return KErrNoMemory;

	// allocate RAM...
	RamAllocLock::Lock();
	TPhysAddr pagePhys;
	TInt r;
	if(aDemandPaged)
		{
		r = ThePager.PageInAllocPages(&pagePhys,1,aMemory->RamAllocFlags());
		__NK_ASSERT_DEBUG(r!=KErrNoMemory);
		}
	else
		{// Allocate fixed pages as page tables aren't movable.
		r = TheMmu.AllocRam(&pagePhys, 1, aMemory->RamAllocFlags(), EPageFixed);
		}
	RamAllocLock::Unlock();

	TUint usedNew = 0;
	if(r==KErrNone)
		{
		// add RAM to page array...
		MmuLock::Lock();
		if(aDemandPaged)
			ThePager.Event(DPager::EEventPagePageTableAlloc,SPageInfo::FromPhysAddr(pagePhys));
		SPageInfo* pi = SPageInfo::FromPhysAddr(pagePhys);
		pi->SetManaged(aMemory,aIndex,aMemory->PageInfoFlags());
		RPageArray::AddPage(p,pagePhys);
		MmuLock::Unlock();
		usedNew = 1;

		// map page...
		r = aMemory->MapPages(pageList);
		}

	// release page array entry...
	aMemory->iPages.AddPageEnd(aIndex,usedNew);

	// revert if error...
	if(r!=KErrNone)
		Free(aMemory,aIndex,aDemandPaged);

	return r;
	}


TInt DPageTableMemoryManager::Free(DMemoryObject* aMemory, TUint aIndex, TBool aDemandPaged)
	{
	TRACE2(("DPageTableMemoryManager::Free(0x%08x,0x%x,%d)",aMemory, aIndex, aDemandPaged));
	__NK_ASSERT_DEBUG(MemoryObjectLock::IsHeld(aMemory));

	// find page array entry...
	RPageArray::TIter pageList;
	TPhysAddr* p = aMemory->iPages.RemovePageStart(aIndex,pageList);
	if(!p)
		return KErrNoMemory;

	// unmap page...
	aMemory->UnmapPages(pageList,true);

	RamAllocLock::Lock();

	// remove page...
	MmuLock::Lock();
	TPhysAddr pagePhys = RPageArray::RemovePage(p);
	MmuLock::Unlock();

	TInt r;
	if(pagePhys==KPhysAddrInvalid)
		{
		// no page removed...
		r = 0;
		}
	else
		{
		// free the removed page...
		if(aDemandPaged)
			ThePager.PageInFreePages(&pagePhys,1);
		else
			TheMmu.FreeRam(&pagePhys, 1, EPageFixed);
		r = 1;
		}

	RamAllocLock::Unlock();

	// cleanup...
	aMemory->iPages.RemovePageEnd(aIndex,r);
	return r;
	}

TInt DPageTableMemoryManager::MovePage(	DMemoryObject* aMemory, SPageInfo* aOldPageInfo, 
										TPhysAddr& aNewPage, TUint aBlockZoneId, TBool aBlockRest)
		{
		// This could be a demand paged page table info which can be discarded 
		// but let the PageTableAllocator handle that.
		return ::PageTables.MovePage(aMemory, aOldPageInfo, aBlockZoneId, aBlockRest);
		}


//
// PageTableAllocator
//

void PageTableAllocator::Init2(DMutex* aLock)
	{
	TRACEB(("PageTableAllocator::Init2(0x%x)",aLock));
	iLock = aLock;

	__NK_ASSERT_DEBUG(iUnpagedAllocator.CheckFreeList());

	// scan for already allocated page tables
	// (assumes the first page table is used to map page tables)...
	SPageTableInfo* pti = (SPageTableInfo*)KPageTableInfoBase;
	TUint pages = 0;
	for(;;)
		{
		TPte pte = ((TPte*)KPageTableBase)[pages];
		if(pte==KPteUnallocatedEntry)
			break; // end (assumes no gaps in page table allocation) 

		// process free page tables in this page...
		TUint freeCount = 0;
		do
			{
			if(pti->IsUnused())
				{
				pti->PtClusterAlloc();
				iUnpagedAllocator.iFreeList.Add(&pti->FreeLink());
				++freeCount;
				}
#ifdef _DEBUG
			else
				__NK_ASSERT_DEBUG(pti->IsPtClusterAllocated());
#endif
			}
		while(!(++pti)->IsFirstInPage());
		iUnpagedAllocator.iFreeCount += freeCount;
		__NK_ASSERT_DEBUG(iUnpagedAllocator.CheckFreeList());
		TRACE2(("PT page 0x%08x has %d free tables",pti[-KPtClusterSize].PageTable(),freeCount));

		// count page, and move on to next one...
		++pages;
		__NK_ASSERT_DEBUG(pages<KChunkSize/KPageSize); // we've assumed less than one page table of page tables
		}

	// construct allocator for page table pages...
	iPtPageAllocator.Init2(pages);

	// initialise allocator page table infos...
	iPageTableGroupCounts[0] = pages;
	__NK_ASSERT_DEBUG(pages/KPageTableGroupSize==0); // we've assumed less than 1 page of page table infos

	// FOLLOWING CODE WILL USE THIS OBJECT TO ALLOCATE SOME PAGE TABLES,
	// SO ALLOCATOR MUST BE INITIALISED TO A FIT STATE BEFORE THIS POINT!

	// construct memory object for page tables...
	TMappingCreateFlags mapFlags = (TMappingCreateFlags)(EMappingCreateFixedVirtual|EMappingCreateReserveAllResources);
#if defined(__CPU_PAGE_TABLES_FULLY_CACHED)
	TMemoryAttributes memAttr = EMemoryAttributeStandard;
#else
	TMemoryAttributes memAttr = (TMemoryAttributes)(EMemoryAttributeNormalUncached|EMemoryAttributeDefaultShareable);
#endif
	TMemoryCreateFlags createFlags = (TMemoryCreateFlags)(EMemoryCreateNoWipe|EMemoryCreateCustomManager);
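	// Note: with EMemoryCreateCustomManager the memory-type argument below
	// appears to carry the custom manager to use, hence the cast of
	// &ThePageTableMemoryManager.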
	TInt r = MM::InitFixedKernelMemory(iPageTableMemory, KPageTableBase, KPageTableEnd, pages<<KPageShift, (TMemoryObjectType)(T_UintPtr)&ThePageTableMemoryManager, createFlags, memAttr, mapFlags);
	__NK_ASSERT_ALWAYS(r==KErrNone);
	MM::MemorySetLock(iPageTableMemory,aLock);

	// construct memory object for page table infos...
	memAttr = EMemoryAttributeStandard;
	TUint size = pages*KPtClusterSize*sizeof(SPageTableInfo);
	size = (size+KPageMask)&~KPageMask;
	r = MM::InitFixedKernelMemory(iPageTableInfoMemory, KPageTableInfoBase, KPageTableInfoEnd, size, (TMemoryObjectType)(T_UintPtr)&ThePageTableMemoryManager, createFlags, memAttr, mapFlags);
	__NK_ASSERT_ALWAYS(r==KErrNone);
	MM::MemorySetLock(iPageTableInfoMemory,aLock);

	// make sure we have enough reserve page tables...
	Lock();
	iUnpagedAllocator.Init2(this,KNumReservedPageTables,false);
	iPagedAllocator.Init2(this,0,true);
	Unlock();

	TRACEB(("PageTableAllocator::Init2 done"));
	}


void PageTableAllocator::Init2B()
	{
	TRACEB(("PageTableAllocator::Init2B()"));
	TInt r = iPageTableMemory->iPages.PreallocateMemory();
	__NK_ASSERT_ALWAYS(r==KErrNone);
	r = iPageTableInfoMemory->iPages.PreallocateMemory();
	__NK_ASSERT_ALWAYS(r==KErrNone);
	TRACEB(("PageTableAllocator::Init2B done"));
	}


void PageTableAllocator::TSubAllocator::Init2(PageTableAllocator* aAllocator, TUint aReserveCount, TBool aDemandPaged)
	{
	iReserveCount = aReserveCount;
	iDemandPaged = aDemandPaged;
	while(iFreeCount<aReserveCount)
		if(!aAllocator->AllocReserve(*this))
			{
			__NK_ASSERT_ALWAYS(0);
			}
	}


void PageTableAllocator::TPtPageAllocator::Init2(TUint aNumInitPages)
	{
	iLowerAllocator = TBitMapAllocator::New(KMaxPageTablePages,ETrue);
	__NK_ASSERT_ALWAYS(iLowerAllocator);
	iLowerAllocator->Alloc(0,aNumInitPages);
	iLowerWaterMark = aNumInitPages-1;

	iUpperAllocator = TBitMapAllocator::New(KMaxPageTablePages,ETrue);
	__NK_ASSERT_ALWAYS(iUpperAllocator);

	__ASSERT_COMPILE(KMaxPageTablePages > (TUint)KMinUnpinnedPagedPtPages);	// Unlikely to be untrue.
	iUpperWaterMark = KMaxPageTablePages - KMinUnpinnedPagedPtPages;
	iPinnedPageTablePages = 0;	// OK to clear this without MmuLock as only one thread running so far.
	}


static TUint32 RandomSeed = 33333;

TUint PageTableAllocator::TPtPageAllocator::RandomPagedPtPage()
	{
	__NK_ASSERT_DEBUG(PageTablesLockIsHeld());

	// Pick an allocated page at random, from iUpperWaterMark to KMaxPageTablePages.
	RandomSeed = RandomSeed * 69069 + 1; // next 'random' number
	TUint allocRange = KMaxPageTablePages - iUpperWaterMark - 1;
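	// The 64-bit multiply and shift below maps the 32-bit random value into
	// the range [0,allocRange) without needing a division.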
	TUint bit = (TUint64(RandomSeed) * TUint64(allocRange)) >> 32;

	// All page table pages should be allocated or we shouldn't be stealing one at random.
	__NK_ASSERT_DEBUG(iUpperAllocator->NotFree(bit, 1));

	return KMaxPageTablePages - 1 - bit;
	}


TInt PageTableAllocator::TPtPageAllocator::Alloc(TBool aDemandPaged)
	{
	__NK_ASSERT_DEBUG(PageTablesLockIsHeld());
	TUint pageIndex;
	if(aDemandPaged)
		{
		TInt bit = iUpperAllocator->Alloc();
		// There are always unpaged page tables so iUpperAllocator will always have 
		// at least one free bit.
		__NK_ASSERT_DEBUG(bit >= 0);

		pageIndex = KMaxPageTablePages - 1 - bit;

		if(pageIndex < iUpperWaterMark)
			{
			// new upper watermark...
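			// Rounding pageIndex down to its SPageTableInfo group checks
			// whether the group would reach the unpaged region; paged and
			// unpaged page tables must never have infos in the same page.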
			if((pageIndex & ~(KPageTableGroupSize - 1)) <= iLowerWaterMark)
				{
				// clashes with other bitmap allocator, so fail...
				iUpperAllocator->Free(bit);
				TRACE(("TPtPageAllocator::Alloc too low iUpperWaterMark %d ",iUpperWaterMark));
				return -1;
				}
			// Hold mmu lock so iUpperWaterMark isn't read by pinning before we've updated it.
			MmuLock::Lock();
			iUpperWaterMark = pageIndex;
			MmuLock::Unlock();
			TRACE(("TPtPageAllocator::Alloc new iUpperWaterMark=%d",pageIndex));
			}
		}
	else
		{
		TInt bit = iLowerAllocator->Alloc();
		if(bit < 0)
			return bit;
		pageIndex = bit;
		if(pageIndex > iLowerWaterMark)
			{// iLowerAllocator->Alloc() should only pick the next bit after iLowerWaterMark.
			__NK_ASSERT_DEBUG(pageIndex == iLowerWaterMark + 1);
			MmuLock::Lock();
			// new lower watermark...
			if(	pageIndex >= (iUpperWaterMark & ~(KPageTableGroupSize - 1)) ||
				AtPinnedPagedPtsLimit(iUpperWaterMark, pageIndex, iPinnedPageTablePages))
				{
				// clashes with other bitmap allocator or it would reduce the amount 
				// of available unpinned paged page tables too far, so fail...
				MmuLock::Unlock();
				iLowerAllocator->Free(bit);
				TRACE(("TPtPageAllocator::Alloc iLowerWaterMark=%d",iLowerWaterMark));
				return -1;
				}
			iLowerWaterMark = pageIndex;
			MmuLock::Unlock();
			TRACE(("TPtPageAllocator::Alloc new iLowerWaterMark=%d", iLowerWaterMark));
			}
		}
	return pageIndex;
	}


void PageTableAllocator::TPtPageAllocator::Free(TUint aPageIndex, TBool aDemandPaged)
	{
	__NK_ASSERT_DEBUG(PageTablesLockIsHeld());
	if(aDemandPaged)
		iUpperAllocator->Free(KMaxPageTablePages-1-aPageIndex);
	else
		iLowerAllocator->Free(aPageIndex);
	}


void PageTableAllocator::Lock()
	{
	Kern::MutexWait(*iLock);
	}


void PageTableAllocator::Unlock()
	{
	Kern::MutexSignal(*iLock);
	}


TBool PageTableAllocator::LockIsHeld()
	{
	return iLock->iCleanup.iThread == &Kern::CurrentThread();
	}


TBool PageTableAllocator::AllocReserve(TSubAllocator& aSubAllocator)
	{
	__NK_ASSERT_DEBUG(LockIsHeld());

	TBool demandPaged = aSubAllocator.iDemandPaged;

	// allocate page...
	TInt ptPageIndex = iPtPageAllocator.Alloc(demandPaged);
	if (ptPageIndex < 0) 
		{
		if (demandPaged)
			{
			TInt r;
			do
				{
				// Can't fail to find a demand paged page table, otherwise a page fault 
				// could fail with KErrNoMemory.  Therefore, keep attempting to steal a 
				// demand paged page table page until successful.
				TUint index = iPtPageAllocator.RandomPagedPtPage();
				MmuLock::Lock();
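				// Each page of page tables holds KPtClusterSize tables of
				// KPageTableSize bytes, hence the combined shift from page
				// index to linear address.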
				TLinAddr pageTableLin = KPageTableBase + (index << (KPtClusterShift + KPageTableShift));
				TPhysAddr ptPhysAddr = Mmu::LinearToPhysical(pageTableLin);
				// Page tables must be allocated otherwise we shouldn't be stealing the page.
				__NK_ASSERT_DEBUG(ptPhysAddr != KPhysAddrInvalid);
				SPageInfo* ptSPageInfo = SPageInfo::FromPhysAddr(ptPhysAddr);
				r = StealPage(ptSPageInfo);
				MmuLock::Unlock();
				}
			while(r != KErrCompletion);
			// Retry the allocation now that we've stolen a page table page.
			ptPageIndex = iPtPageAllocator.Alloc(demandPaged);
			__NK_ASSERT_DEBUG(ptPageIndex >= 0);
			}
		else
			{
			return EFalse;
			}
		}

	// commit memory for page...
	__NK_ASSERT_DEBUG(iPageTableMemory); // check we've initialised iPageTableMemory
	TInt r = ThePageTableMemoryManager.Alloc(iPageTableMemory,ptPageIndex,aSubAllocator.iDemandPaged);
	if(r==KErrNoMemory)
		{
		iPtPageAllocator.Free(ptPageIndex,aSubAllocator.iDemandPaged);
		return false;
		}
	__NK_ASSERT_DEBUG(r==KErrNone);

	// allocate page table info...
	TUint ptgIndex = ptPageIndex/KPageTableGroupSize;
	if(!iPageTableGroupCounts[ptgIndex])
		{
		__NK_ASSERT_DEBUG(iPageTableInfoMemory); // check we've initialised iPageTableInfoMemory
		r = ThePageTableMemoryManager.Alloc(iPageTableInfoMemory,ptgIndex,aSubAllocator.iDemandPaged);

		if(r==KErrNoMemory)
			{
			r = ThePageTableMemoryManager.Free(iPageTableMemory,ptPageIndex,aSubAllocator.iDemandPaged);
			__NK_ASSERT_DEBUG(r==1);
			iPtPageAllocator.Free(ptPageIndex,aSubAllocator.iDemandPaged);
			return false;
			}
		__NK_ASSERT_DEBUG(r==KErrNone);
		// For paged page tables set all the page table infos in this page as unused 
		// and their page table clusters as not allocated.
		if (aSubAllocator.iDemandPaged)
			{
			SPageTableInfo* ptiBase = (SPageTableInfo*)KPageTableInfoBase + (ptgIndex*KPageTableInfosPerPage);
			memclr(ptiBase, KPageSize);
			}
		}
	++iPageTableGroupCounts[ptgIndex];

	SPageTableInfo* pti = (SPageTableInfo*)KPageTableInfoBase+ptPageIndex*KPtClusterSize;
	aSubAllocator.AllocPage(pti);
	return true;
	}


void PageTableAllocator::TSubAllocator::AllocPage(SPageTableInfo* aPageTableInfo)
	{
	SPageTableInfo* pti = aPageTableInfo;
	__NK_ASSERT_DEBUG(pti->IsFirstInPage());

	TRACE2(("Alloc PT page (%d) 0x%08x",iDemandPaged,pti->PageTable()));

	// initialise page table infos...
	do pti->New(iDemandPaged);
	while(!(++pti)->IsFirstInPage());
	pti -= KPtClusterSize;

	// all page tables initially unused, so start them off on iCleanupList...
	pti->AddToCleanupList(iCleanupList);
	iFreeCount += KPtClusterSize;
	__NK_ASSERT_DEBUG(CheckFreeList());
	}


SPageTableInfo* PageTableAllocator::TSubAllocator::FreePage()
	{
	if(!IsCleanupRequired())
		return 0;

	// get a completely free page...
	SDblQueLink* link = iCleanupList.Last();
	__NK_ASSERT_DEBUG(link);
	SPageTableInfo* pti = SPageTableInfo::FromFreeLink(link);
	__NK_ASSERT_DEBUG(pti->IsFirstInPage());
	pti->RemoveFromCleanupList();
	iFreeCount -= KPtClusterSize;
	__NK_ASSERT_DEBUG(CheckFreeList());

	TRACE2(("Free PT page (%d) 0x%08x",iDemandPaged,pti->PageTable()));

	// Mark each page table info as no longer having its page table cluster allocated.
	do 
		{// make sure all page tables in page are unused...
		__NK_ASSERT_DEBUG(pti->IsUnused());
		pti->PtClusterFreed();
		}
	while(!(++pti)->IsFirstInPage());
	pti -= KPtClusterSize;

	return pti;
	}


TBool PageTableAllocator::FreeReserve(TSubAllocator& aSubAllocator)
	{
	__NK_ASSERT_DEBUG(LockIsHeld());

	// get a page which needs freeing...
	SPageTableInfo* pti = aSubAllocator.FreePage();
	if(!pti)
		return false;

	// free the page...
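	// (each page of page tables is described by KPtClusterSize SPageTableInfo
	// structs of 1<<KPageTableInfoShift bytes, so the info address converts
	// back to a page index via the combined shift)...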
	TUint ptPageIndex = ((TLinAddr)pti-KPageTableInfoBase)>>(KPageTableInfoShift+KPtClusterShift);
	iPtPageAllocator.Free(ptPageIndex,aSubAllocator.iDemandPaged);
	TInt r = ThePageTableMemoryManager.Free(iPageTableMemory,ptPageIndex,aSubAllocator.iDemandPaged);
	(void)r;
	__NK_ASSERT_DEBUG(r==1);

	// free page table info...
	TUint ptgIndex = ptPageIndex/KPageTableGroupSize;
	TUint groupCount = iPageTableGroupCounts[ptgIndex]; // compiler handles half-word values stupidly, so give it a hand
	--groupCount;
	iPageTableGroupCounts[ptgIndex] = groupCount;
	if(!groupCount)
		r = ThePageTableMemoryManager.Free(iPageTableInfoMemory,ptgIndex,aSubAllocator.iDemandPaged);

	return true;
	}


TPte* PageTableAllocator::Alloc(TBool aDemandPaged)
	{
	TRACE(("PageTableAllocator::Alloc(%d)",(bool)aDemandPaged));
	TPte* pt = DoAlloc(aDemandPaged);
	TRACE(("PageTableAllocator::Alloc() returns 0x%08x phys=0x%08x",pt,pt?Mmu::PageTablePhysAddr(pt):KPhysAddrInvalid));
	return pt;
	}


TPte* PageTableAllocator::DoAlloc(TBool aDemandPaged)
	{
	__NK_ASSERT_DEBUG(LockIsHeld());

#ifdef _DEBUG
	// simulated OOM, but not if demand paged as this can't fail under normal circumstances...
	if(!aDemandPaged)
		{
		RamAllocLock::Lock();
		TBool fail = K::CheckForSimulatedAllocFail();
		RamAllocLock::Unlock();
		if(fail)
			return 0;
		}
#endif

	TSubAllocator& allocator = aDemandPaged ? iPagedAllocator : iUnpagedAllocator;

	__NK_ASSERT_DEBUG(!iAllocating || !aDemandPaged); // can't recursively allocate demand paged tables

	__NK_ASSERT_DEBUG(iAllocating<=allocator.iReserveCount); // can't recursively allocate more than the reserve

	// keep up enough spare page tables...
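	// Note: AllocReserve() itself maps memory and so may re-enter DoAlloc() to
	// allocate further unpaged page tables; iAllocating counts that recursion
	// so it can be bounded by the reserve.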
	if(!iAllocating++) // if we haven't gone recursive...
		{
		// make sure we have a page table to allocate...
		while(allocator.iFreeCount<=allocator.iReserveCount)
			if(!AllocReserve(allocator))
				{
				--iAllocating;
				return 0; // out of memory
				}
		}
	else
		{
		TRACE(("PageTableAllocator::DoAlloc recurse=%d",iAllocating));
		}

	// allocate a page table...
	SPageTableInfo* pti = allocator.Alloc();

	// initialise page table info...
	pti->Init();

	// initialise page table...
	TPte* pt = pti->PageTable();
	memclr(pt,KPageTableSize);
	CacheMaintenance::MultiplePtesUpdated((TLinAddr)pt,KPageTableSize);

	// done...
	--iAllocating;
	return pt;
	}


SPageTableInfo* PageTableAllocator::TSubAllocator::Alloc()
	{
	__NK_ASSERT_DEBUG(PageTablesLockIsHeld());
	__NK_ASSERT_DEBUG(iFreeCount);
	__NK_ASSERT_DEBUG(CheckFreeList());

	// get next free page table...
	SDblQueLink* link = iFreeList.GetFirst();
	SPageTableInfo* pti; 
	if(link)
		pti = SPageTableInfo::FromFreeLink(link);
	else
		{
		// need to get one back from the cleanup list...
		link = iCleanupList.First();
		__NK_ASSERT_DEBUG(link); // we can't be out of page tables
		pti = SPageTableInfo::FromFreeLink(link);
		__NK_ASSERT_DEBUG(pti->IsFirstInPage());
		pti->RemoveFromCleanupList();

		// add other page tables in the page to the free list...
		SPageTableInfo* free = pti+1;
		while(!free->IsFirstInPage())
			{
			__NK_ASSERT_DEBUG(free->IsUnused());
			iFreeList.Add(&free->FreeLink());
			++free;
			}
		}

	// count page as allocated...
	--iFreeCount;
	__NK_ASSERT_DEBUG(pti->IsUnused());
	__NK_ASSERT_DEBUG(CheckFreeList());

	return pti;
	}


void PageTableAllocator::Free(TPte* aPageTable)
	{
	TRACE(("PageTableAllocator::Free(0x%08x)",aPageTable));
	DoFree(aPageTable);
	}


void PageTableAllocator::DoFree(TPte* aPageTable)
	{
	__NK_ASSERT_DEBUG(LockIsHeld());

	// make sure page table isn't being aliased...
	TPhysAddr pagePhys = Mmu::PageTablePhysAddr(aPageTable);
	__NK_ASSERT_DEBUG(pagePhys!=KPhysAddrInvalid);
	TheMmu.RemoveAliasesForPageTable(pagePhys);

	// free page table...
	SPageTableInfo* pti = SPageTableInfo::FromPtPtr(aPageTable);
	TSubAllocator& allocator = pti->IsDemandPaged() ? iPagedAllocator : iUnpagedAllocator;
	allocator.Free(pti);

	// check for surplus pages...
	if(allocator.IsCleanupRequired())
		{
		iCleanup.Add(CleanupTrampoline,this);
		}
	}


void PageTableAllocator::TSubAllocator::Free(SPageTableInfo* aPageTableInfo)
	{
	__NK_ASSERT_DEBUG(PageTablesLockIsHeld());
	__NK_ASSERT_DEBUG(CheckFreeList());

	SPageTableInfo* pti = aPageTableInfo;

	// clear the page table info...
	MmuLock::Lock();
	__NK_ASSERT_DEBUG(!pti->PermanenceCount());
	pti->SetUnused();
	MmuLock::Unlock();

	// scan other page tables in same page...
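	// (free page tables from the same page are kept adjacent and in address
	// order on the free list; the insertions below preserve that ordering,
	// which CheckFreeList() verifies)...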
	SPageTableInfo* first = pti->FirstInPage();
	SPageTableInfo* last = pti->LastInPage();
	SPageTableInfo* prev;
	SPageTableInfo* next;

	// try insert page table after previous free page table in same page...
	prev = pti;
	while(prev>first)
		{
		--prev;
		if(prev->IsUnused())
			{
			pti->FreeLink().InsertAfter(&prev->FreeLink());
			goto inserted;
			}
		}

	// try insert page table before next free page table in same page...
	next = pti;
	while(next<last)
		{
		++next;
		if(next->IsUnused())
			{
			pti->FreeLink().InsertBefore(&next->FreeLink());
			goto inserted;
			}
		}

	// only free page table in page, so link into start of free list...
	pti->FreeLink().InsertAfter(&iFreeList.iA);

inserted:
	++iFreeCount;
	__NK_ASSERT_DEBUG(CheckFreeList());

	// see if all page tables in page are empty...
	pti = first;
	do
		{
		if(!pti->IsUnused())
			return; // some page tables still in use, so end
		}
	while(!(++pti)->IsFirstInPage());
	pti -= KPtClusterSize;

	// check if page with page table in is pinned...
	MmuLock::Lock();
	TPte* pt = pti->PageTable();
	TPhysAddr pagePhys = Mmu::PageTablePhysAddr(pt);
	SPageInfo* pi = SPageInfo::FromPhysAddr(pagePhys);
	TBool pinned = pi->PagedState()==SPageInfo::EPagedPinned;
	MmuLock::Unlock();
	// Note, the pinned state can't change even though we've now released the MmuLock.
	// This is because all page tables in the page are unused and we don't pin unused
	// page tables. Also, the page table(s) can't become used again whilst this function
	// executes as we hold the page table allocator lock.
	if(pinned)
		{
		// return now and leave page table(s) in free list if their page is pinned...
		// Note, when page is unpinned it will end up in the paging live list and
		// eventually be reclaimed for other use (if the page tables in the page
		// don't get reallocated before then).
		__NK_ASSERT_DEBUG(pti->IsDemandPaged()); // only paged page tables should have been pinned
		return; 
		}

	// the page with our page table in it is no longer in use...
	MoveToCleanup(pti);
	}


void PageTableAllocator::TSubAllocator::MoveToCleanup(SPageTableInfo* aPageTableInfo)
	{
	__NK_ASSERT_DEBUG(PageTablesLockIsHeld());
	__NK_ASSERT_DEBUG(CheckFreeList());

	SPageTableInfo* pti = aPageTableInfo;
	__NK_ASSERT_DEBUG(pti->IsFirstInPage());

	TRACE2(("Cleanup PT page (%d) 0x%08x",iDemandPaged,pti->PageTable()));

	// make sure all page tables in page are unused...
#ifdef _DEBUG
	do __NK_ASSERT_DEBUG(pti->IsUnused());
	while(!(++pti)->IsFirstInPage());
	pti -= KPtClusterSize;
#endif

	// unlink all page tables in page...
	SDblQueLink* prev = pti->FreeLink().iPrev;
	SDblQueLink* next = pti->LastInPage()->FreeLink().iNext;
	prev->iNext = next;
	next->iPrev = prev;

	// add page tables to cleanup list...
	__NK_ASSERT_DEBUG(!pti->IsOnCleanupList());
	pti->AddToCleanupList(iCleanupList);
	__NK_ASSERT_DEBUG(CheckFreeList());
	}


TBool PageTableAllocator::TSubAllocator::IsCleanupRequired()
	{
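	// Cleanup is worthwhile only if a whole cluster could be freed while still
	// leaving iReserveCount page tables free.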
	return iFreeCount>=iReserveCount+KPtClusterSize && !iCleanupList.IsEmpty();
	}


#ifdef _DEBUG

TBool PageTableAllocator::TSubAllocator::CheckFreeList()
	{
	TUint count = iFreeCount;

	// count page tables in iCleanupList...
	SDblQueLink* head = &iCleanupList.iA;
	SDblQueLink* link = head;
	for(;;)
		{
		link = link->iNext;
		if(link==head)
			break;
		SPageTableInfo* pti = SPageTableInfo::FromFreeLink(link);
		__NK_ASSERT_DEBUG(pti->IsFirstInPage());
		__NK_ASSERT_DEBUG(pti->IsOnCleanupList());
		if(count<(TUint)KPtClusterSize)
			return false;
		count -= KPtClusterSize;
		}

	// count page tables in iFreeList...
	head = &iFreeList.iA;
	link = head;
	while(count)
		{
		link = link->iNext;
		if(link==head)
			return false;

		// check next free page table in page is linked in correct order...
		SPageTableInfo* pti = SPageTableInfo::FromFreeLink(link);
		SPageTableInfo* last = pti->LastInPage();
		SPageTableInfo* next = pti;
		while(next<last)
			{
			++next;
			if(next->IsUnused())
				{
				__NK_ASSERT_DEBUG(pti->FreeLink().iNext==&next->FreeLink());
				__NK_ASSERT_DEBUG(next->FreeLink().iPrev==&pti->FreeLink());
				break;
				}
			}

		--count;
		}

	return link->iNext==head;
	}

#endif



//
// Paged page table handling
//

TInt SPageTableInfo::ForcedFree()
	{
	__NK_ASSERT_DEBUG(PageTablesLockIsHeld());
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
	__NK_ASSERT_DEBUG(IsDemandPaged());

	TUint type = iType;

	if(type==EUnused)
		return KErrNone;

	__NK_ASSERT_DEBUG(iPermanenceCount==0);

	// clear all PTEs in page table...
	TPte* pt = PageTable();
	memclr(pt,KPageTableSize);
	__e32_memory_barrier(); // make sure all CPUs read zeros from pt so forcing a page-in (rather than a rejuvenate) if accessed
	iPageCount = 0;

	if(type==ECoarseMapping)
		{
		TRACE2(("SPageTableInfo::ForcedFree() coarse 0x%08x 0x%x %d",iCoarse.iMemoryObject,iCoarse.iChunkIndex,iCoarse.iPteType));
		// mustn't release MmuLock between clearing page table and calling this
		// (otherwise page table may get updated before it's actually removed from
		// the memory object)...
		iCoarse.iMemoryObject->StealPageTable(iCoarse.iChunkIndex,iCoarse.iPteType);
		}
	else if(type==EFineMapping)
		{
		// need to remove page table from address space's page directory...
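		// (iFine.iLinAddrAndOsAsid packs the page-aligned linear address and
		// the OS ASID into one word; the ASID occupies the low, page-offset
		// bits)...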
		TLinAddr addr = iFine.iLinAddrAndOsAsid;
		TUint osAsid = addr&KPageMask;
		TPde* pPde = Mmu::PageDirectoryEntry(osAsid,addr);

		TRACE2(("SPageTableInfo::ForcedFree() fine %d 0x%08x",osAsid,addr&~KPageMask));

		TPde pde = KPdeUnallocatedEntry;
		TRACE2(("!PDE %x=%x",pPde,pde));
		*pPde = pde;
		SinglePdeUpdated(pPde);
		}
	else
		{
		// invalid type...
		__NK_ASSERT_DEBUG(0);
		return KErrNotSupported;
		}

	MmuLock::Unlock();

	// make sure page table updates visible to MMU...
	CacheMaintenance::MultiplePtesUpdated((TLinAddr)pt,KPageTableSize);
	InvalidateTLB();

	// free the page table back to the allocator,
	// this will also remove any IPC alias using it...
	__NK_ASSERT_DEBUG(iPageCount==0); // should still be unused
	::PageTables.Free(pt);

	MmuLock::Lock();

	return KErrNone;
	}


TInt PageTableAllocator::StealPage(SPageInfo* aPageInfo)
	{
	TRACE2(("PageTableAllocator::StealPage(0x%08x)",aPageInfo));
	__NK_ASSERT_DEBUG(LockIsHeld()); // only works if PageTableAllocator lock is the RamAllocLock
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());

	if (aPageInfo->Owner() == iPageTableInfoMemory)
		return StealPageTableInfo(aPageInfo);

	__UNLOCK_GUARD_START(MmuLock);

	// This must be a page table page so steal it.
	__NK_ASSERT_ALWAYS(aPageInfo->Owner()==iPageTableMemory);
	TUint ptPageIndex = aPageInfo->Index();
	SPageTableInfo* pti = (SPageTableInfo*)KPageTableInfoBase+ptPageIndex*KPtClusterSize;

	aPageInfo->SetModifier(&pti);
	__UNLOCK_GUARD_END(MmuLock);

	// forcibly free each page table in the page...
	TInt r;
	do
		{// Check for pinning, ForcedFree() releases MmuLock so must check for
		// each page table.
		if (aPageInfo->PagedState() == SPageInfo::EPagedPinned)
			{// The page table page is pinned so can't steal it.
			r = KErrInUse;
			break;
			}
		r = pti->ForcedFree();
		if(r!=KErrNone)
			break;
		if(aPageInfo->CheckModified(&pti))
			{
			r = KErrInUse;
			break;
			}
		}
	while(!(++pti)->IsFirstInPage());
	pti -= KPtClusterSize; // restore pti back to first page table

	if(r==KErrNone)
		{
		MmuLock::Unlock();
		if(!pti->IsOnCleanupList())
			{
			// the page might not already be on the cleanup list in the case where
			// it was previously freed whilst it was pinned.
			// In this case, a later unpinning would have put it back into the paging live
			// list from where it is now subsequently being stolen...
			iPagedAllocator.MoveToCleanup(pti);
			}
		// free the page from allocator so it ends up back in the paging pool as a free page...
		while(FreeReserve(iPagedAllocator))
			{}
		// return an 'error' to indicate page has not been stolen.
		// We have however achieved the main aim of making the page 'free' and
		// it will be available if page stealing attempts to steal the page again...
		r = KErrCompletion;
		MmuLock::Lock();
		}

	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
	TRACE2(("PageTableAllocator::StealPage returns %d",r));
	return r;
	}


TInt PageTableAllocator::StealPageTableInfo(SPageInfo* aPageInfo)
	{
	// Need to steal every page table for every page table info in this page.
	// This page can't be modified or removed as we hold the lock, however
	// the page table pages being freed may be rejuvenated and therefore their 
	// SPageInfos may be marked as modified.
	TInt r = KErrNone;
	TUint ptiOffset = aPageInfo->Index() * KPageTableInfosPerPage;
	SPageTableInfo* ptiBase = (SPageTableInfo*)KPageTableInfoBase + ptiOffset;
	SPageTableInfo* ptiEnd = ptiBase + KPageTableInfosPerPage;
	TUint flash = 0;
	for (SPageTableInfo* pti = ptiBase; pti < ptiEnd;)
		{// Free each page table cluster that is allocated.
		if (pti->IsPtClusterAllocated())
			{
			TPhysAddr ptPhysAddr = Mmu::LinearToPhysical((TLinAddr)pti->PageTable());
			SPageInfo* ptSPageInfo = SPageInfo::FromPhysAddr(ptPhysAddr);
			ptSPageInfo->SetModifier(&flash);
			do 
				{
				__NK_ASSERT_DEBUG(pti->IsPtClusterAllocated());
				if (aPageInfo->PagedState() == SPageInfo::EPagedPinned || 
					ptSPageInfo->PagedState() == SPageInfo::EPagedPinned)
					{// The page table or page table info is pinned so can't steal info page.
					r = KErrInUse;
					break;
					}
				r = pti->ForcedFree();
				if(r!=KErrNone)
					break;
				if(ptSPageInfo->CheckModified(&flash))
					{// The page table page has been rejuvenated so can't steal it.
					r = KErrInUse;
					break;
					}
				}
			while (!(++pti)->IsFirstInPage());
			if (r != KErrNone)
				break;
			SPageTableInfo* ptiTmp = pti - KPtClusterSize;
			MmuLock::Unlock();
			if(!ptiTmp->IsOnCleanupList())
				{
				// the page might not already be on the cleanup list in the case where
				// it was previously freed whilst it was pinned.
				// In this case, a later unpinning would have put it back into the paging live
				// list from where it is now subsequently being stolen...
				iPagedAllocator.MoveToCleanup(ptiTmp);
				}
			MmuLock::Lock();
			flash = 0;		// The MmuLock has been flashed at least once.
			}
		else
			{// Move onto the next cluster this page of page table infos refers to.
			__NK_ASSERT_DEBUG(pti->IsFirstInPage());
			pti += KPtClusterSize;
			MmuLock::Flash(flash,KMaxPageInfoUpdatesInOneGo);
			}
		}
	// free the pages discarded from allocator so they end up back in the paging pool as free pages...
	MmuLock::Unlock();
	while(FreeReserve(iPagedAllocator))
		{}
	if (r == KErrNone)
		r = KErrCompletion;	// The pager needs to remove the page from the live list.
	MmuLock::Lock();
	return r;
	}


TInt PageTableAllocator::MovePage(DMemoryObject* aMemory, SPageInfo* aOldPageInfo, 
									TUint aBlockZoneId, TBool aBlockRest)
	{
	// We don't move page table or page table info pages, however, if this page 
	// is demand paged then we may be able to discard it.
	MmuLock::Lock();
	if (aOldPageInfo->Owner() == iPageTableInfoMemory)
		{
		if (!(iPtPageAllocator.IsDemandPagedPtInfo(aOldPageInfo)))
			{
			MmuLock::Unlock();
			return KErrNotSupported;
			}
		}
	else
		{
		__NK_ASSERT_DEBUG(aOldPageInfo->Owner() == iPageTableMemory);
		if (!(iPtPageAllocator.IsDemandPagedPt(aOldPageInfo)))
			{
			MmuLock::Unlock();
			return KErrNotSupported;
			}
		}
	if (aOldPageInfo->PagedState() == SPageInfo::EPagedPinned)
		{// The page is pinned so don't attempt to discard it as pinned pages 
		// can't be discarded.  Also, the pager will invoke this method again.
		MmuLock::Unlock();
		return KErrInUse;
		}
	// Let the pager discard the page as it controls the size of the live list.
	// If the size of the live list allows then eventually 
	// PageTableAllocator::StealPage() will be invoked on this page.
	return ThePager.DiscardPage(aOldPageInfo, aBlockZoneId, aBlockRest);
	}


TInt PageTableAllocator::PinPageTable(TPte* aPageTable, TPinArgs& aPinArgs)
	{
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
	__NK_ASSERT_DEBUG(SPageTableInfo::FromPtPtr(aPageTable)->IsDemandPaged());
	__NK_ASSERT_DEBUG(!SPageTableInfo::FromPtPtr(aPageTable)->IsUnused());
	__NK_ASSERT_DEBUG(aPinArgs.HaveSufficientPages(KNumPagesToPinOnePageTable));

	// pin page with page table in...
	TPhysAddr pagePhys = Mmu::PageTablePhysAddr(aPageTable);
	SPageInfo* pi = SPageInfo::FromPhysAddr(pagePhys);
	if (!pi->PinCount())
		{// Page is being pinned having previously been unpinned.
		TInt r = iPtPageAllocator.PtPagePinCountInc();
		if (r != KErrNone)
			return r;
		}
	ThePager.Pin(pi,aPinArgs);

	// pin page with page table info in...
	SPageTableInfo* pti = SPageTableInfo::FromPtPtr(aPageTable);
	pagePhys = Mmu::UncheckedLinearToPhysical((TLinAddr)pti,KKernelOsAsid);
	pi = SPageInfo::FromPhysAddr(pagePhys);
	ThePager.Pin(pi,aPinArgs);
	return KErrNone;
	}


void PageTableAllocator::UnpinPageTable(TPte* aPageTable, TPinArgs& aPinArgs)
	{
	// unpin page with page table info in...
	SPageTableInfo* pti = SPageTableInfo::FromPtPtr(aPageTable);
	TPhysAddr pagePhys = Mmu::UncheckedLinearToPhysical((TLinAddr)pti,KKernelOsAsid);
	SPageInfo* pi = SPageInfo::FromPhysAddr(pagePhys);
	ThePager.Unpin(pi,aPinArgs);

	// unpin page with page table in...
	pagePhys = Mmu::PageTablePhysAddr(aPageTable);
	pi = SPageInfo::FromPhysAddr(pagePhys);
	ThePager.Unpin(pi,aPinArgs);

	if (!pi->PinCount())
		{// This page table page is no longer pinned.
		iPtPageAllocator.PtPagePinCountDec();
		}
	}


#ifdef _DEBUG
TBool IsPageTableUnpagedRemoveAllowed(SPageInfo* aPageInfo)
		{ return ::PageTables.IsPageTableUnpagedRemoveAllowed(aPageInfo); }

TBool PageTableAllocator::IsPageTableUnpagedRemoveAllowed(SPageInfo* aPageInfo)
	{
	if (aPageInfo->Owner() == iPageTableInfoMemory)
		{// Page table info pages are never added to the live list but can be
		// stolen via DPager::StealPage()
		return ETrue;
		}

	if (aPageInfo->Owner() == iPageTableMemory)
		{// Page table pages are added to the live list but only after the page they 
		// map has been paged in. Therefore, a pde can reference a pte before it has been
		// added to the live list so allow this but for uninitialised page table pages only.
		TUint ptPageIndex = aPageInfo->Index();
		SPageTableInfo* pti = (SPageTableInfo*)KPageTableInfoBase+ptPageIndex*KPtClusterSize;
		do
			{
			if (!pti->IsUnused())
				{
				TPte* pte = pti->PageTable();
				TPte* pteEnd = pte + (KPageTableSize/sizeof(TPte));
				while (pte < pteEnd)
					if (*pte++ != KPteUnallocatedEntry)
						return EFalse;
				}
			}
		while(!(++pti)->IsFirstInPage());
		return ETrue;
		}
	return EFalse;
	}
#endif


//
// Cleanup
//

void PageTableAllocator::CleanupTrampoline(TAny* aSelf)
	{
	((PageTableAllocator*)aSelf)->Cleanup();
	}


void PageTableAllocator::Cleanup()
	{
	// free any surplus pages...
	Lock();
	while(FreeReserve(iPagedAllocator) || FreeReserve(iUnpagedAllocator))
		{}
	Unlock();
	}