// os/kernelhwsrv/kernel/eka/memmodel/epoc/flexible/mmu/mmanager.cpp
// Copyright (c) 2007-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
//

#include <plat_priv.h>
#include "cache_maintenance.h"
#include "decompress.h"	// include for the generic BytePairDecompress().
#include "mm.h"
#include "mmu.h"
#include "mpager.h"
#include "mmanager.h"
#include "mmapping.h"
#include "mobject.h"
#include "mcleanup.h"


//
// DMemoryManager
//

TInt DMemoryManager::New(DMemoryObject*& aMemory, TUint aSizeInPages, TMemoryAttributes aAttributes, TMemoryCreateFlags aCreateFlags)
	{
	DMemoryObject* memory;
	if(aSizeInPages&(KChunkMask>>KPageShift))
		memory = DFineMemory::New(this,aSizeInPages,aAttributes,aCreateFlags);
	else
		memory = DCoarseMemory::New(this,aSizeInPages,aAttributes,aCreateFlags);
	aMemory = memory;
	if(!memory)
		return KErrNoMemory;
	return KErrNone;
	}
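
// Note: (KChunkMask>>KPageShift) is the number of pages per chunk minus one,
// so the test above is true when aSizeInPages is not a whole number of chunks.
// As a worked example (assuming the common ARM configuration of 4KB pages and
// 1MB chunks, so KChunkMask>>KPageShift == 0xFF): a 256-page object is a whole
// chunk and gets the coarse representation; a 100-page object gets the fine one.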


TInt DMemoryManager::Alloc(DMemoryObject* /*aMemory*/, TUint /*aIndex*/, TUint /*aCount*/)
	{
	return KErrNotSupported;
	}


TInt DMemoryManager::AllocContiguous(DMemoryObject* /*aMemory*/, TUint /*aIndex*/, TUint /*aCount*/, TUint /*aAlign*/, TPhysAddr& /*aPhysAddr*/)
	{
	return KErrNotSupported;
	}


void DMemoryManager::Free(DMemoryObject* /*aMemory*/, TUint /*aIndex*/, TUint /*aCount*/)
	{
	}


TInt DMemoryManager::Wipe(DMemoryObject* /*aMemory*/)
	{
	return KErrNotSupported;
	}


TInt DMemoryManager::AddPages(DMemoryObject* /*aMemory*/, TUint /*aIndex*/, TUint /*aCount*/, TPhysAddr* /*aPages*/)
	{
	return KErrNotSupported;
	}


TInt DMemoryManager::AddContiguous(DMemoryObject* /*aMemory*/, TUint /*aIndex*/, TUint /*aCount*/, TPhysAddr /*aPhysAddr*/)
	{
	return KErrNotSupported;
	}


TInt DMemoryManager::RemovePages(DMemoryObject* /*aMemory*/, TUint /*aIndex*/, TUint /*aCount*/, TPhysAddr* /*aPages*/)
	{
	return KErrNotSupported;
	}


TInt DMemoryManager::AllowDiscard(DMemoryObject* /*aMemory*/, TUint /*aIndex*/, TUint /*aCount*/)
	{
	return KErrNotSupported;
	}


TInt DMemoryManager::DisallowDiscard(DMemoryObject* /*aMemory*/, TUint /*aIndex*/, TUint /*aCount*/)
	{
	return KErrNotSupported;
	}


TInt DMemoryManager::StealPage(DMemoryObject* /*aMemory*/, SPageInfo* /*aPageInfo*/)
	{
	return KErrNotSupported;
	}


TInt DMemoryManager::RestrictPage(DMemoryObject* /*aMemory*/, SPageInfo* /*aPageInfo*/, TRestrictPagesType /*aRestriction*/)
	{
	return KErrNotSupported;
	}


TInt DMemoryManager::CleanPage(DMemoryObject* aMemory, SPageInfo* aPageInfo, TPhysAddr*& /*aPageArrayEntry*/)
	{
	if(aPageInfo->IsDirty()==false)
		return KErrNone;
	__NK_ASSERT_DEBUG(0);
	return KErrNotSupported;
	}


TInt DMemoryManager::HandleFault(	DMemoryObject* aMemory, TUint aIndex, DMemoryMapping* aMapping,
									TUint aMapInstanceCount, TUint aAccessPermissions)
	{
	(void)aMemory;
	(void)aIndex;
	(void)aMapping;
	(void)aMapInstanceCount;
	(void)aAccessPermissions;
//	Kern::Printf("DMemoryManager::HandlePageFault(0x%08x,0x%x,0x%08x,%d)",aMemory,aIndex,aMapping,aAccessPermissions);
	return KErrAbort;
	}


TInt DMemoryManager::MovePage(	DMemoryObject* aMemory, SPageInfo* aOldPageInfo, 
								TPhysAddr& aNewPage, TUint aBlockZoneId, TBool aBlockRest)
	{
	return KErrNotSupported;
	}

TZonePageType DMemoryManager::PageType()
	{// This should not be invoked on memory managers that do not use the methods
	// AllocPages() and FreePages().
	__NK_ASSERT_DEBUG(0);
	return EPageFixed;
	}

static TMemoryCleanup Cleanup;

DMemoryObject* DMemoryManager::iCleanupHead = 0;
TSpinLock DMemoryManager::iCleanupLock(TSpinLock::EOrderGenericIrqHigh3);

void DMemoryManager::CleanupFunction(TAny*)
	{
	for(;;)
		{
		__SPIN_LOCK_IRQ(iCleanupLock);

		// get an object from queue...
		DMemoryObject* memory = iCleanupHead;
		if(!memory)
			{
			// none left, so end...
			__SPIN_UNLOCK_IRQ(iCleanupLock);
			return;
			}

		if(memory->iCleanupFlags&ECleanupDecommitted)
			{
			// object requires cleanup of decommitted pages...
			memory->iCleanupFlags &= ~ECleanupDecommitted;
			__SPIN_UNLOCK_IRQ(iCleanupLock);
			memory->iManager->DoCleanupDecommitted(memory);
			}
		else
			{
			// object has no more cleanup operations to perform,
			// so remove it from the cleanup queue...
			__NK_ASSERT_DEBUG(memory->iCleanupFlags==ECleanupIsQueued); // no operations left, just the flag to say it's in the cleanup queue
			memory->iCleanupFlags &= ~ECleanupIsQueued;
			iCleanupHead = memory->iCleanupNext;
			memory->iCleanupNext = NULL;
			__SPIN_UNLOCK_IRQ(iCleanupLock);

			// close reference which was added when object was queued...
			memory->Close();
			}
		}
	}


void DMemoryManager::QueueCleanup(DMemoryObject* aMemory, TCleanupOperationFlag aCleanupOp)
	{
	// add new cleanup operation...
	__SPIN_LOCK_IRQ(iCleanupLock);
	TUint32 oldFlags = aMemory->iCleanupFlags;
	aMemory->iCleanupFlags = oldFlags|aCleanupOp|ECleanupIsQueued;
	__SPIN_UNLOCK_IRQ(iCleanupLock);

	// if cleanup was already requested...
	if(oldFlags)
		return; // nothing more to do

	// increase reference count...
	aMemory->Open();

	// add object to cleanup queue...
	__SPIN_LOCK_IRQ(iCleanupLock);
	aMemory->iCleanupNext = iCleanupHead;
	iCleanupHead = aMemory;
	__SPIN_UNLOCK_IRQ(iCleanupLock);

	// queue cleanup function to run...
	Cleanup.Add((TMemoryCleanupCallback)CleanupFunction,0);
	}
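
// Note: iCleanupFlags does double duty under iCleanupLock - the operation bits
// record which cleanups are pending, while ECleanupIsQueued records that the
// object is already on the singly linked list headed by iCleanupHead. So
// QueueCleanup() only takes a reference (Open()) and links the object the
// first time a flag is set, and the matching Close() happens in
// CleanupFunction() once all operation bits have been consumed; an object is
// therefore queued and referenced at most once however many operations are
// requested.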


void DMemoryManager::DoCleanupDecommitted(DMemoryObject* aMemory)
	{
	TRACE2(("DMemoryManager::DoCleanupDecommitted(0x%08x)",aMemory));
	__NK_ASSERT_DEBUG(0);
	}


void DMemoryManager::ReAllocDecommitted(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
	{
	__NK_ASSERT_DEBUG(MemoryObjectLock::IsHeld(aMemory));

	// make iterator for region...
	RPageArray::TIter pageIter;
	aMemory->iPages.FindStart(aIndex,aCount,pageIter);

	for(;;)
		{
		// find some pages...
		RPageArray::TIter pageList;
		TUint n = pageIter.Find(pageList);
		if(!n)
			break;

		// check each existing page...
		RamAllocLock::Lock();
		TPhysAddr* pages;
		while(pageList.Pages(pages))
			{
			TPhysAddr page = *pages;
			if(RPageArray::State(page)==RPageArray::EDecommitted)
				{
				// decommitted pages need re-initialising...
				TPhysAddr pagePhys = page&~KPageMask;
				*pages = pagePhys|RPageArray::ECommitted;
				TheMmu.PagesAllocated(&pagePhys,1,aMemory->RamAllocFlags(),true);
				}
			pageList.Skip(1);
			}
		RamAllocLock::Unlock();

		// move on...
		pageIter.FindRelease(n);
		}

	aMemory->iPages.FindEnd(aIndex,aCount);
	}


void DMemoryManager::FreeDecommitted(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
	{
	TRACE2(("DMemoryManager::FreeDecommitted(0x%08x,0x%x,0x%x)",aMemory, aIndex, aCount));
	__NK_ASSERT_DEBUG(MemoryObjectLock::IsHeld(aMemory));

	// make iterator for region...
	RPageArray::TIter pageIter;
	aMemory->iPages.FindStart(aIndex,aCount,pageIter);

	for(;;)
		{
		// find some pages...
		RPageArray::TIter pageList;
		TUint findCount = pageIter.Find(pageList);
		if(!findCount)
			break;

		// search for decommitted pages...
		RamAllocLock::Lock();
		TPhysAddr* pages;
		TUint numPages;
		while((numPages=pageList.Pages(pages))!=0)
			{
			TUint n=0;
			if(RPageArray::State(pages[n])!=RPageArray::EDecommitted)
				{
				// skip pages which aren't EDecommitted...
				while(++n<numPages && RPageArray::State(pages[n])!=RPageArray::EDecommitted)
					{}
				}
			else
				{
				// find range of pages which are EDecommitted...
				while(++n<numPages && RPageArray::State(pages[n])==RPageArray::EDecommitted)
					{}
				RPageArray::TIter decommittedList(pageList.Left(n));

				// free pages...
				TUint freedCount = FreePages(aMemory,decommittedList);
				(void)freedCount;
				TRACE2(("DMemoryManager::FreeDecommitted(0x%08x) freed %d in 0x%x..0x%x",aMemory,freedCount,decommittedList.Index(),decommittedList.IndexEnd()));
				}
			pageList.Skip(n);
			}
		RamAllocLock::Unlock();

		// move on...
		pageIter.FindRelease(findCount);
		}

	aMemory->iPages.FindEnd(aIndex,aCount);
	}


void DMemoryManager::DoFree(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
	{
	TRACE2(("DMemoryManager::DoFree(0x%08x,0x%x,0x%x)",aMemory, aIndex, aCount));
	__NK_ASSERT_DEBUG(MemoryObjectLock::IsHeld(aMemory));

	RPageArray::TIter pageIter;
	aMemory->iPages.FindStart(aIndex,aCount,pageIter);

	for(;;)
		{
		// find some pages...
		RPageArray::TIter pageList;
		TUint n = pageIter.RemoveFind(pageList);
		if(!n)
			break;

		// free pages...
		FreePages(aMemory,pageList);

		// move on...
		pageIter.FindRelease(n);
		}

	aMemory->iPages.FindEnd(aIndex,aCount);
	}


TInt DMemoryManager::FreePages(DMemoryObject* aMemory, RPageArray::TIter aPageList)
	{
	// unmap the pages...
	aMemory->UnmapPages(aPageList,true);

	RamAllocLock::Lock();

	// remove and free pages...
	Mmu& m = TheMmu;
	TUint count = 0;
	TPhysAddr pages[KMaxPagesInOneGo];
	TUint n;
	while((n=aPageList.Remove(KMaxPagesInOneGo,pages))!=0)
		{
		count += n;
		m.FreeRam(pages, n, aMemory->iManager->PageType());
		}

	RamAllocLock::Unlock();

	return count;
	}
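
// Note: the fixed pages[KMaxPagesInOneGo] buffer lets Remove() hand back
// physical addresses in bounded bursts, so an arbitrarily large region can be
// freed with constant stack usage; the same KMaxPagesInOneGo batching pattern
// recurs throughout this file.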


/**
Manager for memory objects containing normal unpaged program memory (RAM), which
is allocated from a system-wide pool. The physical pages allocated to this
memory are fixed until explicitly freed.

This is normally used for kernel memory and any other situation where it
is not permissible for memory accesses to generate page faults of any kind.
*/
class DUnpagedMemoryManager : public DMemoryManager
	{
public:
	// from DMemoryManager...
	virtual void Destruct(DMemoryObject* aMemory);
	virtual TInt Alloc(DMemoryObject* aMemory, TUint aIndex, TUint aCount);
	virtual TInt AllocContiguous(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TUint aAlign, TPhysAddr& aPhysAddr);
	virtual void Free(DMemoryObject* aMemory, TUint aIndex, TUint aCount);
	virtual TInt Pin(DMemoryObject* aMemory, DMemoryMappingBase* aMapping, TPinArgs& aPinArgs);
	virtual void Unpin(DMemoryObject* aMemory, DMemoryMappingBase* aMapping, TPinArgs& aPinArgs);
	virtual TInt Wipe(DMemoryObject* aMemory);
	virtual TZonePageType PageType();

private:
	// from DMemoryManager...
	virtual void DoCleanupDecommitted(DMemoryObject* aMemory);

	/**
	Implementation helper for #Alloc.
	*/
	static TInt AllocPages(DMemoryObject* aMemory, RPageArray::TIter aPageList);

	/**
	Implementation helper for #AllocContiguous.
	*/
	static TInt AllocContiguousPages(DMemoryObject* aMemory, RPageArray::TIter aPageList, TUint aAlign, TPhysAddr& aPhysAddr);

	/**
	Implementation helper for #Wipe.
	*/
	static void WipePages(DMemoryObject* aMemory, RPageArray::TIter aPageList);

public:
	/**
	The single instance of this manager class.
	*/
	static DUnpagedMemoryManager TheManager;
	};
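
// Minimal usage sketch (illustrative only; the attribute and flag values shown
// are assumptions, not taken from this file):
//
//	DMemoryObject* memory = NULL;
//	TInt r = TheUnpagedMemoryManager->New(memory, 16, EMemoryAttributeStandard, EMemoryCreateDefault);
//	if(r==KErrNone)
//		{
//		MemoryObjectLock::Lock(memory);				// Alloc() asserts this lock is held
//		r = memory->iManager->Alloc(memory, 0, 16);	// commit all 16 pages
//		MemoryObjectLock::Unlock(memory);
//		}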


DUnpagedMemoryManager DUnpagedMemoryManager::TheManager;
DMemoryManager* TheUnpagedMemoryManager = &DUnpagedMemoryManager::TheManager;


void DUnpagedMemoryManager::Destruct(DMemoryObject* aMemory)
	{
	MemoryObjectLock::Lock(aMemory);
	Free(aMemory,0,aMemory->iSizeInPages);
	MemoryObjectLock::Unlock(aMemory);
	aMemory->Close();
	}


TInt DUnpagedMemoryManager::Alloc(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
	{
	TRACE2(("DUnpagedMemoryManager::Alloc(0x%08x,0x%x,0x%x)",aMemory, aIndex, aCount));
	__NK_ASSERT_DEBUG(MemoryObjectLock::IsHeld(aMemory));

	// re-initialise any decommitted pages which we may still own because they were pinned...
	ReAllocDecommitted(aMemory,aIndex,aCount);

	// check and allocate page array entries...
	RPageArray::TIter pageList;
	TInt r = aMemory->iPages.AddStart(aIndex,aCount,pageList,true);
	if(r!=KErrNone)
		return r;

	// allocate RAM and add it to page array...
	r = AllocPages(aMemory,pageList);

	// map pages...
	if(r==KErrNone)
		r = aMemory->MapPages(pageList);

	// release page array entries...
	aMemory->iPages.AddEnd(aIndex,aCount);

	// revert if error...
	if(r!=KErrNone)
		Free(aMemory,aIndex,aCount);

	return r;
	}


TInt DUnpagedMemoryManager::AllocContiguous(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TUint aAlign, TPhysAddr& aPhysAddr)
	{
	TRACE2(("DUnpagedMemoryManager::AllocContiguous(0x%08x,0x%x,0x%x,%d,?)",aMemory, aIndex, aCount, aAlign));
	__NK_ASSERT_DEBUG(MemoryObjectLock::IsHeld(aMemory));

	// set invalid memory in case of error...
	aPhysAddr = KPhysAddrInvalid;

	// check and allocate page array entries...
	RPageArray::TIter pageList;
	TInt r = aMemory->iPages.AddStart(aIndex,aCount,pageList);
	if(r!=KErrNone)
		return r;

	// allocate memory...
	TPhysAddr physAddr;
	r = AllocContiguousPages(aMemory, pageList, aAlign, physAddr);

	// map memory...
	if(r==KErrNone)
		{
		r = aMemory->MapPages(pageList);
		if(r==KErrNone)
			aPhysAddr = physAddr;
		}

	// release page array entries...
	aMemory->iPages.AddEnd(aIndex,aCount);

	// revert if error...
	if(r!=KErrNone)
		Free(aMemory,aIndex,aCount);

	return r;
	}


void DUnpagedMemoryManager::Free(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
	{
	DoFree(aMemory,aIndex,aCount);
	}


TInt DUnpagedMemoryManager::AllocPages(DMemoryObject* aMemory, RPageArray::TIter aPageList)
	{
	TInt r = KErrNone;
	RamAllocLock::Lock();

	Mmu& m = TheMmu;
	for(;;)
		{
		// find entries in page array to allocate...
		RPageArray::TIter allocList;
		TUint n = aPageList.AddFind(allocList);
		if(!n)
			break;

		do
			{
			// allocate ram...
			TPhysAddr pages[KMaxPagesInOneGo];
			if(n>KMaxPagesInOneGo)
				n = KMaxPagesInOneGo;
			r = m.AllocRam(pages, n, aMemory->RamAllocFlags(), aMemory->iManager->PageType());
			if(r!=KErrNone)
				goto done;

			// assign pages to memory object...
			{
			TUint index = allocList.Index();
			TUint flags = aMemory->PageInfoFlags();
			TUint i=0;
			MmuLock::Lock();
			do
				{
				SPageInfo* pi = SPageInfo::FromPhysAddr(pages[i]);
				pi->SetManaged(aMemory,index+i,flags);
				}
			while(++i<n);
			MmuLock::Unlock();
			}

			// add pages to page array...
			allocList.Add(n,pages);
			}
		while((n=allocList.Count())!=0);
		}
done:
	RamAllocLock::Unlock();
	return r;
	}
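
// Note: AddFind() locates the next run of page array entries still needing
// RAM, and the inner do/while fills that run in KMaxPagesInOneGo batches; the
// goto unwinds to the single RamAllocLock::Unlock() on allocation failure,
// leaving the caller (Alloc()) to revert the partially committed region via
// Free().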


TInt DUnpagedMemoryManager::AllocContiguousPages(DMemoryObject* aMemory, RPageArray::TIter aPageList, TUint aAlign, TPhysAddr& aPhysAddr)
	{
	TUint size = aPageList.Count();
	RamAllocLock::Lock();

	// allocate memory...
	Mmu& m = TheMmu;
	TPhysAddr physAddr;
	TInt r = m.AllocContiguousRam(physAddr, size, aAlign, aMemory->RamAllocFlags());
	if(r==KErrNone)
		{
		// assign pages to memory object...
		TUint index = aPageList.Index();
		TUint flags = aMemory->PageInfoFlags();
		SPageInfo* pi = SPageInfo::FromPhysAddr(physAddr);
		SPageInfo* piEnd = pi+size;
		TUint flash = 0;
		MmuLock::Lock();
		while(pi<piEnd)
			{
			MmuLock::Flash(flash,KMaxPageInfoUpdatesInOneGo);
			pi->SetManaged(aMemory,index++,flags);
			++pi;
			}
		MmuLock::Unlock();

		// add pages to page array...
		aPageList.AddContiguous(size,physAddr);

		// set result...
		aPhysAddr = physAddr;
		}

	RamAllocLock::Unlock();
	return r;
	}
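
// Note: MmuLock::Flash() periodically releases and immediately re-acquires the
// MmuLock once its counter reaches the given limit, bounding how long other
// threads can be kept waiting while a long run of SPageInfo entries is updated.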


TInt DUnpagedMemoryManager::Pin(DMemoryObject* aMemory, DMemoryMappingBase* aMapping, TPinArgs& aPinArgs)
	{
	RPageArray::TIter pageList;
	aMemory->iPages.FindStart(aMapping->iStartIndex,aMapping->iSizeInPages,pageList);

	MmuLock::Lock();

	TUint n;
	TPhysAddr* pages;
	TUint flash = 0;
	while((n=pageList.Pages(pages,KMaxPageInfoUpdatesInOneGo))!=0)
		{
		TPhysAddr* p = pages;
		TPhysAddr* pEnd = p+n;
		do
			{
			TPhysAddr page = *p++;
			if(RPageArray::TargetStateIsDecommitted(page))
				goto stop; // page is being decommitted, so can't pin it
			}
		while(p!=pEnd);
		pageList.Skip(n);
		flash += n;
		MmuLock::Flash(flash,KMaxPageInfoUpdatesInOneGo);
		}
stop:
	MmuLock::Unlock();

	aMemory->iPages.FindEnd(aMapping->iStartIndex,aMapping->iSizeInPages);

	return pageList.Count() ? KErrNotFound : KErrNone;
	}


void DUnpagedMemoryManager::Unpin(DMemoryObject* aMemory, DMemoryMappingBase* aMapping, TPinArgs& aPinArgs)
	{
	}


void DUnpagedMemoryManager::DoCleanupDecommitted(DMemoryObject* aMemory)
	{
	MemoryObjectLock::Lock(aMemory);
	FreeDecommitted(aMemory,0,aMemory->iSizeInPages);
	MemoryObjectLock::Unlock(aMemory);
	}


TInt DUnpagedMemoryManager::Wipe(DMemoryObject* aMemory)
	{
	__NK_ASSERT_DEBUG(MemoryObjectLock::IsHeld(aMemory));

	// make iterator for region...
	RPageArray::TIter pageIter;
	aMemory->iPages.FindStart(0,aMemory->iSizeInPages,pageIter);

	for(;;)
		{
		// find some pages...
		RPageArray::TIter pageList;
		TUint n = pageIter.Find(pageList);
		if(!n)
			break;

		// wipe some pages...
		WipePages(aMemory,pageList);

		// move on...
		pageIter.FindRelease(n);
		}

	aMemory->iPages.FindEnd(0,aMemory->iSizeInPages);

	return KErrNone;
	}


void DUnpagedMemoryManager::WipePages(DMemoryObject* aMemory, RPageArray::TIter aPageList)
	{
	TUint index = aPageList.Index();
	TUint count = aPageList.Count();
	TRACE(("DUnpagedMemoryManager::WipePages(0x%08x,0x%x,0x%x)",aMemory,index,count));

	__NK_ASSERT_ALWAYS(!aMemory->IsReadOnly()); // trap wiping read-only memory

	RamAllocLock::Lock();

	while(count)
		{
		// get some physical page addresses...
		TPhysAddr pages[KMaxPagesInOneGo];
		TPhysAddr physAddr;
		TUint n = count;
		if(n>KMaxPagesInOneGo)
			n = KMaxPagesInOneGo;
		TInt r = aMemory->iPages.PhysAddr(index,n,physAddr,pages);
		__NK_ASSERT_ALWAYS(r>=0); // caller should have ensured all pages are present

		// wipe some pages...
		TPhysAddr* pagesToWipe = r!=0 ? pages : (TPhysAddr*)((TLinAddr)physAddr|1);
		TheMmu.PagesAllocated(pagesToWipe,n,aMemory->RamAllocFlags(),true);

		// move on...
		index += n;
		count -= n;
		}

	RamAllocLock::Unlock();
	}
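
// Note (an inference from the call above, not stated in this file): PhysAddr()
// reports a single contiguous run by setting physAddr and returning 0, and
// discontiguous pages by filling pages[] and returning non-zero; setting bit 0
// of the contiguous address appears to be the convention by which
// PagesAllocated() distinguishes "contiguous run starting here" from a pointer
// to a page list.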


TZonePageType DUnpagedMemoryManager::PageType()
	{// Unpaged memory cannot be moved or discarded, therefore it is fixed.
	return EPageFixed;
	}


/**
Manager for memory objects containing normal unpaged RAM, as
#DUnpagedMemoryManager, but which may be 'moved' by RAM
defragmentation, i.e. have the physical pages used to store its contents
substituted for others.

Such memory may cause transient page faults if it is accessed whilst its
contents are being moved; this makes it unsuitable for most kernel-side
usage. This is the memory management scheme normally used for unpaged user
memory.
*/
class DMovableMemoryManager : public DUnpagedMemoryManager
	{
public:
	// from DMemoryManager...
	virtual TInt MovePage(DMemoryObject* aMemory, SPageInfo* aOldPageInfo, TPhysAddr& aNewPage, TUint aBlockZoneId, TBool aBlockRest);
	virtual TInt HandleFault(	DMemoryObject* aMemory, TUint aIndex, DMemoryMapping* aMapping, 
								TUint aMapInstanceCount, TUint aAccessPermissions);
	virtual TZonePageType PageType();
public:
	/**
	The single instance of this manager class.
	*/
	static DMovableMemoryManager TheManager;
	};


DMovableMemoryManager DMovableMemoryManager::TheManager;
DMemoryManager* TheMovableMemoryManager = &DMovableMemoryManager::TheManager;


TInt DMovableMemoryManager::MovePage(	DMemoryObject* aMemory, SPageInfo* aOldPageInfo, 
										TPhysAddr& aNewPage, TUint aBlockZoneId, TBool aBlockRest)
	{
	__NK_ASSERT_DEBUG(RamAllocLock::IsHeld());

	// Allocate the new page to move to, ensuring that we use the page type of the
	// manager assigned to this page.
	TPhysAddr newPage;
	Mmu& m = TheMmu;
	TInt r = m.AllocRam(&newPage, 1, aMemory->RamAllocFlags(), aMemory->iManager->PageType(), 
						aBlockZoneId, aBlockRest);
	if (r != KErrNone)
		{// Failed to allocate a new page to move the page to, so can't continue.
		return r;
		}

	r = KErrInUse;
	MmuLock::Lock();

	TUint index = aOldPageInfo->Index();
	TRACE(	("DMovableMemoryManager::MovePage(0x%08x,0x%08x,?,0x%08x,%d) index=0x%x",
			aMemory,aOldPageInfo,aBlockZoneId,aBlockRest,index));
	__NK_ASSERT_DEBUG(aMemory==aOldPageInfo->Owner());

	// Mark the page as being moved and get a pointer to the page array entry.
	RPageArray::TIter pageIter;
	TPhysAddr* const movingPageArrayPtr = aMemory->iPages.MovePageStart(index, pageIter);
	if (!movingPageArrayPtr)
		{// Can't move the page; another operation is being performed on it.
		MmuLock::Unlock();
		TheMmu.FreeRam(&newPage, 1, aMemory->iManager->PageType());
		return r;
		}
	__NK_ASSERT_DEBUG(RPageArray::IsPresent(*movingPageArrayPtr));
	TPhysAddr oldPageEntry = *movingPageArrayPtr;
	TPhysAddr oldPage = oldPageEntry & ~KPageMask;
#ifdef _DEBUG
	if (oldPage != aOldPageInfo->PhysAddr())
		{// The address of the page array entry and the page info should match except
		// when the page is being shadowed.
		__NK_ASSERT_DEBUG(SPageInfo::FromPhysAddr(oldPage)->Type() == SPageInfo::EShadow);
		}
#endif
	__NK_ASSERT_DEBUG((newPage & KPageMask) == 0);
	__NK_ASSERT_DEBUG(newPage != oldPage);

	// Set the modifier so we can detect if the page state is updated.
	aOldPageInfo->SetModifier(&pageIter);

	// Restrict the page ready for moving.
	// Read-only memory objects don't need to be restricted but we still need
	// to discover any physically pinned mappings.
	TBool pageRestrictedNA = !aMemory->IsReadOnly();
	TRestrictPagesType restrictType = pageRestrictedNA ?
										ERestrictPagesNoAccessForMoving :
										ERestrictPagesForMovingFlag;

	// This page's contents may be changed so restrict the page to no access 
	// so we can detect any access to it while we are moving it.
	MmuLock::Unlock();
	// This will clear the memory object's mapping-added flag so we can detect any new mappings.
	aMemory->RestrictPages(pageIter, restrictType);

	const TUint KOldMappingSlot = 0;
	const TUint KNewMappingSlot = 1;
	const TAny* tmpPtrOld = NULL;
	TAny* tmpPtrNew;
	// Verify that page restricting wasn't interrupted; if it was, the page 
	// can't be moved, so remap it.
	// If the page array entry (*movingPageArrayPtr) has been modified then a pinning 
	// vetoed the preparation.
	MmuLock::Lock();
	if (aOldPageInfo->CheckModified(&pageIter) || oldPageEntry != *movingPageArrayPtr)
		{// Page is pinned or has been modified by another operation.
		MmuLock::Unlock();
		TheMmu.FreeRam(&newPage, 1, aMemory->iManager->PageType());
		goto remap;
		}

	MmuLock::Unlock();
	// Copy the contents of the page using some temporary mappings.
	tmpPtrOld = (TAny*)TheMmu.MapTemp(oldPage, index, KOldMappingSlot);
	tmpPtrNew = (TAny*)TheMmu.MapTemp(newPage, index, KNewMappingSlot);
	pagecpy(tmpPtrNew, tmpPtrOld);

	// Unmap and perform cache maintenance if the memory object is executable.
	// Must do cache maintenance before we add any new mappings to the new page 
	// to ensure that any old instruction cache entries for the new page aren't 
	// picked up by any remapped executable mappings.
	if (aMemory->IsExecutable())
		CacheMaintenance::CodeChanged((TLinAddr)tmpPtrNew, KPageSize);
	TheMmu.UnmapTemp(KNewMappingSlot);
#ifndef _DEBUG
	TheMmu.UnmapTemp(KOldMappingSlot);
#endif

	MmuLock::Lock();
	if (!aOldPageInfo->CheckModified(&pageIter) && oldPageEntry == *movingPageArrayPtr &&
		!aMemory->MappingAddedFlag())
		{
		// The page has been copied without anyone modifying it so set the page 
		// array entry to the new physical address and map the page.
		RPageArray::PageMoveNewAddr(*movingPageArrayPtr, newPage);

		// Copy across the page info data from the old page to the new.
		SPageInfo& newPageInfo = *SPageInfo::FromPhysAddr(newPage);
		newPageInfo = *aOldPageInfo;
		if (aMemory->IsDemandPaged())
			{// Let the pager deal with the live list links for this page if required.
			ThePager.ReplacePage(*aOldPageInfo, newPageInfo);
			}

		MmuLock::Unlock();
		r = KErrNone;
		aNewPage = newPage;
		}
	else
		{
		MmuLock::Unlock();
		TheMmu.FreeRam(&newPage, 1, aMemory->iManager->PageType());
		}
remap:
	// Remap all mappings to the new physical address if the move was successful or
	// back to the old page if the move failed.
	// Invalidate the TLB for the page if old mappings still exist or new
	// mappings were added but will be removed as the page can't be moved.
	TBool invalidateTLB = !pageRestrictedNA || r != KErrNone;
	aMemory->RemapPage(*movingPageArrayPtr, index, invalidateTLB);

	if (r == KErrNone)
		{// Must wait until here as read-only memory objects' mappings aren't 
		// all guaranteed to point to the new page until after RemapPage().
		TheMmu.FreeRam(&oldPage, 1, aMemory->iManager->PageType());
#ifdef _DEBUG
		// For testing purposes clear the old page to help detect any 
		// erroneous mappings to the old page.
		memclr((TAny*)tmpPtrOld, KPageSize);
		}
	TheMmu.UnmapTemp(KOldMappingSlot);	// Will invalidate the TLB entry for the mapping.
#else
		}
#endif
	// indicate we've stopped moving memory now...
	MmuLock::Lock();
	RPageArray::MovePageEnd(*movingPageArrayPtr);
	MmuLock::Unlock();

	return r;
	}
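
// Protocol summary: a move is (1) allocate a replacement page, (2) mark the
// array entry as moving and restrict existing mappings, (3) copy the contents
// via temporary mappings, (4) re-check under the MmuLock that nothing touched
// the page (modifier, array entry and mapping-added flag all unchanged) before
// swapping in the new physical address, then (5) remap mappings and free
// whichever page lost. Any interference fails the move with KErrInUse, leaving
// the original page in place.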


TInt DMovableMemoryManager::HandleFault(DMemoryObject* aMemory, TUint aIndex, DMemoryMapping* aMapping, 
										TUint aMapInstanceCount, TUint aAccessPermissions)
	{
	TInt r = KErrNotFound;
	SPageInfo* pageInfo;
	MmuLock::Lock();
	__UNLOCK_GUARD_START(MmuLock);
	TPhysAddr* const pageEntry = aMemory->iPages.PageEntry(aIndex);
	if (!pageEntry || !RPageArray::IsPresent(*pageEntry) || 
		aMapInstanceCount != aMapping->MapInstanceCount() || aMapping->BeingDetached())
		{// The page isn't present or has been unmapped, so this is an invalid access.
		goto exit;
		}

	if (aMapping->MovingPageIn(*pageEntry, aIndex))
		{// The page has been paged in as it was still mapped.
		pageInfo = SPageInfo::FromPhysAddr(*pageEntry & ~KPageMask);
		pageInfo->SetModifier(0); // Signal to MovePage() that the page has been paged in.
		r = KErrNone;
		}

exit:
	__UNLOCK_GUARD_END(MmuLock);
	MmuLock::Unlock();
	return r;
	}


TZonePageType DMovableMemoryManager::PageType()
	{// Movable memory object pages are movable.
	return EPageMovable;
	}


/**
Manager for memory objects containing normal unpaged RAM, which
as well as being 'movable', like #DMovableMemoryManager,
may also have regions marked as 'discardable'. Discardable pages may be
reclaimed (removed) by the system at any time; this state is controlled using
the functions #AllowDiscard and #DisallowDiscard.
<P>
This is used for the memory containing file system caches. Discardable memory
is managed using mechanisms similar to those for demand paged memory:
discardable pages are donated to the pager's live list, from which the system
may steal and reuse them when memory is needed elsewhere.
*/
class DDiscardableMemoryManager : public DMovableMemoryManager
	{
public:
	// from DMemoryManager...
	virtual TInt AllowDiscard(DMemoryObject* aMemory, TUint aIndex, TUint aCount);
	virtual TInt DisallowDiscard(DMemoryObject* aMemory, TUint aIndex, TUint aCount);
	virtual TInt StealPage(DMemoryObject* aMemory, SPageInfo* aPageInfo);
	virtual TInt RestrictPage(DMemoryObject* aMemory, SPageInfo* aPageInfo, TRestrictPagesType aRestriction);
	virtual TZonePageType PageType();
public:
	/**
	The single instance of this manager class.
	*/
	static DDiscardableMemoryManager TheManager;
	};


DDiscardableMemoryManager DDiscardableMemoryManager::TheManager;
DMemoryManager* TheDiscardableMemoryManager = &DDiscardableMemoryManager::TheManager;


TInt DDiscardableMemoryManager::AllowDiscard(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
	{
	TRACE2(("DDiscardableMemoryManager::AllowDiscard(0x%08x,0x%x,0x%x)",aMemory, aIndex, aCount));
	__NK_ASSERT_DEBUG(MemoryObjectLock::IsHeld(aMemory));

	// make iterator for region...
	RPageArray::TIter pageIter;
	aMemory->iPages.FindStart(aIndex,aCount,pageIter);

	for(;;)
		{
		// find some pages...
		RPageArray::TIter pageList;
		TUint nFound = pageIter.Find(pageList);
		if(!nFound)
			break;

		// donate pages...
		TUint n;
		TPhysAddr* pages;
		while((n=pageList.Pages(pages,KMaxPagesInOneGo))!=0)
			{
			pageList.Skip(n);
			ThePager.DonatePages(n,pages);
			}

		// move on...
		pageIter.FindRelease(nFound);
		}

	// done...
	aMemory->iPages.FindEnd(aIndex,aCount);

	return KErrNone;
	}


TInt DDiscardableMemoryManager::DisallowDiscard(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
	{
	TRACE2(("DDiscardableMemoryManager::DisallowDiscard(0x%08x,0x%x,0x%x)",aMemory, aIndex, aCount));
	__NK_ASSERT_DEBUG(MemoryObjectLock::IsHeld(aMemory));

	TInt r = KErrNone;

	// get pages...
	RPageArray::TIter pageIter;
	aMemory->iPages.FindStart(aIndex,aCount,pageIter);

	RPageArray::TIter pageList;
	TUint numPages = pageIter.Find(pageList);

	if(numPages!=aCount)
		{
		// not all pages are present...
		r = KErrNotFound;
		}
	else
		{
		TUint n;
		TPhysAddr* pages;
		while((n=pageList.Pages(pages,KMaxPagesInOneGo))!=0)
			{
			pageList.Skip(n);
			r = ThePager.ReclaimPages(n,pages);
			if(r!=KErrNone)
				break;
			}
		}

	// done with pages...
	if(numPages)
		pageIter.FindRelease(numPages);
	aMemory->iPages.FindEnd(aIndex,aCount);

	return r;
	}


TInt DDiscardableMemoryManager::StealPage(DMemoryObject* aMemory, SPageInfo* aPageInfo)
	{
	TRACE2(("DDiscardableMemoryManager::StealPage(0x%08x,0x%08x)",aMemory,aPageInfo));
	__NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
	__UNLOCK_GUARD_START(MmuLock);

	TUint index = aPageInfo->Index();
	TInt r;

	RPageArray::TIter pageList;
	TPhysAddr* p = aMemory->iPages.StealPageStart(index,pageList);
	__NK_ASSERT_DEBUG((*p&~KPageMask)==aPageInfo->PhysAddr()); // object should have our page

	aPageInfo->SetModifier(&pageList);

	__UNLOCK_GUARD_END(MmuLock);
	MmuLock::Unlock();

	// unmap the page...
	aMemory->UnmapPages(pageList,false);

	MmuLock::Lock();

	__NK_ASSERT_DEBUG((*p&~KPageMask)==aPageInfo->PhysAddr()); // object should still have our page because freeing a page requires the RamAllocLock, which we hold

	if(aPageInfo->CheckModified(&pageList))
		{
		// page state was changed; this can only happen if a page fault put this page
		// back into the committed state or if the page was pinned.
		// From either of these states it's possible to subsequently change
		// to any other state or use (so we can't assert anything here).
		r = KErrInUse;
		}
	else
		{
		// nobody else has modified page state, so we can...
		TPhysAddr page = *p;
		__NK_ASSERT_DEBUG(RPageArray::TargetStateIsDecommitted(page));
		if(page&RPageArray::EUnmapVetoed)
			{
			// operation was vetoed, which means the page had a pinned mapping but the pin
			// operation hadn't got around to removing the page from the live list;
			// we need to restore the correct state...
			if(RPageArray::State(page)==RPageArray::EStealing)
				*p = (page&~(RPageArray::EStateMask|RPageArray::EUnmapVetoed))|RPageArray::ECommitted;
			// else
			//	   leave the page in the state it was in before we attempted to steal it

			// put page back on live list so it doesn't get lost.
			// We put it at the start as if it were recently accessed, because being pinned
			// counts as an access; we can't put it anywhere else, otherwise when
			// page stealing retries it may get this same page again, potentially causing
			// deadlock.
			__NK_ASSERT_DEBUG(aPageInfo->PagedState()==SPageInfo::EUnpaged); // no one else has changed page since we removed it in DPager::StealPage
			ThePager.PagedIn(aPageInfo);

			r = KErrInUse;
			}
		else
			{
			// page successfully unmapped...
			aPageInfo->SetReadOnly(); // page not mapped, so must be read-only

			// if the page can be made clean...
			r = aMemory->iManager->CleanPage(aMemory,aPageInfo,p);

			if(r==KErrNone)
				{
				// page successfully stolen...
				__NK_ASSERT_DEBUG((*p^page)<(TUint)KPageSize); // sanity check, page should still be allocated to us
				__NK_ASSERT_DEBUG(aPageInfo->IsDirty()==false);
				__NK_ASSERT_DEBUG(aPageInfo->IsWritable()==false);

				TPhysAddr pagerInfo = aPageInfo->PagingManagerData();
				*p = pagerInfo;
				__NK_ASSERT_ALWAYS((pagerInfo&(RPageArray::EFlagsMask|RPageArray::EStateMask)) == RPageArray::ENotPresent);

				TheMmu.PageFreed(aPageInfo);
				}
			else
				{
				// the only legitimate reason for failing the clean is if the page state was
				// changed by a page fault or by pinning; this should return KErrInUse...
				__NK_ASSERT_DEBUG(r==KErrInUse);
				}
			}
		}

	aMemory->iPages.StealPageEnd(index,r==KErrNone ? 1 : 0);

#ifdef _DEBUG
	if(r!=KErrNone)
		TRACE2(("DDiscardableMemoryManager::StealPage failed because preempted"));
#endif

	TRACE2(("DDiscardableMemoryManager::StealPage returns %d",r));
	return r;
	}
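
// Note: SetModifier() stamps the page with a token before the MmuLock is
// dropped for the unmap; CheckModified() then reports whether any other path
// (a page fault or pinning) touched the page in that window. This modifier
// handshake, rather than holding the lock throughout, is what lets StealPage()
// unmap without blocking the rest of the system.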


TInt DDiscardableMemoryManager::RestrictPage(DMemoryObject* aMemory, SPageInfo* aPageInfo, TRestrictPagesType aRestriction)
	{
	if(aRestriction==ERestrictPagesNoAccessForOldPage)
		{
		// Lie to the pager when it sets an old page inaccessible, as we don't want to
		// rejuvenate the page if it is accessed; RChunk::Lock() should be used to remove
		// the page from the live list before the page is accessed.
		return KErrNone;
		}
	return DMovableMemoryManager::RestrictPage(aMemory, aPageInfo, aRestriction);
	}


TZonePageType DDiscardableMemoryManager::PageType()
	{// Discardable memory object pages are movable unless they are donated to the pager.
	return EPageMovable;
	}



/**
Manager for memory objects containing memory mapped hardware devices or special
purpose memory for which the physical addresses are fixed.
*/
class DHardwareMemoryManager : public DMemoryManager
	{
public:
	// from DMemoryManager...
	virtual void Destruct(DMemoryObject* aMemory);
	virtual TInt AddPages(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TPhysAddr* aPages);
	virtual TInt AddContiguous(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TPhysAddr aPhysAddr);
	virtual TInt RemovePages(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TPhysAddr* aPages);
	virtual TInt Pin(DMemoryObject* aMemory, DMemoryMappingBase* aMapping, TPinArgs& aPinArgs);
	virtual void Unpin(DMemoryObject* aMemory, DMemoryMappingBase* aMapping, TPinArgs& aPinArgs);

private:
	/**
	Update the page information structure for RAM added with #AddPages and #AddContiguous.

	This performs debug checks to ensure that any physical memory which is added to more than
	one memory object meets the restrictions imposed by the MMU and cache hardware.
	It also verifies that the RAM pages are of type SPageInfo::EPhysAlloc,
	i.e. were allocated with Epoc::AllocPhysicalRam or similar.

	This is only used when the physical addresses of the pages being added to a memory
	object correspond to RAM being managed by the kernel, i.e. physical addresses
	with an associated #SPageInfo structure.

	@param aMemory			A memory object associated with this manager.
	@param aIndex			Page index, within the memory, for the page.
	@param aPageInfo		The page information structure of the RAM page.

	@pre #MmuLock held.
	@post #MmuLock held.
	*/
	static void AssignPage(DMemoryObject* aMemory, TUint aIndex, SPageInfo* aPageInfo);

	/**
	Update the page information structure for RAM removed with #RemovePages.

	This is only used when the physical addresses of the pages being removed from a memory
	object correspond to RAM being managed by the kernel, i.e. physical addresses
	with an associated #SPageInfo structure.

	@param aMemory			A memory object associated with this manager.
	@param aIndex			Page index, within the memory, for the page.
	@param aPageInfo		The page information structure of the RAM page.

	@pre #MmuLock held.
	@post #MmuLock held.
	*/
	static void UnassignPage(DMemoryObject* aMemory, TUint aIndex, SPageInfo* aPageInfo);

public:
	/**
	The single instance of this manager class.
	*/
	static DHardwareMemoryManager TheManager;
	};


DHardwareMemoryManager DHardwareMemoryManager::TheManager;
DMemoryManager* TheHardwareMemoryManager = &DHardwareMemoryManager::TheManager;


void DHardwareMemoryManager::Destruct(DMemoryObject* aMemory)
	{
	MemoryObjectLock::Lock(aMemory);
	RemovePages(aMemory,0,aMemory->iSizeInPages,0);
	MemoryObjectLock::Unlock(aMemory);
	aMemory->Close();
	}


TInt DHardwareMemoryManager::AddPages(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TPhysAddr* aPages)
	{
	TRACE2(("DHardwareMemoryManager::AddPages(0x%08x,0x%x,0x%x,?)",aMemory, aIndex, aCount));
	__NK_ASSERT_DEBUG(MemoryObjectLock::IsHeld(aMemory));

	// validate arguments...
	TPhysAddr* pages = aPages;
	TPhysAddr* pagesEnd = aPages+aCount;
	TPhysAddr checkMask = 0;
	do checkMask |= *pages++;
	while(pages<pagesEnd);
	if(checkMask&KPageMask)
		return KErrArgument;

	// check and allocate page array entries...
	RPageArray::TIter pageIter;
	TInt r = aMemory->iPages.AddStart(aIndex,aCount,pageIter);
	if(r!=KErrNone)
		return r;

	// assign pages...
	pages = aPages;
	TUint index = aIndex;
	TUint flash = 0;
	MmuLock::Lock();
	do
		{
		MmuLock::Flash(flash,KMaxPageInfoUpdatesInOneGo/2); // flash twice as often because we're doing about twice the work of a simple page info update
		TPhysAddr pagePhys = *pages++;
		SPageInfo* pi = SPageInfo::SafeFromPhysAddr(pagePhys);
		if(pi)
			AssignPage(aMemory,index,pi);
		++index;
		}
	while(pages<pagesEnd);
	MmuLock::Unlock();

	// map the pages...
	RPageArray::TIter pageList = pageIter;
	pageIter.Add(aCount,aPages);
	r = aMemory->MapPages(pageList);

	// release page array entries...
	aMemory->iPages.AddEnd(aIndex,aCount);

	// revert if error...
	if(r!=KErrNone)
		RemovePages(aMemory,aIndex,aCount,0);

	return r;
	}


TInt DHardwareMemoryManager::AddContiguous(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TPhysAddr aPhysAddr)
	{
	TRACE2(("DHardwareMemoryManager::AddContiguous(0x%08x,0x%x,0x%x,0x%08x)",aMemory, aIndex, aCount, aPhysAddr));
	__NK_ASSERT_DEBUG(MemoryObjectLock::IsHeld(aMemory));

	// validate arguments...
	if(aPhysAddr&KPageMask)
		return KErrArgument;

	// check and allocate page array entries...
	RPageArray::TIter pageIter;
	TInt r = aMemory->iPages.AddStart(aIndex,aCount,pageIter);
	if(r!=KErrNone)
		return r;

	RPageArray::TIter pageList = pageIter;

	// assign pages...
	SPageInfo* piStart = SPageInfo::SafeFromPhysAddr(aPhysAddr);
	SPageInfo* piEnd = piStart+aCount;
	if(piStart)
		{
		SPageInfo* pi = piStart;
		TUint index = aIndex;
		TUint flash = 0;
		MmuLock::Lock();
		while(pi<piEnd)
			{
			MmuLock::Flash(flash,KMaxPageInfoUpdatesInOneGo/2); // flash twice as often because we're doing about twice the work of a simple page info update
			AssignPage(aMemory,index,pi);
			++index;
			++pi;
			}
		MmuLock::Unlock();
		}

	// map the pages...
	pageIter.AddContiguous(aCount,aPhysAddr);
	r = aMemory->MapPages(pageList);

	// release page array entries...
	aMemory->iPages.AddEnd(aIndex,aCount);

	// revert if error...
	if(r!=KErrNone)
		RemovePages(aMemory,aIndex,aCount,0);

	return r;
	}


TInt DHardwareMemoryManager::RemovePages(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TPhysAddr* aPages)
	{
	TRACE2(("DHardwareMemoryManager::RemovePages(0x%08x,0x%x,0x%x,?)",aMemory, aIndex, aCount));
	__NK_ASSERT_DEBUG(MemoryObjectLock::IsHeld(aMemory));

	RPageArray::TIter pageIter;
	aMemory->iPages.FindStart(aIndex,aCount,pageIter);

	TUint numPages = 0;
	for(;;)
		{
		// find some pages...
		RPageArray::TIter pageList;
		TUint n = pageIter.RemoveFind(pageList);
		if(!n)
			break;

		// unmap some pages...
		aMemory->UnmapPages(pageList,true);

		// free pages...
		TPhysAddr pagePhys;
		while(pageList.Remove(1,&pagePhys))
			{
			if(aPages)
				*aPages++ = pagePhys;
			++numPages;

			__NK_ASSERT_DEBUG((pagePhys&KPageMask)==0);

			TUint index = pageList.Index()-1;
			SPageInfo* pi = SPageInfo::SafeFromPhysAddr(pagePhys);
			if(!pi)
				TheMmu.CleanAndInvalidatePages(&pagePhys,1,aMemory->Attributes(),index);
			else
				{
				MmuLock::Lock();
				UnassignPage(aMemory,index,pi);
				MmuLock::Unlock();
				}
			}

		// move on...
		pageIter.FindRelease(n);
		}

	aMemory->iPages.FindEnd(aIndex,aCount);

	return numPages;
	}
sl@0
  1384
sl@0
  1385
sl@0
  1386
void DHardwareMemoryManager::AssignPage(DMemoryObject* aMemory, TUint aIndex, SPageInfo* aPageInfo)
	{
	TRACE2(("DHardwareMemoryManager::AssignPage(0x%08x,0x%x,phys=0x%08x)",aMemory, aIndex, aPageInfo->PhysAddr()));
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
	__NK_ASSERT_DEBUG(aPageInfo->Type()==SPageInfo::EPhysAlloc);
	TUint flags = aMemory->PageInfoFlags();
	if(aPageInfo->UseCount()==0)
		{
		// not mapped yet...
		aPageInfo->SetMapped(aIndex,flags);
		}
	else
		{
		// already mapped somewhere...
		TMemoryType type = (TMemoryType)(flags&KMemoryTypeMask);
		if(CacheMaintenance::IsCached(type))
			{
			// memory is cached at L1, check colour matches existing mapping...
			if( (aPageInfo->Index()^aIndex) & KPageColourMask )
				{
				#ifdef _DEBUG
					Kern::Printf("DHardwareMemoryManager::AssignPage BAD COLOUR");
					aPageInfo->Dump();
				#endif
				__NK_ASSERT_ALWAYS(0);
				}
			}
		// check memory type matches existing mapping...
		if( (aPageInfo->Flags()^flags) & EMemoryAttributeMask )
			{
			#ifdef _DEBUG
				Kern::Printf("DHardwareMemoryManager::AssignPage BAD MEMORY TYPE");
				aPageInfo->Dump();
			#endif
			__NK_ASSERT_ALWAYS(0);
			}
		}
	aPageInfo->IncUseCount();
	TRACE2(("DHardwareMemoryManager::AssignPage iUseCount=%d",aPageInfo->UseCount()));
	}

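// Decrement the use count of a page being removed from a memory object. When the
// count reaches zero the page is no longer used by any memory object, so its
// contents are cleaned and invalidated from the cache before the page can be reused.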
void DHardwareMemoryManager::UnassignPage(DMemoryObject* aMemory, TUint aIndex, SPageInfo* aPageInfo)
	{
	TRACE2(("DHardwareMemoryManager::UnassignPage(0x%08x,0x%x,phys=0x%08x)",aMemory, aIndex, aPageInfo->PhysAddr()));
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
	TRACE2(("DHardwareMemoryManager::UnassignPage iUseCount=%d",aPageInfo->UseCount()));
	__NK_ASSERT_DEBUG(aPageInfo->UseCount());
	if(!aPageInfo->DecUseCount())
		{
		// page no longer being used by any memory object, make sure its contents
		// are purged from the cache...
		TPhysAddr pagePhys = aPageInfo->PhysAddr();
		aPageInfo->SetModifier(&pagePhys);
		MmuLock::Unlock();
		TheMmu.CleanAndInvalidatePages(&pagePhys,1,aMemory->Attributes(),aIndex);
		MmuLock::Lock();
		if(!aPageInfo->CheckModified(&pagePhys)) // if page has not been reused...
			aPageInfo->SetUncached();			 //     we know the memory is not in the cache
		}
	}

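// Hardware memory is not demand paged, so Pin and Unpin can simply delegate to the
// unpaged manager's implementations.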
TInt DHardwareMemoryManager::Pin(DMemoryObject* aMemory, DMemoryMappingBase* aMapping, TPinArgs& aPinArgs)
	{
	return ((DUnpagedMemoryManager*)this)->DUnpagedMemoryManager::Pin(aMemory,aMapping,aPinArgs);
	}


void DHardwareMemoryManager::Unpin(DMemoryObject* aMemory, DMemoryMappingBase* aMapping, TPinArgs& aPinArgs)
	{
	((DUnpagedMemoryManager*)this)->DUnpagedMemoryManager::Unpin(aMemory,aMapping,aPinArgs);
	}



//
// DPagedMemoryManager
//

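// Construct a demand paged memory object; this just adds EMemoryCreateDemandPaged
// to the creation flags before using the generic construction code.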
TInt DPagedMemoryManager::New(DMemoryObject*& aMemory, TUint aSizeInPages, TMemoryAttributes aAttributes, TMemoryCreateFlags aCreateFlags)
	{
	return DMemoryManager::New(aMemory, aSizeInPages, aAttributes, (TMemoryCreateFlags)(aCreateFlags | EMemoryCreateDemandPaged));
	}


void DPagedMemoryManager::Destruct(DMemoryObject* aMemory)
	{
	((DUnpagedMemoryManager*)this)->DUnpagedMemoryManager::Destruct(aMemory);
	}


TInt DPagedMemoryManager::StealPage(DMemoryObject* aMemory, SPageInfo* aPageInfo)
	{
	return ((DDiscardableMemoryManager*)this)->DDiscardableMemoryManager::StealPage(aMemory,aPageInfo);
	}


TInt DPagedMemoryManager::MovePage(	DMemoryObject* aMemory, SPageInfo* aOldPageInfo,
									TPhysAddr& aNewPage, TUint aBlockZoneId, TBool aBlockRest)
	{
	return TheMovableMemoryManager->MovePage(aMemory, aOldPageInfo, aNewPage, aBlockZoneId, aBlockRest);
	}

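// Restrict the access permissions of a page, e.g. to make it read-only so that
// subsequent modifications can be detected. Fails with KErrInUse if another thread
// changes the page's state while the MmuLock is released to update the mappings.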
TInt DPagedMemoryManager::RestrictPage(DMemoryObject* aMemory, SPageInfo* aPageInfo, TRestrictPagesType aRestriction)
	{
	TRACE2(("DPagedMemoryManager::RestrictPage(0x%08x,0x%08x,%d)",aMemory,aPageInfo,aRestriction));
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());

	TUint index = aPageInfo->Index();
	TInt r = KErrNotFound;

	TPhysAddr page;
	TPhysAddr originalPage;
	RPageArray::TIter pageList;
	TPhysAddr* p = aMemory->iPages.RestrictPageNAStart(index,pageList);
	if(!p)
		goto fail;
	originalPage = *p;
	__NK_ASSERT_DEBUG((originalPage&~KPageMask)==aPageInfo->PhysAddr());

	aPageInfo->SetModifier(&pageList);

	MmuLock::Unlock();

	// restrict page...
	aMemory->RestrictPages(pageList,aRestriction);

	MmuLock::Lock();

	page = *p;
	if(aPageInfo->CheckModified(&pageList) || page!=originalPage/*page state changed*/)
		{
		// page state was changed by someone else...
		r = KErrInUse;
		}
	else
		{
		// nobody else has modified the page state, so the restrictions were successfully applied...
		*p = (page&~RPageArray::EStateMask)|RPageArray::ECommitted; // restore state
		aPageInfo->SetReadOnly();
		r = KErrNone;
		}

	aMemory->iPages.RestrictPageNAEnd(index);

#ifdef _DEBUG
	if(r!=KErrNone)
		TRACE2(("DPagedMemoryManager::RestrictPage fail because preempted or vetoed"));
#endif

fail:
	TRACE2(("DPagedMemoryManager::RestrictPage returns %d",r));
	return r;
	}

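// Handle a page fault on demand paged memory:
//  1. reserve a page array entry for the faulting index,
//  2. if a page is already present (e.g. paged in by another thread), use it,
//  3. otherwise acquire a DPageReadRequest, allocate a RAM page from the pager
//     and read its contents from the backing store with ReadPages,
//  4. map the page with the faulting mapping and balance the page array counts.
// None of these steps is allowed to fail with KErrNoMemory; the fault handling
// path must not itself require memory allocation.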
TInt DPagedMemoryManager::HandleFault(	DMemoryObject* aMemory, TUint aIndex, DMemoryMapping* aMapping,
										TUint aMapInstanceCount, TUint aAccessPermissions)
	{
	TPinArgs pinArgs;
	pinArgs.iReadOnly = !(aAccessPermissions&EReadWrite);

	TUint usedNew = 0;

	RPageArray::TIter pageList;
	TPhysAddr* p = aMemory->iPages.AddPageStart(aIndex,pageList);
	__NK_ASSERT_ALWAYS(p); // we should never run out of memory handling a paging fault

	TInt r = 1; // positive value to indicate nothing done

	// if the memory object already has the page, then we can use it...
	MmuLock::Lock();
	if(RPageArray::IsPresent(*p))
		{
		r = PageInDone(aMemory,aIndex,0,p);
		__NK_ASSERT_DEBUG(r<=0); // can't return >0 as we didn't supply a new page
		}
	MmuLock::Unlock();

	if(r>0)
		{
		// need to read the page from the backing store...

		// get paging request object...
		DPageReadRequest* req;
		do
			{
			r = AcquirePageReadRequest(req,aMemory,aIndex,1);
			__NK_ASSERT_DEBUG(r!=KErrNoMemory); // not allowed to allocate memory, therefore can't fail with KErrNoMemory
			if(r==KErrNone)
				{
				// if someone else has since read our page, then we can use it...
				MmuLock::Lock();
				r = 1;
				if(RPageArray::IsPresent(*p))
					{
					r = PageInDone(aMemory,aIndex,0,p);
					__NK_ASSERT_DEBUG(r<=0); // can't return >0 as we didn't supply a new page
					}
				MmuLock::Unlock();
				}
			}
		while(r>0 && !req); // while not paged in && don't have a request object

		if(r>0)
			{
			// still need to read the page from the backing store...

			// get RAM page...
			TPhysAddr pagePhys;
			r = ThePager.PageInAllocPages(&pagePhys,1,aMemory->RamAllocFlags());
			__NK_ASSERT_DEBUG(r!=KErrNoMemory);
			if(r==KErrNone)
				{
				// read data for page...
				r = ReadPages(aMemory,aIndex,1,&pagePhys,req);
				__NK_ASSERT_DEBUG(r!=KErrNoMemory); // not allowed to allocate memory, therefore can't fail with KErrNoMemory
				if(r!=KErrNone)
					{
					// error, so free unused pages...
					ThePager.PageInFreePages(&pagePhys,1);
					}
				else
					{
					// use new page...
					MmuLock::Lock();
					r = PageInDone(aMemory,aIndex,SPageInfo::FromPhysAddr(pagePhys),p);
					MmuLock::Unlock();
					if(r>0)
						{
						// new page actually used...
						r = KErrNone;
						usedNew = 1;
						}
					}
				}
			}

		// done with paging request object...
		if(req)
			req->Release();
		}

	// map page...
	if(r==KErrNone && aMapping)
		{
		r = aMapping->PageIn(pageList, pinArgs, aMapInstanceCount);
		__NK_ASSERT_ALWAYS(r!=KErrNoMemory); // we should never run out of memory handling a paging fault
		#ifdef COARSE_GRAINED_TLB_MAINTENANCE
		InvalidateTLB();
		#endif
		}

	// finished with this page...
	aMemory->iPages.AddPageEnd(aIndex,usedNew);

	__NK_ASSERT_ALWAYS(r!=KErrNoMemory); // we should never run out of memory handling a paging fault
	return r;
	}

TInt DPagedMemoryManager::Pin(DMemoryObject* aMemory, DMemoryMappingBase* aMapping, TPinArgs& aPinArgs)
	{
	__ASSERT_CRITICAL;
	return DoPin(aMemory,aMapping->iStartIndex,aMapping->iSizeInPages,aMapping,aPinArgs);
	}

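// Pin a range of pages so they stay resident while the mapping is in use. Pages
// already committed are pinned in place; missing pages are paged in from the
// backing store, in batches of up to DPageReadRequest::EMaxPages, and pinned as
// they arrive. On success the mapping's EPagesPinned flag is set; on failure any
// pages pinned so far are unpinned again with DoUnpin.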
TInt DPagedMemoryManager::DoPin(DMemoryObject* aMemory, TUint aIndex, TUint aCount, DMemoryMappingBase* aMapping, TPinArgs& aPinArgs)
	{
	TRACE(("DPagedMemoryManager::DoPin(0x%08x,0x%08x,0x%08x,0x%08x)",aMemory, aIndex, aCount, aMapping));
	__ASSERT_CRITICAL;
	__NK_ASSERT_DEBUG(aPinArgs.HaveSufficientPages(aCount));

	// check and allocate page array entries...
	RPageArray::TIter pageList;
	TInt r = aMemory->iPages.AddStart(aIndex,aCount,pageList,true);
	if(r!=KErrNone)
		return r;

	RPageArray::TIter pageIter = pageList;
	TUint n;
	TPhysAddr* pages;
	while((n=pageIter.Pages(pages,DPageReadRequest::EMaxPages))!=0)
		{
		MmuLock::Lock();

		if(RPageArray::IsPresent(*pages))
			{
			// pin page which is already committed to the memory object...
			r = PageInPinnedDone(aMemory,pageIter.Index(),0,pages,aPinArgs);
			__NK_ASSERT_DEBUG(r<=0); // can't return >0 as we didn't supply a new page
			}
		else
			{
			// count consecutive pages which need to be read...
			TUint i;
			for(i=1; i<n; ++i)
				if(RPageArray::IsPresent(pages[i]))
					break;
			n = i;
			r = 1; // positive value to indicate nothing done
			}

		MmuLock::Unlock();

		if(r==KErrNone)
			{
			// successfully pinned one page, so move on to the next one...
			pageIter.Skip(1);
			continue;
			}
		else if(r<0)
			{
			// error, so end...
			break;
			}

		// need to read pages from the backing store...

		// get paging request object...
		DPageReadRequest* req;
		TUint i;
		do
			{
			i = 0;
			r = AcquirePageReadRequest(req,aMemory,pageIter.Index(),n);
			if(r==KErrNone)
				{
				// see if someone else has since read any of our pages...
				MmuLock::Lock();
				for(; i<n; ++i)
					if(RPageArray::IsPresent(pages[i]))
						break;
				MmuLock::Unlock();
				}
			}
		while(i==n && !req); // while still need all pages && don't have a request object

		// if we don't need all the pages any more...
		if(i!=n)
			{
			// retry loop...
			if(req)
				req->Release();
			continue;
			}

		// keep count of the number of pages actually added to the memory object...
		TUint usedNew = 0;

		// get RAM pages...
		TPhysAddr newPages[DPageReadRequest::EMaxPages];
		__NK_ASSERT_DEBUG(n<=DPageReadRequest::EMaxPages);
		r = ThePager.PageInAllocPages(newPages,n,aMemory->RamAllocFlags());
		if(r==KErrNone)
			{
			// read data for pages...
			r = ReadPages(aMemory,pageIter.Index(),n,newPages,req);
			if(r!=KErrNone)
				{
				// error, so free unused pages...
				ThePager.PageInFreePages(newPages,n);
				}
			else
				{
				// use new pages...
				for(i=0; i<n; ++i)
					{
					MmuLock::Lock();
					r = PageInPinnedDone(aMemory,
									pageIter.Index()+i,
									SPageInfo::FromPhysAddr(newPages[i]),
									pages+i,
									aPinArgs
									);
					MmuLock::Unlock();
					if(r>0)
						{
						// new page actually used...
						r = KErrNone;
						++usedNew;
						}
					if(r!=KErrNone)
						{
						// error, so free remaining unused pages...
						ThePager.PageInFreePages(newPages+(i+1),n-(i+1));
						// and update the array for any pages already added...
						if(i)
							pageIter.Added(i,usedNew);
						break;
						}
					}
				}
			}

		// done with paging request object...
		if(req)
			req->Release();

		if(r!=KErrNone)
			break; // error, so give up

		// move on to the next set of pages...
		pageIter.Added(n,usedNew);
		}

	// map pages...
	if(r==KErrNone)
		{
		// Page in the pages with the pinning mapping. It is OK to get the instance
		// count here without any locking, as a pinned mapping can't be reused for
		// another purpose while this method is in progress.
		r = aMapping->PageIn(pageList, aPinArgs, aMapping->MapInstanceCount());
		#ifdef COARSE_GRAINED_TLB_MAINTENANCE
		InvalidateTLB();
		#endif
		}

	// release page array entries...
	aMemory->iPages.AddEnd(aIndex,aCount);

	if(r==KErrNone)
		{
		// set the EPagesPinned flag to indicate success...
		__NK_ASSERT_DEBUG((aMapping->Flags()&DMemoryMapping::EPagesPinned)==0);
		__e32_atomic_ior_ord8(&aMapping->Flags(), (TUint8)DMemoryMapping::EPagesPinned);
		}
	else
		{
		// cleanup on error...
		TUint pinnedCount = pageIter.Index()-aIndex; // number of pages actually pinned
		DoUnpin(aMemory,aIndex,pinnedCount,aMapping,aPinArgs);
		}

	return r;
	}

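// Unpin the pages covered by a mapping; does nothing unless the pages were
// successfully pinned (i.e. EPagesPinned is set on the mapping).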
void DPagedMemoryManager::Unpin(DMemoryObject* aMemory, DMemoryMappingBase* aMapping, TPinArgs& aPinArgs)
	{
	__ASSERT_CRITICAL;
	// if the mapping was successfully pinned...
	if(aMapping->Flags()&DMemoryMapping::EPagesPinned)
		{
		// then undo the pinning...
		DoUnpin(aMemory,aMapping->iStartIndex,aMapping->iSizeInPages,aMapping,aPinArgs);
		}
	}

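// Release the pin on each page in the range, flashing the MmuLock between pages to
// bound the lock hold time, then clear the mapping's EPagesPinned flag.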
void DPagedMemoryManager::DoUnpin(DMemoryObject* aMemory, TUint aIndex, TUint aCount, DMemoryMappingBase* aMapping, TPinArgs& aPinArgs)
	{
	TRACE(("DPagedMemoryManager::DoUnpin(0x%08x,0x%08x,0x%08x,0x%08x,?)",aMemory, aIndex, aCount, aMapping));
	__ASSERT_CRITICAL;

	MmuLock::Lock();
	TUint endIndex = aIndex+aCount;
	for(TUint i=aIndex; i<endIndex; ++i)
		{
		TPhysAddr page = aMemory->iPages.Page(i);
		__NK_ASSERT_DEBUG(RPageArray::IsPresent(page));
		__NK_ASSERT_DEBUG(SPageInfo::SafeFromPhysAddr(page&~KPageMask));
		ThePager.Unpin(SPageInfo::FromPhysAddr(page),aPinArgs);
		MmuLock::Flash();
		}
	MmuLock::Unlock();

	// clear EPagesPinned flag...
	__e32_atomic_and_ord8(&aMapping->Flags(), TUint8(~DMemoryMapping::EPagesPinned));
	}

void DPagedMemoryManager::DoCleanupDecommitted(DMemoryObject* aMemory)
	{
	MemoryObjectLock::Lock(aMemory);
	FreeDecommitted(aMemory,0,aMemory->iSizeInPages);
	MemoryObjectLock::Unlock(aMemory);
	}

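// Complete paging in a single page: attach the page (or reuse an existing one) via
// DoPageInDone, then tell the pager it has been paged in. Returns >0 if the
// supplied new page was used, 0 if an existing page was used, or a negative error
// code.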
TInt DPagedMemoryManager::PageInDone(DMemoryObject* aMemory, TUint aIndex, SPageInfo* aPageInfo, TPhysAddr* aPageArrayEntry)
	{
	TInt r = DoPageInDone(aMemory,aIndex,aPageInfo,aPageArrayEntry,false);

	if(r>=0)
		ThePager.PagedIn(aPageInfo);

	// check page assigned correctly...
#ifdef _DEBUG
	if(RPageArray::IsPresent(*aPageArrayEntry))
		{
		SPageInfo* pi = SPageInfo::FromPhysAddr(*aPageArrayEntry);
		__NK_ASSERT_DEBUG(pi->Owner()==aMemory);
		__NK_ASSERT_DEBUG(pi->Index()==aIndex);
		}
#endif

	return r;
	}

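// As PageInDone, but additionally pins the page via ThePager.PagedInPinned.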
TInt DPagedMemoryManager::PageInPinnedDone(DMemoryObject* aMemory, TUint aIndex, SPageInfo* aPageInfo, TPhysAddr* aPageArrayEntry, TPinArgs& aPinArgs)
	{
	TInt r = DoPageInDone(aMemory,aIndex,aPageInfo,aPageArrayEntry,true);

	if(r>=0)
		ThePager.PagedInPinned(aPageInfo,aPinArgs);

	// check page assigned correctly...
#ifdef _DEBUG
	if(RPageArray::IsPresent(*aPageArrayEntry))
		{
		SPageInfo* pi = SPageInfo::FromPhysAddr(*aPageArrayEntry);
		__NK_ASSERT_DEBUG(pi->Owner()==aMemory);
		__NK_ASSERT_DEBUG(pi->Index()==aIndex);
		if(r>=0)
			__NK_ASSERT_DEBUG(pi->PagedState()==SPageInfo::EPagedPinned);
		}
#endif

	return r;
	}

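// Attach a newly paged-in page to a memory object, or reuse a page which is already
// present. On return aPageInfo refers to the page actually used (or is 0 on
// failure). Returns 1 if the supplied new page was used, 0 if an existing page was
// reused, or KErrNotFound if the memory has been decommitted in the meantime.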
TInt DPagedMemoryManager::DoPageInDone(DMemoryObject* aMemory, TUint aIndex, SPageInfo*& aPageInfo, TPhysAddr* aPageArrayEntry, TBool aPinning)
	{
	TRACE(("DPagedMemoryManager::DoPageInDone(0x%08x,0x%08x,0x%08x,?,%d)",aMemory,aIndex,aPageInfo,aPinning));
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());

	__UNLOCK_GUARD_START(MmuLock);

	SPageInfo* pi = aPageInfo;

	if(!IsAllocated(aMemory,aIndex,1))
		{
		// memory has been decommitted from the memory object...
		if(pi)
			ThePager.PagedInUnneeded(pi);
		__UNLOCK_GUARD_END(MmuLock);
		aPageInfo = 0;
		return KErrNotFound;
		}

	TPhysAddr oldPage = *aPageArrayEntry;
	TBool useNew = (bool)!RPageArray::IsPresent(oldPage);
	if(useNew)
		{
		if(!pi)
			{
			__UNLOCK_GUARD_END(MmuLock);
			// aPageInfo = 0; // this is already set to zero
			return KErrNotFound; // no new page to use
			}

		// assign page to memory object...
		pi->SetManaged(aMemory,aIndex,aMemory->PageInfoFlags());

		ThePager.Event(DPager::EEventPageInNew,pi);

		// save any paging manager data stored in the page array before we overwrite it...
		pi->SetPagingManagerData(*aPageArrayEntry);
		}
	else
		{
		__NK_ASSERT_DEBUG(!pi); // should only have read a new page if none was present

		// discard new page...
		if(pi)
			ThePager.PagedInUnneeded(pi);

		// check existing page can be committed...
		if(RPageArray::State(oldPage)<=RPageArray::EDecommitting)
			{
			__UNLOCK_GUARD_END(MmuLock);
			aPageInfo = 0;
			return KErrNotFound;
			}

		// and use the one we already have...
		SPageInfo* newPage = SPageInfo::FromPhysAddr(oldPage);

		if(!pi && !aPinning)
			ThePager.Event(DPager::EEventPageInAgain,newPage);

		pi = newPage;
		pi->SetModifier(0); // so observers see the page state has changed
		}

	// set page array entry...
	TPhysAddr pagePhys = pi->PhysAddr();
	*aPageArrayEntry = pagePhys|RPageArray::ECommitted;

	// return the page we actually used...
	aPageInfo = pi;

	__UNLOCK_GUARD_END(MmuLock);
	return useNew;
	}

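// Decompress data read from the backing store. Supports uncompressed data
// (type 0, a straight copy) and byte-pair compressed data (SRomPageInfo::EBytePair
// or KUidCompressionBytePair, decoded with the generic BytePairDecompress).
// Returns the number of bytes produced, or an error code.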
TInt DPagedMemoryManager::Decompress(TUint32 aCompressionType, TLinAddr aDst, TUint aDstBytes, TLinAddr aSrc, TUint aSrcBytes)
	{
#ifdef BTRACE_PAGING_VERBOSE
	BTraceContext4(BTrace::EPaging, BTrace::EPagingDecompressStart, aCompressionType);
#endif
	TInt r;
	switch(aCompressionType)
		{
	case 0:
		__NK_ASSERT_DEBUG(aSrcBytes == aDstBytes);
		memcpy((void*)aDst, (void*)aSrc, aSrcBytes);
		r = aSrcBytes;
		break;

	case SRomPageInfo::EBytePair:
	case KUidCompressionBytePair:
		{
		TUint8* srcNext = 0;
		START_PAGING_BENCHMARK;
		r = BytePairDecompress((TUint8*)aDst, aDstBytes, (TUint8*)aSrc, aSrcBytes, srcNext);
		END_PAGING_BENCHMARK(EPagingBmDecompress);
		if (r > 0)
			{
			// decompression successful, so check srcNext points to the end of the compressed data...
			__NK_ASSERT_ALWAYS((TLinAddr)srcNext == aSrc + aSrcBytes);
			}
		}
		break;

	default:
		r = KErrNotSupported;
		break;
		}
#ifdef BTRACE_PAGING_VERBOSE
	BTraceContext0(BTrace::EPaging, BTrace::EPagingDecompressEnd);
#endif
	return r;
	}

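// The base paged manager never writes pages back to the backing store, so these
// must not be reached unless a derived manager overrides them.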
TInt DPagedMemoryManager::AcquirePageWriteRequest(DPageWriteRequest*& aRequest, DMemoryObject* aMemory, TUint aIndex, TUint aCount)
	{
	__NK_ASSERT_ALWAYS(0);
	return KErrNotSupported;
	}


TInt DPagedMemoryManager::WritePages(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TPhysAddr* aPages, DPageWriteRequest* aRequest)
	{
	__NK_ASSERT_ALWAYS(0);
	return KErrNotSupported;
	}

TZonePageType DPagedMemoryManager::PageType()
	{
	// Paged managers' pages should be discardable and will actually be freed by
	// the pager, so this value won't be used.
	return EPageDiscard;
	}