os/kernelhwsrv/kernel/eka/memmodel/epoc/flexible/mmu/mdatapaging.cpp
author sl@SLION-WIN7.fritz.box
Fri, 15 Jun 2012 03:10:57 +0200 (2012-06-15)
changeset 0 bde4ae8d615e
permissions -rw-r--r--
First public contribution.
sl@0
     1
// Copyright (c) 2008-2009 Nokia Corporation and/or its subsidiary(-ies).
sl@0
     2
// All rights reserved.
sl@0
     3
// This component and the accompanying materials are made available
sl@0
     4
// under the terms of the License "Eclipse Public License v1.0"
sl@0
     5
// which accompanies this distribution, and is available
sl@0
     6
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
sl@0
     7
//
sl@0
     8
// Initial Contributors:
sl@0
     9
// Nokia Corporation - initial contribution.
sl@0
    10
//
sl@0
    11
// Contributors:
sl@0
    12
//
sl@0
    13
// Description:
sl@0
    14
//
sl@0
    15
sl@0
    16
#include <plat_priv.h>
sl@0
    17
#include "mm.h"
sl@0
    18
#include "mmu.h"
sl@0
    19
sl@0
    20
#include "mmanager.h"
sl@0
    21
#include "mobject.h"
sl@0
    22
#include "mmapping.h"
sl@0
    23
#include "mpager.h"
sl@0
    24
#include "mswap.h"
sl@0
    25
sl@0
    26
sl@0
    27
/**
Manages the swap via the data paging device.
Tracks which swap-area pages are in use with a bitmap, and records each
memory-object page's swap state in the object's per-page "paging manager data" word.
*/
class DSwapManager
	{
public:

	// Layout of the per-page paging manager data word: the low 3 bits are
	// state flags, the remaining bits hold the page's index in the swap area.
	enum TSwapFlags
		{
		EAllocated		= 1 << 0,	// swap space is reserved for this page
		EUninitialised	= 1 << 1,	// page has never been written out; swap contents invalid
		ESaved			= 1 << 2,	// a copy of this page is currently stored in swap
		ESwapFlagsMask 	= 0x7,

		ESwapIndexShift = 3,		// swap-area index is stored above the flag bits
		ESwapIndexMask = 0xffffffff << ESwapIndexShift,
		};

	TInt Create(DPagingDevice* aDevice);

	TInt ReserveSwap(DMemoryObject* aMemory, TUint aStartIndex, TUint aPageCount);
	TInt UnreserveSwap(DMemoryObject* aMemory, TUint aStartIndex, TUint aPageCount);
	TBool IsReserved(DMemoryObject* aMemory, TUint aStartIndex, TUint aPageCount);

	TInt ReadSwapPages(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TLinAddr aLinAddr, DPageReadRequest* aRequest, TPhysAddr* aPhysAddrs);
	TInt WriteSwapPages(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TLinAddr aLinAddr, DPageWriteRequest* aRequest);
	// NOTE(review): the definition names this parameter aSwapIndex; it is the
	// page's index within the swap area, not the full swap data word.
	void DoDeleteNotify(TUint aSwapData);

	void GetSwapInfo(SVMSwapInfo& aInfoOut);
	TInt SetSwapThresholds(const SVMSwapThresholds& aThresholds);
	void CheckSwapThresholds(TUint aInitial, TUint aFinal);
	
protected:
	DPagingDevice* iDevice;			// paging device used to access the swap area
	TBitMapAllocator* iBitMap;		// tracks which swap pages are allocated
	TUint iBitMapFree;				// count of free swap pages
	TUint iAllocOffset;				// next offset to allocate from (round-robin)
	// NOTE(review): "Theshold" looks like a typo for "Threshold" — names kept
	// as-is to preserve the existing interface.
 	TUint iSwapThesholdLow;
 	TUint iSwapThesholdGood;
	TThreadMessage iDelNotifyMsg;	// message used for DeleteNotify calls to the media driver
	};
sl@0
    68
sl@0
    69
sl@0
    70
/**
Manager for demand paged memory objects which contain writeable data.
The contents of the memory are written to a backing store whenever its
pages are 'paged out'.

@see DSwapManager
*/
class DDataPagedMemoryManager : public DPagedMemoryManager
	{
private:
	// from DMemoryManager...
	virtual TInt Alloc(DMemoryObject* aMemory, TUint aIndex, TUint aCount);
	virtual void Free(DMemoryObject* aMemory, TUint aIndex, TUint aCount);
	virtual TInt Wipe(DMemoryObject* aMemory);
	virtual TInt CleanPage(DMemoryObject* aMemory, SPageInfo* aPageInfo, TPhysAddr*& aPageArrayEntry);

	// Methods inherited from DPagedMemoryManager
	virtual void Init3();
	virtual TInt InstallPagingDevice(DPagingDevice* aDevice);
	virtual TInt AcquirePageReadRequest(DPageReadRequest*& aRequest, DMemoryObject* aMemory, TUint aIndex, TUint aCount);
	virtual TInt AcquirePageWriteRequest(DPageWriteRequest*& aRequest, DMemoryObject* aMemory, TUint aIndex, TUint aCount);
	virtual TInt ReadPages(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TPhysAddr* aPages, DPageReadRequest* aRequest);
	virtual TInt WritePages(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TPhysAddr* aPages, DPageWriteRequest* aRequest);
	virtual TBool IsAllocated(DMemoryObject* aMemory, TUint aIndex, TUint aCount);

public:
	// Swap introspection and tuning; both forward to iSwapManager under the RamAllocLock.
	void GetSwapInfo(SVMSwapInfo& aInfoOut);
	TInt SetSwapThresholds(const SVMSwapThresholds& aThresholds);

private:
	/**
	The paging device used for accessing the backing store.
	This is set by #InstallPagingDevice.
	*/
	DPagingDevice* iDevice;

	/**
	The instance of #DSwapManager being used by this manager.
	*/
	DSwapManager* iSwapManager;

public:
	/**
	The single instance of this manager class.
	*/
	static DDataPagedMemoryManager TheManager;
	};
sl@0
   117
sl@0
   118
sl@0
   119
// The single instance of the data paged memory manager, and the exported
// pointer through which the rest of the kernel reaches it.
DDataPagedMemoryManager DDataPagedMemoryManager::TheManager;
DPagedMemoryManager* TheDataPagedMemoryManager = &DDataPagedMemoryManager::TheManager;
sl@0
   121
sl@0
   122
sl@0
   123
/**
sl@0
   124
Create a swap manager.
sl@0
   125
sl@0
   126
@param	aDevice	The demand paging device for access to the swap.
sl@0
   127
*/
sl@0
   128
TInt DSwapManager::Create(DPagingDevice* aDevice)
sl@0
   129
	{
sl@0
   130
	__ASSERT_COMPILE(!(ESwapIndexMask & ESwapFlagsMask));
sl@0
   131
	__NK_ASSERT_DEBUG(iDevice == NULL);
sl@0
   132
	iDevice = aDevice;
sl@0
   133
sl@0
   134
	// Create the structures required to track the swap usage.
sl@0
   135
	TUint swapPages = (iDevice->iSwapSize << iDevice->iReadUnitShift) >> KPageShift;
sl@0
   136
	// Can't have more swap pages than we can map.
sl@0
   137
	__NK_ASSERT_DEBUG(swapPages<=DMemoryObject::KMaxPagingManagerData);
sl@0
   138
	__NK_ASSERT_DEBUG(swapPages<=(KMaxTUint>>ESwapIndexShift));
sl@0
   139
sl@0
   140
	if ((TheMmu.TotalPhysicalRamPages() << 2) < swapPages)
sl@0
   141
		{// The swap is limited to a maximum of 4 times the amount of RAM.
sl@0
   142
		return KErrTooBig;
sl@0
   143
		}
sl@0
   144
sl@0
   145
	iBitMap = TBitMapAllocator::New(swapPages, ETrue);
sl@0
   146
	if (iBitMap == NULL)
sl@0
   147
		{// Not enough RAM to keep track of the swap.
sl@0
   148
		return KErrNoMemory;
sl@0
   149
		}
sl@0
   150
	iBitMapFree = swapPages;
sl@0
   151
	iAllocOffset = 0;
sl@0
   152
	return KErrNone;
sl@0
   153
	}
sl@0
   154
sl@0
   155
sl@0
   156
/**
sl@0
   157
Reserve some swap pages for the requested region of the memory object
sl@0
   158
sl@0
   159
@param aMemory		The memory object to reserve pages for.
sl@0
   160
@param aStartIndex	The page index in the memory object of the start of the region.
sl@0
   161
@param aPageCount	The number of pages to reserve.
sl@0
   162
sl@0
   163
@return KErrNone on success, KErrNoMemory if not enough swap space available.
sl@0
   164
@pre aMemory's lock is held.
sl@0
   165
@post aMemory's lock is held.
sl@0
   166
*/
sl@0
   167
TInt DSwapManager::ReserveSwap(DMemoryObject* aMemory, TUint aStartIndex, TUint aPageCount)
sl@0
   168
	{
sl@0
   169
	__NK_ASSERT_DEBUG(MemoryObjectLock::IsHeld(aMemory));
sl@0
   170
	__NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
sl@0
   171
sl@0
   172
	const TUint indexEnd = aStartIndex + aPageCount;
sl@0
   173
	TUint index = aStartIndex;
sl@0
   174
sl@0
   175
#ifdef _DEBUG
sl@0
   176
	for (; index < indexEnd; index++)
sl@0
   177
		{// This page shouldn't already be in use.
sl@0
   178
		MmuLock::Lock();
sl@0
   179
		__NK_ASSERT_DEBUG(!(aMemory->PagingManagerData(index) & ESwapFlagsMask));
sl@0
   180
		MmuLock::Unlock();
sl@0
   181
		}
sl@0
   182
#endif
sl@0
   183
sl@0
   184
	if (iBitMapFree < aPageCount)
sl@0
   185
		{
sl@0
   186
		Kern::AsyncNotifyChanges(EChangesOutOfMemory);
sl@0
   187
		return KErrNoMemory;
sl@0
   188
		}
sl@0
   189
	// Reserve the required swap space and mark each page as allocated and uninitialised.
sl@0
   190
	TUint initFree = iBitMapFree;
sl@0
   191
	iBitMapFree -= aPageCount;
sl@0
   192
	for (index = aStartIndex; index < indexEnd; index++)
sl@0
   193
		{		
sl@0
   194
		// Grab MmuLock to stop manager data being accessed.
sl@0
   195
		MmuLock::Lock();
sl@0
   196
		TUint swapData = aMemory->PagingManagerData(index);
sl@0
   197
		__NK_ASSERT_DEBUG(!(swapData & EAllocated));
sl@0
   198
		swapData = EAllocated | EUninitialised;
sl@0
   199
		aMemory->SetPagingManagerData(index, swapData);
sl@0
   200
		MmuLock::Unlock();
sl@0
   201
		}
sl@0
   202
sl@0
   203
	CheckSwapThresholds(initFree, iBitMapFree);		
sl@0
   204
	return KErrNone;
sl@0
   205
	}
sl@0
   206
sl@0
   207
sl@0
   208
/**
Unreserve swap pages for the requested region of the memory object.

Clears each page's paging manager data word, frees any saved swap location
back to the bitmap, and notifies the media driver of deleted swap content.

@param aMemory		The memory object to unreserve pages for.
@param aStartIndex	The page index in the memory object of the start of the region.
@param aPageCount	The number of pages to unreserve.

@return The number of pages freed.
@pre aMemory's lock is held.
@post aMemory's lock is held.
*/
TInt DSwapManager::UnreserveSwap(DMemoryObject* aMemory, TUint aStartIndex, TUint aPageCount)
	{
	__NK_ASSERT_DEBUG(MemoryObjectLock::IsHeld(aMemory));
	__NK_ASSERT_DEBUG(RamAllocLock::IsHeld());

	TUint initFree = iBitMapFree;
	TUint freedPages = 0;
	const TUint indexEnd = aStartIndex + aPageCount;
	for (TUint index = aStartIndex; index < indexEnd; index++)
		{
		// Grab MmuLock to stop manager data being accessed.
		MmuLock::Lock();
		TUint swapData = aMemory->PagingManagerData(index);
		TUint swapIndex = swapData >> ESwapIndexShift;
		TBool notifyDelete = EFalse;
		if (swapData & EAllocated)
			{
			if (swapData & ESaved)
				{// A copy of this page exists in swap: free its location and
				// remember to tell the media driver it is no longer needed.
				notifyDelete = ETrue;
				iBitMap->Free(swapIndex);
				}
			freedPages++;
			aMemory->SetPagingManagerData(index, 0);
			}
#ifdef _DEBUG
		else
			// A page without EAllocated must have no swap state at all.
			__NK_ASSERT_DEBUG(swapData == 0);
#endif

		MmuLock::Unlock();

		// Notify the media driver outside MmuLock (the notification sends a
		// message to the driver thread).
		if (notifyDelete)
			DoDeleteNotify(swapIndex);
		}
	iBitMapFree += freedPages;
	CheckSwapThresholds(initFree, iBitMapFree);	
	return freedPages;
	}
sl@0
   258
sl@0
   259
sl@0
   260
/**
sl@0
   261
Determine whether the specified pages in the memory object have swap reserved for them.
sl@0
   262
sl@0
   263
@param aMemory		The memory object that owns the pages.
sl@0
   264
@param aStartIndex	The first index of the pages to check.
sl@0
   265
@param aPageCount	The number of pages to check.
sl@0
   266
sl@0
   267
@return ETrue if swap is reserved for all the pages, EFalse otherwise.
sl@0
   268
*/
sl@0
   269
TBool DSwapManager::IsReserved(DMemoryObject* aMemory, TUint aStartIndex, TUint aPageCount)
sl@0
   270
	{// MmuLock required to protect manager data.
sl@0
   271
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
sl@0
   272
	__NK_ASSERT_DEBUG(aStartIndex < aMemory->iSizeInPages);
sl@0
   273
	__NK_ASSERT_DEBUG(aStartIndex + aPageCount <= aMemory->iSizeInPages);
sl@0
   274
sl@0
   275
	const TUint indexEnd = aStartIndex + aPageCount;
sl@0
   276
	for (TUint index = aStartIndex; index < indexEnd; index++)
sl@0
   277
		{
sl@0
   278
		if (!(aMemory->PagingManagerData(index) & DSwapManager::EAllocated))
sl@0
   279
			{// This page is not allocated by swap manager.
sl@0
   280
			return EFalse;
sl@0
   281
			}
sl@0
   282
		}
sl@0
   283
	return ETrue;
sl@0
   284
	}
sl@0
   285
sl@0
   286
sl@0
   287
/**
Read from the swap the specified pages associated with the memory object.

Uninitialised pages (never written out) are not read from the media; they are
wiped in place if the memory object's allocation flags require wiping.

@param aMemory 	The memory object to read the pages for
@param aIndex	The index of the first page within the memory object.
@param aCount	The number of pages to read.
@param aLinAddr	The address to copy the pages to.
@param aRequest	The request to use for the read.
@param aPhysAddrs	An array of the physical addresses for each page to read in.
				NOTE(review): the array cursor is advanced each iteration but
				never dereferenced in this implementation.

@return KErrNone on success, KErrNotFound if a page is not committed to the object.
*/
TInt DSwapManager::ReadSwapPages(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TLinAddr aLinAddr, DPageReadRequest* aRequest, TPhysAddr* aPhysAddrs)
	{
	TInt r = KErrNone;
	const TUint readUnitShift = iDevice->iReadUnitShift;
	TUint readSize = KPageSize >> readUnitShift;	// one page, in device read units
	TThreadMessage* msg = const_cast<TThreadMessage*>(&aRequest->iMessage);

	// Determine the wipe byte values for uninitialised pages.
	TUint allocFlags = aMemory->RamAllocFlags();
	TBool wipePages = !(allocFlags & Mmu::EAllocNoWipe);
	// 0x03 is the default wipe byte when no custom byte was requested.
	TUint8 wipeByte = (allocFlags & Mmu::EAllocUseCustomWipeByte) ? (allocFlags >> Mmu::EAllocWipeByteShift) & 0xff : 0x03;

	const TUint indexEnd = aIndex + aCount;
	for (TUint index = aIndex; index < indexEnd; index++, aLinAddr += KPageSize, aPhysAddrs++)
		{
		START_PAGING_BENCHMARK;

		MmuLock::Lock();	// MmuLock required for atomic access to manager data.
		TUint swapData = aMemory->PagingManagerData(index);

		if (!(swapData & EAllocated))
			{// This page is not committed to the memory object
			MmuLock::Unlock();
			return KErrNotFound;			
			}
		if (swapData & EUninitialised)
			{// This page has not been written to yet so don't read from swap 
			// just wipe it if required.
			MmuLock::Unlock();
			if (wipePages)
				{
				memset((TAny*)aLinAddr, wipeByte, KPageSize);
				}
			}
		else
			{
			// Only a saved page can be read back from the media.
			__NK_ASSERT_DEBUG(swapData & ESaved);
			TUint swapIndex = swapData >> ESwapIndexShift;
			// OK to release as if the object's data is decommitted the pager 
			// will check that data is still valid before mapping it.
			MmuLock::Unlock();
			// Convert the swap page index to a media offset in read units.
			TUint readStart = (swapIndex << KPageShift) >> readUnitShift;
			START_PAGING_BENCHMARK;
			r = iDevice->Read(msg, aLinAddr, readStart, readSize, DPagingDevice::EDriveDataPaging);
			if (r != KErrNone)
				__KTRACE_OPT(KPANIC, Kern::Printf("DSwapManager::ReadSwapPages: error reading media at %08x + %x: %d", readStart << readUnitShift, readSize << readUnitShift, r));				
			__NK_ASSERT_DEBUG(r!=KErrNoMemory); // not allowed to allocate memory, therefore can't fail with KErrNoMemory
			END_PAGING_BENCHMARK(EPagingBmReadDataMedia);
			// TODO: Work out what to do if page in fails, unmap all pages????
			__NK_ASSERT_ALWAYS(r == KErrNone);
			}
		END_PAGING_BENCHMARK(EPagingBmReadDataPage);
		}

	return r;
	}
sl@0
   353
sl@0
   354
sl@0
   355
/**
sl@0
   356
Write the specified memory object's pages from the RAM into the swap.
sl@0
   357
sl@0
   358
@param	aMemory		The memory object who owns the pages.
sl@0
   359
@param	aIndex		The index within the memory object.
sl@0
   360
@param 	aCount		The number of pages to write out.
sl@0
   361
@param	aLinAddr	The location of the pages to write out.
sl@0
   362
@param	aRequest	The demand paging request to use.
sl@0
   363
sl@0
   364
*/
sl@0
   365
TInt DSwapManager::WriteSwapPages(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TLinAddr aLinAddr, DPageWriteRequest* aRequest)
sl@0
   366
	{// The RamAllocLock prevents the object's swap pages being reassigned.
sl@0
   367
	__NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
sl@0
   368
sl@0
   369
	// Write the page out to the swap.
sl@0
   370
	TInt r = KErrNone;
sl@0
   371
	const TUint readUnitShift = iDevice->iReadUnitShift;
sl@0
   372
	TUint writeSize = KPageSize >> readUnitShift;
sl@0
   373
	TThreadMessage* msg = const_cast<TThreadMessage*>(&aRequest->iMessage);
sl@0
   374
sl@0
   375
	const TUint indexEnd = aIndex + aCount;
sl@0
   376
	for (TUint index = aIndex; index < indexEnd; index++)
sl@0
   377
		{
sl@0
   378
		START_PAGING_BENCHMARK;
sl@0
   379
sl@0
   380
		MmuLock::Lock();
sl@0
   381
		TUint swapData = aMemory->PagingManagerData(index);
sl@0
   382
		// OK to release as ram alloc lock prevents manager data being updated.
sl@0
   383
		MmuLock::Unlock();
sl@0
   384
		if (!(swapData & EAllocated))
sl@0
   385
			{// This page is being decommited from aMemory so it is clean/unrequired.
sl@0
   386
			continue;
sl@0
   387
			}
sl@0
   388
		TInt swapIndex = swapData >> ESwapIndexShift;
sl@0
   389
		if (swapData & ESaved)
sl@0
   390
			{// An old version of this page has been saved to swap so free it now
sl@0
   391
			// as it will be out of date.
sl@0
   392
			iBitMap->Free(swapIndex);
sl@0
   393
			DoDeleteNotify(swapIndex);
sl@0
   394
			}
sl@0
   395
		// Get a new swap location for this page.
sl@0
   396
		swapIndex = iBitMap->AllocFrom(iAllocOffset);
sl@0
   397
		__NK_ASSERT_DEBUG(swapIndex != -1 && swapIndex < iBitMap->iSize);
sl@0
   398
		iAllocOffset = swapIndex + 1;
sl@0
   399
		if (iAllocOffset == (TUint)iBitMap->iSize)
sl@0
   400
			iAllocOffset = 0;
sl@0
   401
sl@0
   402
		TUint writeOffset = (swapIndex << KPageShift) >> readUnitShift;
sl@0
   403
		{
sl@0
   404
		START_PAGING_BENCHMARK;
sl@0
   405
		r = iDevice->Write(msg, aLinAddr, writeOffset, writeSize, EFalse);
sl@0
   406
		if (r != KErrNone)
sl@0
   407
			__KTRACE_OPT(KPANIC, Kern::Printf("DSwapManager::WriteSwapPages: error writing media at %08x + %x: %d", writeOffset << readUnitShift, writeSize << readUnitShift, r));				
sl@0
   408
		__NK_ASSERT_DEBUG(r!=KErrNoMemory); // not allowed to allocate memory, therefore can't fail with KErrNoMemory
sl@0
   409
		END_PAGING_BENCHMARK(EPagingBmWriteDataMedia);
sl@0
   410
		}
sl@0
   411
		// TODO: Work out what to do if page out fails.
sl@0
   412
		__NK_ASSERT_ALWAYS(r == KErrNone);
sl@0
   413
		MmuLock::Lock();
sl@0
   414
		// The swap data should not have been modified.
sl@0
   415
		__NK_ASSERT_DEBUG(swapData == aMemory->PagingManagerData(index));
sl@0
   416
		// Store the new swap location and mark the page as saved.
sl@0
   417
		swapData &= ~(EUninitialised | ESwapIndexMask);
sl@0
   418
		swapData |= (swapIndex << ESwapIndexShift) | ESaved;
sl@0
   419
		aMemory->SetPagingManagerData(index, swapData);
sl@0
   420
		MmuLock::Unlock();
sl@0
   421
sl@0
   422
		END_PAGING_BENCHMARK(EPagingBmWriteDataPage);
sl@0
   423
		}
sl@0
   424
	
sl@0
   425
	return r;
sl@0
   426
	}
sl@0
   427
sl@0
   428
sl@0
   429
/**
sl@0
   430
Notify the media driver that the page written to swap is no longer required.
sl@0
   431
*/
sl@0
   432
void DSwapManager::DoDeleteNotify(TUint aSwapIndex)
sl@0
   433
	{
sl@0
   434
	// Ram Alloc lock prevents the swap location being assigned to another page.
sl@0
   435
	__NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
sl@0
   436
sl@0
   437
#ifdef __PAGING_DELETE_NOTIFY_ENABLED
sl@0
   438
	const TUint readUnitShift = iDevice->iReadUnitShift;
sl@0
   439
	const TUint size = KPageSize >> readUnitShift;
sl@0
   440
	TUint offset = (aSwapIndex << KPageShift) >> readUnitShift;
sl@0
   441
sl@0
   442
	START_PAGING_BENCHMARK;
sl@0
   443
	// Ignore the return value as this is just an optimisation that is not supported on all media.
sl@0
   444
	(void)iDevice->DeleteNotify(&iDelNotifyMsg, offset, size);
sl@0
   445
	END_PAGING_BENCHMARK(EPagingBmDeleteNotifyDataPage);
sl@0
   446
#endif
sl@0
   447
	}
sl@0
   448
sl@0
   449
sl@0
   450
// Check swap thresholds and notify (see K::CheckFreeMemoryLevel)
sl@0
   451
void DSwapManager::CheckSwapThresholds(TUint aInitial, TUint aFinal)
sl@0
   452
	{
sl@0
   453
	TUint changes = 0;
sl@0
   454
	if (aFinal < iSwapThesholdLow && aInitial >= iSwapThesholdLow)
sl@0
   455
		changes |= (EChangesFreeMemory | EChangesLowMemory);
sl@0
   456
	if (aFinal >= iSwapThesholdGood && aInitial < iSwapThesholdGood)
sl@0
   457
		changes |= EChangesFreeMemory;
sl@0
   458
	if (changes)
sl@0
   459
		Kern::AsyncNotifyChanges(changes);
sl@0
   460
	}
sl@0
   461
sl@0
   462
sl@0
   463
// Report total and free swap, converted from page counts to byte sizes.
void DSwapManager::GetSwapInfo(SVMSwapInfo& aInfoOut)
	{
	__NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
	aInfoOut.iSwapSize = iBitMap->iSize << KPageShift;
	aInfoOut.iSwapFree = iBitMapFree << KPageShift;
	}
sl@0
   469
sl@0
   470
sl@0
   471
// Set the low/good swap notification thresholds (given in bytes, stored in
// pages, rounded up).  Fails with KErrArgument if low > good or if the good
// threshold exceeds the total swap size.
TInt DSwapManager::SetSwapThresholds(const SVMSwapThresholds& aThresholds)
	{
	__NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
	if (aThresholds.iLowThreshold > aThresholds.iGoodThreshold)
		return KErrArgument;
	// Round each threshold up to a whole number of pages.
	const TInt lowPages = (aThresholds.iLowThreshold + KPageSize - 1) >> KPageShift;
	const TInt goodPages = (aThresholds.iGoodThreshold + KPageSize - 1) >> KPageShift;
	if (goodPages > iBitMap->iSize)
		return KErrArgument;
	iSwapThesholdLow = lowPages;
	iSwapThesholdGood = goodPages;
	return KErrNone;
	}
sl@0
   484
sl@0
   485
sl@0
   486
sl@0
   487
// Install the (single) data paging device and create the swap manager for it.
// Returns KErrNone without installing if the kernel's data paging policy
// forbids paging; KErrAlreadyExists if a device is already installed.
TInt DDataPagedMemoryManager::InstallPagingDevice(DPagingDevice* aDevice)
	{
	TRACEB(("DDataPagedMemoryManager::InstallPagingDevice(0x%08x)",aDevice));

	TUint dataPolicy = TheSuperPage().KernelConfigFlags() & EKernelConfigDataPagingPolicyMask;
	TRACEB(("Data Paging Policy = %d", dataPolicy >> EKernelConfigDataPagingPolicyShift));
	if (dataPolicy == EKernelConfigDataPagingPolicyNoPaging)
		{// No paging allowed so don't register the device.
		return KErrNone;
		}

	// Store the device, blocking any other devices from installing.
	// (Atomic compare-and-swap: only succeeds if iDevice was still NULL.)
	if (!NKern::CompareAndSwap((TAny*&)iDevice, (TAny*)NULL, (TAny*)aDevice))
		{// Data paging device already installed.
		__KTRACE_OPT2(KPAGING,KBOOT,Kern::Printf("**** Attempt to install more than one data paging device !!!!!!!! ****"));
		return KErrAlreadyExists;
		}

	// Now we can determine the size of the swap, create the swap manager.
	iSwapManager = new DSwapManager;
	__NK_ASSERT_ALWAYS(iSwapManager);

	TInt r = iSwapManager->Create(iDevice);
	if (r != KErrNone)
		{// Couldn't create the swap manager; roll back so another device can
		// try to install later.
		delete iSwapManager;
		iSwapManager = NULL;
		NKern::SafeSwap(NULL, (TAny*&)iDevice);
		return r;
		}
	// Advertise data paging support in the memory model attributes.
 	NKern::LockedSetClear(K::MemModelAttributes, 0, EMemModelAttrDataPaging);

	return r;
	}
sl@0
   521
sl@0
   522
sl@0
   523
// Acquire a page read request object from the paging device's request pool.
// Always returns KErrNone; aRequest is whatever the pool supplies.
TInt DDataPagedMemoryManager::AcquirePageReadRequest(DPageReadRequest*& aRequest, DMemoryObject* aMemory, TUint aIndex, TUint aCount)
	{
	aRequest = iDevice->iRequestPool->AcquirePageReadRequest(aMemory,aIndex,aCount);
	return KErrNone;
	}
sl@0
   528
sl@0
   529
sl@0
   530
// Acquire a page write request object from the paging device's request pool.
// Always returns KErrNone; aRequest is whatever the pool supplies.
TInt DDataPagedMemoryManager::AcquirePageWriteRequest(DPageWriteRequest*& aRequest, DMemoryObject* aMemory, TUint aIndex, TUint aCount)
	{
	aRequest = iDevice->iRequestPool->AcquirePageWriteRequest(aMemory,aIndex,aCount);
	return KErrNone;
	}
sl@0
   535
sl@0
   536
sl@0
   537
// Stage-3 initialisation: nothing to do for this manager.
void DDataPagedMemoryManager::Init3()
	{
	}
sl@0
   540
sl@0
   541
sl@0
   542
// Allocate backing for a region of the memory object: re-initialise any
// still-owned decommitted (pinned) pages, then reserve swap space for the
// whole region under the RamAllocLock.
TInt DDataPagedMemoryManager::Alloc(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
	{
	__NK_ASSERT_DEBUG(MemoryObjectLock::IsHeld(aMemory));

	// re-initialise any decommitted pages which we may still own because they were pinned...
	ReAllocDecommitted(aMemory, aIndex, aCount);

	// Reserve the swap pages required.
	RamAllocLock::Lock();
	const TInt result = iSwapManager->ReserveSwap(aMemory, aIndex, aCount);
	RamAllocLock::Unlock();

	return result;
	}
sl@0
   556
sl@0
   557
sl@0
   558
// Free a region of the memory object, releasing its reserved swap first.
void DDataPagedMemoryManager::Free(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
	{
	TRACE2(("DDataPagedMemoryManager::Free(0x%08x,0x%x,0x%x)", aMemory, aIndex, aCount));
	__NK_ASSERT_DEBUG(MemoryObjectLock::IsHeld(aMemory));

	// Unreserve the swap pages associated with the memory object.  Do this before
	// removing the page array entries to prevent a page fault reallocating these pages.
	RamAllocLock::Lock();
	const TInt freedCount = iSwapManager->UnreserveSwap(aMemory, aIndex, aCount);
	(void)freedCount;	// count not needed here; suppress unused warning
	RamAllocLock::Unlock();

	DoFree(aMemory, aIndex, aCount);
	}
sl@0
   572
sl@0
   573
sl@0
   574
/**
@copydoc DMemoryManager::Wipe
@todo	Not yet implemented.
		Need to handle this smartly, e.g. throw RAM away and set to uninitialised 
*/
TInt DDataPagedMemoryManager::Wipe(DMemoryObject* aMemory)
	{
	// Deliberate stub: faults here if ever called (ALWAYS-variant assert).
	__NK_ASSERT_ALWAYS(0); // not implemented yet

	return KErrNotSupported;
	}
sl@0
   585
sl@0
   586
sl@0
   587
// Page in: temporarily map the physical pages, fill them from swap (or wipe
// them if uninitialised), then unmap.
TInt DDataPagedMemoryManager::ReadPages(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TPhysAddr* aPages, DPageReadRequest* aRequest)
	{
	__NK_ASSERT_DEBUG(aRequest->CheckUse(aMemory,aIndex,aCount));

	// Map pages temporarily so that we can copy into them.
	const TLinAddr mappedAddr = aRequest->MapPages(aIndex, aCount, aPages);

	const TInt result = iSwapManager->ReadSwapPages(aMemory, aIndex, aCount, mappedAddr, aRequest, aPages);

	// If the memory object allows executable mappings then an IMB is required.
	aRequest->UnmapPages(aMemory->IsExecutable());

	return result;
	}
sl@0
   601
sl@0
   602
sl@0
   603
// Page out: temporarily map the physical pages, write them to swap, then unmap.
TInt DDataPagedMemoryManager::WritePages(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TPhysAddr* aPages, DPageWriteRequest* aRequest)
	{
	__NK_ASSERT_DEBUG(aRequest->CheckUse(aMemory,aIndex,aCount));

	// Map pages temporarily so that we can copy from them.
	const TLinAddr mappedAddr = aRequest->MapPages(aIndex, aCount, aPages);

	const TInt result = iSwapManager->WriteSwapPages(aMemory, aIndex, aCount, mappedAddr, aRequest);

	// If the memory object allows executable mappings then an IMB is required.
	aRequest->UnmapPages(aMemory->IsExecutable());

	return result;
	}
sl@0
   617
sl@0
   618
sl@0
   619
// Write a single dirty page out to swap and, if nobody modified it meanwhile,
// mark it clean.  Returns KErrNone if already clean or cleaned successfully,
// KErrInUse if the page was modified/became writable during the write, or the
// write error.  aPageArrayEntry is unused in this implementation.
// Called with MmuLock held; temporarily released around the media write.
TInt DDataPagedMemoryManager::CleanPage(DMemoryObject* aMemory, SPageInfo* aPageInfo, TPhysAddr*& aPageArrayEntry)
	{
	if(aPageInfo->IsDirty()==false)
		return KErrNone;

	// shouldn't be asked to clean a page which is writable...
	__NK_ASSERT_DEBUG(aPageInfo->IsWritable()==false);

	// mark page as being modified by us...
	TUint modifierInstance; // dummy variable used only for its storage address on the stack
	aPageInfo->SetModifier(&modifierInstance);

	// get info about page...
	TUint index = aPageInfo->Index();
	TPhysAddr physAddr = aPageInfo->PhysAddr();

	// Release the mmu lock while we write out the page.  This is safe as the 
	// RamAllocLock stops the physical address being freed from this object.
	MmuLock::Unlock();

	// get paging request object...
	DPageWriteRequest* req;
	TInt r = AcquirePageWriteRequest(req, aMemory, index, 1);
	__NK_ASSERT_DEBUG(r==KErrNone); // we should always get a write request because the previous function blocks until it gets one
	__NK_ASSERT_DEBUG(req); // we should always get a write request because the previous function blocks until it gets one

	r = WritePages(aMemory, index, 1, &physAddr, req);

	req->Release();

	MmuLock::Lock();

	if(r!=KErrNone)
		return r;

	// check if page is clean...
	if(aPageInfo->CheckModified(&modifierInstance) || aPageInfo->IsWritable())
		{
		// someone else modified the page, or it became writable, so fail...
		r = KErrInUse;
		}
	else
		{
		// page is now clean!
		ThePager.SetClean(*aPageInfo);
		}

	return r;
	}
sl@0
   668
sl@0
   669
sl@0
   670
// A page of paged data memory counts as "allocated" iff swap space is
// reserved for it; delegate to the swap manager.
TBool DDataPagedMemoryManager::IsAllocated(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
	{// MmuLock required to protect manager data.
	// DPagedMemoryManager::DoPageInDone() won't allow MmuLock to be released
	// so can only cope with a maximum of KMaxPagesInOneGo.
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
	__NK_ASSERT_DEBUG(aCount <= KMaxPagesInOneGo);

	return iSwapManager->IsReserved(aMemory, aIndex, aCount);
	}
sl@0
   679
sl@0
   680
sl@0
   681
// Fetch swap size/free counts from the swap manager.  Enters a critical
// section and takes the RamAllocLock so the counters are read consistently.
void DDataPagedMemoryManager::GetSwapInfo(SVMSwapInfo& aInfoOut)
	{
	NKern::ThreadEnterCS();
	RamAllocLock::Lock();
	iSwapManager->GetSwapInfo(aInfoOut);
	RamAllocLock::Unlock();
	NKern::ThreadLeaveCS();
	}
sl@0
   689
sl@0
   690
sl@0
   691
// Forward new swap thresholds to the swap manager under a critical section
// and the RamAllocLock.  Returns the swap manager's validation result.
TInt DDataPagedMemoryManager::SetSwapThresholds(const SVMSwapThresholds& aThresholds)
	{
	NKern::ThreadEnterCS();
	RamAllocLock::Lock();
	TInt r = iSwapManager->SetSwapThresholds(aThresholds);
	RamAllocLock::Unlock();
	NKern::ThreadLeaveCS();
	return r;
	}
sl@0
   700
sl@0
   701
sl@0
   702
// Kernel-global entry point: forward to the data paged memory manager singleton.
void GetSwapInfo(SVMSwapInfo& aInfoOut)
	{
	((DDataPagedMemoryManager*)TheDataPagedMemoryManager)->GetSwapInfo(aInfoOut);
	}
sl@0
   706
sl@0
   707
sl@0
   708
// Kernel-global entry point: forward to the data paged memory manager singleton.
TInt SetSwapThresholds(const SVMSwapThresholds& aThresholds)
	{
	return ((DDataPagedMemoryManager*)TheDataPagedMemoryManager)->SetSwapThresholds(aThresholds);
	}
sl@0
   712