os/kernelhwsrv/kernel/eka/memmodel/epoc/flexible/mmu/mcodepaging.cpp
author sl@SLION-WIN7.fritz.box
Fri, 15 Jun 2012 03:10:57 +0200 (2012-06-15)
changeset 0 bde4ae8d615e
permissions -rw-r--r--
First public contribution.
// Copyright (c) 2008-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
//

#include <plat_priv.h>
#include "cache_maintenance.h"
#include "mm.h"
#include "mmu.h"
#include "mmanager.h"
#include "mobject.h"
#include "mpager.h"
#include "mcodepaging.h"

/**
Manager for memory objects containing demand paged executable code.
This is the memory used by a DCodeSegMemory object to store the contents of
RAM-loaded EXEs and DLLs which are to be demand paged.

This memory has associated information, supplied by the Loader, which enables
the executable's code to be located in the file system and its contents
relocated and fixed-up when demand loaded.

@see DPagedCodeInfo
@see MM::PagedCodeNew
*/
class DCodePagedMemoryManager : public DPagedMemoryManager
	{
private:
	// from DMemoryManager...
	virtual TInt New(DMemoryObject*& aMemory, TUint aSizeInPages, TMemoryAttributes aAttributes, TMemoryCreateFlags aCreateFlags);
	virtual void Destruct(DMemoryObject* aMemory);
	virtual void Free(DMemoryObject* aMemory, TUint aIndex, TUint aCount);
	virtual TInt CleanPage(DMemoryObject* aMemory, SPageInfo* aPageInfo, TPhysAddr*& aPageArrayEntry);

	// from DPagedMemoryManager...
	virtual void Init3();
	virtual TInt InstallPagingDevice(DPagingDevice* aDevice);
	virtual TInt AcquirePageReadRequest(DPageReadRequest*& aRequest, DMemoryObject* aMemory, TUint aIndex, TUint aCount);
	virtual TInt ReadPages(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TPhysAddr* aPages, DPageReadRequest* aRequest);
	virtual TBool IsAllocated(DMemoryObject* aMemory, TUint aIndex, TUint aCount);

private:
	/**
	Array of paging devices used for each media drive.
	This is initialised by #InstallPagingDevice.
	Drives without paging devices have a null pointer in their entry.
	*/
	DPagingDevice* iDevice[KMaxLocalDrives];

public:
	/**
	The single instance of this manager class.
	*/
	static DCodePagedMemoryManager TheManager;

	friend DPagingDevice* CodePagingDevice(TInt aDriveNum);
	};


/**
Reference-counted object containing a #TPagedCodeInfo.
This is a structure containing the information about a demand-paged code segment
which is required to load and fix up its code section.

An instance of this object is created for each memory object being managed by
#DCodePagedMemoryManager, and a pointer to it is stored in the memory object's
DMemoryObject::iManagerData member.

@see TPagedCodeInfo
@see MM::PagedCodeLoaded
*/
class DPagedCodeInfo : public DReferenceCountedObject
	{
public:
	/**
	Return a reference to the embedded #TPagedCodeInfo.
	*/
	inline TPagedCodeInfo& Info()
		{ return iInfo; }
private:
	/**
	@copybrief TPagedCodeInfo
	*/
	TPagedCodeInfo iInfo;
	};
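
/*
Usage sketch (an assumption for illustration, mirroring the accessors later in this
file): given a DMemoryObject* aMemory, the DPagedCodeInfo attached to it is only
reachable through DMemoryObject::iManagerData, and Destruct() clears that pointer, so
readers sample it under the MmuLock and take their own reference before using it:

	MmuLock::Lock();
	DPagedCodeInfo* pagedCodeInfo = (DPagedCodeInfo*)aMemory->iManagerData;
	if(pagedCodeInfo)
		pagedCodeInfo->Open();		// take a reference while the lock protects the pointer
	MmuLock::Unlock();
	if(pagedCodeInfo)
		{
		TPagedCodeInfo& info = pagedCodeInfo->Info();
		// ... read the code paging info ...
		pagedCodeInfo->Close();		// balance the Open() above
		}
*/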


DCodePagedMemoryManager DCodePagedMemoryManager::TheManager;
DPagedMemoryManager* TheCodePagedMemoryManager = &DCodePagedMemoryManager::TheManager;


DPagingDevice* CodePagingDevice(TInt aDriveNum)
	{
	__NK_ASSERT_DEBUG(aDriveNum<KMaxLocalDrives);
	return DCodePagedMemoryManager::TheManager.iDevice[aDriveNum];
	}


void DCodePagedMemoryManager::Init3()
	{
	TRACEB(("DCodePagedMemoryManager::Init3()"));
	}


TInt DCodePagedMemoryManager::InstallPagingDevice(DPagingDevice* aDevice)
	{
	TRACEB(("DCodePagedMemoryManager::InstallPagingDevice(0x%08x)",aDevice));

	TUint codePolicy = TheSuperPage().KernelConfigFlags() & EKernelConfigCodePagingPolicyMask;
	TRACEB(("Code Paging Policy = %d", codePolicy >> EKernelConfigCodePagingPolicyShift));
	if(codePolicy == EKernelConfigCodePagingPolicyNoPaging)
		{
		// no paging allowed so end now...
		return KErrNone;
		}

	TInt i;
	for(i=0; i<KMaxLocalDrives; ++i)
		if(aDevice->iDrivesSupported&(1<<i))
			{
			TRACEB(("DCodePagedMemoryManager::InstallPagingDevice drive=%d",i));
			TAny* null = 0;
			if(!__e32_atomic_cas_ord_ptr(&iDevice[i], &null, aDevice)) // set iDevice[i]=aDevice if it was originally 0
				{
				// paging device already registered...
				TRACEB(("DCodePagedMemoryManager::InstallPagingDevice returns ALREADY EXISTS!"));
				return KErrAlreadyExists;
				}
			// flag code paging is supported...
			__e32_atomic_ior_ord32(&K::MemModelAttributes, (TUint32)EMemModelAttrCodePaging);
			}

	return KErrNone;
	}
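
/*
The per-drive install above uses a compare-and-swap so that only the first paging
device claiming a drive is recorded, without taking any lock. A minimal sketch of the
same "install once" idiom using standard C++ atomics (an assumption for illustration
only; std::atomic is not available in this kernel-side code, where
__e32_atomic_cas_ord_ptr plays the same role):

	#include <atomic>

	struct Registry
		{
		std::atomic<DPagingDevice*> iSlot{nullptr};

		// Returns true if aDevice was installed, false if another device won the race.
		bool Install(DPagingDevice* aDevice)
			{
			DPagingDevice* expected = nullptr;
			return iSlot.compare_exchange_strong(expected, aDevice);
			}
		};
*/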


TInt DCodePagedMemoryManager::AcquirePageReadRequest(DPageReadRequest*& aRequest, DMemoryObject* aMemory, TUint aIndex, TUint aCount)
	{
	DPagingDevice* device = 0;
	MmuLock::Lock();
	DPagedCodeInfo* pagedCodeInfo = (DPagedCodeInfo*)aMemory->iManagerData;
	if(pagedCodeInfo)
		{
		TPagedCodeInfo& info = pagedCodeInfo->Info();
		device = iDevice[info.iCodeLocalDrive];
		}
	MmuLock::Unlock();

	if(!device)
		{
		aRequest = 0;
		return KErrNotFound;
		}

	aRequest = device->iRequestPool->AcquirePageReadRequest(aMemory,aIndex,aCount);
	return KErrNone;
	}


TInt DCodePagedMemoryManager::New(DMemoryObject*& aMemory, TUint aSizeInPages, TMemoryAttributes aAttributes, TMemoryCreateFlags aCreateFlags)
	{
	DPagedCodeInfo* pagedCodeInfo = new DPagedCodeInfo;
	if(!pagedCodeInfo)
		return KErrNoMemory;

	TInt r = DPagedMemoryManager::New(aMemory, aSizeInPages, aAttributes, aCreateFlags);
	if(r!=KErrNone)
		pagedCodeInfo->Close();
	else
		aMemory->iManagerData = pagedCodeInfo;

	return r;
	}


void DCodePagedMemoryManager::Destruct(DMemoryObject* aMemory)
	{
	MmuLock::Lock();
	DPagedCodeInfo* pagedCodeInfo = (DPagedCodeInfo*)aMemory->iManagerData;
	aMemory->iManagerData = 0;
	MmuLock::Unlock();

	if(pagedCodeInfo)
		pagedCodeInfo->Close();

	// base call to free memory and close object...
	DPagedMemoryManager::Destruct(aMemory);
	}


void DCodePagedMemoryManager::Free(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
	{
	DoFree(aMemory,aIndex,aCount);
	}

TInt DCodePagedMemoryManager::CleanPage(DMemoryObject* aMemory, SPageInfo* aPageInfo, TPhysAddr*& aPageArrayEntry)
	{
	if(aPageInfo->IsDirty()==false)
		return KErrNone;

	// shouldn't be asked to clean a page which is writable...
	__NK_ASSERT_DEBUG(aPageInfo->IsWritable()==false);

	// Note, memory may have been modified by the CodeModifier class.

	// just mark the page as clean as we don't try to preserve code modifications...
	ThePager.SetClean(*aPageInfo);

	return KErrNone;
	}


TInt ReadFunc(TAny* aArg1, TAny* aArg2, TLinAddr aBuffer, TInt aBlockNumber, TInt aBlockCount)
	{
	START_PAGING_BENCHMARK;
	TInt drive = (TInt)aArg1;
	TThreadMessage* msg = (TThreadMessage*)aArg2;
	DPagingDevice* device = CodePagingDevice(drive);
	TInt r = device->Read(msg, aBuffer, aBlockNumber, aBlockCount, drive);
	__NK_ASSERT_DEBUG(r!=KErrNoMemory); // not allowed to allocate memory, therefore can't fail with KErrNoMemory
	END_PAGING_BENCHMARK(EPagingBmReadMedia);
	return r;
	}
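
/*
ReadFunc is the media-read callback handed to TBlockMap::Read() in ReadPages() below.
The block map turns the byte range requested for a page into reads expressed in the
device's read units (one read unit is 1<<iReadUnitShift bytes) and invokes this
callback for each contiguous run. A worked example, under the assumption of a 512-byte
read unit (iReadUnitShift==9), a 4KB page, and data that happens to start at a
read-unit-aligned media offset of 0x3200 bytes:

	read unit size	= 1 << 9		= 512 bytes
	aBlockNumber	= 0x3200 >> 9	= 25	// first read unit covering the data
	aBlockCount		= 0x1000 >> 9	= 8		// a 4KB page spans eight read units

aArg1 and aArg2 are passed through unchanged from ReadPages(): the local drive number
and the TThreadMessage used to communicate with the media driver.
*/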


TInt DCodePagedMemoryManager::ReadPages(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TPhysAddr* aPages, DPageReadRequest* aRequest)
	{
	TRACE2(("DCodePagedMemoryManager::ReadPages(0x%08x,0x%08x,0x%08x,?,?)",aMemory,aIndex,aCount));

	__NK_ASSERT_DEBUG(aRequest->CheckUse(aMemory,aIndex,aCount));

	START_PAGING_BENCHMARK;

	MmuLock::Lock();
	DPagedCodeInfo* pagedCodeInfo = (DPagedCodeInfo*)aMemory->iManagerData;
	if(pagedCodeInfo)
		pagedCodeInfo->Open();
	MmuLock::Unlock();
	if(!pagedCodeInfo)
		return KErrNotFound;

	TPagedCodeInfo& info = pagedCodeInfo->Info();
	DPagingDevice& device = *iDevice[info.iCodeLocalDrive];

	TLinAddr linAddr = aRequest->MapPages(aIndex,aCount,aPages);
	TInt r = KErrNone;

	if(!info.iCodeSize)
		{
		// no blockmap yet, use blank pages...
		memset((TAny*)linAddr, 0x03, aCount*KPageSize);
		CacheMaintenance::CodeChanged(linAddr, aCount*KPageSize);
		goto done;
		}

	for(; aCount; ++aIndex, --aCount, linAddr+=KPageSize)
		{
		// work out which bit of the file to read
		TInt codeOffset = aIndex<<KPageShift;
		TInt dataOffset;
		TInt dataSize;
		TInt decompressedSize = Min(KPageSize, info.iCodeSize-codeOffset);
		if(info.iCompressionType)
			{
			dataOffset = info.iCodePageOffsets[aIndex];
			dataSize = info.iCodePageOffsets[aIndex+1] - dataOffset;
			__KTRACE_OPT(KPAGING,Kern::Printf("  compressed, file offset == %x, size == %d", dataOffset, dataSize));
			}
		else
			{
			dataOffset = codeOffset + info.iCodeStartInFile;
			dataSize = Min(KPageSize, info.iBlockMap.DataLength()-dataOffset);
			__NK_ASSERT_DEBUG(dataSize==decompressedSize);
			__KTRACE_OPT(KPAGING,Kern::Printf("  uncompressed, file offset == %x, size == %d", dataOffset, dataSize));
			}

		TInt bufferStart = info.iBlockMap.Read(aRequest->iBuffer,
												dataOffset,
												dataSize,
												device.iReadUnitShift,
												ReadFunc,
												(TAny*)info.iCodeLocalDrive,
												(TAny*)&aRequest->iMessage);

		if(bufferStart<0)
			{
			r = bufferStart; // return error
			__NK_ASSERT_DEBUG(0);
			break;
			}

		TLinAddr data = aRequest->iBuffer + bufferStart;
		r = Decompress(info.iCompressionType, linAddr, decompressedSize, data, dataSize);
		if(r>=0)
			{
			if(r!=decompressedSize)
				{
				__KTRACE_OPT(KPANIC, Kern::Printf("DCodePagedMemoryManager::ReadPages: error decompressing page at %08x + %x: %d", dataOffset, dataSize, r));
				__NK_ASSERT_DEBUG(0);
				r = KErrCorrupt;
				}
			else
				r = KErrNone;
			}
		else
			{
			__NK_ASSERT_DEBUG(0);
			}

		if(r!=KErrNone)
			break;

		if(decompressedSize<KPageSize)
			memset((TAny*)(linAddr+decompressedSize), 0x03, KPageSize-decompressedSize);
		if(info.iLoaded)
			info.ApplyFixups(linAddr, aIndex);
		}
done:
	aRequest->UnmapPages(true);

	pagedCodeInfo->AsyncClose();

	END_PAGING_BENCHMARK(EPagingBmReadCodePage);
	return r;
	}
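
/*
A worked example of the per-page offset arithmetic above (an assumption for
illustration; the numbers are invented). For page index aIndex==2 of an uncompressed
binary whose code section starts at byte 0x200 in the file:

	codeOffset	= 2 << KPageShift	= 0x2000	// offset within the code section
	dataOffset	= 0x2000 + 0x200	= 0x2200	// offset within the file
	dataSize	= Min(KPageSize, remaining)		// a whole page unless it is the last one

For a compressed binary the file offsets cannot be computed this way; instead the
per-page table info.iCodePageOffsets[] supplied by the Loader gives the start of each
page's compressed data, the difference between consecutive entries gives its compressed
size, and Decompress() expands that back to decompressedSize bytes.
*/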


TBool DCodePagedMemoryManager::IsAllocated(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
	{
	// all pages allocated if memory not destroyed (iManagerData!=0)...
	return aMemory->iManagerData!=0;
	}

TInt MM::PagedCodeNew(DMemoryObject*& aMemory, TUint aPageCount, TPagedCodeInfo*& aInfo)
	{
	TRACE(("MM::PagedCodeNew(?,0x%08x,0x%08x)",aPageCount,aInfo));
	TMemoryCreateFlags createFlags = (TMemoryCreateFlags)(EMemoryCreateNoWipe | EMemoryCreateAllowExecution);
	TInt r = TheCodePagedMemoryManager->New(aMemory,aPageCount,EMemoryAttributeStandard,createFlags);
	if(r==KErrNone)
		aInfo = &((DPagedCodeInfo*)aMemory->iManagerData)->Info();
	TRACE(("MM::PagedCodeNew returns %d, aMemory=0x%08x",r,aMemory));
	return r;
	}


void MM::PagedCodeLoaded(DMemoryObject* aMemory, TLinAddr aLoadAddress)
	{
	TRACE(("MM::PagedCodeLoaded(0x%08x,0x%08x)",aMemory,aLoadAddress));

	TPagedCodeInfo& info = ((DPagedCodeInfo*)aMemory->iManagerData)->Info();

	// We need to apply fixups for all memory already paged in.
	// Note, if this memory is subsequently discarded it should not be paged-in again
	// until after this function has completed, because the Loader won't touch the memory
	// and it has not yet been mapped into any other process.

	// make iterator for memory...
	RPageArray::TIter pageIter;
	aMemory->iPages.FindStart(0,aMemory->iSizeInPages,pageIter);

	for(;;)
		{
		// find some pages...
		RPageArray::TIter pageList;
		TUint n = pageIter.Find(pageList);
		if(!n)
			break;

		// fix up each page found...
		UNLOCK_USER_MEMORY();
		do
			{
			TUint i = pageList.Index();
			TLinAddr a = aLoadAddress+i*KPageSize;
			info.ApplyFixups(a,i);
			CacheMaintenance::CodeChanged(a, KPageSize);
			// now we've finished updating the page, mark it as read only and
			// clean as we don't need to save changes if it is stolen.
			MmuLock::Lock();
			TPhysAddr* pages;
			if(pageList.Pages(pages,1)==1)
				if(RPageArray::IsPresent(*pages))
					{// The loader page still has a writable mapping but it won't
					// touch the page again so this is safe. No use restricting the
					// page to be read only because, if the loader did write to it again,
					// it would just be rejuvenated as writable and made dirty.
					SPageInfo& pageInfo = *SPageInfo::FromPhysAddr(*pages);
					pageInfo.SetReadOnly();
					ThePager.SetClean(pageInfo);
					}
			MmuLock::Unlock();

			pageList.Skip(1);
			}
		while(pageList.Count());
		LOCK_USER_MEMORY();

		// move on...
		pageIter.FindRelease(n);
		}

	// done...
	aMemory->iPages.FindEnd(0,aMemory->iSizeInPages);
	info.iLoaded = true; // allow ReadPages to start applying fixups when handling page faults
	}
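
/*
Assumed loader-side call sequence, for illustration only (pieced together from the
functions in this file; the actual caller names live in the code segment code, not here):

	// 1. Create the paged memory object for the code section and obtain the
	//    TPagedCodeInfo that will describe it.
	TPagedCodeInfo* info;
	DMemoryObject* memory;
	TInt r = MM::PagedCodeNew(memory, pageCount, info);

	// 2. The Loader fills in *info: the block map, compression type, local drive,
	//    and the relocation/import fixup data.

	// 3. Once loading completes, apply fixups to any pages already paged in and
	//    allow ReadPages() to fix up pages on future page faults.
	MM::PagedCodeLoaded(memory, loadAddress);

Until step 3 sets info.iLoaded, ReadPages() serves pages without applying fixups (or,
before the block map exists, as 0x03-filled blank pages), which is why PagedCodeLoaded()
must itself walk the pages that are already present and fix them up.
*/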