os/kernelhwsrv/kernel/eka/memmodel/epoc/flexible/mmu/mcodepaging.cpp
// Copyright (c) 2008-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
//

#include <plat_priv.h>
#include "cache_maintenance.h"
#include "mm.h"
#include "mmu.h"
#include "mmanager.h"
#include "mobject.h"
#include "mpager.h"
#include "mcodepaging.h"

/**
Manager for memory objects containing demand paged executable code.
This is the memory used by DCodeSegMemory objects to store the contents of RAM-loaded
EXEs and DLLs which are to be demand paged.

This memory has associated information, supplied by the Loader, which enables
the executable's code to be located in the file system and its contents
relocated and fixed-up when demand loaded.

@see DPagedCodeInfo
@see MM::PagedCodeNew
*/
class DCodePagedMemoryManager : public DPagedMemoryManager
	{
private:
	// from DMemoryManager...
	virtual TInt New(DMemoryObject*& aMemory, TUint aSizeInPages, TMemoryAttributes aAttributes, TMemoryCreateFlags aCreateFlags);
	virtual void Destruct(DMemoryObject* aMemory);
	virtual void Free(DMemoryObject* aMemory, TUint aIndex, TUint aCount);
	virtual TInt CleanPage(DMemoryObject* aMemory, SPageInfo* aPageInfo, TPhysAddr*& aPageArrayEntry);

	// from DPagedMemoryManager...
	virtual void Init3();
	virtual TInt InstallPagingDevice(DPagingDevice* aDevice);
	virtual TInt AcquirePageReadRequest(DPageReadRequest*& aRequest, DMemoryObject* aMemory, TUint aIndex, TUint aCount);
	virtual TInt ReadPages(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TPhysAddr* aPages, DPageReadRequest* aRequest);
	virtual TBool IsAllocated(DMemoryObject* aMemory, TUint aIndex, TUint aCount);

private:
	/**
	Array of paging devices used for each media drive.
	This is initialised by #InstallPagingDevice.
	Drives without paging devices have the null pointer in their entry.
	*/
	DPagingDevice* iDevice[KMaxLocalDrives];

public:
	/**
	The single instance of this manager class.
	*/
	static DCodePagedMemoryManager TheManager;

	friend DPagingDevice* CodePagingDevice(TInt aDriveNum);
	};


/**
Reference counted object containing a #TPagedCodeInfo.
This is a structure containing the information about a demand paged code segment
which is required to load and fix up its code section.

An instance of this object is created for each memory object being managed by
#DCodePagedMemoryManager, and a pointer to it is stored in the memory object's
DMemoryObject::iManagerData member.

@see TPagedCodeInfo
@see MM::PagedCodeLoaded
*/
class DPagedCodeInfo : public DReferenceCountedObject
	{
public:
	/**
	Return a reference to the embedded #TPagedCodeInfo.
	*/
	inline TPagedCodeInfo& Info()
		{ return iInfo; }
private:
	/**
	@copybrief TPagedCodeInfo
	*/
	TPagedCodeInfo iInfo;
	};


DCodePagedMemoryManager DCodePagedMemoryManager::TheManager;
DPagedMemoryManager* TheCodePagedMemoryManager = &DCodePagedMemoryManager::TheManager;

DPagingDevice* CodePagingDevice(TInt aDriveNum)
	{
	__NK_ASSERT_DEBUG(aDriveNum<KMaxLocalDrives);
	return DCodePagedMemoryManager::TheManager.iDevice[aDriveNum];
	}

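// Third-phase initialisation. Nothing needs setting up here beyond the trace;
// paging devices are registered later via InstallPagingDevice.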
void DCodePagedMemoryManager::Init3()
	{
	TRACEB(("DCodePagedMemoryManager::Init3()"));
	}

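// Register aDevice as the code paging device for each local drive it supports.
// If the kernel's code paging policy forbids paging, the device is ignored; if
// another device has already claimed one of the drives, KErrAlreadyExists is
// returned. Successful registration flags EMemModelAttrCodePaging in
// K::MemModelAttributes.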
TInt DCodePagedMemoryManager::InstallPagingDevice(DPagingDevice* aDevice)
	{
	TRACEB(("DCodePagedMemoryManager::InstallPagingDevice(0x%08x)",aDevice));

	TUint codePolicy = TheSuperPage().KernelConfigFlags() & EKernelConfigCodePagingPolicyMask;
	TRACEB(("Code Paging Policy = %d", codePolicy >> EKernelConfigCodePagingPolicyShift));
	if(codePolicy == EKernelConfigCodePagingPolicyNoPaging)
		{
		// no paging allowed so end now...
		return KErrNone;
		}

	TInt i;
	for(i=0; i<KMaxLocalDrives; ++i)
		if(aDevice->iDrivesSupported&(1<<i))
			{
			TRACEB(("DCodePagedMemoryManager::InstallPagingDevice drive=%d",i));
			TAny* null = 0;
			if(!__e32_atomic_cas_ord_ptr(&iDevice[i], &null, aDevice)) // set iDevice[i]=aDevice if it was originally 0
				{
				// paging device already registered...
				TRACEB(("DCodePagedMemoryManager::InstallPagingDevice returns ALREADY EXISTS!"));
				return KErrAlreadyExists;
				}
			// flag code paging is supported...
			__e32_atomic_ior_ord32(&K::MemModelAttributes, (TUint32)EMemModelAttrCodePaging);
			}

	return KErrNone;
	}

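// Acquire a page read request object from the request pool of the paging device
// which serves the drive holding this memory object's code.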
TInt DCodePagedMemoryManager::AcquirePageReadRequest(DPageReadRequest*& aRequest, DMemoryObject* aMemory, TUint aIndex, TUint aCount)
	{
	DPagingDevice* device = 0;
	MmuLock::Lock();
	DPagedCodeInfo* pagedCodeInfo = (DPagedCodeInfo*)aMemory->iManagerData;
	if(pagedCodeInfo)
		{
		TPagedCodeInfo& info = pagedCodeInfo->Info();
		device = iDevice[info.iCodeLocalDrive];
		}
	MmuLock::Unlock();

	if(!device)
		{
		aRequest = 0;
		return KErrNotFound;
		}

	aRequest = device->iRequestPool->AcquirePageReadRequest(aMemory,aIndex,aCount);
	return KErrNone;
	}

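// Create a memory object for demand paged code, attaching a new DPagedCodeInfo
// to it via the iManagerData member.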
TInt DCodePagedMemoryManager::New(DMemoryObject*& aMemory, TUint aSizeInPages, TMemoryAttributes aAttributes, TMemoryCreateFlags aCreateFlags)
	{
	DPagedCodeInfo* pagedCodeInfo = new DPagedCodeInfo;
	if(!pagedCodeInfo)
		return KErrNoMemory;

	TInt r = DPagedMemoryManager::New(aMemory, aSizeInPages, aAttributes, aCreateFlags);
	if(r!=KErrNone)
		pagedCodeInfo->Close();
	else
		aMemory->iManagerData = pagedCodeInfo;

	return r;
	}

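// Detach and close the DPagedCodeInfo, then let the base class free the memory
// and destroy the object.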
void DCodePagedMemoryManager::Destruct(DMemoryObject* aMemory)
	{
	MmuLock::Lock();
	DPagedCodeInfo* pagedCodeInfo = (DPagedCodeInfo*)aMemory->iManagerData;
	aMemory->iManagerData = 0;
	MmuLock::Unlock();

	if(pagedCodeInfo)
		pagedCodeInfo->Close();

	// base call to free memory and close object...
	DPagedMemoryManager::Destruct(aMemory);
	}

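// Free pages from the memory object. Paged code is never written back to media,
// so there is nothing to preserve and DoFree is sufficient.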
void DCodePagedMemoryManager::Free(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
	{
	DoFree(aMemory,aIndex,aCount);
	}

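// 'Clean' a dirty page before it is stolen. Code pages only become dirty through
// the CodeModifier class, and such modifications are not preserved, so the page
// is simply marked clean.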
TInt DCodePagedMemoryManager::CleanPage(DMemoryObject* aMemory, SPageInfo* aPageInfo, TPhysAddr*& aPageArrayEntry)
	{
	if(aPageInfo->IsDirty()==false)
		return KErrNone;

	// shouldn't be asked to clean a page which is writable...
	__NK_ASSERT_DEBUG(aPageInfo->IsWritable()==false);

	// Note, memory may have been modified by the CodeModifier class.

	// just mark page as clean as we don't try and preserve code modifications...
	ThePager.SetClean(*aPageInfo);

	return KErrNone;
	}

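// Callback passed to the block map's Read function (see info.iBlockMap.Read in
// ReadPages below) to read blocks of the code file from media. aArg1 carries the
// local drive number and aArg2 the TThreadMessage to use for the device request.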
TInt ReadFunc(TAny* aArg1, TAny* aArg2, TLinAddr aBuffer, TInt aBlockNumber, TInt aBlockCount)
	{
	START_PAGING_BENCHMARK;
	TInt drive = (TInt)aArg1;
	TThreadMessage* msg = (TThreadMessage*)aArg2;
	DPagingDevice* device = CodePagingDevice(drive);
	TInt r = device->Read(msg, aBuffer, aBlockNumber, aBlockCount, drive);
	__NK_ASSERT_DEBUG(r!=KErrNoMemory); // not allowed to allocate memory, therefore can't fail with KErrNoMemory
	END_PAGING_BENCHMARK(EPagingBmReadMedia);
	return r;
	}

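// Read, and where necessary decompress, the code for pages [aIndex..aIndex+aCount)
// of aMemory into the supplied physical pages. If the executable's fixup data has
// already been loaded (info.iLoaded), relocations and import fixups are applied to
// each page as it is read.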
TInt DCodePagedMemoryManager::ReadPages(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TPhysAddr* aPages, DPageReadRequest* aRequest)
	{
	TRACE2(("DCodePagedMemoryManager::ReadPages(0x%08x,0x%08x,0x%08x,?,?)",aMemory,aIndex,aCount));

	__NK_ASSERT_DEBUG(aRequest->CheckUse(aMemory,aIndex,aCount));

	START_PAGING_BENCHMARK;

	MmuLock::Lock();
	DPagedCodeInfo* pagedCodeInfo = (DPagedCodeInfo*)aMemory->iManagerData;
	if(pagedCodeInfo)
		pagedCodeInfo->Open();
	MmuLock::Unlock();
	if(!pagedCodeInfo)
		return KErrNotFound;

	TPagedCodeInfo& info = pagedCodeInfo->Info();
	DPagingDevice& device = *iDevice[info.iCodeLocalDrive];

	TLinAddr linAddr = aRequest->MapPages(aIndex,aCount,aPages);
	TInt r = KErrNone;

	if(!info.iCodeSize)
		{
		// no blockmap yet, use blank pages...
		memset((TAny*)linAddr, 0x03, aCount*KPageSize);
		CacheMaintenance::CodeChanged(linAddr, aCount*KPageSize);
		goto done;
		}

	for(; aCount; ++aIndex, --aCount, linAddr+=KPageSize)
		{
		// work out which bit of the file to read
		TInt codeOffset = aIndex<<KPageShift;
		TInt dataOffset;
		TInt dataSize;
		TInt decompressedSize = Min(KPageSize, info.iCodeSize-codeOffset);
		if(info.iCompressionType)
			{
			dataOffset = info.iCodePageOffsets[aIndex];
			dataSize = info.iCodePageOffsets[aIndex+1] - dataOffset;
			__KTRACE_OPT(KPAGING,Kern::Printf("  compressed, file offset == %x, size == %d", dataOffset, dataSize));
			}
		else
			{
			dataOffset = codeOffset + info.iCodeStartInFile;
			dataSize = Min(KPageSize, info.iBlockMap.DataLength()-dataOffset);
			__NK_ASSERT_DEBUG(dataSize==decompressedSize);
			__KTRACE_OPT(KPAGING,Kern::Printf("  uncompressed, file offset == %x, size == %d", dataOffset, dataSize));
			}

		TInt bufferStart = info.iBlockMap.Read(aRequest->iBuffer,
												dataOffset,
												dataSize,
												device.iReadUnitShift,
												ReadFunc,
												(TAny*)info.iCodeLocalDrive,
												(TAny*)&aRequest->iMessage);

		if(bufferStart<0)
			{
			r = bufferStart; // return error
			__NK_ASSERT_DEBUG(0);
			break;
			}

		TLinAddr data = aRequest->iBuffer + bufferStart;
		r = Decompress(info.iCompressionType, linAddr, decompressedSize, data, dataSize);
		if(r>=0)
			{
			if(r!=decompressedSize)
				{
				__KTRACE_OPT(KPANIC, Kern::Printf("DCodePagedMemoryManager::ReadPages: error decompressing page at %08x + %x: %d", dataOffset, dataSize, r));
				__NK_ASSERT_DEBUG(0);
				r = KErrCorrupt;
				}
			else
				r = KErrNone;
			}
		else
			{
			__NK_ASSERT_DEBUG(0);
			}

		if(r!=KErrNone)
			break;

		if(decompressedSize<KPageSize)
			memset((TAny*)(linAddr+decompressedSize), 0x03, KPageSize-decompressedSize);
		if(info.iLoaded)
			info.ApplyFixups(linAddr, aIndex);
		}
done:
	aRequest->UnmapPages(true);

	pagedCodeInfo->AsyncClose();

	END_PAGING_BENCHMARK(EPagingBmReadCodePage);
	return r;
	}

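// Every page of a paged code memory object counts as allocated for as long as its
// DPagedCodeInfo is still attached, i.e. until the object is destroyed.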
TBool DCodePagedMemoryManager::IsAllocated(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
	{
	// all pages allocated if memory not destroyed (iManagerData!=0)...
	return aMemory->iManagerData!=0;
	}

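// Create a memory object to hold aPageCount pages of demand paged code and return
// a pointer to its TPagedCodeInfo, which the Loader then populates with the block
// map, relocation and import fixup information.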
TInt MM::PagedCodeNew(DMemoryObject*& aMemory, TUint aPageCount, TPagedCodeInfo*& aInfo)
	{
	TRACE(("MM::PagedCodeNew(?,0x%08x,0x%08x)",aPageCount,aInfo));
	TMemoryCreateFlags createFlags = (TMemoryCreateFlags)(EMemoryCreateNoWipe | EMemoryCreateAllowExecution);
	TInt r = TheCodePagedMemoryManager->New(aMemory,aPageCount,EMemoryAttributeStandard,createFlags);
	if(r==KErrNone)
		aInfo = &((DPagedCodeInfo*)aMemory->iManagerData)->Info();
	TRACE(("MM::PagedCodeNew returns %d, aMemory=0x%08x",r,aMemory));
	return r;
	}

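// Called once the Loader has supplied the code segment's fixup information:
// apply fixups to any pages already paged in, mark them read-only and clean,
// then set info.iLoaded so that ReadPages applies fixups to pages demand loaded
// from now on.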
void MM::PagedCodeLoaded(DMemoryObject* aMemory, TLinAddr aLoadAddress)
	{
	TRACE(("MM::PagedCodeLoaded(0x%08x,0x%08x)",aMemory,aLoadAddress));

	TPagedCodeInfo& info = ((DPagedCodeInfo*)aMemory->iManagerData)->Info();

	// we need to apply fixups for all memory already paged in.
	// Note, if this memory is subsequently discarded it should not be paged-in again
	// until after this function has completed, because the Loader won't touch the memory
	// and it has not yet been mapped into any other process.

	// make iterator for memory...
	RPageArray::TIter pageIter;
	aMemory->iPages.FindStart(0,aMemory->iSizeInPages,pageIter);

	for(;;)
		{
		// find some pages...
		RPageArray::TIter pageList;
		TUint n = pageIter.Find(pageList);
		if(!n)
			break;

		// fix up each page found...
		UNLOCK_USER_MEMORY();
		do
			{
			TUint i = pageList.Index();
			TLinAddr a = aLoadAddress+i*KPageSize;
			info.ApplyFixups(a,i);
			CacheMaintenance::CodeChanged(a, KPageSize);
			// now we've finished updating the page, mark it as read only and
			// clean as we don't need to save changes if it is stolen.
			MmuLock::Lock();
			TPhysAddr* pages;
			if(pageList.Pages(pages,1)==1)
				if(RPageArray::IsPresent(*pages))
					{// The loader page still has a writable mapping but it won't
					// touch the page again so this is safe.  No use restricting the
					// page to be read only as if the loader did write to it again
					// it would just be rejuvenated as writeable and made dirty.
					SPageInfo& pageInfo = *SPageInfo::FromPhysAddr(*pages);
					pageInfo.SetReadOnly();
					ThePager.SetClean(pageInfo);
					}
			MmuLock::Unlock();

			pageList.Skip(1);
			}
		while(pageList.Count());
		LOCK_USER_MEMORY();

		// move on...
		pageIter.FindRelease(n);
		}

	// done...
	aMemory->iPages.FindEnd(0,aMemory->iSizeInPages);
	info.iLoaded = true; // allow ReadPages to start applying fixups when handling page faults
	}