os/kernelhwsrv/kernel/eka/memmodel/epoc/flexible/mcodeseg.cpp
// Copyright (c) 1995-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
//

#include <memmodel.h>
#include "mmu/mm.h"
#include "mmboot.h"
#include "mmu/mcodepaging.h"

#include "cache_maintenance.h"


DCodeSeg* M::NewCodeSeg(TCodeSegCreateInfo&)
	{
	__KTRACE_OPT(KDLL,Kern::Printf("M::NewCodeSeg"));
	return new DMemModelCodeSeg;
	}


//
// DMemModelCodeSegMemory
//

DEpocCodeSegMemory* DEpocCodeSegMemory::New(DEpocCodeSeg* aCodeSeg)
	{
	return new DMemModelCodeSegMemory(aCodeSeg);
	}


DMemModelCodeSegMemory::DMemModelCodeSegMemory(DEpocCodeSeg* aCodeSeg)
	: DEpocCodeSegMemory(aCodeSeg)
	{
	}

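/**
Allocate the resources needed to load this code segment into RAM for a
user-local process: reserve the virtual address the code will run at (in the
new process itself for an EXE, otherwise in the common region shared by all
processes), create the memory object that will hold the code (demand paged or
fully committed), and map it read/write into the current process so the
loader can fill it.
*/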
TInt DMemModelCodeSegMemory::Create(TCodeSegCreateInfo& aInfo, DMemModelProcess* aProcess)
	{
	TInt r;

	TUint codePageCount;
	TUint dataPageCount;
	TBool isDemandPaged;
	if(!aInfo.iUseCodePaging)
		{
		isDemandPaged = 0;
		codePageCount = MM::RoundToPageCount(iRamInfo.iCodeSize+iRamInfo.iDataSize);
		dataPageCount = 0;
		}
	else
		{
		isDemandPaged = 1;
		codePageCount = MM::RoundToPageCount(iRamInfo.iCodeSize);
		dataPageCount = MM::RoundToPageCount(iRamInfo.iDataSize);

		iDataSectionMemory = Kern::Alloc(iRamInfo.iDataSize);
		if(!iDataSectionMemory)
			return KErrNoMemory;
		}

	iCodeSeg->iSize = codePageCount<<KPageShift;

	// allocate virtual address for code to run at...
	const TUint codeSize = codePageCount<<KPageShift;
	if(iCodeSeg->IsExe())
		{// Get the OS ASID without opening a reference on it, as aProcess isn't
		// fully created yet and so won't free its OS ASID.
		r = MM::VirtualAlloc(aProcess->OsAsid(),iRamInfo.iCodeRunAddr,codeSize,isDemandPaged);
		if(r!=KErrNone)
			return r;
		aProcess->iCodeVirtualAllocSize = codeSize;
		aProcess->iCodeVirtualAllocAddress = iRamInfo.iCodeRunAddr;
		iCodeSeg->iAttr |= ECodeSegAttAddrNotUnique;
		}
	else
		{
		r = MM::VirtualAllocCommon(iRamInfo.iCodeRunAddr,codeSize,isDemandPaged);
		if(r!=KErrNone)
			return r;
		iVirtualAllocCommonSize = codeSize;
		}

	// create memory object for codeseg...
	if(isDemandPaged)
		{
		// create memory object...
		r = MM::PagedCodeNew(iCodeMemoryObject, codePageCount, iPagedCodeInfo);
		if(r!=KErrNone)
			return r;

		// get file blockmap for codeseg contents...
		r = iPagedCodeInfo->ReadBlockMap(aInfo);
		if (r != KErrNone)
			return r;
		}
	else
		{
		// create memory object...
		TMemoryCreateFlags flags = (TMemoryCreateFlags)(EMemoryCreateNoWipe | EMemoryCreateAllowExecution);
		r = MM::MemoryNew(iCodeMemoryObject, EMemoryObjectMovable, codePageCount, flags);
		if(r!=KErrNone)
			return r;

		// commit memory...
		r = MM::MemoryAlloc(iCodeMemoryObject,0,codePageCount);
		if(r!=KErrNone)
			return r;
		}

	// create a mapping of the memory for the loader...
	// No need to open a reference on the OS ASID as it belongs to the current thread's process.
	DMemModelProcess* pP = (DMemModelProcess*)TheCurrentThread->iOwningProcess;
	r = MM::MappingNew(iCodeLoadMapping,iCodeMemoryObject,EUserReadWrite,pP->OsAsid());
	if(r!=KErrNone)
		return r;

	iRamInfo.iCodeLoadAddr = MM::MappingBase(iCodeLoadMapping);

	// work out where the loader is to put the loaded data section...
	TInt loadSize = iRamInfo.iCodeSize; // size of memory filled by loader
	if(iRamInfo.iDataSize)
		{
		if(!dataPageCount)
			{
			// data loaded immediately after code...
			iRamInfo.iDataLoadAddr = iRamInfo.iCodeLoadAddr+iRamInfo.iCodeSize;
			loadSize += iRamInfo.iDataSize;
			}
		else
			{
			// create memory object for data...
			DMemoryObject* dataMemory;
			r = MM::MemoryNew(dataMemory, EMemoryObjectMovable, dataPageCount, EMemoryCreateNoWipe);
			if(r!=KErrNone)
				return r;

			// commit memory...
			r = MM::MemoryAlloc(dataMemory,0,dataPageCount);
			if(r==KErrNone)
				{
				// create a mapping of the memory for the loader...
				// No need to open a reference on the OS ASID as it belongs to the current thread's process.
				r = MM::MappingNew(iDataLoadMapping,dataMemory,EUserReadWrite,pP->OsAsid());
				}

			if(r!=KErrNone)
				{
				MM::MemoryDestroy(dataMemory);
				return r;
				}

			iRamInfo.iDataLoadAddr = MM::MappingBase(iDataLoadMapping);
			}
		}

	if(!isDemandPaged)
		{
		// wipe memory that the loader won't fill...
		UNLOCK_USER_MEMORY();
		memset((TAny*)(iRamInfo.iCodeLoadAddr+loadSize), 0x03, codeSize-loadSize);
		LOCK_USER_MEMORY();
		}

	// done...
	iCreator = pP;

	return KErrNone;
	}


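/**
Called when the loader has finished loading this code segment. For demand
paged code this reads the fixup tables and notifies the pager; otherwise the
newly written code is made visible to the instruction cache. The initial
data section and the export directory are then preserved, the loader's
mappings are removed, and the code memory object is made read-only.
*/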
TInt DMemModelCodeSegMemory::Loaded(TCodeSegCreateInfo& aInfo)
	{
	if(iPagedCodeInfo)
		{
		// get information needed to fix up code for its run address...
		TInt r = iPagedCodeInfo->ReadFixupTables(aInfo);
		if(r!=KErrNone)
			return r;
		MM::PagedCodeLoaded(iCodeMemoryObject, iRamInfo.iCodeLoadAddr);
		}
	else
		{
		// make code visible to instruction cache...
		UNLOCK_USER_MEMORY();
		CacheMaintenance::CodeChanged(iRamInfo.iCodeLoadAddr, iRamInfo.iCodeSize);
		LOCK_USER_MEMORY();
		}

	// adjust iDataLoadAddr to point to the contents of the initial data
	// section as seen by the running process...
	if(iRamInfo.iDataLoadAddr)
		{
		TAny* dataSection = iDataSectionMemory;
		if(dataSection)
			{
			// contents for initial data section to be stored in iDataSectionMemory...
			UNLOCK_USER_MEMORY();
			memcpy(dataSection,(TAny*)iRamInfo.iDataLoadAddr,iRamInfo.iDataSize);
			LOCK_USER_MEMORY();
			iRamInfo.iDataLoadAddr = (TLinAddr)dataSection;
			}
		else
			{
			// contents for initial data section stored after code...
			__NK_ASSERT_DEBUG(iRamInfo.iDataLoadAddr==iRamInfo.iCodeLoadAddr+iRamInfo.iCodeSize); // check data loaded at end of code
			iRamInfo.iDataLoadAddr = iRamInfo.iCodeRunAddr+iRamInfo.iCodeSize;
			}
		}

	// copy export directory (this will now have fixups applied)...
	TInt exportDirSize = iRamInfo.iExportDirCount * sizeof(TLinAddr);
	if(exportDirSize > 0 || (exportDirSize==0 && (iCodeSeg->iAttr&ECodeSegAttNmdExpData)) )
		{
		exportDirSize += sizeof(TLinAddr);
		TLinAddr* expDir = (TLinAddr*)Kern::Alloc(exportDirSize);
		if(!expDir)
			return KErrNoMemory;
		iCopyOfExportDir = expDir;
		TLinAddr expDirLoad = iRamInfo.iExportDir-iRamInfo.iCodeRunAddr+iRamInfo.iCodeLoadAddr;
		UNLOCK_USER_MEMORY();
		memcpy(expDir,(TAny*)(expDirLoad-sizeof(TLinAddr)),exportDirSize);
		LOCK_USER_MEMORY();
		}

	// unmap code from loading process...
	DMemModelProcess* pP=(DMemModelProcess*)TheCurrentThread->iOwningProcess;
	__ASSERT_ALWAYS(iCreator==pP, MM::Panic(MM::ECodeSegLoadedNotCreator));
	MM::MappingDestroy(iCodeLoadMapping);
	MM::MappingAndMemoryDestroy(iDataLoadMapping);
	iCreator=NULL;

	// Mark the code memory object read-only to prevent malicious code from modifying it.
	TInt r = MM::MemorySetReadOnly(iCodeMemoryObject);
	__ASSERT_ALWAYS(r == KErrNone, MM::Panic(MM::ECodeSegSetReadOnlyFailure));

	return KErrNone;
	}


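/**
Tear down the loader-side mappings of the code and data sections; the
data-section memory object is destroyed along with its mapping.
*/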
void DMemModelCodeSegMemory::Destroy()
	{
	MM::MappingDestroy(iCodeLoadMapping);
	MM::MappingAndMemoryDestroy(iDataLoadMapping);
	}


DMemModelCodeSegMemory::~DMemModelCodeSegMemory()
	{
	__KTRACE_OPT(KDLL,Kern::Printf("DMemModelCodeSegMemory::~DMemModelCodeSegMemory %x", this));
	__NK_ASSERT_DEBUG(iAccessCount==0);

	MM::MappingDestroy(iCodeLoadMapping);
	MM::MappingAndMemoryDestroy(iDataLoadMapping);
	MM::MemoryDestroy(iCodeMemoryObject);

	if(iVirtualAllocCommonSize)
		MM::VirtualFreeCommon(iRamInfo.iCodeRunAddr, iVirtualAllocCommonSize);

	Kern::Free(iCopyOfExportDir);
	Kern::Free(iDataSectionMemory);
	}


//
// DMemModelCodeSeg
//

DMemModelCodeSeg::DMemModelCodeSeg()
	{
	}


DMemModelCodeSeg::~DMemModelCodeSeg()
	{
	__KTRACE_OPT(KDLL,Kern::Printf("DMemModelCodeSeg::Destruct %C", this));
	DCodeSeg::Wait();

	MM::MappingDestroy(iCodeLoadMapping);
	MM::MappingDestroy(iCodeGlobalMapping);
	MM::MemoryDestroy(iCodeMemoryObject);

	if(Memory())
		Memory()->Destroy();

	if(iDataAllocSize)
		MM::VirtualFreeCommon(iDataAllocBase,iDataAllocSize);

	DCodeSeg::Signal();

	Kern::Free(iKernelData);

	DEpocCodeSeg::Destruct();
	}


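/**
Create this code segment's RAM-loaded memory. For user-local code, reserve a
common virtual address for any DLL static data and delegate the rest to
DMemModelCodeSegMemory::Create(). For kernel or user-global code, create and
commit the code memory object, map it for the loader, and create the global
mapping the code will run from; kernel-side static data lives on the kernel
heap.
*/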
TInt DMemModelCodeSeg::DoCreateRam(TCodeSegCreateInfo& aInfo, DProcess* aProcess)
	{
	__KTRACE_OPT(KDLL,Kern::Printf("DMemModelCodeSeg::DoCreateRam %C", this));

	SRamCodeInfo& ri = RamInfo();
	iSize = MM::RoundToPageSize(ri.iCodeSize+ri.iDataSize);
	if (iSize==0)
		return KErrCorrupt;

	TBool kernel = ( (iAttr&(ECodeSegAttKernel|ECodeSegAttGlobal)) == ECodeSegAttKernel );
//	TBool user_global = ( (iAttr&(ECodeSegAttKernel|ECodeSegAttGlobal)) == ECodeSegAttGlobal );
	TBool user_local = ( (iAttr&(ECodeSegAttKernel|ECodeSegAttGlobal)) == 0 );

	TUint total_data_size = ri.iDataSize+ri.iBssSize;

	if(user_local)
		{
		// setup paging attribute for code...
		if(aInfo.iUseCodePaging)
			iAttr |= ECodeSegAttCodePaged;

		if(total_data_size && !IsExe())
			{
			// setup paging attribute for data section...
			if(aInfo.iUseCodePaging)
				if(K::MemModelAttributes & EMemModelAttrDataPaging)
					iAttr |= ECodeSegAttDataPaged;

			// allocate virtual address for data section...
			TInt r = MM::VirtualAllocCommon(iDataAllocBase,total_data_size,iAttr&ECodeSegAttDataPaged);
			if(r!=KErrNone)
				return r;
			iDataAllocSize = total_data_size;
			ri.iDataRunAddr = iDataAllocBase;
			}

		// create DCodeSegMemory for RAM loaded user local code...
		TInt r = Memory()->Create(aInfo,(DMemModelProcess*)aProcess);

#ifdef BTRACE_FLEXIBLE_MEM_MODEL
		if (r == KErrNone)
			{
			BTrace8(BTrace::EFlexibleMemModel,BTrace::EMemoryObjectIsCodeSeg,Memory()->iCodeMemoryObject,this);
			}
#endif

		return r;
		}

	// kernel or user-global code...

	// create memory object for codeseg...
	TMemoryCreateFlags flags = EMemoryCreateAllowExecution;
	if(kernel)
		{
		flags = (TMemoryCreateFlags)(flags|EMemoryCreateNoWipe);
		}
	TInt r = MM::MemoryNew(iCodeMemoryObject, EMemoryObjectMovable, MM::BytesToPages(iSize), flags);
	if(r!=KErrNone)
		return r;

	// commit memory...
	r = MM::MemoryAlloc(iCodeMemoryObject,0,MM::BytesToPages(iSize));
	if(r!=KErrNone)
		return r;

	// create a mapping of the memory for the loader...
	// No need to open a reference on the OS ASID as it belongs to the current thread's process.
	DMemModelProcess* pP = (DMemModelProcess*)TheCurrentThread->iOwningProcess;
	r = MM::MappingNew(iCodeLoadMapping,iCodeMemoryObject,EUserReadWrite,pP->OsAsid());
	if(r!=KErrNone)
		return r;
	ri.iCodeLoadAddr = MM::MappingBase(iCodeLoadMapping);

	// create a global mapping of the memory for the codeseg to run at...
	r = MM::MappingNew(iCodeGlobalMapping,iCodeMemoryObject,kernel?ESupervisorExecute:EUserExecute,KKernelOsAsid);
	if(r!=KErrNone)
		return r;
	ri.iCodeRunAddr = MM::MappingBase(iCodeGlobalMapping);

	if(kernel)
		{
		// setup data section memory...
		if (ri.iDataSize)
			ri.iDataLoadAddr = ri.iCodeLoadAddr+ri.iCodeSize;
		if (total_data_size)
			{
			iKernelData = Kern::Alloc(total_data_size);
			if (!iKernelData)
				return KErrNoMemory;
			ri.iDataRunAddr = (TLinAddr)iKernelData;
			}
		}
	else
		{
		// we don't allow static data in global code...
		ri.iDataLoadAddr = 0;
		ri.iDataRunAddr = 0;
		}

#ifdef BTRACE_FLEXIBLE_MEM_MODEL
	BTrace8(BTrace::EFlexibleMemModel,BTrace::EMemoryObjectIsCodeSeg,iCodeMemoryObject,this);
#endif

	// done...
	return KErrNone;
	}


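/**
XIP code executes in place from ROM, so there is no RAM to set up here.
*/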
TInt DMemModelCodeSeg::DoCreateXIP(DProcess* aProcess)
	{
//	__KTRACE_OPT(KDLL,Kern::Printf("DMemModelCodeSeg::DoCreateXIP %C proc %O", this, aProcess));
	return KErrNone;
	}


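/**
Complete loading of the code segment. User-local code is finished off by
DMemModelCodeSegMemory::Loaded(); kernel and user-global code is synchronised
with the instruction cache, unmapped from the loader and made read-only here.
XIP code needs none of this.
*/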
TInt DMemModelCodeSeg::Loaded(TCodeSegCreateInfo& aInfo)
	{
	if(iXIP)
		return DEpocCodeSeg::Loaded(aInfo);

	TBool kernel = ( (iAttr&(ECodeSegAttKernel|ECodeSegAttGlobal)) == ECodeSegAttKernel );
	TBool user_global = ( (iAttr&(ECodeSegAttKernel|ECodeSegAttGlobal)) == ECodeSegAttGlobal );
	TBool user_local = ( (iAttr&(ECodeSegAttKernel|ECodeSegAttGlobal)) == 0 );
	if(user_local)
		{
		TInt r = Memory()->Loaded(aInfo);
		if(r!=KErrNone)
			return r;
		}
	else if((kernel && iExeCodeSeg!=this) || user_global)
		{
		// user-global or kernel code...
		SRamCodeInfo& ri = RamInfo();
		UNLOCK_USER_MEMORY();
		CacheMaintenance::CodeChanged(ri.iCodeLoadAddr, ri.iCodeSize);
		LOCK_USER_MEMORY();
		MM::MappingDestroy(iCodeLoadMapping);
		// adjust iDataLoadAddr to point to the contents of the initial data
		// section as seen by the running process...
		if(ri.iDataLoadAddr)
			ri.iDataLoadAddr = ri.iCodeRunAddr+ri.iCodeSize;

		// Mark the code memory object read-only to prevent malicious code from modifying it.
		TInt r = MM::MemorySetReadOnly(iCodeMemoryObject);
		__ASSERT_ALWAYS(r == KErrNone, MM::Panic(MM::ECodeSegSetReadOnlyFailure));
		}
	return DEpocCodeSeg::Loaded(aInfo);
	}


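/**
Copy this code segment's export directory, together with the word that
precedes it, into the loader's buffer at aDest. For user-local code this is
served from the kernel's copy saved by Loaded(); for kernel code it is read
directly from the code itself.
*/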
void DMemModelCodeSeg::ReadExportDir(TUint32* aDest)
	{
	__KTRACE_OPT(KDLL,Kern::Printf("DMemModelCodeSeg::ReadExportDir %C %08x",this, aDest));

	if(!iXIP)
		{
		// This is not XIP code so the loader can't access the export directory.
		if (Memory()->iCopyOfExportDir)
			{// This must be local user side code.
			__NK_ASSERT_DEBUG((iAttr & (ECodeSegAttKernel|ECodeSegAttGlobal)) == 0);
			// Copy the kernel's copy of the export directory for this code seg to the loader's buffer.
			SRamCodeInfo& ri = RamInfo();
			TInt size = (ri.iExportDirCount + 1) * sizeof(TLinAddr);
			kumemput(aDest, Memory()->iCopyOfExportDir, size);
			}
		else
			{// This must be kernel side code.
			__NK_ASSERT_DEBUG((iAttr & (ECodeSegAttKernel|ECodeSegAttGlobal)) == ECodeSegAttKernel);
			// Copy the export directory for this code seg to the loader's buffer.
			SRamCodeInfo& ri = RamInfo();
			TInt size = (ri.iExportDirCount + 1) * sizeof(TLinAddr);
			TAny* expDirLoad = (TAny*)(ri.iExportDir - sizeof(TLinAddr));
			kumemput(aDest, expDirLoad, size);
			}
		}
	}


TBool DMemModelCodeSeg::OpenCheck(DProcess* aProcess)
	{
	return FindCheck(aProcess);
	}


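/**
Check whether aProcess may use this code segment: a segment attached to a
specific process may only be used by that process, and a segment tied to an
EXE may only be used by processes running that EXE.
*/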
TBool DMemModelCodeSeg::FindCheck(DProcess* aProcess)
	{
	__KTRACE_OPT(KDLL,Kern::Printf("CSEG:%08x Compat? proc=%O",this,aProcess));
	if (aProcess)
		{
		DMemModelProcess& p=*(DMemModelProcess*)aProcess;
		DCodeSeg* pPSeg=p.CodeSeg();
		if (iAttachProcess && iAttachProcess!=aProcess)
			return EFalse;
		if (iExeCodeSeg && iExeCodeSeg!=pPSeg)
			return EFalse;
		}
	return ETrue;
	}


void DMemModelCodeSeg::BTracePrime(TInt aCategory)
	{
	DCodeSeg::BTracePrime(aCategory);

#ifdef BTRACE_FLEXIBLE_MEM_MODEL
	if (aCategory == BTrace::EFlexibleMemModel || aCategory == -1)
		{
		// code seg mutex is held here, so memory objects cannot be destroyed
		DMemModelCodeSegMemory* codeSegMemory = Memory();
		if (codeSegMemory)
			{
			if (codeSegMemory->iCodeMemoryObject)
				{
				BTrace8(BTrace::EFlexibleMemModel,BTrace::EMemoryObjectIsCodeSeg,Memory()->iCodeMemoryObject,this);
				}
			}
		else
			{
			if (iCodeMemoryObject)
				{
				BTrace8(BTrace::EFlexibleMemModel,BTrace::EMemoryObjectIsCodeSeg,iCodeMemoryObject,this);
				}
			}
		}
#endif
	}


//
// TPagedCodeInfo
//

TPagedCodeInfo::~TPagedCodeInfo()
	{
	Kern::Free(iCodeRelocTable);
	Kern::Free(iCodePageOffsets);
	}


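/**
Read and validate the file block map for a demand-paged code segment from
user side: record the compression scheme (and, for byte-pair compression,
the per-page file offsets), locate the paging device for the drive holding
the file, and initialise iBlockMap for use when paging code in.
*/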
TInt TPagedCodeInfo::ReadBlockMap(const TCodeSegCreateInfo& aInfo)
	{
	if(aInfo.iCodeBlockMapEntriesSize <= 0)
		return KErrArgument;  // no block map provided

	// get compression data...
	iCompressionType = aInfo.iCompressionType;
	switch(iCompressionType)
		{
	case KFormatNotCompressed:
		__ASSERT_COMPILE(KFormatNotCompressed==0); // Decompress() assumes this
		break;

	case KUidCompressionBytePair:
		{
		if(!aInfo.iCodePageOffsets)
			return KErrArgument;

		TInt pageCount = MM::RoundToPageCount(aInfo.iCodeSize);

		TInt size = sizeof(TInt32) * (pageCount + 1);
		iCodePageOffsets = (TInt32*)Kern::Alloc(size);
		if(!iCodePageOffsets)
			return KErrNoMemory;
		kumemget32(iCodePageOffsets, aInfo.iCodePageOffsets, size);

#ifdef __DUMP_BLOCKMAP_INFO
		Kern::Printf("CodePageOffsets:");
		for (TInt i = 0 ; i < pageCount + 1 ; ++i)
			Kern::Printf("  %08x", iCodePageOffsets[i]);
#endif

		TInt last = 0;
		for(TInt j=0; j<pageCount+1; ++j)
			{
			if(iCodePageOffsets[j] < last ||
				iCodePageOffsets[j] > (aInfo.iCodeLengthInFile + aInfo.iCodeStartInFile))
				{
				__NK_ASSERT_DEBUG(0);
				return KErrCorrupt;
				}
			last = iCodePageOffsets[j];
			}
		}
		break;

	default:
		return KErrNotSupported;
		}

	// Copy block map data itself...

#ifdef __DUMP_BLOCKMAP_INFO
	Kern::Printf("Original block map");
	Kern::Printf("  block granularity: %d", aInfo.iCodeBlockMapCommon.iBlockGranularity);
	Kern::Printf("  block start offset: %x", aInfo.iCodeBlockMapCommon.iBlockStartOffset);
	Kern::Printf("  start block address: %016lx", aInfo.iCodeBlockMapCommon.iStartBlockAddress);
	Kern::Printf("  local drive number: %d", aInfo.iCodeBlockMapCommon.iLocalDriveNumber);
	Kern::Printf("  entry size: %d", aInfo.iCodeBlockMapEntriesSize);
#endif

	// Find relevant paging device
	iCodeLocalDrive = aInfo.iCodeBlockMapCommon.iLocalDriveNumber;
	if(TUint(iCodeLocalDrive) >= (TUint)KMaxLocalDrives)
		{
		__KTRACE_OPT(KPAGING,Kern::Printf("Bad local drive number"));
		return KErrArgument;
		}

	DPagingDevice* device = CodePagingDevice(iCodeLocalDrive);
	if(!device)
		{
		__KTRACE_OPT(KPAGING,Kern::Printf("No paging device installed for drive"));
		return KErrNotSupported;
		}

	// Set code start offset
	iCodeStartInFile = aInfo.iCodeStartInFile;
	if(iCodeStartInFile < 0)
		{
		__KTRACE_OPT(KPAGING,Kern::Printf("Bad code start offset"));
		return KErrArgument;
		}

	// Allocate buffer for block map and copy from user-side
	TBlockMapEntryBase* buffer = (TBlockMapEntryBase*)Kern::Alloc(aInfo.iCodeBlockMapEntriesSize);
	if(!buffer)
		return KErrNoMemory;
	kumemget32(buffer, aInfo.iCodeBlockMapEntries, aInfo.iCodeBlockMapEntriesSize);

#ifdef __DUMP_BLOCKMAP_INFO
	Kern::Printf("  entries:");
	for (TInt k = 0 ; k < aInfo.iCodeBlockMapEntriesSize / sizeof(TBlockMapEntryBase) ; ++k)
		Kern::Printf("    %d: %d blocks at %08x", k, buffer[k].iNumberOfBlocks, buffer[k].iStartBlock);
#endif

	// Initialise block map
	TInt r = iBlockMap.Initialise(aInfo.iCodeBlockMapCommon,
								  buffer,
								  aInfo.iCodeBlockMapEntriesSize,
								  device->iReadUnitShift,
								  iCodeStartInFile + aInfo.iCodeLengthInFile);
	if(r!=KErrNone)
		{
		Kern::Free(buffer);
		return r;
		}

#if defined(__DUMP_BLOCKMAP_INFO) && defined(_DEBUG)
	iBlockMap.Dump();
#endif

	iCodeSize = aInfo.iCodeSize;
	return KErrNone;
	}


/**
Read code relocation table and import fixup table from user side.
*/
TInt TPagedCodeInfo::ReadFixupTables(const TCodeSegCreateInfo& aInfo)
	{
	iCodeRelocTableSize = aInfo.iCodeRelocTableSize;
	iImportFixupTableSize = aInfo.iImportFixupTableSize;
	iCodeDelta = aInfo.iCodeDelta;
	iDataDelta = aInfo.iDataDelta;

	// round sizes up to four-byte boundaries...
	TUint relocSize = (iCodeRelocTableSize + 3) & ~3;
	TUint fixupSize = (iImportFixupTableSize + 3) & ~3;

	// copy relocs and fixups...
	iCodeRelocTable = (TUint8*)Kern::Alloc(relocSize+fixupSize);
	if (!iCodeRelocTable)
		return KErrNoMemory;
	iImportFixupTable = iCodeRelocTable + relocSize;
	kumemget32(iCodeRelocTable, aInfo.iCodeRelocTable, relocSize);
	kumemget32(iImportFixupTable, aInfo.iImportFixupTable, fixupSize);

	return KErrNone;
	}


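/**
Apply relocations and import fixups to a single page of demand-paged code
after it has been read in. aBuffer is the address of the page's contents and
aIndex the page's index within the code segment; the first words of each
table give the range of entries belonging to that page.
*/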
void TPagedCodeInfo::ApplyFixups(TLinAddr aBuffer, TUint aIndex)
	{
//	START_PAGING_BENCHMARK;

	// relocate code...
	if(iCodeRelocTableSize)
		{
		TUint8* codeRelocTable = iCodeRelocTable;
		TUint startOffset = ((TUint32*)codeRelocTable)[aIndex];
		TUint endOffset = ((TUint32*)codeRelocTable)[aIndex+1];

		__KTRACE_OPT(KPAGING, Kern::Printf("Performing code relocation: start == %x, end == %x", startOffset, endOffset));
		__ASSERT_ALWAYS(startOffset<=endOffset && endOffset<=iCodeRelocTableSize, K::Fault(K::ECodeSegBadFixupTables));

		const TUint32 codeDelta = iCodeDelta;
		const TUint32 dataDelta = iDataDelta;

		const TUint16* ptr = (const TUint16*)(codeRelocTable + startOffset);
		const TUint16* end = (const TUint16*)(codeRelocTable + endOffset);
		while(ptr<end)
			{
			// each entry is 16 bits: the relocation type in the top four bits and
			// the offset of the word to relocate, within the page, in the low 12 bits...
			TUint16 entry = *ptr++;
			TUint32* addr = (TUint32*)(aBuffer+(entry&0x0fff));
			TUint32 word = *addr;
#ifdef _DEBUG
			TInt type = entry&0xf000;
			__NK_ASSERT_DEBUG(type==KTextRelocType || type==KDataRelocType);
#endif
			if(entry<KDataRelocType)
				word += codeDelta;
			else
				word += dataDelta;
			*addr = word;
			}
		}

	// fixup imports...
	if(iImportFixupTableSize)
		{
		TUint8* importFixupTable = iImportFixupTable;
		TUint startOffset = ((TUint32*)importFixupTable)[aIndex];
		TUint endOffset = ((TUint32*)importFixupTable)[aIndex+1];

		__KTRACE_OPT(KPAGING, Kern::Printf("Performing import fixup: start == %x, end == %x", startOffset, endOffset));
		__ASSERT_ALWAYS(startOffset<=endOffset && endOffset<=iImportFixupTableSize, K::Fault(K::ECodeSegBadFixupTables));

		const TUint16* ptr = (const TUint16*)(importFixupTable + startOffset);
		const TUint16* end = (const TUint16*)(importFixupTable + endOffset);

		while(ptr<end)
			{
			// each fixup is three 16-bit values: the offset of the word within
			// the page, then the low and high halves of the value to store...
			TUint16 offset = *ptr++;
			TUint32 wordLow = *ptr++;
			TUint32 wordHigh = *ptr++;
			TUint32 word = (wordHigh << 16) | wordLow;
//			__KTRACE_OPT(KPAGING, Kern::Printf("DP: Fixup %08x=%08x", iRamInfo.iCodeRunAddr+(page<<KPageShift)+offset, word));
			*(TUint32*)(aBuffer+offset) = word;
			}
		}

//	END_PAGING_BENCHMARK(DemandPaging::ThePager, EPagingBmFixupCodePage);
	}