os/kernelhwsrv/kernel/eka/memmodel/epoc/flexible/mcodeseg.cpp
author sl
Tue, 10 Jun 2014 14:32:02 +0200
changeset 1 260cb5ec6c19
permissions -rw-r--r--
Update contrib.
sl@0
     1
// Copyright (c) 1995-2009 Nokia Corporation and/or its subsidiary(-ies).
sl@0
     2
// All rights reserved.
sl@0
     3
// This component and the accompanying materials are made available
sl@0
     4
// under the terms of the License "Eclipse Public License v1.0"
sl@0
     5
// which accompanies this distribution, and is available
sl@0
     6
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
sl@0
     7
//
sl@0
     8
// Initial Contributors:
sl@0
     9
// Nokia Corporation - initial contribution.
sl@0
    10
//
sl@0
    11
// Contributors:
sl@0
    12
//
sl@0
    13
// Description:
sl@0
    14
//
sl@0
    15
sl@0
    16
#include <memmodel.h>
sl@0
    17
#include "mmu/mm.h"
sl@0
    18
#include "mmboot.h"
sl@0
    19
#include "mmu/mcodepaging.h"
sl@0
    20
sl@0
    21
#include "cache_maintenance.h"
sl@0
    22
sl@0
    23
sl@0
    24
/**
Memory-model hook used by the kernel to create a code segment object.
The create-info parameter is unused here; the flexible memory model
always instantiates a DMemModelCodeSeg.

@return A new DMemModelCodeSeg, or NULL if out of memory.
*/
DCodeSeg* M::NewCodeSeg(TCodeSegCreateInfo&)
	{
	__KTRACE_OPT(KDLL,Kern::Printf("M::NewCodeSeg"));
	DMemModelCodeSeg* seg = new DMemModelCodeSeg;
	return seg;
	}
sl@0
    29
sl@0
    30
sl@0
    31
//
sl@0
    32
// DMemModelCodeSegMemory
sl@0
    33
//
sl@0
    34
sl@0
    35
/**
Factory for the memory-model-specific code segment memory object.

@param aCodeSeg The code segment this memory will belong to.
@return A new DMemModelCodeSegMemory, or NULL if out of memory.
*/
DEpocCodeSegMemory* DEpocCodeSegMemory::New(DEpocCodeSeg* aCodeSeg)
	{
	DMemModelCodeSegMemory* memory = new DMemModelCodeSegMemory(aCodeSeg);
	return memory;
	}
sl@0
    39
sl@0
    40
sl@0
    41
/**
Construct the code segment memory, simply forwarding the owning code
segment to the generic DEpocCodeSegMemory base. All other members rely
on zero-initialisation (kernel objects are zero-filled on allocation —
NOTE(review): assumed from the idiom used throughout this file; confirm
against the allocator used for this class).
*/
DMemModelCodeSegMemory::DMemModelCodeSegMemory(DEpocCodeSeg* aCodeSeg)
	: DEpocCodeSegMemory(aCodeSeg)
	{
	}
sl@0
    45
sl@0
    46
sl@0
    47
/**
Allocate all memory-model resources needed before the loader can fill in
a RAM-loaded, user-local code segment: the run-address virtual allocation,
the code memory object (paged or unpaged), a writable load mapping for the
loader, and (for demand-paged code) a separate data-load area.

On any failure a bare error code is returned; partially-created resources
are not freed here — cleanup is left to Destroy()/the destructor
(NOTE(review): assumed from the teardown performed in ~DMemModelCodeSegMemory;
confirm the caller always destroys this object on failure).

@param aInfo    Loader-supplied creation info (paging flag is read here).
@param aProcess The process the EXE will run in (only used for EXEs; not
                fully constructed yet, so its os asid is read without
                opening a reference).
@return KErrNone on success, otherwise a system-wide error code.
*/
TInt DMemModelCodeSegMemory::Create(TCodeSegCreateInfo& aInfo, DMemModelProcess* aProcess)
	{
	TInt r;

	TUint codePageCount;
	TUint dataPageCount;
	TBool isDemandPaged;
	if(!aInfo.iUseCodePaging)
		{
		// Unpaged: code and initial data are loaded contiguously into one
		// committed region, so size it for both and use no separate data pages.
		isDemandPaged = 0;
		codePageCount = MM::RoundToPageCount(iRamInfo.iCodeSize+iRamInfo.iDataSize);
		dataPageCount = 0;
		}
	else
		{
		// Demand paged: code pages are backed by the file, so the initial
		// data section must be kept separately (in iDataSectionMemory) for
		// use when new processes start.
		isDemandPaged = 1;
		codePageCount = MM::RoundToPageCount(iRamInfo.iCodeSize);
		dataPageCount = MM::RoundToPageCount(iRamInfo.iDataSize);

		iDataSectionMemory = Kern::Alloc(iRamInfo.iDataSize);
		if(!iDataSectionMemory)
			return KErrNoMemory;
		}

	iCodeSeg->iSize = codePageCount<<KPageShift;

	// allocate virtual address for code to run at...
	const TUint codeSize = codePageCount<<KPageShift;
	if(iCodeSeg->IsExe())
		{// Get the os asid without opening a reference on it as aProcess isn't fully 
		// created yet so won't free its os asid.
		// EXE code lives in the process's own address space, so the same
		// address may be reused by other processes.
		r = MM::VirtualAlloc(aProcess->OsAsid(),iRamInfo.iCodeRunAddr,codeSize,isDemandPaged);
		if(r!=KErrNone)
			return r;
		aProcess->iCodeVirtualAllocSize = codeSize;
		aProcess->iCodeVirtualAllocAddress = iRamInfo.iCodeRunAddr;
		iCodeSeg->iAttr |= ECodeSegAttAddrNotUnique;
		}
	else
		{
		// DLL code gets an address from the common (globally unique) region.
		r = MM::VirtualAllocCommon(iRamInfo.iCodeRunAddr,codeSize,isDemandPaged);
		if(r!=KErrNone)
			return r;
		iVirtualAllocCommonSize = codeSize;
		}

	// create memory object for codeseg...
	if(isDemandPaged)
		{
		// create memory object...
		r = MM::PagedCodeNew(iCodeMemoryObject, codePageCount, iPagedCodeInfo);
		if(r!=KErrNone)
			return r;

		// get file blockmap for codeseg contents...
		r = iPagedCodeInfo->ReadBlockMap(aInfo);
		if (r != KErrNone)
			return r;
		}
	else
		{
		// create memory object...
		// NoWipe: the loader fills it, and the tail is wiped explicitly below.
		TMemoryCreateFlags flags = (TMemoryCreateFlags)(EMemoryCreateNoWipe | EMemoryCreateAllowExecution);
		r = MM::MemoryNew(iCodeMemoryObject, EMemoryObjectMovable, codePageCount, flags);
		if(r!=KErrNone)
			return r;

		// commit memory...
		r = MM::MemoryAlloc(iCodeMemoryObject,0,codePageCount);
		if(r!=KErrNone)
			return r;
		}

	// create a mapping of the memory for the loader...
	// No need to open reference on os asid it is the current thread/process's.
	DMemModelProcess* pP = (DMemModelProcess*)TheCurrentThread->iOwningProcess;
	r = MM::MappingNew(iCodeLoadMapping,iCodeMemoryObject,EUserReadWrite,pP->OsAsid());
	if(r!=KErrNone)
		return r;

	iRamInfo.iCodeLoadAddr = MM::MappingBase(iCodeLoadMapping);

	// work out where the loader is to put the loaded data section...
	TInt loadSize = iRamInfo.iCodeSize; // size of memory filled by loader
	if(iRamInfo.iDataSize)
		{
		if(!dataPageCount)
			{
			// data loaded immediately after code...
			// (unpaged case: one contiguous load region was committed above)
			iRamInfo.iDataLoadAddr = iRamInfo.iCodeLoadAddr+iRamInfo.iCodeSize;
			loadSize += iRamInfo.iDataSize;
			}
		else
			{
			// create memory object for data...
			// (paged case: data is loaded into a temporary object which is
			// destroyed again in Loaded(), after its contents are captured)
			DMemoryObject* dataMemory;
			r = MM::MemoryNew(dataMemory, EMemoryObjectMovable, dataPageCount, EMemoryCreateNoWipe);
			if(r!=KErrNone)
				return r;

			// commit memory...
			r = MM::MemoryAlloc(dataMemory,0,dataPageCount);
			if(r==KErrNone)
				{
				// create a mapping of the memory for the loader...
				// No need to open reference on os asid it is the current thread/process's.
				r = MM::MappingNew(iDataLoadMapping,dataMemory,EUserReadWrite,pP->OsAsid());
				}

			if(r!=KErrNone)
				{
				// dataMemory is not yet owned by any member, free it here.
				MM::MemoryDestroy(dataMemory);
				return r;
				}

			iRamInfo.iDataLoadAddr = MM::MappingBase(iDataLoadMapping);
			}
		}

	if(!isDemandPaged)
		{
		// wipe memory that the loader wont fill...
		// (0x03 fill makes stale-data use recognisable; user memory must be
		// unlocked because the load mapping is a user-mode mapping)
		UNLOCK_USER_MEMORY();
		memset((TAny*)(iRamInfo.iCodeLoadAddr+loadSize), 0x03, codeSize-loadSize);
		LOCK_USER_MEMORY();
		}

	// done...
	// Remember which process created us; Loaded() asserts the same process
	// finishes the job.
	iCreator = pP;
	
	return KErrNone;
	}
sl@0
   179
sl@0
   180
sl@0
   181
/**
Finish construction once the loader has written the code (and data) into
the load mappings created by Create(): apply/record fixup information,
capture the initial data section and export directory, tear down the
loader's writable mappings, and make the code memory read-only.

Must be called by the same process that called Create() (asserted below).

@param aInfo Loader-supplied info (fixup tables for paged code).
@return KErrNone on success, otherwise a system-wide error code.
*/
TInt DMemModelCodeSegMemory::Loaded(TCodeSegCreateInfo& aInfo)
	{
	if(iPagedCodeInfo)
		{
		// get information needed to fixup code for it's run address...
		TInt r = iPagedCodeInfo->ReadFixupTables(aInfo);
		if(r!=KErrNone)
			return r;
		MM::PagedCodeLoaded(iCodeMemoryObject, iRamInfo.iCodeLoadAddr);
		}
	else
		{
		// make code visible to instruction cache...
		UNLOCK_USER_MEMORY();
		CacheMaintenance::CodeChanged(iRamInfo.iCodeLoadAddr, iRamInfo.iCodeSize);
		LOCK_USER_MEMORY();
		}

	// adjust iDataLoadAddr to point to address contents for initial data section
	// in running process...
	if(iRamInfo.iDataLoadAddr)
		{
		TAny* dataSection = iDataSectionMemory;
		if(dataSection)
			{
			// contents for initial data section to be stored in iDataSectionMemory...
			// (paged case: copy out of the temporary data-load mapping before
			// it is destroyed below)
			UNLOCK_USER_MEMORY();
			memcpy(dataSection,(TAny*)iRamInfo.iDataLoadAddr,iRamInfo.iDataSize);
			LOCK_USER_MEMORY();
			iRamInfo.iDataLoadAddr = (TLinAddr)dataSection;
			}
		else
			{
			// contents for initial data section stored after code...
			__NK_ASSERT_DEBUG(iRamInfo.iDataLoadAddr==iRamInfo.iCodeLoadAddr+iRamInfo.iCodeSize); // check data loaded at end of code
			iRamInfo.iDataLoadAddr = iRamInfo.iCodeRunAddr+iRamInfo.iCodeSize;
			}
		}

	// copy export directory (this will now have fixups applied)...
	TInt exportDirSize = iRamInfo.iExportDirCount * sizeof(TLinAddr);
	if(exportDirSize > 0 || (exportDirSize==0 && (iCodeSeg->iAttr&ECodeSegAttNmdExpData)) )
		{
		// One extra word is copied: the word immediately preceding the export
		// directory (hence the -sizeof(TLinAddr) below) is included, matching
		// the (count+1) sizing used in ReadExportDir().
		exportDirSize += sizeof(TLinAddr);
		TLinAddr* expDir = (TLinAddr*)Kern::Alloc(exportDirSize);
		if(!expDir)
			return KErrNoMemory;
		iCopyOfExportDir = expDir;
		// Translate the export directory's run address into its current
		// load-mapping address.
		TLinAddr expDirLoad = iRamInfo.iExportDir-iRamInfo.iCodeRunAddr+iRamInfo.iCodeLoadAddr;
		UNLOCK_USER_MEMORY();
		memcpy(expDir,(TAny*)(expDirLoad-sizeof(TLinAddr)),exportDirSize);
		LOCK_USER_MEMORY();
		}

	// unmap code from loading process...
	DMemModelProcess* pP=(DMemModelProcess*)TheCurrentThread->iOwningProcess;
	__ASSERT_ALWAYS(iCreator==pP, MM::Panic(MM::ECodeSegLoadedNotCreator));
	MM::MappingDestroy(iCodeLoadMapping);
	MM::MappingAndMemoryDestroy(iDataLoadMapping);
	iCreator=NULL;

	// Mark the code memory object read only to prevent malicious code modifying it.
	TInt r = MM::MemorySetReadOnly(iCodeMemoryObject);
	__ASSERT_ALWAYS(r == KErrNone, MM::Panic(MM::ECodeSegSetReadOnlyFailure));

	return KErrNone;
	}
sl@0
   248
sl@0
   249
sl@0
   250
/**
Early teardown of the loader-side resources: destroy the code load
mapping, and the data load mapping together with its backing memory
object. Safe to call when either mapping was never created — the MM::
destroy functions accept the null/uninitialised case (NOTE(review):
assumed from the unconditional calls here and in the destructor; confirm
against the MM mapping API).
*/
void DMemModelCodeSegMemory::Destroy()
	{
	MM::MappingDestroy(iCodeLoadMapping);
	MM::MappingAndMemoryDestroy(iDataLoadMapping);
	}
sl@0
   255
sl@0
   256
sl@0
   257
/**
Release everything Create() allocated: load mappings, the code memory
object, the common virtual address range (DLL case only — EXE ranges are
owned by the process via iCodeVirtualAllocAddress), and the kernel-heap
copies of the export directory and initial data section.
Must only run once no one is using the memory (iAccessCount==0).
*/
DMemModelCodeSegMemory::~DMemModelCodeSegMemory()
	{
	__KTRACE_OPT(KDLL,Kern::Printf("DMemModelCodeSegMemory::~DMemModelCodeSegMemory %x", this));
	__NK_ASSERT_DEBUG(iAccessCount==0);

	MM::MappingDestroy(iCodeLoadMapping);
	MM::MappingAndMemoryDestroy(iDataLoadMapping);
	MM::MemoryDestroy(iCodeMemoryObject);

	// Only set for non-EXE code (see Create()); zero means nothing to free.
	if(iVirtualAllocCommonSize)
		MM::VirtualFreeCommon(iRamInfo.iCodeRunAddr, iVirtualAllocCommonSize);

	Kern::Free(iCopyOfExportDir);
	Kern::Free(iDataSectionMemory);
	}
sl@0
   272
sl@0
   273
sl@0
   274
//
sl@0
   275
// DMemModelCodeSeg
sl@0
   276
//
sl@0
   277
sl@0
   278
/**
Default construct a flexible-memory-model code segment. All members are
left in their default (zeroed) state; real setup happens in DoCreateRam /
DoCreateXIP.
*/
DMemModelCodeSeg::DMemModelCodeSeg()
	{
	}
sl@0
   281
sl@0
   282
sl@0
   283
/**
Destroy a code segment and all memory-model resources it may own,
covering both the user-local case (resources held by the
DMemModelCodeSegMemory returned by Memory()) and the kernel/user-global
case (mappings and memory object held directly on this object).
Mapping/memory teardown happens under the global code seg lock
(DCodeSeg::Wait/Signal).
*/
DMemModelCodeSeg::~DMemModelCodeSeg()
	{
	__KTRACE_OPT(KDLL,Kern::Printf("DMemModelCodeSeg::Destruct %C", this));
	DCodeSeg::Wait();

	// Direct resources (kernel / user-global code); no-ops if never created.
	MM::MappingDestroy(iCodeLoadMapping);
	MM::MappingDestroy(iCodeGlobalMapping);
	MM::MemoryDestroy(iCodeMemoryObject);

	// User-local code keeps its resources in the code seg memory object.
	if(Memory())
		Memory()->Destroy();

	// Virtual range reserved for a DLL's static data (see DoCreateRam).
	if(iDataAllocSize)
		MM::VirtualFreeCommon(iDataAllocBase,iDataAllocSize);

	DCodeSeg::Signal();

	// Static data buffer for kernel-side code; freed outside the lock.
	Kern::Free(iKernelData);

	DEpocCodeSeg::Destruct();
	}
sl@0
   304
sl@0
   305
sl@0
   306
/**
Create the memory for a RAM-loaded (non-XIP) code segment. User-local
code delegates to DMemModelCodeSegMemory::Create(); kernel and
user-global code get a memory object with a writable load mapping plus a
global execute mapping created directly here.

@param aInfo    Loader-supplied creation info (paging flags are read).
@param aProcess The process an EXE will run in; forwarded to
                Memory()->Create() for user-local code.
@return KErrNone on success, otherwise a system-wide error code.
*/
TInt DMemModelCodeSeg::DoCreateRam(TCodeSegCreateInfo& aInfo, DProcess* aProcess)
	{
	__KTRACE_OPT(KDLL,Kern::Printf("DMemModelCodeSeg::DoCreateRam %C", this));

	SRamCodeInfo& ri = RamInfo();
	iSize = MM::RoundToPageSize(ri.iCodeSize+ri.iDataSize);
	if (iSize==0)
		return KErrCorrupt;

	// Classify by attribute bits: kernel, user-global, or plain user-local.
	TBool kernel = ( (iAttr&(ECodeSegAttKernel|ECodeSegAttGlobal)) == ECodeSegAttKernel );
//	TBool user_global = ( (iAttr&(ECodeSegAttKernel|ECodeSegAttGlobal)) == ECodeSegAttGlobal );
	TBool user_local = ( (iAttr&(ECodeSegAttKernel|ECodeSegAttGlobal)) == 0 );

	TUint total_data_size = ri.iDataSize+ri.iBssSize;

	if(user_local)
		{
		// setup paging attribute for code...
		if(aInfo.iUseCodePaging)
			iAttr |= ECodeSegAttCodePaged;

		// DLLs with static data need a reserved run address for it;
		// EXE data addresses are handled by the process itself.
		if(total_data_size && !IsExe())
			{
			// setup paging attribute for data section...
			if(aInfo.iUseCodePaging)
				if(K::MemModelAttributes & EMemModelAttrDataPaging)
					iAttr |= ECodeSegAttDataPaged;

			// allocate virtual address for data section...
			TInt r = MM::VirtualAllocCommon(iDataAllocBase,total_data_size,iAttr&ECodeSegAttDataPaged);
			if(r!=KErrNone)
				return r;
			iDataAllocSize = total_data_size;
			ri.iDataRunAddr = iDataAllocBase;
			}

		// create DCodeSegMemory for RAM loaded user local code...
		TInt r = Memory()->Create(aInfo,(DMemModelProcess*)aProcess);

#ifdef BTRACE_FLEXIBLE_MEM_MODEL
		if (r == KErrNone)
			{
			BTrace8(BTrace::EFlexibleMemModel,BTrace::EMemoryObjectIsCodeSeg,Memory()->iCodeMemoryObject,this);
			}
#endif
		
		return r;
		}

	// kernel or user-global code...

	// create memory object for codeseg...
	TMemoryCreateFlags flags = EMemoryCreateAllowExecution;
	if(kernel)
		{
		// Kernel code is loaded by the kernel itself, so skip the wipe.
		flags = (TMemoryCreateFlags)(flags|EMemoryCreateNoWipe);
		}
	TInt r = MM::MemoryNew(iCodeMemoryObject, EMemoryObjectMovable, MM::BytesToPages(iSize), flags);
	if(r!=KErrNone)
		return r;

	// commit memory...
	r = MM::MemoryAlloc(iCodeMemoryObject,0,MM::BytesToPages(iSize));
	if(r!=KErrNone)
		return r;

	// create a mapping of the memory for the loader...
	// No need to open reference on os asid it is the current thread/process's.
	DMemModelProcess* pP = (DMemModelProcess*)TheCurrentThread->iOwningProcess;
	r = MM::MappingNew(iCodeLoadMapping,iCodeMemoryObject,EUserReadWrite,pP->OsAsid());
	if(r!=KErrNone)
		return r;
	ri.iCodeLoadAddr = MM::MappingBase(iCodeLoadMapping);

	// create a global mapping of the memory for the codeseg to run at...
	r = MM::MappingNew(iCodeGlobalMapping,iCodeMemoryObject,kernel?ESupervisorExecute:EUserExecute,KKernelOsAsid);
	if(r!=KErrNone)
		return r;
	ri.iCodeRunAddr = MM::MappingBase(iCodeGlobalMapping);

	if(kernel)
		{
		// setup data section memory...
		if (ri.iDataSize)
			ri.iDataLoadAddr = ri.iCodeLoadAddr+ri.iCodeSize;
		if (total_data_size)
			{
			// Kernel static data lives on the kernel heap.
			iKernelData = Kern::Alloc(total_data_size);
			if (!iKernelData)
				return KErrNoMemory;
			ri.iDataRunAddr = (TLinAddr)iKernelData;
			}
		}
	else
		{
		// we don't allow static data in global code...
		ri.iDataLoadAddr = 0;
		ri.iDataRunAddr = 0;
		}

#ifdef BTRACE_FLEXIBLE_MEM_MODEL
	BTrace8(BTrace::EFlexibleMemModel,BTrace::EMemoryObjectIsCodeSeg,iCodeMemoryObject,this);
#endif

	// done...
	return KErrNone;
	}
sl@0
   413
sl@0
   414
sl@0
   415
/**
Create step for execute-in-place (XIP/ROM) code segments. XIP code is
already in place in ROM, so no memory-model resources are needed and
this always succeeds.

@param aProcess Unused in this memory model.
@return KErrNone always.
*/
TInt DMemModelCodeSeg::DoCreateXIP(DProcess* aProcess)
	{
//	__KTRACE_OPT(KDLL,Kern::Printf("DMemModelCodeSeg::DoCreateXIP %C proc %O", this, aProcess));
	return KErrNone;
	}
sl@0
   420
sl@0
   421
sl@0
   422
/**
Complete loading of this code segment. XIP segments need no work beyond
the generic base handling. User-local segments delegate to the code seg
memory's Loaded(). Kernel (non-EXE) and user-global segments have their
instruction cache synchronised, the loader's writable mapping destroyed,
and the code memory made read-only here.

@param aInfo Loader-supplied info, forwarded to the base class.
@return KErrNone on success, otherwise a system-wide error code.
*/
TInt DMemModelCodeSeg::Loaded(TCodeSegCreateInfo& aInfo)
	{
	if(iXIP)
		return DEpocCodeSeg::Loaded(aInfo);

	TBool kernel = ( (iAttr&(ECodeSegAttKernel|ECodeSegAttGlobal)) == ECodeSegAttKernel );
	TBool user_global = ( (iAttr&(ECodeSegAttKernel|ECodeSegAttGlobal)) == ECodeSegAttGlobal );
	TBool user_local = ( (iAttr&(ECodeSegAttKernel|ECodeSegAttGlobal)) == 0 );
	if(user_local)
		{
		TInt r = Memory()->Loaded(aInfo);
		if(r!=KErrNone)
			return r;
		}
	else if((kernel && iExeCodeSeg!=this) || user_global)
		{
		// user-global or kernel code...
		// (the kernel EXE itself — iExeCodeSeg==this — is excluded; it is
		// already running and needs none of this)
		SRamCodeInfo& ri = RamInfo();
		UNLOCK_USER_MEMORY();
		CacheMaintenance::CodeChanged(ri.iCodeLoadAddr, ri.iCodeSize);
		LOCK_USER_MEMORY();
		MM::MappingDestroy(iCodeLoadMapping);
		// adjust iDataLoadAddr to point to address contents for initial data section
		// in running process...
		if(ri.iDataLoadAddr)
			ri.iDataLoadAddr = ri.iCodeRunAddr+ri.iCodeSize;

		// Mark the code memory object read only to prevent malicious code modifying it.
		TInt r = MM::MemorySetReadOnly(iCodeMemoryObject);
		__ASSERT_ALWAYS(r == KErrNone, MM::Panic(MM::ECodeSegSetReadOnlyFailure));
		}
	return DEpocCodeSeg::Loaded(aInfo);
	}
sl@0
   455
sl@0
   456
sl@0
   457
/**
Copy this code segment's export directory into a loader-supplied
user-side buffer. For non-XIP user-local code the kernel-held copy made
in DMemModelCodeSegMemory::Loaded() is used; for kernel code the
directory is read directly from its run address. XIP code is skipped —
the loader can read the ROM directly. The copy includes one extra word
preceding the directory, matching the (count+1) copy made at load time.

@param aDest User-side destination buffer (written with kumemput).
*/
void DMemModelCodeSeg::ReadExportDir(TUint32* aDest)
	{
	__KTRACE_OPT(KDLL,Kern::Printf("DMemModelCodeSeg::ReadExportDir %C %08x",this, aDest));

	if(!iXIP)
		{
		// This is not XIP code so the loader can't access the export directory. 
		if (Memory()->iCopyOfExportDir)
			{// This must be local user side code.
			__NK_ASSERT_DEBUG((iAttr & (ECodeSegAttKernel|ECodeSegAttGlobal)) == 0);
			// Copy the kernel's copy of the export directory for this code seg to the loader's buffer.
			SRamCodeInfo& ri = RamInfo();
			TInt size = (ri.iExportDirCount + 1) * sizeof(TLinAddr);
			kumemput(aDest, Memory()->iCopyOfExportDir, size);
			}
		else
			{// This must be kernel side code.
			__NK_ASSERT_DEBUG((iAttr & (ECodeSegAttKernel|ECodeSegAttGlobal)) == ECodeSegAttKernel);
			// Copy the export directory for this code seg to the loader's buffer.
			SRamCodeInfo& ri = RamInfo();
			TInt size = (ri.iExportDirCount + 1) * sizeof(TLinAddr);
			TAny* expDirLoad = (TAny*)(ri.iExportDir - sizeof(TLinAddr));
			kumemput(aDest, expDirLoad, size);
			}
		}
	}
sl@0
   483
sl@0
   484
sl@0
   485
/**
Check whether aProcess may open a reference on this code segment.
In this memory model the open check is identical to the find check.

@param aProcess The process attempting the open (may be NULL).
@return ETrue if permitted, EFalse otherwise.
*/
TBool DMemModelCodeSeg::OpenCheck(DProcess* aProcess)
	{
	return FindCheck(aProcess);
	}
sl@0
   489
sl@0
   490
sl@0
   491
/**
Check whether this code segment is compatible with (usable by) the given
process: it must not be attached to a different process, and if it is
bound to a specific EXE code segment, that must be the process's own.

@param aProcess The process to check against; NULL means no restriction.
@return ETrue if the process may use this code segment, EFalse otherwise.
*/
TBool DMemModelCodeSeg::FindCheck(DProcess* aProcess)
	{
	__KTRACE_OPT(KDLL,Kern::Printf("CSEG:%08x Compat? proc=%O",this,aProcess));
	if (!aProcess)
		return ETrue;	// no process given, so no constraint to violate
	DMemModelProcess& proc = *(DMemModelProcess*)aProcess;
	DCodeSeg* procExeSeg = proc.CodeSeg();
	// Attached to some other process?
	if (iAttachProcess && iAttachProcess!=aProcess)
		return EFalse;
	// Bound to an EXE that isn't this process's EXE?
	if (iExeCodeSeg && iExeCodeSeg!=procExeSeg)
		return EFalse;
	return ETrue;
	}
sl@0
   505
sl@0
   506
sl@0
   507
/**
Emit the BTrace records describing this code segment, then (when the
flexible-memory-model trace category is enabled) the record that
associates its code memory object with it. For user-local code the
memory object lives in the code seg memory; otherwise it is held
directly on this object.

@param aCategory The trace category being primed, or -1 for all.
*/
void DMemModelCodeSeg::BTracePrime(TInt aCategory)
	{
	DCodeSeg::BTracePrime(aCategory);

#ifdef BTRACE_FLEXIBLE_MEM_MODEL
	if (aCategory == BTrace::EFlexibleMemModel || aCategory == -1)
		{
		// code seg mutex is held here, so memory objects cannot be destroyed
		DMemModelCodeSegMemory* mem = Memory();
		if (mem)
			{
			// user-local code: memory object owned by the code seg memory
			if (mem->iCodeMemoryObject)
				BTrace8(BTrace::EFlexibleMemModel,BTrace::EMemoryObjectIsCodeSeg,mem->iCodeMemoryObject,this);
			}
		else if (iCodeMemoryObject)
			{
			// kernel / user-global code: memory object owned directly
			BTrace8(BTrace::EFlexibleMemModel,BTrace::EMemoryObjectIsCodeSeg,iCodeMemoryObject,this);
			}
		}
#endif	
	}
sl@0
   533
sl@0
   534
sl@0
   535
//
sl@0
   536
// TPagedCodeInfo
sl@0
   537
//
sl@0
   538
sl@0
   539
/**
Free the fixup and page-offset tables. iImportFixupTable is NOT freed —
it points into the single allocation owned by iCodeRelocTable (see
ReadFixupTables()).
*/
TPagedCodeInfo::~TPagedCodeInfo()
	{
	Kern::Free(iCodeRelocTable);
	Kern::Free(iCodePageOffsets);
	}
sl@0
   544
sl@0
   545
sl@0
   546
/**
Read and validate the file block map describing where a demand-paged
code segment's contents live on media, copying the user-side data
(page-offset table for bytepair compression, then the block map entries)
into kernel memory and initialising iBlockMap from it.

On success the entries buffer is owned by iBlockMap (NOTE(review):
assumed from the free-on-failure-only pattern below; confirm that
TBlockMap::Initialise takes ownership).

@param aInfo Loader-supplied creation info; iCodePageOffsets and
             iCodeBlockMapEntries point to user-side memory.
@return KErrNone on success, otherwise a system-wide error code.
*/
TInt TPagedCodeInfo::ReadBlockMap(const TCodeSegCreateInfo& aInfo)
	{
	if(aInfo.iCodeBlockMapEntriesSize <= 0)
		return KErrArgument;  // no block map provided

	// get compression data...
	iCompressionType = aInfo.iCompressionType;
	switch(iCompressionType)
		{
	case KFormatNotCompressed:
		__ASSERT_COMPILE(KFormatNotCompressed==0); // Decompress() assumes this
		break;

	case KUidCompressionBytePair:
		{
		if(!aInfo.iCodePageOffsets)
			return KErrArgument;

		TInt pageCount = MM::RoundToPageCount(aInfo.iCodeSize);

		// One offset per page plus a final end offset.
		TInt size = sizeof(TInt32) * (pageCount + 1);
		iCodePageOffsets = (TInt32*)Kern::Alloc(size);
		if(!iCodePageOffsets)
			return KErrNoMemory;
		kumemget32(iCodePageOffsets, aInfo.iCodePageOffsets, size);

#ifdef __DUMP_BLOCKMAP_INFO
		Kern::Printf("CodePageOffsets:");
		for (TInt i = 0 ; i < pageCount + 1 ; ++i)
			Kern::Printf("  %08x", iCodePageOffsets[i]);
#endif

		// Offsets must be non-decreasing and within the file's code region —
		// this data came from user side, so treat it as untrusted.
		TInt last = 0;
		for(TInt j=0; j<pageCount+1; ++j)
			{
			if(iCodePageOffsets[j] < last ||
				iCodePageOffsets[j] > (aInfo.iCodeLengthInFile + aInfo.iCodeStartInFile))
				{
				__NK_ASSERT_DEBUG(0);
				return KErrCorrupt;
				}
			last = iCodePageOffsets[j];
			}
		}
		break;

	default:
		return KErrNotSupported;
		}

	// Copy block map data itself...

#ifdef __DUMP_BLOCKMAP_INFO
	Kern::Printf("Original block map");
	Kern::Printf("  block granularity: %d", aInfo.iCodeBlockMapCommon.iBlockGranularity);
	Kern::Printf("  block start offset: %x", aInfo.iCodeBlockMapCommon.iBlockStartOffset);
	Kern::Printf("  start block address: %016lx", aInfo.iCodeBlockMapCommon.iStartBlockAddress);
	Kern::Printf("  local drive number: %d", aInfo.iCodeBlockMapCommon.iLocalDriveNumber);
	Kern::Printf("  entry size: %d", aInfo.iCodeBlockMapEntriesSize);
#endif

	// Find relevant paging device
	iCodeLocalDrive = aInfo.iCodeBlockMapCommon.iLocalDriveNumber;
	if(TUint(iCodeLocalDrive) >= (TUint)KMaxLocalDrives)
		{
		__KTRACE_OPT(KPAGING,Kern::Printf("Bad local drive number"));
		return KErrArgument;
		}

	DPagingDevice* device = CodePagingDevice(iCodeLocalDrive);
	if(!device)
		{
		__KTRACE_OPT(KPAGING,Kern::Printf("No paging device installed for drive"));
		return KErrNotSupported;
		}

	// Set code start offset
	iCodeStartInFile = aInfo.iCodeStartInFile;
	if(iCodeStartInFile < 0)
		{
		__KTRACE_OPT(KPAGING,Kern::Printf("Bad code start offset"));
		return KErrArgument;
		}

	// Allocate buffer for block map and copy from user-side
	TBlockMapEntryBase* buffer = (TBlockMapEntryBase*)Kern::Alloc(aInfo.iCodeBlockMapEntriesSize);
	if(!buffer)
		return KErrNoMemory;
	kumemget32(buffer, aInfo.iCodeBlockMapEntries, aInfo.iCodeBlockMapEntriesSize);

#ifdef __DUMP_BLOCKMAP_INFO
	Kern::Printf("  entries:");
	for (TInt k = 0 ; k < aInfo.iCodeBlockMapEntriesSize / sizeof(TBlockMapEntryBase) ; ++k)
		Kern::Printf("    %d: %d blocks at %08x", k, buffer[k].iNumberOfBlocks, buffer[k].iStartBlock);
#endif

	// Initialise block map
	TInt r = iBlockMap.Initialise(aInfo.iCodeBlockMapCommon,
								  buffer,
								  aInfo.iCodeBlockMapEntriesSize,
								  device->iReadUnitShift,
								  iCodeStartInFile + aInfo.iCodeLengthInFile);
	if(r!=KErrNone)
		{
		Kern::Free(buffer);
		return r;
		}

#if defined(__DUMP_BLOCKMAP_INFO) && defined(_DEBUG)
	iBlockMap.Dump();
#endif

	iCodeSize = aInfo.iCodeSize;
	return KErrNone;
	}
sl@0
   661
sl@0
   662
sl@0
   663
/**
sl@0
   664
Read code relocation table and import fixup table from user side.
sl@0
   665
*/
sl@0
   666
/**
Read code relocation table and import fixup table from user side.

Both tables are copied into a single kernel-heap allocation owned by
iCodeRelocTable; iImportFixupTable points into it and must not be freed
separately (see ~TPagedCodeInfo). The code/data deltas used by
ApplyFixups() are captured here as well.

@param aInfo Loader-supplied info; table pointers are user-side memory.
@return KErrNone, or KErrNoMemory if the combined buffer can't be allocated.
*/
TInt TPagedCodeInfo::ReadFixupTables(const TCodeSegCreateInfo& aInfo)
	{
	iCodeRelocTableSize = aInfo.iCodeRelocTableSize;
	iImportFixupTableSize = aInfo.iImportFixupTableSize;
	iCodeDelta = aInfo.iCodeDelta;
	iDataDelta = aInfo.iDataDelta;

	// round sizes up to four-byte boundaries...
	// (kumemget32 below copies whole words)
	TUint relocSize = (iCodeRelocTableSize + 3) & ~3;
	TUint fixupSize = (iImportFixupTableSize + 3) & ~3;

	// copy relocs and fixups...
	iCodeRelocTable = (TUint8*)Kern::Alloc(relocSize+fixupSize);
	if (!iCodeRelocTable)
		return KErrNoMemory;
	iImportFixupTable = iCodeRelocTable + relocSize;
	kumemget32(iCodeRelocTable, aInfo.iCodeRelocTable, relocSize);
	kumemget32(iImportFixupTable, aInfo.iImportFixupTable, fixupSize);

	return KErrNone;
	}
sl@0
   687
sl@0
   688
sl@0
   689
/**
Apply relocations and import fixups to one freshly paged-in code page.
Called on the demand-paging path each time a page of this code segment
is loaded, so this is performance sensitive.

Both tables begin with a TUint32 offset array indexed by page number;
entries [iIndex] and [iIndex+1] bound this page's fixup records.

@param aBuffer Kernel-side address of the page being fixed up.
@param iIndex  Page index within the code segment. (NOTE(review): the
               name violates the convention that arguments are
               a-prefixed — reads as a member; consider renaming to
               aIndex.)
*/
void TPagedCodeInfo::ApplyFixups(TLinAddr aBuffer, TUint iIndex)
	{
//	START_PAGING_BENCHMARK;
	
	// relocate code...
	if(iCodeRelocTableSize)
		{
		TUint8* codeRelocTable = iCodeRelocTable;
		TUint startOffset = ((TUint32*)codeRelocTable)[iIndex];
		TUint endOffset = ((TUint32*)codeRelocTable)[iIndex+1];

		__KTRACE_OPT(KPAGING, Kern::Printf("Performing code relocation: start == %x, end == %x", startOffset, endOffset));
		__ASSERT_ALWAYS(startOffset<=endOffset && endOffset<=iCodeRelocTableSize, K::Fault(K::ECodeSegBadFixupTables));

		const TUint32 codeDelta = iCodeDelta;
		const TUint32 dataDelta = iDataDelta;

		// Each 16-bit record: top 4 bits = relocation type, low 12 bits =
		// offset of the word to relocate within this page.
		const TUint16* ptr = (const TUint16*)(codeRelocTable + startOffset);
		const TUint16* end = (const TUint16*)(codeRelocTable + endOffset);
		while(ptr<end)
			{
			TUint16 entry = *ptr++;
			TUint32* addr = (TUint32*)(aBuffer+(entry&0x0fff));
			TUint32 word = *addr;
#ifdef _DEBUG
			TInt type = entry&0xf000;
			__NK_ASSERT_DEBUG(type==KTextRelocType || type==KDataRelocType);
#endif
			// Text relocations get the code delta, data relocations the data
			// delta (comparison works because KDataRelocType is the larger
			// type value in the top nibble).
			if(entry<KDataRelocType)
				word += codeDelta;
			else
				word += dataDelta;
			*addr = word;
			}
		}

	// fixup imports...
	if(iImportFixupTableSize)
		{
		TUint8* importFixupTable = iImportFixupTable;
		TUint startOffset = ((TUint32*)importFixupTable)[iIndex];
		TUint endOffset = ((TUint32*)importFixupTable)[iIndex+1];

		__KTRACE_OPT(KPAGING, Kern::Printf("Performing import fixup: start == %x, end == %x", startOffset, endOffset));
		__ASSERT_ALWAYS(startOffset<=endOffset && endOffset<=iImportFixupTableSize, K::Fault(K::ECodeSegBadFixupTables));

		// Each record is three 16-bit values: offset within the page, then
		// the low and high halves of the 32-bit import address to store.
		const TUint16* ptr = (const TUint16*)(importFixupTable + startOffset);
		const TUint16* end = (const TUint16*)(importFixupTable + endOffset);

		while(ptr<end)
			{
			TUint16 offset = *ptr++;
			TUint32 wordLow = *ptr++;
			TUint32 wordHigh = *ptr++;
			TUint32 word = (wordHigh << 16) | wordLow;
//			__KTRACE_OPT(KPAGING, Kern::Printf("DP: Fixup %08x=%08x", iRamInfo.iCodeRunAddr+(page<<KPageShift)+offset, word));
			*(TUint32*)(aBuffer+offset) = word;
			}
		}
	
//	END_PAGING_BENCHMARK(DemandPaging::ThePager, EPagingBmFixupCodePage);
	}
sl@0
   751
sl@0
   752