// os/kernelhwsrv/kernel/eka/memmodel/epoc/flexible/mprocess.cpp
// author: sl@SLION-WIN7.fritz.box
// Fri, 15 Jun 2012 03:10:57 +0200
// changeset 0 bde4ae8d615e
// permissions -rw-r--r--
// First public contribution.
// Copyright (c) 1994-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
//
    15
sl@0
    16
#include <memmodel.h>
sl@0
    17
#include "mmu/mm.h"
sl@0
    18
#include "mmu/maddrcont.h"
sl@0
    19
#include "mmboot.h"
sl@0
    20
#include <kernel/cache.h>
sl@0
    21
#include "execs.h"
sl@0
    22
sl@0
    23
// iMState is stored in the spare word of the thread's wait link.
#define iMState		iWaitLink.iSpare1

// Fast mutex protecting the per-process shared chunk containers (iSharedChunks).
NFastMutex TheSharedChunkLock;

#ifndef _DEBUG
const TInt KChunkGranularity = 4; // amount to grow SChunkInfo list by
const TInt KMaxChunkInfosInOneGo = 100; // max number of SChunkInfo objects to copy with System Lock held
#else // if debug...
// Minimal values in debug builds to exercise the grow/shuffle paths.
const TInt KChunkGranularity = 1;
const TInt KMaxChunkInfosInOneGo = 1;
#endif
sl@0
    34
sl@0
    35
sl@0
    36
sl@0
    37
/********************************************
sl@0
    38
 * Process
sl@0
    39
 ********************************************/
sl@0
    40
sl@0
    41
// Destructor; all real cleanup is delegated to Destruct().
DMemModelProcess::~DMemModelProcess()
	{
	__KTRACE_OPT(KMMU,Kern::Printf("DMemModelProcess destruct"));
	Destruct();
	}
sl@0
    46
sl@0
    47
sl@0
    48
void DMemModelProcess::Destruct()
	{
	// By this point all references on the address space and all chunk
	// mappings must already have been released; panic otherwise.
	__ASSERT_ALWAYS(!iOsAsidRefCount, MM::Panic(MM::EProcessDestructOsAsidRemaining));
	__ASSERT_ALWAYS(!iChunkCount, MM::Panic(MM::EProcessDestructChunksRemaining));
	Kern::Free(iChunks);
	// The shared chunk container must be empty (or never allocated).
	__ASSERT_ALWAYS(!iSharedChunks || iSharedChunks->Count()==0, MM::Panic(MM::EProcessDestructChunksRemaining));
	delete iSharedChunks;

	DProcess::Destruct();
	}
sl@0
    58
sl@0
    59
sl@0
    60
// Attempt to take a reference on this process's OS address space id.
// The count is only incremented if it is already non-zero, i.e. the
// process still holds its own original reference.
// Returns the asid on success, KErrDied if the process has released it.
TInt DMemModelProcess::TryOpenOsAsid()
	{
	if (!__e32_atomic_tas_ord32(&iOsAsidRefCount, 1, 1, 0))
		return KErrDied;
	return iOsAsid;
	}
sl@0
    68
sl@0
    69
sl@0
    70
void DMemModelProcess::CloseOsAsid()
sl@0
    71
	{
sl@0
    72
	if (__e32_atomic_tas_ord32(&iOsAsidRefCount, 1, -1, 0) == 1)
sl@0
    73
		{// Last reference has been closed so free the asid.
sl@0
    74
		MM::AddressSpaceFree(iOsAsid);
sl@0
    75
		}
sl@0
    76
	}
sl@0
    77
sl@0
    78
sl@0
    79
void DMemModelProcess::AsyncCloseOsAsid()
sl@0
    80
	{
sl@0
    81
	if (__e32_atomic_tas_ord32(&iOsAsidRefCount, 1, -1, 0) == 1)
sl@0
    82
		{// Last reference has been closed so free the asid asynchronusly.
sl@0
    83
		MM::AsyncAddressSpaceFree(iOsAsid);
sl@0
    84
		}
sl@0
    85
	}
sl@0
    86
sl@0
    87
sl@0
    88
/**
Create a new chunk for this process as described by aInfo.

@param aChunk	Receives the new chunk on success; set to NULL on entry.
@param aInfo	Creation attributes (type, adjust/add operations, sizes).
@param aRunAddr	Receives the chunk's kernel mapping base address, when the
				chunk has a kernel mapping.
@return KErrNone on success, otherwise one of the system wide error codes.
*/
TInt DMemModelProcess::NewChunk(DChunk*& aChunk, SChunkCreateInfo& aInfo, TLinAddr& aRunAddr)
	{
	aChunk=NULL;

	DMemModelChunk* pC=new DMemModelChunk;
	if (!pC)
		return KErrNoMemory;

	TChunkType type = aInfo.iType;
	pC->iChunkType=type;
	TInt r=pC->SetAttributes(aInfo);
	if (r!=KErrNone)
		{
		pC->Close(NULL);
		return r;
		}

	// Public chunks have no owning process; private ones are owned by us.
	pC->iOwningProcess=(pC->iAttributes&DMemModelChunk::EPublic)?NULL:this;
	r=pC->Create(aInfo);
	if (r==KErrNone && (aInfo.iOperations & SChunkCreateInfo::EAdjust))
		{
		if (aInfo.iRunAddress!=0)
			pC->SetFixedAddress(aInfo.iRunAddress,aInfo.iPreallocated);
		if (aInfo.iPreallocated==0 && aInfo.iInitialTop!=0)
			{
			// Perform the initial size adjustment appropriate to the
			// chunk's layout (disconnected / double-ended / normal).
			if (pC->iAttributes & DChunk::EDisconnected)
				{
				r=pC->Commit(aInfo.iInitialBottom,aInfo.iInitialTop-aInfo.iInitialBottom);
				}
			else if (pC->iAttributes & DChunk::EDoubleEnded)
				{
				r=pC->AdjustDoubleEnded(aInfo.iInitialBottom,aInfo.iInitialTop);
				}
			else
				{
				r=pC->Adjust(aInfo.iInitialTop);
				}
			}
		}
	if (r==KErrNone && (aInfo.iOperations & SChunkCreateInfo::EAdd))
		{
		r = AddChunk(pC, EFalse);
		}
	if (r==KErrNone)
		{
		if(pC->iKernelMapping)
			aRunAddr = (TLinAddr)MM::MappingBase(pC->iKernelMapping);
		pC->iDestroyedDfc = aInfo.iDestroyedDfc;
		aChunk=(DChunk*)pC;
		}
	else
		pC->Close(NULL);	// NULL since chunk can't have been added to process
	return r;
	}
sl@0
   142
sl@0
   143
sl@0
   144
/**
sl@0
   145
Determine whether this process should be data paged.
sl@0
   146
sl@0
   147
@param aInfo	A reference to the create info for this process.
sl@0
   148
 */
sl@0
   149
TInt DMemModelProcess::SetPaging(const TProcessCreateInfo& aInfo)
sl@0
   150
	{
sl@0
   151
	TUint pagedFlags = aInfo.iFlags & TProcessCreateInfo::EDataPagingMask;
sl@0
   152
	// If KImageDataPaged and KImageDataUnpaged flags present then corrupt
sl@0
   153
	// Check this first to ensure that it is always verified.
sl@0
   154
	if (pagedFlags == TProcessCreateInfo::EDataPagingMask)
sl@0
   155
		{
sl@0
   156
		return KErrCorrupt;
sl@0
   157
		}
sl@0
   158
sl@0
   159
	if (aInfo.iAttr & ECodeSegAttKernel ||
sl@0
   160
		!(K::MemModelAttributes & EMemModelAttrDataPaging))
sl@0
   161
		{// Kernel process shouldn't be data paged or no data paging device installed.
sl@0
   162
		return KErrNone;
sl@0
   163
		}
sl@0
   164
sl@0
   165
	TUint dataPolicy = TheSuperPage().KernelConfigFlags() & EKernelConfigDataPagingPolicyMask;
sl@0
   166
	if (dataPolicy == EKernelConfigDataPagingPolicyAlwaysPage)
sl@0
   167
		{
sl@0
   168
		iAttributes |= EDataPaged;
sl@0
   169
		return KErrNone;
sl@0
   170
		}
sl@0
   171
	if (dataPolicy == EKernelConfigDataPagingPolicyNoPaging)
sl@0
   172
		{// No paging allowed so just return.
sl@0
   173
		return KErrNone;
sl@0
   174
		}
sl@0
   175
	if (pagedFlags == TProcessCreateInfo::EDataPaged)
sl@0
   176
		{
sl@0
   177
		iAttributes |= EDataPaged;
sl@0
   178
		return KErrNone;
sl@0
   179
		}
sl@0
   180
	if (pagedFlags == TProcessCreateInfo::EDataUnpaged)
sl@0
   181
		{// No paging set so just return.
sl@0
   182
		return KErrNone;
sl@0
   183
		}
sl@0
   184
	// Neither paged nor unpaged set so use default paging policy.
sl@0
   185
	// dataPolicy must be EKernelConfigDataPagingPolicyDefaultUnpaged or 
sl@0
   186
	// EKernelConfigDataPagingPolicyDefaultPaged.
sl@0
   187
	__NK_ASSERT_DEBUG(pagedFlags == TProcessCreateInfo::EDataPagingUnspecified);
sl@0
   188
	__NK_ASSERT_DEBUG(	dataPolicy == EKernelConfigDataPagingPolicyDefaultPaged ||
sl@0
   189
						dataPolicy == EKernelConfigDataPagingPolicyDefaultUnpaged);
sl@0
   190
	if (dataPolicy == EKernelConfigDataPagingPolicyDefaultPaged)
sl@0
   191
		{
sl@0
   192
		iAttributes |= EDataPaged;
sl@0
   193
		}
sl@0
   194
	return KErrNone;
sl@0
   195
	}
sl@0
   196
sl@0
   197
sl@0
   198
// Second-phase construction: allocate (or adopt, for the kernel process)
// an OS address space id and take the process's own reference on it.
TInt DMemModelProcess::DoCreate(TBool aKernelProcess, TProcessCreateInfo& aInfo)
	{
	// Required so we can detect whether a process has been created and added 
	// to its object container by checking for iContainerID!=EProcess.
	__ASSERT_COMPILE(EProcess != 0);
	__KTRACE_OPT(KPROC,Kern::Printf(">DMemModelProcess::DoCreate %O",this));
	TInt r=KErrNone;

	if (aKernelProcess)
		{
		// The kernel process uses the fixed kernel asid.
		iAttributes |= ESupervisor;
		iOsAsid = KKernelOsAsid;
		}
	else
		{
		// AddressSpaceAlloc returns the new asid (>=0) or an error code.
		r = MM::AddressSpaceAlloc(iPageDir);
		if (r>=0)
			{
			iOsAsid = r;
			r = KErrNone;
			}
		}
	if (r == KErrNone)
		{// Add this process's own reference to its os asid.
		__e32_atomic_store_ord32(&iOsAsidRefCount, 1);
		}

#ifdef BTRACE_FLEXIBLE_MEM_MODEL
	BTrace8(BTrace::EFlexibleMemModel,BTrace::EAddressSpaceId,this,iOsAsid);
#endif

	__KTRACE_OPT(KPROC,Kern::Printf("OS ASID=%d, PD=%08x",iOsAsid,iPageDir));
	__KTRACE_OPT(KPROC,Kern::Printf("<DMemModelProcess::DoCreate %d",r));
	return r;
	}
sl@0
   233
sl@0
   234
// Allocate and map the memory object backing the process's static data
// (.data/.bss). The memory is paged or movable according to the process's
// EDataPaged attribute. On success iDataBssMapping/iDataBssRunAddress are set.
TInt DMemModelProcess::CreateDataBssStackArea(TProcessCreateInfo& aInfo)
	{
	__KTRACE_OPT(KPROC,Kern::Printf("DMemModelProcess::CreateDataBssStackArea %O",this));
	TInt r = KErrNone;
	TInt dataBssSize = MM::RoundToPageSize(aInfo.iTotalDataSize);
	if(dataBssSize)
		{
		DMemoryObject* memory;
		// Data-paged processes get demand-paged backing; others movable RAM.
		TMemoryObjectType memoryType = iAttributes&EDataPaged ? EMemoryObjectPaged : EMemoryObjectMovable;
		r = MM::MemoryNew(memory,memoryType,MM::BytesToPages(dataBssSize));
		if(r==KErrNone)
			{
			r = MM::MemoryAlloc(memory,0,MM::BytesToPages(dataBssSize));
			if(r==KErrNone)
				{
				r = MM::MappingNew(iDataBssMapping,memory,EUserReadWrite,OsAsid());
				}
			// On any failure destroy the memory object; the mapping (if
			// created) is owned by iDataBssMapping and cleaned up elsewhere.
			if(r!=KErrNone)
				MM::MemoryDestroy(memory);
			else
				{
				iDataBssRunAddress = MM::MappingBase(iDataBssMapping);
#ifdef BTRACE_FLEXIBLE_MEM_MODEL
				BTrace8(BTrace::EFlexibleMemModel,BTrace::EMemoryObjectIsProcessStaticData,memory,this);
#endif
				}
			}
		}
	__KTRACE_OPT(KPROC,Kern::Printf("DataBssSize=%x, ",dataBssSize));

	return r;
	}
sl@0
   266
sl@0
   267
sl@0
   268
// Attach an already-loaded EXE code segment to this (still-constructing)
// process, reserving virtual address space for it when the codeseg does not
// have a globally unique run address.
TInt DMemModelProcess::AttachExistingCodeSeg(TProcessCreateInfo& aInfo)
	{
	TInt r = DEpocProcess::AttachExistingCodeSeg(aInfo);
	if(r==KErrNone)
		{
		// allocate virtual memory for the EXEs codeseg...
		DMemModelCodeSeg* seg = (DMemModelCodeSeg*)iTempCodeSeg;
		if(seg->iAttr&ECodeSegAttAddrNotUnique)
			{
			TUint codeSize = seg->iSize;
			TLinAddr codeAddr = seg->RamInfo().iCodeRunAddr;
			TBool isDemandPaged = seg->iAttr&ECodeSegAttCodePaged;
			// Allocate virtual memory for the code seg using the os asid.
			// No need to open a reference on os asid as process not fully 
			// created yet so it can't die and free the os asid.
			r = MM::VirtualAlloc(OsAsid(),codeAddr,codeSize,isDemandPaged);
			if(r==KErrNone)
				{
				// Remember the reservation so MapUserRamCode() can adopt it
				// and FinalRelease() can free it if the process dies early.
				iCodeVirtualAllocSize = codeSize;
				iCodeVirtualAllocAddress = codeAddr;
				}
			}
		}

	return r;
	}
sl@0
   294
sl@0
   295
sl@0
   296
// Add a chunk to this process, or bump its access count if it is already
// mapped. Private chunks may only be added to their owning process.
TInt DMemModelProcess::AddChunk(DChunk* aChunk, TBool aIsReadOnly)
	{
	DMemModelChunk* pC=(DMemModelChunk*)aChunk;
	// Reject chunks privately owned by a different process.
	if(pC->iOwningProcess && this!=pC->iOwningProcess)
		return KErrAccessDenied;

	TInt r = WaitProcessLock();
	if(r==KErrNone)
		{
		TInt i = ChunkIndex(pC);
		if(i>=0) // Found the chunk in this process, just up its count
			{
			iChunks[i].iAccessCount++;
			__KTRACE_OPT(KPROC,Kern::Printf("DMemModelProcess::AddChunk %08x to %08x (Access count incremented to %d)",aChunk,this,iChunks[i].iAccessCount));
			SignalProcessLock();
			return KErrNone;
			}
		r = DoAddChunk(pC,aIsReadOnly);
		SignalProcessLock();
		}
	__KTRACE_OPT(KPROC,Kern::Printf("DMemModelProcess::AddChunk returns %d",r));
	return r;
	}
sl@0
   319
sl@0
   320
sl@0
   321
// Maps the internal RAM drive into the calling thread's process
// (see TInternalRamDrive::Unlock below).
void M::FsRegisterThread()
	{
	TInternalRamDrive::Unlock();
	}
sl@0
   325
sl@0
   326
sl@0
   327
// No-op in this memory model; the RAM drive is mapped on demand via
// TInternalRamDrive::Unlock() from kernel-side code instead.
void ExecHandler::UnlockRamDrive()
	{
	}
sl@0
   330
sl@0
   331
sl@0
   332
// Return the base address of the internal RAM drive chunk in the current
// process, mapping the chunk into the process first if necessary.
EXPORT_C TLinAddr TInternalRamDrive::Base()
	{
	DMemModelChunk* pC=(DMemModelChunk*)PP::TheRamDriveChunk;
	DMemModelProcess* pP=(DMemModelProcess*)TheCurrentThread->iOwningProcess;
	NKern::LockSystem();
	TLinAddr addr = (TLinAddr)pC->Base(pP);
	NKern::UnlockSystem();
	if(!addr)
		{
		// Chunk not mapped in this process yet; map it and look up again.
		Unlock();
		NKern::LockSystem();
		addr = (TLinAddr)pC->Base(pP);
		NKern::UnlockSystem();
		}
	return addr;
	}
sl@0
   348
sl@0
   349
sl@0
   350
// Map the internal RAM drive chunk into the current process if it is not
// already mapped. Any failure is fatal (panics via __ASSERT_ALWAYS), so
// SignalProcessLock() below only runs with the lock actually held.
EXPORT_C void TInternalRamDrive::Unlock()
	{
	DMemModelChunk* pC=(DMemModelChunk*)PP::TheRamDriveChunk;
	DMemModelProcess* pP=(DMemModelProcess*)TheCurrentThread->iOwningProcess;

	TInt r = pP->WaitProcessLock();
	if(r==KErrNone)
		if(pP->ChunkIndex(pC)==KErrNotFound)
			r = pP->DoAddChunk(pC,EFalse);
	__ASSERT_ALWAYS(r==KErrNone, MM::Panic(MM::EFsRegisterThread));
	pP->SignalProcessLock();
	}
sl@0
   362
sl@0
   363
sl@0
   364
// No-op in this memory model; the RAM drive mapping is left in place
// once created by Unlock().
EXPORT_C void TInternalRamDrive::Lock()
	{
	}
sl@0
   367
sl@0
   368
sl@0
   369
// Create a mapping for aChunk in this process and insert it, sorted by
// chunk pointer, into the iChunks array. Shared kernel chunks are also
// registered in iSharedChunks so OpenSharedChunk() can find them by address.
// Returns KErrNone or a system-wide error code; on failure no state is left
// behind.
TInt DMemModelProcess::DoAddChunk(DMemModelChunk* aChunk, TBool aIsReadOnly)
	{
	//
	// Must hold the process $LOCK mutex before calling this.
	// As the process lock is held it is safe to access iOsAsid without a reference.
	//

	__NK_ASSERT_DEBUG(ChunkIndex(aChunk)==KErrNotFound); // shouldn't be adding a chunk which is already added

	__KTRACE_OPT(KPROC,Kern::Printf("DMemModelProcess::DoAddChunk %O to %O",aChunk,this));

	// create mapping for chunk...
	DMemoryMapping* mapping;
	TMappingPermissions perm = MM::MappingPermissions
		(
		iOsAsid!=(TInt)KKernelOsAsid,	// user?
		aIsReadOnly==false, // write?
		aChunk->iAttributes&DMemModelChunk::ECode // execute?
		);
	TInt r;
	if(aChunk->iFixedBase) // HACK, kernel chunk has a fixed iBase
		r = MM::MappingNew(mapping,aChunk->iMemoryObject,perm,iOsAsid,EMappingCreateExactVirtual,(TLinAddr)aChunk->iFixedBase);
	else
		r = MM::MappingNew(mapping,aChunk->iMemoryObject,perm,iOsAsid);
	if(r!=KErrNone)
		return r;
	// Asid 0 is the kernel's; remember the kernel-side mapping on the chunk.
	if(iOsAsid==0)
		aChunk->iKernelMapping = mapping;
	TLinAddr base = MM::MappingBase(mapping);

	// expand chunk info memory if required...
	if(iChunkCount==iChunkAlloc)
		{
		TInt newAlloc = iChunkAlloc+KChunkGranularity;
		r = Kern::SafeReAlloc((TAny*&)iChunks,iChunkAlloc*sizeof(SChunkInfo),newAlloc*sizeof(SChunkInfo));
		if(r!=KErrNone)
			{
			MM::MappingDestroy(mapping);
			return r;
			}
		iChunkAlloc = newAlloc;
		}

	// insert new chunk info...
	TUint i = ChunkInsertIndex(aChunk);
	SChunkInfo* info = iChunks+i;
	SChunkInfo* infoEnd = iChunks+iChunkCount;
	NKern::LockSystem();
	++iChunkCount;
	for(;;)
		{
		// make space for new chunk info by shuffling along
		// existing infos KMaxChunkInfosInOneGo at a time...
		// (the System Lock is flashed between batches to bound latency)
		SChunkInfo* infoPtr = infoEnd-KMaxChunkInfosInOneGo;
		if(infoPtr<info)
			infoPtr = info;
		memmove(infoPtr+1,infoPtr,(TLinAddr)infoEnd-(TLinAddr)infoPtr);
		infoEnd = infoPtr;
		if(infoEnd<=info)
			break;
		NKern::FlashSystem();
		}
	info->iChunk = aChunk;
	info->iMapping = mapping;
	info->iAccessCount = 1;
	info->iIsReadOnly = aIsReadOnly;
	NKern::UnlockSystem();

	// add chunk to list of Shared Chunks...
	if(aChunk->iChunkType==ESharedKernelSingle || aChunk->iChunkType==ESharedKernelMultiple)
		{
		// Container is created lazily on first shared chunk.
		if(!iSharedChunks)
			iSharedChunks = new RAddressedContainer(&TheSharedChunkLock,iProcessLock);
		if(!iSharedChunks)
			r = KErrNoMemory;
		else
			r = iSharedChunks->Add(base,aChunk);
		if(r!=KErrNone)
			{
			// Undo the array insertion and destroy the mapping.
			DoRemoveChunk(i);
			return r;
			}
		}

	// done OK...
	__DEBUG_EVENT(EEventUpdateProcess, this);
	return KErrNone;
	}
sl@0
   457
sl@0
   458
sl@0
   459
// Remove the chunk at aIndex from this process: deregister it from the
// shared chunk container (if applicable), close the gap in the iChunks
// array, and destroy its mapping. Caller holds the process $LOCK mutex.
void DMemModelProcess::DoRemoveChunk(TInt aIndex)
	{
	__DEBUG_EVENT(EEventUpdateProcess, this);

	DMemModelChunk* chunk = iChunks[aIndex].iChunk;
	DMemoryMapping* mapping = iChunks[aIndex].iMapping;

	if(chunk->iChunkType==ESharedKernelSingle || chunk->iChunkType==ESharedKernelMultiple)
		{
		// remove chunk from list of Shared Chunks...
		if(iSharedChunks)
			{
			iSharedChunks->Remove(MM::MappingBase(mapping));
#ifdef _DEBUG
			// delete iSharedChunks if it's empty, so memory leak test code passes...
			if(iSharedChunks->Count()==0)
				{
				// Clear the pointer under the fast mutex so concurrent
				// readers (OpenSharedChunk) never see a dangling container.
				NKern::FMWait(&TheSharedChunkLock);
				RAddressedContainer* s = iSharedChunks;
				iSharedChunks = 0;
				NKern::FMSignal(&TheSharedChunkLock);
				delete s;
				}
#endif
			}
		}

	// remove chunk from array...
	SChunkInfo* infoStart = iChunks+aIndex+1;
	SChunkInfo* infoEnd = iChunks+iChunkCount;
	NKern::LockSystem();
	for(;;)
		{
		// shuffle existing infos down KMaxChunkInfosInOneGo at a time...
		// (the System Lock is flashed between batches to bound latency)
		SChunkInfo* infoPtr = infoStart+KMaxChunkInfosInOneGo;
		if(infoPtr>infoEnd)
			infoPtr = infoEnd;
		memmove(infoStart-1,infoStart,(TLinAddr)infoPtr-(TLinAddr)infoStart);
		infoStart = infoPtr;
		if(infoStart>=infoEnd)
			break;
		NKern::FlashSystem();
		}
	--iChunkCount;
	NKern::UnlockSystem();

	// Clear the chunk's kernel mapping pointer if that is what we destroy.
	if(mapping==chunk->iKernelMapping)
		chunk->iKernelMapping = 0;

	MM::MappingDestroy(mapping);
	}
sl@0
   510
sl@0
   511
sl@0
   512
/**
sl@0
   513
Final chance for process to release resources during its death.
sl@0
   514
sl@0
   515
Called with process $LOCK mutex held (if it exists).
sl@0
   516
This mutex will not be released before it is deleted.
sl@0
   517
I.e. no other thread will ever hold the mutex again.
sl@0
   518
*/
sl@0
   519
void DMemModelProcess::FinalRelease()
	{
	// Clean up any left over chunks (such as SharedIo buffers)
	if(iProcessLock)
		while(iChunkCount)
			DoRemoveChunk(0);
	// Destroy the remaining mappings and memory objects owned by this process
	MM::MappingAndMemoryDestroy(iDataBssMapping);
	// Free any EXE code virtual allocation that was never adopted by
	// MapUserRamCode() (set up in AttachExistingCodeSeg()).
	if(iCodeVirtualAllocSize)
		MM::VirtualFree(iOsAsid,iCodeVirtualAllocAddress,iCodeVirtualAllocSize);

	// Close the original reference on the os asid.
	CloseOsAsid();
	}
sl@0
   533
sl@0
   534
sl@0
   535
// Drop one access reference on aChunk for this process, removing the chunk
// entirely when the count reaches zero.
// Note that this can't be called after the process $LOCK mutex has been
// deleted, since it can only be called by a thread in this process doing a
// handle close or dying, or by the process handles array being deleted due
// to the process dying, all of which happen before $LOCK is deleted.
void DMemModelProcess::RemoveChunk(DMemModelChunk *aChunk)
	{
	__KTRACE_OPT(KPROC,Kern::Printf("DMemModelProcess %O RemoveChunk %O",this,aChunk));
	Kern::MutexWait(*iProcessLock);

	const TInt index = ChunkIndex(aChunk);
	if(index>=0)
		{
		// Chunk is mapped in this process; release one reference.
		__KTRACE_OPT(KPROC,Kern::Printf("Chunk access count %d",iChunks[index].iAccessCount));
		const TInt remaining = --iChunks[index].iAccessCount;
		if(remaining==0)
			DoRemoveChunk(index);
		}

	Kern::MutexSignal(*iProcessLock);
	}
sl@0
   554
sl@0
   555
sl@0
   556
// Return the base address at which this chunk is mapped in aProcess,
// or 0 if the chunk has no mapping there.
TUint8* DMemModelChunk::Base(DProcess* aProcess)
	{
	DMemModelProcess* proc = (DMemModelProcess*)aProcess;
	DMemoryMapping* mapping = 0;

	if(iKernelMapping && proc==K::TheKernelProcess)
		{
		// Fast path for shared chunks mapped into the kernel process.
		mapping = iKernelMapping;
		}
	else
		{
		// Otherwise locate the chunk in the process's chunk array.
		const TInt index = proc->ChunkIndex(this);
		if(index>=0)
			mapping = proc->iChunks[index].iMapping;
		}

	return mapping ? (TUint8*)MM::MappingBase(mapping) : (TUint8*)0;
	}
sl@0
   579
sl@0
   580
sl@0
   581
// Look up the shared chunk containing aAddress in this thread's process and
// open a reference on it. On success returns the chunk and sets aOffset to
// the offset of aAddress within it; returns NULL on failure.
// (aWrite is unused here; the lookup is by address only.)
DChunk* DThread::OpenSharedChunk(const TAny* aAddress, TBool aWrite, TInt& aOffset)
	{
	DMemModelChunk* chunk = 0;

	// TheSharedChunkLock guards the per-process shared chunk container.
	NKern::FMWait(&TheSharedChunkLock);
	RAddressedContainer* list = ((DMemModelProcess*)iOwningProcess)->iSharedChunks;
	if(list)
		{
		// search list...
		TUint offset;
		chunk = (DMemModelChunk*)list->Find((TLinAddr)aAddress,offset);
		if(chunk && offset<TUint(chunk->iMaxSize) && chunk->Open()==KErrNone)
			aOffset = offset; // chunk found and opened successfully
		else
			chunk = 0; // failed
		}
	NKern::FMSignal(&TheSharedChunkLock);

	return chunk;
	}
sl@0
   601
sl@0
   602
sl@0
   603
// Return the index at which aChunk would be inserted into the iChunks
// array, which is kept sorted by chunk pointer value.
// Caller must hold iProcessLock or the System Lock.
TUint DMemModelProcess::ChunkInsertIndex(DMemModelChunk* aChunk)
	{
#ifdef _DEBUG
	if(K::Initialising==false && iProcessLock!=NULL && iProcessLock->iCleanup.iThread!=&Kern::CurrentThread())
		{
		// Caller doesn't hold iProcessLock, so it must hold the System Lock.
		__ASSERT_SYSTEM_LOCK;
		}
#endif

	// Binary search for the first entry whose chunk pointer is greater
	// than aChunk.
	TUint lo = 0;
	TUint hi = iChunkCount;
	while(lo<hi)
		{
		const TUint mid = (lo+hi)>>1;
		if(iChunks[mid].iChunk<=aChunk)
			lo = mid+1;
		else
			hi = mid;
		}
	return hi;
	}
sl@0
   630
sl@0
   631
sl@0
   632
// Return the index of aChunk in the iChunks array, or KErrNotFound if the
// chunk is not mapped in this process.
TInt DMemModelProcess::ChunkIndex(DMemModelChunk* aChunk)
	{
	TUint pos = ChunkInsertIndex(aChunk);
	if(pos==0)
		return KErrNotFound;
	--pos;
	// The entry just before the insert position is the only candidate.
	return iChunks[pos].iChunk==aChunk ? (TInt)pos : KErrNotFound;
	}
sl@0
   639
sl@0
   640
sl@0
   641
// Map a code segment (and, for DLLs, commit its static data) into this
// process. Kernel code needs no extra mappings; kernel-only code cannot be
// mapped into a non-supervisor process.
TInt DMemModelProcess::MapCodeSeg(DCodeSeg* aSeg)
	{
	__ASSERT_CRITICAL;	// Must be in critical section so can't leak os asid references.
		
	DMemModelCodeSeg& seg=*(DMemModelCodeSeg*)aSeg;
	__KTRACE_OPT(KDLL,Kern::Printf("Process %O MapCodeSeg %C", this, aSeg));
	TBool kernel_only=( (seg.iAttr&(ECodeSegAttKernel|ECodeSegAttGlobal)) == ECodeSegAttKernel );
	TBool user_local=( (seg.iAttr&(ECodeSegAttKernel|ECodeSegAttGlobal)) == 0 );
	if (kernel_only && !(iAttributes&ESupervisor))
		return KErrNotSupported;
	if (seg.iAttr&ECodeSegAttKernel)
		return KErrNone;	// no extra mappings needed for kernel code

	// Attempt to open a reference on the os asid it is required so
	// MapUserRamCode() and CommitDllData() can use iOsAsid safely.
	TInt osAsid = TryOpenOsAsid();
	if (osAsid < 0)
		{// The process has died.
		return KErrDied;
		}

	TInt r=KErrNone;
	if (user_local)
		r=MapUserRamCode(seg.Memory());
	if (seg.IsDll())
		{
		TInt total_data_size;
		TLinAddr data_base;
		seg.GetDataSizeAndBase(total_data_size, data_base);
		if (r==KErrNone && total_data_size)
			{
			TInt size=MM::RoundToPageSize(total_data_size);
			r=CommitDllData(data_base, size, aSeg);
			// Undo the code mapping if the data commit failed.
			if (r!=KErrNone && user_local)
				UnmapUserRamCode(seg.Memory());
			}
		}
	// Balance the TryOpenOsAsid() reference taken above.
	CloseOsAsid();

	return r;
	}
sl@0
   682
sl@0
   683
sl@0
   684
// Unmap a code segment from this process, decommitting DLL static data
// first. Kernel code has no extra mappings to remove.
void DMemModelProcess::UnmapCodeSeg(DCodeSeg* aSeg)
	{
	__ASSERT_CRITICAL;	// Must be in critical section so can't leak os asid references.

	DMemModelCodeSeg& seg=*(DMemModelCodeSeg*)aSeg;
	__KTRACE_OPT(KDLL,Kern::Printf("Process %O UnmapCodeSeg %C", this, aSeg));
	if (seg.iAttr&ECodeSegAttKernel)
		return;	// no extra mappings needed for kernel code

	// Attempt to open a reference on the os asid it is required so
	// UnmapUserRamCode() and DecommitDllData() can use iOsAsid safely.
	TInt osAsid = TryOpenOsAsid();
	if (osAsid < 0)
		{// The process has died and it the process it will have cleaned up any code segs.
		return;
		}

	if (seg.IsDll())
		{
		TInt total_data_size;
		TLinAddr data_base;
		seg.GetDataSizeAndBase(total_data_size, data_base);
		if (total_data_size)
			DecommitDllData(data_base, MM::RoundToPageSize(total_data_size));
		}
	if (seg.Memory())
		UnmapUserRamCode(seg.Memory());

	// Balance the TryOpenOsAsid() reference taken above.
	CloseOsAsid();
	}
sl@0
   714
sl@0
   715
void DMemModelProcess::RemoveDllData()
//
// Call with CodeSegLock held
//
	{
	// Intentionally empty: DLL data for this memory model is decommitted
	// per code segment via UnmapCodeSeg()/DecommitDllData().
	}
sl@0
   721
sl@0
   722
sl@0
   723
// Map the RAM-loaded code of aMemory's code segment into this process at
// its run address. EXE codesegs adopt the virtual allocation made earlier
// in AttachExistingCodeSeg(); others use a common or exact virtual address.
TInt DMemModelProcess::MapUserRamCode(DMemModelCodeSegMemory* aMemory)
	{
	__KTRACE_OPT(KPROC,Kern::Printf("DMemModelProcess %O MapUserRamCode %C %d %d",
									this, aMemory->iCodeSeg, iOsAsid, aMemory->iPagedCodeInfo!=0));
	__ASSERT_MUTEX(DCodeSeg::CodeSegLock);

	TMappingCreateFlags createFlags = EMappingCreateExactVirtual;

	if(!(aMemory->iCodeSeg->iAttr&ECodeSegAttAddrNotUnique))
		{
		// codeseg memory address is globally unique, (common address across all processes)...
		FlagSet(createFlags,EMappingCreateCommonVirtual);
		}

	if(aMemory->iCodeSeg->IsExe())
		{
		// EXE codesegs have already had their virtual address allocated so we must adopt that...
		__NK_ASSERT_DEBUG(iCodeVirtualAllocSize);
		__NK_ASSERT_DEBUG(iCodeVirtualAllocAddress==aMemory->iRamInfo.iCodeRunAddr);
		// Ownership of the reservation passes to the mapping; clear our
		// record so FinalRelease() won't free it twice.
		iCodeVirtualAllocSize = 0;
		iCodeVirtualAllocAddress = 0;
		FlagSet(createFlags,EMappingCreateAdoptVirtual);
		}

	DMemoryMapping* mapping;
	return MM::MappingNew(mapping,aMemory->iCodeMemoryObject,EUserExecute,iOsAsid,createFlags,aMemory->iRamInfo.iCodeRunAddr);
	}
sl@0
   750
sl@0
   751
sl@0
   752
// Destroy this process's mapping of the code segment's RAM-loaded code,
// located by its run address in our address space.
void DMemModelProcess::UnmapUserRamCode(DMemModelCodeSegMemory* aMemory)
	{
	__KTRACE_OPT(KPROC,Kern::Printf("DMemModelProcess %O UnmapUserRamCode %C %d %d",
									this, aMemory->iCodeSeg, iOsAsid, aMemory->iPagedCodeInfo!=0));

	__ASSERT_MUTEX(DCodeSeg::CodeSegLock);
	MM::MappingDestroy(aMemory->iRamInfo.iCodeRunAddr,iOsAsid);
	}
sl@0
   760
sl@0
   761
sl@0
   762
// Allocate and map a memory object for a DLL's static data at the fixed
// address aBase in this process. Memory is demand paged when the codeseg
// has the ECodeSegAttDataPaged attribute, movable otherwise.
TInt DMemModelProcess::CommitDllData(TLinAddr aBase, TInt aSize, DCodeSeg* aCodeSeg)
	{
	__KTRACE_OPT(KDLL,Kern::Printf("DMemModelProcess %O CommitDllData %08x+%x",this,aBase,aSize));

	DMemoryObject* memory;
	TMemoryObjectType memoryType = aCodeSeg->iAttr&ECodeSegAttDataPaged ? EMemoryObjectPaged : EMemoryObjectMovable;
	TInt r = MM::MemoryNew(memory,memoryType,MM::BytesToPages(aSize));
	if(r==KErrNone)
		{
		r = MM::MemoryAlloc(memory,0,MM::BytesToPages(aSize));
		if(r==KErrNone)
			{
			// Same address in every process, so EMappingCreateCommonVirtual.
			DMemoryMapping* mapping;
			r = MM::MappingNew(mapping,memory,EUserReadWrite,iOsAsid,EMappingCreateCommonVirtual,aBase);
			}
		// Destroy the memory object on any failure.
		if(r!=KErrNone)
			MM::MemoryDestroy(memory);
		else
			{
#ifdef BTRACE_FLEXIBLE_MEM_MODEL
			BTrace12(BTrace::EFlexibleMemModel,BTrace::EMemoryObjectIsDllStaticData,memory,aCodeSeg,this);
#endif
			}
		
		}
	__KTRACE_OPT(KDLL,Kern::Printf("CommitDllData returns %d",r));
	return r;
	}
sl@0
   790
sl@0
   791
sl@0
   792
// Destroy the mapping and memory object holding a DLL's static data at
// aBase in this process (inverse of CommitDllData).
void DMemModelProcess::DecommitDllData(TLinAddr aBase, TInt aSize)
	{
	__KTRACE_OPT(KDLL,Kern::Printf("DMemModelProcess %O DecommitDllData %08x+%x",this,aBase,aSize));
	MM::MappingAndMemoryDestroy(aBase,iOsAsid);
	}
sl@0
   797
sl@0
   798
// Emit the BTrace records that describe this process's memory-model state:
// its address space id, the memory object backing its static data, and the
// memory objects backing the static data of every DLL it has loaded.
// aCategory is the trace category being primed; -1 means prime everything.
void DMemModelProcess::BTracePrime(TInt aCategory)
	{
	DProcess::BTracePrime(aCategory);

#ifdef BTRACE_FLEXIBLE_MEM_MODEL
	if (aCategory == BTrace::EFlexibleMemModel || aCategory == -1)
		{
		BTrace8(BTrace::EFlexibleMemModel,BTrace::EAddressSpaceId,this,iOsAsid);

		// Trace the memory object for the process's own .data/.bss, if any.
		// MappingGetAndOpenMemory opens a reference which we must close.
		if (iDataBssMapping)
			{
			DMemoryObject* memory = MM::MappingGetAndOpenMemory(iDataBssMapping);
			if (memory)
				{
				MM::MemoryBTracePrime(memory);
				BTrace8(BTrace::EFlexibleMemModel,BTrace::EMemoryObjectIsProcessStaticData,memory,this);
				MM::MemoryClose(memory);
				}
			}
		
		// Trace memory objects for DLL static data.
		// Collect this process's code segments onto a temporary queue, then
		// walk it looking for DLLs that have writable static data.
		SDblQue cs_list;
		DCodeSeg::UnmarkAll(DCodeSeg::EMarkListDeps|DCodeSeg::EMarkUnListDeps);
		TraverseCodeSegs(&cs_list, NULL, DCodeSeg::EMarkListDeps, 0);
		SDblQueLink* anchor=&cs_list.iA;
		SDblQueLink* pL=cs_list.First();
		for(; pL!=anchor; pL=pL->iNext)
			{
			DMemModelCodeSeg* seg = _LOFF(pL,DMemModelCodeSeg,iTempLink);
			if (seg->IsDll())
				{
				TInt total_data_size;
				TLinAddr data_base;
				seg->GetDataSizeAndBase(total_data_size, data_base);
				if (total_data_size)
					{
					TUint offset;
					// The instance count can be ignored as a dll data mapping is only ever
					// used with a single memory object.
					TUint mappingInstanceCount;
					// Enter a critical section so the thread can't be killed
					// while holding references to the mapping/memory object.
					NKern::ThreadEnterCS();
					DMemoryMapping* mapping = MM::FindMappingInAddressSpace(iOsAsid, data_base, 0, offset, mappingInstanceCount);
					if (mapping)
						{
						DMemoryObject* memory = MM::MappingGetAndOpenMemory(mapping);
						if (memory)
							{
							MM::MemoryBTracePrime(memory);
							BTrace12(BTrace::EFlexibleMemModel,BTrace::EMemoryObjectIsDllStaticData,memory,seg,this);
							MM::MemoryClose(memory);
							}
						// FindMappingInAddressSpace opened the mapping; balance it.
						MM::MappingClose(mapping);
						}
					NKern::ThreadLeaveCS();
					}
				}
			}
		DCodeSeg::EmptyQueue(cs_list, 0);	// leave cs_list empty
		}
#endif
	}
sl@0
   859
sl@0
   860
sl@0
   861
// Create a shared-buffer pool for this process. On success aPool receives the
// new pool; on any failure aPool is left NULL and the error code is returned.
TInt DMemModelProcess::NewShPool(DShPool*& aPool, TShPoolCreateInfo& aInfo)
	{
	aPool = NULL;

	// Pick the pool implementation that matches the requested buffer layout.
	DMemModelShPool* pool = (aInfo.iInfo.iFlags & TShPoolCreateInfo::EPageAlignedBuffer)
		? static_cast<DMemModelShPool*>(new DMemModelAlignedShPool())
		: static_cast<DMemModelShPool*>(new DMemModelNonAlignedShPool());

	if (!pool)
		return KErrNoMemory;

	TInt r = pool->Create(this, aInfo);
	if (r == KErrNone)
		aPool = pool;
	else
		pool->Close(NULL);	// Create failed - drop our reference to destroy the pool

	return r;
	}
sl@0
   893
sl@0
   894
sl@0
   895
TInt DThread::RawRead(const TAny* aSrc, TAny* aDest, TInt aLength, TInt aFlags, TIpcExcTrap* aExcTrap)
//
// Read from the thread's process.
// aSrc      Run address of memory to read (in this thread's process)
// aDest     Current address of destination (in the current thread's process)
// aLength   Number of bytes to copy
// aFlags    KCheckLocalAddress => validate aDest as a user IPC address
// aExcTrap  Exception trap object to be updated if the actual memory access is
//           performed on another memory area than specified. This happens when
//           reading is performed on an unaligned memory area.
// Returns KErrNone, KErrDied if the target thread died, or KErrBadDescriptor
// on a permission failure.
//
	{
	(void)aExcTrap;
	DMemModelThread& t=*(DMemModelThread*)TheCurrentThread;
	DMemModelProcess* pP=(DMemModelProcess*)iOwningProcess;
	TLinAddr src=(TLinAddr)aSrc;
	TLinAddr dest=(TLinAddr)aDest;
	TInt result = KErrNone;
	TBool have_taken_fault = EFalse;

	// Copy in chunks: each iteration aliases a window of the remote address
	// space into the current thread and memcpy's from it.
	while (aLength)
		{
		if (iMState==EDead)
			{
			result = KErrDied;
			break;
			}
		TLinAddr alias_src;
		TUint alias_size;
		
#ifdef __BROADCAST_CACHE_MAINTENANCE__
		TInt pagingTrap;
		XTRAP_PAGING_START(pagingTrap);		
#endif

		// After a paging fault, restrict the chunk to the remainder of the
		// current page so forward progress is guaranteed on retry.
		TInt len = have_taken_fault ? Min(aLength, KPageSize - (src & KPageMask)) : aLength;
		TInt alias_result=t.Alias(src, pP, len, alias_src, alias_size);
		if (alias_result<0)
			{
			result = KErrBadDescriptor;	// bad permissions
			break;
			}
		
#ifdef __BROADCAST_CACHE_MAINTENANCE__
		// need to let the trap handler know where we are accessing in case we take a page fault
		// and the alias gets removed
		aExcTrap->iRemoteBase = alias_src;
		aExcTrap->iSize = alias_size;
#endif
			
		__KTRACE_OPT(KTHREAD2,Kern::Printf("DThread::RawRead %08x<-%08x+%x",dest,alias_src,alias_size));

		CHECK_PAGING_SAFE;

		if(aFlags&KCheckLocalAddress)
			MM::ValidateLocalIpcAddress(dest,alias_size,ETrue);
		// User memory guard must be lifted around the copy since dest may be
		// a user-mode address.
		UNLOCK_USER_MEMORY();
		memcpy( (TAny*)dest, (const TAny*)alias_src, alias_size);
		LOCK_USER_MEMORY();

		src+=alias_size;
		dest+=alias_size;
		aLength-=alias_size;

#ifdef __BROADCAST_CACHE_MAINTENANCE__
		XTRAP_PAGING_END;
		if(pagingTrap)
			have_taken_fault = ETrue;
#endif
		}
	t.RemoveAlias();
#ifdef __BROADCAST_CACHE_MAINTENANCE__
	t.iPagingExcTrap = NULL;  // in case we broke out of the loop and skipped XTRAP_PAGING_END
#endif

	return result;
	}
sl@0
   969
sl@0
   970
sl@0
   971
TInt DThread::RawWrite(const TAny* aDest, const TAny* aSrc, TInt aLength, TInt aFlags, DThread* /*anOriginatingThread*/, TIpcExcTrap* aExcTrap)
//
// Write to the thread's process.
// aDest               Run address of memory to write (in this thread's process)
// aSrc                Current address of source (in the current thread's process)
// aLength             Number of bytes to copy
// aFlags              KCheckLocalAddress => validate aSrc as a user IPC address
// aExcTrap            Exception trap object to be updated if the actual memory access is
//                     performed on another memory area than specified. This happens when
//                     writing is performed on an unaligned memory area.
// Returns KErrNone, KErrDied if the target thread died, or KErrBadDescriptor
// on a permission failure.
//
	{
	(void)aExcTrap;
	DMemModelThread& t=*(DMemModelThread*)TheCurrentThread;
	DMemModelProcess* pP=(DMemModelProcess*)iOwningProcess;
	TLinAddr src=(TLinAddr)aSrc;
	TLinAddr dest=(TLinAddr)aDest;
	TInt result = KErrNone;
	TBool have_taken_fault = EFalse;

	// Copy in chunks: each iteration aliases a window of the remote address
	// space into the current thread and memcpy's into it.
	while (aLength)
		{
		if (iMState==EDead)
			{
			result = KErrDied;
			break;
			}
		TLinAddr alias_dest;
		TUint alias_size;

#ifdef __BROADCAST_CACHE_MAINTENANCE__
		TInt pagingTrap;
		XTRAP_PAGING_START(pagingTrap);		
#endif
		
		// After a paging fault, restrict the chunk to the remainder of the
		// current page so forward progress is guaranteed on retry.
		TInt len = have_taken_fault ? Min(aLength, KPageSize - (dest & KPageMask)) : aLength;
		TInt alias_result=t.Alias(dest, pP, len, alias_dest, alias_size);
		if (alias_result<0)
			{
			result = KErrBadDescriptor;	// bad permissions
			break;
			}

#ifdef __BROADCAST_CACHE_MAINTENANCE__
		// need to let the trap handler know where we are accessing in case we take a page fault
		// and the alias gets removed
		aExcTrap->iRemoteBase = alias_dest;
		aExcTrap->iSize = alias_size;
#endif

		__KTRACE_OPT(KTHREAD2,Kern::Printf("DThread::RawWrite %08x+%x->%08x",src,alias_size,alias_dest));

		// Must check that it is safe to page, unless we are reading from unpaged ROM in which case
		// we allow it.
		CHECK_PAGING_SAFE_RANGE(src, aLength);
		CHECK_DATA_PAGING_SAFE_RANGE(dest, aLength);

		if(aFlags&KCheckLocalAddress)
			MM::ValidateLocalIpcAddress(src,alias_size,EFalse);
		// User memory guard must be lifted around the copy since src may be
		// a user-mode address.
		UNLOCK_USER_MEMORY();
		memcpy( (TAny*)alias_dest, (const TAny*)src, alias_size);
		LOCK_USER_MEMORY();

		src+=alias_size;
		dest+=alias_size;
		aLength-=alias_size;

#ifdef __BROADCAST_CACHE_MAINTENANCE__
		XTRAP_PAGING_END;
		if(pagingTrap)
			have_taken_fault = ETrue;
#endif
 		}
	t.RemoveAlias();
#ifdef __BROADCAST_CACHE_MAINTENANCE__
	t.iPagingExcTrap = NULL;  // in case we broke out of the loop and skipped XTRAP_PAGING_END
#endif

	return result;
	}
sl@0
  1048
sl@0
  1049
sl@0
  1050
#ifndef __MARM__
sl@0
  1051
sl@0
  1052
TInt DThread::ReadAndParseDesHeader(const TAny* aSrc, TDesHeader& aDest)
//
// Read the header of a remote descriptor.
// aSrc   Address of the descriptor in this thread's process.
// aDest  Receives the (up to 12 byte) raw header, which is then parsed.
// Returns KErrNone or KErrBadDescriptor.
//
	{
	// Header length in bytes for each of the 5 valid descriptor types
	// (index = type nibble); 0 marks an invalid type.
	static const TUint8 LengthLookup[16]={4,8,12,8,12,0,0,0,0,0,0,0,0,0,0,0};

	CHECK_PAGING_SAFE;
	
	DMemModelThread& t=*(DMemModelThread*)TheCurrentThread;
	DMemModelProcess* pP=(DMemModelProcess*)iOwningProcess;
	TLinAddr src=(TLinAddr)aSrc;

	__NK_ASSERT_DEBUG(t.iIpcClient==NULL);
	t.iIpcClient = this;
	
	TLinAddr pAlias;
	TUint8* pDest = (TUint8*)&aDest;
	TUint alias_size = 0;
	// Start by assuming the maximum 12-byte header; after the first word is
	// read the real length is known from the type field.
	TInt length = 12;
	TInt type = KErrBadDescriptor;
	// Read the header incrementally: first one word to learn the type, then
	// the remainder, re-aliasing whenever a page fault removed the alias.
	while (length > 0)
		{
#ifdef __BROADCAST_CACHE_MAINTENANCE__
		TInt pagingTrap;
		XTRAP_PAGING_START(pagingTrap);		
#endif
		
		if (alias_size == 0)  
			{
			// no alias present, so must create one here
			if (t.Alias(src, pP, length, pAlias, alias_size) != KErrNone)
				break;
			__NK_ASSERT_DEBUG(alias_size >= sizeof(TUint32));
			}

		// read either the first word, or as much as aliased of the remainder
		TInt l = length == 12 ? sizeof(TUint32) : Min(length, alias_size);
		if (Kern::SafeRead((TAny*)pAlias, (TAny*)pDest, l))
			break;  // exception reading from user space
		
		if (length == 12)  
			{
			// we have just read the first word, so decode the descriptor type
			type = *(TUint32*)pDest >> KShiftDesType8;
			length = LengthLookup[type];
			// invalid descriptor type will have length 0 which will get decreased by 'l' and
			// terminate the loop with length < 0
			}

		src += l;
		alias_size -= l;
		pAlias += l;
		pDest += l;
		length -= l;
		
#ifdef __BROADCAST_CACHE_MAINTENANCE__
		XTRAP_PAGING_END;
		if (pagingTrap)
			alias_size = 0;  // a page fault caused the alias to be removed
#endif
		}
	
	t.RemoveAlias();
	t.iIpcClient = NULL;
#ifdef __BROADCAST_CACHE_MAINTENANCE__
	t.iPagingExcTrap = NULL;  // in case we broke out of the loop and skipped XTRAP_PAGING_END
#endif
	// length == 0 means the full header was read successfully; anything else
	// (invalid type, failed alias, or read exception) is a bad descriptor.
	return length == 0 ? K::ParseDesHeader(aSrc, (TRawDesHeader&)aDest, aDest) : KErrBadDescriptor;
	}
sl@0
  1122
sl@0
  1123
sl@0
  1124
#endif
sl@0
  1125
sl@0
  1126
sl@0
  1127
// Legacy DMA-preparation API. Not implemented on the flexible memory model;
// drivers should use the physical pinning APIs instead.
TInt DThread::PrepareMemoryForDMA(const TAny* /*aLinAddr*/, TInt /*aSize*/, TPhysAddr* /*aPhysicalPageList*/)
	{
	return KErrNotSupported;
	}
sl@0
  1132
sl@0
  1133
// Legacy DMA-release API. Not implemented on the flexible memory model;
// drivers should use the physical pinning APIs instead.
TInt DThread::ReleaseMemoryFromDMA(const TAny* /*aLinAddr*/, TInt /*aSize*/, TPhysAddr* /*aPhysicalPageList*/)
	{
	return KErrNotSupported;
	}
sl@0
  1138