// Copyright (c) 1994-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\memmodel\epoc\moving\mprocess.cpp
//
//

#include "memmodel.h"
#include "cache_maintenance.h"
#include "mmboot.h"

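// The thread M-state is not a dedicated member in this memory model; as the alias
// below shows, it is stored in the spare word of the thread's wait link.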
#define iMState		iWaitLink.iSpare1

_LIT(KDollarDat,"$DAT");
_LIT(KLitDllDollarData,"DLL$DATA");

/********************************************
 * Process
 ********************************************/
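// Process destruction for the moving memory model: clear any of the global
// "current process" pointers (address space, VM, data section) that still refer
// to this process, then hand over to the generic DProcess::Destruct().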
void DMemModelProcess::Destruct()
	{
	NKern::LockSystem();
	if (this==TheCurrentAddressSpace)
		TheCurrentAddressSpace=NULL;
	if (this==TheCurrentVMProcess)
		TheCurrentVMProcess=NULL;
	if (this==TheCurrentDataSectionProcess)
		TheCurrentDataSectionProcess=NULL;
	if (this==TheCompleteDataSectionProcess)
		TheCompleteDataSectionProcess=NULL;
	NKern::UnlockSystem();
	DProcess::Destruct();
	}

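// Create a new chunk for this process. The flow below is: allocate the chunk object
// (GetNewChunk), propagate fixed/private/code attributes from the creation info and
// from the owning process, create the chunk, optionally perform the initial
// adjust/commit, and finally add it to the process's address space if requested.
// On failure the chunk is closed and the error code returned.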
TInt DMemModelProcess::NewChunk(DChunk*& aChunk, SChunkCreateInfo& aInfo, TLinAddr& aRunAddr)
	{
	aChunk=NULL;
	DMemModelChunk* pC=NULL;
	TInt r=GetNewChunk(pC,aInfo);
	if (r!=KErrNone)
		{
		if (pC)
			pC->Close(NULL);
		return r;
		}
	if (aInfo.iForceFixed || iAttributes & DMemModelProcess::EFixedAddress)
		pC->iAttributes |= DMemModelChunk::EFixedAddress;
	if (!aInfo.iGlobal && (iAttributes & DMemModelProcess::EPrivate)!=0)
		pC->iAttributes |= DMemModelChunk::EPrivate;
	if (pC->iChunkType==EDll || pC->iChunkType==EUserCode || pC->iChunkType==EUserSelfModCode || pC->iChunkType==EKernelCode)
		pC->iAttributes |= (DMemModelChunk::EFixedAddress|DMemModelChunk::ECode);
	pC->iOwningProcess=(aInfo.iGlobal)?NULL:this;
	r=pC->Create(aInfo);
	if (r==KErrNone && (aInfo.iOperations & SChunkCreateInfo::EAdjust))
		{
		if (aInfo.iRunAddress!=0)
			pC->SetFixedAddress(aInfo.iRunAddress,aInfo.iPreallocated);
		if (aInfo.iPreallocated==0)
			{
			if (pC->iAttributes & DChunk::EDisconnected)
				{
				r=pC->Commit(aInfo.iInitialBottom,aInfo.iInitialTop-aInfo.iInitialBottom);
				}
			else if (pC->iAttributes & DChunk::EDoubleEnded)
				{
				r=pC->AdjustDoubleEnded(aInfo.iInitialBottom,aInfo.iInitialTop);
				}
			else
				{
				r=pC->Adjust(aInfo.iInitialTop);
				}
			}
		if (r==KErrNone && pC->iHomeRegionBase==0 && (pC->iAttributes&DMemModelChunk::EFixedAddress)!=0)
			{
			r=pC->Reserve(0);
			aRunAddr=(TLinAddr)pC->Base();
			}
		}
	if (r==KErrNone && (aInfo.iOperations & SChunkCreateInfo::EAdd))
		{
		if (pC->iAttributes & DMemModelChunk::ECode)
			Mmu::Get().SyncCodeMappings();
		if (pC->iChunkType!=EUserCode)
			{
			r=WaitProcessLock();
			if (r==KErrNone)
				{
				r=AddChunk(pC,aRunAddr,EFalse);
				SignalProcessLock();
				}
			}
		else
			aRunAddr=(TLinAddr)pC->Base();	// code chunks always fixed address
		}
	if (r==KErrNone)
		{
		pC->iDestroyedDfc = aInfo.iDestroyedDfc;
		aChunk=(DChunk*)pC;
		}
	else
		pC->Close(NULL);	// NULL since chunk can't have been added to process
	return r;
	}

TInt DMemModelProcess::DoCreate(TBool aKernelProcess, TProcessCreateInfo& aInfo)
	{
	__KTRACE_OPT(KPROC,Kern::Printf("DMemModelProcess::DoCreate %O",this));

	if (aKernelProcess)
		iAttributes=ESupervisor|EFixedAddress|EPrivate;
	else if (aInfo.iAttr & ECodeSegAttFixed)
		iAttributes=EFixedAddress|EPrivate;
	else
		iAttributes=0;
	if ((iAttributes & ESupervisor)==0 && (iAttributes & EFixedAddress)!=0)
		{
		CheckForFixedAccess();
		}
	return KErrNone;
	}

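// Create the $DAT chunk which holds the process's static data, BSS and thread stacks.
// The chunk is disconnected and sized to the page-rounded data/bss size plus the
// per-process stack space limit; for a fixed-address process loaded from ROM the run
// address is taken from the ROM image header.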
TInt DMemModelProcess::CreateDataBssStackArea(TProcessCreateInfo& aInfo)
	{
	__KTRACE_OPT(KPROC,Kern::Printf("DMemModelProcess::CreateDataBssStackArea %O",this));
	TInt dataBssSize=Mmu::RoundToPageSize(aInfo.iTotalDataSize);
	TInt maxSize=dataBssSize+PP::MaxStackSpacePerProcess;
	TBool fixed=(iAttributes & EFixedAddress);

	__KTRACE_OPT(KPROC,Kern::Printf("DataBssSize=%x, chunk max size %x",dataBssSize,maxSize));

	SChunkCreateInfo cinfo;
	cinfo.iGlobal=EFalse;
	cinfo.iAtt=TChunkCreate::EDisconnected;
	cinfo.iForceFixed=EFalse;
	cinfo.iOperations=SChunkCreateInfo::EAdjust|SChunkCreateInfo::EAdd;
	cinfo.iType=EUserData;
	cinfo.iMaxSize=maxSize;
	cinfo.iInitialBottom=0;
	cinfo.iInitialTop=dataBssSize;
	cinfo.iPreallocated=0;
	cinfo.iName.Set(KDollarDat);
	cinfo.iOwner=this;
	if (fixed && dataBssSize!=0 && aInfo.iCodeLoadAddress)
		{
		const TRomImageHeader& rih=*(const TRomImageHeader*)aInfo.iCodeLoadAddress;
		cinfo.iRunAddress=rih.iDataBssLinearBase;
		}
	else
		cinfo.iRunAddress=0;
	TInt r=NewChunk((DChunk*&)iDataBssStackChunk,cinfo,iDataBssRunAddress);
	return r;
	}

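// Add an existing chunk to this process. If the chunk is already mapped into the
// process only its access count is incremented; otherwise the work is delegated to
// the three-argument AddChunk() overload below, all under the process $LOCK mutex.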
TInt DMemModelProcess::AddChunk(DChunk* aChunk,TBool isReadOnly)
	{
	DMemModelChunk* pC=(DMemModelChunk*)aChunk;
	TInt r=WaitProcessLock();
	if (r==KErrNone)
		{
		TInt pos=0;
		r=ChunkIndex(pC,pos);
		TLinAddr dataSectionBase=0;
		if (r==0) // Found the chunk in this process, just up its count
			{
			iChunks[pos].iAccessCount++;
			__KTRACE_OPT(KPROC,Kern::Printf("DMemModelProcess::AddChunk %08x to %08x (Access count incremented to %d)",aChunk,this,iChunks[pos].iAccessCount));
			SignalProcessLock();
			return KErrNone;
			}
		r=AddChunk(pC,dataSectionBase,isReadOnly);
		SignalProcessLock();
		}
	__KTRACE_OPT(KPROC,Kern::Printf("DMemModelProcess::AddChunk returns %d",r));
	return r;
	}

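// Work out and apply the cache/TLB flushes needed before a chunk is moved between its
// home and run addresses. Code chunks also require an instruction permission flush,
// on the assumption (noted below) that code chunks themselves do not move.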
void FlushBeforeChunkMove(DMemModelChunk* aChunk)
	{
	Mmu& m = Mmu::Get();
	TUint32 ff=Mmu::EFlushDMove|Mmu::EFlushDPermChg;
	if (aChunk->iAttributes & DMemModelChunk::ECode)		// assumption here that code chunks don't move
		ff |= Mmu::EFlushIPermChg;
	m.GenericFlush(ff);
	}

TInt DMemModelProcess::AddChunk(DMemModelChunk* aChunk, TLinAddr& aDataSectionBase, TBool isReadOnly)
	{
	//
	// Must hold the process $LOCK mutex before calling this
	//
	__KTRACE_OPT(KPROC,Kern::Printf("DMemModelProcess::AddChunk %08x to %08x (for first time)",aChunk,this));
	TInt r=AllocateDataSectionBase(*((DMemModelChunk*)aChunk),(TUint&)aDataSectionBase);
	if(r!=KErrNone)
		return r;

	if (iNumChunks==KMaxChunksInProcess)
		return KErrOverflow;		// too many chunks in the process

	SChunkInfo *pC=iChunks;
	SChunkInfo *pE=pC+iNumChunks-1;
	NKern::LockSystem();
	while(pE>=pC && TUint(pE->iDataSectionBase)>TUint(aDataSectionBase))
		{
		pE[1]=pE[0];
		pE--;
		}
	pC=pE+1;
	pC->iDataSectionBase=aDataSectionBase;
	pC->isReadOnly=isReadOnly;
	pC->iAccessCount=1;
	pC->iChunk=aChunk;
	iNumChunks++;

	if(!(iAttributes&ESupervisor))
		{
		TInt attribs=aChunk->iAttributes;
		if (!(attribs&DMemModelChunk::EFixedAddress))
			{
			iNumMovingChunks++;
			iAttributes |= EMoving;
			}

		if (attribs&DMemModelChunk::EFixedAccess)
			{
			NKern::UnlockSystem();
			AddFixedAccessChunk(aChunk);
			goto done;	// FINISHED
			}

		iAttributes |= EVariableAccess;
		if (attribs & DMemModelChunk::ECode)
			{
			iNumNonFixedAccessCodeChunks++;
			iAttributes |= EVariableCode;
			}
		if (++iNumNonFixedAccessChunks==1)
			{
			NKern::UnlockSystem();
			DoAttributeChange();	// change process from fixed to variable access
			NKern::LockSystem();
			}

		if (this!=TheCurrentThread->iOwningProcess)
			{
			// Adding chunk to another process
			if (this==TheCurrentDataSectionProcess && !(attribs&DMemModelChunk::EFixedAddress))
				TheCompleteDataSectionProcess=NULL;	// just set partial state change flag and leave chunk alone
			if (this==TheCurrentAddressSpace)
				TheCurrentAddressSpace=NULL;
			NKern::UnlockSystem();
			goto done;	// FINISHED
			}

		// Adding chunk to currently active user process
		{
		TheCurrentAddressSpace=NULL;
		Mmu& m = Mmu::Get();
		TUint32 ff=0;	// flush flags
		DMemModelChunk::TChunkState state=isReadOnly?DMemModelChunk::ERunningRO:DMemModelChunk::ERunningRW;
		if (attribs&DMemModelChunk::EFixedAddress)
			{
			// Fixed address chunk, just change permissions
			ff|=aChunk->ApplyTopLevelPermissions(state);
			}
		else if (this==TheCurrentDataSectionProcess)
			{
			// Moving chunk.
			// This process is already in the data section, so just move the chunk down.
			// Must do flushing first
			TheCompleteDataSectionProcess=NULL;
			FlushBeforeChunkMove(aChunk);
			aChunk->MoveToRunAddress(aDataSectionBase,state);	// idempotent
			TheCompleteDataSectionProcess=this;
			}
		else if (iNumMovingChunks==1)
			{
			// The first moving chunk being added to a process with the data section occupied by another process.
			// This is the problematic case - we must displace the other process from the data section.
			// However we must allow preemption after each chunk is moved. Note that if a reschedule does
			// occur the necessary chunk moves will have been done by the scheduler, so we can finish
			// immediately.
			// Must do cache flushing first
			m.GenericFlush(Mmu::EFlushDMove);
			if (TheCurrentDataSectionProcess)
				{
				if (TheCurrentDataSectionProcess->iAttributes & EVariableCode)
					ff |= Mmu::EFlushIPermChg;
				SChunkInfo* pOtherProcChunks=TheCurrentDataSectionProcess->iChunks;
				SChunkInfo* pEndOtherProcChunks=pOtherProcChunks+TheCurrentDataSectionProcess->iNumChunks;
				NKern::FlashSystem();
				// if a reschedule occurs, TheCompleteDataSectionProcess will become equal to this
				while (TheCompleteDataSectionProcess!=this && pOtherProcChunks<pEndOtherProcChunks)
					{
					DMemModelChunk *pChunk=pOtherProcChunks->iChunk;
					pChunk->MoveToHomeSection();
					++pOtherProcChunks;
					TheCompleteDataSectionProcess=NULL;
					NKern::FlashSystem();
					}
				}
			if (TheCompleteDataSectionProcess!=this)
				{
				if (attribs & DMemModelChunk::ECode)
					ff |= Mmu::EFlushIPermChg;
				aChunk->MoveToRunAddress(aDataSectionBase,state);
				TheCurrentDataSectionProcess=this;
				TheCompleteDataSectionProcess=this;
				}
			}
		TheCurrentAddressSpace=this;
		TheCurrentVMProcess=this;
		if (ff)
			m.GenericFlush(ff);
		}
	}
	NKern::UnlockSystem();
done:
	__KTRACE_OPT(KPROC,Kern::Printf("Added array entry for %x",aDataSectionBase));
	__KTRACE_OPT(KPROC,Kern::Printf("Chunks maxsize %x",pC->iChunk->MaxSize()));
	__DEBUG_EVENT(EEventUpdateProcess, this);
	return KErrNone;
	}

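// Choose the data-section run address at which a chunk will appear in this process.
// Fixed-address chunks keep their home region base and DLL$DATA always maps at the
// MMU's DLL data base; for ordinary user data chunks the chunk list (kept sorted by
// data section base) is scanned for the first gap large enough for the chunk's
// maximum size.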
TInt DMemModelProcess::AllocateDataSectionBase(DMemModelChunk& aChunk, TUint& aBase)
	{
	__KTRACE_OPT(KPROC,Kern::Printf("DMemModelProcess::AllocateDataSectionBase"));
	aBase=0;
	if ((aChunk.iAttributes & DMemModelChunk::EPrivate) && this!=aChunk.iOwningProcess)
		return KErrAccessDenied;
	if (aChunk.iAttributes & DMemModelChunk::EFixedAddress)
		{
		aBase=aChunk.iHomeRegionBase;
		return KErrNone;
		}
	Mmu& m = Mmu::Get();
	TLinAddr base=0;
	TLinAddr maxBase=0;
	switch (aChunk.iChunkType)
		{
	case EUserData:
		base=m.iDataSectionBase;
		maxBase=m.iDllDataBase;
		break;
	case EUserCode:
	case EUserSelfModCode:
		MM::Panic(MM::EUserCodeNotFixed);
		break;
	case EDllData:
		aBase=m.iDllDataBase;
		return KErrNone;
	default:
		__KTRACE_OPT(KPANIC,Kern::Printf("DMemModelProcess::AllocateDataSectionBase BadChunkType %d",aChunk.iChunkType));
		return KErrAccessDenied;
		}

	TLinAddr lastBase=base;
	SChunkInfo *pS=iChunks;
	SChunkInfo *pE=pS+iNumChunks;
	while (pS<pE)
		{
		TLinAddr thisBase=pS->iDataSectionBase;
		__KTRACE_OPT(KPROC,Kern::Printf("Chunk already at %x",thisBase));
		if (thisBase>=maxBase)
			break;
		if (thisBase>=base) // Within the range we are allocating
			{
			TInt gap=thisBase-lastBase;
			if (gap>=aChunk.MaxSize())
				break;
			lastBase=thisBase+pS->iChunk->MaxSize();
			}
		pS++;
		}
	if (lastBase+aChunk.MaxSize()>maxBase)
		{
		__KTRACE_OPT(KPROC,Kern::Printf("ERROR - none allocated, out of memory"));
		return KErrNoMemory;
		}
	aBase=lastBase;
	__KTRACE_OPT(KPROC,Kern::Printf("User allocated %x",aBase));
	return KErrNone;
	}

TUint8* DMemModelProcess::DataSectionBase(DMemModelChunk* aChunk)
	{
	// this can't be called after $LOCK is deleted
	Kern::MutexWait(*iProcessLock);
	TInt pos=0;
	TInt r=ChunkIndex(aChunk,pos);
	if (r==0) // Found the chunk
		{
		TUint8* answer=((TUint8*)iChunks[pos].iDataSectionBase);
		__KTRACE_OPT(KPROC,Kern::Printf("DMemModelProcess::DataSectionBase %x",answer));
		Kern::MutexSignal(*iProcessLock);
		return answer;
		}
	__KTRACE_OPT(KPROC,Kern::Printf("DMemModelProcess::DataSectionBase chunk %08x not present in %08x",aChunk,this));
	Kern::MutexSignal(*iProcessLock);
	return(NULL);
	}

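// Remove the chunk at the given index from this process. Any permission change or
// move back to the home section (with the necessary cache flushes) is done first,
// then the entry is removed from the sorted chunk list and the process attribute
// flags and global "current process" pointers are updated to match.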
void DMemModelProcess::DoRemoveChunk(TInt aIndex)
	{
	// Must be called with process $LOCK mutex held
	__DEBUG_EVENT(EEventUpdateProcess, this);
	DMemModelChunk* chunk = iChunks[aIndex].iChunk;
	Mmu& m = Mmu::Get();
	NKern::LockSystem();
	TInt attribs=chunk->iAttributes;
	__KTRACE_OPT(KPROC,Kern::Printf("Removing Chunk attribs=%08x, Process attribs=%08x",attribs,iAttributes));
	if (!(attribs&DMemModelChunk::EFixedAccess))
		{
		// Must leave chunk in process chunk list until we have flushed the cache if necessary
		if (this==TheCurrentVMProcess && (attribs&DMemModelChunk::EFixedAddress))
			{
			TUint32 ff=chunk->ApplyTopLevelPermissions(DMemModelChunk::ENotRunning);
			m.GenericFlush(ff);
			// the system must now remain locked until the chunk is removed from the process chunk list
			}
		if (this==TheCurrentDataSectionProcess && !(attribs&DMemModelChunk::EFixedAddress))
			{
			// must do cache flush first
			FlushBeforeChunkMove(chunk);	// preemptible, but on return cache is free of chunk data
			chunk->MoveToHomeSection();
			// the system must now remain locked until the chunk is removed from the process chunk list
			}
		}

	// Remove the chunk from the process chunk list
	SChunkInfo *pD=iChunks+aIndex;
	SChunkInfo *pS=iChunks+aIndex+1;
	SChunkInfo *pE=iChunks+iNumChunks;
	while(pS<pE)
		*pD++=*pS++;
	iNumChunks--;

	// Update the process attribute flags
	if (!(attribs&DMemModelChunk::EFixedAddress))
		{
		if (--iNumMovingChunks==0)
			iAttributes &= ~EMoving;
		}
	if (!(attribs&DMemModelChunk::EFixedAccess))
		{
		if ((attribs&DMemModelChunk::ECode) && --iNumNonFixedAccessCodeChunks==0)
				iAttributes &= ~EVariableCode;
		if (this==TheCurrentDataSectionProcess && !(iAttributes&EMoving))
			{
			TheCurrentDataSectionProcess=NULL;
			TheCompleteDataSectionProcess=NULL;
			}
		if (--iNumNonFixedAccessChunks==0)
			{
			iAttributes &= ~EVariableAccess;
			if (this==TheCurrentVMProcess)
				{
				TheCurrentVMProcess=NULL;
				TheCurrentAddressSpace=NULL;
				}
			NKern::UnlockSystem();
			DoAttributeChange();	// change process from variable to fixed access
			}
		else
			NKern::UnlockSystem();
		}
	else
		{
		NKern::UnlockSystem();
		RemoveFixedAccessChunk(chunk);
		}
	}

/**
Final chance for process to release resources during its death.

Called with process $LOCK mutex held (if it exists).
This mutex will not be released before it is deleted.
I.e. no other thread will ever hold the mutex again.
*/
void DMemModelProcess::FinalRelease()
	{
	// Clean up any left over chunks (such as SharedIo buffers)
	while(iNumChunks)
		DoRemoveChunk(0);
	}

void DMemModelProcess::RemoveChunk(DMemModelChunk *aChunk)
	{
	// note that this can't be called after the process $LOCK mutex has been deleted
	// since it can only be called by a thread in this process doing a handle close or
	// dying, or by the process handles array being deleted due to the process dying,
	// all of which happen before $LOCK is deleted.
	__KTRACE_OPT(KPROC,Kern::Printf("DMemModelProcess::RemoveChunk %08x from %08x",aChunk,this));
	Kern::MutexWait(*iProcessLock);
	TInt pos=0;
	TInt r=ChunkIndex(aChunk,pos);
	__KTRACE_OPT(KPROC,if(r) Kern::Printf("Chunk lookup failed with %d",r));
	if (r==0) // Found the chunk
		{
		__KTRACE_OPT(KPROC,Kern::Printf("Chunk access count %d",iChunks[pos].iAccessCount));
		if (--iChunks[pos].iAccessCount==0)
			DoRemoveChunk(pos);
		}
	Kern::MutexSignal(*iProcessLock);
	}

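// Linear search of the process chunk list for aChunk; returns KErrNone and the index
// in aPos if found, KErrNotFound otherwise.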
TInt DMemModelProcess::ChunkIndex(DMemModelChunk* aChunk,TInt& aPos)
	{
	if (aChunk==NULL)
		return(KErrNotFound);
	TInt i=0;
	SChunkInfo *pC=iChunks;
	SChunkInfo *pE=pC+iNumChunks;
	while(pC<pE && (pC->iChunk!=aChunk))
		{
		pC++;
		i++;
		}
	if (pC==pE)
		return KErrNotFound;
	aPos=i;
	return KErrNone;
	}

void DMemModelProcess::RemoveDllData()
//
// Call with CodeSegLock held
//
	{
	Kern::SafeClose((DObject*&)iDllDataChunk, this);
	}

TInt DMemModelProcess::CreateDllDataChunk()
//
// Call with CodeSegLock held
//
	{
	__KTRACE_OPT(KDLL,Kern::Printf("DMemModelProcess %O CreateDllDataChunk",this));
	Mmu& m = Mmu::Get();
	SChunkCreateInfo c;
	c.iGlobal=EFalse;
	c.iAtt=TChunkCreate::EDisconnected;
	c.iForceFixed=EFalse;
	c.iOperations=SChunkCreateInfo::EAdjust|SChunkCreateInfo::EAdd;
	c.iRunAddress=0;
	c.iPreallocated=0;
	c.iType=EDllData;
	c.iMaxSize=(iAttributes&EFixedAddress) ? 1 : m.iMaxDllDataSize;	// minimal size for fixed processes
	c.iName.Set(KLitDllDollarData);
	c.iOwner=this;
	c.iInitialBottom=0;
	c.iInitialTop=0;
	TLinAddr runAddr;
	return NewChunk((DChunk*&)iDllDataChunk,c,runAddr);
	}

void DMemModelProcess::FreeDllDataChunk()
	{
	iDllDataChunk->Close(this);
	iDllDataChunk=NULL;
	}

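// Commit memory for a DLL's writable static data. The DLL$DATA chunk is created on
// demand, the supplied base address is converted to an offset within it, and the
// chunk is freed again if this commit fails and leaves the chunk empty.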
TInt DMemModelProcess::CommitDllData(TLinAddr aBase, TInt aSize)
	{
	__KTRACE_OPT(KDLL,Kern::Printf("DMemModelProcess %O CommitDllData %08x+%x",this,aBase,aSize));
	TInt r=KErrNone;
	if (!iDllDataChunk)
		r=CreateDllDataChunk();
	if (r==KErrNone)
		{
		Mmu& m = Mmu::Get();
		TLinAddr dll_data_base=(iAttributes & EFixedAddress) ? (TLinAddr)iDllDataChunk->Base()
														: TLinAddr(m.iDllDataBase);
		TInt offset=aBase-dll_data_base;
		__ASSERT_ALWAYS(TUint32(offset)<TUint32(iDllDataChunk->iMaxSize),MM::Panic(MM::ECommitInvalidDllDataAddress));
		r=iDllDataChunk->Commit(offset, aSize);
		if (r!=KErrNone && iDllDataChunk->iSize==0)
			FreeDllDataChunk();
		}
	__KTRACE_OPT(KDLL,Kern::Printf("CommitDllData returns %d",r));
	return r;
	}

void DMemModelProcess::DecommitDllData(TLinAddr aBase, TInt aSize)
	{
	__KTRACE_OPT(KDLL,Kern::Printf("DMemModelProcess %O DecommitDllData %08x+%x",this,aBase,aSize));
	Mmu& m = Mmu::Get();
	TLinAddr dll_data_base=(iAttributes & EFixedAddress) ? (TLinAddr)iDllDataChunk->Base()
													: TLinAddr(m.iDllDataBase);
	TInt offset=aBase-dll_data_base;
	TInt r=iDllDataChunk->Decommit(offset, aSize);
	__ASSERT_ALWAYS(r==KErrNone,MM::Panic(MM::EDecommitInvalidDllDataAddress));
	if (iDllDataChunk->iSize==0)
		FreeDllDataChunk();
	}

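// Called when a code segment is attached to this process. Kernel-only code cannot be
// mapped into a non-supervisor process; for user DLLs with writable static data the
// corresponding DLL$DATA pages are committed here. UnmapCodeSeg below undoes this.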
TInt DMemModelProcess::MapCodeSeg(DCodeSeg* aSeg)
	{
	DMemModelCodeSeg& seg=*(DMemModelCodeSeg*)aSeg;
	__KTRACE_OPT(KDLL,Kern::Printf("Process %O MapCodeSeg %C", this, aSeg));
	TBool kernel_only=( (seg.iAttr&(ECodeSegAttKernel|ECodeSegAttGlobal)) == ECodeSegAttKernel );
	if (kernel_only && !(iAttributes&ESupervisor))
		return KErrNotSupported;
	if (seg.iAttr&ECodeSegAttKernel || seg.iDataAllocBase==-1)
		return KErrNone;	// no extra mappings needed for kernel code or code with fixed data address
	TInt r=KErrNone;
	if (seg.IsDll())
		{
		TInt total_data_size;
		TLinAddr data_base;
		seg.GetDataSizeAndBase(total_data_size, data_base);
		if (r==KErrNone && total_data_size)
			{
			TInt size=Mmu::RoundToPageSize(total_data_size);
			r=CommitDllData(data_base, size);
			}
		}
	return r;
	}

void DMemModelProcess::UnmapCodeSeg(DCodeSeg* aSeg)
	{
	DMemModelCodeSeg& seg=*(DMemModelCodeSeg*)aSeg;
	__KTRACE_OPT(KDLL,Kern::Printf("Process %O UnmapCodeSeg %C", this, aSeg));
	if (seg.iAttr&ECodeSegAttKernel || seg.iDataAllocBase==-1)
		return;	// no extra mappings needed for kernel code or code with fixed data address
	if (seg.IsDll())
		{
		TInt total_data_size;
		TLinAddr data_base;
		seg.GetDataSizeAndBase(total_data_size, data_base);
		if (total_data_size)
			DecommitDllData(data_base, Mmu::RoundToPageSize(total_data_size));
		}
	}

TInt DMemModelProcess::NewShPool(DShPool*& /* aPool */, TShPoolCreateInfo& /* aInfo */)
	{
	return KErrNotSupported;
	}

TInt DThread::RawRead(const TAny* aSrc, TAny* aDest, TInt aLength, TInt aFlags, TIpcExcTrap* aExcTrap)
//
// Read from the thread's process.
// aSrc is the run address of the memory to read. The memory is in this thread's address space.
// aDest is the address of the destination. The memory is in the current process's address space.
// aExcTrap is the exception trap object to be updated if the actual memory access is performed on another memory area. This happens
//          when the read is performed in chunks or if the home address is read instead of the provided run address.
// Enter and return with system locked.
	{
	const TUint8* pS=(const TUint8*)aSrc;
	TUint8* pD=(TUint8*)aDest;
	const TUint8* pC=NULL;
	TBool check=ETrue;
	TBool suspect=EFalse;
	DThread* pT=TheCurrentThread;
	while (aLength)
		{
		if (check)
			{
			suspect=((aFlags & KCheckLocalAddress) && !MM::CurrentAddress(pT,pD,aLength,ETrue));
			if (iMState==EDead)
				return KErrDied;
			pC=(const TUint8*)MM::CurrentAddress(this,pS,aLength,EFalse);
			__KTRACE_OPT(KTHREAD2,Kern::Printf("DThread::RawRead %08x<-[%08x::%08x]%08x+%x",pD,this,pS,pC,aLength));
			if (!pC)
				return KErrBadDescriptor;
			}
		TInt len=Min(aLength,K::MaxMemCopyInOneGo);
		if (aExcTrap)
			{
			aExcTrap->iSize = (len + 2*(sizeof(TInt32)-1));//+6 is for the worst case. We do not have to be precise here.
			aExcTrap->iRemoteBase = (TLinAddr)pC & ~(sizeof(TInt32)-1);
			if (aExcTrap->iLocalBase)
				aExcTrap->iLocalBase = (TLinAddr)pD & ~(sizeof(TInt32)-1);
			__KTRACE_OPT(KTHREAD2,Kern::Printf("DThread::RawRead exc. update: %08x %08x %08x",aExcTrap->iLocalBase,aExcTrap->iRemoteBase,aExcTrap->iSize));
			}

#ifdef __DEMAND_PAGING__
		XTRAP_PAGING_START(check);
		CHECK_PAGING_SAFE;
#endif

		suspect?(void)umemput(pD,pC,len):(void)memcpy(pD,pC,len);

#ifdef __DEMAND_PAGING__
		XTRAP_PAGING_END;
		if(check<0)
			return check; // paging error caused by bad client (I.e. 'this' thread was bad)
		if(check)
			{
			__KTRACE_OPT(KTHREAD2,Kern::Printf("DThread::RawRead paging trap, suspect %d, dest %08x, source %08x, length %d\n", suspect, pD, pC, len));
			continue;
			}
#endif

		pD+=len;
		pS+=len;
		pC+=len;
		aLength-=len;
		if (aLength)
			check=NKern::FlashSystem();
		}
	return KErrNone;
	}

TInt DThread::RawWrite(const TAny* aDest, const TAny* aSrc, TInt aLength, TInt aFlags, DThread* aOriginatingThread, TIpcExcTrap* aExcTrap)
//
// Write to the thread's process.
// aDest is the run address of the memory to write. It resides in this thread's address space.
// aSrc is the address of the source buffer. It resides in the current process's address space.
// aOriginatingThread is the thread on behalf of which this operation is performed (eg client of device driver).
// Enter and return with system locked.
// aExcTrap is the exception trap object to be updated if the actual memory access is performed on another memory area. This happens
//          when the write is performed in chunks or if the home address is written instead of the provided run address.
//
	{
	TUint8* pD=(TUint8*)aDest;
	const TUint8* pS=(const TUint8*)aSrc;
	TUint8* pC=NULL;
	TBool check=ETrue;
	TBool suspect=EFalse;
	DThread* pT=TheCurrentThread;
	DThread* pO=aOriginatingThread;
	if (!pO)
		pO=pT;
	DProcess* pF=K::TheFileServerProcess;
	TBool special=(iOwningProcess==pF && pO->iOwningProcess==pF);
	while (aLength)
		{
		if (check)
			{
			suspect=((aFlags & KCheckLocalAddress) && !MM::CurrentAddress(pT,pS,aLength,EFalse));
			if (iMState==EDead)
				return KErrDied;
			pC=(TUint8*)MM::CurrentAddress(this,pD,aLength,ETrue);
			__KTRACE_OPT(KTHREAD2,Kern::Printf("DThread::RawWrite [%08x::%08x]%08x<-%08x+%x",this,pD,pC,pS,aLength));
			if (!pC)
				{
				if (special)
					pC=pD;
				else
					return KErrBadDescriptor;
				}
			}
		TInt len=Min(aLength,K::MaxMemCopyInOneGo);
		if (aExcTrap)
			{
			aExcTrap->iSize = (len + 2*(sizeof(TInt32)-1));//+6 is for the worst case. We do not have to be precise here.
			aExcTrap->iRemoteBase = (TLinAddr)pC & ~(sizeof(TInt32)-1);
			if (aExcTrap->iLocalBase)
				aExcTrap->iLocalBase = (TLinAddr)pS & ~(sizeof(TInt32)-1);
			__KTRACE_OPT(KTHREAD2,Kern::Printf("DThread::RawWrite exc. update %08x %08x %08x",aExcTrap->iLocalBase,aExcTrap->iRemoteBase,aExcTrap->iSize));
			}

#ifdef __DEMAND_PAGING__
		XTRAP_PAGING_START(check);
		// Must check that it is safe to page, unless we are reading from unpaged ROM in which case
		// we allow it.  umemget does this anyway, so we just need to check if suspect is not set.
		if (!suspect)
			{
			CHECK_PAGING_SAFE_RANGE((TLinAddr)aSrc, aLength);
			CHECK_DATA_PAGING_SAFE_RANGE((TLinAddr)aDest, aLength);
			}
#endif

		suspect?(void)umemget(pC,pS,len):(void)memcpy(pC,pS,len);

#ifdef __DEMAND_PAGING__
		XTRAP_PAGING_END
		if(check<0)
			return check; // paging error caused by bad client (I.e. 'this' thread was bad)
		if(check)
			{
			__KTRACE_OPT(KTHREAD2,Kern::Printf("DThread::RawWrite paging trap, suspect %d, dest %08x, src %08x, length %d\n", suspect, pC, pD, len));
			continue;
			}
#endif

		pD+=len;
		pS+=len;
		pC+=len;
		aLength-=len;
		if (aLength)
			check=NKern::FlashSystem();
		}
	return KErrNone;
	}

#ifdef __DEBUGGER_SUPPORT__

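// Debugger support: write a 1-, 2- or 4-byte value into code belonging to aProcess.
// The write is performed under an exception trap so that a bad address fails with an
// error code rather than a kernel fault.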
TInt CodeModifier::SafeWriteCode(DProcess* aProcess, TLinAddr aAddress, TInt aSize, TUint aValue, void* aOldValue)
	{
	//Set exception handler. Make sure the boundaries cover the worst case (aSize = 4)
	TIpcExcTrap xt;
	xt.iLocalBase=0;
	xt.iRemoteBase=(TLinAddr)aAddress&~3; //word aligned.
	xt.iSize=sizeof(TInt);
	xt.iDir=1;
	NKern::LockSystem();
	TInt r=xt.Trap(NULL);
	if (r==0)
		{
		r = WriteCode(aAddress, aSize, aValue, aOldValue);
		xt.UnTrap();
		}
	NKern::UnlockSystem();
	return r;
	}

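// Perform the actual code modification. The address must lie either in ROM or in the
// non-XIP user code chunk; the old value is saved, the new value written with
// interrupts disabled, and the caches maintained so the modified instruction becomes
// visible to the instruction stream.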
TInt CodeModifier::WriteCode(TLinAddr aAddress, TInt aSize, TUint aValue, void* aOldValue)
	{
	TUint userChunkBase = (TUint)MM::UserCodeChunk->Base();
	TRomHeader romHeader = Epoc::RomHeader();

	if (!((aAddress >= romHeader.iRomBase ) && (aAddress < (romHeader.iRomBase + romHeader.iUncompressedSize))))  //if not in ROM
		if ( (aAddress<userChunkBase) || (aAddress) > (userChunkBase+MM::UserCodeChunk->MaxSize()) ) //and not in non-XIP code
			return KErrBadDescriptor;

	// if page was moved by defrag there may be a cache line with the
	// wrong, old physical address, so we must invalidate this first.
	InternalCache::Invalidate(KCacheSelectD, (TLinAddr)aAddress, 4);

	//Copy data and clean/invalidate caches with interrupts disabled.
	TInt irq=NKern::DisableAllInterrupts();
	switch(aSize)
		{
		case 1:
			*(TUint8*) aOldValue = *(TUint8*)aAddress;
			*(TUint8*) aAddress  = (TUint8)aValue;
			 break;
		case 2:
			*(TUint16*) aOldValue = *(TUint16*)aAddress;
			*(TUint16*) aAddress  = (TUint16)aValue;
			 break;
		default://It is 4 otherwise
			*(TUint32*) aOldValue = *(TUint32*)aAddress;
			*(TUint32*) aAddress  = (TUint32)aValue;
			 break;
		};
	CacheMaintenance::CodeChanged(aAddress, aSize, CacheMaintenance::ECodeModifier);
	NKern::RestoreInterrupts(irq);

	return KErrNone;
	}
#endif //__DEBUGGER_SUPPORT__

TInt DThread::ReadAndParseDesHeader(const TAny* aSrc, TDesHeader& aDest)
//
// Read the header of a remote descriptor.
// Enter and return with system locked
//
	{
	TInt r=KErrBadDescriptor;
	DThread* thread = TheCurrentThread;
	TRawDesHeader& header = (TRawDesHeader&)aDest;

#ifdef __DEMAND_PAGING__
retry:
	TInt pagingFault;
	XTRAP_PAGING_START(pagingFault);
	CHECK_PAGING_SAFE;
	thread->iIpcClient = this;
#endif

	const TUint32* pS=(const TUint32*)MM::CurrentAddress(this,aSrc,sizeof(TDesC8),EFalse);
	if (pS && KErrNone==Kern::SafeRead(pS,&header[0],sizeof(TUint32)))
		{
		TInt type=header[0]>>KShiftDesType8;
		static const TUint8 LengthLookup[16]={4,8,12,8,12,0,0,0,0,0,0,0,0,0,0,0};
		TInt len=LengthLookup[type];
		if(len>(TInt)sizeof(TUint32))
			{
			if(KErrNone==Kern::SafeRead(pS+1,&header[1],len-sizeof(TUint32)))
				r = type;
			// else, bad descriptor
			}
		else if(len)
			r = type;
		// else, bad descriptor
		}

#ifdef __DEMAND_PAGING__
	thread->iIpcClient = NULL;
	XTRAP_PAGING_END;
	if(pagingFault<0)
		return pagingFault; // paging error caused by bad client (I.e. 'this' thread was bad)
	if(pagingFault)
		goto retry;
#endif

	return K::ParseDesHeader(aSrc, header, aDest);
	}

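// Find the chunk in aThread's process whose data section base is the highest one not
// above aAddress, by scanning the sorted chunk list backwards. Returns NULL if the
// address lies below every chunk; callers are presumably expected to check the
// resulting offset against the chunk's size, as OpenSharedChunk below does.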
DMemModelChunk* ChunkFromAddress(DThread* aThread, const TAny* aAddress)
	{
	DMemModelProcess* pP = (DMemModelProcess*)aThread->iOwningProcess;
	DMemModelProcess::SChunkInfo* pS=pP->iChunks;
	DMemModelProcess::SChunkInfo* pC=pS+pP->iNumChunks;
	while(--pC>=pS && TUint(pC->iDataSectionBase)>TUint(aAddress)) {};
	if(pC<pS)
		return 0;
	return pC->iChunk;
	}

/**
	Open a shared chunk in which a remote address range is located.
*/
DChunk* DThread::OpenSharedChunk(const TAny* aAddress, TBool aWrite, TInt& aOffset)
	{
	NKern::LockSystem();

	DMemModelProcess* pP = (DMemModelProcess*)iOwningProcess;
	DMemModelProcess::SChunkInfo* pS=pP->iChunks;
	DMemModelProcess::SChunkInfo* pC=pS+pP->iNumChunks;
	while(--pC>=pS && TUint(pC->iDataSectionBase)>TUint(aAddress)) {};
	if(pC>=pS)
		{
		DMemModelChunk* chunk = pC->iChunk;
		if(chunk->iChunkType==ESharedKernelSingle || chunk->iChunkType==ESharedKernelMultiple)
			{
			TInt offset = (TInt)aAddress-(TInt)chunk->Base();
			if(TUint(offset)<TUint(chunk->iMaxSize) && chunk->Open()==KErrNone)
				{
				aOffset = offset;
				NKern::UnlockSystem();
				return chunk;
				}
			}
		}
	NKern::UnlockSystem();
	return 0;
	}

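// DMA helpers: pin down and return the physical pages backing a linear address range,
// and release them again afterwards. Only supported for fixed-address processes in
// this memory model, presumably because a moving process's chunks could be relocated
// underneath the transfer.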
TInt DThread::PrepareMemoryForDMA(const TAny* aLinAddr, TInt aSize, TPhysAddr* aPhysicalPageList)
	{
	if ((iOwningProcess->iAttributes & DMemModelProcess::EFixedAddress )==0)
		return KErrNotSupported;
	Mmu& m=(Mmu&)*MmuBase::TheMmu;
	return m.PreparePagesForDMA((TLinAddr)aLinAddr, aSize, aPhysicalPageList);
	}

TInt DThread::ReleaseMemoryFromDMA(const TAny* aLinAddr, TInt aSize, TPhysAddr* aPhysicalPageList)
	{
	if ((iOwningProcess->iAttributes & DMemModelProcess::EFixedAddress )==0)
		return KErrNotSupported;
	TInt pageCount = (((TInt)aLinAddr & KPageMask) + aSize + KPageMask) >> KPageShift;
	Mmu& m=(Mmu&)*MmuBase::TheMmu;
	return m.ReleasePagesFromDMA(aPhysicalPageList, pageCount);
	}