os/kernelhwsrv/kernel/eka/memmodel/epoc/flexible/mchunk.cpp
// Copyright (c) 1994-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
//

#include <memmodel.h>
#include "mmu/mm.h"
#include "mmboot.h"

DMemModelChunk::DMemModelChunk()
	{
	}

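// Destructor: tears down the kernel mapping and memory object, frees the
// page bitmaps, and queues the destruction DFC (if one was set) to run
// when the system is idle.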
DMemModelChunk::~DMemModelChunk()
	{
	__KTRACE_OPT(KTHREAD,Kern::Printf("DMemModelChunk destruct %O",this));

	MM::MappingDestroy(iKernelMapping);
	MM::MemoryDestroy(iMemoryObject);

	delete iPageBitMap;
	delete iPermanentPageBitMap;

	TDfc* dfc = iDestroyedDfc;
	if(dfc)
		dfc->QueueOnIdle();

	__KTRACE_OPT(KMEMTRACE, Kern::Printf("MT:D %d %x %O",NTickCount(),this,this));
#ifdef BTRACE_CHUNKS
	BTraceContext4(BTrace::EChunks,BTrace::EChunkDestroyed,this);
#endif
	}

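// Close a reference on the chunk. If aPtr is non-null it identifies the
// process whose handle is being closed, and the chunk is first removed
// from that process. Returns EObjectDeleted if this was the last
// reference, otherwise 0.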
TInt DMemModelChunk::Close(TAny* aPtr)
	{
	if (aPtr)
		{
		DMemModelProcess* pP=(DMemModelProcess*)aPtr;
		__NK_ASSERT_DEBUG(!iOwningProcess || iOwningProcess==pP);
		pP->RemoveChunk(this);
		}
	TInt r=Dec();
	__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Close %d %O",r,this));
	__NK_ASSERT_DEBUG(r > 0); // Reference count should never be zero or negative here.
	if (r==1)
		{
		K::ObjDelete(this);
		return EObjectDeleted;
		}
	return 0;
	}

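// Decide whether this chunk's memory should be data paged. The decision is
// based on the chunk type, the kernel's data paging policy and, failing an
// explicit EPaged/EUnpaged request in aCreateAtt, the paging attribute of
// the creating process.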
void DMemModelChunk::SetPaging(TUint aCreateAtt)
	{
	// Only user data chunks should be able to be data paged, i.e. only those
	// that can be created via the RChunk create methods.
	if ((iChunkType != EUserData && iChunkType != EUserSelfModCode) ||
		!(K::MemModelAttributes & EMemModelAttrDataPaging))	// Data paging device installed?
		{
		return;
		}
	// Pageable chunks must own their memory.
	__NK_ASSERT_DEBUG(!(iAttributes & EMemoryNotOwned));

	// Set the data paging attributes
	TUint dataPolicy = TheSuperPage().KernelConfigFlags() & EKernelConfigDataPagingPolicyMask;
	if (dataPolicy == EKernelConfigDataPagingPolicyNoPaging)
		{
		return;
		}
	if (dataPolicy == EKernelConfigDataPagingPolicyAlwaysPage)
		{
		iAttributes |= EDataPaged;
		return;
		}
	TUint pagingAtt = aCreateAtt & TChunkCreate::EPagingMask;
	if (pagingAtt == TChunkCreate::EPaged)
		{
		iAttributes |= EDataPaged;
		return;
		}
	if (pagingAtt == TChunkCreate::EUnpaged)
		{
		return;
		}
	// No data paging attribute was specified for this chunk, so use the
	// creating process's attribute.
	__NK_ASSERT_DEBUG(pagingAtt == TChunkCreate::EPagingUnspec);
	DProcess* currentProcess = TheCurrentThread->iOwningProcess;
	if (currentProcess->iAttributes & DProcess::EDataPaged)
		{
		iAttributes |= EDataPaged;
		}
	}

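// Second-phase construction: round the requested maximum size up to a whole
// number of pages, create the page bitmaps used by disconnected and shared
// kernel chunks, derive the memory object type and creation flags from the
// chunk type and attributes, then create the underlying memory object and,
// for kernel-accessed chunk types, a supervisor read/write kernel mapping.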
TInt DMemModelChunk::DoCreate(SChunkCreateInfo& aInfo)
	{
	__ASSERT_COMPILE(!(EMMChunkAttributesMask & EChunkAttributesMask));
	__KTRACE_OPT(KMMU,Kern::Printf("Chunk %O DoCreate att=%08x",this,iAttributes));
	if (aInfo.iMaxSize<=0)
		return KErrArgument;

	iMaxSize = MM::RoundToPageSize(aInfo.iMaxSize);

	TInt maxpages=iMaxSize>>KPageShift;
	if (iAttributes & EDisconnected)
		{
		TBitMapAllocator* pM=TBitMapAllocator::New(maxpages,ETrue);
		if (!pM)
			return KErrNoMemory;
		iPageBitMap=pM;
		__KTRACE_OPT(KMMU,Kern::Printf("PageBitMap at %08x, MaxPages %d",pM,maxpages));
		}
	if(iChunkType==ESharedKernelSingle || iChunkType==ESharedKernelMultiple)
		{
		TBitMapAllocator* pM=TBitMapAllocator::New(maxpages,ETrue);
		if (!pM)
			return KErrNoMemory;
		iPermanentPageBitMap = pM;
		}

	TMemoryAttributes attr = EMemoryAttributeStandard;
	TBool mapInKernel = false;
	TBool nowipe = false;
	TBool executable = false;
	TBool movable = false;
	TInt r;

	switch(iChunkType)
		{
	case EUserSelfModCode:
		executable = true;
		movable = true;
		break;

	case EUserData:
	case ERamDrive:
		movable = true;
		break;

	case EKernelMessage:
	case ESharedKernelSingle:
	case ESharedKernelMultiple:
	case ESharedIo:
		mapInKernel = true;
		r = MM::MemoryAttributes(attr,*(TMappingAttributes2*)&aInfo.iMapAttr);
		if(r!=KErrNone)
			return r;
		break;

	case EKernelData:
		nowipe = true;
		break;

	case EDllData:
		__NK_ASSERT_DEBUG(0); // invalid chunk type
	case EKernelStack:
		__NK_ASSERT_DEBUG(0); // invalid chunk type
	case EDll: // global code
		__NK_ASSERT_DEBUG(0); // invalid chunk type
	case EKernelCode:
		__NK_ASSERT_DEBUG(0); // invalid chunk type
	case EUserCode: // local code
		__NK_ASSERT_DEBUG(0); // invalid chunk type
	case ESharedKernelMirror:
		__NK_ASSERT_DEBUG(0); // invalid chunk type
	default:
		__NK_ASSERT_DEBUG(0); // invalid chunk type
		return KErrArgument;
		}

	// calculate memory type...
	TMemoryObjectType memoryType = EMemoryObjectUnpaged;
	if (iAttributes & EMemoryNotOwned)
		{
		if (memoryType != EMemoryObjectUnpaged)
			return KErrArgument;
		memoryType = EMemoryObjectHardware;
		}
	if (iAttributes & EDataPaged)
		{
		if (memoryType != EMemoryObjectUnpaged)
			return KErrArgument;
		memoryType = EMemoryObjectPaged;
		}
	if (iAttributes & ECache)
		{
		if (memoryType != EMemoryObjectUnpaged)
			return KErrArgument;
		memoryType = EMemoryObjectDiscardable;
		}
	if (memoryType == EMemoryObjectUnpaged)
		{
		if (movable)
			memoryType = EMemoryObjectMovable;
		}

	// calculate memory flags...
	TMemoryCreateFlags flags = nowipe ? EMemoryCreateNoWipe : EMemoryCreateDefault;
	flags = (TMemoryCreateFlags)(flags|EMemoryCreateUseCustomWipeByte|(iClearByte<<EMemoryCreateWipeByteShift));
	if(executable)
		flags = (TMemoryCreateFlags)(flags|EMemoryCreateAllowExecution);

	r = MM::MemoryNew(iMemoryObject,memoryType,MM::BytesToPages(iMaxSize),flags,attr);
	if(r!=KErrNone)
		return r;

	if(mapInKernel)
		{
		TInt r = MM::MappingNew(iKernelMapping, iMemoryObject, ESupervisorReadWrite, KKernelOsAsid);
		if(r!=KErrNone)
			return r; // Note, iMemoryObject will get cleaned-up when chunk is destroyed
		const TMappingAttributes2& lma = MM::LegacyMappingAttributes(attr,EUserReadWrite);
		*(TMappingAttributes2*)&iMapAttr = lma;
		}

#ifdef BTRACE_CHUNKS
	TKName nameBuf;
	Name(nameBuf);
	BTraceContextN(BTrace::EChunks,BTrace::EChunkCreated,this,iMaxSize,nameBuf.Ptr(),nameBuf.Size());
	if(iOwningProcess)
		BTrace8(BTrace::EChunks,BTrace::EChunkOwner,this,iOwningProcess);
	BTraceContext12(BTrace::EChunks,BTrace::EChunkInfo,this,iChunkType,iAttributes);
#endif
#ifdef BTRACE_FLEXIBLE_MEM_MODEL
	BTrace8(BTrace::EFlexibleMemModel,BTrace::EMemoryObjectIsChunk,iMemoryObject,this);
#endif
	return KErrNone;
	}

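// Record a fixed base address for the chunk and claim the pages already
// mapped at that address (via MM::MemoryClaimInitialPages) as the chunk's
// initial committed memory.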
void DMemModelChunk::SetFixedAddress(TLinAddr aAddr, TInt aInitialSize)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk %O SetFixedAddress %08x size %08x",this,aAddr,aInitialSize));
	iFixedBase = aAddr;
	iSize = MM::RoundToPageSize(aInitialSize);
	if(iSize)
		MM::MemoryClaimInitialPages(iMemoryObject,iFixedBase,iSize,ESupervisorReadWrite);
	}

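// Derive the chunk's protection attributes (EPrivate/EPublic, plus ECode
// for self-modifying code chunks) from its type. Chunk types that cannot
// be created on this memory model fault the kernel.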
TInt DMemModelChunk::SetAttributes(SChunkCreateInfo& aInfo)
	{
	switch(iChunkType)
		{
		case EKernelData:
		case EKernelMessage:
			iAttributes = EPrivate;
			break;
		case ERamDrive:
			iAttributes = EPrivate;
			break;
		case EUserData:
			if (aInfo.iGlobal)
				iAttributes = EPublic;
			else
				iAttributes = EPrivate;
			break;
		case EUserSelfModCode:
			if (aInfo.iGlobal)
				iAttributes = EPublic|ECode;
			else
				iAttributes = EPrivate|ECode;
			break;
		case ESharedKernelSingle:
		case ESharedKernelMultiple:
		case ESharedIo:
			iAttributes = EPublic;
			break;
		case EDllData:
			__NK_ASSERT_DEBUG(0); // invalid chunk type
		case EKernelStack:
			__NK_ASSERT_DEBUG(0); // invalid chunk type
		case EDll: // global code
			__NK_ASSERT_DEBUG(0); // invalid chunk type
		case EKernelCode:
			__NK_ASSERT_DEBUG(0); // invalid chunk type
		case EUserCode: // local code
			__NK_ASSERT_DEBUG(0); // invalid chunk type
		case ESharedKernelMirror:
			__NK_ASSERT_DEBUG(0); // invalid chunk type
		default:
			FAULT();
		}
	return KErrNone;
	}

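// Adjust the committed size of a standard chunk (not double-ended or
// disconnected), committing or decommitting memory at the end of the
// chunk as required.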
TInt DMemModelChunk::Adjust(TInt aNewSize)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Adjust %08x",aNewSize));
	if (iAttributes & (EDoubleEnded|EDisconnected))
		return KErrGeneral;
	if (aNewSize<0 || aNewSize>iMaxSize)
		return KErrArgument;

	TInt r=KErrNone;
	TInt newSize=MM::RoundToPageSize(aNewSize);
	if (newSize!=iSize)
		{
		MM::MemoryLock(iMemoryObject);
		if (newSize>iSize)
			{
			__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Adjust growing"));
			r=DoCommit(iSize,newSize-iSize);
			}
		else if (newSize<iSize)
			{
			__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Adjust shrinking"));
			DoDecommit(newSize,iSize-newSize);
			}
		MM::MemoryUnlock(iMemoryObject);
		}
	__COND_DEBUG_EVENT(r==KErrNone, EEventUpdateChunk, this);
	__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk %O adjusted to %x",this,iSize));
	return r;
	}

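// Return the kernel address corresponding to an offset within the chunk.
// Only valid for chunk types that keep a permanent kernel mapping; the
// requested region must lie within the chunk and be fully committed.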
TInt DMemModelChunk::Address(TInt aOffset, TInt aSize, TLinAddr& aKernelAddress)
	{
	if(!iPermanentPageBitMap)
		return KErrAccessDenied;
	if(TUint(aOffset)>=TUint(iMaxSize))
		return KErrArgument;
	if(TUint(aOffset+aSize)>TUint(iMaxSize))
		return KErrArgument;
	if(aSize<=0)
		return KErrArgument;
	TInt start = aOffset>>KPageShift;
	TInt size = ((aOffset+aSize-1)>>KPageShift)-start+1;
	if(iPermanentPageBitMap->NotAllocated(start,size))
		return KErrNotFound;
	aKernelAddress = MM::MappingBase(iKernelMapping)+aOffset;
	return KErrNone;
	}

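// Look up the kernel address and physical address of a committed region.
// The region is converted to a page range for MM::MemoryPhysAddr, and the
// sub-page part of aOffset is added back onto aPhysicalAddress.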
TInt DMemModelChunk::PhysicalAddress(TInt aOffset, TInt aSize, TLinAddr& aKernelAddress, TUint32& aPhysicalAddress, TUint32* aPhysicalPageList)
	{
	if(aSize<=0)
		return KErrArgument;
	TInt r = Address(aOffset,aSize,aKernelAddress);
	if(r!=KErrNone)
		return r;
	TInt index = aOffset>>KPageShift;
	TInt count = ((aOffset+aSize-1)>>KPageShift)-index+1;
	r = MM::MemoryPhysAddr(iMemoryObject,index,count,aPhysicalAddress,aPhysicalPageList);
	if(r==KErrNone)
		aPhysicalAddress += aOffset&KPageMask;
	return r;
	}

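// Commit memory to a page-aligned region of the chunk. Depending on
// aCommitType the memory is either allocated here (ECommitDiscontiguous,
// ECommitContiguous) or supplied by the caller as physical pages
// (ECommitDiscontiguousPhysical, ECommitContiguousPhysical).
// ECommitVirtual is not supported on the flexible memory model.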
TInt DMemModelChunk::DoCommit(TInt aOffset, TInt aSize, TCommitType aCommitType, TUint32* aExtraArg)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::DoCommit %x+%x type=%d extra=%08x",aOffset,aSize,aCommitType,aExtraArg));

	__NK_ASSERT_DEBUG(((aOffset|aSize)&KPageMask)==0);

	TInt r = KErrArgument;
	switch(aCommitType)
		{
	case DChunk::ECommitDiscontiguous:
		r = MM::MemoryAlloc(iMemoryObject, MM::BytesToPages(aOffset), MM::BytesToPages(aSize));
		break;

	case DChunk::ECommitDiscontiguousPhysical:
		r = MM::MemoryAddPages(iMemoryObject, MM::BytesToPages(aOffset), MM::BytesToPages(aSize), (TPhysAddr*)aExtraArg);
		break;

	case DChunk::ECommitContiguous:
		r = MM::MemoryAllocContiguous(iMemoryObject, MM::BytesToPages(aOffset), MM::BytesToPages(aSize), 0, *(TPhysAddr*)aExtraArg);
		break;

	case DChunk::ECommitContiguousPhysical:
		r = MM::MemoryAddContiguous(iMemoryObject, MM::BytesToPages(aOffset), MM::BytesToPages(aSize), (TPhysAddr)aExtraArg);
		break;

	case DChunk::ECommitVirtual:
	default:
		__NK_ASSERT_DEBUG(0); // Invalid commit type
		r = KErrNotSupported;
		break;
		}

	if(r==KErrNone)
		{
		iSize += aSize;
		if(iPermanentPageBitMap)
			iPermanentPageBitMap->Alloc(aOffset>>KPageShift,aSize>>KPageShift);
#ifdef BTRACE_CHUNKS
		TInt subcategory = (aCommitType & DChunk::ECommitPhysicalMask) ? BTrace::EChunkMemoryAdded : BTrace::EChunkMemoryAllocated;
		BTraceContext12(BTrace::EChunks,subcategory,this,aOffset,aSize);
#endif
		}

	return r;
	}

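// Remove committed memory from a page-aligned region of the chunk,
// freeing the pages if the chunk owns its memory or merely removing
// them if it does not (EMemoryNotOwned).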
void DMemModelChunk::DoDecommit(TInt aOffset, TInt aSize)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::DoDecommit %x+%x",aOffset,aSize));

	__NK_ASSERT_DEBUG(((aOffset|aSize)&KPageMask)==0);

	TUint index = MM::BytesToPages(aOffset);
	TUint count = MM::BytesToPages(aSize);
	iSize -= count*KPageSize;
	if(iAttributes&EMemoryNotOwned)
		MM::MemoryRemovePages(iMemoryObject, index, count, 0);
	else
		MM::MemoryFree(iMemoryObject, index, count);

#ifdef BTRACE_CHUNKS
	if (count != 0)
		{
		TInt subcategory = (iAttributes & EMemoryNotOwned) ? BTrace::EChunkMemoryRemoved : BTrace::EChunkMemoryDeallocated;
		BTraceContext12(BTrace::EChunks,subcategory,this,aOffset,count*KPageSize);
		}
#endif
	}

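// Move the committed region of a double-ended chunk to [aBottom,aTop),
// decommitting and committing pages as needed. The new region may either
// intersect the existing one or be completely disjoint from it; the two
// cases are handled separately below.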
TInt DMemModelChunk::AdjustDoubleEnded(TInt aBottom, TInt aTop)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::AdjustDoubleEnded %x-%x",aBottom,aTop));
	if ((iAttributes & (EDoubleEnded|EDisconnected))!=EDoubleEnded)
		return KErrGeneral;
	if (aTop<0 || aBottom<0 || aTop<aBottom || aTop>iMaxSize)
		return KErrArgument;

	aBottom &= ~KPageMask;
	aTop = MM::RoundToPageSize(aTop);
	TInt newSize=aTop-aBottom;
	if (newSize>iMaxSize)
		return KErrArgument;

	MM::MemoryLock(iMemoryObject);
	TInt initBottom=iStartPos;
	TInt initTop=iStartPos+iSize;
	TInt nBottom=Max(aBottom,iStartPos);	// intersection bottom
	TInt nTop=Min(aTop,iStartPos+iSize);	// intersection top
	TInt r=KErrNone;
	if (nBottom<nTop)
		{
		__KTRACE_OPT(KMMU,Kern::Printf("Initial and final regions intersect"));
		if (initBottom<nBottom)
			{
			iStartPos=aBottom;
			DoDecommit(initBottom,nBottom-initBottom);
			}
		if (initTop>nTop)
			DoDecommit(nTop,initTop-nTop);	// this changes iSize
		if (aBottom<nBottom)
			{
			r=DoCommit(aBottom,nBottom-aBottom);
			if (r==KErrNone)
				{
				if (aTop>nTop)
					r=DoCommit(nTop,aTop-nTop);
				if (r==KErrNone)
					iStartPos=aBottom;
				else
					DoDecommit(aBottom,nBottom-aBottom);
				}
			}
		else if (aTop>nTop)
			r=DoCommit(nTop,aTop-nTop);
		}
	else
		{
		__KTRACE_OPT(KMMU,Kern::Printf("Initial and final regions disjoint"));
		if (iSize)
			DoDecommit(initBottom,iSize);
		iStartPos=aBottom;
		if (newSize)
			r=DoCommit(iStartPos,newSize);
		}
	MM::MemoryUnlock(iMemoryObject);
	__COND_DEBUG_EVENT(r==KErrNone, EEventUpdateChunk, this);
	__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk %O adjusted to %x+%x",this,iStartPos,iSize));
	return r;
	}

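// Validate and page-align a region of a disconnected chunk. Returns a
// negative error code on failure, KErrNone (0) if the rounded region is
// empty, or 1 if there is a non-empty region for the caller to process.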
TInt DMemModelChunk::CheckRegion(TInt& aOffset, TInt& aSize)
	{
	if((iAttributes & (EDoubleEnded|EDisconnected))!=EDisconnected)
		return KErrGeneral;
	if(aOffset<0 || aSize<0)
		return KErrArgument;
	if(aSize==0)
		return KErrNone;

	TUint end = MM::RoundToPageSize(aOffset+aSize);
	if(end>TUint(iMaxSize))
		return KErrArgument;
	aOffset &= ~KPageMask;
	aSize = end-aOffset;
	if(end<=TUint(aOffset))
		return KErrArgument;

	return 1;
	}

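// Commit memory to a region of a disconnected chunk. Fails with
// KErrAlreadyExists if any page in the region is already committed.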
TInt DMemModelChunk::Commit(TInt aOffset, TInt aSize, TCommitType aCommitType, TUint32* aExtraArg)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Commit %x+%x type=%d extra=%08x",aOffset,aSize,aCommitType,aExtraArg));

	TInt r = CheckRegion(aOffset,aSize);
	if(r<=0)
		return r;

	MM::MemoryLock(iMemoryObject);
	TInt i=aOffset>>KPageShift;
	TInt n=aSize>>KPageShift;
	if (iPageBitMap->NotFree(i,n))
		r=KErrAlreadyExists;
	else
		{
		r=DoCommit(aOffset,aSize,aCommitType,aExtraArg);
		if (r==KErrNone)
			iPageBitMap->Alloc(i,n);
		}
	MM::MemoryUnlock(iMemoryObject);
	__COND_DEBUG_EVENT(r==KErrNone, EEventUpdateChunk, this);
	return r;
	}

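// Allocate and commit a free region of a disconnected chunk, returning
// the offset of the region on success.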
TInt DMemModelChunk::Allocate(TInt aSize, TInt aGuard, TInt aAlign)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Allocate %x %x %d",aSize,aGuard,aAlign));

	// the flexible memory model doesn't implement aGuard and aAlign...
	__NK_ASSERT_DEBUG(aGuard==0);
	(void)aGuard;
	__NK_ASSERT_DEBUG(aAlign==0);
	(void)aAlign;

	TInt dummyOffset = 0;
	TInt r = CheckRegion(dummyOffset,aSize);
	if(r<=0)
		return r;

	MM::MemoryLock(iMemoryObject);
	TInt n=aSize>>KPageShift;
	TInt i=iPageBitMap->AllocConsecutive(n, EFalse);		// allocate the offset
	if (i<0)
		r=KErrNoMemory;		// run out of reserved space for this chunk
	else
		{
		TInt offset=i<<KPageShift;
		__KTRACE_OPT(KMMU,Kern::Printf("Offset %x allocated",offset));
		r=DoCommit(offset,aSize);
		if (r==KErrNone)
			{
			iPageBitMap->Alloc(i,n);
			r=offset;		// if operation successful, return allocated offset
			}
		}
	MM::MemoryUnlock(iMemoryObject);
	__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Allocate returns %x",r));
	__COND_DEBUG_EVENT(r==KErrNone, EEventUpdateChunk, this);
	return r;
	}

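// Decommit a region of a disconnected chunk. Only pages marked allocated
// in the page bitmap are freed, and the bitmap (rather than the raw
// region size) determines how much iSize shrinks, since unlocked pages
// that were reclaimed are no longer counted as committed.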
TInt DMemModelChunk::Decommit(TInt aOffset, TInt aSize)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Decommit %x+%x",aOffset,aSize));
	TInt r = CheckRegion(aOffset,aSize);
	if(r<=0)
		return r;

	MM::MemoryLock(iMemoryObject);

	TInt i=aOffset>>KPageShift;
	TInt n=aSize>>KPageShift;
	__KTRACE_OPT(KMMU,Kern::Printf("Calling SelectiveFree(%d,%d)",i,n));

	TUint oldAvail = iPageBitMap->iAvail;
	iPageBitMap->SelectiveFree(i,n);	// free those positions which are actually allocated
	TUint oldSize = iSize;

	DoDecommit(aOffset,aSize);

	// Use the bit map to adjust the size of the chunk as unlocked and reclaimed pages
	// will have been unmapped but not removed from the bit map as DoDecommit() only
	// decommits the mapped pages.
	TUint actualFreedPages = iPageBitMap->iAvail - oldAvail;
	iSize = oldSize - (actualFreedPages << KPageShift);

	MM::MemoryUnlock(iMemoryObject);

	r=KErrNone;
	__COND_DEBUG_EVENT(r==KErrNone, EEventUpdateChunk, this);
	return r;
	}

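// Unlock a committed region of a cache chunk (ECache), allowing the
// system to discard its pages to reclaim memory.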
TInt DMemModelChunk::Unlock(TInt aOffset, TInt aSize)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Unlock %x+%x",aOffset,aSize));
	if(!(iAttributes&ECache))
		return KErrGeneral;
	TInt r = CheckRegion(aOffset,aSize);
	if(r<=0)
		return r;

	MM::MemoryLock(iMemoryObject);

	TInt i=aOffset>>KPageShift;
	TInt n=aSize>>KPageShift;
	if(iPageBitMap->NotAllocated(i,n))
		r = KErrNotFound;
	else
		r = MM::MemoryAllowDiscard(iMemoryObject,i,n);

	MM::MemoryUnlock(iMemoryObject);

	return r;
	}

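// Re-lock a previously unlocked region of a cache chunk. If the region
// can no longer be locked (for example, some of its pages have been
// discarded) it is decommitted and an error returned.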
TInt DMemModelChunk::Lock(TInt aOffset, TInt aSize)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Lock %x+%x",aOffset,aSize));
	if(!(iAttributes&ECache))
		return KErrGeneral;
	TInt r = CheckRegion(aOffset,aSize);
	if(r<=0)
		return r;

	r = MM::MemoryDisallowDiscard(iMemoryObject, MM::BytesToPages(aOffset), MM::BytesToPages(aSize));
	if(r!=KErrNone)
		Decommit(aOffset,aSize);

	return r;
	}

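// A chunk with an owning process may only be accessed from that process.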
TInt DMemModelChunk::CheckAccess()
	{
	if(iOwningProcess && iOwningProcess!=TheCurrentThread->iOwningProcess)
		return KErrAccessDenied;
	return KErrNone;
	}

void DMemModelChunk::BTracePrime(TInt aCategory)
	{
	DChunk::BTracePrime(aCategory);
#ifdef BTRACE_FLEXIBLE_MEM_MODEL
	if (aCategory == BTrace::EFlexibleMemModel || aCategory == -1)
		{
		if (iMemoryObject)
			{
			MM::MemoryBTracePrime(iMemoryObject);
			BTrace8(BTrace::EFlexibleMemModel,BTrace::EMemoryObjectIsChunk,iMemoryObject,this);
			}
		}
#endif
	}

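// Physical page substitution is not supported on the flexible memory model.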
void DMemModelChunk::Substitute(TInt /*aOffset*/, TPhysAddr /*aOldAddr*/, TPhysAddr /*aNewAddr*/)
	{
	MM::Panic(MM::EUnsupportedOperation);
	}