// os/kernelhwsrv/kernel/eka/memmodel/epoc/flexible/mmu/mm.cpp
// Copyright (c) 2007-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
//

#include "memmodel.h"
#include "mm.h"
#include "mmu.h"
#include "mobject.h"
#include "mmapping.h"
#include "mmanager.h"
#include "mpdalloc.h"
#include "mptalloc.h"
#include "mpager.h"
#include "maddressspace.h"




//
// DMutexPool
//

DMutexPool::~DMutexPool()
	{
	TUint i;
	for(i=0; i<iCount; ++i)
		{
		DMutex* mutex = iMembers[i].iMutex;
		if(mutex)
			mutex->Close(0);
		}
	Kern::Free(iMembers);
	}


TInt DMutexPool::Create(TUint aCount, const TDesC* aName, TUint aOrder)
	{
	if(aCount>EMaxPoolSize)
		return KErrTooBig;

	iMembers = (SMember*)Kern::AllocZ(aCount*sizeof(SMember));
	if(!iMembers)
		return KErrNoMemory;

	iCount = aCount;

	TInt r = KErrNone;
	TUint i;
	for(i=0; i<aCount; ++i)
		{
		TKName name;
		if(aName)
			{
			name = *aName;
			name.AppendNum(i);
			}
		r = K::MutexCreate(iMembers[i].iMutex, name, NULL, EFalse, aOrder);
		if(r!=KErrNone)
			break;
		}

	return r;
	}


/**
@class DMutexPool
@details

The cookie used for dynamically assigned mutexes is broken into three bit fields:
- Bit 0, always set. (To distinguish the cookie from a proper DMutex*).
- Bits 1 through #KMutexPoolIndexBits, these contain the index of the assigned
  mutex within DMutexPool::iMembers.
- Bits (#KMutexPoolIndexBits+1) through 31, the count of the number of threads waiting
  for this particular mutex assignment. When this reaches zero, the mutex can
  be unassigned.
*/

/**
Number of bits used to contain the index value of a dynamically assigned pool mutex.
*/
const TUint KMutexPoolIndexBits = 7;

const TUint KMutexPoolIndexMask = ((1<<KMutexPoolIndexBits)-1)<<1;
const TUint KMutexPoolWaitCountIncrement = 1<<(KMutexPoolIndexBits+1);

__ASSERT_COMPILE(DMutexPool::EMaxPoolSize<=TUint(KMutexPoolIndexMask/2+1)); // required for algorithm correctness

__ASSERT_COMPILE(DMutexPool::EMaxPoolSize<=64); // required to avoid excessive system lock hold time
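
// Illustrative sketch (not part of the original source): how a pool cookie
// would be decoded under the bit layout documented above, using the constants
// defined here. 'cookie' stands for the TUintPtr value stored in a DMutex*
// reference once bit 0 is set.
//
//	TUintPtr cookie = (TUintPtr)aMutexRef;
//	TBool isPoolCookie = cookie & 1;                          // bit 0 set => cookie, not a real DMutex*
//	TUint index        = (cookie & KMutexPoolIndexMask) >> 1; // index into iMembers
//	TUint waitCount    = cookie >> (KMutexPoolIndexBits + 1); // threads waiting on this assignment
//
// For example, with KMutexPoolIndexBits==7 the cookie 0x305 decodes to
// index 2 (bits 1..7) and wait count 3 (bits 8..31).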


void DMutexPool::Wait(DMutex*& aMutexRef)
	{
	NKern::LockSystem();

	TUintPtr poolMutex = (TUintPtr)aMutexRef;
	if(!poolMutex)
		{
		// try and find a free mutex, else use the next one...
		TUint next = iNext;
		do
			{
			if(iMembers[next].iUseCount==0)
				break;
			if(++next>=iCount)
				next = 0;
			}
		while(next!=iNext);
		// use found mutex...
		++iMembers[next].iUseCount;
		poolMutex = (next*2)+1; // mutex index*2 | 1
		// update next...
		if(++next>=iCount)
			next = 0;
		iNext = next;
		}

	DMutex* mutex = (DMutex*)poolMutex;
	if(poolMutex&1)
		{
		// mutex is a pool mutex, get pointer, and update wait count...
		SMember* member = &iMembers[(poolMutex&KMutexPoolIndexMask)>>1];
		mutex = member->iMutex;
		poolMutex += KMutexPoolWaitCountIncrement;
		__NK_ASSERT_ALWAYS(poolMutex>=KMutexPoolWaitCountIncrement);
		aMutexRef = (DMutex*)poolMutex;
		}

	mutex->Wait();

	NKern::UnlockSystem();
	}


void DMutexPool::Signal(DMutex*& aMutexRef)
	{
	NKern::LockSystem();

	TUintPtr poolMutex = (TUintPtr)aMutexRef;
	__NK_ASSERT_ALWAYS(poolMutex);

	DMutex* mutex = (DMutex*)poolMutex;

	if(poolMutex&1)
		{
		// mutex is a pool mutex, get pointer, and update wait count...
		SMember* member = &iMembers[(poolMutex&KMutexPoolIndexMask)>>1];
		mutex = member->iMutex;
		__NK_ASSERT_ALWAYS(poolMutex>=KMutexPoolWaitCountIncrement);
		poolMutex -= KMutexPoolWaitCountIncrement;
		if(poolMutex<KMutexPoolWaitCountIncrement)
			{
			--member->iUseCount;
			poolMutex = 0;
			}
		aMutexRef = (DMutex*)poolMutex;
		}

	mutex->Signal();
	}


TBool DMutexPool::IsHeld(DMutex*& aMutexRef)
	{
	TBool held = false;
	NKern::LockSystem();
	TUintPtr poolMutex = (TUintPtr)aMutexRef;
	if(poolMutex)
		{
		DMutex* mutex = (DMutex*)poolMutex;
		if(poolMutex&1)
			{
			SMember* member = &iMembers[(poolMutex&KMutexPoolIndexMask)>>1];
			mutex = member->iMutex;
			}
		held = mutex->iCleanup.iThread==&Kern::CurrentThread();
		}
	NKern::UnlockSystem();
	return held;
	}
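
// Illustrative sketch (not part of the original source): the intended calling
// pattern for DMutexPool. A client object holds a DMutex* member ('iLock'
// here, a hypothetical name) which starts out NULL; Wait() assigns a pool
// mutex on demand and Signal() drops the assignment when the last waiter
// leaves, so a small fixed pool can serve many objects.
//
//	extern DMutexPool MemoryObjectMutexPool;
//
//	void LockExample(DMemoryObject* aMemory)
//		{
//		MemoryObjectMutexPool.Wait(aMemory->iLock);   // assigns and acquires a pool mutex
//		// ... critical section protected by the assigned mutex ...
//		MemoryObjectMutexPool.Signal(aMemory->iLock); // releases; may unassign the mutex
//		}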



//
// DReferenceCountedObject
//

DReferenceCountedObject::~DReferenceCountedObject()
	{
	__NK_ASSERT_DEBUG(iReferenceCount==0);
	}


void DReferenceCountedObject::Open()
	{
	__ASSERT_CRITICAL
	TBool ok = __e32_atomic_tas_ord32(&iReferenceCount, 1, 1, 0);
	__NK_ASSERT_ALWAYS(ok);
	}


TBool DReferenceCountedObject::TryOpen()
	{
	__ASSERT_CRITICAL
	TBool ok = __e32_atomic_tas_ord32(&iReferenceCount, 1, 1, 0);
	return ok;
	}


TBool DReferenceCountedObject::CheckCloseIsSafe()
	{
	__ASSERT_CRITICAL
#ifdef _DEBUG
	NFastMutex* fm = NKern::HeldFastMutex();
	if(fm)
		{
		Kern::Printf("DReferenceCountedObject[0x%08x]::Close() fast mutex violation %M",this,fm);
		return false;
		}
	SDblQue& ml = TheCurrentThread->iMutexList;
	if(!ml.IsEmpty())
		{
		DMutex* m = _LOFF(ml.First(), DMutex, iOrderLink);
		if(m->iOrder<KMutexOrdKernelHeap)
			{
			Kern::Printf("DReferenceCountedObject[0x%08x]::Close() mutex order violation holding mutex %O",this,m);
			return false;
			}
		}
#endif
	return true;
	}


TBool DReferenceCountedObject::CheckAsyncCloseIsSafe()
	{
	__ASSERT_CRITICAL
#ifdef _DEBUG
	NFastMutex* fm = NKern::HeldFastMutex();
	if(fm)
		{
		Kern::Printf("DReferenceCountedObject[0x%08x]::AsyncClose() fast mutex violation %M",this,fm);
		return false;
		}
#endif
	return true;
	}


void DReferenceCountedObject::Close()
	{
	__ASSERT_CRITICAL
	__NK_ASSERT_DEBUG(CheckCloseIsSafe());
	__NK_ASSERT_DEBUG(iReferenceCount>0);
	if (__e32_atomic_tas_ord32(&iReferenceCount, 1, -1, 0) == 1)
		delete this;
	}


void DReferenceCountedObject::AsyncClose()
	{
	__ASSERT_CRITICAL
	__NK_ASSERT_DEBUG(CheckAsyncCloseIsSafe());
	__NK_ASSERT_DEBUG(iReferenceCount>0);
	if (__e32_atomic_tas_ord32(&iReferenceCount, 1, -1, 0) == 1)
		AsyncDelete();
	}
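
// Illustrative note (not part of the original source): __e32_atomic_tas_ord32
// is an atomic threshold-and-add, roughly
//
//	old = *a; if(old >= t) *a += u; else *a += v; return old;
//
// so tas(&iReferenceCount, 1, 1, 0) increments the count only while it is
// already at least 1 (Open/TryOpen can't resurrect an object whose count has
// dropped to zero), and tas(&iReferenceCount, 1, -1, 0) decrements it, a
// return value of 1 meaning the caller removed the final reference and must
// delete the object.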


//
// Memory object functions
//

TInt MM::MemoryNew(DMemoryObject*& aMemory, TMemoryObjectType aType, TUint aPageCount, TMemoryCreateFlags aCreateFlags, TMemoryAttributes aAttributes)
	{
	TRACE(("MM::MemoryNew(?,0x%08x,0x%08x,0x%08x,0x%08x)",aType,aPageCount,aCreateFlags,*(TUint32*)&aAttributes));

	DMemoryManager* manager;
	if(aCreateFlags&EMemoryCreateCustomManager)
		manager = (DMemoryManager*)aType;
	else
		{
		switch(aType)
			{
		case EMemoryObjectUnpaged:
			manager = TheUnpagedMemoryManager;
			break;
		case EMemoryObjectMovable:
			manager = TheMovableMemoryManager;
			break;
		case EMemoryObjectPaged:
			manager = TheDataPagedMemoryManager;
			break;
		case EMemoryObjectDiscardable:
			manager = TheDiscardableMemoryManager;
			break;
		case EMemoryObjectHardware:
			manager = TheHardwareMemoryManager;
			break;
		default:
			manager = 0;
			__NK_ASSERT_DEBUG(0);
			break;
			}
		}
	TMemoryCreateFlags flags = (TMemoryCreateFlags)(aCreateFlags&~(EMemoryCreateDemandPaged));
	TInt r = manager->New(aMemory,aPageCount,aAttributes,flags);
	TRACE(("MM::MemoryNew returns %d, aMemory=0x%08x",r,aMemory));
#ifdef BTRACE_FLEXIBLE_MEM_MODEL
	if (r == KErrNone)
		aMemory->BTraceCreate();
#endif
	return r;
	}
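
// Illustrative sketch (not part of the original source): the typical
// lifecycle of a memory object driven through these MM:: wrappers. Error
// handling is elided and 'pageCount' is a hypothetical value.
//
//	DMemoryObject* memory = NULL;
//	TInt r = MM::MemoryNew(memory, EMemoryObjectMovable, pageCount,
//	                       EMemoryCreateDefault, EMemoryAttributeStandard);
//	if(r==KErrNone)
//		r = MM::MemoryAlloc(memory, 0, pageCount); // commit RAM to pages 0..pageCount-1
//	// ... create mappings, use the memory ...
//	MM::MemoryDestroy(memory);                     // frees the pages and the object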


TInt MM::MemoryClaimInitialPages(DMemoryObject* aMemory, TLinAddr aBase, TUint aSize, TMappingPermissions aPermissions, TBool aAllowGaps, TBool aAllowNonRamPages)
	{
	TRACE(("MM::MemoryClaimInitialPages(0x%08x,0x%08x,0x%08x,0x%08x,%d,%d)",aMemory,aBase,aSize,aPermissions,aAllowGaps!=0,aAllowNonRamPages!=0));
	TInt r = aMemory->ClaimInitialPages(aBase,aSize,aPermissions,aAllowGaps,aAllowNonRamPages);
	TRACE(("MM::MemoryClaimInitialPages returns %d",r));
	__NK_ASSERT_DEBUG(r==KErrNone);
	return r;
	}


void MM::MemorySetLock(DMemoryObject* aMemory, DMutex* aLock)
	{
	aMemory->SetLock(aLock);
	}


void MM::MemoryLock(DMemoryObject* aMemory)
	{
	MemoryObjectLock::Lock(aMemory);
	}


void MM::MemoryUnlock(DMemoryObject* aMemory)
	{
	MemoryObjectLock::Unlock(aMemory);
	}


void MM::MemoryDestroy(DMemoryObject*& aMemory)
	{
	DMemoryObject* memory = (DMemoryObject*)__e32_atomic_swp_ord_ptr(&aMemory, 0);
	if (!memory)
		return;
	TRACE(("MM::MemoryDestroy(0x%08x)",memory));
#ifdef BTRACE_FLEXIBLE_MEM_MODEL
	BTraceContext4(BTrace::EFlexibleMemModel,BTrace::EMemoryObjectDestroy,memory);
#endif
	memory->iManager->Destruct(memory);
	}


TInt MM::MemoryAlloc(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
	{
	TRACE(("MM::MemoryAlloc(0x%08x,0x%08x,0x%08x)",aMemory,aIndex,aCount));
	MemoryObjectLock::Lock(aMemory);
	TInt r;
	if(!aMemory->CheckRegion(aIndex,aCount))
		r = KErrArgument;
	else
		r = aMemory->iManager->Alloc(aMemory,aIndex,aCount);
	MemoryObjectLock::Unlock(aMemory);
	TRACE(("MM::MemoryAlloc returns %d",r));
	return r;
	}


TInt MM::MemoryAllocContiguous(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TUint aAlign, TPhysAddr& aPhysAddr)
	{
	TRACE(("MM::MemoryAllocContiguous(0x%08x,0x%08x,0x%08x,%d,?)",aMemory,aIndex,aCount,aAlign));
	MemoryObjectLock::Lock(aMemory);
	TInt r;
	if(!aMemory->CheckRegion(aIndex,aCount))
		r = KErrArgument;
	else
		r = aMemory->iManager->AllocContiguous(aMemory,aIndex,aCount,MM::RoundToPageShift(aAlign),aPhysAddr);
	MemoryObjectLock::Unlock(aMemory);
	TRACE(("MM::MemoryAllocContiguous returns %d (aPhysAddr=0x%08x)",r,aPhysAddr));
	return r;
	}


void MM::MemoryFree(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
	{
	TRACE(("MM::MemoryFree(0x%08x,0x%08x,0x%08x)",aMemory,aIndex,aCount));
	MemoryObjectLock::Lock(aMemory);
	aMemory->ClipRegion(aIndex,aCount);
	aMemory->iManager->Free(aMemory,aIndex,aCount);
	MemoryObjectLock::Unlock(aMemory);
	}


TInt MM::MemoryAddPages(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TPhysAddr* aPages)
	{
	TRACE(("MM::MemoryAddPages(0x%08x,0x%08x,0x%08x,?)",aMemory,aIndex,aCount));
	MemoryObjectLock::Lock(aMemory);
	TInt r;
	if(!aMemory->CheckRegion(aIndex,aCount))
		r = KErrArgument;
	else
		r = aMemory->iManager->AddPages(aMemory,aIndex,aCount,aPages);
	MemoryObjectLock::Unlock(aMemory);
	TRACE(("MM::MemoryAddPages returns %d",r));
	return r;
	}


TInt MM::MemoryAddContiguous(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TPhysAddr aPhysAddr)
	{
	TRACE(("MM::MemoryAddContiguous(0x%08x,0x%08x,0x%08x,0x%08x)",aMemory,aIndex,aCount,aPhysAddr));
	MemoryObjectLock::Lock(aMemory);
	TInt r;
	if(!aMemory->CheckRegion(aIndex,aCount))
		r = KErrArgument;
	else
		r = aMemory->iManager->AddContiguous(aMemory,aIndex,aCount,aPhysAddr);
	MemoryObjectLock::Unlock(aMemory);
	TRACE(("MM::MemoryAddContiguous returns %d",r));
	return r;
	}


TUint MM::MemoryRemovePages(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TPhysAddr* aPages)
	{
	TRACE(("MM::MemoryRemovePages(0x%08x,0x%08x,0x%08x)",aMemory,aIndex,aCount));
	MemoryObjectLock::Lock(aMemory);
	aMemory->ClipRegion(aIndex,aCount);
	TInt r = aMemory->iManager->RemovePages(aMemory,aIndex,aCount,aPages);
	if(r<0)
		r = 0;
	MemoryObjectLock::Unlock(aMemory);
	TRACE(("MM::MemoryRemovePages returns %d",r));
	return r;
	}


TInt MM::MemoryAllowDiscard(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
	{
	TRACE(("MM::MemoryAllowDiscard(0x%08x,0x%08x,0x%08x)",aMemory,aIndex,aCount));
	MemoryObjectLock::Lock(aMemory);
	TInt r;
	if(!aMemory->CheckRegion(aIndex,aCount))
		r = KErrArgument;
	else
		r = aMemory->iManager->AllowDiscard(aMemory,aIndex,aCount);
	MemoryObjectLock::Unlock(aMemory);
	TRACE(("MM::MemoryAllowDiscard returns %d",r));
	return r;
	}


TInt MM::MemoryDisallowDiscard(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
	{
	TRACE(("MM::MemoryDisallowDiscard(0x%08x,0x%08x,0x%08x)",aMemory,aIndex,aCount));
	MemoryObjectLock::Lock(aMemory);
	TInt r;
	if(!aMemory->CheckRegion(aIndex,aCount))
		r = KErrArgument;
	else
		r = aMemory->iManager->DisallowDiscard(aMemory,aIndex,aCount);
	MemoryObjectLock::Unlock(aMemory);
	TRACE(("MM::MemoryDisallowDiscard returns %d",r));
	return r;
	}


TInt MM::MemoryPhysAddr(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TPhysAddr& aPhysicalAddress, TPhysAddr* aPhysicalPageList)
	{
	TRACE(("MM::MemoryPhysAddr(0x%08x,0x%08x,0x%08x,?,?)",aMemory,aIndex,aCount));
	TInt r = aMemory->PhysAddr(aIndex,aCount,aPhysicalAddress,aPhysicalPageList);
	TRACE(("MM::MemoryPhysAddr returns %d aPhysicalAddress=0x%08x",r,aPhysicalAddress));
	return r;
	}


void MM::MemoryBTracePrime(DMemoryObject* aMemory)
	{
	aMemory->BTraceCreate();
	aMemory->iMappings.Lock();
	TMappingListIter iter;
	DMemoryMapping* mapping = (DMemoryMapping*)iter.Start(aMemory->iMappings);
	while(mapping)
		{
		aMemory->iMappings.Unlock();
		mapping->BTraceCreate();
		aMemory->iMappings.Lock();
		mapping = (DMemoryMapping*)iter.Next();
		}
	iter.Finish();
	aMemory->iMappings.Unlock();
	}


void MM::MemoryClose(DMemoryObject* aMemory)
	{
	aMemory->Close();
	}


TBool MM::MemoryIsNotMapped(DMemoryObject* aMemory)
	{
	TBool r = aMemory->iMappings.IsEmpty();
	TRACE2(("MM::MemoryIsNotMapped(0x%08x) returns %d",aMemory,r));
	return r;
	}

//
// Physical pinning
//

TInt MM::PinPhysicalMemory(DMemoryObject* aMemory, DPhysicalPinMapping* aPinObject, TUint aIndex, TUint aCount, TBool aReadOnly, TPhysAddr& aAddress, TPhysAddr* aPages, TUint32& aMapAttr, TUint& aColour)
	{

	if (!aMemory->CheckRegion(aIndex,aCount))
		return KErrArgument;

	TMappingPermissions permissions = aReadOnly ? ESupervisorReadOnly : ESupervisorReadWrite;
	TInt r = aPinObject->Pin(aMemory, aIndex, aCount, permissions);
	if (r == KErrNone)
		{
		r = aPinObject->PhysAddr(aIndex, aCount, aAddress, aPages);
		if (r>=KErrNone)
			{
			r = KErrNone; // Do not report discontiguous memory in the return value.
			const TMappingAttributes2& mapAttr2 =
				MM::LegacyMappingAttributes(aMemory->Attributes(), permissions);
			*(TMappingAttributes2*)&aMapAttr = mapAttr2;
			}
		else
			{
			aPinObject->Unpin();
			}
		}

	aColour = 0;
	return r;
	}
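
// Illustrative sketch (not part of the original source): how a driver-style
// caller might use PinPhysicalMemory to prepare a region for DMA. 'pin',
// 'memory', 'pageCount' and KMaxPages are hypothetical; the pin object would
// come from the physical pinning APIs.
//
//	TPhysAddr base;
//	TPhysAddr pages[KMaxPages];
//	TUint32 mapAttr;
//	TUint colour;
//	TInt r = MM::PinPhysicalMemory(memory, pin, 0, pageCount, ETrue,
//	                               base, pages, mapAttr, colour);
//	// On success pages[] holds the physical address of each pinned page;
//	// a positive PhysAddr() result (discontiguous memory) has already been
//	// folded into KErrNone, so consult pages[] rather than 'base' when the
//	// region may not be physically contiguous.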


TInt MM::MemoryWipe(DMemoryObject* aMemory)
	{
	__NK_ASSERT_ALWAYS(aMemory->iMappings.IsEmpty()); // can't be mapped, otherwise confidentiality can't be guaranteed
	TRACE2(("MM::MemoryWipe(0x%08x)",aMemory));
	MemoryObjectLock::Lock(aMemory);
	TInt r = aMemory->iManager->Wipe(aMemory);
	MemoryObjectLock::Unlock(aMemory);
	return r;
	}


TInt MM::MemorySetReadOnly(DMemoryObject* aMemory)
	{
	TRACE2(("MM::MemorySetReadOnly(0x%08x)",aMemory));
	MemoryObjectLock::Lock(aMemory);
	TInt r = aMemory->SetReadOnly();
	MemoryObjectLock::Unlock(aMemory);
	return r;
	}

//
// Mapping functions
//

TInt MM::MappingNew(DMemoryMapping*& aMapping, DMemoryObject* aMemory, TMappingPermissions aPermissions, TInt aOsAsid, TMappingCreateFlags aFlags, TLinAddr aAddr, TUint aIndex, TUint aCount)
	{
	TRACE(("MM::MappingNew(?,0x%08x,0x%08x,%d,0x%08x,0x%08x,0x%08x,0x%08x)",aMemory, aPermissions, aOsAsid, aFlags, aAddr, aIndex, aCount));

	/**
	@todo Make mappings created with this function fail (panic?) if they are reused to map
	another object.
	*/
	if(aCount==~0u)
		aCount = aMemory->iSizeInPages-aIndex;

	// if memory object reserves all resources, make mappings also do so...
	if(aMemory->iFlags&DMemoryObject::EReserveResources)
		FlagSet(aFlags,EMappingCreateReserveAllResources);

	// check if mapping is for global user data...
	if(aOsAsid==(TInt)KKernelOsAsid && aPermissions&EUser)
		FlagSet(aFlags,EMappingCreateUserGlobalVirtual);
	else
		FlagClear(aFlags,EMappingCreateUserGlobalVirtual);

	// set paged attribute for mapping...
	if(aMemory->IsDemandPaged())
		FlagSet(aFlags,EMappingCreateDemandPaged);
	else
		FlagClear(aFlags,EMappingCreateDemandPaged);

	DMemoryMapping* mapping = 0;
	TInt r = KErrNone;
	if(!aMemory->CheckRegion(aIndex,aCount))
		r = KErrArgument;
	else
		{
		mapping = aMemory->CreateMapping(aIndex, aCount);
		if(!mapping)
			r = KErrNoMemory;
		}

	if(!mapping)
		{
		// free any virtual address the mapping should have adopted...
		if(aFlags&EMappingCreateAdoptVirtual)
			MM::VirtualFree(aOsAsid, aAddr, aCount<<KPageShift);
		}
	else
		{
		r = mapping->Construct(aMemory->Attributes(), aFlags, aOsAsid, aAddr, aCount<<KPageShift, aIndex<<KPageShift);
		if(r==KErrNone)
			r = mapping->Map(aMemory, aIndex, aCount, aPermissions);
		if(r!=KErrNone)
			{
			mapping->Close();
			mapping = 0;
			}
		}

	aMapping = mapping;
	TRACE(("MM::MappingNew returns %d (aMapping=0x%08x)",r,aMapping));
#ifdef BTRACE_FLEXIBLE_MEM_MODEL
	if (r == KErrNone)
		aMapping->BTraceCreate();
#endif
	return r;
	}
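
// Illustrative sketch (not part of the original source): mapping a committed
// memory object into the kernel address space and retrieving its base
// address. Error handling is elided; EMappingCreateDefault is assumed to be
// the zero/default TMappingCreateFlags value, and aAddr==0 lets the mapping
// allocate its own virtual address.
//
//	DMemoryMapping* mapping = NULL;
//	TInt r = MM::MappingNew(mapping, memory, ESupervisorReadWrite,
//	                        KKernelOsAsid, EMappingCreateDefault, 0);
//	if(r==KErrNone)
//		{
//		TLinAddr addr = MM::MappingBase(mapping); // virtual address of the mapping
//		// ... access the memory through 'addr' ...
//		MM::MappingDestroy(mapping);              // unmaps and closes
//		}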


TInt MM::MappingNew(DMemoryMapping*& aMapping, TUint aCount, TInt aOsAsid, TMappingCreateFlags aFlags, TLinAddr aAddr, TLinAddr aColourOffset)
	{
	TRACE2(("MM::MappingNew(?,0x%08x,%d,0x%08x,0x%08x,0x%08x)",aCount, aOsAsid, aFlags, aAddr, aColourOffset));

	FlagClear(aFlags,EMappingCreateDemandPaged); // mapping can't use demand paged page tables

	TInt r = KErrNone;
	DMemoryMapping* mapping = new DFineMapping();
	if(!mapping)
		r = KErrNoMemory;

	if(!mapping)
		{
		// free any virtual address the mapping should have adopted...
		if(aFlags&EMappingCreateAdoptVirtual)
			MM::VirtualFree(aOsAsid, aAddr, aCount<<KPageShift);
		}
	else
		{
		r = mapping->Construct(EMemoryAttributeStandard, aFlags, aOsAsid, aAddr, aCount<<KPageShift, aColourOffset);
		if(r!=KErrNone)
			{
			mapping->Close();
			mapping = 0;
			}
		}

	aMapping = mapping;
	TRACE2(("MM::MappingNew returns %d (aMapping=0x%08x)",r,aMapping));

	return r;
	}


TInt MM::MappingMap(DMemoryMapping* aMapping, TMappingPermissions aPermissions, DMemoryObject* aMemory, TUint aIndex, TUint aCount)
	{
	TRACE2(("MM::MappingMap(0x%08x,0x%08x,0x%08x,0x%x,0x%x)",aMapping,aPermissions,aMemory,aIndex,aCount));
	if(aCount==~0u)
		aCount = aMemory->iSizeInPages-aIndex;
	TInt r = aMapping->Map(aMemory, aIndex, aCount, aPermissions);
	TRACE2(("MM::MappingMap returns %d",r));
	return r;
	}


void MM::MappingUnmap(DMemoryMapping* aMapping)
	{
	if(aMapping->IsAttached())
		{
		TRACE2(("MM::MappingUnmap(0x%08x)",aMapping));
		aMapping->Unmap();
		}
	}


void MM::MappingDestroy(DMemoryMapping*& aMapping)
	{
	DMemoryMapping* mapping = (DMemoryMapping*)__e32_atomic_swp_ord_ptr(&aMapping, 0);
	if (!mapping)
		return;
	TRACE(("MM::MappingDestroy(0x%08x)",mapping));
#ifdef BTRACE_FLEXIBLE_MEM_MODEL
	BTraceContext4(BTrace::EFlexibleMemModel,BTrace::EMemoryMappingDestroy,mapping);
#endif
	if(mapping->IsAttached())
		mapping->Unmap();
	mapping->Close();
	}


void MM::MappingDestroy(TLinAddr aAddr, TInt aOsAsid)
	{
	DMemoryMapping* mapping = AddressSpace[aOsAsid]->GetMapping(aAddr);
	MM::MappingDestroy(mapping);
	}


void MM::MappingAndMemoryDestroy(DMemoryMapping*& aMapping)
	{
	DMemoryMapping* mapping = (DMemoryMapping*)__e32_atomic_swp_ord_ptr(&aMapping, 0);
	TRACE(("MM::MappingAndMemoryDestroy(0x%08x)",mapping));
	if (!mapping)
		return;
	DMemoryObject* memory = mapping->Memory(true); // safe because we assume owner hasn't unmapped mapping
	MM::MappingDestroy(mapping);
	MM::MemoryDestroy(memory);
	}


void MM::MappingAndMemoryDestroy(TLinAddr aAddr, TInt aOsAsid)
	{
	DMemoryMapping* mapping = AddressSpace[aOsAsid]->GetMapping(aAddr);
	MM::MappingAndMemoryDestroy(mapping);
	}


TLinAddr MM::MappingBase(DMemoryMapping* aMapping)
	{
	TLinAddr base = aMapping->Base();
	TRACE2(("MM::MappingBase(0x%08x) returns 0x%08x",aMapping,base));
	return base;
	}


TInt MM::MappingOsAsid(DMemoryMapping* aMapping)
	{
	return aMapping->OsAsid();
	}


DMemoryObject* MM::MappingGetAndOpenMemory(DMemoryMapping* aMapping)
	{
	MmuLock::Lock();
	DMemoryObject* memory = aMapping->Memory();
	if (memory)
		memory->Open();
	MmuLock::Unlock();
	TRACE2(("MM::MappingGetAndOpenMemory(0x%08x) returns 0x%08x",aMapping,memory));
	return memory;
	}


void MM::MappingClose(DMemoryMapping* aMapping)
	{
	TRACE2(("MM::MappingClose(0x%08x)",aMapping));
	aMapping->Close();
	}


DMemoryMapping* MM::FindMappingInThread(DMemModelThread* aThread, TLinAddr aAddr, TUint aSize,
										TUint& aOffsetInMapping, TUint& aInstanceCount)
	{
	if(aAddr>=KGlobalMemoryBase)
		{
		// Address in global region, so look it up in kernel's address space...
		return FindMappingInAddressSpace(KKernelOsAsid, aAddr, aSize, aOffsetInMapping, aInstanceCount);
		}

	// Address in thread's process address space so open a reference to its os asid
	// so that it remains valid for FindMappingInAddressSpace() call.
	DMemModelProcess* process = (DMemModelProcess*)aThread->iOwningProcess;
	TInt osAsid = process->TryOpenOsAsid();
	if (osAsid < 0)
		{// The process no longer owns an address space so can't have any mappings.
		return NULL;
		}

	DMemoryMapping* r = FindMappingInAddressSpace(osAsid, aAddr, aSize, aOffsetInMapping, aInstanceCount);

	process->CloseOsAsid();
	return r;
	}


DMemoryMapping* MM::FindMappingInAddressSpace(	TUint aOsAsid, TLinAddr aAddr, TUint aSize,
												TUint& aOffsetInMapping, TUint& aInstanceCount)
	{
	return AddressSpace[aOsAsid]->FindMapping(aAddr, aSize, aOffsetInMapping, aInstanceCount);
	}



//
// Address space
//

TInt MM::AddressSpaceAlloc(TPhysAddr& aPageDirectory)
	{
	return DAddressSpace::New(aPageDirectory);
	}


void MM::AddressSpaceFree(TUint aOsAsid)
	{
	AddressSpace[aOsAsid]->Close();
	}


void MM::AsyncAddressSpaceFree(TUint aOsAsid)
	{
	AddressSpace[aOsAsid]->AsyncClose();
	}


TInt MM::VirtualAllocCommon(TLinAddr& aLinAddr, TUint aSize, TBool aDemandPaged)
	{
	TRACE(("MM::VirtualAllocCommon(?,0x%08x,%d)",aSize,aDemandPaged));
	TUint pdeType = aDemandPaged ? EVirtualSlabTypeDemandPaged : 0;
	TInt r = DAddressSpace::AllocateUserCommonVirtualMemory(aLinAddr, aSize, 0, aSize, pdeType);
	TRACE(("MM::VirtualAllocCommon returns %d region=0x%08x+0x%08x",r,aLinAddr,aSize));
	return r;
	}


void MM::VirtualFreeCommon(TLinAddr aLinAddr, TUint aSize)
	{
	TRACE(("MM::VirtualFreeCommon(0x%08x,0x%08x)",aLinAddr,aSize));
	DAddressSpace::FreeUserCommonVirtualMemory(aLinAddr, aSize);
	}


TInt MM::VirtualAlloc(TInt aOsAsid, TLinAddr& aLinAddr, TUint aSize, TBool aDemandPaged)
	{
	TRACE(("MM::VirtualAlloc(?,%d,0x%08x,%d)",aOsAsid,aSize,aDemandPaged));
	TUint pdeType = aDemandPaged ? EVirtualSlabTypeDemandPaged : 0;
	TInt r = AddressSpace[aOsAsid]->AllocateVirtualMemory(aLinAddr, aSize, 0, aSize, pdeType);
	TRACE(("MM::VirtualAlloc returns %d region=0x%08x+0x%08x",r,aLinAddr,aSize));
	return r;
	}


void MM::VirtualFree(TInt aOsAsid, TLinAddr aLinAddr, TUint aSize)
	{
	TRACE(("MM::VirtualFree(%d,0x%08x,0x%08x)",aOsAsid,aLinAddr,aSize));
	AddressSpace[aOsAsid]->FreeVirtualMemory(aLinAddr, aSize);
	}



//
// Init
//

void MM::Init1()
	{
	TheMmu.Init1();
	}


extern DMutexPool MemoryObjectMutexPool;
extern DMutexPool AddressSpaceMutexPool;

void MM::Init2()
	{
	TInt r;

	TheMmu.Init2();

	// create mutex pools before calling any functions which require them...
	_LIT(KAddressSpaceMutexName,"AddressSpaceMutex");
	r = AddressSpaceMutexPool.Create(4, &KAddressSpaceMutexName, KMutexOrdAddresSpace);
	__NK_ASSERT_ALWAYS(r==KErrNone);
	_LIT(KMemoryObjectMutexName,"MemoryObjectMutex");
	r = MemoryObjectMutexPool.Create(8, &KMemoryObjectMutexName, KMutexOrdMemoryObject);
	__NK_ASSERT_ALWAYS(r==KErrNone);

	// use the Ram Allocator mutex for low-level memory functions...
	DMutex* mmuAllocMutex = TheMmu.iRamAllocatorMutex;

	// memory cleanup needs initialising before any memory is freed...
	TMemoryCleanup::Init2();

	// initialise allocators used for MMU operations...
	RPageArray::Init2A();
	PageTables.Init2(mmuAllocMutex); // must come before any other code which allocates memory objects
	RPageArray::Init2B(mmuAllocMutex);
	PageTables.Init2B();
	PageDirectories.Init2();

	// initialise address spaces...
	DAddressSpace::Init2();

	// init pager...
	ThePager.Init2();

	TheMmu.Init2Final();
	}


/** HAL Function wrapper for the RAM allocator.
*/
TInt RamHalFunction(TAny*, TInt aFunction, TAny* a1, TAny* a2)
	{
	return TheMmu.RamHalFunction(aFunction, a1, a2);
	}


void MM::Init3()
	{
	__KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("MM::Init3"));
	ThePager.Init3();

	// Register a HAL Function for the Ram allocator.
	TInt r = Kern::AddHalEntry(EHalGroupRam, RamHalFunction, 0);
	__NK_ASSERT_ALWAYS(r==KErrNone);

	TheMmu.Init3();
	}


TInt MM::InitFixedKernelMemory(DMemoryObject*& aMemory,
							   TLinAddr aStart,
							   TLinAddr aEnd,
							   TUint aInitSize,
							   TMemoryObjectType aType,
							   TMemoryCreateFlags aMemoryCreateFlags,
							   TMemoryAttributes aMemoryAttributes,
							   TMappingCreateFlags aMappingCreateFlags
							   )
	{
	TUint maxSize = aEnd-aStart;
	TInt r = MM::MemoryNew(aMemory, aType, MM::BytesToPages(maxSize), aMemoryCreateFlags, aMemoryAttributes);
	if(r==KErrNone)
		{
		TBool allowGaps = aInitSize&1; // lower bit of size is set if region to be claimed contains gaps
		aInitSize &= ~1;
		r = MM::MemoryClaimInitialPages(aMemory,aStart,aInitSize,ESupervisorReadWrite,allowGaps);
		if(r==KErrNone)
			{
			DMemoryMapping* mapping;
			r = MM::MappingNew(mapping,aMemory,ESupervisorReadWrite,KKernelOsAsid,aMappingCreateFlags,aStart);
			// prevent any further mappings of this memory,
			// this is needed for realtime and OOM guarantees...
			aMemory->DenyMappings();
			}
		}
	// Note, no cleanup is done if an error occurs because this function is only
	// used at boot time and the system can't recover from an error.
	return r;
	}


void MM::Panic(MM::TMemModelPanic aPanic)
	{
	Kern::Fault("MemModel", aPanic);
	}


//
//
//

TUint MM::BytesToPages(TUint aBytes)
	{
	if(aBytes&KPageMask)
		Panic(EBadBytesToPages);
	return aBytes>>KPageShift;
	}


TUint MM::RoundToPageSize(TUint aSize)
	{
	return (aSize+KPageMask)&~KPageMask;
	}


TUint MM::RoundToPageCount(TUint aSize)
	{
	return (aSize+KPageMask)>>KPageShift;
	}


TUint MM::RoundToPageShift(TUint aShift)
	{
	return aShift>(TUint)KPageShift ? aShift-KPageShift : 0;
	}
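
// Illustrative note (not part of the original source): with the common 4KB
// page size (KPageShift==12, KPageMask==0xFFF) these helpers behave as:
//
//	MM::RoundToPageSize(0x1001)  == 0x2000 // 4097 bytes round up to two pages
//	MM::RoundToPageCount(0x1001) == 2      // 4097 bytes occupy 2 pages
//	MM::RoundToPageShift(16)     == 4      // 64KB alignment == 2^4 pages alignment
//
// RoundToPageShift converts a byte-alignment shift into a page-count shift,
// which is why MM::MemoryAllocContiguous applies it to aAlign above.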


//
//
//

void MM::ValidateLocalIpcAddress(TLinAddr aAddr, TUint aSize, TBool aWrite)
	{
	__NK_ASSERT_DEBUG(aSize);

	TLinAddr end = aAddr+aSize-1;
	if(end<aAddr)
		end = ~(TLinAddr)0; // clip to end of memory

	// if IPC region is in process local data area then it's OK...
	if(end<KUserLocalDataEnd && aAddr>=KUserLocalDataBase)
		return;

	// if region overlaps alias region...
	if(end>=KIPCAlias && aAddr<KIPCAlias+KIPCAliasAreaSize)
		{
		// remove alias...
		((DMemModelThread*)TheCurrentThread)->RemoveAlias();
		// make sure start address is in alias region...
		if(aAddr<KIPCAlias)
			aAddr = KIPCAlias;
		// then cause fault now...
		MM::UserPermissionFault(aAddr,aWrite);
		}

	if(end<(TLinAddr)KUserMemoryLimit)
		return; // user memory is safe

	// Compare the current thread's process os asid to kernel asid; no need to
	// open a reference on the os asid as it is the current thread.
	if(((DMemModelProcess*)TheCurrentThread->iOwningProcess)->OsAsid()==(TInt)KKernelOsAsid)
		return; // kernel can access everything

	// make sure address is in supervisor only region...
	if(aAddr<KUserMemoryLimit)
		aAddr = KUserMemoryLimit;
	// then cause fault now...
	MM::UserPermissionFault(aAddr,aWrite);
	}


void MM::UserPermissionFault(TLinAddr aAddr, TBool aWrite)
	{
	// Access aAddr with user permissions to generate an exception...
	if(aWrite)
		UserWriteFault(aAddr);
	else
		UserReadFault(aAddr);
	__NK_ASSERT_ALWAYS(0); // shouldn't get here
	}


#ifndef __SMP__
void MM::IpcAliasPde(TPde*& aPdePtr, TUint aOsAsid)
	{
	aPdePtr = &Mmu::PageDirectory(aOsAsid)[KIPCAlias>>KChunkShift];
	}
#endif


TMappingPermissions MM::MappingPermissions(TBool aUser, TBool aWrite, TBool aExecute)
	{
	TUint perm = 0;
	if(aUser)
		perm |= EUser;
	if(aWrite)
		perm |= EReadWrite;
	if(aExecute)
		perm |= EExecute;
	return (TMappingPermissions)perm;
	}


TInt MM::MappingPermissions(TMappingPermissions& aPermissions, TMappingAttributes2 aLegacyAttributes)
	{
	TUint attr2 = *(TUint32*)&aLegacyAttributes;

	TUint read = attr2&EMapAttrReadMask;
	TUint write = (attr2&EMapAttrWriteMask)>>4;
	TUint execute = (attr2&EMapAttrExecMask)>>8;

	read |= execute;	// execute access requires read access

	if(write==0)		// no write required
		{
		if((read&5)==0)
			return KErrNotSupported; // neither supervisor nor user read specified
		}
	else if(write<4)	// supervisor write required
		{
		if(read>=4)
			return KErrNotSupported; // user read requested (but no user write)
		}

	read |= write;		// write access implies read access

	TUint user = read&4;
	aPermissions = MappingPermissions(user,write,execute);

	return KErrNone;
	}
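
// Illustrative note (not part of the original source): in the legacy
// TMappingAttributes encoding each access nibble uses 1 for supervisor and
// 4 for user access (user access implies supervisor access), which is what
// the '&5' and '>=4' tests above rely on. For example, a legacy word with
// read nibble 4 and write nibble 4 (user read/write) decodes to user==4,
// write==4 and yields user read/write permissions, while read nibble 1 with
// write nibble 0 yields supervisor-read-only.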


TInt MM::MemoryAttributes(TMemoryAttributes& aAttributes, TMappingAttributes2 aLegacyAttributes)
	{
	TUint attr = aLegacyAttributes.Type();
	if (aLegacyAttributes.Shared())
		attr |= EMemoryAttributeShareable;
	if (aLegacyAttributes.Parity())
		attr |= EMemoryAttributeUseECC;
	aAttributes = Mmu::CanonicalMemoryAttributes((TMemoryAttributes)attr);
	return KErrNone;
	}


TMappingAttributes2 MM::LegacyMappingAttributes(TMemoryAttributes aAttributes, TMappingPermissions aPermissions)
	{
	TUint attr = Mmu::CanonicalMemoryAttributes(aAttributes);
	return TMappingAttributes2
		(
		(TMemoryType)(attr&EMemoryAttributeTypeMask),
		aPermissions&EUser,
		aPermissions&EReadWrite,
		aPermissions&EExecute,
		attr&EMemoryAttributeShareable,
		attr&EMemoryAttributeUseECC
		);
	}