os/kernelhwsrv/kernel/eka/memmodel/epoc/flexible/mmu/mm.cpp
changeset 0 bde4ae8d615e
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/os/kernelhwsrv/kernel/eka/memmodel/epoc/flexible/mmu/mm.cpp	Fri Jun 15 03:10:57 2012 +0200
     1.3 @@ -0,0 +1,1136 @@
     1.4 +// Copyright (c) 2007-2009 Nokia Corporation and/or its subsidiary(-ies).
     1.5 +// All rights reserved.
     1.6 +// This component and the accompanying materials are made available
     1.7 +// under the terms of the License "Eclipse Public License v1.0"
     1.8 +// which accompanies this distribution, and is available
     1.9 +// at the URL "http://www.eclipse.org/legal/epl-v10.html".
    1.10 +//
    1.11 +// Initial Contributors:
    1.12 +// Nokia Corporation - initial contribution.
    1.13 +//
    1.14 +// Contributors:
    1.15 +//
    1.16 +// Description:
    1.17 +//
    1.18 +
    1.19 +#include "memmodel.h"
    1.20 +#include "mm.h"
    1.21 +#include "mmu.h"
    1.22 +#include "mobject.h"
    1.23 +#include "mmapping.h"
    1.24 +#include "mmanager.h"
    1.25 +#include "mpdalloc.h"
    1.26 +#include "mptalloc.h"
    1.27 +#include "mpager.h"
    1.28 +#include "maddressspace.h"
    1.29 +
    1.30 +
    1.31 +
    1.32 +
    1.33 +//
    1.34 +// DMutexPool
    1.35 +//
    1.36 +
    1.37 +DMutexPool::~DMutexPool()
    1.38 +	{
    1.39 +	TUint i;
    1.40 +	for(i=0; i<iCount; ++i)
    1.41 +		{
    1.42 +		DMutex* mutex = iMembers[i].iMutex;
    1.43 +		if(mutex)
    1.44 +			mutex->Close(0);
    1.45 +		}
    1.46 +	Kern::Free(iMembers);
    1.47 +	}
    1.48 +
    1.49 +
    1.50 +TInt DMutexPool::Create(TUint aCount, const TDesC* aName, TUint aOrder)
    1.51 +	{
    1.52 +	if(aCount>EMaxPoolSize)
    1.53 +		return KErrTooBig;
    1.54 +
    1.55 +	iMembers = (SMember*)Kern::AllocZ(aCount*sizeof(SMember));
    1.56 +	if(!iMembers)
    1.57 +		return KErrNoMemory;
    1.58 +
    1.59 +	iCount = aCount;
    1.60 +
    1.61 +	TInt r = KErrNone;
    1.62 +	TUint i;
    1.63 +	for(i=0; i<aCount; ++i)
    1.64 +		{
    1.65 +		TKName name;
    1.66 +		if(aName)
    1.67 +			{
    1.68 +			name = *aName;
    1.69 +			name.AppendNum(i);
    1.70 +			}
     1.71 +		r = K::MutexCreate(iMembers[i].iMutex, name, NULL, EFalse, aOrder);
    1.72 +		if(r!=KErrNone)
    1.73 +			break;
    1.74 +		}
    1.75 +
    1.76 +	return r;
    1.77 +	}
    1.78 +
    1.79 +
    1.80 +/**
    1.81 +@class DMutexPool
    1.82 +@details
    1.83 +
    1.84 +The cookie used for dynamically assigned mutexes is broken into three bit fields:
    1.85 +- Bit 0, always set. (To distinguish the cookie from a proper DMutex*).
     1.86 +- Bits 1 through #KMutexPoolIndexBits, which contain the index of the assigned
     1.87 +  mutex within DMutexPool::iMembers.
     1.88 +- Bits (#KMutexPoolIndexBits+1) through 31, which hold the count of threads waiting
     1.89 +  for this particular mutex assignment. When this count reaches zero, the mutex
     1.90 +  can be unassigned.
    1.91 +*/
    1.92 +
    1.93 +/**
    1.94 +Number of bits used to contain the index value of a dynamically assigned pool mutex.
    1.95 +*/
    1.96 +const TUint KMutexPoolIndexBits = 7;
    1.97 +
    1.98 +const TUint KMutexPoolIndexMask = ((1<<KMutexPoolIndexBits)-1)<<1;
    1.99 +const TUint KMutexPoolWaitCountIncrement = 1<<(KMutexPoolIndexBits+1);
   1.100 +
   1.101 +__ASSERT_COMPILE(DMutexPool::EMaxPoolSize<=TUint(KMutexPoolIndexMask/2+1)); // required for algorithm correctness
   1.102 +
   1.103 +__ASSERT_COMPILE(DMutexPool::EMaxPoolSize<=64); // required to avoid excessive system lock hold time
   1.104 +
   1.105 +
   1.106 +void DMutexPool::Wait(DMutex*& aMutexRef)
   1.107 +	{
   1.108 +	NKern::LockSystem();
   1.109 +
   1.110 +	TUintPtr poolMutex = (TUintPtr)aMutexRef;
   1.111 +	if(!poolMutex)
   1.112 +		{
   1.113 +		// try and find a free mutex, else use the next one...
   1.114 +		TUint next = iNext;
   1.115 +		do
   1.116 +			{
   1.117 +			if(iMembers[next].iUseCount==0)
   1.118 +				break;
   1.119 +			if(++next>=iCount)
   1.120 +				next = 0;
   1.121 +			}
   1.122 +		while(next!=iNext);
   1.123 +		// use found mutex...
   1.124 +		++iMembers[next].iUseCount;
   1.125 +		poolMutex = (next*2)+1; // mutex index*2 | 1
   1.126 +		// update next...
   1.127 +		if(++next>=iCount)
   1.128 +			next = 0;
   1.129 +		iNext = next;
   1.130 +		}
   1.131 +
   1.132 +	DMutex* mutex = (DMutex*)poolMutex;
   1.133 +	if(poolMutex&1)
   1.134 +		{
   1.135 +		// mutex is a pool mutex, get pointer, and update wait count...
   1.136 +		SMember* member = &iMembers[(poolMutex&KMutexPoolIndexMask)>>1];
   1.137 +		mutex = member->iMutex;
   1.138 +		poolMutex += KMutexPoolWaitCountIncrement;
   1.139 +		__NK_ASSERT_ALWAYS(poolMutex>=KMutexPoolWaitCountIncrement);
   1.140 +		aMutexRef = (DMutex*)poolMutex;
   1.141 +		}
   1.142 +
   1.143 +	mutex->Wait();
   1.144 +
   1.145 +	NKern::UnlockSystem();
   1.146 +	}
   1.147 +
   1.148 +
   1.149 +void DMutexPool::Signal(DMutex*& aMutexRef)
   1.150 +	{
   1.151 +	NKern::LockSystem();
   1.152 +
   1.153 +	TUintPtr poolMutex = (TUintPtr)aMutexRef;
   1.154 +	__NK_ASSERT_ALWAYS(poolMutex);
   1.155 +
   1.156 +	DMutex* mutex = (DMutex*)poolMutex;
   1.157 +
   1.158 +	if(poolMutex&1)
   1.159 +		{
   1.160 +		// mutex is a pool mutex, get pointer, and update wait count...
   1.161 +		SMember* member = &iMembers[(poolMutex&KMutexPoolIndexMask)>>1];
   1.162 +		mutex = member->iMutex;
   1.163 +		__NK_ASSERT_ALWAYS(poolMutex>=KMutexPoolWaitCountIncrement);
   1.164 +		poolMutex -= KMutexPoolWaitCountIncrement;
   1.165 +		if(poolMutex<KMutexPoolWaitCountIncrement)
   1.166 +			{
   1.167 +			--member->iUseCount;
   1.168 +			poolMutex = 0;
   1.169 +			}
   1.170 +		aMutexRef = (DMutex*)poolMutex;
   1.171 +		}
   1.172 +
   1.173 +	mutex->Signal();
   1.174 +	}
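          +// Typical client pattern (editor's sketch; DPoolClient and ThePool are
          +// hypothetical). A client stores a DMutex* which starts out NULL; Wait()
          +// lazily assigns a pool mutex and stores a cookie in the reference, and
          +// Signal() drops the assignment once no thread is left waiting.
          +#if 0
          +class DPoolClient
          +	{
          +public:
          +	void Lock()   { ThePool.Wait(iLock); }
          +	void Unlock() { ThePool.Signal(iLock); }
          +private:
          +	DMutex* iLock; // NULL, a real DMutex*, or a pool cookie
          +	};
          +#endif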
   1.175 +
   1.176 +
   1.177 +TBool DMutexPool::IsHeld(DMutex*& aMutexRef)
   1.178 +	{
   1.179 +	TBool held = false;
   1.180 +	NKern::LockSystem();
   1.181 +	TUintPtr poolMutex = (TUintPtr)aMutexRef;
   1.182 +	if(poolMutex)
   1.183 +		{
   1.184 +		DMutex* mutex = (DMutex*)poolMutex;
   1.185 +		if(poolMutex&1)
   1.186 +			{
   1.187 +			SMember* member = &iMembers[(poolMutex&KMutexPoolIndexMask)>>1];
   1.188 +			mutex = member->iMutex;
   1.189 +			}
   1.190 +		held = mutex->iCleanup.iThread==&Kern::CurrentThread();
   1.191 +		}
   1.192 +	NKern::UnlockSystem();
   1.193 +	return held;
   1.194 +	}
   1.195 +
   1.196 +
   1.197 +
   1.198 +//
   1.199 +// DReferenceCountedObject
   1.200 +//
   1.201 +
   1.202 +DReferenceCountedObject::~DReferenceCountedObject()
   1.203 +	{
   1.204 +	__NK_ASSERT_DEBUG(iReferenceCount==0);
   1.205 +	}
   1.206 +
   1.207 +
   1.208 +void DReferenceCountedObject::Open()
   1.209 +	{
   1.210 +	__ASSERT_CRITICAL
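          +	// __e32_atomic_tas_ord32(a,t,u,v): if *a>=t, add u, else add v, and
          +	// return the original value. So a zero count stays zero, and opening
          +	// an already-dead object is caught by the assert below.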
   1.211 +	TBool ok = __e32_atomic_tas_ord32(&iReferenceCount, 1, 1, 0);
   1.212 +	__NK_ASSERT_ALWAYS(ok);
   1.213 +	}
   1.214 +
   1.215 +
   1.216 +TBool DReferenceCountedObject::TryOpen()
   1.217 +	{
   1.218 +	__ASSERT_CRITICAL
   1.219 +	TBool ok = __e32_atomic_tas_ord32(&iReferenceCount, 1, 1, 0);
   1.220 +	return ok;
   1.221 +	}
   1.222 +
   1.223 +
   1.224 +TBool DReferenceCountedObject::CheckCloseIsSafe()
   1.225 +	{
   1.226 +	__ASSERT_CRITICAL
   1.227 +#ifdef _DEBUG
   1.228 +	NFastMutex* fm = NKern::HeldFastMutex();
   1.229 +	if(fm)
   1.230 +		{
   1.231 +		Kern::Printf("DReferenceCountedObject[0x%08x]::Close() fast mutex violation %M",this,fm);
   1.232 +		return false;
   1.233 +		}
   1.234 +	SDblQue& ml = TheCurrentThread->iMutexList;
   1.235 +	if(!ml.IsEmpty())
   1.236 +		{
   1.237 +		DMutex* m = _LOFF(ml.First(), DMutex, iOrderLink);
   1.238 +		if(m->iOrder<KMutexOrdKernelHeap)
   1.239 +			{
   1.240 +			Kern::Printf("DReferenceCountedObject[0x%08x]::Close() mutex order violation holding mutex %O",this,m);
   1.241 +			return false;
   1.242 +			}
   1.243 +		}
   1.244 +#endif
   1.245 +	return true;
   1.246 +	}
   1.247 +
   1.248 +
   1.249 +TBool DReferenceCountedObject::CheckAsyncCloseIsSafe()
   1.250 +	{
   1.251 +	__ASSERT_CRITICAL
   1.252 +#ifdef _DEBUG
   1.253 +	NFastMutex* fm = NKern::HeldFastMutex();
   1.254 +	if(fm)
   1.255 +		{
   1.256 +		Kern::Printf("DReferenceCountedObject[0x%08x]::AsyncClose() fast mutex violation %M",this,fm);
   1.257 +		return false;
   1.258 +		}
   1.259 +#endif
   1.260 +	return true;
   1.261 +	}
   1.262 +
   1.263 +
   1.264 +void DReferenceCountedObject::Close()
   1.265 +	{
   1.266 +	__ASSERT_CRITICAL
   1.267 +	__NK_ASSERT_DEBUG(CheckCloseIsSafe());
   1.268 +	__NK_ASSERT_DEBUG(iReferenceCount>0);
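          +	// Atomically decrement if the count is >=1; the original value is
          +	// returned, so a result of 1 means this was the final reference.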
   1.269 +	if (__e32_atomic_tas_ord32(&iReferenceCount, 1, -1, 0) == 1)
   1.270 +		delete this;
   1.271 +	}
   1.272 +
   1.273 +
   1.274 +void DReferenceCountedObject::AsyncClose()
   1.275 +	{
   1.276 +	__ASSERT_CRITICAL
   1.277 +	__NK_ASSERT_DEBUG(CheckAsyncCloseIsSafe());
   1.278 +	__NK_ASSERT_DEBUG(iReferenceCount>0);
   1.279 +	if (__e32_atomic_tas_ord32(&iReferenceCount, 1, -1, 0) == 1)
   1.280 +		AsyncDelete();
   1.281 +	}
   1.282 +
   1.283 +
   1.284 +//
   1.285 +// Memory object functions
   1.286 +//
   1.287 +
   1.288 +TInt MM::MemoryNew(DMemoryObject*& aMemory, TMemoryObjectType aType, TUint aPageCount, TMemoryCreateFlags aCreateFlags, TMemoryAttributes aAttributes)
   1.289 +	{
   1.290 +	TRACE(("MM::MemoryNew(?,0x%08x,0x%08x,0x%08x,0x%08x)",aType,aPageCount,aCreateFlags,*(TUint32*)&aAttributes));
   1.291 +
   1.292 +	DMemoryManager* manager;
   1.293 +	if(aCreateFlags&EMemoryCreateCustomManager)
   1.294 +		manager = (DMemoryManager*)aType;
   1.295 +	else
   1.296 +		{
   1.297 +		switch(aType)
   1.298 +			{
   1.299 +		case EMemoryObjectUnpaged:
   1.300 +			manager = TheUnpagedMemoryManager;
   1.301 +			break;
   1.302 +		case EMemoryObjectMovable:
   1.303 +			manager = TheMovableMemoryManager;
   1.304 +			break;
   1.305 +		case EMemoryObjectPaged:
   1.306 +			manager = TheDataPagedMemoryManager;
   1.307 +			break;
   1.308 +		case EMemoryObjectDiscardable:
   1.309 +			manager = TheDiscardableMemoryManager;
   1.310 +			break;
   1.311 +		case EMemoryObjectHardware:
   1.312 +			manager = TheHardwareMemoryManager;
   1.313 +			break;
   1.314 +		default:
   1.315 +			manager = 0;
   1.316 +			__NK_ASSERT_DEBUG(0);
   1.317 +			break;
   1.318 +			}
   1.319 +		}
   1.320 +	TMemoryCreateFlags flags = (TMemoryCreateFlags)(aCreateFlags&~(EMemoryCreateDemandPaged));
   1.321 +	TInt r = manager->New(aMemory,aPageCount,aAttributes,flags);
   1.322 +	TRACE(("MM::MemoryNew returns %d, aMemory=0x%08x",r,aMemory));
   1.323 +#ifdef BTRACE_FLEXIBLE_MEM_MODEL
   1.324 +	if (r == KErrNone)
   1.325 +		aMemory->BTraceCreate();
   1.326 +#endif
   1.327 +	return r;
   1.328 +	}
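          +// Typical lifecycle of a memory object (editor's sketch; error handling
          +// elided, `size` is the requested byte count, and the default create
          +// flags/attributes are assumed):
          +#if 0
          +DMemoryObject* memory;
          +TUint pageCount = MM::RoundToPageCount(size);
          +TInt r = MM::MemoryNew(memory, EMemoryObjectMovable, pageCount,
          +					   EMemoryCreateDefault, EMemoryAttributeStandard);
          +if(r==KErrNone)
          +	r = MM::MemoryAlloc(memory, 0, pageCount);	// commit RAM to the object
          +// ... use the memory ...
          +MM::MemoryDestroy(memory);						// frees pages and object
          +#endif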
   1.329 +
   1.330 +
   1.331 +TInt MM::MemoryClaimInitialPages(DMemoryObject* aMemory, TLinAddr aBase, TUint aSize, TMappingPermissions aPermissions, TBool aAllowGaps, TBool aAllowNonRamPages)
   1.332 +	{
    1.333 +	TRACE(("MM::MemoryClaimInitialPages(0x%08x,0x%08x,0x%08x,0x%08x,%d,%d)",aMemory,aBase,aSize,aPermissions,aAllowGaps!=0,aAllowNonRamPages!=0));
   1.334 +	TInt r = aMemory->ClaimInitialPages(aBase,aSize,aPermissions,aAllowGaps,aAllowNonRamPages);
   1.335 +	TRACE(("MM::MemoryClaimInitialPages returns %d",r));
   1.336 +	__NK_ASSERT_DEBUG(r==KErrNone);
   1.337 +	return r;
   1.338 +	}
   1.339 +
   1.340 +
   1.341 +void MM::MemorySetLock(DMemoryObject* aMemory, DMutex* aLock)
   1.342 +	{
   1.343 +	aMemory->SetLock(aLock);
   1.344 +	}
   1.345 +
   1.346 +
   1.347 +void MM::MemoryLock(DMemoryObject* aMemory)
   1.348 +	{
   1.349 +	MemoryObjectLock::Lock(aMemory);
   1.350 +	}
   1.351 +
   1.352 +
   1.353 +void MM::MemoryUnlock(DMemoryObject* aMemory)
   1.354 +	{
   1.355 +	MemoryObjectLock::Unlock(aMemory);
   1.356 +	}
   1.357 +
   1.358 +
   1.359 +void MM::MemoryDestroy(DMemoryObject*& aMemory)
   1.360 +	{
   1.361 +	DMemoryObject* memory = (DMemoryObject*)__e32_atomic_swp_ord_ptr(&aMemory, 0);
   1.362 +	if (!memory)
   1.363 +		return;
   1.364 +	TRACE(("MM::MemoryDestroy(0x%08x)",memory));
   1.365 +#ifdef BTRACE_FLEXIBLE_MEM_MODEL
   1.366 +	BTraceContext4(BTrace::EFlexibleMemModel,BTrace::EMemoryObjectDestroy,memory);
   1.367 +#endif
   1.368 +	memory->iManager->Destruct(memory);
   1.369 +	}
   1.370 +
   1.371 +
   1.372 +TInt MM::MemoryAlloc(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
   1.373 +	{
   1.374 +	TRACE(("MM::MemoryAlloc(0x%08x,0x%08x,0x%08x)",aMemory,aIndex,aCount));
   1.375 +	MemoryObjectLock::Lock(aMemory);
   1.376 +	TInt r;
   1.377 +	if(!aMemory->CheckRegion(aIndex,aCount))
   1.378 +		r = KErrArgument;
   1.379 +	else
   1.380 +		r = aMemory->iManager->Alloc(aMemory,aIndex,aCount);
   1.381 +	MemoryObjectLock::Unlock(aMemory);
   1.382 +	TRACE(("MM::MemoryAlloc returns %d",r));
   1.383 +	return r;
   1.384 +	}
   1.385 +
   1.386 +
   1.387 +TInt MM::MemoryAllocContiguous(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TUint aAlign, TPhysAddr& aPhysAddr)
   1.388 +	{
   1.389 +	TRACE(("MM::MemoryAllocContiguous(0x%08x,0x%08x,0x%08x,%d,?)",aMemory,aIndex,aCount,aAlign));
   1.390 +	MemoryObjectLock::Lock(aMemory);
   1.391 +	TInt r;
   1.392 +	if(!aMemory->CheckRegion(aIndex,aCount))
   1.393 +		r = KErrArgument;
   1.394 +	else
   1.395 +		r = aMemory->iManager->AllocContiguous(aMemory,aIndex,aCount,MM::RoundToPageShift(aAlign),aPhysAddr);
   1.396 +	MemoryObjectLock::Unlock(aMemory);
    1.397 +	TRACE(("MM::MemoryAllocContiguous returns %d (aPhysAddr=0x%08x)",r,aPhysAddr));
   1.398 +	return r;
   1.399 +	}
   1.400 +
   1.401 +
   1.402 +void MM::MemoryFree(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
   1.403 +	{
   1.404 +	TRACE(("MM::MemoryFree(0x%08x,0x%08x,0x%08x)",aMemory,aIndex,aCount));
   1.405 +	MemoryObjectLock::Lock(aMemory);
   1.406 +	aMemory->ClipRegion(aIndex,aCount);
   1.407 +	aMemory->iManager->Free(aMemory,aIndex,aCount);
   1.408 +	MemoryObjectLock::Unlock(aMemory);
   1.409 +	}
   1.410 +
   1.411 +
   1.412 +TInt MM::MemoryAddPages(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TPhysAddr* aPages)
   1.413 +	{
   1.414 +	TRACE(("MM::MemoryAddPages(0x%08x,0x%08x,0x%08x,?)",aMemory,aIndex,aCount));
   1.415 +	MemoryObjectLock::Lock(aMemory);
   1.416 +	TInt r;
   1.417 +	if(!aMemory->CheckRegion(aIndex,aCount))
   1.418 +		r = KErrArgument;
   1.419 +	else
   1.420 +		r = aMemory->iManager->AddPages(aMemory,aIndex,aCount,aPages);
   1.421 +	MemoryObjectLock::Unlock(aMemory);
   1.422 +	TRACE(("MM::MemoryAddPages returns %d",r));
   1.423 +	return r;
   1.424 +	}
   1.425 +
   1.426 +
   1.427 +TInt MM::MemoryAddContiguous(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TPhysAddr aPhysAddr)
   1.428 +	{
   1.429 +	TRACE(("MM::MemoryAddContiguous(0x%08x,0x%08x,0x%08x,0x%08x)",aMemory,aIndex,aCount,aPhysAddr));
   1.430 +	MemoryObjectLock::Lock(aMemory);
   1.431 +	TInt r;
   1.432 +	if(!aMemory->CheckRegion(aIndex,aCount))
   1.433 +		r = KErrArgument;
   1.434 +	else
   1.435 +		r = aMemory->iManager->AddContiguous(aMemory,aIndex,aCount,aPhysAddr);
   1.436 +	MemoryObjectLock::Unlock(aMemory);
   1.437 +	TRACE(("MM::MemoryAddContiguous returns %d",r));
   1.438 +	return r;
   1.439 +	}
   1.440 +
   1.441 +
   1.442 +TUint MM::MemoryRemovePages(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TPhysAddr* aPages)
   1.443 +	{
   1.444 +	TRACE(("MM::MemoryRemovePages(0x%08x,0x%08x,0x%08x)",aMemory,aIndex,aCount));
   1.445 +	MemoryObjectLock::Lock(aMemory);
   1.446 +	aMemory->ClipRegion(aIndex,aCount);
   1.447 +	TInt r = aMemory->iManager->RemovePages(aMemory,aIndex,aCount,aPages);
   1.448 +	if(r<0)
   1.449 +		r = 0;
   1.450 +	MemoryObjectLock::Unlock(aMemory);
   1.451 +	TRACE(("MM::MemoryRemovePages returns %d",r));
   1.452 +	return r;
   1.453 +	}
   1.454 +
   1.455 +
   1.456 +TInt MM::MemoryAllowDiscard(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
   1.457 +	{
   1.458 +	TRACE(("MM::MemoryAllowDiscard(0x%08x,0x%08x,0x%08x)",aMemory,aIndex,aCount));
   1.459 +	MemoryObjectLock::Lock(aMemory);
   1.460 +	TInt r;
   1.461 +	if(!aMemory->CheckRegion(aIndex,aCount))
   1.462 +		r = KErrArgument;
   1.463 +	else
   1.464 +		r = aMemory->iManager->AllowDiscard(aMemory,aIndex,aCount);
   1.465 +	MemoryObjectLock::Unlock(aMemory);
   1.466 +	TRACE(("MM::MemoryAllowDiscard returns %d",r));
   1.467 +	return r;
   1.468 +	}
   1.469 +
   1.470 +
   1.471 +TInt MM::MemoryDisallowDiscard(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
   1.472 +	{
   1.473 +	TRACE(("MM::MemoryDisallowDiscard(0x%08x,0x%08x,0x%08x)",aMemory,aIndex,aCount));
   1.474 +	MemoryObjectLock::Lock(aMemory);
   1.475 +	TInt r;
   1.476 +	if(!aMemory->CheckRegion(aIndex,aCount))
   1.477 +		r = KErrArgument;
   1.478 +	else
   1.479 +		r = aMemory->iManager->DisallowDiscard(aMemory,aIndex,aCount);
   1.480 +	MemoryObjectLock::Unlock(aMemory);
   1.481 +	TRACE(("MM::MemoryDisallowDiscard returns %d",r));
   1.482 +	return r;
   1.483 +	}
   1.484 +
   1.485 +
   1.486 +TInt MM::MemoryPhysAddr(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TPhysAddr& aPhysicalAddress, TPhysAddr* aPhysicalPageList)
   1.487 +	{
   1.488 +	TRACE(("MM::MemoryPhysAddr(0x%08x,0x%08x,0x%08x,?,?)",aMemory,aIndex,aCount));
   1.489 +	TInt r = aMemory->PhysAddr(aIndex,aCount,aPhysicalAddress,aPhysicalPageList);
   1.490 +	TRACE(("MM::MemoryPhysAddr returns %d aPhysicalAddress=0x%08x",r,aPhysicalAddress));
   1.491 +	return r;
   1.492 +	}
   1.493 +
   1.494 +
   1.495 +void MM::MemoryBTracePrime(DMemoryObject* aMemory)
   1.496 +	{
   1.497 +	aMemory->BTraceCreate();
   1.498 +	aMemory->iMappings.Lock();
   1.499 +	TMappingListIter iter;
   1.500 +	DMemoryMapping* mapping = (DMemoryMapping*)iter.Start(aMemory->iMappings);
   1.501 +	while(mapping)
   1.502 +		{
   1.503 +		aMemory->iMappings.Unlock();	
   1.504 +		mapping->BTraceCreate();
   1.505 +		aMemory->iMappings.Lock();
   1.506 +		mapping = (DMemoryMapping*)iter.Next();
   1.507 +		}
   1.508 +	iter.Finish();
   1.509 +	aMemory->iMappings.Unlock();	
   1.510 +	}
   1.511 +
   1.512 +
   1.513 +void MM::MemoryClose(DMemoryObject* aMemory)
   1.514 +	{
   1.515 +	aMemory->Close();
   1.516 +	}
   1.517 +
   1.518 +
   1.519 +TBool MM::MemoryIsNotMapped(DMemoryObject* aMemory)
   1.520 +	{
   1.521 +	TBool r = aMemory->iMappings.IsEmpty();
   1.522 +	TRACE2(("MM::MemoryIsNotMapped(0x%08x) returns %d",aMemory,r));
   1.523 +	return r;
   1.524 +	}
   1.525 +
   1.526 +//
   1.527 +// Physical pinning
   1.528 +//
   1.529 +
   1.530 +TInt MM::PinPhysicalMemory(DMemoryObject* aMemory, DPhysicalPinMapping* aPinObject, TUint aIndex, TUint aCount, TBool aReadOnly, TPhysAddr& aAddress, TPhysAddr* aPages, TUint32& aMapAttr, TUint& aColour)
   1.531 +	{
   1.532 +
   1.533 +	if (!aMemory->CheckRegion(aIndex,aCount))
    1.534 +		return KErrArgument;
   1.535 +
   1.536 +	TMappingPermissions permissions = aReadOnly ? ESupervisorReadOnly : ESupervisorReadWrite;
   1.537 +	TInt r = aPinObject->Pin(aMemory, aIndex, aCount, permissions);
   1.538 +	if (r == KErrNone)
   1.539 +		{
   1.540 +		r = aPinObject->PhysAddr(aIndex, aCount, aAddress, aPages);
   1.541 +		if (r>=KErrNone)
   1.542 +			{
    1.543 +			r = KErrNone; // Do not report discontiguous memory in the return value.
   1.544 +			const TMappingAttributes2& mapAttr2 =
   1.545 +				MM::LegacyMappingAttributes(aMemory->Attributes(), permissions);
   1.546 +			*(TMappingAttributes2*)&aMapAttr = mapAttr2;
   1.547 +			}
   1.548 +		else
   1.549 +			{
   1.550 +			aPinObject->Unpin();
   1.551 +			}
   1.552 +		}
   1.553 +
   1.554 +	aColour = 0;
   1.555 +	return r;
   1.556 +	}
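          +// Driver-side sketch (editor's illustration; how the pin object is created
          +// is outside this file, and the names memory, pinObject, pageCount and
          +// KMaxPinPages are hypothetical):
          +#if 0
          +TPhysAddr physAddr;
          +TPhysAddr pages[KMaxPinPages];
          +TUint32 mapAttr;
          +TUint colour;
          +TInt r = MM::PinPhysicalMemory(memory, pinObject, 0, pageCount, EFalse,
          +							   physAddr, pages, mapAttr, colour);
          +// On success physAddr is the start address when the region is physically
          +// contiguous; otherwise use the per-page addresses in pages[].
          +#endif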
   1.557 +
   1.558 +
   1.559 +TInt MM::MemoryWipe(DMemoryObject* aMemory)
   1.560 +	{
   1.561 +	__NK_ASSERT_ALWAYS(aMemory->iMappings.IsEmpty()); // can't be mapped otherwise confidentiality can't be guaranteed
   1.562 +	TRACE2(("MM::MemoryWipe(0x%08x)",aMemory));
   1.563 +	MemoryObjectLock::Lock(aMemory);
   1.564 +	TInt r = aMemory->iManager->Wipe(aMemory);
   1.565 +	MemoryObjectLock::Unlock(aMemory);
   1.566 +	return r;
   1.567 +	}
   1.568 +
   1.569 +
   1.570 +TInt MM::MemorySetReadOnly(DMemoryObject* aMemory)
   1.571 +	{
   1.572 +	TRACE2(("MM::MemorySetReadOnly(0x%08x)",aMemory));
   1.573 +	MemoryObjectLock::Lock(aMemory);
   1.574 +	TInt r = aMemory->SetReadOnly();
   1.575 +	MemoryObjectLock::Unlock(aMemory);
   1.576 +	return r;
   1.577 +	}
   1.578 +
   1.579 +//
   1.580 +// Mapping functions
   1.581 +//
   1.582 +
   1.583 +TInt MM::MappingNew(DMemoryMapping*& aMapping, DMemoryObject* aMemory, TMappingPermissions aPermissions, TInt aOsAsid, TMappingCreateFlags aFlags, TLinAddr aAddr, TUint aIndex, TUint aCount)
   1.584 +	{
   1.585 +	TRACE(("MM::MappingNew(?,0x%08x,0x%08x,%d,0x%08x,0x%08x,0x%08x,0x%08x)",aMemory, aPermissions, aOsAsid, aFlags, aAddr, aIndex, aCount));
   1.586 +
   1.587 +	/**
    1.588 +	@todo Make mappings created with this function fail (panic?) if they are reused to map
   1.589 +	another object.
   1.590 +	*/
   1.591 +	if(aCount==~0u)
   1.592 +		aCount = aMemory->iSizeInPages-aIndex;
   1.593 +
   1.594 +	// if memory object reserves all resources, make mappings also do so...
   1.595 +	if(aMemory->iFlags&DMemoryObject::EReserveResources)
   1.596 +		FlagSet(aFlags,EMappingCreateReserveAllResources);
   1.597 +
   1.598 +	// check if mapping is for global user data...
   1.599 +	if(aOsAsid==(TInt)KKernelOsAsid && aPermissions&EUser)
   1.600 +		FlagSet(aFlags,EMappingCreateUserGlobalVirtual);
   1.601 +	else
   1.602 +		FlagClear(aFlags,EMappingCreateUserGlobalVirtual);
   1.603 +
   1.604 +	// set paged attribute for mapping...
   1.605 +	if(aMemory->IsDemandPaged())
   1.606 +		FlagSet(aFlags,EMappingCreateDemandPaged);
   1.607 +	else
   1.608 +		FlagClear(aFlags,EMappingCreateDemandPaged);
   1.609 +
   1.610 +	DMemoryMapping* mapping = 0;
   1.611 +	TInt r = KErrNone;
   1.612 +	if(!aMemory->CheckRegion(aIndex,aCount))
   1.613 +		r = KErrArgument;
   1.614 +	else
   1.615 +		{
   1.616 +		mapping = aMemory->CreateMapping(aIndex, aCount);
   1.617 +		if(!mapping)
   1.618 +			r = KErrNoMemory;
   1.619 +		}
   1.620 +
   1.621 +	if(!mapping)
   1.622 +		{
   1.623 +		// free any virtual address the mapping should have adopted...
   1.624 +		if(aFlags&EMappingCreateAdoptVirtual)
   1.625 +			MM::VirtualFree(aOsAsid, aAddr, aCount<<KPageShift);
   1.626 +		}
   1.627 +	else
   1.628 +		{
   1.629 +		r = mapping->Construct(aMemory->Attributes(), aFlags, aOsAsid, aAddr, aCount<<KPageShift, aIndex<<KPageShift);
   1.630 +		if(r==KErrNone)
   1.631 +			r = mapping->Map(aMemory, aIndex, aCount, aPermissions);
   1.632 +		if(r!=KErrNone)
   1.633 +			{
   1.634 +			mapping->Close();
   1.635 +			mapping = 0;
   1.636 +			}
   1.637 +		}
   1.638 +
   1.639 +	aMapping = mapping;
    1.640 +	TRACE(("MM::MappingNew returns %d (aMapping=0x%08x)",r,aMapping));
   1.641 +#ifdef BTRACE_FLEXIBLE_MEM_MODEL
   1.642 +	if (r == KErrNone)
   1.643 +		aMapping->BTraceCreate();
   1.644 +#endif
   1.645 +	return r;
   1.646 +	}
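          +// Mapping an existing memory object into the kernel address space
          +// (editor's sketch; `memory` is an already-created object and the trailing
          +// arguments are assumed to take their usual defaults, e.g. aCount==~0u
          +// meaning "to the end of the object"):
          +#if 0
          +DMemoryMapping* mapping;
          +TInt r = MM::MappingNew(mapping, memory, ESupervisorReadWrite, KKernelOsAsid);
          +// ... use the mapping ...
          +MM::MappingDestroy(mapping);	// unmaps (if needed) and closes
          +#endif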
   1.647 +
   1.648 +
   1.649 +TInt MM::MappingNew(DMemoryMapping*& aMapping, TUint aCount, TInt aOsAsid, TMappingCreateFlags aFlags, TLinAddr aAddr, TLinAddr aColourOffset)
   1.650 +	{
   1.651 +	TRACE2(("MM::MappingNew(?,0x%08x,%d,0x%08x,0x%08x,0x%08x)",aCount, aOsAsid, aFlags, aAddr, aColourOffset));
   1.652 +
   1.653 +	FlagClear(aFlags,EMappingCreateDemandPaged); // mapping can't use demand paged page tables
   1.654 +
   1.655 +	TInt r = KErrNone;
   1.656 +	DMemoryMapping* mapping = new DFineMapping();
   1.657 +	if(!mapping)
   1.658 +		r = KErrNoMemory;
   1.659 +
   1.660 +	if(!mapping)
   1.661 +		{
   1.662 +		// free any virtual address the mapping should have adopted...
   1.663 +		if(aFlags&EMappingCreateAdoptVirtual)
   1.664 +			MM::VirtualFree(aOsAsid, aAddr, aCount<<KPageShift);
   1.665 +		}
   1.666 +	else
   1.667 +		{
   1.668 +		r = mapping->Construct(EMemoryAttributeStandard, aFlags, aOsAsid, aAddr, aCount<<KPageShift, aColourOffset);
   1.669 +		if(r!=KErrNone)
   1.670 +			{
   1.671 +			mapping->Close();
   1.672 +			mapping = 0;
   1.673 +			}
   1.674 +		}
   1.675 +
   1.676 +	aMapping = mapping;
    1.677 +	TRACE2(("MM::MappingNew returns %d (aMapping=0x%08x)",r,aMapping));
   1.678 +
   1.679 +	return r;
   1.680 +	}
   1.681 +
   1.682 +
   1.683 +TInt MM::MappingMap(DMemoryMapping* aMapping, TMappingPermissions aPermissions, DMemoryObject* aMemory, TUint aIndex, TUint aCount)
   1.684 +	{
   1.685 +	TRACE2(("MM::MappingMap(0x%08x,0x%08x,0x%08x,0x%x,0x%x)",aMapping,aPermissions,aMemory,aIndex,aCount));
   1.686 +	if(aCount==~0u)
   1.687 +		aCount = aMemory->iSizeInPages-aIndex;
   1.688 +	TInt r = aMapping->Map(aMemory, aIndex, aCount, aPermissions);
   1.689 +	TRACE2(("MM::MappingMap returns %d",r));
   1.690 +	return r;
   1.691 +	}
   1.692 +
   1.693 +
   1.694 +void MM::MappingUnmap(DMemoryMapping* aMapping)
   1.695 +	{
   1.696 +	if(aMapping->IsAttached())
   1.697 +		{
   1.698 +		TRACE2(("MM::MappingUnmap(0x%08x)",aMapping));
   1.699 +		aMapping->Unmap();
   1.700 +		}
   1.701 +	}
   1.702 +
   1.703 +
   1.704 +void MM::MappingDestroy(DMemoryMapping*& aMapping)
   1.705 +	{
   1.706 +	DMemoryMapping* mapping = (DMemoryMapping*)__e32_atomic_swp_ord_ptr(&aMapping, 0);
   1.707 +	if (!mapping)
   1.708 +		return;
   1.709 +	TRACE(("MM::MappingDestroy(0x%08x)",mapping));
   1.710 +#ifdef BTRACE_FLEXIBLE_MEM_MODEL
   1.711 +	BTraceContext4(BTrace::EFlexibleMemModel,BTrace::EMemoryMappingDestroy,mapping);
   1.712 +#endif
   1.713 +	if(mapping->IsAttached())
   1.714 +		mapping->Unmap();
   1.715 +	mapping->Close();
   1.716 +	}
   1.717 +
   1.718 +
   1.719 +void MM::MappingDestroy(TLinAddr aAddr, TInt aOsAsid)
   1.720 +	{
   1.721 +	DMemoryMapping* mapping = AddressSpace[aOsAsid]->GetMapping(aAddr);
   1.722 +	MM::MappingDestroy(mapping);
   1.723 +	}
   1.724 +
   1.725 +
   1.726 +void MM::MappingAndMemoryDestroy(DMemoryMapping*& aMapping)
   1.727 +	{
   1.728 +	DMemoryMapping* mapping = (DMemoryMapping*)__e32_atomic_swp_ord_ptr(&aMapping, 0);
   1.729 +	TRACE(("MM::MappingAndMemoryDestroy(0x%08x)",mapping));
   1.730 +	if (!mapping)
   1.731 +		return;
   1.732 +	DMemoryObject* memory = mapping->Memory(true); // safe because we assume owner hasn't unmapped mapping
   1.733 +	MM::MappingDestroy(mapping);
   1.734 +	MM::MemoryDestroy(memory);
   1.735 +	}
   1.736 +
   1.737 +
   1.738 +void MM::MappingAndMemoryDestroy(TLinAddr aAddr, TInt aOsAsid)
   1.739 +	{
   1.740 +	DMemoryMapping* mapping = AddressSpace[aOsAsid]->GetMapping(aAddr);
   1.741 +	MM::MappingAndMemoryDestroy(mapping);
   1.742 +	}
   1.743 +
   1.744 +
   1.745 +TLinAddr MM::MappingBase(DMemoryMapping* aMapping)
   1.746 +	{
   1.747 +	TLinAddr base = aMapping->Base();
   1.748 +	TRACE2(("MM::MappingBase(0x%08x) returns 0x%08x",aMapping,base));
   1.749 +	return base;
   1.750 +	}
   1.751 +
   1.752 +
   1.753 +TInt MM::MappingOsAsid(DMemoryMapping* aMapping)
   1.754 +	{
   1.755 +	return aMapping->OsAsid();
   1.756 +	}
   1.757 +
   1.758 +
   1.759 +DMemoryObject* MM::MappingGetAndOpenMemory(DMemoryMapping* aMapping)
   1.760 +	{
   1.761 +	MmuLock::Lock();
   1.762 +	DMemoryObject* memory = aMapping->Memory();
   1.763 +	if (memory)
   1.764 +		memory->Open();
   1.765 +	MmuLock::Unlock();
   1.766 +	TRACE2(("MM::MappingGetAndOpenMemory(0x%08x) returns 0x%08x",aMapping,memory));
   1.767 +	return memory;
   1.768 +	}
   1.769 +
   1.770 +
   1.771 +void MM::MappingClose(DMemoryMapping* aMapping)
   1.772 +	{
   1.773 +	TRACE2(("MM::MappingClose(0x%08x)",aMapping));
   1.774 +	aMapping->Close();
   1.775 +	}
   1.776 +
   1.777 +
   1.778 +DMemoryMapping* MM::FindMappingInThread(DMemModelThread* aThread, TLinAddr aAddr, TUint aSize, 
   1.779 +										TUint& aOffsetInMapping, TUint& aInstanceCount)
   1.780 +	{
   1.781 +	if(aAddr>=KGlobalMemoryBase)
   1.782 +		{
   1.783 +		// Address in global region, so look it up in kernel's address space...
   1.784 +		return FindMappingInAddressSpace(KKernelOsAsid, aAddr, aSize, aOffsetInMapping, aInstanceCount);
   1.785 +		}
   1.786 +
   1.787 +	// Address in thread's process address space so open a reference to its os asid
   1.788 +	// so that it remains valid for FindMappingInAddressSpace() call.
   1.789 +	DMemModelProcess* process = (DMemModelProcess*)aThread->iOwningProcess;
   1.790 +	TInt osAsid = process->TryOpenOsAsid();
   1.791 +	if (osAsid < 0)
   1.792 +		{// The process no longer owns an address space so can't have any mappings.
   1.793 +		return NULL;
   1.794 +		}
   1.795 +
   1.796 +	DMemoryMapping* r = FindMappingInAddressSpace(osAsid, aAddr, aSize, aOffsetInMapping, aInstanceCount);
   1.797 +
   1.798 +	process->CloseOsAsid();
   1.799 +	return r;
   1.800 +	}
   1.801 +
   1.802 +
   1.803 +DMemoryMapping* MM::FindMappingInAddressSpace(	TUint aOsAsid, TLinAddr aAddr, TUint aSize, 
   1.804 +												TUint& aOffsetInMapping, TUint& aInstanceCount)
   1.805 +	{
   1.806 +	return AddressSpace[aOsAsid]->FindMapping(aAddr, aSize, aOffsetInMapping, aInstanceCount);
   1.807 +	}
   1.808 +
   1.809 +
   1.810 +
   1.811 +//
   1.812 +// Address space
   1.813 +//
   1.814 +
   1.815 +TInt MM::AddressSpaceAlloc(TPhysAddr& aPageDirectory)
   1.816 +	{
   1.817 +	return DAddressSpace::New(aPageDirectory);
   1.818 +	}
   1.819 +
   1.820 +
   1.821 +void MM::AddressSpaceFree(TUint aOsAsid)
   1.822 +	{
   1.823 +	AddressSpace[aOsAsid]->Close();
   1.824 +	}
   1.825 +
   1.826 +
   1.827 +void MM::AsyncAddressSpaceFree(TUint aOsAsid)
   1.828 +	{
   1.829 +	AddressSpace[aOsAsid]->AsyncClose();
   1.830 +	}
   1.831 +
   1.832 +
   1.833 +TInt MM::VirtualAllocCommon(TLinAddr& aLinAddr, TUint aSize, TBool aDemandPaged)
   1.834 +	{
   1.835 +	TRACE(("MM::VirtualAllocCommon(?,0x%08x,%d)",aSize,aDemandPaged));
   1.836 +	TUint pdeType = aDemandPaged ? EVirtualSlabTypeDemandPaged : 0;
   1.837 +	TInt r = DAddressSpace::AllocateUserCommonVirtualMemory(aLinAddr, aSize, 0, aSize, pdeType);
   1.838 +	TRACE(("MM::VirtualAllocCommon returns %d region=0x%08x+0x%08x",r,aLinAddr,aSize));
   1.839 +	return r;
   1.840 +	}
   1.841 +
   1.842 +
   1.843 +void MM::VirtualFreeCommon(TLinAddr aLinAddr, TUint aSize)
   1.844 +	{
   1.845 +	TRACE(("MM::VirtualFreeCommon(0x%08x,0x%08x)",aLinAddr,aSize));
   1.846 +	DAddressSpace::FreeUserCommonVirtualMemory(aLinAddr, aSize);
   1.847 +	}
   1.848 +
   1.849 +
   1.850 +TInt MM::VirtualAlloc(TInt aOsAsid, TLinAddr& aLinAddr, TUint aSize, TBool aDemandPaged)
   1.851 +	{
   1.852 +	TRACE(("MM::VirtualAlloc(?,%d,0x%08x,%d)",aOsAsid,aSize,aDemandPaged));
   1.853 +	TUint pdeType = aDemandPaged ? EVirtualSlabTypeDemandPaged : 0;
   1.854 +	TInt r = AddressSpace[aOsAsid]->AllocateVirtualMemory(aLinAddr, aSize, 0, aSize, pdeType);
   1.855 +	TRACE(("MM::VirtualAlloc returns %d region=0x%08x+0x%08x",r,aLinAddr,aSize));
   1.856 +	return r;
   1.857 +	}
   1.858 +
   1.859 +
   1.860 +void MM::VirtualFree(TInt aOsAsid, TLinAddr aLinAddr, TUint aSize)
   1.861 +	{
   1.862 +	TRACE(("MM::VirtualFree(%d,0x%08x,0x%08x)",aOsAsid,aLinAddr,aSize));
   1.863 +	AddressSpace[aOsAsid]->FreeVirtualMemory(aLinAddr, aSize);
   1.864 +	}
   1.865 +
   1.866 +
   1.867 +
   1.868 +//
   1.869 +// Init
   1.870 +//
   1.871 +
   1.872 +void MM::Init1()
   1.873 +	{
   1.874 +	TheMmu.Init1();
   1.875 +	}
   1.876 +
   1.877 +
   1.878 +extern DMutexPool MemoryObjectMutexPool;
   1.879 +extern DMutexPool AddressSpaceMutexPool;
   1.880 +
   1.881 +void MM::Init2()
   1.882 +	{
   1.883 +	TInt r;
   1.884 +
   1.885 +	TheMmu.Init2();
   1.886 +
   1.887 +	// create mutex pools before calling any functions which require them...
   1.888 +	_LIT(KAddressSpaceMutexName,"AddressSpaceMutex");
   1.889 +	r = AddressSpaceMutexPool.Create(4, &KAddressSpaceMutexName, KMutexOrdAddresSpace);
   1.890 +	__NK_ASSERT_ALWAYS(r==KErrNone);
   1.891 +	_LIT(KMemoryObjectMutexName,"MemoryObjectMutex");
   1.892 +	r = MemoryObjectMutexPool.Create(8, &KMemoryObjectMutexName, KMutexOrdMemoryObject);
   1.893 +	__NK_ASSERT_ALWAYS(r==KErrNone);
   1.894 +
   1.895 +	// use the Ram Allocator mutex for low-level memory functions...
   1.896 +	DMutex* mmuAllocMutex = TheMmu.iRamAllocatorMutex;
   1.897 +
   1.898 +	// memory cleanup needs initialising before any memory is freed...
   1.899 +	TMemoryCleanup::Init2();
   1.900 +
   1.901 +	// initialise allocators used for MMU operations...
   1.902 +	RPageArray::Init2A();
   1.903 +	PageTables.Init2(mmuAllocMutex); // must come before any other code which allocates memory objects
   1.904 +	RPageArray::Init2B(mmuAllocMutex);
   1.905 +	PageTables.Init2B();
   1.906 +	PageDirectories.Init2();
   1.907 +
   1.908 +	// initialise address spaces...
   1.909 +	DAddressSpace::Init2();
   1.910 +
   1.911 +	// init pager...
   1.912 +	ThePager.Init2();
   1.913 +
   1.914 +	TheMmu.Init2Final();
   1.915 +	}
   1.916 +
   1.917 + 
   1.918 +/** HAL Function wrapper for the RAM allocator.
   1.919 +*/
   1.920 +TInt RamHalFunction(TAny*, TInt aFunction, TAny* a1, TAny* a2)
   1.921 +	{
   1.922 +	return TheMmu.RamHalFunction(aFunction, a1, a2);
   1.923 +	}
   1.924 +
   1.925 +
   1.926 +void MM::Init3()
   1.927 +	{
   1.928 +	__KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("MM::Init3"));
   1.929 +	ThePager.Init3();
   1.930 +
   1.931 +	// Register a HAL Function for the Ram allocator.
   1.932 +	TInt r = Kern::AddHalEntry(EHalGroupRam, RamHalFunction, 0);
   1.933 +	__NK_ASSERT_ALWAYS(r==KErrNone);
   1.934 +
   1.935 +	TheMmu.Init3();
   1.936 +	}
   1.937 +
   1.938 +
   1.939 +TInt MM::InitFixedKernelMemory(DMemoryObject*& aMemory,
   1.940 +							   TLinAddr aStart,
   1.941 +							   TLinAddr aEnd,
   1.942 +							   TUint aInitSize,
   1.943 +							   TMemoryObjectType aType,
   1.944 +							   TMemoryCreateFlags aMemoryCreateFlags,
   1.945 +							   TMemoryAttributes aMemoryAttributes,
   1.946 +							   TMappingCreateFlags aMappingCreateFlags
   1.947 +							   )
   1.948 +	{
   1.949 +	TUint maxSize = aEnd-aStart;
   1.950 +	TInt r = MM::MemoryNew(aMemory, aType, MM::BytesToPages(maxSize), aMemoryCreateFlags, aMemoryAttributes);
   1.951 +	if(r==KErrNone)
   1.952 +		{
   1.953 +		TBool allowGaps = aInitSize&1; // lower bit of size is set if region to be claimed contains gaps
   1.954 +		aInitSize &= ~1;
   1.955 +		r = MM::MemoryClaimInitialPages(aMemory,aStart,aInitSize,ESupervisorReadWrite,allowGaps);
   1.956 +		if(r==KErrNone)
   1.957 +			{
   1.958 +			DMemoryMapping* mapping;
   1.959 +			r = MM::MappingNew(mapping,aMemory,ESupervisorReadWrite,KKernelOsAsid,aMappingCreateFlags,aStart);
   1.960 +			// prevent any further mappings of this memory,
   1.961 +			// this is needed for realtime and OOM guarantees...
   1.962 +			aMemory->DenyMappings();
   1.963 +			}
   1.964 +		}
   1.965 +	// Note, no cleanup is done if an error occurs because this function is only
   1.966 +	// used at boot time and the system can't recover from an error
   1.967 +	return r;
   1.968 +	}
   1.969 +
   1.970 +
   1.971 +void MM::Panic(MM::TMemModelPanic aPanic)
   1.972 +	{
   1.973 +	Kern::Fault("MemModel", aPanic);
   1.974 +	}
   1.975 +
   1.976 +
   1.977 +//
   1.978 +//
   1.979 +//
   1.980 +
   1.981 +TUint MM::BytesToPages(TUint aBytes)
   1.982 +	{
   1.983 +	if(aBytes&KPageMask)
   1.984 +		Panic(EBadBytesToPages);
   1.985 +	return aBytes>>KPageShift;
   1.986 +	}
   1.987 +
   1.988 +
   1.989 +TUint MM::RoundToPageSize(TUint aSize)
   1.990 +	{
   1.991 +	return (aSize+KPageMask)&~KPageMask;
   1.992 +	}
   1.993 +
   1.994 +
   1.995 +TUint MM::RoundToPageCount(TUint aSize)
   1.996 +	{
   1.997 +	return (aSize+KPageMask)>>KPageShift;
   1.998 +	}
   1.999 +
  1.1000 +
  1.1001 +TUint MM::RoundToPageShift(TUint aShift)
  1.1002 +	{
  1.1003 +	return aShift>(TUint)KPageShift ? aShift-KPageShift : 0;
  1.1004 +	}
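          +// Worked examples with 4KB pages (KPageShift==12): RoundToPageSize(0x1001)
          +// == 0x2000, RoundToPageCount(0x1001) == 2, and RoundToPageShift(16) == 4,
          +// i.e. a 2^16-byte (64KB) alignment request becomes a 2^4-page alignment.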
  1.1005 +
  1.1006 +
  1.1007 +//
  1.1008 +//
  1.1009 +//
  1.1010 +
  1.1011 +void MM::ValidateLocalIpcAddress(TLinAddr aAddr, TUint aSize, TBool aWrite)
  1.1012 +	{
  1.1013 +	__NK_ASSERT_DEBUG(aSize);
  1.1014 +
  1.1015 +	TLinAddr end = aAddr+aSize-1;
  1.1016 +	if(end<aAddr)
  1.1017 +		end = ~(TLinAddr)0; // clip to end of memory
  1.1018 +
  1.1019 +	// if IPC region is in process local data area then it's OK...
  1.1020 +	if(end<KUserLocalDataEnd && aAddr>=KUserLocalDataBase)
  1.1021 +		return;
  1.1022 +
  1.1023 +	// if region overlaps alias region...
  1.1024 +	if(end>=KIPCAlias && aAddr<KIPCAlias+KIPCAliasAreaSize)
  1.1025 +		{
  1.1026 +		// remove alias...
  1.1027 +		((DMemModelThread*)TheCurrentThread)->RemoveAlias();
  1.1028 +		// make sure start address is in alias region...
  1.1029 +		if(aAddr<KIPCAlias)
  1.1030 +			aAddr = KIPCAlias;
  1.1031 +		// then cause fault now...
  1.1032 +		MM::UserPermissionFault(aAddr,aWrite);
  1.1033 +		}
  1.1034 +
  1.1035 +	if(end<(TLinAddr)KUserMemoryLimit)
  1.1036 +		return; // user memory is safe
  1.1037 +	
  1.1038 +	// Compare the current thread's process os asid to kernel asid, no need to 
  1.1039 +	// open a reference on the os asid as it is the current thread.
  1.1040 +	if(((DMemModelProcess*)TheCurrentThread->iOwningProcess)->OsAsid()==(TInt)KKernelOsAsid)
  1.1041 +		return; // kernel can access everything
  1.1042 +
  1.1043 +	// make sure address is in supervisor only region...
  1.1044 +	if(aAddr<KUserMemoryLimit)
  1.1045 +		aAddr = KUserMemoryLimit;
  1.1046 +	// then cause fault now...
  1.1047 +	MM::UserPermissionFault(aAddr,aWrite);
  1.1048 +	}
  1.1049 +
  1.1050 +
  1.1051 +void MM::UserPermissionFault(TLinAddr aAddr, TBool aWrite)
  1.1052 +	{
  1.1053 +	// Access aAddr with user permissions to generate an exception...
  1.1054 +	if(aWrite)
  1.1055 +		UserWriteFault(aAddr);
  1.1056 +	else
  1.1057 +		UserReadFault(aAddr);
  1.1058 +	__NK_ASSERT_ALWAYS(0); // shouldn't get here
  1.1059 +	}
  1.1060 +
  1.1061 +
  1.1062 +#ifndef __SMP__
  1.1063 +void MM::IpcAliasPde(TPde*& aPdePtr, TUint aOsAsid)
  1.1064 +	{
  1.1065 +	aPdePtr = &Mmu::PageDirectory(aOsAsid)[KIPCAlias>>KChunkShift];
  1.1066 +	}
  1.1067 +#endif
  1.1068 +
  1.1069 +
  1.1070 +TMappingPermissions MM::MappingPermissions(TBool aUser, TBool aWrite, TBool aExecute)
  1.1071 +	{
  1.1072 +	TUint perm	= 0;
  1.1073 +	if(aUser)
  1.1074 +		perm |= EUser;
  1.1075 +	if(aWrite)
  1.1076 +		perm |= EReadWrite;
  1.1077 +	if(aExecute)
  1.1078 +		perm |= EExecute;
  1.1079 +	return (TMappingPermissions)perm;
  1.1080 +	}
  1.1081 +
  1.1082 +
  1.1083 +TInt MM::MappingPermissions(TMappingPermissions& aPermissions, TMappingAttributes2 aLegacyAttributes)
  1.1084 +	{
  1.1085 +	TUint attr2 = *(TUint32*)&aLegacyAttributes;
  1.1086 +
  1.1087 +	TUint read = attr2&EMapAttrReadMask;
  1.1088 +	TUint write = (attr2&EMapAttrWriteMask)>>4;
  1.1089 +	TUint execute = (attr2&EMapAttrExecMask)>>8;
  1.1090 +
  1.1091 +	read |= execute; 	// execute access requires read access
  1.1092 +
  1.1093 +	if(write==0) 		// no write required
  1.1094 +		{
  1.1095 +		if((read&5)==0)
  1.1096 +			return KErrNotSupported; // neither supervisor nor user read specified
  1.1097 +		}
  1.1098 +	else if(write<4)	// supervisor write required
  1.1099 +		{
  1.1100 +		if(read>=4)
  1.1101 +			return KErrNotSupported; // user read requested (but no user write)
  1.1102 +		}
  1.1103 +
  1.1104 +	read |= write;		// write access implies read access
  1.1105 +
  1.1106 +	TUint user = read&4;
  1.1107 +	aPermissions = MappingPermissions(user,write,execute);
  1.1108 +
  1.1109 +	return KErrNone;
  1.1110 +	}
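          +// Worked example (editor's illustration, using the legacy EMapAttrXxx
          +// encoding implied by the masks above: read in bits 0-3, write in bits 4-7,
          +// execute in bits 8-11, with value 1 for supervisor and 4 for user access):
          +// attr2==0x44 gives read==4, write==4, execute==0; both checks pass,
          +// read|=write keeps read==4, so user!=0 and the result is user read/write
          +// permissions.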
  1.1111 +
  1.1112 +
  1.1113 +TInt MM::MemoryAttributes(TMemoryAttributes& aAttributes, TMappingAttributes2 aLegacyAttributes)
  1.1114 +	{
  1.1115 +	TUint attr = aLegacyAttributes.Type();
  1.1116 +	if (aLegacyAttributes.Shared())
  1.1117 +		attr |= EMemoryAttributeShareable;
  1.1118 +	if (aLegacyAttributes.Parity())
  1.1119 +		attr |= EMemoryAttributeUseECC;
  1.1120 +	aAttributes = Mmu::CanonicalMemoryAttributes((TMemoryAttributes)attr);
  1.1121 +	return KErrNone;
  1.1122 +	}
  1.1123 +
  1.1124 +
  1.1125 +TMappingAttributes2 MM::LegacyMappingAttributes(TMemoryAttributes aAttributes, TMappingPermissions aPermissions)
  1.1126 +	{
  1.1127 +	TUint attr = Mmu::CanonicalMemoryAttributes(aAttributes);
  1.1128 +	return TMappingAttributes2
  1.1129 +		(
  1.1130 +		(TMemoryType)(attr&EMemoryAttributeTypeMask),
  1.1131 +		aPermissions&EUser,
  1.1132 +		aPermissions&EReadWrite,
  1.1133 +		aPermissions&EExecute,
  1.1134 +		attr&EMemoryAttributeShareable,
  1.1135 +		attr&EMemoryAttributeUseECC
  1.1136 +		);
  1.1137 +	}
  1.1138 +
  1.1139 +