os/kernelhwsrv/kernel/eka/memmodel/epoc/flexible/mshbuf.cpp
changeset 0 bde4ae8d615e
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/os/kernelhwsrv/kernel/eka/memmodel/epoc/flexible/mshbuf.cpp	Fri Jun 15 03:10:57 2012 +0200
     1.3 @@ -0,0 +1,1908 @@
     1.4 +// Copyright (c) 2009 Nokia Corporation and/or its subsidiary(-ies).
     1.5 +// All rights reserved.
     1.6 +// This component and the accompanying materials are made available
     1.7 +// under the terms of the License "Eclipse Public License v1.0"
     1.8 +// which accompanies this distribution, and is available
     1.9 +// at the URL "http://www.eclipse.org/legal/epl-v10.html".
    1.10 +//
    1.11 +// Initial Contributors:
    1.12 +// Nokia Corporation - initial contribution.
    1.13 +//
    1.14 +// Contributors:
    1.15 +//
    1.16 +// Description:
    1.17 +// e32/memmodel/epoc/flexible/mshbuf.cpp
    1.18 +// Shareable Data Buffers
    1.19 +
    1.20 +#include <memmodel.h>
    1.21 +#include "mmu/mm.h"
    1.22 +#include "mmboot.h"
    1.23 +#include <kernel/smap.h>
    1.24 +
    1.25 +_LIT(KLitDMemModelAlignedShPool,"DMMAlignedShPool");	// Must be no more than 16 characters!
    1.26 +
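          +// TWait: a stack-based fast-semaphore waiter. A thread that finds a buffer
          +// mapping mid-transition links one of these onto the mapping's iTransitions
          +// list and blocks; the thread performing the transition signals the whole
          +// list once it has finished (see TWait::SignalAll).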
    1.27 +struct TWait
    1.28 +	{
    1.29 +	void Link(TWait*& aList)
    1.30 +		{
    1.31 +		iSem.SetOwner(NULL);
    1.32 +		iNext = aList;
    1.33 +		aList = this;
    1.34 +		};
    1.35 +	void Wait()
    1.36 +		{
    1.37 +		NKern::FSWait(&iSem);
    1.38 +		}
    1.39 +	NFastSemaphore iSem;
    1.40 +	TWait* iNext;
    1.41 +
    1.42 +	static void SignalAll(TWait* aList)
    1.43 +		{
    1.44 +		while (aList)
    1.45 +			{
    1.46 +			TWait* next = aList->iNext;
    1.47 +			NKern::FSSignal(&aList->iSem);
    1.48 +			aList = next;
    1.49 +			}
    1.50 +		}
    1.51 +	};
    1.52 +
    1.53 +
    1.54 +class DShBufMapping : public DBase
    1.55 +	{
    1.56 +public:
    1.57 +	SDblQueLink iObjLink;
    1.58 +	DMemoryMapping* iMapping;
    1.59 +	TInt iOsAsid;
    1.60 +	TWait* iTransitions; // Mapping and Unmapping operations
    1.61 +	TBool iTransitioning;
    1.62 +	};
    1.63 +
    1.64 +
    1.65 +DMemModelShPool::DMemModelShPool() : DShPool()
    1.66 +	{
    1.67 +	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelShPool::DMemModelShPool"));
    1.68 +	}
    1.69 +
    1.70 +DMemModelShPool::~DMemModelShPool()
    1.71 +	{
    1.72 +	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelShPool::~DMemModelShPool"));
    1.73 +	}
    1.74 +
    1.75 +void DMemModelShPool::DestroyClientResources(DProcess* aProcess)
    1.76 +	{
    1.77 +	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelShPool::DestroyClientResources"));
    1.78 +
    1.79 +	TInt r = DestroyAllMappingsAndReservedHandles(aProcess);
    1.80 +	__NK_ASSERT_DEBUG((r == KErrNone) || (r == KErrDied));
    1.81 +	(void)r;		// Silence warnings
    1.82 +	}
    1.83 +
    1.84 +DMemModelAlignedShBuf::DMemModelAlignedShBuf(DShPool* aPool) : DShBuf(aPool)
    1.85 +	{
    1.86 +	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelAlignedShBuf::DMemModelAlignedShBuf()"));
    1.87 +	}
    1.88 +
    1.89 +TInt DMemModelAlignedShBuf::Construct()
    1.90 +	{
    1.91 +	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelAlignedShBuf::Construct()"));
    1.92 +
    1.93 +	TInt r = KErrNone;
    1.94 +
    1.95 +	r = DShBuf::Construct();
    1.96 +
    1.97 +	if (r == KErrNone)
    1.98 +		r = Create();
    1.99 +
   1.100 +	return r;
   1.101 +	}
   1.102 +
   1.103 +TInt DMemModelAlignedShBuf::Close(TAny* aPtr)
   1.104 +	{
   1.105 +	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelAlignedShBuf::Close(0x%08x)", aPtr));
   1.106 +
   1.107 +	if (aPtr)
   1.108 +		{
   1.109 +		DProcess* pP = reinterpret_cast<DProcess*>(aPtr);
   1.110 +		UnMap(pP);
   1.111 +		iPool->CloseClient(pP);
   1.112 +		}
   1.113 +
   1.114 +	return DShBuf::Close(aPtr);
   1.115 +	}
   1.116 +
   1.117 +TInt DMemModelAlignedShBuf::AddToProcess(DProcess* aProcess, TUint aAttr)
   1.118 +	{
   1.119 +	__KTRACE_OPT(KMMU,Kern::Printf("Adding DMemModelShBuf %O to process %O",this,aProcess));
   1.120 +	TInt r;
   1.121 +	TLinAddr base;
   1.122 +	TUint flags;
   1.123 +
   1.124 +	r = iPool->OpenClient(aProcess, flags);
   1.125 +
   1.126 +	if (r == KErrNone)
   1.127 +		{
   1.128 +		if ((flags & EShPoolAutoMapBuf) && ((aAttr & EShPoolNoMapBuf) == 0))
   1.129 +			{
   1.130 +			// note we use the client's pool flags and not the buffer attributes
   1.131 +			r = Map(flags, aProcess, base);
   1.132 +
   1.133 +			if (aProcess == K::TheKernelProcess)
   1.134 +				iRelAddress = static_cast<TLinAddr>(base);
   1.135 +			}
   1.136 +		}
   1.137 +
   1.138 +	return r;
   1.139 +	}
   1.140 +
   1.141 +TInt DMemModelAlignedShBuf::Create()
   1.142 +	{
   1.143 +	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelAlignedShBuf::Create()"));
   1.144 +	TInt r = KErrNone;
   1.145 +
   1.146 +	// calculate memory type...
   1.147 +	TMemoryObjectType memoryType =  EMemoryObjectUnpaged;
   1.148 +
   1.149 +	TMemoryAttributes attr = EMemoryAttributeStandard;
   1.150 +
   1.151 +	// calculate memory flags...
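          +	// (EMemoryCreateUseCustomWipeByte with 0xAA tells the MM to use 0xAA as
          +	// the wipe byte for this memory object instead of the default)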
   1.152 +	TMemoryCreateFlags flags = static_cast<TMemoryCreateFlags>((EMemoryCreateDefault|EMemoryCreateUseCustomWipeByte|(0xAA<<EMemoryCreateWipeByteShift)));
   1.153 +
     1.154 +	// note that any guard pages will be included in iBufGap; however, the
     1.155 +	// amount of memory committed will be iBufSize rounded up to a whole page
   1.156 +	r = MM::MemoryNew(iMemoryObject, memoryType, MM::RoundToPageCount(iPool->iBufGap), flags, attr);
   1.157 +
   1.158 +	if(r!=KErrNone)
   1.159 +		return r;
   1.160 +
   1.161 +	if (iPool->iPoolFlags & EShPoolContiguous)
   1.162 +		{
   1.163 +		TPhysAddr paddr;
   1.164 +		r = MM::MemoryAllocContiguous(iMemoryObject, 0, MM::RoundToPageCount(iPool->iBufSize), 0, paddr);
   1.165 +		}
   1.166 +	else
   1.167 +		{
   1.168 +		r = MM::MemoryAlloc(iMemoryObject, 0, MM::RoundToPageCount(iPool->iBufSize));
   1.169 +		}
   1.170 +
   1.171 +	return r;
   1.172 +	}
   1.173 +
   1.174 +DMemModelAlignedShBuf::~DMemModelAlignedShBuf()
   1.175 +	{
   1.176 +	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelAlignedShBuf::~DMemModelAlignedShBuf()"));
   1.177 +
   1.178 +	__NK_ASSERT_DEBUG(iMappings.IsEmpty());
   1.179 +
   1.180 +	MM::MemoryDestroy(iMemoryObject);
   1.181 +	}
   1.182 +
   1.183 +TInt DMemModelAlignedShBuf::Map(TUint aMapAttr, DProcess* aProcess, TLinAddr& aBase)
   1.184 +	{
   1.185 +	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelAlignedShBuf::Map()"));
   1.186 +	TInt r = KErrNone;
   1.187 +
   1.188 +	DShBufMapping* m = NULL;
   1.189 +	DMemoryMapping* mapping = NULL;
   1.190 +	DMemModelProcess* pP = reinterpret_cast<DMemModelProcess*>(aProcess);
   1.191 +
   1.192 +	TBool write = (TBool)EFalse;
   1.193 +
     1.194 +	// User if not the kernel process, Write if the pool is writeable, Execute = EFalse
   1.195 +	if (aMapAttr & EShPoolWriteable)
   1.196 +		write = (TBool)ETrue;
   1.197 +
   1.198 +	TMappingPermissions perm = MM::MappingPermissions(pP!=K::TheKernelProcess, write, (TBool)EFalse);
   1.199 +	TWait wait;
   1.200 +
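          +	// Retry loop: if this process already has a mapping for the buffer then
          +	// either fail with KErrAlreadyExists or, if the mapping is still being
          +	// mapped/unmapped by another thread, wait for that transition to finish
          +	// and look again.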
   1.201 +	for(;;)
   1.202 +		{
   1.203 +		iPool->LockPool();
   1.204 +		r = FindMapping(m, pP);
   1.205 +
   1.206 +		if (r != KErrNone)
   1.207 +			break;
   1.208 +		
   1.209 +		if (m->iTransitioning)
   1.210 +			{
   1.211 +			wait.Link(m->iTransitions);
   1.212 +			iPool->UnlockPool();
   1.213 +			wait.Wait();
   1.214 +			}
   1.215 +		else
   1.216 +			{
   1.217 +			iPool->UnlockPool();
   1.218 +			return KErrAlreadyExists;
   1.219 +			}
   1.220 +		}
   1.221 +
   1.222 +	DMemModelAlignedShPoolClient* client = reinterpret_cast<DMemModelAlignedShPoolClient*>(iPool->iClientMap->Find(reinterpret_cast<TUint>(aProcess)));
   1.223 +
   1.224 +	__NK_ASSERT_DEBUG(client);
   1.225 +
   1.226 +	DMemModelAlignedShPool* pool = reinterpret_cast<DMemModelAlignedShPool*>(iPool);
   1.227 +
   1.228 +	__NK_ASSERT_DEBUG(m == NULL);
   1.229 +	r = pool->GetFreeMapping(m, client);
   1.230 +
   1.231 +	if (r == KErrNone)
   1.232 +		{
   1.233 +		iMappings.AddHead(&m->iObjLink);
   1.234 +		m->iTransitioning = ETrue;
   1.235 +
   1.236 +		mapping = m->iMapping;
   1.237 +		iPool->UnlockPool(); // have to release fast lock for MappingMap
   1.238 +
   1.239 +		r = MM::MappingMap(mapping, perm, iMemoryObject, 0, MM::RoundToPageCount(pool->iBufSize));
   1.240 +
   1.241 +		iPool->LockPool();
   1.242 +
   1.243 +		TWait* list = m->iTransitions;
   1.244 +		m->iTransitions = NULL;
   1.245 +
   1.246 +		if (r != KErrNone)
   1.247 +		    pool->ReleaseMapping(m, client);
   1.248 +		else
   1.249 +		    aBase = MM::MappingBase(mapping);
   1.250 +
   1.251 +		m->iTransitioning = EFalse;
   1.252 +		iPool->UnlockPool();
   1.253 +
   1.254 +		TWait::SignalAll(list);
   1.255 +		}
   1.256 +	else
   1.257 +		iPool->UnlockPool();
   1.258 +
   1.259 +	return r;
   1.260 +	}
   1.261 +
   1.262 +TInt DMemModelAlignedShBuf::FindMapping(DShBufMapping*& aMapping, DMemModelProcess* aProcess)
   1.263 +	{
   1.264 +	// Must be in critical section so we don't leak os asid references.
   1.265 +	__ASSERT_CRITICAL;
   1.266 +	__NK_ASSERT_DEBUG(iPool->iLock.HeldByCurrentThread());
   1.267 +
   1.268 +	TInt r = KErrNotFound;
   1.269 +	aMapping = NULL;
   1.270 +
   1.271 +	// Open a reference on aProcess's os asid so that it can't be freed and 
   1.272 +	// reused while searching.
   1.273 +	TInt osAsid = aProcess->TryOpenOsAsid();
   1.274 +	if (osAsid < 0)
   1.275 +		{// aProcess has died and freed its os asid.
   1.276 +		return KErrDied;
   1.277 +		}
   1.278 +
   1.279 +	SDblQueLink* pLink = iMappings.First();
   1.280 +	SDblQueLink* end = reinterpret_cast<SDblQueLink*>(&iMappings);
   1.281 +	DShBufMapping* m = NULL;
   1.282 +
   1.283 +	while (pLink != end)
   1.284 +		{
   1.285 +		m = _LOFF(pLink, DShBufMapping, iObjLink);
   1.286 +
   1.287 +		if (m->iOsAsid == osAsid)
   1.288 +			{
   1.289 +			aMapping = m;
   1.290 +			r = KErrNone;
   1.291 +			break;
   1.292 +			}
   1.293 +		pLink = pLink->iNext;
   1.294 +		}
   1.295 +
     1.296 +	// Close the reference on the os asid: if we found a mapping, that mapping's
     1.297 +	// lifetime determines whether the process still owns an os asid.
   1.298 +	aProcess->CloseOsAsid();	
   1.299 +	return r;
   1.300 +	}
   1.301 +
   1.302 +TInt DMemModelAlignedShBuf::UnMap(DProcess* aProcess)
   1.303 +	{
   1.304 +	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelAlignedShBuf::UnMap()"));
   1.305 +
   1.306 +	TInt r = KErrNone;
   1.307 +
   1.308 +	DMemModelProcess* pP = reinterpret_cast<DMemModelProcess*>(aProcess);
   1.309 +
   1.310 +	DShBufMapping* m = NULL;
   1.311 +	TWait wait;
   1.312 +
   1.313 +	for(;;)
   1.314 +		{
   1.315 +		iPool->LockPool();
   1.316 +		r = FindMapping(m, pP);
   1.317 +
   1.318 +		if (r != KErrNone)
   1.319 +			{
   1.320 +			iPool->UnlockPool();
   1.321 +			return KErrNotFound;
   1.322 +			}
   1.323 +
   1.324 +		if (m->iTransitioning)
   1.325 +			{
   1.326 +			wait.Link(m->iTransitions);
   1.327 +			iPool->UnlockPool();
   1.328 +			wait.Wait();
   1.329 +			}
   1.330 +		else
   1.331 +			{
   1.332 +			break;
   1.333 +			}
   1.334 +		}
   1.335 +
   1.336 +	m->iTransitioning = ETrue;
   1.337 +	iPool->UnlockPool();
   1.338 +
   1.339 +	MM::MappingUnmap(m->iMapping);
   1.340 +
   1.341 +	iPool->LockPool();
   1.342 +	DMemModelAlignedShPoolClient* client = reinterpret_cast<DMemModelAlignedShPoolClient*>(iPool->iClientMap->Find(reinterpret_cast<TUint>(aProcess)));
   1.343 +
   1.344 +	__NK_ASSERT_DEBUG(client);
   1.345 +
   1.346 +	TWait* list = m->iTransitions;
   1.347 +	m->iTransitions = NULL;
   1.348 +	m->iObjLink.Deque();
   1.349 +	m->iTransitioning = EFalse;
   1.350 +
   1.351 +	DMemModelAlignedShPool* pool = reinterpret_cast<DMemModelAlignedShPool*>(iPool);
   1.352 +	pool->ReleaseMapping(m, client);
   1.353 +
   1.354 +	if (aProcess == K::TheKernelProcess)
   1.355 +	    iRelAddress = NULL;
   1.356 +
   1.357 +	iPool->UnlockPool();
   1.358 +
   1.359 +	wait.SignalAll(list);
   1.360 +	return KErrNone;
   1.361 +	}
   1.362 +
   1.363 +TUint8* DMemModelAlignedShBuf::Base(DProcess* aProcess)
   1.364 +	{
   1.365 +	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelAlignedShBuf::Base()"));
   1.366 +	DMemModelProcess* pP = reinterpret_cast<DMemModelProcess*>(aProcess);
   1.367 +
   1.368 +	DShBufMapping* mapping = NULL;
   1.369 +	iPool->LockPool();
   1.370 +	TInt r = FindMapping(mapping, pP);
   1.371 +	TUint8* base = NULL;
   1.372 +
   1.373 +	if (r == KErrNone)
   1.374 +		base = reinterpret_cast<TUint8*>(MM::MappingBase(mapping->iMapping));
   1.375 +	iPool->UnlockPool();
   1.376 +
   1.377 +	return base;
   1.378 +	}
   1.379 +
   1.380 +TUint8* DMemModelAlignedShBuf::Base()
   1.381 +	{
   1.382 +	return reinterpret_cast<TUint8*>(iRelAddress);
   1.383 +	}
   1.384 +
   1.385 +TInt DMemModelAlignedShBuf::Pin(TPhysicalPinObject* aPinObject, TBool aReadOnly, TPhysAddr& aAddress, TPhysAddr* aPages, TUint32& aMapAttr, TUint& aColour)
   1.386 +	{
   1.387 +	CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"DMemModelAlignedShBuf::Pin");
   1.388 +
   1.389 +	TInt r = MM::PinPhysicalMemory(iMemoryObject, (DPhysicalPinMapping*)aPinObject, 0,
   1.390 +								   MM::RoundToPageCount(Size()),
   1.391 +								   aReadOnly, aAddress, aPages, aMapAttr, aColour);
   1.392 +
   1.393 +	return r;
   1.394 +	}
   1.395 +
   1.396 +TInt DMemModelAlignedShPool::GetFreeMapping(DShBufMapping*& aMapping, DMemModelAlignedShPoolClient* aClient)
   1.397 +	{
   1.398 +	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelAlignedShPool::GetFreeMapping()"));
   1.399 +	__NK_ASSERT_DEBUG(iLock.HeldByCurrentThread());
   1.400 +
   1.401 +	TInt r = KErrNotFound;
   1.402 +	aMapping = NULL;
   1.403 +
   1.404 +	if (aClient)
   1.405 +		{
   1.406 +		if (!aClient->iMappingFreeList.IsEmpty())
   1.407 +			{
   1.408 +			aMapping = _LOFF(aClient->iMappingFreeList.GetFirst(), DShBufMapping, iObjLink);
   1.409 +			r = KErrNone;
   1.410 +			}
   1.411 +		else
   1.412 +			{
   1.413 +			r = KErrNoMemory;
   1.414 +			}
   1.415 +		}
   1.416 +
   1.417 +	__KTRACE_OPT(KMMU2, Kern::Printf("DMemModelAlignedShPool::GetFreeMapping(0x%08x, 0x%08x) returns %d", aMapping, aClient, r));
   1.418 +	return r;
   1.419 +	}
   1.420 +
   1.421 +TInt DMemModelAlignedShPool::ReleaseMapping(DShBufMapping*& aMapping, DMemModelAlignedShPoolClient* aClient)
   1.422 +	{
   1.423 +	__KTRACE_OPT(KMMU2, Kern::Printf("DMemModelAlignedShPool::ReleaseMapping(0x%08x,0x%08x)",aMapping,aClient));
   1.424 +	__NK_ASSERT_DEBUG(iLock.HeldByCurrentThread());
   1.425 +
   1.426 +	TInt r = KErrNone;
   1.427 +
   1.428 +	if (aClient)
   1.429 +		{
   1.430 +		aClient->iMappingFreeList.AddHead(&aMapping->iObjLink);
   1.431 +		aMapping = NULL;
   1.432 +		}
   1.433 +	else
   1.434 +		{
     1.435 +		// pool has probably been closed, so delete the mapping
   1.436 +		r = KErrNotFound;
   1.437 +		__KTRACE_OPT(KMMU2, Kern::Printf("DMemModelAlignedShPool::ReleaseMapping delete 0x%08x",aMapping));
   1.438 +		UnlockPool(); // have to release fast lock for MappingDestroy
   1.439 +		MM::MappingDestroy(aMapping->iMapping);
   1.440 +		delete aMapping;
   1.441 +		aMapping = NULL;
   1.442 +		LockPool();
   1.443 +		}
   1.444 +
   1.445 +	return r;
   1.446 +	}
   1.447 +
   1.448 +TInt DMemModelAlignedShPool::SetBufferWindow(DProcess* aProcess, TInt aWindowSize)
   1.449 +	{
   1.450 +	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelAlignedShPool::SetBufferWindow()"));
   1.451 +
   1.452 +	// Create and construct mappings but do not map
   1.453 +	// also allocate reserved handles
   1.454 +	TInt r = KErrNone;
   1.455 +	TUint noOfBuffers = aWindowSize;
   1.456 +
   1.457 +	if (aWindowSize > static_cast<TInt>(iMaxBuffers))
   1.458 +		return KErrArgument;
   1.459 +
   1.460 +	Kern::MutexWait(*iProcessLock);
   1.461 +
   1.462 +	LockPool();
   1.463 +	DMemModelAlignedShPoolClient* client = reinterpret_cast<DMemModelAlignedShPoolClient*>(iClientMap->Find(reinterpret_cast<TUint>(aProcess)));
   1.464 +	UnlockPool();
   1.465 +
   1.466 +	if (client)
   1.467 +		{
   1.468 +		if (client->iWindowSize != 0)
   1.469 +			{
   1.470 +			Kern::MutexSignal(*iProcessLock);
   1.471 +			return KErrAlreadyExists;
   1.472 +			}
   1.473 +
   1.474 +		if (aWindowSize < 0)
   1.475 +			{
   1.476 +			noOfBuffers = iTotalBuffers;
   1.477 +			}
   1.478 +
   1.479 +		DMemModelProcess* pP = reinterpret_cast<DMemModelProcess*>(aProcess);
   1.480 +		r = CreateMappings(client, noOfBuffers, pP);
   1.481 +
   1.482 +		if (r == KErrNone)
   1.483 +			{
   1.484 +			client->iWindowSize = aWindowSize;
   1.485 +			}
   1.486 +		else
   1.487 +			{
   1.488 +			DestroyMappings(client, noOfBuffers);
   1.489 +			}
   1.490 +		}
   1.491 +	else
   1.492 +		{
   1.493 +		r = KErrNotFound;
   1.494 +		}
   1.495 +
   1.496 +	Kern::MutexSignal(*iProcessLock);
   1.497 +
   1.498 +	return r;
   1.499 +	}
   1.500 +
   1.501 +TInt DMemModelAlignedShPool::MappingNew(DShBufMapping*& aMapping, DMemModelProcess* aProcess)
   1.502 +	{
   1.503 +	// Must be in critical section so we don't leak os asid references.
   1.504 +	__ASSERT_CRITICAL;
   1.505 +
   1.506 +	TMappingCreateFlags flags=EMappingCreateDefault;
   1.507 +
   1.508 +	FlagSet(flags, EMappingCreateReserveAllResources);
   1.509 +
     1.510 +	// Open a reference to aProcess's os asid so it isn't freed and reused while
   1.511 +	// we're creating this mapping.
   1.512 +	TInt osAsid = aProcess->TryOpenOsAsid();
   1.513 +	if (osAsid < 0)
   1.514 +		{// The process has freed its os asid so can't create a new mapping.
   1.515 +		return KErrDied;
   1.516 +		}
   1.517 +
   1.518 +	DMemoryMapping* mapping = NULL;
   1.519 +	DShBufMapping* m = NULL;
   1.520 +	TInt r = MM::MappingNew(mapping, MM::RoundToPageCount(iBufGap), osAsid, flags);
   1.521 +
   1.522 +	if (r == KErrNone)
   1.523 +		{
   1.524 +		m = new DShBufMapping;
   1.525 +
   1.526 +		if (m)
   1.527 +			{
   1.528 +			m->iMapping = mapping;
   1.529 +			m->iOsAsid = osAsid;
   1.530 +			}
   1.531 +		else
   1.532 +			{
   1.533 +			MM::MappingDestroy(mapping);
   1.534 +			r = KErrNoMemory;
   1.535 +			}
   1.536 +		}
   1.537 +
     1.538 +	// Close the reference on the os asid: while aMapping remains valid, the
     1.539 +	// os asid must remain valid too.
   1.540 +	aProcess->CloseOsAsid();
   1.541 +
   1.542 +	aMapping = m;
   1.543 +	__KTRACE_OPT(KMMU2, Kern::Printf("DMemModelAlignedShPool::MappingNew returns 0x%08x,%d",aMapping,r));
   1.544 +	return r;
   1.545 +	}
   1.546 +
   1.547 +TInt DMemModelAlignedShPool::AddToProcess(DProcess* aProcess, TUint aAttr)
   1.548 +	{
   1.549 +	__KTRACE_OPT(KMMU,Kern::Printf("Adding DMemModelAlignedShPool %O to process %O",this,aProcess));
   1.550 +	TInt r = KErrNone;
   1.551 +
   1.552 +	Kern::MutexWait(*iProcessLock);
   1.553 +
   1.554 +	LockPool();
   1.555 +	DShPoolClient* client = reinterpret_cast<DShPoolClient*>(iClientMap->Find(reinterpret_cast<TUint>(aProcess)));
   1.556 +	UnlockPool();
   1.557 +
   1.558 +	if (!client)
   1.559 +		{
   1.560 +		client = new DMemModelAlignedShPoolClient;
   1.561 +		if (client)
   1.562 +			{
   1.563 +			client->iFlags = aAttr;
   1.564 +			r = iClientMap->Add(reinterpret_cast<TUint>(aProcess), client);
   1.565 +
   1.566 +			if (r == KErrNone)
   1.567 +				{
   1.568 +				if (aProcess != K::TheKernelProcess)
   1.569 +					{
   1.570 +					r = aProcess->iHandles.Reserve(iTotalBuffers);
   1.571 +
   1.572 +					if (r != KErrNone)
   1.573 +						{
   1.574 +						iClientMap->Remove(reinterpret_cast<TUint>(aProcess));
   1.575 +						}
   1.576 +					}
   1.577 +				}
   1.578 +			if (r != KErrNone)
   1.579 +				{
   1.580 +				delete client;
   1.581 +				r = KErrNoMemory;
   1.582 +				}
   1.583 +			}
   1.584 +		else
   1.585 +			{
   1.586 +			r = KErrNoMemory;
   1.587 +			}
   1.588 +		}
   1.589 +	else
   1.590 +		{
   1.591 +		LockPool();
   1.592 +		client->iAccessCount++;
   1.593 +		UnlockPool();
   1.594 +		}
   1.595 +
   1.596 +	Kern::MutexSignal(*iProcessLock);
   1.597 +
   1.598 +	return r;
   1.599 +	}
   1.600 +
     1.601 +DMemModelAlignedShPool::DMemModelAlignedShPool() : DMemModelShPool()
     1.603 +	{
   1.604 +	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelAlignedShPool::DMemModelAlignedShPool"));
   1.605 +	}
   1.606 +
   1.607 +void DMemModelAlignedShPool::Free(DShBuf* aBuf)
   1.608 +	{
   1.609 +	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelAlignedShPool::Free (aBuf = 0x%08x)", aBuf));
   1.610 +
   1.611 +	LockPool();
   1.612 +#ifdef _DEBUG
   1.613 +	// Remove from allocated list
   1.614 +	aBuf->iObjLink.Deque();
   1.615 +#endif
   1.616 +
   1.617 +	DMemModelAlignedShBuf* buf = reinterpret_cast<DMemModelAlignedShBuf*>(aBuf);
   1.618 +
   1.619 +	if (MM::MemoryIsNotMapped(buf->iMemoryObject))
   1.620 +		{
   1.621 +		UnlockPool(); // have to release fast mutex
   1.622 +		MM::MemoryWipe(buf->iMemoryObject);
   1.623 +		LockPool();
   1.624 +
   1.625 +		// we want to put the initial buffers at the head of the free list
   1.626 +		// and the grown buffers at the tail as this makes shrinking more efficient
   1.627 +		if (aBuf >= iInitialBuffersArray && aBuf < (iInitialBuffersArray + iInitialBuffers))
   1.628 +			{
   1.629 +			iFreeList.AddHead(&aBuf->iObjLink);
   1.630 +			}
   1.631 +		else
   1.632 +			{
   1.633 +			iFreeList.Add(&aBuf->iObjLink);
   1.634 +			}
   1.635 +		++iFreeBuffers;
   1.636 +#ifdef _DEBUG
   1.637 +		--iAllocatedBuffers;
   1.638 +#endif
   1.639 +		}
   1.640 +	else
   1.641 +		{
   1.642 +		iPendingList.Add(&aBuf->iObjLink);
   1.643 +		}
   1.644 +
   1.645 +	iPoolFlags &= ~EShPoolSuppressShrink;		// Allow shrinking again, if it was blocked
   1.646 +	UnlockPool();
   1.647 +
   1.648 +	// queue ManagementDfc which completes notifications as appropriate
   1.649 +	if (HaveWorkToDo())
   1.650 +		KickManagementDfc();
   1.651 +
   1.652 +	DShPool::Close(NULL); // decrement pool reference count
   1.653 +	}
   1.654 +
   1.655 +TInt DMemModelAlignedShPool::UpdateFreeList()
   1.656 +	{
   1.657 +	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelAlignedShPool::UpdateFreeList"));
   1.658 +
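          +	// Buffers freed while their memory was still mapped were parked on
          +	// iPendingList (see Free()); move any that have since become unmapped
          +	// onto the free list, wiping their contents first.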
   1.659 +	LockPool();
   1.660 +	SDblQueLink* pLink = iPendingList.First();
   1.661 +	UnlockPool();
   1.662 +
   1.663 +	SDblQueLink* anchor = &iPendingList.iA;
   1.664 +
   1.665 +	while (pLink != anchor)
   1.666 +		{
   1.667 +		DMemModelAlignedShBuf* buf = _LOFF(pLink, DMemModelAlignedShBuf, iObjLink);
   1.668 +		LockPool();
   1.669 +		pLink = pLink->iNext;
   1.670 +		UnlockPool();
   1.671 +
   1.672 +		if (MM::MemoryIsNotMapped(buf->iMemoryObject))
   1.673 +			{
   1.674 +			LockPool();
   1.675 +			buf->iObjLink.Deque();
   1.676 +			UnlockPool();
   1.677 +
   1.678 +			MM::MemoryWipe(buf->iMemoryObject);
   1.679 +
   1.680 +			LockPool();
   1.681 +			if (buf >= iInitialBuffersArray && buf < (iInitialBuffersArray + iInitialBuffers))
   1.682 +				{
   1.683 +				iFreeList.AddHead(&buf->iObjLink);
   1.684 +				}
   1.685 +			else
   1.686 +				{
   1.687 +				iFreeList.Add(&buf->iObjLink);
   1.688 +				}
   1.689 +			++iFreeBuffers;
   1.690 +#ifdef _DEBUG
   1.691 +			--iAllocatedBuffers;
   1.692 +#endif
   1.693 +			UnlockPool();
   1.694 +			}
   1.695 +		}
   1.696 +
   1.697 +	__KTRACE_OPT(KMMU, Kern::Printf("<DMemModelAlignedShPool::UpdateFreeList"));
   1.698 +	return KErrNone;
   1.699 +	}
   1.700 +
   1.701 +DMemModelAlignedShPool::~DMemModelAlignedShPool()
   1.702 +	{
   1.703 +	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelAlignedShPool::~DMemModelAlignedShPool"));
   1.704 +	}
   1.705 +
   1.706 +TInt DMemModelAlignedShPool::DoCreate(TShPoolCreateInfo& aInfo)
   1.707 +	{
   1.708 +
   1.709 +	TUint64 maxSize64 = static_cast<TUint64>(aInfo.iInfo.iMaxBufs) * static_cast<TUint64>(iBufGap);
   1.710 +
   1.711 +	if (maxSize64 > static_cast<TUint64>(KMaxTInt) || maxSize64 <= static_cast<TUint64>(0))
   1.712 +		return KErrArgument;
   1.713 +
   1.714 +	iMaxPages = MM::RoundToPageCount(static_cast<TInt>(maxSize64));
   1.715 +
   1.716 +	return KErrNone;
   1.717 +	}
   1.718 +
   1.719 +TInt DMemModelAlignedShPool::DestroyAllMappingsAndReservedHandles(DProcess* aProcess)
   1.720 +	{
   1.721 +	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelAlignedShPool::DestroyAllMappingsAndReservedHandles(0x%08x)", aProcess));
   1.722 +
   1.723 +	TInt r = KErrNone;
   1.724 +	Kern::MutexWait(*iProcessLock);
   1.725 +	DMemModelAlignedShPoolClient* client = reinterpret_cast<DMemModelAlignedShPoolClient*>(iClientMap->Remove(reinterpret_cast<TUint>(aProcess)));
   1.726 +
   1.727 +	__NK_ASSERT_DEBUG(client);
   1.728 +	__NK_ASSERT_DEBUG(client->iAccessCount == 0);
   1.729 +
   1.730 +	DestroyMappings(client, KMaxTInt);
   1.731 +	delete client;
   1.732 +
   1.733 +	if (aProcess != K::TheKernelProcess)
   1.734 +		{
   1.735 +		// Remove reserved handles
   1.736 +		r = aProcess->iHandles.Reserve(-iTotalBuffers);
   1.737 +		}
   1.738 +
   1.739 +	Kern::MutexSignal(*iProcessLock);
   1.740 +
   1.741 +	__KTRACE_OPT(KMMU, Kern::Printf("<DMemModelAlignedShPool::DestroyAllMappingsAndReservedHandles(0x%08x)", aProcess));
   1.742 +
   1.743 +	return r;
   1.744 +	}
   1.745 +
   1.746 +TInt DMemModelAlignedShPool::DestroyMappings(DMemModelAlignedShPoolClient* aClient, TInt aNoOfMappings)
   1.747 +	{
   1.748 +	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelAlignedShPool::DestroyMappings(0x%08x)", aClient));
   1.749 +
   1.750 +	TInt r = KErrNone;
   1.751 +	TInt i = 0;
   1.752 +
   1.753 +	DShBufMapping* m = NULL;
   1.754 +	SDblQueLink* pLink = NULL;
   1.755 +
   1.756 +	while (i < aNoOfMappings && !aClient->iMappingFreeList.IsEmpty())
   1.757 +		{
   1.758 +		LockPool();
   1.759 +		pLink = aClient->iMappingFreeList.GetFirst();
   1.760 +		UnlockPool();
   1.761 +
   1.762 +		if (pLink == NULL)
   1.763 +			break;
   1.764 +
   1.765 +		m = _LOFF(pLink, DShBufMapping, iObjLink);
   1.766 +		__KTRACE_OPT(KMMU2, Kern::Printf("DMemModelAlignedShPool::DestroyMappings delete 0x%08x",m));
   1.767 +		MM::MappingClose(m->iMapping);
   1.768 +		delete m;
   1.769 +		++i;
   1.770 +		}
   1.771 +
   1.772 +	__KTRACE_OPT(KMMU, Kern::Printf("<DMemModelAlignedShPool::DestroyMappings"));
   1.773 +
   1.774 +	return r;
   1.775 +	}
   1.776 +
   1.777 +
   1.778 +TInt DMemModelAlignedShPool::CreateMappings(DMemModelAlignedShPoolClient* aClient, TInt aNoOfMappings, DMemModelProcess* aProcess)
   1.779 +	{
   1.780 +	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelAlignedShPool::CreateMappings"));
   1.781 +
   1.782 +	__ASSERT_MUTEX(iProcessLock);
   1.783 +
   1.784 +	TInt r = KErrNone;
   1.785 +
   1.786 +	for (TInt i = 0; i < aNoOfMappings; ++i)
   1.787 +		{
   1.788 +		DShBufMapping* mapping;
   1.789 +		r = MappingNew(mapping, aProcess);
   1.790 +		if (r == KErrNone)
   1.791 +			{
   1.792 +			LockPool();
   1.793 +			aClient->iMappingFreeList.AddHead(&mapping->iObjLink);
   1.794 +			UnlockPool();
   1.795 +			}
   1.796 +		else
   1.797 +			{
   1.798 +			r = KErrNoMemory;
   1.799 +			break;
   1.800 +			}
   1.801 +		}
   1.802 +
   1.803 +	return r;
   1.804 +	}
   1.805 +
   1.806 +TInt DMemModelAlignedShPool::UpdateMappingsAndReservedHandles(TInt aNoOfBuffers)
   1.807 +	{
   1.808 +	__KTRACE_OPT(KMMU2, Kern::Printf(">DMemModelAlignedShPool::UpdateMappingsAndReservedHandles(0x%08x)", aNoOfBuffers));
   1.809 +
   1.810 +	SMap::TIterator iter(*iClientMap);
   1.811 +	SMap::TEntry* entry;
   1.812 +	SMap::TEntry* lastEntry = NULL;
   1.813 +	DMemModelProcess* pP;
   1.814 +	DMemModelAlignedShPoolClient* client;
   1.815 +	TInt result = KErrNone;
   1.816 +
   1.817 +	Kern::MutexWait(*iProcessLock);
   1.818 +
   1.819 +	// First handle the case of increasing allocation
   1.820 +	if (aNoOfBuffers > 0)
   1.821 +		while ((entry = iter.Next()) != lastEntry)
   1.822 +			{
   1.823 +			// Try to update handle reservation; skip if process is null or has gone away
   1.824 +			client = (DMemModelAlignedShPoolClient*)(entry->iObj);
   1.825 +			pP = (DMemModelProcess*)(entry->iKey);
   1.826 +			if (!pP)
   1.827 +				continue;
   1.828 +			TInt r = pP->iHandles.Reserve(aNoOfBuffers);
   1.829 +			if (r)
   1.830 +				__KTRACE_OPT(KMMU2, Kern::Printf("?DMemModelAlignedShPool::UpdateMappingsAndReservedHandles(0x%08x) Reserve failed %d", aNoOfBuffers, r));
   1.831 +			if (r == KErrDied)
   1.832 +				continue;
   1.833 +
   1.834 +			if (r == KErrNone && client->iWindowSize <= 0)
   1.835 +				{
   1.836 +				// A positive window size means the number of mappings is fixed, so we don't need to reserve more.
   1.837 +				// But here zero or negative means a variable number, so we need to create extra mappings now.
   1.838 +				r = CreateMappings(client, aNoOfBuffers, pP);
   1.839 +				if (r != KErrNone)
   1.840 +					{
   1.841 +					__KTRACE_OPT(KMMU2, Kern::Printf("?DMemModelAlignedShPool::UpdateMappingsAndReservedHandles(0x%08x) CreateMappings failed %d", aNoOfBuffers, r));
   1.842 +					pP->iHandles.Reserve(-aNoOfBuffers); // Creation failed, so release the handles reserved above
   1.843 +					}
   1.844 +				}
   1.845 +
   1.846 +			if (r != KErrNone)
   1.847 +				{
   1.848 +				// Some problem; cleanup as best we can by falling into the loop below to undo what we've done
   1.849 +				result = r;
   1.850 +				iter.Reset();
   1.851 +				lastEntry = entry;
   1.852 +				aNoOfBuffers = -aNoOfBuffers;
   1.853 +				break;
   1.854 +				}
   1.855 +			}
   1.856 +
   1.857 +	// Now handle the case of decreasing allocation; also used for recovery from errors, in which case
   1.858 +	// this loop iterates only over the elements that were *successfully* processed by the loop above
   1.859 +	if (aNoOfBuffers < 0)
   1.860 +		while ((entry = iter.Next()) != lastEntry)
   1.861 +			{
   1.862 +			// Try to update handle reservation; skip if process is null or has gone away
   1.863 +			client = (DMemModelAlignedShPoolClient*)(entry->iObj);
   1.864 +			pP = (DMemModelProcess*)(entry->iKey);
   1.865 +			if (!pP)
   1.866 +				continue;
   1.867 +			TInt r = pP->iHandles.Reserve(aNoOfBuffers);
   1.868 +			if (r == KErrDied)
   1.869 +				continue;
   1.870 +
   1.871 +			if (r == KErrNone && client->iWindowSize <= 0)
   1.872 +				r = DestroyMappings(client, -aNoOfBuffers);
   1.873 +			// De-allocation by Reserve(-n) and/or DestroyMappings() should never fail
   1.874 +			if (r != KErrNone)
   1.875 +				Kern::PanicCurrentThread(KLitDMemModelAlignedShPool, r);
   1.876 +			}
   1.877 +
   1.878 +	Kern::MutexSignal(*iProcessLock);
   1.879 +
   1.880 +	__KTRACE_OPT(KMMU2, Kern::Printf("<DMemModelAlignedShPool::UpdateMappingsAndReservedHandles(0x%08x) returning %d", aNoOfBuffers, result));
   1.881 +	return result;
   1.882 +	}
   1.883 +
   1.884 +TInt DMemModelAlignedShPool::DeleteInitialBuffers()
   1.885 +	{
   1.886 +	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelAlignedShPool::DeleteInitialBuffers"));
   1.887 +
   1.888 +	if (iInitialBuffersArray != NULL)
   1.889 +		{
   1.890 +		for (TUint i = 0; i < iInitialBuffers; i++)
   1.891 +			{
   1.892 +			iInitialBuffersArray[i].iObjLink.Deque(); // remove from free list
   1.893 +			iInitialBuffersArray[i].Dec();
   1.894 +			iInitialBuffersArray[i].~DMemModelAlignedShBuf();
   1.895 +			}
   1.896 +		}
   1.897 +
   1.898 +	Kern::Free(iInitialBuffersArray);
   1.899 +	iInitialBuffersArray = NULL;
   1.900 +
   1.901 +	return KErrNone;
   1.902 +	}
   1.903 +
   1.904 +TInt DMemModelAlignedShPool::Close(TAny* aPtr)
   1.905 +	{
   1.906 +	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelAlignedShPool::Close(0x%08x)", aPtr));
   1.907 +
   1.908 +	if (aPtr)
   1.909 +		{
   1.910 +		DProcess* pP = reinterpret_cast<DProcess*>(aPtr);
   1.911 +
   1.912 +		CloseClient(pP);
   1.913 +		}
   1.914 +	__KTRACE_OPT(KMMU, Kern::Printf("<DMemModelAlignedShPool::Close(0x%08x)", aPtr));
   1.915 +	return DShPool::Close(aPtr);
   1.916 +	}
   1.917 +
   1.918 +TInt DMemModelAlignedShPool::CreateInitialBuffers()
   1.919 +	{
   1.920 +	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelAlignedShPool::CreateInitialBuffers"));
   1.921 +
   1.922 +	iInitialBuffersArray = reinterpret_cast<DMemModelAlignedShBuf*>(Kern::Alloc(iInitialBuffers * sizeof(DMemModelAlignedShBuf)));
   1.923 +
   1.924 +	if (iInitialBuffersArray == NULL)
   1.925 +		return KErrNoMemory;
   1.926 +
   1.927 +	for (TUint i = 0; i < iInitialBuffers; i++)
   1.928 +		{
   1.929 +		// always use kernel linear address in DShBuf
   1.930 +		DMemModelAlignedShBuf *buf = new (&iInitialBuffersArray[i]) DMemModelAlignedShBuf(this);
   1.931 +		TInt r = buf->Construct();
   1.932 +
   1.933 +		if (r == KErrNone)
   1.934 +			{
   1.935 +			iFreeList.Add(&buf->iObjLink);
   1.936 +			}
   1.937 +		else
   1.938 +			{
   1.939 +			iInitialBuffers = i;
   1.940 +			return KErrNoMemory;
   1.941 +			}
   1.942 +		}
   1.943 +
   1.944 +	iFreeBuffers  = iInitialBuffers;
   1.945 +	iTotalBuffers = iInitialBuffers;
   1.946 +	return KErrNone;
   1.947 +	}
   1.948 +
   1.949 +
   1.950 +TInt DMemModelAlignedShPool::GrowPool()
   1.951 +	{
   1.952 +	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelAlignedShPool::GrowPool()"));
   1.953 +	TInt r = KErrNone;
   1.954 +	SDblQue temp;
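          +	// Build the new buffers on a temporary queue; they are only moved onto
          +	// the free list once mappings and handle reservations have succeeded.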
   1.955 +
   1.956 +	Kern::MutexWait(*iProcessLock);
   1.957 +
   1.958 +	TUint32 headroom = iMaxBuffers - iTotalBuffers;
   1.959 +
   1.960 +	// How many buffers to grow by?
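          +	// (mult_fx248 scales iTotalBuffers by the fixed-point ratio iGrowByRatio)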
   1.961 +	TUint32 grow = mult_fx248(iTotalBuffers, iGrowByRatio);
   1.962 +	if (grow == 0)			// Handle round-to-zero
   1.963 +		grow = 1;
   1.964 +	if (grow > headroom)
   1.965 +		grow = headroom;
   1.966 +
   1.967 +	TUint i;
   1.968 +	for (i = 0; i < grow; ++i)
   1.969 +		{
   1.970 +		DMemModelAlignedShBuf *buf = new DMemModelAlignedShBuf(this);
   1.971 +
   1.972 +		if (buf == NULL)
   1.973 +			{
   1.974 +			r = KErrNoMemory;
   1.975 +			break;
   1.976 +			}
   1.977 +
     1.978 +		r = buf->Construct();
   1.979 +
   1.980 +		if (r != KErrNone)
   1.981 +			{
   1.982 +			buf->DObject::Close(NULL);
   1.983 +			break;
   1.984 +			}
   1.985 +
   1.986 +		temp.Add(&buf->iObjLink);
   1.987 +		}
   1.988 +
   1.989 +	r = UpdateMappingsAndReservedHandles(i);
   1.990 +
   1.991 +	if (r == KErrNone)
   1.992 +		{
   1.993 +		LockPool();
   1.994 +		iFreeList.MoveFrom(&temp);
   1.995 +		iFreeBuffers += i;
   1.996 +		iTotalBuffers += i;
   1.997 +		UnlockPool();
   1.998 +		}
   1.999 +	else
  1.1000 +		{
    1.1001 +		// couldn't create the mappings or reserve the handles, so we have no
    1.1002 +		// choice but to delete the buffers
  1.1003 +		SDblQueLink *pLink;
  1.1004 +		while ((pLink = temp.GetFirst()) != NULL)
  1.1005 +			{
  1.1006 +			DShBuf* buf = _LOFF(pLink, DShBuf, iObjLink);
  1.1007 +			buf->DObject::Close(NULL);
  1.1008 +			}
  1.1009 +		}
  1.1010 +
  1.1011 +	CalculateGrowShrinkTriggers();
  1.1012 +
  1.1013 +	Kern::MutexSignal(*iProcessLock);
  1.1014 +
  1.1015 +	__KTRACE_OPT(KMMU, Kern::Printf("<DMemModelAlignedShPool::GrowPool()"));
  1.1016 +	return r;
  1.1017 +	}
  1.1018 +
  1.1019 +TInt DMemModelAlignedShPool::ShrinkPool()
  1.1020 +	{
    1.1021 +	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelAlignedShPool::ShrinkPool()"));
  1.1022 +
  1.1023 +	Kern::MutexWait(*iProcessLock);
  1.1024 +
  1.1025 +	TUint32 grownBy = iTotalBuffers - iInitialBuffers;
  1.1026 +
  1.1027 +	// How many buffers to shrink by?
  1.1028 +	TUint32 shrink = mult_fx248(iTotalBuffers, iShrinkByRatio);
  1.1029 +	if (shrink == 0)		// Handle round-to-zero
  1.1030 +		shrink = 1;
  1.1031 +	if (shrink > grownBy)
  1.1032 +		shrink = grownBy;
  1.1033 +	if (shrink > iFreeBuffers)
  1.1034 +		shrink = iFreeBuffers;
  1.1035 +
  1.1036 +	// work backwards as the grown buffers should be at the back
  1.1037 +	TUint i;
  1.1038 +	for (i = 0; i < shrink; i++)
  1.1039 +		{
  1.1040 +		LockPool();
  1.1041 +
  1.1042 +		if (iFreeList.IsEmpty())
  1.1043 +			{
  1.1044 +			UnlockPool();
  1.1045 +			break;
  1.1046 +			}
  1.1047 +
  1.1048 +		DShBuf* buf = _LOFF(iFreeList.Last(), DShBuf, iObjLink);
  1.1049 +
  1.1050 +		// can't delete initial buffers
  1.1051 +		if (buf >= iInitialBuffersArray && buf < (iInitialBuffersArray + iInitialBuffers))
  1.1052 +			{
  1.1053 +			UnlockPool();
  1.1054 +			break;
  1.1055 +			}
  1.1056 +
  1.1057 +		buf->iObjLink.Deque();
  1.1058 +		--iFreeBuffers;
  1.1059 +		--iTotalBuffers;
  1.1060 +		UnlockPool();
  1.1061 +		buf->DObject::Close(NULL);
  1.1062 +		}
  1.1063 +
  1.1064 +	TInt r = UpdateMappingsAndReservedHandles(-i);
  1.1065 +
  1.1066 +	// If we couldn't shrink the pool by this many buffers, wait until we Free() another
  1.1067 +	// buffer before trying to shrink again.
  1.1068 +	if (i < shrink)
  1.1069 +		iPoolFlags |= EShPoolSuppressShrink;
  1.1070 +
  1.1071 +	CalculateGrowShrinkTriggers();
  1.1072 +
  1.1073 +	Kern::MutexSignal(*iProcessLock);
  1.1074 +
  1.1075 +	__KTRACE_OPT(KMMU, Kern::Printf("<DMemModelAlignedShPool::ShrinkPool()"));
  1.1076 +	return r;
  1.1077 +	}
  1.1078 +
  1.1079 +// Kernel side API
  1.1080 +TInt DMemModelAlignedShPool::Alloc(DShBuf*& aShBuf)
  1.1081 +	{
  1.1082 +	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelAlignedShPool::Alloc (DShBuf)"));
  1.1083 +
  1.1084 +	TInt r = KErrNoMemory;
  1.1085 +	aShBuf = NULL;
  1.1086 +
  1.1087 +	LockPool();
  1.1088 +
  1.1089 +	if (!iFreeList.IsEmpty())
  1.1090 +		{
  1.1091 +		aShBuf = _LOFF(iFreeList.GetFirst(), DShBuf, iObjLink);
  1.1092 +
  1.1093 +#ifdef _DEBUG
  1.1094 +		iAllocated.Add(&aShBuf->iObjLink);
  1.1095 +		iAllocatedBuffers++;
  1.1096 +#endif
  1.1097 +		--iFreeBuffers;
  1.1098 +		Open(); // increment pool reference count
  1.1099 +		r = KErrNone;
  1.1100 +		}
  1.1101 +
  1.1102 +	UnlockPool();
  1.1103 +
  1.1104 +	if (HaveWorkToDo())
  1.1105 +		KickManagementDfc();
  1.1106 +
  1.1107 +	__KTRACE_OPT(KMMU, Kern::Printf("<DMemModelAlignedShPool::Alloc return buf = 0x%08x", aShBuf));
  1.1108 +	return r;
  1.1109 +	}
  1.1110 +
  1.1111 +DMemModelNonAlignedShBuf::DMemModelNonAlignedShBuf(DShPool* aPool, TLinAddr aRelAddr) : DShBuf(aPool, aRelAddr)
  1.1112 +	{
  1.1113 +	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelNonAlignedShBuf::DMemModelNonAlignedShBuf()"));
  1.1114 +	}
  1.1115 +
  1.1116 +DMemModelNonAlignedShBuf::~DMemModelNonAlignedShBuf()
  1.1117 +	{
  1.1118 +	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelNonAlignedShBuf::~DMemModelNonAlignedShBuf()"));
  1.1119 +	}
  1.1120 +
  1.1121 +TInt DMemModelNonAlignedShBuf::Close(TAny* aPtr)
  1.1122 +	{
  1.1123 +	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelNonAlignedShBuf::Close(0x%08x)", aPtr));
  1.1124 +
  1.1125 +	if (aPtr)
  1.1126 +		{
  1.1127 +		DProcess* pP = reinterpret_cast<DProcess*>(aPtr);
  1.1128 +
    1.1129 +		// there are no per-buffer resources for kernel clients of non-aligned buffers
  1.1130 +		if (pP != K::TheKernelProcess)
  1.1131 +		    iPool->CloseClient(pP);
  1.1132 +		}
  1.1133 +
  1.1134 +	return DShBuf::Close(aPtr);
  1.1135 +	}
  1.1136 +
  1.1137 +TInt DMemModelNonAlignedShBuf::AddToProcess(DProcess* aProcess, TUint /* aAttr */)
  1.1138 +	{
  1.1139 +	__KTRACE_OPT(KMMU, Kern::Printf("Adding DMemModelShBuf %O to process %O", this, aProcess));
  1.1140 +	TUint flags;
  1.1141 +
  1.1142 +	return iPool->OpenClient(aProcess, flags);
  1.1143 +	}
  1.1144 +
  1.1145 +
  1.1146 +TUint8* DMemModelNonAlignedShBuf::Base(DProcess* aProcess)
  1.1147 +	{
  1.1148 +	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelNonAlignedShBuf::Base(0x%x)", aProcess));
  1.1149 +
  1.1150 +	TUint8* base = reinterpret_cast<DMemModelNonAlignedShPool*>(iPool)->Base(aProcess) + (TUint)iRelAddress;
  1.1151 +
  1.1152 +	return base;
  1.1153 +	}
  1.1154 +
  1.1155 +TUint8* DMemModelNonAlignedShBuf::Base()
  1.1156 +	{
  1.1157 +	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelNonAlignedShBuf::Base()"));
  1.1158 +
  1.1159 +	TUint8* base = reinterpret_cast<DMemModelNonAlignedShPool*>(iPool)->Base();
  1.1160 +
  1.1161 +	return base ? base + iRelAddress : NULL;
  1.1162 +	}
  1.1163 +
  1.1164 +TInt DMemModelNonAlignedShBuf::Map(TUint /* aMapAttr */, DProcess* /* aProcess */, TLinAddr& /* aBase */)
  1.1165 +	{
  1.1166 +	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelNonAlignedShBuf::Map()"));
  1.1167 +
  1.1168 +	return KErrNotSupported;
  1.1169 +	}
  1.1170 +
  1.1171 +TInt DMemModelNonAlignedShBuf::UnMap(DProcess* /* aProcess */)
  1.1172 +	{
  1.1173 +	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelNonAlignedShBuf::UnMap()"));
  1.1174 +
  1.1175 +	return KErrNotSupported;
  1.1176 +	}
  1.1177 +
  1.1178 +TInt DMemModelNonAlignedShBuf::Pin(TPhysicalPinObject* aPinObject, TBool aReadOnly, TPhysAddr& aAddress, TPhysAddr* aPages, TUint32& aMapAttr, TUint& aColour)
  1.1179 +	{
  1.1180 +	CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"DMemModelNonAlignedShBuf::Pin");
  1.1181 +
  1.1182 +	DMemModelNonAlignedShPool* pool = reinterpret_cast<DMemModelNonAlignedShPool*>(iPool);
  1.1183 +
  1.1184 +	NKern::ThreadEnterCS();
  1.1185 +
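          +	// startPage is the index of the first page containing the buffer, while
          +	// RoundToPageCount() yields one past the index of the last page, so the
          +	// difference is the number of pages spanned (bumped to at least one).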
  1.1186 +	TInt startPage = iRelAddress >> KPageShift;
  1.1187 +	TInt lastPage = MM::RoundToPageCount(iRelAddress + Size());
  1.1188 +
  1.1189 +	TInt pages = lastPage - startPage;
  1.1190 +
  1.1191 +	if (!pages) pages++;
  1.1192 +
  1.1193 +	TInt r = MM::PinPhysicalMemory(pool->iMemoryObject, (DPhysicalPinMapping*)aPinObject,
  1.1194 +									startPage, pages, aReadOnly, aAddress, aPages, aMapAttr, aColour);
  1.1195 +
  1.1196 +	// adjust physical address to start of the buffer
  1.1197 +	if (r == KErrNone)
  1.1198 +		{
  1.1199 +		aAddress += (iRelAddress - (startPage << KPageShift));
  1.1200 +		}
  1.1201 +	NKern::ThreadLeaveCS();
  1.1202 +	return r;
  1.1203 +	}
  1.1204 +
  1.1205 +DMemModelNonAlignedShPool::DMemModelNonAlignedShPool() : DMemModelShPool()
  1.1206 +	{
  1.1207 +	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelNonAlignedShPool::DMemModelNonAlignedShPool"));
  1.1208 +	}
  1.1209 +
  1.1210 +DMemModelNonAlignedShPool::~DMemModelNonAlignedShPool()
  1.1211 +	{
  1.1212 +	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelNonAlignedShPool::~DMemModelNonAlignedShPool"));
  1.1213 +
  1.1214 +	MM::MemoryDestroy(iMemoryObject);
  1.1215 +
  1.1216 +	delete iPagesMap;
  1.1217 +	delete iBufMap;
  1.1218 +	}
  1.1219 +
  1.1220 +TInt DMemModelNonAlignedShPool::DoCreate(TShPoolCreateInfo& aInfo)
  1.1221 +	{
  1.1222 +	__KTRACE_OPT(KMMU, Kern::Printf("DMemModelNonAlignedShPool::DoCreate(%d, %d, %d)", aInfo.iInfo.iMaxBufs, iBufGap, iBufSize));
  1.1223 +
  1.1224 +	TInt r;
  1.1225 +	TUint64 maxSize64 = static_cast<TUint64>(aInfo.iInfo.iMaxBufs) * static_cast<TUint64>(iBufGap);
  1.1226 +
  1.1227 +	if (maxSize64 > static_cast<TUint64>(KMaxTInt) || maxSize64 <= static_cast<TUint64>(0))
  1.1228 +		return KErrArgument;
  1.1229 +
  1.1230 +	TInt maxPages = MM::RoundToPageCount(static_cast<TInt>(maxSize64));
  1.1231 +
  1.1232 +	iBufMap = TBitMapAllocator::New(aInfo.iInfo.iMaxBufs, (TBool)ETrue);
  1.1233 +	if (iBufMap == NULL)
  1.1234 +		return KErrNoMemory;
  1.1235 +
  1.1236 +	iPagesMap = TBitMapAllocator::New(maxPages, (TBool)ETrue);
  1.1237 +	if (iPagesMap == NULL)
  1.1238 +		return KErrNoMemory;
  1.1239 +
  1.1240 +	// Memory attributes
  1.1241 +	TMemoryAttributes attr = EMemoryAttributeStandard;
  1.1242 +
  1.1243 +	// Memory type
  1.1244 +	TMemoryObjectType memoryType = (iPoolFlags & EShPoolPhysicalMemoryPool) ? EMemoryObjectHardware : EMemoryObjectUnpaged;
  1.1245 +
  1.1246 +	// Memory flags
  1.1247 +	TMemoryCreateFlags memoryFlags = EMemoryCreateDefault;	// Don't leave previous contents of memory
  1.1248 +
  1.1249 +	// Now create the memory object
  1.1250 +	r = MM::MemoryNew(iMemoryObject, memoryType, maxPages, memoryFlags, attr);
  1.1251 +	if (r != KErrNone)
  1.1252 +		return r;
  1.1253 +
  1.1254 +	// Make sure we give the caller the number of buffers they were expecting
  1.1255 +	iCommittedPages = MM::RoundToPageCount(iInitialBuffers * iBufGap);
  1.1256 +
  1.1257 +	if (iPoolFlags & EShPoolPhysicalMemoryPool)
  1.1258 +		{
  1.1259 +		__KTRACE_OPT(KMMU, Kern::Printf("DMemModelNonAlignedShPool::DoCreate(iCommittedPages = 0x%08x, aInfo.iPhysAddr.iPhysAddrList = 0x%08x )", iCommittedPages, aInfo.iPhysAddr.iPhysAddrList));
  1.1260 +		if (iPoolFlags & EShPoolContiguous)
  1.1261 +			{
  1.1262 +			r = MM::MemoryAddContiguous(iMemoryObject, 0, iCommittedPages, aInfo.iPhysAddr.iPhysAddr);
  1.1263 +			}
  1.1264 +		else
  1.1265 +			{
  1.1266 +			r = MM::MemoryAddPages(iMemoryObject, 0, iCommittedPages, aInfo.iPhysAddr.iPhysAddrList);
  1.1267 +			}
  1.1268 +
  1.1269 +		iMaxPages = iCommittedPages;
  1.1270 +		}
  1.1271 +	else
  1.1272 +		{
  1.1273 +		__KTRACE_OPT(KMMU, Kern::Printf("DMemModelNonAlignedShPool::DoCreate(iCommittedPages = %d, contig = %d)", iCommittedPages, iPoolFlags & EShPoolContiguous));
  1.1274 +
  1.1275 +		if (iPoolFlags & EShPoolContiguous)
  1.1276 +			{
  1.1277 +			TPhysAddr paddr;
  1.1278 +			r = MM::MemoryAllocContiguous(iMemoryObject, 0, iCommittedPages, 0, paddr);
  1.1279 +			}
  1.1280 +		else
  1.1281 +			{
  1.1282 +			r = MM::MemoryAlloc(iMemoryObject, 0, iCommittedPages);
  1.1283 +			}
  1.1284 +
  1.1285 +		iMaxPages = maxPages;
  1.1286 +		}
  1.1287 +
  1.1288 +	iPagesMap->Alloc(0, iCommittedPages);
  1.1289 +	
  1.1290 +	return r;
  1.1291 +	}
  1.1292 +
  1.1293 +TUint8* DMemModelNonAlignedShPool::Base(DProcess* aProcess)
  1.1294 +	{
  1.1295 +	TUint8 *base = 0;
  1.1296 +
  1.1297 +	LockPool();
  1.1298 +	DMemModelNonAlignedShPoolClient* client = reinterpret_cast<DMemModelNonAlignedShPoolClient*>(iClientMap->Find(reinterpret_cast<TUint>(aProcess)));
  1.1299 +
    1.1300 +	__NK_ASSERT_DEBUG(client); // ASSERT because pool must already be open in the client's address space
  1.1301 +	__NK_ASSERT_DEBUG(client->iMapping); // ASSERT because non-aligned buffers are mapped by default in user space
  1.1302 +
  1.1303 +	base = reinterpret_cast<TUint8*>(MM::MappingBase(client->iMapping));
  1.1304 +
  1.1305 +	UnlockPool();
  1.1306 +
  1.1307 +	return base;
  1.1308 +	}
  1.1309 +
  1.1310 +TInt DMemModelNonAlignedShPool::CreateInitialBuffers()
  1.1311 +	{
  1.1312 +	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelNonAlignedShPool::CreateInitialBuffers"));
  1.1313 +
  1.1314 +	iInitialBuffersArray = reinterpret_cast<DMemModelNonAlignedShBuf*>(Kern::Alloc(iInitialBuffers * sizeof(DMemModelNonAlignedShBuf)));
  1.1315 +
  1.1316 +	if (iInitialBuffersArray == NULL)
  1.1317 +		return KErrNoMemory;
  1.1318 +
  1.1319 +	TLinAddr offset = 0;
  1.1320 +	for (TUint i = 0; i < iInitialBuffers; i++)
  1.1321 +		{
  1.1322 +		DMemModelNonAlignedShBuf *buf = new (&iInitialBuffersArray[i]) DMemModelNonAlignedShBuf(this, offset);
  1.1323 +		TInt r = buf->Construct();
  1.1324 +
  1.1325 +		if (r == KErrNone)
  1.1326 +			{
  1.1327 +			iFreeList.Add(&buf->iObjLink);
  1.1328 +			}
  1.1329 +		else
  1.1330 +			{
  1.1331 +			iInitialBuffers = i;
  1.1332 +			return KErrNoMemory;
  1.1333 +			}
  1.1334 +
  1.1335 +		offset += iBufGap;
  1.1336 +		}
  1.1337 +
  1.1338 +	iFreeBuffers  = iInitialBuffers;
  1.1339 +	iTotalBuffers = iInitialBuffers;
  1.1340 +	iBufMap->Alloc(0, iInitialBuffers);
  1.1341 +
  1.1342 +	return KErrNone;
  1.1343 +	}
  1.1344 +
  1.1345 +TInt DMemModelNonAlignedShPool::AddToProcess(DProcess* aProcess, TUint aAttr)
  1.1346 +	{
  1.1347 +	// Must be in critical section so we don't leak os asid references.
  1.1348 +	__ASSERT_CRITICAL;
  1.1349 +	__KTRACE_OPT(KMMU, Kern::Printf("Adding DMemModelShPool %O to process %O", this, aProcess));
  1.1350 +
  1.1351 +	DMemoryMapping* mapping = NULL;
  1.1352 +
  1.1353 +	TBool write = (TBool)EFalse;
  1.1354 +
    1.1355 +	// User = ETrue, Write = (pool writeable), Execute = EFalse
  1.1356 +	if (aAttr & EShPoolWriteable)
  1.1357 +		write = (TBool)ETrue;
  1.1358 +
  1.1359 +	TMappingPermissions perm = MM::MappingPermissions(ETrue,	// user
  1.1360 +													  write,	// writeable
  1.1361 +													  EFalse);	// execute
  1.1362 +
  1.1363 +	TMappingCreateFlags mappingFlags = EMappingCreateDefault;
  1.1364 +
  1.1365 +	DMemModelProcess* pP = reinterpret_cast<DMemModelProcess*>(aProcess);
  1.1366 +
  1.1367 +	Kern::MutexWait(*iProcessLock);
  1.1368 +	TInt r = KErrNone;
  1.1369 +
  1.1370 +	LockPool();
  1.1371 +	DMemModelNonAlignedShPoolClient* client = reinterpret_cast<DMemModelNonAlignedShPoolClient*>(iClientMap->Find(reinterpret_cast<TUint>(aProcess)));
  1.1372 +	UnlockPool();
  1.1373 +
  1.1374 +	if (!client)
  1.1375 +		{
  1.1376 +		client = new DMemModelNonAlignedShPoolClient;
  1.1377 +
  1.1378 +		if (client)
  1.1379 +			{
    1.1380 +			// map non-aligned pools into user-side processes by default
  1.1381 +			if (aAttr & EShPoolAutoMapBuf || pP != K::TheKernelProcess)
  1.1382 +				{
  1.1383 +				// Open a reference on the os asid so it doesn't get freed and reused.
  1.1384 +				TInt osAsid = pP->TryOpenOsAsid();
  1.1385 +				if (osAsid < 0)
  1.1386 +					{// The process freed its os asid so can't create a new mapping.
  1.1387 +					r = KErrDied;
  1.1388 +					}
  1.1389 +				else
  1.1390 +					{
  1.1391 +					r = MM::MappingNew(mapping, iMemoryObject, perm, osAsid, mappingFlags);
  1.1392 +					// Close the reference as the mapping will be destroyed if the process dies.
  1.1393 +					pP->CloseOsAsid();
  1.1394 +					}
  1.1395 +
  1.1396 +				if ((r == KErrNone) && (pP == K::TheKernelProcess))
  1.1397 +					{
  1.1398 +					iBaseAddress = MM::MappingBase(mapping);
  1.1399 +					}
  1.1400 +				}
  1.1401 +
  1.1402 +			if (r == KErrNone)
  1.1403 +				{
  1.1404 +				client->iMapping = mapping;
  1.1405 +				client->iFlags = aAttr;
  1.1406 +				r = iClientMap->Add(reinterpret_cast<TUint>(aProcess), client);
  1.1407 +
  1.1408 +				if (r == KErrNone)
  1.1409 +					{
  1.1410 +					if (pP != K::TheKernelProcess)
  1.1411 +						{
  1.1412 +						r = aProcess->iHandles.Reserve(iTotalBuffers);
  1.1413 +
  1.1414 +						if (r != KErrNone)
  1.1415 +							{
  1.1416 +							iClientMap->Remove(reinterpret_cast<TUint>(aProcess));
  1.1417 +							}
  1.1418 +						}
  1.1419 +					}
  1.1420 +
  1.1421 +				if (r != KErrNone)
  1.1422 +					{
  1.1423 +					delete client;
  1.1424 +					MM::MappingDestroy(mapping);
  1.1425 +					}
  1.1426 +				}
  1.1427 +			else
  1.1428 +				{
  1.1429 +				delete client;
  1.1430 +				}
  1.1431 +			}
  1.1432 +		else
  1.1433 +			{
  1.1434 +			r = KErrNoMemory;
  1.1435 +			}
  1.1436 +		}
  1.1437 +	else
  1.1438 +		{
  1.1439 +		LockPool();
  1.1440 +		client->iAccessCount++;
  1.1441 +		UnlockPool();
  1.1442 +		}
  1.1443 +
  1.1444 +	Kern::MutexSignal(*iProcessLock);
  1.1445 +
  1.1446 +	return r;
  1.1447 +	}
  1.1448 +
  1.1449 +TInt DMemModelNonAlignedShPool::DeleteInitialBuffers()
  1.1450 +	{
  1.1451 +	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelNonAlignedShPool::DeleteInitialBuffers"));
  1.1452 +
  1.1453 +	if (iInitialBuffersArray != NULL)
  1.1454 +		{
  1.1455 +		for (TUint i = 0; i < iInitialBuffers; i++)
  1.1456 +			{
  1.1457 +			iInitialBuffersArray[i].iObjLink.Deque(); // remove from free list
  1.1458 +			iInitialBuffersArray[i].Dec();
  1.1459 +			iInitialBuffersArray[i].~DMemModelNonAlignedShBuf();
  1.1460 +			}
  1.1461 +		}
  1.1462 +
  1.1463 +	Kern::Free(iInitialBuffersArray);
  1.1464 +	iInitialBuffersArray = NULL;
  1.1465 +
  1.1466 +	return KErrNone;
  1.1467 +	}
  1.1468 +
  1.1469 +TInt DMemModelNonAlignedShPool::DestroyAllMappingsAndReservedHandles(DProcess* aProcess)
  1.1470 +	{
  1.1471 +	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelNonAlignedShPool::DestroyAllMappingsAndReservedHandles(0x%08x)", aProcess));
  1.1472 +
  1.1473 +	TInt r = KErrNone;
  1.1474 +	Kern::MutexWait(*iProcessLock);
  1.1475 +	DMemModelNonAlignedShPoolClient* client = reinterpret_cast<DMemModelNonAlignedShPoolClient*>(iClientMap->Remove(reinterpret_cast<TUint>(aProcess)));
  1.1476 +
  1.1477 +	__NK_ASSERT_DEBUG(client);
  1.1478 +	__NK_ASSERT_DEBUG(client->iAccessCount == 0);
  1.1479 +
  1.1480 +	if (client->iMapping)
  1.1481 +		{
  1.1482 +		MM::MappingDestroy(client->iMapping);
  1.1483 +		}
  1.1484 +	delete client;
  1.1485 +
  1.1486 +	if (aProcess != K::TheKernelProcess)
  1.1487 +		{
  1.1488 +		// Remove reserved handles
  1.1489 +		r = aProcess->iHandles.Reserve(-(iTotalBuffers));
  1.1490 +		}
  1.1491 +	else
  1.1492 +		{
  1.1493 +		iBaseAddress = 0;
  1.1494 +		}
  1.1495 +
  1.1496 +	Kern::MutexSignal(*iProcessLock);
  1.1497 +
  1.1498 +	__KTRACE_OPT(KMMU, Kern::Printf("<DMemModelNonAlignedShPool::DestroyAllMappingsAndReservedHandles(0x%08x)", aProcess));
  1.1499 +
  1.1500 +	return r;
  1.1501 +	}
  1.1502 +
  1.1503 +
  1.1504 +TInt DMemModelNonAlignedShPool::Close(TAny* aPtr)
  1.1505 +	{
  1.1506 +	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelNonAlignedShPool::Close(0x%08x)", aPtr));
  1.1507 +
  1.1508 +	if (aPtr)
  1.1509 +		{
  1.1510 +		DProcess* pP = reinterpret_cast<DProcess*>(aPtr);
  1.1511 +
  1.1512 +		CloseClient(pP);
  1.1513 +		}
  1.1514 +
  1.1515 +	return DShPool::Close(aPtr);
  1.1516 +	}
  1.1517 +
  1.1518 +void DMemModelNonAlignedShPool::FreeBufferPages(TUint aOffset)
  1.1519 +	{
  1.1520 +	TLinAddr firstByte = aOffset;	// offset of first byte in buffer
  1.1521 +	TLinAddr lastByte = firstByte+iBufGap-1;	// offset of last byte in buffer
  1.1522 +	TUint firstPage = firstByte>>KPageShift;	// index of first page containing part of the buffer
  1.1523 +	TUint lastPage = lastByte>>KPageShift;		// index of last page containing part of the buffer
  1.1524 +
  1.1525 +	TUint firstBuffer = (firstByte&~KPageMask)/iBufGap; // index of first buffer which lies in firstPage
  1.1526 +	TUint lastBuffer = (lastByte|KPageMask)/iBufGap;    // index of last buffer which lies in lastPage
  1.1527 +	TUint thisBuffer = firstByte/iBufGap;				// index of the buffer to be freed
  1.1528 +
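          +	// Worked example (assuming 4K pages and iBufGap = 0x1800): freeing
          +	// buffer 2 (offsets 0x3000..0x47ff) gives firstPage = 3, lastPage = 4,
          +	// firstBuffer = thisBuffer = 2 and lastBuffer = 3; page 3 lies wholly
          +	// within buffer 2, but page 4 is shared with buffer 3 and can only be
          +	// freed if buffer 3 is free as well.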
  1.1529 +	// Ensure lastBuffer is within bounds (there may be room in the last
  1.1530 +	// page for more buffers than we have allocated).
  1.1531 +	if (lastBuffer >= iMaxBuffers)
  1.1532 +		lastBuffer = iMaxBuffers-1;
  1.1533 +
  1.1534 +	if(firstBuffer!=thisBuffer && iBufMap->NotFree(firstBuffer,thisBuffer-firstBuffer))
  1.1535 +		{
  1.1536 +		// first page has other allocated buffers in it,
  1.1537 +		// so we can't free it and must move on to next one...
  1.1538 +		if (firstPage >= lastPage)
  1.1539 +			return;
  1.1540 +		++firstPage;
  1.1541 +		}
  1.1542 +
  1.1543 +	if(lastBuffer!=thisBuffer && iBufMap->NotFree(thisBuffer+1,lastBuffer-thisBuffer))
  1.1544 +		{
  1.1545 +		// last page has other allocated buffers in it,
  1.1546 +		// so we can't free it and must step back to previous one...
  1.1547 +		if (lastPage <= firstPage)
  1.1548 +			return;
  1.1549 +		--lastPage;
  1.1550 +		}
  1.1551 +
  1.1552 +	if(firstPage<=lastPage)
  1.1553 +		{
    1.1554 +		// we can free pages firstPage through to lastPage...
  1.1555 +		TUint numPages = lastPage-firstPage+1;
  1.1556 +		iPagesMap->SelectiveFree(firstPage,numPages);
  1.1557 +		MM::MemoryLock(iMemoryObject);
  1.1558 +		MM::MemoryFree(iMemoryObject, firstPage, numPages);
  1.1559 +		MM::MemoryUnlock(iMemoryObject);
  1.1560 +		iCommittedPages -= numPages;
  1.1561 +		}
  1.1562 +	}
  1.1563 +
  1.1564 +TInt DMemModelNonAlignedShPool::GrowPool()
  1.1565 +	{
  1.1566 +	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelNonAlignedShPool::GrowPool()"));
  1.1567 +
  1.1568 +	// Don't do anything with physical memory pools
  1.1569 +	if (iPoolFlags & EShPoolPhysicalMemoryPool)
  1.1570 +		return KErrNone;
  1.1571 +
  1.1572 +	Kern::MutexWait(*iProcessLock);
  1.1573 +
  1.1574 +	TUint32 headroom = iMaxBuffers - iTotalBuffers;
  1.1575 +
  1.1576 +	// How many buffers to grow by?
  1.1577 +	TUint32 grow = mult_fx248(iTotalBuffers, iGrowByRatio);
  1.1578 +	if (grow == 0)			// Handle round-to-zero
  1.1579 +		grow = 1;
  1.1580 +	if (grow > headroom)
  1.1581 +		grow = headroom;
  1.1582 +
  1.1583 +	TInt r = KErrNone;
  1.1584 +	SDblQue temp;
  1.1585 +
  1.1586 +	TUint i;
  1.1587 +	for (i = 0; i < grow; ++i)
  1.1588 +		{
  1.1589 +		TInt offset = iBufMap->Alloc();
  1.1590 +
  1.1591 +		if (offset < 0)
  1.1592 +			{
  1.1593 +			r = KErrNoMemory;
  1.1594 +			break;
  1.1595 +			}
  1.1596 +
  1.1597 +		offset *= iBufGap;
  1.1598 +
  1.1599 +		TInt lastPage = (offset + iBufSize - 1) >> KPageShift;
  1.1600 +
  1.1601 +		// Allocate one page at a time.
  1.1602 +		for (TInt page = offset >> KPageShift; page <= lastPage; ++page)
  1.1603 +			{
  1.1604 +			// Is the page allocated?
  1.1605 +			if (iPagesMap->NotAllocated(page, 1))
  1.1606 +				{
  1.1607 +				MM::MemoryLock(iMemoryObject);
  1.1608 +				r = MM::MemoryAlloc(iMemoryObject, page, 1);
  1.1609 +				MM::MemoryUnlock(iMemoryObject);
  1.1610 +
  1.1611 +				if (r != KErrNone)
  1.1612 +					{
  1.1613 +					break;
  1.1614 +					}
  1.1615 +
  1.1616 +				++iCommittedPages;
  1.1617 +				iPagesMap->Alloc(page, 1);
  1.1618 +				}
  1.1619 +			}
  1.1620 +
  1.1621 +		if (r != KErrNone)
  1.1622 +			{
  1.1623 +			iBufMap->Free(offset / iBufGap);
  1.1624 +			FreeBufferPages(offset);
  1.1625 +			break;
  1.1626 +			}
  1.1627 +
  1.1628 +		DMemModelNonAlignedShBuf *buf = new DMemModelNonAlignedShBuf(this, offset);
  1.1629 +
  1.1630 +		if (buf == NULL)
  1.1631 +			{
  1.1632 +			iBufMap->Free(offset / iBufGap);
  1.1633 +			FreeBufferPages(offset);
  1.1634 +			r = KErrNoMemory;
  1.1635 +			break;
  1.1636 +			}
  1.1637 +
  1.1638 +		r = buf->Construct();
  1.1639 +
  1.1640 +		if (r != KErrNone)
  1.1641 +			{
  1.1642 +			iBufMap->Free(offset / iBufGap);
  1.1643 +			FreeBufferPages(offset);
  1.1644 +			buf->DObject::Close(NULL);
  1.1645 +			break;
  1.1646 +			}
  1.1647 +
  1.1648 +		temp.Add(&buf->iObjLink);
  1.1649 +		}
  1.1650 +
  1.1651 +	r = UpdateReservedHandles(i);
  1.1652 +
  1.1653 +	if (r == KErrNone)
  1.1654 +		{
  1.1655 +		LockPool();
  1.1656 +		iFreeList.MoveFrom(&temp);
  1.1657 +		iFreeBuffers += i;
  1.1658 +		iTotalBuffers += i;
  1.1659 +		UnlockPool();
  1.1660 +		}
  1.1661 +	else
  1.1662 +		{
   1.1663 +		// we couldn't reserve the handles, so we have no choice but to
   1.1664 +		// delete the buffers
  1.1665 +		__KTRACE_OPT(KMMU, Kern::Printf("GrowPool failed with %d, deleting buffers", r));
  1.1666 +		SDblQueLink *pLink;
  1.1667 +		while ((pLink = temp.GetFirst()) != NULL)
  1.1668 +			{
  1.1669 +			DShBuf* buf = _LOFF(pLink, DShBuf, iObjLink);
  1.1670 +			TLinAddr offset = buf->iRelAddress;
  1.1671 +			iBufMap->Free(offset / iBufGap);
  1.1672 +			FreeBufferPages(offset);
  1.1673 +			buf->DObject::Close(NULL);
  1.1674 +			}
  1.1675 +		__KTRACE_OPT(KMMU, Kern::Printf("Buffers deleted"));
  1.1676 +		}
  1.1677 +
  1.1678 +	CalculateGrowShrinkTriggers();
  1.1679 +
  1.1680 +	Kern::MutexSignal(*iProcessLock);
  1.1681 +
  1.1682 +	__KTRACE_OPT(KMMU, Kern::Printf("<DMemModelNonAlignedShPool::GrowPool()"));
  1.1683 +	return r;
  1.1684 +	}
  1.1685 +
  1.1686 +TInt DMemModelNonAlignedShPool::ShrinkPool()
  1.1687 +	{
  1.1688 +	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelNonAlignedShPool::ShrinkPool()"));
  1.1689 +
  1.1690 +	// Don't do anything with physical memory pools
  1.1691 +	if (iPoolFlags & EShPoolPhysicalMemoryPool)
  1.1692 +		return KErrNone;
  1.1693 +
  1.1694 +	Kern::MutexWait(*iProcessLock);
  1.1695 +
  1.1696 +	TUint32 grownBy = iTotalBuffers - iInitialBuffers;
  1.1697 +
  1.1698 +	// How many buffers to shrink by?
  1.1699 +	TUint32 shrink = mult_fx248(iTotalBuffers, iShrinkByRatio);
  1.1700 +	if (shrink == 0)		// Handle round-to-zero
  1.1701 +		shrink = 1;
  1.1702 +	if (shrink > grownBy)
  1.1703 +		shrink = grownBy;
  1.1704 +	if (shrink > iFreeBuffers)
  1.1705 +		shrink = iFreeBuffers;
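          +	// E.g. (illustrative values only): if the ratio gives shrink == 3,
          +	// with 4 buffers grown beyond the initial set but only 2 currently
          +	// free, shrink is clamped to 2; in-use buffers are never released.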
  1.1706 +
  1.1707 +	TUint i;
  1.1708 +	for (i = 0; i < shrink; ++i)
  1.1709 +		{
  1.1710 +		LockPool();
  1.1711 +
  1.1712 +		if (iFreeList.IsEmpty())
  1.1713 +			{
  1.1714 +			UnlockPool();
  1.1715 +			break;
  1.1716 +			}
  1.1717 +
  1.1718 +		// work from the back of the queue
  1.1719 +		SDblQueLink *pLink = iFreeList.Last();
  1.1720 +
  1.1721 +		DShBuf* pBuf = _LOFF(pLink, DShBuf, iObjLink);
  1.1722 +
  1.1723 +		if (pBuf >= iInitialBuffersArray && pBuf < (iInitialBuffersArray + iInitialBuffers))
  1.1724 +			{
  1.1725 +			UnlockPool();
  1.1726 +			break;
  1.1727 +			}
  1.1728 +
  1.1729 +		--iFreeBuffers;
  1.1730 +		--iTotalBuffers;
  1.1731 +		pLink->Deque();
  1.1732 +		UnlockPool();
  1.1733 +
  1.1734 +		TLinAddr offset = pBuf->iRelAddress;
  1.1735 +		iBufMap->Free(offset / iBufGap);
  1.1736 +		FreeBufferPages(offset);
  1.1737 +
  1.1738 +		pBuf->DObject::Close(NULL);
  1.1739 +		}
  1.1740 +
  1.1741 +	UpdateReservedHandles(-(TInt)i);
  1.1742 +
  1.1743 +	// If we couldn't shrink the pool by this many buffers, wait until we Free() another
  1.1744 +	// buffer before trying to shrink again.
  1.1745 +	if (i < shrink)
  1.1746 +		iPoolFlags |= EShPoolSuppressShrink;
  1.1747 +
  1.1748 +	CalculateGrowShrinkTriggers();
  1.1749 +
  1.1750 +	Kern::MutexSignal(*iProcessLock);
  1.1751 +
  1.1752 +	__KTRACE_OPT(KMMU, Kern::Printf("<DMemModelNonAlignedShPool::ShrinkPool()"));
  1.1753 +
  1.1754 +	return KErrNone;
  1.1755 +	}
  1.1756 +
  1.1757 +TInt DMemModelNonAlignedShPool::UpdateFreeList()
  1.1758 +	{
  1.1759 +	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelNonAlignedShPool::UpdateFreeList"));
  1.1760 +
  1.1761 +	SDblQue temp;
  1.1762 +
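          +	// Strategy: drain iAltFreeList in batches of up to eight buffers,
          +	// insertion-sorting each batch into 'temp' by ascending iRelAddress,
          +	// then merging each sorted batch into iFreeList from the back. The
          +	// pool lock is flashed between steps so this walk does not hold the
          +	// fast mutex for an unbounded time.
          +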
  1.1763 +	LockPool();
  1.1764 +	while(!iAltFreeList.IsEmpty())
  1.1765 +		{
   1.1766 +		// build a temporary list of up to 'n' objects, sorted with the lowest index first
  1.1767 +		for (TInt n = 0; n < 8 && !iAltFreeList.IsEmpty(); ++n)
  1.1768 +			{
   1.1769 +			// heuristic: assume that lower indexes are allocated and freed first,
   1.1770 +			// and will therefore be nearer the front of the list
  1.1771 +			DShBuf* buf = _LOFF(iAltFreeList.GetFirst(), DShBuf, iObjLink);
  1.1772 +
  1.1773 +			SDblQueLink* anchor = reinterpret_cast<SDblQueLink*>(&temp);
  1.1774 +			SDblQueLink* pLink = temp.Last();
  1.1775 +
  1.1776 +			while (ETrue)
  1.1777 +				{
  1.1778 +				// traverse the list starting at the back
  1.1779 +				if ((pLink != anchor) && (_LOFF(pLink, DShBuf, iObjLink)->iRelAddress > buf->iRelAddress))
  1.1780 +					{
  1.1781 +					pLink = pLink->iPrev;
  1.1782 +					}
  1.1783 +				else
  1.1784 +					{
  1.1785 +					buf->iObjLink.InsertAfter(pLink);
  1.1786 +					break;
  1.1787 +					}
  1.1788 +				}
  1.1789 +			}
  1.1790 +
  1.1791 +		// now merge with the free list
  1.1792 +		while(!temp.IsEmpty())
  1.1793 +			{
  1.1794 +			if (iFreeList.IsEmpty())
  1.1795 +				{
  1.1796 +				iFreeList.MoveFrom(&temp);
  1.1797 +				break;
  1.1798 +				}
  1.1799 +
   1.1800 +			// work backwards, taking the highest index first
  1.1801 +			DShBuf* buf = _LOFF(temp.Last(), DShBuf, iObjLink);
  1.1802 +			SDblQueLink* anchor = reinterpret_cast<SDblQueLink*>(&iFreeList);
  1.1803 +			SDblQueLink* pLink = iFreeList.Last();
  1.1804 +
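          +			// NKern::FMFlash() briefly releases iLock if another thread is
          +			// waiting on it and returns TRUE in that case; we then leave
          +			// this inner loop so the merge restarts against the (possibly
          +			// changed) free list.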
  1.1805 +			while (!NKern::FMFlash(&iLock))
  1.1806 +				{
  1.1807 +				if ((pLink != anchor) && (_LOFF(pLink, DShBuf, iObjLink)->iRelAddress > buf->iRelAddress))
  1.1808 +					{
  1.1809 +					pLink = pLink->iPrev;
  1.1810 +					}
  1.1811 +				else
  1.1812 +					{
  1.1813 +					buf->iObjLink.Deque();
  1.1814 +					buf->iObjLink.InsertAfter(pLink);
  1.1815 +					// next buffer
  1.1816 +					if (temp.IsEmpty())
  1.1817 +						break;
  1.1818 +					buf = _LOFF(temp.Last(), DShBuf, iObjLink);
  1.1819 +					}
  1.1820 +				}
  1.1821 +			}
  1.1822 +		NKern::FMFlash(&iLock);
  1.1823 +		}
  1.1824 +	UnlockPool();
  1.1825 +
  1.1826 +	__KTRACE_OPT(KMMU, Kern::Printf("<DMemModelNonAlignedShPool::UpdateFreeList"));
  1.1827 +	return KErrNone;
  1.1828 +	}
  1.1829 +
  1.1830 +void DMemModelNonAlignedShPool::Free(DShBuf* aBuf)
  1.1831 +	{
   1.1832 +	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelNonAlignedShPool::Free (aBuf = 0x%08x, aBuf->iRelAddress 0x%08x)", aBuf, aBuf->iRelAddress));
  1.1833 +
  1.1834 +	LockPool();
  1.1835 +#ifdef _DEBUG
  1.1836 +	// Remove from allocated list
  1.1837 +	aBuf->iObjLink.Deque();
  1.1838 +#endif
  1.1839 +
   1.1840 +	// put the initial buffers at the head of the free list and the grown buffers
   1.1841 +	// on the alternative list (merged toward the tail later), as this makes shrinking more efficient
  1.1842 +	if (aBuf >= iInitialBuffersArray && aBuf < (iInitialBuffersArray + iInitialBuffers))
  1.1843 +		{
  1.1844 +		iFreeList.AddHead(&aBuf->iObjLink);
  1.1845 +		}
  1.1846 +	else
  1.1847 +		{
  1.1848 +		iAltFreeList.Add(&aBuf->iObjLink);
  1.1849 +		}
  1.1850 +
  1.1851 +	++iFreeBuffers;
  1.1852 +#ifdef _DEBUG
  1.1853 +	--iAllocatedBuffers;
  1.1854 +#endif
  1.1855 +	iPoolFlags &= ~EShPoolSuppressShrink;		// Allow shrinking again, if it was blocked
  1.1856 +	UnlockPool();
  1.1857 +
  1.1858 +	// queue ManagementDfc which completes notifications as appropriate
  1.1859 +	if (HaveWorkToDo())
  1.1860 +		KickManagementDfc();
  1.1861 +
  1.1862 +	DShPool::Close(NULL); // decrement pool reference count
  1.1863 +	}
  1.1864 +
  1.1865 +// Kernel side API
  1.1866 +TInt DMemModelNonAlignedShPool::Alloc(DShBuf*& aShBuf)
  1.1867 +	{
  1.1868 +	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelNonAlignedShPool::Alloc (DShBuf)"));
  1.1869 +
  1.1870 +	aShBuf = NULL;
  1.1871 +
  1.1872 +	LockPool();
  1.1873 +
  1.1874 +	if (!iFreeList.IsEmpty())
  1.1875 +		{
  1.1876 +		aShBuf = _LOFF(iFreeList.GetFirst(), DShBuf, iObjLink);
  1.1877 +#ifdef _DEBUG
  1.1878 +		iAllocated.Add(&aShBuf->iObjLink);
  1.1879 +		iAllocatedBuffers++;
  1.1880 +#endif
  1.1881 +		}
  1.1882 +	else
  1.1883 +		{
  1.1884 +		// try alternative free list
  1.1885 +		if (!iAltFreeList.IsEmpty())
  1.1886 +			{
  1.1887 +			aShBuf = _LOFF(iAltFreeList.GetFirst(), DShBuf, iObjLink);
  1.1888 +#ifdef _DEBUG
  1.1889 +			iAllocated.Add(&aShBuf->iObjLink);
  1.1890 +			iAllocatedBuffers++;
  1.1891 +#endif
  1.1892 +			}
  1.1893 +		else
  1.1894 +			{
  1.1895 +			UnlockPool();
  1.1896 +			KickManagementDfc(); // Try to grow
  1.1897 +			return KErrNoMemory;
  1.1898 +			}
  1.1899 +		}
  1.1900 +
  1.1901 +	--iFreeBuffers;
  1.1902 +	Open(); // increment pool reference count
  1.1903 +
  1.1904 +	UnlockPool();
  1.1905 +
  1.1906 +	if (HaveWorkToDo())
  1.1907 +		KickManagementDfc();
  1.1908 +
  1.1909 +	__KTRACE_OPT(KMMU, Kern::Printf("<DMemModelNonAlignedShPool::Alloc return buf = 0x%08x", aShBuf));
  1.1910 +	return KErrNone;
  1.1911 +	}
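          +
          +// Usage sketch (illustrative only, not part of this file): a kernel-side
          +// client holding a pointer to this pool might do:
          +//
          +//   DShBuf* buf = NULL;
          +//   if (pool->Alloc(buf) == KErrNone)
          +//       {
          +//       // ... use the buffer via its kernel mapping ...
          +//       buf->Close(NULL); // dropping the last reference hands the
          +//                         // buffer back to the pool via Free()
          +//       }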