// Copyright (c) 2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32/memmodel/epoc/flexible/mshbuf.cpp
// Shareable Data Buffers

#include <memmodel.h>
#include "mmu/mm.h"
#include "mmboot.h"
#include <kernel/smap.h>

_LIT(KLitDMemModelAlignedShPool,"DMMAlignedShPool");	// Must be no more than 16 characters!

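// TWait is an intrusive wait-list node built on a fast semaphore. A thread
// that must wait for a mapping transition links itself onto the relevant
// iTransitions list while holding the pool lock, drops the lock, then blocks;
// the thread finishing the transition detaches the whole list and wakes every
// waiter. A minimal sketch of the pattern, as used by Map()/UnMap() below:
//
//   TWait wait;
//   pool->LockPool();
//   wait.Link(m->iTransitions);   // enqueue under the pool lock
//   pool->UnlockPool();
//   wait.Wait();                  // block on the fast semaphore
//   // ...the finishing thread calls TWait::SignalAll(list) after unlinking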
struct TWait
	{
	void Link(TWait*& aList)
		{
		iSem.SetOwner(NULL);
		iNext = aList;
		aList = this;
		};
	void Wait()
		{
		NKern::FSWait(&iSem);
		}
	NFastSemaphore iSem;
	TWait* iNext;

	static void SignalAll(TWait* aList)
		{
		while (aList)
			{
			TWait* next = aList->iNext;
			NKern::FSSignal(&aList->iSem);
			aList = next;
			}
		}
	};


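// One DShBufMapping represents the mapping of an aligned buffer into a single
// process (identified by iOsAsid). An instance lives either on a buffer's
// iMappings list (while mapped) or on the owning client's iMappingFreeList
// (while cached for reuse); iTransitioning guards against concurrent
// Map()/UnMap() of the same mapping.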
class DShBufMapping : public DBase
	{
public:
	SDblQueLink iObjLink;
	DMemoryMapping* iMapping;
	TInt iOsAsid;
	TWait* iTransitions; // Mapping and Unmapping operations
	TBool iTransitioning;
	};


DMemModelShPool::DMemModelShPool() : DShPool()
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelShPool::DMemModelShPool"));
	}

DMemModelShPool::~DMemModelShPool()
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelShPool::~DMemModelShPool"));
	}

void DMemModelShPool::DestroyClientResources(DProcess* aProcess)
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelShPool::DestroyClientResources"));

	TInt r = DestroyAllMappingsAndReservedHandles(aProcess);
	__NK_ASSERT_DEBUG((r == KErrNone) || (r == KErrDied));
	(void)r;		// Silence warnings
	}

DMemModelAlignedShBuf::DMemModelAlignedShBuf(DShPool* aPool) : DShBuf(aPool)
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelAlignedShBuf::DMemModelAlignedShBuf()"));
	}

TInt DMemModelAlignedShBuf::Construct()
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelAlignedShBuf::Construct()"));

	TInt r = KErrNone;

	r = DShBuf::Construct();

	if (r == KErrNone)
		r = Create();

	return r;
	}

TInt DMemModelAlignedShBuf::Close(TAny* aPtr)
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelAlignedShBuf::Close(0x%08x)", aPtr));

	if (aPtr)
		{
		DProcess* pP = reinterpret_cast<DProcess*>(aPtr);
		UnMap(pP);
		iPool->CloseClient(pP);
		}

	return DShBuf::Close(aPtr);
	}

TInt DMemModelAlignedShBuf::AddToProcess(DProcess* aProcess, TUint aAttr)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("Adding DMemModelShBuf %O to process %O",this,aProcess));
	TInt r;
	TLinAddr base;
	TUint flags;

	r = iPool->OpenClient(aProcess, flags);

	if (r == KErrNone)
		{
		if ((flags & EShPoolAutoMapBuf) && ((aAttr & EShPoolNoMapBuf) == 0))
			{
			// note we use the client's pool flags and not the buffer attributes
			r = Map(flags, aProcess, base);

			if (aProcess == K::TheKernelProcess)
				iRelAddress = static_cast<TLinAddr>(base);
			}
		}

	return r;
	}

TInt DMemModelAlignedShBuf::Create()
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelAlignedShBuf::Create()"));
	TInt r = KErrNone;

	// calculate memory type...
	TMemoryObjectType memoryType = EMemoryObjectUnpaged;

	TMemoryAttributes attr = EMemoryAttributeStandard;

	// calculate memory flags...
	TMemoryCreateFlags flags = static_cast<TMemoryCreateFlags>((EMemoryCreateDefault|EMemoryCreateUseCustomWipeByte|(0xAA<<EMemoryCreateWipeByteShift)));

	// note that any guard pages will be included in iBufGap, however the amount of memory committed
	// will be iBufSize rounded up to a page
	r = MM::MemoryNew(iMemoryObject, memoryType, MM::RoundToPageCount(iPool->iBufGap), flags, attr);

	if(r!=KErrNone)
		return r;

	if (iPool->iPoolFlags & EShPoolContiguous)
		{
		TPhysAddr paddr;
		r = MM::MemoryAllocContiguous(iMemoryObject, 0, MM::RoundToPageCount(iPool->iBufSize), 0, paddr);
		}
	else
		{
		r = MM::MemoryAlloc(iMemoryObject, 0, MM::RoundToPageCount(iPool->iBufSize));
		}

	return r;
	}

DMemModelAlignedShBuf::~DMemModelAlignedShBuf()
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelAlignedShBuf::~DMemModelAlignedShBuf()"));

	__NK_ASSERT_DEBUG(iMappings.IsEmpty());

	MM::MemoryDestroy(iMemoryObject);
	}

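// Map this buffer into aProcess. If a mapping already exists the call either
// waits for an in-flight transition to finish and retries, or fails with
// KErrAlreadyExists; otherwise a pre-created mapping is taken from the
// client's free list and mapped outside the pool lock.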
TInt DMemModelAlignedShBuf::Map(TUint aMapAttr, DProcess* aProcess, TLinAddr& aBase)
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelAlignedShBuf::Map()"));
	TInt r = KErrNone;

	DShBufMapping* m = NULL;
	DMemoryMapping* mapping = NULL;
	DMemModelProcess* pP = reinterpret_cast<DMemModelProcess*>(aProcess);

	TBool write = (TBool)EFalse;

	// writeable only if the pool is writeable; never executable
	if (aMapAttr & EShPoolWriteable)
		write = (TBool)ETrue;

	TMappingPermissions perm = MM::MappingPermissions(pP!=K::TheKernelProcess, write, (TBool)EFalse);
	TWait wait;

	for(;;)
		{
		iPool->LockPool();
		r = FindMapping(m, pP);

		if (r != KErrNone)
			break;

		if (m->iTransitioning)
			{
			wait.Link(m->iTransitions);
			iPool->UnlockPool();
			wait.Wait();
			}
		else
			{
			iPool->UnlockPool();
			return KErrAlreadyExists;
			}
		}

	DMemModelAlignedShPoolClient* client = reinterpret_cast<DMemModelAlignedShPoolClient*>(iPool->iClientMap->Find(reinterpret_cast<TUint>(aProcess)));

	__NK_ASSERT_DEBUG(client);

	DMemModelAlignedShPool* pool = reinterpret_cast<DMemModelAlignedShPool*>(iPool);

	__NK_ASSERT_DEBUG(m == NULL);
	r = pool->GetFreeMapping(m, client);

	if (r == KErrNone)
		{
		iMappings.AddHead(&m->iObjLink);
		m->iTransitioning = ETrue;

		mapping = m->iMapping;
		iPool->UnlockPool(); // have to release fast lock for MappingMap

		r = MM::MappingMap(mapping, perm, iMemoryObject, 0, MM::RoundToPageCount(pool->iBufSize));

		iPool->LockPool();

		TWait* list = m->iTransitions;
		m->iTransitions = NULL;

		if (r != KErrNone)
			pool->ReleaseMapping(m, client);
		else
			aBase = MM::MappingBase(mapping);

		m->iTransitioning = EFalse;
		iPool->UnlockPool();

		TWait::SignalAll(list);
		}
	else
		iPool->UnlockPool();

	return r;
	}

TInt DMemModelAlignedShBuf::FindMapping(DShBufMapping*& aMapping, DMemModelProcess* aProcess)
	{
	// Must be in critical section so we don't leak os asid references.
	__ASSERT_CRITICAL;
	__NK_ASSERT_DEBUG(iPool->iLock.HeldByCurrentThread());

	TInt r = KErrNotFound;
	aMapping = NULL;

	// Open a reference on aProcess's os asid so that it can't be freed and
	// reused while searching.
	TInt osAsid = aProcess->TryOpenOsAsid();
	if (osAsid < 0)
		{// aProcess has died and freed its os asid.
		return KErrDied;
		}

	SDblQueLink* pLink = iMappings.First();
	SDblQueLink* end = reinterpret_cast<SDblQueLink*>(&iMappings);
	DShBufMapping* m = NULL;

	while (pLink != end)
		{
		m = _LOFF(pLink, DShBufMapping, iObjLink);

		if (m->iOsAsid == osAsid)
			{
			aMapping = m;
			r = KErrNone;
			break;
			}
		pLink = pLink->iNext;
		}

	// Close the reference on the os asid as if we have a mapping then its lifetime will
	// determine whether the process still owns an os asid.
	aProcess->CloseOsAsid();
	return r;
	}

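// Reverse of Map(): waits for any in-flight transition on this buffer's
// mapping, unmaps it outside the pool lock, and returns the mapping to the
// client's free list via ReleaseMapping().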
TInt DMemModelAlignedShBuf::UnMap(DProcess* aProcess)
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelAlignedShBuf::UnMap()"));

	TInt r = KErrNone;

	DMemModelProcess* pP = reinterpret_cast<DMemModelProcess*>(aProcess);

	DShBufMapping* m = NULL;
	TWait wait;

	for(;;)
		{
		iPool->LockPool();
		r = FindMapping(m, pP);

		if (r != KErrNone)
			{
			iPool->UnlockPool();
			return KErrNotFound;
			}

		if (m->iTransitioning)
			{
			wait.Link(m->iTransitions);
			iPool->UnlockPool();
			wait.Wait();
			}
		else
			{
			break;
			}
		}

	m->iTransitioning = ETrue;
	iPool->UnlockPool();

	MM::MappingUnmap(m->iMapping);

	iPool->LockPool();
	DMemModelAlignedShPoolClient* client = reinterpret_cast<DMemModelAlignedShPoolClient*>(iPool->iClientMap->Find(reinterpret_cast<TUint>(aProcess)));

	__NK_ASSERT_DEBUG(client);

	TWait* list = m->iTransitions;
	m->iTransitions = NULL;
	m->iObjLink.Deque();
	m->iTransitioning = EFalse;

	DMemModelAlignedShPool* pool = reinterpret_cast<DMemModelAlignedShPool*>(iPool);
	pool->ReleaseMapping(m, client);

	if (aProcess == K::TheKernelProcess)
		iRelAddress = NULL;

	iPool->UnlockPool();

	wait.SignalAll(list);
	return KErrNone;
	}

TUint8* DMemModelAlignedShBuf::Base(DProcess* aProcess)
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelAlignedShBuf::Base()"));
	DMemModelProcess* pP = reinterpret_cast<DMemModelProcess*>(aProcess);

	DShBufMapping* mapping = NULL;
	iPool->LockPool();
	TInt r = FindMapping(mapping, pP);
	TUint8* base = NULL;

	if (r == KErrNone)
		base = reinterpret_cast<TUint8*>(MM::MappingBase(mapping->iMapping));
	iPool->UnlockPool();

	return base;
	}

TUint8* DMemModelAlignedShBuf::Base()
	{
	return reinterpret_cast<TUint8*>(iRelAddress);
	}

TInt DMemModelAlignedShBuf::Pin(TPhysicalPinObject* aPinObject, TBool aReadOnly, TPhysAddr& aAddress, TPhysAddr* aPages, TUint32& aMapAttr, TUint& aColour)
	{
	CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"DMemModelAlignedShBuf::Pin");

	TInt r = MM::PinPhysicalMemory(iMemoryObject, (DPhysicalPinMapping*)aPinObject, 0,
								   MM::RoundToPageCount(Size()),
								   aReadOnly, aAddress, aPages, aMapAttr, aColour);

	return r;
	}

TInt DMemModelAlignedShPool::GetFreeMapping(DShBufMapping*& aMapping, DMemModelAlignedShPoolClient* aClient)
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelAlignedShPool::GetFreeMapping()"));
	__NK_ASSERT_DEBUG(iLock.HeldByCurrentThread());

	TInt r = KErrNotFound;
	aMapping = NULL;

	if (aClient)
		{
		if (!aClient->iMappingFreeList.IsEmpty())
			{
			aMapping = _LOFF(aClient->iMappingFreeList.GetFirst(), DShBufMapping, iObjLink);
			r = KErrNone;
			}
		else
			{
			r = KErrNoMemory;
			}
		}

	__KTRACE_OPT(KMMU2, Kern::Printf("DMemModelAlignedShPool::GetFreeMapping(0x%08x, 0x%08x) returns %d", aMapping, aClient, r));
	return r;
	}

TInt DMemModelAlignedShPool::ReleaseMapping(DShBufMapping*& aMapping, DMemModelAlignedShPoolClient* aClient)
	{
	__KTRACE_OPT(KMMU2, Kern::Printf("DMemModelAlignedShPool::ReleaseMapping(0x%08x,0x%08x)",aMapping,aClient));
	__NK_ASSERT_DEBUG(iLock.HeldByCurrentThread());

	TInt r = KErrNone;

	if (aClient)
		{
		aClient->iMappingFreeList.AddHead(&aMapping->iObjLink);
		aMapping = NULL;
		}
	else
		{
		// the pool has probably been closed, so delete the mapping
		r = KErrNotFound;
		__KTRACE_OPT(KMMU2, Kern::Printf("DMemModelAlignedShPool::ReleaseMapping delete 0x%08x",aMapping));
		UnlockPool(); // have to release fast lock for MappingDestroy
		MM::MappingDestroy(aMapping->iMapping);
		delete aMapping;
		aMapping = NULL;
		LockPool();
		}

	return r;
	}

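// A "buffer window" fixes how many buffers of this pool a client process may
// have mapped concurrently; the mappings and reserved handles are created up
// front here, and a negative window size covers the whole pool. A hedged
// sketch of the user-side call this services (assuming the RShPool API from
// e32shbuf.h; values illustrative only):
//
//   RShPool pool;
//   // ... pool opened via RShPool::Open(...) ...
//   TInt r = pool.SetBufferWindow(16, ETrue); // window of 16 buffers, auto-map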
TInt DMemModelAlignedShPool::SetBufferWindow(DProcess* aProcess, TInt aWindowSize)
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelAlignedShPool::SetBufferWindow()"));

	// Create and construct mappings but do not map
	// also allocate reserved handles
	TInt r = KErrNone;
	TUint noOfBuffers = aWindowSize;

	if (aWindowSize > static_cast<TInt>(iMaxBuffers))
		return KErrArgument;

	Kern::MutexWait(*iProcessLock);

	LockPool();
	DMemModelAlignedShPoolClient* client = reinterpret_cast<DMemModelAlignedShPoolClient*>(iClientMap->Find(reinterpret_cast<TUint>(aProcess)));
	UnlockPool();

	if (client)
		{
		if (client->iWindowSize != 0)
			{
			Kern::MutexSignal(*iProcessLock);
			return KErrAlreadyExists;
			}

		if (aWindowSize < 0)
			{
			noOfBuffers = iTotalBuffers;
			}

		DMemModelProcess* pP = reinterpret_cast<DMemModelProcess*>(aProcess);
		r = CreateMappings(client, noOfBuffers, pP);

		if (r == KErrNone)
			{
			client->iWindowSize = aWindowSize;
			}
		else
			{
			DestroyMappings(client, noOfBuffers);
			}
		}
	else
		{
		r = KErrNotFound;
		}

	Kern::MutexSignal(*iProcessLock);

	return r;
	}

TInt DMemModelAlignedShPool::MappingNew(DShBufMapping*& aMapping, DMemModelProcess* aProcess)
	{
	// Must be in critical section so we don't leak os asid references.
	__ASSERT_CRITICAL;

	TMappingCreateFlags flags=EMappingCreateDefault;

	FlagSet(flags, EMappingCreateReserveAllResources);

	// Open a reference on aProcess's os asid so it isn't freed and reused while
	// we're creating this mapping.
	TInt osAsid = aProcess->TryOpenOsAsid();
	if (osAsid < 0)
		{// The process has freed its os asid so can't create a new mapping.
		return KErrDied;
		}

	DMemoryMapping* mapping = NULL;
	DShBufMapping* m = NULL;
	TInt r = MM::MappingNew(mapping, MM::RoundToPageCount(iBufGap), osAsid, flags);

	if (r == KErrNone)
		{
		m = new DShBufMapping;

		if (m)
			{
			m->iMapping = mapping;
			m->iOsAsid = osAsid;
			}
		else
			{
			MM::MappingDestroy(mapping);
			r = KErrNoMemory;
			}
		}

	// Close the reference on the os asid: while aMapping is valid the
	// os asid must remain valid too.
	aProcess->CloseOsAsid();

	aMapping = m;
	__KTRACE_OPT(KMMU2, Kern::Printf("DMemModelAlignedShPool::MappingNew returns 0x%08x,%d",aMapping,r));
	return r;
	}

TInt DMemModelAlignedShPool::AddToProcess(DProcess* aProcess, TUint aAttr)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("Adding DMemModelAlignedShPool %O to process %O",this,aProcess));
	TInt r = KErrNone;

	Kern::MutexWait(*iProcessLock);

	LockPool();
	DShPoolClient* client = reinterpret_cast<DShPoolClient*>(iClientMap->Find(reinterpret_cast<TUint>(aProcess)));
	UnlockPool();

	if (!client)
		{
		client = new DMemModelAlignedShPoolClient;
		if (client)
			{
			client->iFlags = aAttr;
			r = iClientMap->Add(reinterpret_cast<TUint>(aProcess), client);

			if (r == KErrNone)
				{
				if (aProcess != K::TheKernelProcess)
					{
					r = aProcess->iHandles.Reserve(iTotalBuffers);

					if (r != KErrNone)
						{
						iClientMap->Remove(reinterpret_cast<TUint>(aProcess));
						}
					}
				}
			if (r != KErrNone)
				{
				delete client;
				r = KErrNoMemory;
				}
			}
		else
			{
			r = KErrNoMemory;
			}
		}
	else
		{
		LockPool();
		client->iAccessCount++;
		UnlockPool();
		}

	Kern::MutexSignal(*iProcessLock);

	return r;
	}

DMemModelAlignedShPool::DMemModelAlignedShPool() :	DMemModelShPool()
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelAlignedShPool::DMemModelAlignedShPool"));
	}

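// Return a buffer to the pool. If the buffer's memory is still mapped in some
// process it is parked on iPendingList; UpdateFreeList() (run from the
// management DFC) wipes and recycles it once the last mapping has gone.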
void DMemModelAlignedShPool::Free(DShBuf* aBuf)
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelAlignedShPool::Free (aBuf = 0x%08x)", aBuf));

	LockPool();
#ifdef _DEBUG
	// Remove from allocated list
	aBuf->iObjLink.Deque();
#endif

	DMemModelAlignedShBuf* buf = reinterpret_cast<DMemModelAlignedShBuf*>(aBuf);

	if (MM::MemoryIsNotMapped(buf->iMemoryObject))
		{
		UnlockPool(); // have to release fast mutex
		MM::MemoryWipe(buf->iMemoryObject);
		LockPool();

		// we want to put the initial buffers at the head of the free list
		// and the grown buffers at the tail as this makes shrinking more efficient
		if (aBuf >= iInitialBuffersArray && aBuf < (iInitialBuffersArray + iInitialBuffers))
			{
			iFreeList.AddHead(&aBuf->iObjLink);
			}
		else
			{
			iFreeList.Add(&aBuf->iObjLink);
			}
		++iFreeBuffers;
#ifdef _DEBUG
		--iAllocatedBuffers;
#endif
		}
	else
		{
		iPendingList.Add(&aBuf->iObjLink);
		}

	iPoolFlags &= ~EShPoolSuppressShrink;		// Allow shrinking again, if it was blocked
	UnlockPool();

	// queue ManagementDfc which completes notifications as appropriate
	if (HaveWorkToDo())
		KickManagementDfc();

	DShPool::Close(NULL); // decrement pool reference count
	}

TInt DMemModelAlignedShPool::UpdateFreeList()
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelAlignedShPool::UpdateFreeList"));

	LockPool();
	SDblQueLink* pLink = iPendingList.First();
	UnlockPool();

	SDblQueLink* anchor = &iPendingList.iA;

	while (pLink != anchor)
		{
		DMemModelAlignedShBuf* buf = _LOFF(pLink, DMemModelAlignedShBuf, iObjLink);
		LockPool();
		pLink = pLink->iNext;
		UnlockPool();

		if (MM::MemoryIsNotMapped(buf->iMemoryObject))
			{
			LockPool();
			buf->iObjLink.Deque();
			UnlockPool();

			MM::MemoryWipe(buf->iMemoryObject);

			LockPool();
			if (buf >= iInitialBuffersArray && buf < (iInitialBuffersArray + iInitialBuffers))
				{
				iFreeList.AddHead(&buf->iObjLink);
				}
			else
				{
				iFreeList.Add(&buf->iObjLink);
				}
			++iFreeBuffers;
#ifdef _DEBUG
			--iAllocatedBuffers;
#endif
			UnlockPool();
			}
		}

	__KTRACE_OPT(KMMU, Kern::Printf("<DMemModelAlignedShPool::UpdateFreeList"));
	return KErrNone;
	}

DMemModelAlignedShPool::~DMemModelAlignedShPool()
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelAlignedShPool::~DMemModelAlignedShPool"));
	}

TInt DMemModelAlignedShPool::DoCreate(TShPoolCreateInfo& aInfo)
	{

	TUint64 maxSize64 = static_cast<TUint64>(aInfo.iInfo.iMaxBufs) * static_cast<TUint64>(iBufGap);

	if (maxSize64 > static_cast<TUint64>(KMaxTInt) || maxSize64 <= static_cast<TUint64>(0))
		return KErrArgument;

	iMaxPages = MM::RoundToPageCount(static_cast<TInt>(maxSize64));

	return KErrNone;
	}

TInt DMemModelAlignedShPool::DestroyAllMappingsAndReservedHandles(DProcess* aProcess)
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelAlignedShPool::DestroyAllMappingsAndReservedHandles(0x%08x)", aProcess));

	TInt r = KErrNone;
	Kern::MutexWait(*iProcessLock);
	DMemModelAlignedShPoolClient* client = reinterpret_cast<DMemModelAlignedShPoolClient*>(iClientMap->Remove(reinterpret_cast<TUint>(aProcess)));

	__NK_ASSERT_DEBUG(client);
	__NK_ASSERT_DEBUG(client->iAccessCount == 0);

	DestroyMappings(client, KMaxTInt);
	delete client;

	if (aProcess != K::TheKernelProcess)
		{
		// Remove reserved handles
		r = aProcess->iHandles.Reserve(-iTotalBuffers);
		}

	Kern::MutexSignal(*iProcessLock);

	__KTRACE_OPT(KMMU, Kern::Printf("<DMemModelAlignedShPool::DestroyAllMappingsAndReservedHandles(0x%08x)", aProcess));

	return r;
	}

TInt DMemModelAlignedShPool::DestroyMappings(DMemModelAlignedShPoolClient* aClient, TInt aNoOfMappings)
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelAlignedShPool::DestroyMappings(0x%08x)", aClient));

	TInt r = KErrNone;
	TInt i = 0;

	DShBufMapping* m = NULL;
	SDblQueLink* pLink = NULL;

	while (i < aNoOfMappings && !aClient->iMappingFreeList.IsEmpty())
		{
		LockPool();
		pLink = aClient->iMappingFreeList.GetFirst();
		UnlockPool();

		if (pLink == NULL)
			break;

		m = _LOFF(pLink, DShBufMapping, iObjLink);
		__KTRACE_OPT(KMMU2, Kern::Printf("DMemModelAlignedShPool::DestroyMappings delete 0x%08x",m));
		MM::MappingClose(m->iMapping);
		delete m;
		++i;
		}

	__KTRACE_OPT(KMMU, Kern::Printf("<DMemModelAlignedShPool::DestroyMappings"));

	return r;
	}


TInt DMemModelAlignedShPool::CreateMappings(DMemModelAlignedShPoolClient* aClient, TInt aNoOfMappings, DMemModelProcess* aProcess)
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelAlignedShPool::CreateMappings"));

	__ASSERT_MUTEX(iProcessLock);

	TInt r = KErrNone;

	for (TInt i = 0; i < aNoOfMappings; ++i)
		{
		DShBufMapping* mapping;
		r = MappingNew(mapping, aProcess);
		if (r == KErrNone)
			{
			LockPool();
			aClient->iMappingFreeList.AddHead(&mapping->iObjLink);
			UnlockPool();
			}
		else
			{
			r = KErrNoMemory;
			break;
			}
		}

	return r;
	}

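// Adjust every client's handle reservation (and, for variable-window clients,
// their cached mappings) after the pool grows or shrinks by aNoOfBuffers.
// On a grow failure the iterator is reset and the second loop walks only the
// entries already processed, undoing their reservations.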
TInt DMemModelAlignedShPool::UpdateMappingsAndReservedHandles(TInt aNoOfBuffers)
	{
	__KTRACE_OPT(KMMU2, Kern::Printf(">DMemModelAlignedShPool::UpdateMappingsAndReservedHandles(0x%08x)", aNoOfBuffers));

	SMap::TIterator iter(*iClientMap);
	SMap::TEntry* entry;
	SMap::TEntry* lastEntry = NULL;
	DMemModelProcess* pP;
	DMemModelAlignedShPoolClient* client;
	TInt result = KErrNone;

	Kern::MutexWait(*iProcessLock);

	// First handle the case of increasing allocation
	if (aNoOfBuffers > 0)
		while ((entry = iter.Next()) != lastEntry)
			{
			// Try to update handle reservation; skip if process is null or has gone away
			client = (DMemModelAlignedShPoolClient*)(entry->iObj);
			pP = (DMemModelProcess*)(entry->iKey);
			if (!pP)
				continue;
			TInt r = pP->iHandles.Reserve(aNoOfBuffers);
			if (r)
				__KTRACE_OPT(KMMU2, Kern::Printf("?DMemModelAlignedShPool::UpdateMappingsAndReservedHandles(0x%08x) Reserve failed %d", aNoOfBuffers, r));
			if (r == KErrDied)
				continue;

			if (r == KErrNone && client->iWindowSize <= 0)
				{
				// A positive window size means the number of mappings is fixed, so we don't need to reserve more.
				// But here zero or negative means a variable number, so we need to create extra mappings now.
				r = CreateMappings(client, aNoOfBuffers, pP);
				if (r != KErrNone)
					{
					__KTRACE_OPT(KMMU2, Kern::Printf("?DMemModelAlignedShPool::UpdateMappingsAndReservedHandles(0x%08x) CreateMappings failed %d", aNoOfBuffers, r));
					pP->iHandles.Reserve(-aNoOfBuffers); // Creation failed, so release the handles reserved above
					}
				}

			if (r != KErrNone)
				{
				// Some problem; cleanup as best we can by falling into the loop below to undo what we've done
				result = r;
				iter.Reset();
				lastEntry = entry;
				aNoOfBuffers = -aNoOfBuffers;
				break;
				}
			}

	// Now handle the case of decreasing allocation; also used for recovery from errors, in which case
	// this loop iterates only over the elements that were *successfully* processed by the loop above
	if (aNoOfBuffers < 0)
		while ((entry = iter.Next()) != lastEntry)
			{
			// Try to update handle reservation; skip if process is null or has gone away
			client = (DMemModelAlignedShPoolClient*)(entry->iObj);
			pP = (DMemModelProcess*)(entry->iKey);
			if (!pP)
				continue;
			TInt r = pP->iHandles.Reserve(aNoOfBuffers);
			if (r == KErrDied)
				continue;

			if (r == KErrNone && client->iWindowSize <= 0)
				r = DestroyMappings(client, -aNoOfBuffers);
			// De-allocation by Reserve(-n) and/or DestroyMappings() should never fail
			if (r != KErrNone)
				Kern::PanicCurrentThread(KLitDMemModelAlignedShPool, r);
			}

	Kern::MutexSignal(*iProcessLock);

	__KTRACE_OPT(KMMU2, Kern::Printf("<DMemModelAlignedShPool::UpdateMappingsAndReservedHandles(0x%08x) returning %d", aNoOfBuffers, result));
	return result;
	}

TInt DMemModelAlignedShPool::DeleteInitialBuffers()
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelAlignedShPool::DeleteInitialBuffers"));

	if (iInitialBuffersArray != NULL)
		{
		for (TUint i = 0; i < iInitialBuffers; i++)
			{
			iInitialBuffersArray[i].iObjLink.Deque(); // remove from free list
			iInitialBuffersArray[i].Dec();
			iInitialBuffersArray[i].~DMemModelAlignedShBuf();
			}
		}

	Kern::Free(iInitialBuffersArray);
	iInitialBuffersArray = NULL;

	return KErrNone;
	}

TInt DMemModelAlignedShPool::Close(TAny* aPtr)
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelAlignedShPool::Close(0x%08x)", aPtr));

	if (aPtr)
		{
		DProcess* pP = reinterpret_cast<DProcess*>(aPtr);

		CloseClient(pP);
		}
	__KTRACE_OPT(KMMU, Kern::Printf("<DMemModelAlignedShPool::Close(0x%08x)", aPtr));
	return DShPool::Close(aPtr);
	}

TInt DMemModelAlignedShPool::CreateInitialBuffers()
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelAlignedShPool::CreateInitialBuffers"));

	iInitialBuffersArray = reinterpret_cast<DMemModelAlignedShBuf*>(Kern::Alloc(iInitialBuffers * sizeof(DMemModelAlignedShBuf)));

	if (iInitialBuffersArray == NULL)
		return KErrNoMemory;

	for (TUint i = 0; i < iInitialBuffers; i++)
		{
		// always use kernel linear address in DShBuf
		DMemModelAlignedShBuf *buf = new (&iInitialBuffersArray[i]) DMemModelAlignedShBuf(this);
		TInt r = buf->Construct();

		if (r == KErrNone)
			{
			iFreeList.Add(&buf->iObjLink);
			}
		else
			{
			iInitialBuffers = i;
			return KErrNoMemory;
			}
		}

	iFreeBuffers  = iInitialBuffers;
	iTotalBuffers = iInitialBuffers;
	return KErrNone;
	}


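// Grow the pool by a fraction of its current size. iGrowByRatio is treated as
// a fixed-point fraction by mult_fx248() -- by its name, 24.8 fixed point,
// i.e. roughly grow = (iTotalBuffers * iGrowByRatio) >> 8. That reading is an
// assumption from the generic DShPool code, not verified here.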
TInt DMemModelAlignedShPool::GrowPool()
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelAlignedShPool::GrowPool()"));
	TInt r = KErrNone;
	SDblQue temp;

	Kern::MutexWait(*iProcessLock);

	TUint32 headroom = iMaxBuffers - iTotalBuffers;

	// How many buffers to grow by?
	TUint32 grow = mult_fx248(iTotalBuffers, iGrowByRatio);
	if (grow == 0)			// Handle round-to-zero
		grow = 1;
	if (grow > headroom)
		grow = headroom;

	TUint i;
	for (i = 0; i < grow; ++i)
		{
		DMemModelAlignedShBuf *buf = new DMemModelAlignedShBuf(this);

		if (buf == NULL)
			{
			r = KErrNoMemory;
			break;
			}

		r = buf->Construct();

		if (r != KErrNone)
			{
			buf->DObject::Close(NULL);
			break;
			}

		temp.Add(&buf->iObjLink);
		}

	r = UpdateMappingsAndReservedHandles(i);

	if (r == KErrNone)
		{
		LockPool();
		iFreeList.MoveFrom(&temp);
		iFreeBuffers += i;
		iTotalBuffers += i;
		UnlockPool();
		}
	else
		{
		// couldn't create the mappings or reserve the handles, so we have
		// no choice but to delete the buffers
		SDblQueLink *pLink;
		while ((pLink = temp.GetFirst()) != NULL)
			{
			DShBuf* buf = _LOFF(pLink, DShBuf, iObjLink);
			buf->DObject::Close(NULL);
			}
		}

	CalculateGrowShrinkTriggers();

	Kern::MutexSignal(*iProcessLock);

	__KTRACE_OPT(KMMU, Kern::Printf("<DMemModelAlignedShPool::GrowPool()"));
	return r;
	}

TInt DMemModelAlignedShPool::ShrinkPool()
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelAlignedShPool::ShrinkPool()"));

	Kern::MutexWait(*iProcessLock);

	TUint32 grownBy = iTotalBuffers - iInitialBuffers;

	// How many buffers to shrink by?
	TUint32 shrink = mult_fx248(iTotalBuffers, iShrinkByRatio);
	if (shrink == 0)		// Handle round-to-zero
		shrink = 1;
	if (shrink > grownBy)
		shrink = grownBy;
	if (shrink > iFreeBuffers)
		shrink = iFreeBuffers;

	// work backwards as the grown buffers should be at the back
	TUint i;
	for (i = 0; i < shrink; i++)
		{
		LockPool();

		if (iFreeList.IsEmpty())
			{
			UnlockPool();
			break;
			}

		DShBuf* buf = _LOFF(iFreeList.Last(), DShBuf, iObjLink);

		// can't delete initial buffers
		if (buf >= iInitialBuffersArray && buf < (iInitialBuffersArray + iInitialBuffers))
			{
			UnlockPool();
			break;
			}

		buf->iObjLink.Deque();
		--iFreeBuffers;
		--iTotalBuffers;
		UnlockPool();
		buf->DObject::Close(NULL);
		}

	TInt r = UpdateMappingsAndReservedHandles(-i);

	// If we couldn't shrink the pool by this many buffers, wait until we Free() another
	// buffer before trying to shrink again.
	if (i < shrink)
		iPoolFlags |= EShPoolSuppressShrink;

	CalculateGrowShrinkTriggers();

	Kern::MutexSignal(*iProcessLock);

	__KTRACE_OPT(KMMU, Kern::Printf("<DMemModelAlignedShPool::ShrinkPool()"));
	return r;
	}

// Kernel side API
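// A minimal kernel-side usage sketch (names taken from this file only):
//
//   DShBuf* buf;
//   if (pool->Alloc(buf) == KErrNone)
//       {
//       TUint8* p = buf->Base();  // kernel linear address, if mapped into the kernel process
//       // ... fill the buffer ...
//       buf->Close(NULL);         // release the reference when done
//       }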
TInt DMemModelAlignedShPool::Alloc(DShBuf*& aShBuf)
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelAlignedShPool::Alloc (DShBuf)"));

	TInt r = KErrNoMemory;
	aShBuf = NULL;

	LockPool();

	if (!iFreeList.IsEmpty())
		{
		aShBuf = _LOFF(iFreeList.GetFirst(), DShBuf, iObjLink);

#ifdef _DEBUG
		iAllocated.Add(&aShBuf->iObjLink);
		iAllocatedBuffers++;
#endif
		--iFreeBuffers;
		Open(); // increment pool reference count
		r = KErrNone;
		}

	UnlockPool();

	if (HaveWorkToDo())
		KickManagementDfc();

	__KTRACE_OPT(KMMU, Kern::Printf("<DMemModelAlignedShPool::Alloc return buf = 0x%08x", aShBuf));
	return r;
	}

DMemModelNonAlignedShBuf::DMemModelNonAlignedShBuf(DShPool* aPool, TLinAddr aRelAddr) : DShBuf(aPool, aRelAddr)
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelNonAlignedShBuf::DMemModelNonAlignedShBuf()"));
	}

DMemModelNonAlignedShBuf::~DMemModelNonAlignedShBuf()
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelNonAlignedShBuf::~DMemModelNonAlignedShBuf()"));
	}

TInt DMemModelNonAlignedShBuf::Close(TAny* aPtr)
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelNonAlignedShBuf::Close(0x%08x)", aPtr));

	if (aPtr)
		{
		DProcess* pP = reinterpret_cast<DProcess*>(aPtr);

		// there are no per-buffer resources for kernel clients of non-aligned buffers
		if (pP != K::TheKernelProcess)
			iPool->CloseClient(pP);
		}

	return DShBuf::Close(aPtr);
	}

TInt DMemModelNonAlignedShBuf::AddToProcess(DProcess* aProcess, TUint /* aAttr */)
	{
	__KTRACE_OPT(KMMU, Kern::Printf("Adding DMemModelShBuf %O to process %O", this, aProcess));
	TUint flags;

	return iPool->OpenClient(aProcess, flags);
	}


TUint8* DMemModelNonAlignedShBuf::Base(DProcess* aProcess)
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelNonAlignedShBuf::Base(0x%x)", aProcess));

	TUint8* base = reinterpret_cast<DMemModelNonAlignedShPool*>(iPool)->Base(aProcess) + (TUint)iRelAddress;

	return base;
	}

TUint8* DMemModelNonAlignedShBuf::Base()
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelNonAlignedShBuf::Base()"));

	TUint8* base = reinterpret_cast<DMemModelNonAlignedShPool*>(iPool)->Base();

	return base ? base + iRelAddress : NULL;
	}

TInt DMemModelNonAlignedShBuf::Map(TUint /* aMapAttr */, DProcess* /* aProcess */, TLinAddr& /* aBase */)
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelNonAlignedShBuf::Map()"));

	return KErrNotSupported;
	}

TInt DMemModelNonAlignedShBuf::UnMap(DProcess* /* aProcess */)
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelNonAlignedShBuf::UnMap()"));

	return KErrNotSupported;
	}

TInt DMemModelNonAlignedShBuf::Pin(TPhysicalPinObject* aPinObject, TBool aReadOnly, TPhysAddr& aAddress, TPhysAddr* aPages, TUint32& aMapAttr, TUint& aColour)
	{
	CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"DMemModelNonAlignedShBuf::Pin");

	DMemModelNonAlignedShPool* pool = reinterpret_cast<DMemModelNonAlignedShPool*>(iPool);

	NKern::ThreadEnterCS();

	TInt startPage = iRelAddress >> KPageShift;
	TInt lastPage = MM::RoundToPageCount(iRelAddress + Size());

	TInt pages = lastPage - startPage;

	if (!pages) pages++;

	TInt r = MM::PinPhysicalMemory(pool->iMemoryObject, (DPhysicalPinMapping*)aPinObject,
									startPage, pages, aReadOnly, aAddress, aPages, aMapAttr, aColour);

	// adjust physical address to start of the buffer
	if (r == KErrNone)
		{
		aAddress += (iRelAddress - (startPage << KPageShift));
		}
	NKern::ThreadLeaveCS();
	return r;
	}

DMemModelNonAlignedShPool::DMemModelNonAlignedShPool() : DMemModelShPool()
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelNonAlignedShPool::DMemModelNonAlignedShPool"));
	}

DMemModelNonAlignedShPool::~DMemModelNonAlignedShPool()
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelNonAlignedShPool::~DMemModelNonAlignedShPool"));

	MM::MemoryDestroy(iMemoryObject);

	delete iPagesMap;
	delete iBufMap;
	}

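// Non-aligned pools carve buffers out of a single memory object at iBufGap
// intervals. Two bitmaps track state: iBufMap marks which buffer slots are in
// use, and iPagesMap marks which pages of the memory object are currently
// committed.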
TInt DMemModelNonAlignedShPool::DoCreate(TShPoolCreateInfo& aInfo)
	{
	__KTRACE_OPT(KMMU, Kern::Printf("DMemModelNonAlignedShPool::DoCreate(%d, %d, %d)", aInfo.iInfo.iMaxBufs, iBufGap, iBufSize));

	TInt r;
	TUint64 maxSize64 = static_cast<TUint64>(aInfo.iInfo.iMaxBufs) * static_cast<TUint64>(iBufGap);

	if (maxSize64 > static_cast<TUint64>(KMaxTInt) || maxSize64 <= static_cast<TUint64>(0))
		return KErrArgument;

	TInt maxPages = MM::RoundToPageCount(static_cast<TInt>(maxSize64));

	iBufMap = TBitMapAllocator::New(aInfo.iInfo.iMaxBufs, (TBool)ETrue);
	if (iBufMap == NULL)
		return KErrNoMemory;

	iPagesMap = TBitMapAllocator::New(maxPages, (TBool)ETrue);
	if (iPagesMap == NULL)
		return KErrNoMemory;

	// Memory attributes
	TMemoryAttributes attr = EMemoryAttributeStandard;

	// Memory type
	TMemoryObjectType memoryType = (iPoolFlags & EShPoolPhysicalMemoryPool) ? EMemoryObjectHardware : EMemoryObjectUnpaged;

	// Memory flags
	TMemoryCreateFlags memoryFlags = EMemoryCreateDefault;	// Don't leave previous contents of memory

	// Now create the memory object
	r = MM::MemoryNew(iMemoryObject, memoryType, maxPages, memoryFlags, attr);
	if (r != KErrNone)
		return r;

	// Make sure we give the caller the number of buffers they were expecting
	iCommittedPages = MM::RoundToPageCount(iInitialBuffers * iBufGap);

	if (iPoolFlags & EShPoolPhysicalMemoryPool)
		{
		__KTRACE_OPT(KMMU, Kern::Printf("DMemModelNonAlignedShPool::DoCreate(iCommittedPages = 0x%08x, aInfo.iPhysAddr.iPhysAddrList = 0x%08x )", iCommittedPages, aInfo.iPhysAddr.iPhysAddrList));
		if (iPoolFlags & EShPoolContiguous)
			{
			r = MM::MemoryAddContiguous(iMemoryObject, 0, iCommittedPages, aInfo.iPhysAddr.iPhysAddr);
			}
		else
			{
			r = MM::MemoryAddPages(iMemoryObject, 0, iCommittedPages, aInfo.iPhysAddr.iPhysAddrList);
			}

		iMaxPages = iCommittedPages;
		}
	else
		{
		__KTRACE_OPT(KMMU, Kern::Printf("DMemModelNonAlignedShPool::DoCreate(iCommittedPages = %d, contig = %d)", iCommittedPages, iPoolFlags & EShPoolContiguous));

		if (iPoolFlags & EShPoolContiguous)
			{
			TPhysAddr paddr;
			r = MM::MemoryAllocContiguous(iMemoryObject, 0, iCommittedPages, 0, paddr);
			}
		else
			{
			r = MM::MemoryAlloc(iMemoryObject, 0, iCommittedPages);
			}

		iMaxPages = maxPages;
		}

	iPagesMap->Alloc(0, iCommittedPages);

	return r;
	}

TUint8* DMemModelNonAlignedShPool::Base(DProcess* aProcess)
	{
	TUint8 *base = 0;

	LockPool();
	DMemModelNonAlignedShPoolClient* client = reinterpret_cast<DMemModelNonAlignedShPoolClient*>(iClientMap->Find(reinterpret_cast<TUint>(aProcess)));

	__NK_ASSERT_DEBUG(client); // ASSERT because pool must be already opened in the clients address space
	__NK_ASSERT_DEBUG(client->iMapping); // ASSERT because non-aligned buffers are mapped by default in user space

	base = reinterpret_cast<TUint8*>(MM::MappingBase(client->iMapping));

	UnlockPool();

	return base;
	}

TInt DMemModelNonAlignedShPool::CreateInitialBuffers()
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelNonAlignedShPool::CreateInitialBuffers"));

	iInitialBuffersArray = reinterpret_cast<DMemModelNonAlignedShBuf*>(Kern::Alloc(iInitialBuffers * sizeof(DMemModelNonAlignedShBuf)));

	if (iInitialBuffersArray == NULL)
		return KErrNoMemory;

	TLinAddr offset = 0;
	for (TUint i = 0; i < iInitialBuffers; i++)
		{
		DMemModelNonAlignedShBuf *buf = new (&iInitialBuffersArray[i]) DMemModelNonAlignedShBuf(this, offset);
		TInt r = buf->Construct();

		if (r == KErrNone)
			{
			iFreeList.Add(&buf->iObjLink);
			}
		else
			{
			iInitialBuffers = i;
			return KErrNoMemory;
			}

		offset += iBufGap;
		}

	iFreeBuffers  = iInitialBuffers;
	iTotalBuffers = iInitialBuffers;
	iBufMap->Alloc(0, iInitialBuffers);

	return KErrNone;
	}

TInt DMemModelNonAlignedShPool::AddToProcess(DProcess* aProcess, TUint aAttr)
	{
	// Must be in critical section so we don't leak os asid references.
	__ASSERT_CRITICAL;
	__KTRACE_OPT(KMMU, Kern::Printf("Adding DMemModelShPool %O to process %O", this, aProcess));

	DMemoryMapping* mapping = NULL;

	TBool write = (TBool)EFalse;

	// map for user access; writeable only if the pool is writeable; never executable
	if (aAttr & EShPoolWriteable)
		write = (TBool)ETrue;

	TMappingPermissions perm = MM::MappingPermissions(ETrue,	// user
													  write,	// writeable
													  EFalse);	// execute

	TMappingCreateFlags mappingFlags = EMappingCreateDefault;

	DMemModelProcess* pP = reinterpret_cast<DMemModelProcess*>(aProcess);

	Kern::MutexWait(*iProcessLock);
	TInt r = KErrNone;

	LockPool();
	DMemModelNonAlignedShPoolClient* client = reinterpret_cast<DMemModelNonAlignedShPoolClient*>(iClientMap->Find(reinterpret_cast<TUint>(aProcess)));
	UnlockPool();

	if (!client)
		{
		client = new DMemModelNonAlignedShPoolClient;

		if (client)
			{
			// map non aligned pools in userside processes by default
			if (aAttr & EShPoolAutoMapBuf || pP != K::TheKernelProcess)
				{
				// Open a reference on the os asid so it doesn't get freed and reused.
				TInt osAsid = pP->TryOpenOsAsid();
				if (osAsid < 0)
					{// The process freed its os asid so can't create a new mapping.
					r = KErrDied;
					}
				else
					{
					r = MM::MappingNew(mapping, iMemoryObject, perm, osAsid, mappingFlags);
					// Close the reference as the mapping will be destroyed if the process dies.
					pP->CloseOsAsid();
					}

				if ((r == KErrNone) && (pP == K::TheKernelProcess))
					{
					iBaseAddress = MM::MappingBase(mapping);
					}
				}

			if (r == KErrNone)
				{
				client->iMapping = mapping;
				client->iFlags = aAttr;
				r = iClientMap->Add(reinterpret_cast<TUint>(aProcess), client);

				if (r == KErrNone)
					{
					if (pP != K::TheKernelProcess)
						{
						r = aProcess->iHandles.Reserve(iTotalBuffers);

						if (r != KErrNone)
							{
							iClientMap->Remove(reinterpret_cast<TUint>(aProcess));
							}
						}
					}

				if (r != KErrNone)
					{
					delete client;
					MM::MappingDestroy(mapping);
					}
				}
			else
				{
				delete client;
				}
			}
		else
			{
			r = KErrNoMemory;
			}
		}
	else
		{
		LockPool();
		client->iAccessCount++;
		UnlockPool();
		}

	Kern::MutexSignal(*iProcessLock);

	return r;
	}

TInt DMemModelNonAlignedShPool::DeleteInitialBuffers()
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelNonAlignedShPool::DeleteInitialBuffers"));

	if (iInitialBuffersArray != NULL)
		{
		for (TUint i = 0; i < iInitialBuffers; i++)
			{
			iInitialBuffersArray[i].iObjLink.Deque(); // remove from free list
			iInitialBuffersArray[i].Dec();
			iInitialBuffersArray[i].~DMemModelNonAlignedShBuf();
			}
		}

	Kern::Free(iInitialBuffersArray);
	iInitialBuffersArray = NULL;

	return KErrNone;
	}

TInt DMemModelNonAlignedShPool::DestroyAllMappingsAndReservedHandles(DProcess* aProcess)
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelNonAlignedShPool::DestroyAllMappingsAndReservedHandles(0x%08x)", aProcess));

	TInt r = KErrNone;
	Kern::MutexWait(*iProcessLock);
	DMemModelNonAlignedShPoolClient* client = reinterpret_cast<DMemModelNonAlignedShPoolClient*>(iClientMap->Remove(reinterpret_cast<TUint>(aProcess)));

	__NK_ASSERT_DEBUG(client);
	__NK_ASSERT_DEBUG(client->iAccessCount == 0);

	if (client->iMapping)
		{
		MM::MappingDestroy(client->iMapping);
		}
	delete client;

	if (aProcess != K::TheKernelProcess)
		{
		// Remove reserved handles
		r = aProcess->iHandles.Reserve(-(iTotalBuffers));
		}
	else
		{
		iBaseAddress = 0;
		}

	Kern::MutexSignal(*iProcessLock);

	__KTRACE_OPT(KMMU, Kern::Printf("<DMemModelNonAlignedShPool::DestroyAllMappingsAndReservedHandles(0x%08x)", aProcess));

	return r;
	}


TInt DMemModelNonAlignedShPool::Close(TAny* aPtr)
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelNonAlignedShPool::Close(0x%08x)", aPtr));

	if (aPtr)
		{
		DProcess* pP = reinterpret_cast<DProcess*>(aPtr);

		CloseClient(pP);
		}

	return DShPool::Close(aPtr);
	}

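// Decommit whole pages covered by the buffer at aOffset, but only where no
// neighbouring buffer still overlaps the first or last page. Worked example
// (illustrative numbers: iBufGap = 0xC00, 4KB pages): freeing buffer 1
// (bytes 0xC00-0x17FF, pages 0-1) may release page 0 only if buffer 0 is
// free, and page 1 only if buffer 2 is free.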
void DMemModelNonAlignedShPool::FreeBufferPages(TUint aOffset)
	{
	TLinAddr firstByte = aOffset;	// offset of first byte in buffer
	TLinAddr lastByte = firstByte+iBufGap-1;	// offset of last byte in buffer
	TUint firstPage = firstByte>>KPageShift;	// index of first page containing part of the buffer
	TUint lastPage = lastByte>>KPageShift;		// index of last page containing part of the buffer

	TUint firstBuffer = (firstByte&~KPageMask)/iBufGap; // index of first buffer which lies in firstPage
	TUint lastBuffer = (lastByte|KPageMask)/iBufGap;    // index of last buffer which lies in lastPage
	TUint thisBuffer = firstByte/iBufGap;				// index of the buffer to be freed

	// Ensure lastBuffer is within bounds (there may be room in the last
	// page for more buffers than we have allocated).
	if (lastBuffer >= iMaxBuffers)
		lastBuffer = iMaxBuffers-1;

	if(firstBuffer!=thisBuffer && iBufMap->NotFree(firstBuffer,thisBuffer-firstBuffer))
		{
		// first page has other allocated buffers in it,
		// so we can't free it and must move on to next one...
		if (firstPage >= lastPage)
			return;
		++firstPage;
		}

	if(lastBuffer!=thisBuffer && iBufMap->NotFree(thisBuffer+1,lastBuffer-thisBuffer))
		{
		// last page has other allocated buffers in it,
		// so we can't free it and must step back to previous one...
		if (lastPage <= firstPage)
			return;
		--lastPage;
		}

	if(firstPage<=lastPage)
		{
sl@0
  1551
		// we can free pages firstPage trough to lastPage...
sl@0
  1552
		TUint numPages = lastPage-firstPage+1;
sl@0
  1553
		iPagesMap->SelectiveFree(firstPage,numPages);
sl@0
  1554
		MM::MemoryLock(iMemoryObject);
sl@0
  1555
		MM::MemoryFree(iMemoryObject, firstPage, numPages);
sl@0
  1556
		MM::MemoryUnlock(iMemoryObject);
sl@0
  1557
		iCommittedPages -= numPages;
sl@0
  1558
		}
sl@0
  1559
	}
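
// Worked example (illustrative values only): with 4K pages (KPageShift == 12)
// and iBufGap == 0x1800, buffer 1 spans offsets 0x1800..0x2FFF, i.e. the
// second half of page 1 plus all of page 2. Freeing it while buffer 0 is
// still allocated decommits only page 2 (page 1 is shared with buffer 0);
// if buffer 0 is already free, pages 1 and 2 are both decommitted.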

TInt DMemModelNonAlignedShPool::GrowPool()
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelNonAlignedShPool::GrowPool()"));

	// Don't do anything with physical memory pools
	if (iPoolFlags & EShPoolPhysicalMemoryPool)
		return KErrNone;

	Kern::MutexWait(*iProcessLock);

	TUint32 headroom = iMaxBuffers - iTotalBuffers;

	// How many buffers to grow by?
	TUint32 grow = mult_fx248(iTotalBuffers, iGrowByRatio);
	if (grow == 0)			// Handle round-to-zero
		grow = 1;
	if (grow > headroom)
		grow = headroom;

	TInt r = KErrNone;
	SDblQue temp;

	TUint i;
	for (i = 0; i < grow; ++i)
		{
		TInt offset = iBufMap->Alloc();

		if (offset < 0)
			{
			r = KErrNoMemory;
			break;
			}

		offset *= iBufGap;

		TInt lastPage = (offset + iBufSize - 1) >> KPageShift;

		// Allocate one page at a time.
		for (TInt page = offset >> KPageShift; page <= lastPage; ++page)
			{
			// Is the page allocated?
			if (iPagesMap->NotAllocated(page, 1))
				{
				MM::MemoryLock(iMemoryObject);
				r = MM::MemoryAlloc(iMemoryObject, page, 1);
				MM::MemoryUnlock(iMemoryObject);

				if (r != KErrNone)
					{
					break;
					}

				++iCommittedPages;
				iPagesMap->Alloc(page, 1);
				}
			}

		if (r != KErrNone)
			{
			iBufMap->Free(offset / iBufGap);
			FreeBufferPages(offset);
			break;
			}

		DMemModelNonAlignedShBuf *buf = new DMemModelNonAlignedShBuf(this, offset);

		if (buf == NULL)
			{
			iBufMap->Free(offset / iBufGap);
			FreeBufferPages(offset);
			r = KErrNoMemory;
			break;
			}

		r = buf->Construct();

		if (r != KErrNone)
			{
			iBufMap->Free(offset / iBufGap);
			FreeBufferPages(offset);
			buf->DObject::Close(NULL);
			break;
			}

		temp.Add(&buf->iObjLink);
		}

	r = UpdateReservedHandles(i);

	if (r == KErrNone)
		{
		LockPool();
		iFreeList.MoveFrom(&temp);
		iFreeBuffers += i;
		iTotalBuffers += i;
		UnlockPool();
		}
	else
		{
		// couldn't reserve handles so have no choice but to
		// delete the buffers
		__KTRACE_OPT(KMMU, Kern::Printf("GrowPool failed with %d, deleting buffers", r));
		SDblQueLink *pLink;
		while ((pLink = temp.GetFirst()) != NULL)
			{
			DShBuf* buf = _LOFF(pLink, DShBuf, iObjLink);
			TLinAddr offset = buf->iRelAddress;
			iBufMap->Free(offset / iBufGap);
			FreeBufferPages(offset);
			buf->DObject::Close(NULL);
			}
		__KTRACE_OPT(KMMU, Kern::Printf("Buffers deleted"));
		}

	CalculateGrowShrinkTriggers();

	Kern::MutexSignal(*iProcessLock);

	__KTRACE_OPT(KMMU, Kern::Printf("<DMemModelNonAlignedShPool::GrowPool()"));
	return r;
	}
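
// mult_fx248() above looks like a fixed-point multiply with 8 fractional bits
// (a 24.8 format, judging by the name). Illustrative example with assumed
// values: a ratio of 0x1A (~0.1) on a 50-buffer pool gives 50 * 26 / 256 == 5,
// so the pool grows by five buffers per pass, clamped to the remaining
// headroom below iMaxBuffers.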

TInt DMemModelNonAlignedShPool::ShrinkPool()
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelNonAlignedShPool::ShrinkPool()"));

	// Don't do anything with physical memory pools
	if (iPoolFlags & EShPoolPhysicalMemoryPool)
		return KErrNone;

	Kern::MutexWait(*iProcessLock);

	TUint32 grownBy = iTotalBuffers - iInitialBuffers;

	// How many buffers to shrink by?
	TUint32 shrink = mult_fx248(iTotalBuffers, iShrinkByRatio);
	if (shrink == 0)		// Handle round-to-zero
		shrink = 1;
	if (shrink > grownBy)
		shrink = grownBy;
	if (shrink > iFreeBuffers)
		shrink = iFreeBuffers;

	TUint i;
	for (i = 0; i < shrink; ++i)
		{
		LockPool();

		if (iFreeList.IsEmpty())
			{
			UnlockPool();
			break;
			}

		// work from the back of the queue
		SDblQueLink *pLink = iFreeList.Last();

		DShBuf* pBuf = _LOFF(pLink, DShBuf, iObjLink);

		if (pBuf >= iInitialBuffersArray && pBuf < (iInitialBuffersArray + iInitialBuffers))
			{
			UnlockPool();
			break;
			}

		--iFreeBuffers;
		--iTotalBuffers;
		pLink->Deque();
		UnlockPool();

		TLinAddr offset = pBuf->iRelAddress;
		iBufMap->Free(offset / iBufGap);
		FreeBufferPages(offset);

		pBuf->DObject::Close(NULL);
		}

	UpdateReservedHandles(-(TInt)i);

	// If we couldn't shrink the pool by this many buffers, wait until we Free() another
	// buffer before trying to shrink again.
	if (i < shrink)
		iPoolFlags |= EShPoolSuppressShrink;

	CalculateGrowShrinkTriggers();

	Kern::MutexSignal(*iProcessLock);

	__KTRACE_OPT(KMMU, Kern::Printf("<DMemModelNonAlignedShPool::ShrinkPool()"));

	return KErrNone;
	}
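
// The shrink loop stops at the first initial buffer it meets at the tail of
// the free list: Free() below queues initial buffers at the head and grown
// buffers at the tail, so reaching an initial buffer means no grown buffers
// remain to be released.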

TInt DMemModelNonAlignedShPool::UpdateFreeList()
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelNonAlignedShPool::UpdateFreeList"));

	SDblQue temp;

	LockPool();
	while(!iAltFreeList.IsEmpty())
		{
		// sort a temporary list of 'n' objects with the lowest index first
		for (TInt n = 0; n < 8 && !iAltFreeList.IsEmpty(); ++n)
			{
			// bit of an assumption: let's assume that the lower indexes will be allocated
			// and freed first, and therefore will be nearer the front of the list
			DShBuf* buf = _LOFF(iAltFreeList.GetFirst(), DShBuf, iObjLink);

			SDblQueLink* anchor = reinterpret_cast<SDblQueLink*>(&temp);
			SDblQueLink* pLink = temp.Last();

			while (ETrue)
				{
				// traverse the list starting at the back
				if ((pLink != anchor) && (_LOFF(pLink, DShBuf, iObjLink)->iRelAddress > buf->iRelAddress))
					{
					pLink = pLink->iPrev;
					}
				else
					{
					buf->iObjLink.InsertAfter(pLink);
					break;
					}
				}
			}

		// now merge with the free list
		while(!temp.IsEmpty())
			{
			if (iFreeList.IsEmpty())
				{
				iFreeList.MoveFrom(&temp);
				break;
				}

			// working backwards with the highest index
			DShBuf* buf = _LOFF(temp.Last(), DShBuf, iObjLink);
			SDblQueLink* anchor = reinterpret_cast<SDblQueLink*>(&iFreeList);
			SDblQueLink* pLink = iFreeList.Last();

			while (!NKern::FMFlash(&iLock))
				{
				if ((pLink != anchor) && (_LOFF(pLink, DShBuf, iObjLink)->iRelAddress > buf->iRelAddress))
					{
					pLink = pLink->iPrev;
					}
				else
					{
					buf->iObjLink.Deque();
					buf->iObjLink.InsertAfter(pLink);
					// next buffer
					if (temp.IsEmpty())
						break;
					buf = _LOFF(temp.Last(), DShBuf, iObjLink);
					}
				}
			}
		NKern::FMFlash(&iLock);
		}
	UnlockPool();

	__KTRACE_OPT(KMMU, Kern::Printf("<DMemModelNonAlignedShPool::UpdateFreeList"));
	return KErrNone;
	}
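
// Note: the merge above works in batches of eight and uses NKern::FMFlash()
// so the pool's fast mutex can be surrendered to any waiter between steps;
// when FMFlash() reports that the lock was released and re-taken, the inner
// walk is abandoned (the lists may have changed meanwhile) and the outer loop
// starts over on whatever remains.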

void DMemModelNonAlignedShPool::Free(DShBuf* aBuf)
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelNonAlignedShPool::Free (aBuf = 0x%08x, aBuf->Base() 0x%08x)", aBuf, aBuf->iRelAddress));

	LockPool();
#ifdef _DEBUG
	// Remove from allocated list
	aBuf->iObjLink.Deque();
#endif

	// we want to put the initial buffers at the head of the free list
	// and the grown buffers at the tail as this makes shrinking more efficient
	if (aBuf >= iInitialBuffersArray && aBuf < (iInitialBuffersArray + iInitialBuffers))
		{
		iFreeList.AddHead(&aBuf->iObjLink);
		}
	else
		{
		iAltFreeList.Add(&aBuf->iObjLink);
		}

	++iFreeBuffers;
#ifdef _DEBUG
	--iAllocatedBuffers;
#endif
	iPoolFlags &= ~EShPoolSuppressShrink;		// Allow shrinking again, if it was blocked
	UnlockPool();

	// queue ManagementDfc which completes notifications as appropriate
	if (HaveWorkToDo())
		KickManagementDfc();

	DShPool::Close(NULL); // decrement pool reference count
	}

// Kernel side API
TInt DMemModelNonAlignedShPool::Alloc(DShBuf*& aShBuf)
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelNonAlignedShPool::Alloc (DShBuf)"));

	aShBuf = NULL;

	LockPool();

	if (!iFreeList.IsEmpty())
		{
		aShBuf = _LOFF(iFreeList.GetFirst(), DShBuf, iObjLink);
#ifdef _DEBUG
		iAllocated.Add(&aShBuf->iObjLink);
		iAllocatedBuffers++;
#endif
		}
	else
		{
		// try alternative free list
		if (!iAltFreeList.IsEmpty())
			{
			aShBuf = _LOFF(iAltFreeList.GetFirst(), DShBuf, iObjLink);
#ifdef _DEBUG
			iAllocated.Add(&aShBuf->iObjLink);
			iAllocatedBuffers++;
#endif
			}
		else
			{
			UnlockPool();
			KickManagementDfc(); // Try to grow
			return KErrNoMemory;
			}
		}

	--iFreeBuffers;
	Open(); // increment pool reference count

	UnlockPool();

	if (HaveWorkToDo())
		KickManagementDfc();

	__KTRACE_OPT(KMMU, Kern::Printf("<DMemModelNonAlignedShPool::Alloc return buf = 0x%08x", aShBuf));
	return KErrNone;
	}
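
// Minimal usage sketch for the kernel-side API above (hypothetical caller;
// 'pool' stands for any pool reference the caller already holds open):
//
//	DShBuf* buf;
//	TInt r = pool->Alloc(buf);	// takes a pool reference on success
//	if (r == KErrNone)
//		{
//		// ... use the buffer ...
//		buf->Close(NULL);		// returns it to the pool's free list
//		}
//	else
//		{
//		// KErrNoMemory: both free lists were empty; a management DFC has
//		// been kicked to grow the pool, so the caller may retry later.
//		}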