// Copyright (c) 2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32/memmodel/emul/win32/mshbuf.cpp
// Shareable Data Buffers

#include "memmodel.h"
#include <kernel/smap.h>

_LIT(KLitDWin32ShPool,"DWin32ShPool");
_LIT(KLitDWin32AlignedShPool,"DWin32AlignedShPool");
_LIT(KLitDWin32NonAlignedShPool,"DWin32NonAlignedShPool");

DWin32ShBuf::DWin32ShBuf(DShPool* aPool, TLinAddr aRelAddr) : DShBuf(aPool, aRelAddr)
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DWin32ShBuf::DWin32ShBuf()"));
	}

DWin32ShBuf::~DWin32ShBuf()
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DWin32ShBuf::~DWin32ShBuf()"));
	}

TUint8* DWin32ShBuf::Base(DProcess* aProcess)
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DWin32ShBuf::Base(0x%x)", aProcess));

	TUint8* base = reinterpret_cast<DWin32ShPool*>(iPool)->Base(aProcess) + (TUint)iRelAddress;

	return base;
	}

TUint8* DWin32ShBuf::Base()
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DWin32ShBuf::Base()"));

	TUint8* base = reinterpret_cast<DWin32ShPool*>(iPool)->Base() + (TUint)iRelAddress;

	return base;
	}

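// On the emulator a buffer occupies a fixed position inside the pool's single
// VirtualAlloc'd region, so Map()/UnMap() do not create or tear down any real
// mapping: for page-aligned pools they simply hand back the buffer's address
// and track the mapped state; for non-aligned pools they report KErrNotSupported.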
TInt DWin32ShBuf::Map(TUint /* aMapAttr */, DProcess* /* aProcess */, TLinAddr& aBase)
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DWin32ShBuf::Map()"));

	TInt r = KErrNotSupported;

	if (iPool->iPoolFlags & EShPoolPageAlignedBuffer)
		{
		if(iMapped)
			{
			r = KErrAlreadyExists;
			}
		else
			{
			aBase = reinterpret_cast<TUint>(reinterpret_cast<DWin32ShPool*>(iPool)->Base() + (TUint)iRelAddress);
			iMapped = ETrue;
			r = KErrNone;
			}
		}

	return r;
	}

TInt DWin32ShBuf::UnMap(DProcess* /* aProcess */)
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DWin32ShBuf::UnMap()"));

	TInt r = KErrNotSupported;

	if (iPool->iPoolFlags & EShPoolPageAlignedBuffer)
		{
		if(iMapped)
			{
			iMapped = EFalse;
			r = KErrNone;
			}
		else
			{
			r = KErrNotFound;
			}
		}

	return r;
	}

TInt DWin32ShBuf::AddToProcess(DProcess* aProcess, TUint /* aAttr */)
	{
	__KTRACE_OPT(KMMU, Kern::Printf("Adding DWin32ShBuf %O to process %O", this, aProcess));
	TUint flags;
	TInt r = KErrNone;

	if (aProcess != K::TheKernelProcess)
	    r = iPool->OpenClient(aProcess, flags);

	return r;
	}

TInt DWin32ShBuf::Close(TAny* aPtr)
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DWin32ShBuf::Close(0x%08x)", aPtr));

	if (aPtr)
		{
		DProcess* pP = reinterpret_cast<DProcess*>(aPtr);

		if (pP != K::TheKernelProcess)
		    iPool->CloseClient(pP);
		}

	return DShBuf::Close(aPtr);
	}

DWin32ShPool::DWin32ShPool()
  : DShPool()
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DWin32ShPool::DWin32ShPool"));
	}


DWin32ShPool::~DWin32ShPool()
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DWin32ShPool::~DWin32ShPool"));

	if (iWin32MemoryBase)
		{
		TUint64 maxSize = static_cast<TUint64>(iMaxBuffers) * static_cast<TUint64>(iBufGap);

		// We know that maxSize is less than KMaxTInt as we tested for this in DoCreate().
		VirtualFree(LPVOID(iWin32MemoryBase), (SIZE_T)maxSize, MEM_DECOMMIT);
		VirtualFree(LPVOID(iWin32MemoryBase), 0, MEM_RELEASE);
		MM::Wait();
		MM::FreeMemory += iWin32MemorySize;
		MM::Signal();
		}

	delete iBufMap;
	}

void DWin32ShPool::DestroyClientResources(DProcess* aProcess)
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DWin32ShPool::DestroyClientResources"));

	TInt r = DestroyHandles(aProcess);
	__NK_ASSERT_DEBUG((r == KErrNone) || (r == KErrDied));
	(void)r;		// Silence warnings
	}

TInt DWin32ShPool::DeleteInitialBuffers()
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DWin32ShPool::DeleteInitialBuffers"));

	if (iInitialBuffersArray != NULL)
		{
		for (TUint i = 0; i < iInitialBuffers; i++)
			{
			iInitialBuffersArray[i].iObjLink.Deque(); // remove from free list
			iInitialBuffersArray[i].Dec();
			iInitialBuffersArray[i].~DWin32ShBuf();
			}

		Kern::Free(iInitialBuffersArray);
		iInitialBuffersArray = NULL;
		}

	return KErrNone;
	}

TInt DWin32ShPool::DestroyHandles(DProcess* aProcess)
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DWin32ShPool::DestroyHandles(0x%08x)", aProcess));

	TInt r = KErrNone;
	Kern::MutexWait(*iProcessLock);
	DShPoolClient* client = reinterpret_cast<DShPoolClient*>(iClientMap->Remove(reinterpret_cast<TUint>(aProcess)));

	__NK_ASSERT_DEBUG(client);
	__NK_ASSERT_DEBUG(client->iAccessCount == 0);

	delete client;

	if (aProcess != K::TheKernelProcess)
		{
		// Remove reserved handles
		r = aProcess->iHandles.Reserve(-TInt(iTotalBuffers));
		}

	Kern::MutexSignal(*iProcessLock);

	return r;
	}


TInt DWin32ShPool::Close(TAny* aPtr)
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DWin32ShPool::Close(0x%08x)", aPtr));

	if (aPtr) // not NULL must be user side
		{
		DProcess* pP = reinterpret_cast<DProcess*>(aPtr);

		CloseClient(pP);
		}

	return DShPool::Close(aPtr);
	}

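// The initial buffers are constructed in-place in a single Kern::Alloc'd array
// (see DeleteInitialBuffers for the matching explicit destructor calls) and are
// linked onto the free list at consecutive iBufGap offsets within the pool.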
TInt DWin32ShPool::CreateInitialBuffers()
	{
	__KTRACE_OPT(KMMU,Kern::Printf(">DWin32ShPool::CreateInitialBuffers"));

	iInitialBuffersArray = reinterpret_cast<DWin32ShBuf*>(Kern::Alloc(iInitialBuffers * sizeof(DWin32ShBuf)));

	if (iInitialBuffersArray == NULL)
		return KErrNoMemory;

	TLinAddr offset = 0;
	for (TUint i = 0; i < iInitialBuffers; i++)
		{
		DWin32ShBuf *buf = new (&iInitialBuffersArray[i]) DWin32ShBuf(this, offset);
		TInt r = buf->Construct();

		if (r == KErrNone)
			{
			iFreeList.Add(&buf->iObjLink);
			}
		else
			{
			iInitialBuffers = i;
			return KErrNoMemory;
			}

		offset += iBufGap;
		}

	iFreeBuffers = iInitialBuffers;
	iTotalBuffers = iInitialBuffers;

	iBufMap->Alloc(0, iInitialBuffers);

	return KErrNone;
	}

TUint8* DWin32ShPool::Base()
	{
	return iWin32MemoryBase;
	}


TUint8* DWin32ShPool::Base(DProcess* /*aProcess*/)
	{
	return iWin32MemoryBase;
	}


TInt DWin32ShPool::AddToProcess(DProcess* aProcess, TUint aAttr)
	{
	__KTRACE_OPT(KEXEC, Kern::Printf("Adding DWin32ShPool %O to process %O", this, aProcess));

	TInt r = KErrNone;

	Kern::MutexWait(*iProcessLock);
	LockPool();
	DShPoolClient* client = reinterpret_cast<DShPoolClient*>(iClientMap->Find(reinterpret_cast<TUint>(aProcess)));
	UnlockPool();

	if (!client)
		{
		client = new DShPoolClient;

		if (client)
			{
			client->iFlags = aAttr;
			r = iClientMap->Add(reinterpret_cast<TUint>(aProcess), client);

			if (r == KErrNone)
				{
				if (aProcess != K::TheKernelProcess)
					{
					r = aProcess->iHandles.Reserve(iTotalBuffers);

					if (r != KErrNone)
						{
						iClientMap->Remove(reinterpret_cast<TUint>(aProcess));
						}
					}
				}

			if (r != KErrNone)
				{
				delete client;
				}
			}
		else
			{
			r = KErrNoMemory;
			}
		}
	else
		{
		LockPool();
		client->iAccessCount++;
		UnlockPool();
		}

	Kern::MutexSignal(*iProcessLock);

	return r;
	}

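// Base class creation step: reserve (but do not commit) a single contiguous
// Win32 address range big enough for iMaxBufs buffers spaced iBufGap apart,
// and create the buffer-index bitmap. Derived classes commit pages on top of
// this reservation as buffers are needed.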
TInt DWin32ShPool::DoCreate(TShPoolCreateInfo& aInfo)
	{
	TUint64 maxSize = static_cast<TUint64>(aInfo.iInfo.iMaxBufs) * static_cast<TUint64>(iBufGap);

	if (maxSize > static_cast<TUint64>(KMaxTInt))
		{
		return KErrArgument;
		}

	__KTRACE_OPT(KMMU,Kern::Printf("DWin32ShPool::DoCreate (maxSize = 0x%08x, iBufGap = 0x%08x)",
		static_cast<TInt>(maxSize), iBufGap));

	iWin32MemoryBase = (TUint8*) VirtualAlloc(NULL, (SIZE_T)maxSize, MEM_RESERVE, PAGE_READWRITE);
	if (iWin32MemoryBase == NULL)
		{
		return KErrNoMemory;
		}

	__KTRACE_OPT(KMMU,Kern::Printf("DWin32ShPool::DoCreate (iWin32MemoryBase = 0x%08x)", iWin32MemoryBase));

	iBufMap = TBitMapAllocator::New(aInfo.iInfo.iMaxBufs, (TBool)ETrue);
	if (iBufMap == NULL)
		{
		return KErrNoMemory;
		}

	return KErrNone;
	}


TBool DWin32ShPool::IsOpen(DProcess* /*aProcess*/)
	{
	// could we do some kind of check here?
	return (TBool)ETrue;
	}

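// Buffers released to the unordered "alternative" free list are merged back
// into the main free list here, in ascending address order, a batch of up to
// eight at a time; NKern::FMFlash() bounds how long the pool lock is held
// between batches.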
TInt DWin32ShPool::UpdateFreeList()
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DWin32ShPool::UpdateFreeList"));

	SDblQue temp;
	SDblQueLink* anchor = reinterpret_cast<SDblQueLink*>(&iFreeList);

	LockPool();
	while(!iAltFreeList.IsEmpty())
		{
		// sort a temporary list of 'n' objects with the lowest index first
		for (TInt n = 0; n < 8 && !iAltFreeList.IsEmpty(); ++n)
			{
			// bit of an assumption, let's assume that the lower indexes will be allocated and freed first
			// and therefore will be nearer the front of the list
			DShBuf* buf = _LOFF(iAltFreeList.GetFirst(), DShBuf, iObjLink);

			SDblQueLink* anchor = reinterpret_cast<SDblQueLink*>(&temp);
			SDblQueLink* pLink = temp.Last();

			for (;;)
				{
				// traverse the list starting at the back
				if ((pLink != anchor) && (_LOFF(pLink, DShBuf, iObjLink)->iRelAddress > buf->iRelAddress))
					{
					pLink = pLink->iPrev;
					}
				else
					{
					buf->iObjLink.InsertAfter(pLink);
					break;
					}
				}
			}

		// now merge with the free list
		while(!temp.IsEmpty())
			{
			if (iFreeList.IsEmpty())
				{
				iFreeList.MoveFrom(&temp);
				break;
				}

			// working backwards with the highest index
			DShBuf* buf = _LOFF(temp.Last(), DShBuf, iObjLink);
			SDblQueLink* pLink = iFreeList.Last();

			while (!NKern::FMFlash(&iLock))
				{
				if ((pLink != anchor) && (_LOFF(pLink, DShBuf, iObjLink)->iRelAddress > buf->iRelAddress))
					{
					pLink = pLink->iPrev;
					}
				else
					{
					buf->iObjLink.Deque();
					buf->iObjLink.InsertAfter(pLink);
					// next buffer
					if (temp.IsEmpty())
						break;
					buf = _LOFF(temp.Last(), DShBuf, iObjLink);
					}
				}
			}
		NKern::FMFlash(&iLock);
		}
	UnlockPool();

	__KTRACE_OPT(KMMU, Kern::Printf("<DWin32ShPool::UpdateFreeList"));
	return KErrNone;
	}

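// Frees a buffer back to the pool: the buffer memory is poisoned (debug) or
// cleared (release), initial buffers go to the head of the main free list
// while grown buffers go to the unordered alternative list, and the management
// DFC is kicked if a grow/shrink or notification is now due.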
void DWin32ShPool::Free(DShBuf* aBuf)
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DWin32ShPool::Free (aBuf = 0x%08x, aBuf->Base() 0x%08x)", aBuf, aBuf->Base()));

	TLinAddr newAddr = (TLinAddr)aBuf->Base();
#ifdef _DEBUG
	memset((TAny*)newAddr,0xde,aBuf->Size());
#else
	memclr((TAny*)newAddr,aBuf->Size());
#endif

	LockPool();
#ifdef _DEBUG
	// Remove from allocated list
	aBuf->iObjLink.Deque();
#endif
	// we want to put the initial buffers at the head of the free list
	// and the grown buffers at the tail as this makes shrinking more efficient
	if (aBuf >= iInitialBuffersArray && aBuf < (iInitialBuffersArray + iInitialBuffers))
		{
		iFreeList.AddHead(&aBuf->iObjLink);
		}
	else
		{
		iAltFreeList.Add(&aBuf->iObjLink);
		}

	++iFreeBuffers;
#ifdef _DEBUG
	--iAllocatedBuffers;
#endif
	iPoolFlags &= ~EShPoolSuppressShrink;		// Allow shrinking again, if it was blocked
	UnlockPool();

	// queue ManagementDfc which completes notifications as appropriate
	if (HaveWorkToDo())
		KickManagementDfc();

	Close(NULL); // decrement pool reference count
	}

// Kernel side API
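// Allocates a buffer from the main free list first, falling back to the
// alternative free list; the pool reference count is incremented for every
// buffer handed out and dropped again in Free().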
TInt DWin32ShPool::Alloc(DShBuf*& aShBuf)
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DWin32ShPool::Alloc (DShBuf)"));

	TInt r = KErrNoMemory;
	aShBuf = NULL;

	LockPool();

	if (!iFreeList.IsEmpty())
		{
		aShBuf = _LOFF(iFreeList.GetFirst(), DShBuf, iObjLink);
#ifdef _DEBUG
		iAllocated.Add(&aShBuf->iObjLink);
		iAllocatedBuffers++;
#endif
		--iFreeBuffers;
		Open(); // increment pool reference count
		r = KErrNone;
		}
	else
		{
		// try alternative free list
		if (!iAltFreeList.IsEmpty())
			{
			aShBuf = _LOFF(iAltFreeList.GetFirst(), DShBuf, iObjLink);
#ifdef _DEBUG
			iAllocated.Add(&aShBuf->iObjLink);
			iAllocatedBuffers++;
#endif
			--iFreeBuffers;
			Open(); // increment pool reference count
			r = KErrNone;
			}
		}

	UnlockPool();

	if (HaveWorkToDo())
		KickManagementDfc();

	__KTRACE_OPT(KMMU, Kern::Printf("<DWin32ShPool::Alloc return buf = 0x%08x", aShBuf));
	return r;
	}

DWin32AlignedShPool::DWin32AlignedShPool()
  : DWin32ShPool()
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DWin32AlignedShPool::DWin32AlignedShPool"));
	}


DWin32AlignedShPool::~DWin32AlignedShPool()
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DWin32AlignedShPool::~DWin32AlignedShPool"));
	}

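// Page-aligned pool creation: commit the memory for the initial buffers on top
// of the base class reservation. With guard pages enabled only iBufGap minus
// one page is committed per buffer, leaving an uncommitted guard page between
// adjacent buffers.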
TInt DWin32AlignedShPool::DoCreate(TShPoolCreateInfo& aInfo)
	{
	TInt r;
	// Create Chunk
	r = DWin32ShPool::DoCreate(aInfo);
	if (r != KErrNone)
		{
		return r;
		}

	if (iPoolFlags & EShPoolGuardPages)
		{
		TUint numOfBytes = iBufGap - MM::RamPageSize;
		iCommittedPages = MM::RoundToPageSize(iInitialBuffers * numOfBytes) >> MM::RamPageShift;

		for (TUint i = 0; i < iInitialBuffers; ++i)
			{
			TUint offset = iBufGap * i;

			MM::Wait();
			if (MM::Commit(reinterpret_cast<TLinAddr>(iWin32MemoryBase+offset), numOfBytes, 0xFF, EFalse) != KErrNone)
				{
				MM::Signal();
				return KErrNoMemory;
				}
			iWin32MemorySize += numOfBytes;

			MM::Signal();
			}

		iMaxPages = MM::RoundToPageSize(aInfo.iInfo.iMaxBufs * numOfBytes) >> MM::RamPageShift;
		}
	else
		{
		// Make sure we give the caller the number of buffers they were expecting
		iCommittedPages = MM::RoundToPageSize(iInitialBuffers * iBufGap) >> MM::RamPageShift;
		MM::Wait();
		if (MM::Commit(reinterpret_cast<TLinAddr>(iWin32MemoryBase), iCommittedPages << MM::RamPageShift, 0xFF, EFalse) != KErrNone)
			{
			MM::Signal();
			return KErrNoMemory;
			}
		iWin32MemorySize = iCommittedPages << MM::RamPageShift;

		MM::Signal();

		iMaxPages = MM::RoundToPageSize(aInfo.iInfo.iMaxBufs * iBufGap) >> MM::RamPageShift;
		}

	return r;
	}


TInt DWin32AlignedShPool::SetBufferWindow(DProcess* /*aProcess*/, TInt /*aWindowSize*/ )
	{
	return KErrNone;
	}

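// Grows the pool by roughly iGrowByRatio (a fixed-point fraction of the current
// total, at least one buffer, capped at iMaxBuffers): each new buffer gets its
// own committed region and a heap-allocated DWin32ShBuf, and the new buffers
// are only published to the free list once handle reservations for all client
// processes have been updated.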
TInt DWin32AlignedShPool::GrowPool()
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DWin32AlignedShPool::GrowPool()"));

	Kern::MutexWait(*iProcessLock);

	// How many bytes to commit for each new buffer (must be whole number of pages)
	TUint bytes = (iPoolFlags & EShPoolGuardPages) ? iBufGap - MM::RamPageSize : iBufGap;

	__ASSERT_DEBUG(!(bytes % MM::RamPageSize), Kern::PanicCurrentThread(KLitDWin32AlignedShPool, __LINE__));

	TInt pages = bytes >> MM::RamPageShift;

	TUint32 headroom = iMaxBuffers - iTotalBuffers;

	// How many buffers to grow by?
	TUint32 grow = mult_fx248(iTotalBuffers, iGrowByRatio);
	if (grow == 0)			// Handle round-to-zero
		grow = 1;
	if (grow > headroom)
		grow = headroom;

	TInt r = KErrNone;
	SDblQue temp;

	TUint i;
	for (i = 0; i < grow; ++i)
		{
		TInt offset = iBufMap->Alloc();

		if (offset < 0)
			{
			r = KErrNoMemory;
			break;
			}

		offset *= iBufGap;

		MM::Wait();
		if (MM::Commit(reinterpret_cast<TLinAddr>(iWin32MemoryBase+offset), bytes, 0xFF, EFalse) != KErrNone)
			{
			r = KErrNoMemory;
			}
		iWin32MemorySize += bytes;
		MM::Signal();

		if (r != KErrNone)
			{
			iBufMap->Free(offset / iBufGap);
			break;
			}

		DWin32ShBuf *buf = new DWin32ShBuf(this, offset);

		if (buf == NULL)
			{
			MM::Wait();
			MM::Decommit(reinterpret_cast<TLinAddr>(iWin32MemoryBase+offset), bytes);
			iWin32MemorySize -= bytes;
			MM::Signal();
			iBufMap->Free(offset / iBufGap);
			r = KErrNoMemory;
			break;
			}

		r = buf->Construct();

		if (r != KErrNone)
			{
			MM::Wait();
			MM::Decommit(reinterpret_cast<TLinAddr>(iWin32MemoryBase+offset), bytes);
			iWin32MemorySize -= bytes;
			MM::Signal();
			iBufMap->Free(offset / iBufGap);
			buf->DObject::Close(NULL);
			break;
			}

		iCommittedPages += pages;

		temp.Add(&buf->iObjLink);
		}

	r = UpdateReservedHandles(i);

	if (r == KErrNone)
		{
		LockPool();
		iFreeList.MoveFrom(&temp);
		iFreeBuffers += i;
		iTotalBuffers += i;
		UnlockPool();
		}
	else
		{
		// else delete buffers
		SDblQueLink *pLink;
		while ((pLink = temp.GetFirst()) != NULL)
			{
			DShBuf* buf = _LOFF(pLink, DShBuf, iObjLink);
			TLinAddr offset = buf->iRelAddress;
			iBufMap->Free(offset / iBufGap);
			MM::Wait();
			MM::Decommit(reinterpret_cast<TLinAddr>(iWin32MemoryBase+offset), bytes);
			iWin32MemorySize -= bytes;
			MM::Signal();
			iCommittedPages -= pages;
			buf->DObject::Close(NULL);
			}
		}

	CalculateGrowShrinkTriggers();

	Kern::MutexSignal(*iProcessLock);

	__KTRACE_OPT(KMMU, Kern::Printf("<DWin32AlignedShPool::GrowPool()"));
	return r;
	} // DWin32AlignedShPool::GrowPool

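// Shrinks the pool by roughly iShrinkByRatio, but only ever releases grown
// buffers (taken from the tail of the free list); the initial, in-place
// constructed buffers are never decommitted. If fewer buffers than requested
// could be released, shrinking is suppressed until the next Free().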
TInt DWin32AlignedShPool::ShrinkPool()
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DWin32AlignedShPool::ShrinkPool()"));

	Kern::MutexWait(*iProcessLock);

	// How many bytes were committed for each buffer (must be whole number of pages)
	TUint bytes = (iPoolFlags & EShPoolGuardPages) ? iBufGap - MM::RamPageSize : iBufGap;

	__ASSERT_DEBUG(!(bytes % MM::RamPageSize), Kern::PanicCurrentThread(KLitDWin32AlignedShPool, __LINE__));

	TInt pages = bytes >> MM::RamPageShift;

	// Grab pool stats
	TUint32 grownBy = iTotalBuffers - iInitialBuffers;

	// How many buffers to shrink by?
	TUint32 shrink = mult_fx248(iTotalBuffers, iShrinkByRatio);
	if (shrink == 0)		// Handle round-to-zero
		shrink = 1;
	if (shrink > grownBy)
		shrink = grownBy;
	if (shrink > iFreeBuffers)
		shrink = iFreeBuffers;

	// work backwards
	TUint i;
	for (i = 0; i < shrink; ++i)
		{
		LockPool();
		if (iFreeList.IsEmpty())
			{
			UnlockPool();
			break;
			}
		// work from the back of the queue
		SDblQueLink *pLink = iFreeList.Last();

		DShBuf* pBuf = _LOFF(pLink, DShBuf, iObjLink);

		if (pBuf >= iInitialBuffersArray && pBuf < (iInitialBuffersArray + iInitialBuffers))
			{
			UnlockPool();
			break;
			}

		--iFreeBuffers;
		--iTotalBuffers;
		pLink->Deque();
		iCommittedPages -= pages;
		UnlockPool();

		TLinAddr offset = pBuf->iRelAddress;

		iBufMap->Free(offset / iBufGap);

		MM::Wait();
		MM::Decommit(reinterpret_cast<TLinAddr>(iWin32MemoryBase+offset), iBufSize);
		iWin32MemorySize -= iBufSize;
		MM::Signal();
		pBuf->DObject::Close(NULL);
		}

	TInt r = UpdateReservedHandles(-(TInt)i);

	// If we couldn't shrink the pool by this many buffers, wait until we Free() another
	// buffer before trying to shrink again.
	if (i < shrink)
		iPoolFlags |= EShPoolSuppressShrink;

	CalculateGrowShrinkTriggers();

	Kern::MutexSignal(*iProcessLock);

	__KTRACE_OPT(KMMU, Kern::Printf("<DWin32AlignedShPool::ShrinkPool()"));
	return r;
	} // DWin32AlignedShPool::ShrinkPool

DWin32NonAlignedShPool::DWin32NonAlignedShPool()
  : DWin32ShPool()
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DWin32NonAlignedShPool::DWin32NonAlignedShPool"));
	}


DWin32NonAlignedShPool::~DWin32NonAlignedShPool()
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DWin32NonAlignedShPool::~DWin32NonAlignedShPool"));

	delete iPagesMap;
	}

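// Non-aligned pool creation: buffers can share pages, so in addition to the
// per-buffer bitmap a per-page bitmap (iPagesMap) tracks which pages of the
// reservation are currently committed. Physical-memory pools are not
// supported on the emulator.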
TInt DWin32NonAlignedShPool::DoCreate(TShPoolCreateInfo& aInfo)
	{
	// Create Chunk
	TInt r;

	r = DWin32ShPool::DoCreate(aInfo);

	if (r != KErrNone)
		{
		return r;
		}

	if (iPoolFlags & EShPoolPhysicalMemoryPool)
		{
		return KErrNotSupported;
		}
	else
		{
		// Make sure we give the caller the number of buffers they were expecting
		iCommittedPages = MM::RoundToPageSize(iInitialBuffers * iBufGap) >> MM::RamPageShift;

		MM::Wait();
		if (MM::Commit(reinterpret_cast<TLinAddr>(iWin32MemoryBase), iCommittedPages << MM::RamPageShift, 0xFF, EFalse) != KErrNone)
			{
			MM::Signal();
			return KErrNoMemory;
			}
		iWin32MemorySize = iCommittedPages << MM::RamPageShift;

		MM::Signal();
		iMaxPages = MM::RoundToPageSize(aInfo.iInfo.iMaxBufs * iBufGap) >> MM::RamPageShift;
		}

	iPagesMap = TBitMapAllocator::New(iMaxPages, (TBool)ETrue);

	if(!iPagesMap)
		{
		return KErrNoMemory;
		}

	iPagesMap->Alloc(0, iCommittedPages);
	return r;
	}

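// Decommits the pages spanned by the buffer at aOffset, but only those pages
// that are not also occupied by another still-allocated buffer (buffers in a
// non-aligned pool may share their first and last pages with neighbours).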
void DWin32NonAlignedShPool::FreeBufferPages(TUint aOffset)
	{
	TLinAddr firstByte = aOffset;	// offset of first byte in buffer
	TLinAddr lastByte = firstByte+iBufGap-1;	// offset of last byte in buffer
	TUint firstPage = firstByte>>MM::RamPageShift;	// index of first page containing part of the buffer
	TUint lastPage = lastByte>>MM::RamPageShift;		// index of last page containing part of the buffer

	TUint firstBuffer = (firstByte&~(MM::RamPageSize - 1))/iBufGap; // index of first buffer which lies in firstPage
	TUint lastBuffer = (lastByte|(MM::RamPageSize - 1))/iBufGap;    // index of last buffer which lies in lastPage
	TUint thisBuffer = firstByte/iBufGap;				// index of the buffer to be freed

	// Ensure lastBuffer is within bounds (there may be room in the last
	// page for more buffers than we have allocated).
	if (lastBuffer >= iMaxBuffers)
		lastBuffer = iMaxBuffers-1;

	if(firstBuffer!=thisBuffer && iBufMap->NotFree(firstBuffer,thisBuffer-firstBuffer))
		{
		// first page has other allocated buffers in it,
		// so we can't free it and must move on to next one...
		if (firstPage >= lastPage)
			return;
		++firstPage;
		}

	if(lastBuffer!=thisBuffer && iBufMap->NotFree(thisBuffer+1,lastBuffer-thisBuffer))
		{
		// last page has other allocated buffers in it,
		// so we can't free it and must step back to previous one...
		if (lastPage <= firstPage)
			return;
		--lastPage;
		}

	if(firstPage<=lastPage)
		{
		// we can free pages firstPage through to lastPage...
		TUint numPages = lastPage-firstPage+1;
		iPagesMap->SelectiveFree(firstPage,numPages);
		MM::Wait();
		MM::Decommit(reinterpret_cast<TLinAddr>(iWin32MemoryBase+(firstPage << MM::RamPageShift)), (numPages << MM::RamPageShift));
		iWin32MemorySize -= (numPages << MM::RamPageShift);
		MM::Signal();
		iCommittedPages -= numPages;
		}
	}

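// Grows the pool like the aligned variant, but commits memory page by page and
// only for pages not already committed on behalf of a neighbouring buffer
// (tracked in iPagesMap).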
TInt DWin32NonAlignedShPool::GrowPool()
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DWin32NonAlignedShPool::GrowPool()"));

	Kern::MutexWait(*iProcessLock);

	TUint32 headroom = iMaxBuffers - iTotalBuffers;

	// How many buffers to grow by?
	TUint32 grow = mult_fx248(iTotalBuffers, iGrowByRatio);
	if (grow == 0)			// Handle round-to-zero
		grow = 1;
	if (grow > headroom)
		grow = headroom;

	TInt r = KErrNone;
	SDblQue temp;

	TUint i;
	for (i = 0; i < grow; ++i)
		{
		TInt offset = iBufMap->Alloc();

		if (offset < 0)
			{
			r = KErrNoMemory;
			break;
			}

		offset *= iBufGap;

		TInt lastPage = (offset + iBufSize - 1) >> MM::RamPageShift;

		// Allocate one page at a time.
		for (TInt page = offset >> MM::RamPageShift; page <= lastPage; ++page)
			{
			// Is the page allocated?
			if (iPagesMap->NotAllocated(page, 1))
				{
				MM::Wait();
				if (MM::Commit(reinterpret_cast<TLinAddr>(iWin32MemoryBase+(page << MM::RamPageShift)), MM::RamPageSize, 0xFF, EFalse) != KErrNone)
					{
					MM::Signal();
					r = KErrNoMemory;
					break;
					}
				iWin32MemorySize += MM::RamPageSize;

				MM::Signal();
				++iCommittedPages;
				iPagesMap->Alloc(page, 1);
				}
			}

		if (r != KErrNone)
			{
			iBufMap->Free(offset / iBufGap);
			FreeBufferPages(offset);
			break;
			}

		DWin32ShBuf *buf = new DWin32ShBuf(this, offset);

		if (buf == NULL)
			{
			iBufMap->Free(offset / iBufGap);
			FreeBufferPages(offset);
			r = KErrNoMemory;
			break;
			}

		r = buf->Construct();

		if (r != KErrNone)
			{
			iBufMap->Free(offset / iBufGap);
			FreeBufferPages(offset);
			buf->DObject::Close(NULL);
			break;
			}

		temp.Add(&buf->iObjLink);
		}

	r = UpdateReservedHandles(i);

	if (r == KErrNone)
		{
		LockPool();
		iFreeList.MoveFrom(&temp);
		iFreeBuffers += i;
		iTotalBuffers += i;
		UnlockPool();
		}
	else
		{
		// couldn't reserve handles so have no choice but to
		// delete the buffers
		__KTRACE_OPT(KMMU, Kern::Printf("GrowPool failed with %d, deleting buffers", r));
		SDblQueLink *pLink;
		while ((pLink = temp.GetFirst()) != NULL)
			{
			DShBuf* buf = _LOFF(pLink, DShBuf, iObjLink);
			TLinAddr offset = buf->iRelAddress;
			iBufMap->Free(offset / iBufGap);
			FreeBufferPages(offset);
			buf->DObject::Close(NULL);
			}
		__KTRACE_OPT(KMMU, Kern::Printf("Buffers deleted"));
		}

	CalculateGrowShrinkTriggers();

	Kern::MutexSignal(*iProcessLock);

	__KTRACE_OPT(KMMU, Kern::Printf("<DWin32NonAlignedShPool::GrowPool()"));
	return r;
	} // DWin32NonAlignedShPool::GrowPool

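// Shrinks the pool by releasing grown buffers from the tail of the free list;
// FreeBufferPages() decommits only the pages no longer shared with other
// allocated buffers. Initial buffers are never released.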
TInt DWin32NonAlignedShPool::ShrinkPool()
	{
	__KTRACE_OPT(KMMU, Kern::Printf(">DWin32NonAlignedShPool::ShrinkPool()"));

	Kern::MutexWait(*iProcessLock);

	// Grab pool stats
	TUint32 grownBy = iTotalBuffers - iInitialBuffers;

	// How many buffers to shrink by?
	TUint32 shrink = mult_fx248(iTotalBuffers, iShrinkByRatio);
	if (shrink == 0)		// Handle round-to-zero
		shrink = 1;
	if (shrink > grownBy)
		shrink = grownBy;
	if (shrink > iFreeBuffers)
		shrink = iFreeBuffers;

	TUint i;
	for (i = 0; i < shrink; ++i)
		{
		LockPool();
		if (iFreeList.IsEmpty())
			{
			UnlockPool();
			break;
			}
		// work from the back of the queue
		SDblQueLink *pLink = iFreeList.Last();

		DShBuf* pBuf = _LOFF(pLink, DShBuf, iObjLink);

		if (pBuf >= iInitialBuffersArray && pBuf < (iInitialBuffersArray + iInitialBuffers))
			{
			UnlockPool();
			break;
			}

		--iFreeBuffers;
		--iTotalBuffers;
		pLink->Deque();
		UnlockPool();

		TLinAddr offset = pBuf->iRelAddress;

		iBufMap->Free(offset / iBufGap);
		FreeBufferPages(offset);
		pBuf->DObject::Close(NULL);
		}

	UpdateReservedHandles(-(TInt)i);

	// If we couldn't shrink the pool by this many buffers, wait until we Free() another
	// buffer before trying to shrink again.
	if (i < shrink)
		iPoolFlags |= EShPoolSuppressShrink;

	CalculateGrowShrinkTriggers();

	Kern::MutexSignal(*iProcessLock);

	__KTRACE_OPT(KMMU, Kern::Printf("<DWin32NonAlignedShPool::ShrinkPool()"));

	return KErrNone;
	} // DWin32NonAlignedShPool::ShrinkPool