os/kernelhwsrv/userlibandfileserver/fileserver/sfile/sf_cache_man.cpp
author sl
Tue, 10 Jun 2014 14:32:02 +0200
changeset 1 260cb5ec6c19
permissions -rw-r--r--
Update contrib.
sl@0
     1
// Copyright (c) 2006-2009 Nokia Corporation and/or its subsidiary(-ies).
sl@0
     2
// All rights reserved.
sl@0
     3
// This component and the accompanying materials are made available
sl@0
     4
// under the terms of the License "Eclipse Public License v1.0"
sl@0
     5
// which accompanies this distribution, and is available
sl@0
     6
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
sl@0
     7
//
sl@0
     8
// Initial Contributors:
sl@0
     9
// Nokia Corporation - initial contribution.
sl@0
    10
//
sl@0
    11
// Contributors:
sl@0
    12
//
sl@0
    13
// Description:
sl@0
    14
// f32\sfile\sf_cache_man.cpp
sl@0
    15
// 
sl@0
    16
//
sl@0
    17
sl@0
    18
/**
sl@0
    19
 @file
sl@0
    20
 @internalTechnology
sl@0
    21
*/
sl@0
    22
sl@0
    23
sl@0
    24
#include <e32std.h>
sl@0
    25
#include <e32std_private.h>
sl@0
    26
#include "sf_std.h"
sl@0
    27
#include <e32uid.h>
sl@0
    28
#include <e32wins.h>
sl@0
    29
#include <f32file.h>
sl@0
    30
#include <hal.h>
sl@0
    31
#include "sf_cache_man.h"
sl@0
    32
#include "sf_cache_client.h"
sl@0
    33
#include "sf_file_cache.h"
sl@0
    34
#include "sf_file_cache_defs.h"
sl@0
    35
sl@0
    36
//#undef __SIMULATE_LOCK_FAILURES__
sl@0
    37
//#define __RAMDOM_LOCK_FAILURES__
sl@0
    38
//#define __PSEUDO_RANDOM_FAILURES__		
sl@0
    39
//#define __LOCK_ENTIRE_CACHELINE__
sl@0
    40
sl@0
    41
#ifdef __RAMDOM_LOCK_FAILURES__	
sl@0
    42
#include <e32math.h>
sl@0
    43
#endif
sl@0
    44
sl@0
    45
#define CACHE_NUM_TO_ADDR(n) (iBase + (n << iCacheLineSizeLog2))
sl@0
    46
#define ADDR_TO_CACHELINE_ADDR(a) ((((a - iBase) >> iCacheLineSizeLog2) << iCacheLineSizeLog2) + iBase)
sl@0
    47
#define ADDR_TO_INDEX(a) ((a - iBase) >> iCacheLineSizeLog2)
sl@0
    48
sl@0
    49
#if defined(_DEBUG)
sl@0
    50
	#define __CACHELINE_INVARIANT(aCacheLine)						\
sl@0
    51
	__ASSERT_DEBUG(												\
sl@0
    52
		aCacheLine.iDirtySegments <= aCacheLine.iFilledSegments &&	\
sl@0
    53
		aCacheLine.iFilledSegments <= aCacheLine.iAllocatedSegments,	\
sl@0
    54
		Fault(EInvalidCacheLine));
sl@0
    55
sl@0
    56
#else
sl@0
    57
	#define __CACHELINE_INVARIANT(aCacheLine)
sl@0
    58
#endif
sl@0
    59
sl@0
    60
const TInt KSegmentSize = 4096;
sl@0
    61
const TInt KSegmentSizeLog2 = 12;
sl@0
    62
const TInt64 KSegmentSizeMask = MAKE_TINT64(0x7FFFFFFF,0xFFFFF000);
sl@0
    63
sl@0
    64
const TInt KCacheLineSizeLog2 = 17;	// 128K
sl@0
    65
const TInt KCacheLineSize = (1 << KCacheLineSizeLog2 );
sl@0
    66
const TInt KCacheLineSizeInSegments = (KCacheLineSize >> KSegmentSizeLog2);
sl@0
    67
sl@0
    68
CCacheManager* CCacheManagerFactory::iCacheManager = NULL;
sl@0
    69
sl@0
    70
sl@0
    71
LOCAL_C void Fault(TCacheManagerFault aFault)
//
// Report a fault in the cache manager: panics the current thread with
// category "FSCACHEMAN" and the supplied fault code.
//
	{
	User::Panic(_L("FSCACHEMAN"), aFault);
	}
sl@0
    78
sl@0
    79
sl@0
    80
/*
sl@0
    81
Indicates if a number passed in is a power of two
sl@0
    82
sl@0
    83
@param aNum number to be tested
sl@0
    84
@return Flag to indicate the result of the test
sl@0
    85
*/
sl@0
    86
LOCAL_C TBool IsPowerOfTwo(TInt aNum)
sl@0
    87
	{
sl@0
    88
	return (aNum != 0 && (aNum & -aNum) == aNum);
sl@0
    89
	}
sl@0
    90
sl@0
    91
sl@0
    92
//********************
sl@0
    93
// CCacheManagerFactory
sl@0
    94
//********************
sl@0
    95
void CCacheManagerFactory::CreateL()
sl@0
    96
	{
sl@0
    97
	__ASSERT_ALWAYS(iCacheManager == NULL, Fault(ECacheAlreadyCreated));
sl@0
    98
sl@0
    99
	if (TGlobalFileCacheSettings::Enabled())
sl@0
   100
		{
sl@0
   101
		iCacheManager = CCacheManager::NewCacheL(TGlobalFileCacheSettings::CacheSize());
sl@0
   102
		}
sl@0
   103
	else
sl@0
   104
		{
sl@0
   105
		Destroy(); 
sl@0
   106
		}
sl@0
   107
	}
sl@0
   108
sl@0
   109
// Return the singleton cache manager, or NULL if the global file cache
// is disabled (see CreateL()).
CCacheManager* CCacheManagerFactory::CacheManager()
	{
	return iCacheManager;
	}
sl@0
   113
sl@0
   114
// Delete the singleton cache manager (if any) and reset the factory.
// Always returns KErrNone.
TInt CCacheManagerFactory::Destroy()
	{
	delete iCacheManager;
	iCacheManager = NULL;
	return KErrNone;
	}
sl@0
   120
sl@0
   121
//********************
sl@0
   122
// CCacheManager
sl@0
   123
//********************
sl@0
   124
/**
Standard Symbian two-phase construction of the cache manager.

@param aCacheSize maximum size of the cache in bytes
@return the fully constructed cache manager
@leave  KErrNoMemory, or any error from ConstructL()
*/
CCacheManager* CCacheManager::NewCacheL(TInt aCacheSize)
	{
	CCacheManager* cacheManager = new (ELeave) CCacheManager(aCacheSize);

	CleanupStack::PushL(cacheManager);
	cacheManager->ConstructL();
	CleanupStack::Pop(1, cacheManager);

	return cacheManager;
	}
sl@0
   134
sl@0
   135
CCacheManager::~CCacheManager()
	{
	// Hold the manager lock while tearing down the queues and the
	// cacheline array so no client can race with destruction.
	CacheLock();

	iFreeQueue.Close();
	iUsedQueue.Close();

	delete [] iCacheLines;

	CacheUnlock();
	iLock.Close();

	// Release the RChunk backing the cache memory last.
	iChunk.Close();
	}
sl@0
   149
sl@0
   150
sl@0
   151
// First-phase constructor: validates the MMU page size and derives the
// segment/cacheline geometry. Panics if the page size is not the
// expected 4K.
CCacheManager::CCacheManager(TUint aCacheSize)
	{
	// Get the page size
	TInt pageSize;
	TInt r = HAL::Get(HAL::EMemoryPageSize, pageSize);
	if (r != KErrNone)
		pageSize = KSegmentSize;	// fall back to the assumed 4K page
	__ASSERT_ALWAYS(IsPowerOfTwo(pageSize), Fault(EIllegalPageSize));

	// For the time being we only a support page size of 4K and we assume
	// that page size = segment size, since the extra overhead of supporting 
	// variable page sizes now is non-trivial.
	//
	// If a processor with a different page size needs to be supported 
	// in the future, then we need to either rework this code to be able to
	// divide up pages into smaller segments or analyze the impact of having 
	// a different page size on performance.
	__ASSERT_ALWAYS(pageSize == KSegmentSize, Fault(EIllegalPageSize));

	iCacheLineSize = KCacheLineSize;
	iCacheLineSizeLog2 = KCacheLineSizeLog2;

	iCacheSize = aCacheSize;

	// number of segments making up one cacheline
	iSegmentsPerCacheLine = 1 << (iCacheLineSizeLog2 - KSegmentSizeLog2);

	// global budget of locked segments, from the file-cache settings
	iMaxLockedSegments = TGlobalFileCacheSettings::MaxLockedSize() >> KSegmentSizeLog2;

#ifdef __SIMULATE_LOCK_FAILURES__
	iSimulateLockFailureMode = ETrue;
#endif
	}
sl@0
   183
sl@0
   184
// Second-phase constructor: computes the low-memory threshold, creates
// the backing RChunk and the manager mutex, and builds the cacheline
// array with every cacheline initially on the free queue.
void CCacheManager::ConstructL()
	{

	// calculate the low-memory threshold below which we fail any attempt to allocate memory
	TMemoryInfoV1Buf meminfo;
	TInt r = UserHal::MemoryInfo(meminfo);
	__ASSERT_ALWAYS(r==KErrNone,Fault(EMemoryInfoFailed));

	// threshold is a percentage of total RAM
	iLowMemoryThreshold = (meminfo().iTotalRamInBytes * TGlobalFileCacheSettings::LowMemoryThreshold()) / 100;
	__CACHE_PRINT4(_L("CACHEMAN: totalRAM %d freeRAM %d KDefaultLowMemoryThresholdPercent %d iLowMemoryThreshold %d"), 
		meminfo().iTotalRamInBytes, meminfo().iFreeRamInBytes, KDefaultLowMemoryThreshold, iLowMemoryThreshold);
	__CACHE_PRINT1(_L("CACHEMAN: iCacheSize %d"), iCacheSize);
	// create the disconnected cache chunk that provides the cache memory
	TChunkCreateInfo createInfo;
	createInfo.SetCache(iCacheSize);
	createInfo.SetOwner(EOwnerProcess);
	r = iChunk.Create(createInfo);
	User::LeaveIfError(r);

	
	TInt mm = UserSvr::HalFunction(EHalGroupKernel, EKernelHalMemModelInfo, 0, 0) & EMemModelTypeMask;
	if (mm < EMemModelTypeFlexible)
		{
		// On memory models before flexible, File System has to register chunks that will be DMA-ed.
		// On flexible memory model, local media is using new physical pinning Kernel interface that
		// doesn't require registration of the chunk.
		UserSvr::RegisterTrustedChunk(iChunk.Handle());
		}
	

	__ASSERT_ALWAYS(iCacheSize > KSegmentSize, Fault(EIllegalCacheSize));

	// mutex serialising all cache-manager state changes
	r = iLock.CreateLocal();
	User::LeaveIfError(r);

	iNumOfCacheLines = iCacheSize >> iCacheLineSizeLog2;
	iBase = Base();

	iCacheLines = new (ELeave) TCacheLine[iNumOfCacheLines];

	// initialise every cacheline as empty/unowned and append it to the
	// free queue
	for (TInt n=0; n<iNumOfCacheLines; n++)
		{
		// coverity[var_decl]
		TCacheLine cacheLine;
		cacheLine.iAddr = CACHE_NUM_TO_ADDR(n);
		cacheLine.iAllocatedSegments = 0;
		cacheLine.iFilledSegments = 0;
		cacheLine.iDirtySegments = 0;
		cacheLine.iLockCount = 0;
		cacheLine.iLockedSegmentStart = 0;
		cacheLine.iLockedSegmentCount = 0;
		cacheLine.iClient = NULL;
		// coverity[uninit_use]
		iCacheLines[n] = cacheLine;

		r = iFreeQueue.Append(&iCacheLines[n]);
		User::LeaveIfError(r);
		}

	}
sl@0
   243
sl@0
   244
// Factory function: creates a new cache client attached to this manager.
CCacheClient* CCacheManager::CreateClientL()
	{
	return CCacheClient::NewL(*this);
	}
sl@0
   249
sl@0
   250
// Called when a cache client attaches; in debug builds maintains the
// count of files using the cache (the client itself is not stored).
void CCacheManager::RegisterClient(CCacheClient& /*aClient*/)
	{
#if defined(_DEBUG) || defined(_DEBUG_RELEASE)
	iStats.iFileCount++;
#endif
	}
sl@0
   256
sl@0
   257
// Called when a cache client detaches; in debug builds decrements the
// count of files using the cache.
void CCacheManager::DeregisterClient(CCacheClient& /*aClient*/)
	{
#if defined(_DEBUG) || defined(_DEBUG_RELEASE)
	iStats.iFileCount--;
#endif
	}
sl@0
   263
sl@0
   264
sl@0
   265
// Record the current low-memory state; while ETrue,
// AllocateAndLockCacheLine() fails early with KErrNoMemory.
void CCacheManager::FreeMemoryChanged(TBool aMemoryLow)
	{
	iMemoryLow = aMemoryLow;
	}
sl@0
   269
sl@0
   270
// ETrue when the global locked-segment budget (iMaxLockedSegments) has
// been reached or exceeded.
TBool CCacheManager::TooManyLockedSegments()
	{
	if (iLockedSegmentCount >= iMaxLockedSegments)
		return ETrue;
	return EFalse;
	}
sl@0
   274
sl@0
   275
// Size of one cache segment (== one MMU page) in bytes.
TInt CCacheManager::SegmentSize()
	{
	return KSegmentSize;
	}
sl@0
   279
sl@0
   280
// Log2 of the segment size (12, i.e. 4K segments).
TInt CCacheManager::SegmentSizeLog2()
	{
	return KSegmentSizeLog2;
	}
sl@0
   284
sl@0
   285
// 64-bit mask that rounds a media position down to a segment boundary.
TInt64 CCacheManager::SegmentSizeMask()
	{
	return KSegmentSizeMask;
	}
sl@0
   289
sl@0
   290
// Size of one cacheline in bytes (128K).
TInt CCacheManager::CacheLineSize()
	{
	return KCacheLineSize;
	}
sl@0
   294
sl@0
   295
// Number of segments in one cacheline (cacheline size / segment size).
TInt CCacheManager::CacheLineSizeInSegments()
	{
	return KCacheLineSizeInSegments;
	}
sl@0
   299
sl@0
   300
// Log2 of the cacheline size (17, i.e. 128K cachelines).
TInt CCacheManager::CacheLineSizeLog2()
	{
	return KCacheLineSizeLog2;
	}
sl@0
   304
sl@0
   305
sl@0
   306
#if defined(_DEBUG) || defined(_DEBUG_RELEASE)
sl@0
   307
TFileCacheStats& CCacheManager::Stats()
sl@0
   308
	{
sl@0
   309
	iStats.iFreeCount = iFreeQueue.Count();
sl@0
   310
	iStats.iUsedCount = iUsedQueue.Count();
sl@0
   311
	iStats.iLockedSegmentCount = iLockedSegmentCount;
sl@0
   312
	iStats.iFilesOnClosedQueue = TClosedFileUtils::Count();
sl@0
   313
	__ASSERT_DEBUG(iStats.iFreeCount >= 0, Fault(EInvalidStats));
sl@0
   314
	__ASSERT_DEBUG(iStats.iUsedCount >= 0, Fault(EInvalidStats));
sl@0
   315
	__ASSERT_DEBUG(iStats.iLockedSegmentCount >= 0, Fault(EInvalidStats));
sl@0
   316
	__ASSERT_DEBUG(iStats.iFilesOnClosedQueue >= 0, Fault(EInvalidStats));
sl@0
   317
	return iStats;
sl@0
   318
	}
sl@0
   319
#endif	// #if defined(_DEBUG) || defined(_DEBUG_RELEASE)
sl@0
   320
sl@0
   321
sl@0
   322
sl@0
   323
// Acquire the global cache-manager mutex. In debug builds also records
// that the manager is locked so asserts (e.g. in StealCacheLine) can
// verify the lock is held.
void CCacheManager::CacheLock()
	{
	iLock.Wait();

#if defined(_DEBUG) || defined(_DEBUG_RELEASE)
	iManagerLocked = ETrue;
#endif
	}
sl@0
   331
sl@0
   332
// Release the global cache-manager mutex; the debug flag is cleared
// before signalling so it is never seen as set while unlocked.
void CCacheManager::CacheUnlock()
	{
#if defined(_DEBUG) || defined(_DEBUG_RELEASE)
	iManagerLocked = EFalse;
#endif
	iLock.Signal();
	}
sl@0
   339
sl@0
   340
sl@0
   341
/**
Allocates and locks a free cacheline on behalf of aClient.

Fails early if system memory is low (or a simulated allocation failure
triggers in debug builds); otherwise takes a cacheline from the free
queue — stealing one from another client if necessary — commits the
requested number of segments and moves it to the used queue with a
lock count of one.

@param aClient       the client that will own the cacheline
@param aPos          media position this cacheline will cache
@param aCacheLine    on success, set to the allocated cacheline
@param aSegmentCount number of segments to commit and lock
@return KErrNone on success; KErrNoMemory if memory is low or commit
        fails; KErrNotFound if no cacheline could be freed or stolen;
        or another system-wide error code.
*/
TInt CCacheManager::AllocateAndLockCacheLine(CCacheClient* aClient, TInt64 aPos, const TCacheLine*& aCacheLine, TInt aSegmentCount)
	{
	__ASSERT_DEBUG(aSegmentCount <= iSegmentsPerCacheLine, Fault(EInvalidAllocCount));

	// check for low system memory
	TMemoryInfoV1Buf meminfo;
	TInt r = UserHal::MemoryInfo(meminfo);
	__ASSERT_ALWAYS(r==KErrNone,Fault(EMemoryInfoFailed));
	if (iMemoryLow || (meminfo().iFreeRamInBytes < iLowMemoryThreshold))
		{
		__CACHE_PRINT(_L("CACHEMAN: free RAM below threshold !!!"));
		return KErrNoMemory;
		}

	CacheLock();

	// NB trace fixed: the format string previously contained two "%d"
	// specifiers but only one argument was supplied via __CACHE_PRINT1
	__CACHE_PRINT1(_L("CACHEMAN: Allocate and lock %d segments"), aSegmentCount);
	__CACHE_PRINT2(_L("CACHEMAN: iFreeQueue %d iUsedQueue %d"), iFreeQueue.Count(), iUsedQueue.Count());

#ifdef  __SIMULATE_LOCK_FAILURES__
	if (SimulatedFailure(iAllocFailureCount))
		{
		__CACHE_PRINT(_L("CACHEMAN: simulating allocation failure"));
		CacheUnlock();
		return KErrNoMemory;
		}
#endif

	// any cachelines free ?
	TInt freeCacheLines = iFreeQueue.Count();
	TCacheLine* cacheLine = NULL;
	if (freeCacheLines == 0 && !StealCacheLine(aClient))
		{
		CacheUnlock();
		return KErrNotFound;
		}

	cacheLine = iFreeQueue[0];

	// a cacheline on the free queue must be completely empty and unowned
	__CACHELINE_INVARIANT((*cacheLine));
	__ASSERT_DEBUG( cacheLine->iAllocatedSegments == 0, Fault(EInvalidAllocCount));
	__ASSERT_DEBUG( cacheLine->iFilledSegments == 0, Fault(EInvalidFillCount));
	__ASSERT_DEBUG( cacheLine->iDirtySegments == 0, Fault(EInvalidFillCount));
	__ASSERT_DEBUG( cacheLine->iLockCount == 0, Fault(EInvalidLockCount));
	__ASSERT_DEBUG( cacheLine->iLockedSegmentStart == 0, Fault(EInvalidLockedPageStart));
	__ASSERT_DEBUG( cacheLine->iLockedSegmentCount == 0, Fault(EInvalidLockedPageCount));
	__ASSERT_DEBUG( cacheLine->iClient == NULL, Fault(EInvalidClient));

	TUint8* addr = cacheLine->iAddr;

	// Commit it
	r = Commit(addr, aSegmentCount);
	if (r != KErrNone)
		{
		CacheUnlock();
		return r;
		}
	cacheLine->iAllocatedSegments = (TUint8) (aSegmentCount);
	cacheLine->iLockedSegmentStart = 0;
	cacheLine->iLockedSegmentCount = (TUint8) aSegmentCount;

	// Add to used queue
	r = iUsedQueue.InsertInAddressOrder(cacheLine);
	if (r != KErrNone)
		{
		// couldn't track the cacheline - give the memory back
		Decommit(addr, aSegmentCount);
		CacheUnlock();
		return r;
		}

	cacheLine->iClient = aClient;
	cacheLine->iPos = aPos;

	// Remove from free queue
	iFreeQueue.Remove(0);

	// RChunk will lock segments initially unless explicitly unlocked

	cacheLine->iLockCount++;
	
	__CACHE_PRINT2(_L("CACHEMAN: LockCount for %08X is %d"), cacheLine->iAddr, cacheLine->iLockCount);

	CacheUnlock();

	aCacheLine = cacheLine;
	return r;

	}
sl@0
   429
sl@0
   430
/**
CCacheManager::ExtendCacheLine()

Attempts to extend the length of an existing cacheline by committing extra segments
The cacheline must be owned and locked already.

@param aCacheLine A reference to a locked, owned cacheline
@param aSegmentCount The new length of the cacheline (including the existing segments)
					 This must be greater than the existing length.

@return KErrNone if successful
		or other system wide error code.
*/
TInt CCacheManager::ExtendCacheLine(CCacheClient* aClient, const TCacheLine& aCacheLine, TInt aSegmentCount)
	{
	CacheLock();

	__ASSERT_DEBUG(aSegmentCount > 0, Fault(EInvalidSegmentCount));

	TCacheLine& cacheLine = const_cast<TCacheLine&>(aCacheLine);
	__CACHELINE_INVARIANT((cacheLine));
	__ASSERT_DEBUG(aSegmentCount <= iSegmentsPerCacheLine, Fault(EInvalidSegmentCount));
	__ASSERT_DEBUG(aSegmentCount > cacheLine.iAllocatedSegments, Fault(EInvalidSegmentCount));
	__ASSERT_DEBUG(cacheLine.iLockCount > 0, Fault(EInvalidLockCount));	// must be locked already
	__ASSERT_ALWAYS(cacheLine.iClient == aClient, Fault(EExtendingUnownedCacheline));

	// ensure all pages in cacheline are locked
	__ASSERT_DEBUG(cacheLine.iLockedSegmentStart == 0, Fault(EInvalidLockRange));
	__ASSERT_DEBUG(cacheLine.iLockedSegmentCount == cacheLine.iAllocatedSegments, Fault(EInvalidLockRange));

	__CACHE_PRINT2(_L("CACHEMAN: ExtendCacheLine(%08X, %d)"), cacheLine.iAddr, aSegmentCount);

	// Commit the new segments, which follow on from the segments
	// already allocated to this cacheline
	TUint8* addrNewSegment = cacheLine.iAddr + (cacheLine.iAllocatedSegments << KSegmentSizeLog2);
	TInt segmentCountNew = aSegmentCount - cacheLine.iAllocatedSegments;

	TInt r = Commit(addrNewSegment, segmentCountNew);
	if (r == KErrNone)
		{
		// the whole (extended) cacheline is now committed and locked
		cacheLine.iAllocatedSegments = cacheLine.iLockedSegmentCount = (TUint8) aSegmentCount;
		}
	

	CacheUnlock();
	return r;
	}
sl@0
   476
sl@0
   477
/**
CCacheManager::ReAllocateAndLockCacheLine()

Attempts to lock and then extend or shrink an already owned cacheline, ready for re-use.
If successful the cacheline is returned locked and all segments are marked as empty.
The cacheline must initially be unlocked.

@param aClient the client that owns the cacheline
@param aPos new media position for the cacheline
@param aCacheLine A reference to an unlocked cacheline
@param aSegmentCount The new length of the cacheline;
					 this may be greater or less than the existing length.

@return KErrNone if successful
		or other system wide error code.
*/
TInt CCacheManager::ReAllocateAndLockCacheLine(CCacheClient* aClient, TInt64 aPos, const TCacheLine& aCacheLine, TInt aSegmentCount)
	{
	TCacheLine& cacheLine = const_cast<TCacheLine&>(aCacheLine);
	__CACHELINE_INVARIANT((cacheLine));

	__ASSERT_DEBUG(aSegmentCount > 0, Fault(EInvalidSegmentCount));
	__ASSERT_DEBUG(aSegmentCount <= iSegmentsPerCacheLine, Fault(EInvalidSegmentCount));

	__CACHE_PRINT2(_L("CACHEMAN: ReUseCacheLine(%08X, %d)"), cacheLine.iAddr, aSegmentCount);

	TInt r;
	// old cacheline same size or bigger ?
	if (cacheLine.iAllocatedSegments >= aSegmentCount)
		{
		r = LockCacheLine(aClient, aCacheLine, 0, aSegmentCount);
		if (r!= KErrNone)
			return r;
		// shrink: discard the segments past the new length
		if (cacheLine.iAllocatedSegments > aSegmentCount)
			{
			cacheLine.iFilledSegments = (TUint8) aSegmentCount;
			RemoveEmptySegments(aClient, aCacheLine);
			}
		}
	// old cacheline smaller
	else 
		{
		// lock what is already there, then grow to the requested size
		r = LockCacheLine(aClient, aCacheLine, 0, cacheLine.iAllocatedSegments);
		if (r != KErrNone)
			return r;
		r = ExtendCacheLine(aClient, aCacheLine, aSegmentCount);
		if (r != KErrNone)
			{
			UnlockCacheLine(aClient, aCacheLine);
			return r;
			}
		}
	// all segments are now empty and the cacheline caches aPos
	cacheLine.iFilledSegments = 0;
	cacheLine.iPos = aPos;

	return KErrNone;
	}
sl@0
   532
sl@0
   533
sl@0
   534
/**
sl@0
   535
CCacheManager::LockCacheLine()
sl@0
   536
sl@0
   537
@return KErrNone on success
sl@0
   538
*/
sl@0
   539
TInt CCacheManager::LockCacheLine(
sl@0
   540
	CCacheClient* aClient, 
sl@0
   541
	const TCacheLine& aCacheLine,
sl@0
   542
	TInt aLockedPageStart,
sl@0
   543
	TInt aLockedPageCount)
sl@0
   544
	{
sl@0
   545
	CacheLock();
sl@0
   546
sl@0
   547
sl@0
   548
	TCacheLine& cacheLine = const_cast<TCacheLine&>(aCacheLine);
sl@0
   549
	__CACHELINE_INVARIANT((cacheLine));
sl@0
   550
	
sl@0
   551
	// has the cacheline been stolen ?
sl@0
   552
	if (cacheLine.iClient != aClient)
sl@0
   553
		{
sl@0
   554
		__CACHE_PRINT(_L("CCacheManager::LockCacheLine(), Cacheline Stolen !\n"));
sl@0
   555
		CacheUnlock();
sl@0
   556
		return KErrNotFound;
sl@0
   557
		}
sl@0
   558
	
sl@0
   559
	// validate lock range
sl@0
   560
	__ASSERT_DEBUG(aLockedPageStart >= 0, Fault(EInvalidLockRange));
sl@0
   561
	__ASSERT_DEBUG(aLockedPageStart + aLockedPageCount <= cacheLine.iAllocatedSegments, Fault(EInvalidLockRange));
sl@0
   562
	__ASSERT_DEBUG(aLockedPageCount <= iSegmentsPerCacheLine, Fault(EInvalidLockRange));
sl@0
   563
	// if already locked, don't allow lock range to grow down or up (to simplify code)
sl@0
   564
	__ASSERT_DEBUG(cacheLine.iLockCount == 0 || 
sl@0
   565
			aLockedPageStart >= cacheLine.iLockedSegmentStart, 
sl@0
   566
		Fault(EInvalidLockRange));
sl@0
   567
	__ASSERT_DEBUG(cacheLine.iLockCount == 0 || 
sl@0
   568
		aLockedPageStart + aLockedPageCount <= cacheLine.iLockedSegmentStart + cacheLine.iLockedSegmentCount, 
sl@0
   569
		Fault(EInvalidLockRange));
sl@0
   570
	
sl@0
   571
	__CACHE_PRINT1(_L("CACHEMAN: LockCacheLine(%08X, %d)"), cacheLine.iAddr);
sl@0
   572
sl@0
   573
	if (InUse(aCacheLine))
sl@0
   574
		{
sl@0
   575
		__CACHE_PRINT(_L("CCacheManager::LockCacheLine(), Cacheline in use !\n"));
sl@0
   576
		CacheUnlock();
sl@0
   577
		return KErrInUse;
sl@0
   578
		}
sl@0
   579
sl@0
   580
	TInt lockErr = KErrNone;
sl@0
   581
	
sl@0
   582
	// increment the lock count
sl@0
   583
	// if not already locked, lock requested segments
sl@0
   584
	if (cacheLine.iLockCount++ == 0)
sl@0
   585
		{
sl@0
   586
#ifdef __LOCK_ENTIRE_CACHELINE__
sl@0
   587
		lockErr = Lock(cacheLine.iAddr, cacheLine.iAllocatedSegments);
sl@0
   588
#else
sl@0
   589
		__ASSERT_DEBUG(cacheLine.iDirtySegments == 0, Fault(ELockingAndAlreadyDirty));
sl@0
   590
		lockErr = Lock(	cacheLine.iAddr + (aLockedPageStart<<KSegmentSizeLog2), aLockedPageCount);
sl@0
   591
#endif
sl@0
   592
sl@0
   593
		if (lockErr == KErrNone)
sl@0
   594
			{
sl@0
   595
			cacheLine.iLockedSegmentStart = (TUint8) aLockedPageStart;
sl@0
   596
			cacheLine.iLockedSegmentCount = (TUint8) aLockedPageCount;
sl@0
   597
			}
sl@0
   598
		else	// if (lockErr != KErrNone)
sl@0
   599
			{
sl@0
   600
			__CACHE_PRINT2(_L("CACHEMAN: LockCacheLine(%08X) failure %d"), cacheLine.iAddr, lockErr);
sl@0
   601
			cacheLine.iLockCount--;
sl@0
   602
			// NB a lock failure implies segment is decomitted
sl@0
   603
			FreeCacheLine(cacheLine);
sl@0
   604
			}
sl@0
   605
		}
sl@0
   606
	// if already locked (because cacheline is dirty) ensure lock range 
sl@0
   607
	// isn't growing (this isn't allowed to keep the code simpler) 
sl@0
   608
	else
sl@0
   609
		{
sl@0
   610
		__ASSERT_DEBUG(cacheLine.iLockedSegmentStart == 0, Fault(EInvalidLockRange));
sl@0
   611
		__ASSERT_DEBUG(cacheLine.iLockedSegmentCount >= aLockedPageStart + aLockedPageCount, Fault(EInvalidLockRange));
sl@0
   612
		}
sl@0
   613
sl@0
   614
	CacheUnlock();
sl@0
   615
	return lockErr;
sl@0
   616
	}
sl@0
   617
sl@0
   618
/**
UnlockCacheLine()

Decrements the lock count of an owned cacheline. The last lock is only
released if the cacheline has no dirty segments; a dirty cacheline
keeps its final lock (and its locked range) until it is cleaned.

@return ETrue if the lock count was decremented, EFalse if the
        cacheline stayed locked because it still has dirty segments.
*/
TBool CCacheManager::UnlockCacheLine(CCacheClient* aClient, const TCacheLine& aCacheLine)
	{
	CacheLock();

	TCacheLine& cacheLine = const_cast<TCacheLine&>(aCacheLine);
	__CACHELINE_INVARIANT((cacheLine));
	
	__ASSERT_DEBUG(cacheLine.iLockCount > 0, Fault(EInvalidLockCount));
	__ASSERT_ALWAYS(cacheLine.iClient == aClient, Fault(EUnlockingUnownedCacheline));

	
	__CACHE_PRINT2(_L("CACHEMAN: UnlockCacheLine(%08X, %d)"), cacheLine.iAddr, cacheLine.iAllocatedSegments);


	// decrement the lock count
	TBool success = ETrue;

	if (cacheLine.iLockCount > 1)
		{
		// not the last lock - just decrement
		cacheLine.iLockCount--;
		}
	else
		{
		if (cacheLine.iDirtySegments == 0)
			{
			// last lock and nothing dirty - physically unlock the
			// locked range and reset it
			cacheLine.iLockCount--;
#ifdef __LOCK_ENTIRE_CACHELINE__
			Unlock(cacheLine.iAddr, cacheLine.iAllocatedSegments);
#else
			Unlock(
				cacheLine.iAddr + (cacheLine.iLockedSegmentStart<<KSegmentSizeLog2), 
				cacheLine.iLockedSegmentCount);
#endif

			cacheLine.iLockedSegmentStart = cacheLine.iLockedSegmentCount = 0;
			}
		else
			{
			// dirty data present - refuse to drop the last lock
			__CACHE_PRINT(_L("CACHEMAN: UnlockCacheLine - not unlocking segment dirty"));
			success = EFalse;
			}
		}

	CacheUnlock();

	return success;
	}
sl@0
   668
sl@0
   669
sl@0
   670
sl@0
   671
// Attempts to free ("steal") an unlocked cacheline belonging to a
// client other than aClient, searching the used queue round-robin from
// one slot past the previous victim. Must be called with the manager
// lock held. Returns ETrue if a cacheline was freed.
TBool CCacheManager::StealCacheLine(CCacheClient* aClient)
	{
	__ASSERT_DEBUG(iManagerLocked, Fault(EManagerNotLocked));

	const TInt usedQueueSize = iUsedQueue.Count();

	__CACHE_PRINT2(_L("CACHEMAN: StealCacheLine, iNextCacheLineToSteal %d used count %d"), iNextCacheLineToSteal, iUsedQueue.Count());

	const TInt startIndex = iNextCacheLineToSteal;

	// Walk the used queue with wrap-around, stopping when a suitable
	// victim is found or we have come full circle back to startIndex.
	for (;;)
		{
		// advance the round-robin cursor, wrapping at the queue end
		iNextCacheLineToSteal++;
		if (iNextCacheLineToSteal >= usedQueueSize)
			iNextCacheLineToSteal = 0;
		if (iNextCacheLineToSteal == startIndex)
			break;

		TCacheLine& cacheLine = *iUsedQueue[iNextCacheLineToSteal];
		// only steal cachelines that are unlocked and owned by others
		if (cacheLine.iLockCount == 0 && cacheLine.iClient != aClient)
			{
			__CACHE_PRINT3(_L("CACHEMAN: StealCacheLine, stealing %d from %08X to %08X"), 
				  iNextCacheLineToSteal, cacheLine.iClient, aClient);
			FreeCacheLine(cacheLine);
			return ETrue;
			}
		}
	return EFalse;
	}
sl@0
   701
sl@0
   702
sl@0
   703
/**
Frees an owned cacheline, returning its memory to the cache.

@param aClient    the client believed to own the cacheline
@param aCacheLine the cacheline to free
@return ETrue if the cacheline was freed (or had already been stolen
        by another client), EFalse if it is locked and cannot be freed.
*/
TBool CCacheManager::FreeCacheLine(CCacheClient* aClient, const TCacheLine& aCacheLine)
	{
	CacheLock();

	TCacheLine& cacheLine = const_cast<TCacheLine&>(aCacheLine);
	__CACHELINE_INVARIANT((cacheLine));

	// Has the cacheline been stolen ? (Assume success if it has)
	if (cacheLine.iClient != aClient)
		{
		__CACHE_PRINT(_L("CCacheManager::FreeCacheLine(), Cacheline Stolen !!!!\n"));
		CacheUnlock();
		return ETrue;		
		}
	
	// Is the cacheline locked ?
	if (cacheLine.iLockCount > 0)
		{
		__CACHE_PRINT(_L("CCacheManager::FreeCacheLine(), attempt to free locked cacheline\n"));
		CacheUnlock();
		return EFalse;
		}

	// delegate to the internal (already-locked) overload
	FreeCacheLine(cacheLine);

	CacheUnlock();
	return ETrue;
	}
sl@0
   731
sl@0
   732
// Returns the cacheline's lock count, or zero if the cacheline has
// been stolen (i.e. is no longer owned by aClient).
TInt CCacheManager::LockCount(CCacheClient* aClient, const TCacheLine& aCacheLine)
	{
	TCacheLine& cacheLine = const_cast<TCacheLine&>(aCacheLine);
	__CACHELINE_INVARIANT((cacheLine));

	return (cacheLine.iClient == aClient) ? cacheLine.iLockCount : 0;
	}
sl@0
   743
sl@0
   744
// Returns the number of filled segments in the cacheline, or zero if
// the cacheline has been stolen from aClient.
TInt CCacheManager::FillCount(CCacheClient* aClient, const TCacheLine& aCacheLine)
	{
	TCacheLine& cacheLine = const_cast<TCacheLine&>(aCacheLine);
	__CACHELINE_INVARIANT((cacheLine));

	return (cacheLine.iClient == aClient) ? cacheLine.iFilledSegments : 0;
	}
sl@0
   755
sl@0
   756
// Returns the number of dirty segments in the cacheline, or zero if
// the cacheline has been stolen from aClient.
TInt CCacheManager::DirtyCount(CCacheClient* aClient, const TCacheLine& aCacheLine)
	{
	TCacheLine& cacheLine = const_cast<TCacheLine&>(aCacheLine);
	__CACHELINE_INVARIANT((cacheLine));

	return (cacheLine.iClient == aClient) ? cacheLine.iDirtySegments : 0;
	}
sl@0
   767
sl@0
   768
// Reports whether a cacheline is busy:
// - clean cacheline : busy if lock count >= 1
// - dirty cacheline : busy if lock count >= 2 (a dirty cacheline
//   always retains one lock, which does not count as "in use")
TBool CCacheManager::InUse(const TCacheLine& aCacheLine)
	{
	TCacheLine& cacheLine = const_cast<TCacheLine&>(aCacheLine);
	__CACHELINE_INVARIANT((cacheLine));

	TInt effectiveLocks = cacheLine.iLockCount;
	if (cacheLine.iDirtySegments)
		effectiveLocks--;

	return effectiveLocks > 0 ? (TBool)ETrue : (TBool)EFalse;
	}
sl@0
   778
sl@0
   779
// Marks the first aSegmentCount segments of an owned, locked cacheline
// as filled (containing valid data). The filled count never shrinks:
// the maximum of the current and requested values is kept.
void CCacheManager::SetFilled(CCacheClient* aClient, const TCacheLine& aCacheLine,  TInt aSegmentCount)
	{
	TCacheLine& cacheLine = const_cast<TCacheLine&>(aCacheLine);
	__CACHELINE_INVARIANT((cacheLine));
	__ASSERT_DEBUG(aSegmentCount <= iSegmentsPerCacheLine, Fault(EInvalidSegmentCount));
	__ASSERT_DEBUG(aSegmentCount <= cacheLine.iAllocatedSegments , Fault(EInvalidDirtyCount));
	__ASSERT_DEBUG(cacheLine.iLockCount > 0, Fault(ESetFilledNotLocked));
	__ASSERT_ALWAYS(cacheLine.iClient == aClient, Fault(EExtendingUnownedCacheline));

	
	cacheLine.iFilledSegments = Max(cacheLine.iFilledSegments, (TUint8) aSegmentCount);

	__ASSERT_DEBUG(cacheLine.iFilledSegments >= cacheLine.iDirtySegments , Fault(EInvalidDirtyCount));
	}
sl@0
   793
sl@0
   794
void CCacheManager::SetDirty(CCacheClient* aClient, const TCacheLine& aCacheLine,  TInt aSegmentCount)
	{
	TCacheLine& line = const_cast<TCacheLine&>(aCacheLine);
	__CACHELINE_INVARIANT((line));
	__ASSERT_DEBUG(aSegmentCount <= iSegmentsPerCacheLine, Fault(EInvalidSegmentCount));
	__ASSERT_DEBUG(aSegmentCount <= line.iAllocatedSegments , Fault(EInvalidDirtyCount));
	__ASSERT_DEBUG(line.iLockCount > 0, Fault(ESetDirtyNotLocked));
	__ASSERT_ALWAYS(line.iClient == aClient, Fault(EExtendingUnownedCacheline));

	// Ensure that the lock range is valid - we insist on dirty cachelines
	// having all dirty pages locked, from segment zero up to the last dirty page.
	__ASSERT_DEBUG(line.iLockedSegmentStart == 0, Fault(ESetDirtyInvalidLockRange));
	__ASSERT_DEBUG(line.iLockedSegmentCount >= aSegmentCount, Fault(ESetDirtyInvalidLockRange));

	// Dirty segments are by definition also filled; both watermarks only grow.
	line.iFilledSegments = Max(line.iFilledSegments, (TUint8) aSegmentCount);
	line.iDirtySegments = Max(line.iDirtySegments, (TUint8) aSegmentCount);

	__ASSERT_DEBUG(line.iFilledSegments >= line.iDirtySegments , Fault(EInvalidDirtyCount));
	}
sl@0
   813
sl@0
   814
void CCacheManager::ClearDirty(CCacheClient* aClient, const TCacheLine& aCacheLine)
	{
	TCacheLine& line = const_cast<TCacheLine&>(aCacheLine);
	__CACHELINE_INVARIANT((line));
	__ASSERT_DEBUG(line.iLockCount > 0, Fault(EClearDirtyNotLocked));
	__ASSERT_ALWAYS(line.iClient == aClient, Fault(EExtendingUnownedCacheline));

	// The previously-dirty data now counts as filled; then drop the lock
	// that was held on behalf of the dirty segments.
	const TInt segmentsWritten = line.iDirtySegments;
	line.iDirtySegments = 0;
	SetFilled(aClient, line, segmentsWritten);
	UnlockCacheLine(aClient, line);
	}
sl@0
   826
sl@0
   827
sl@0
   828
// Shrink a cacheline to its filled length: unlock and then decommit any
// allocated segments past the last filled segment. No-op (apart from
// diagnostics) if the cacheline has been stolen from aClient.
void CCacheManager::RemoveEmptySegments(CCacheClient* aClient, const TCacheLine& aCacheLine)
	{
	CacheLock();

	TCacheLine& cacheLine = const_cast<TCacheLine&>(aCacheLine);
	__CACHELINE_INVARIANT((cacheLine));

	// has the cacheline been stolen ?
	if (cacheLine.iClient != aClient)
		{
		__CACHE_PRINT(_L("CCacheManager::RemoveEmptySegments((), Cacheline Stolen ! !!!\n"));
		CacheUnlock();
		return;
		}
		
	__ASSERT_DEBUG(cacheLine.iLockCount > 0, Fault(ERemovingEmptyUnlocked));

	// Unlock any locked segments past the last filled segment
	TInt filledSegmentCount = cacheLine.iFilledSegments;
	TInt firstSegmentToUnlock;
	TInt segmentsToUnlock;
#ifdef __LOCK_ENTIRE_CACHELINE__
	// Whole cacheline is locked: everything beyond the filled part is unlocked.
	firstSegmentToUnlock = filledSegmentCount;
	segmentsToUnlock = cacheLine.iAllocatedSegments - filledSegmentCount;
#else
	// Only [iLockedSegmentStart, iLockedSegmentStart + iLockedSegmentCount)
	// is locked; intersect that range with the region past the filled segments.
	TInt firstLockedSegment = cacheLine.iLockedSegmentStart;
	if (firstLockedSegment <= filledSegmentCount)
		{
		// Locked range starts inside the filled region: unlock its tail only.
		firstSegmentToUnlock = filledSegmentCount;
		segmentsToUnlock = firstLockedSegment + cacheLine.iLockedSegmentCount - filledSegmentCount;
		}
	else
		{
		// Locked range lies entirely past the filled region: unlock all of it.
		firstSegmentToUnlock = firstLockedSegment;
		segmentsToUnlock = cacheLine.iLockedSegmentCount;
		}
#endif
	TUint8* addrFirstSegmentToUnlock = 
		cacheLine.iAddr + (firstSegmentToUnlock << KSegmentSizeLog2);
	if (segmentsToUnlock > 0)
		{
		Unlock(addrFirstSegmentToUnlock, segmentsToUnlock);
		cacheLine.iLockedSegmentCount = 
			(TUint8) (cacheLine.iLockedSegmentCount - segmentsToUnlock);
		}

	// Decommit any segments past the last filled segment
	Decommit(
		cacheLine.iAddr + (filledSegmentCount << KSegmentSizeLog2), 
		cacheLine.iAllocatedSegments - filledSegmentCount);
	cacheLine.iAllocatedSegments = (TUint8) filledSegmentCount;

	CacheUnlock();
	}
sl@0
   882
sl@0
   883
sl@0
   884
void CCacheManager::FreeCacheLine(TCacheLine& aCacheLine)
	{
	// Return a cacheline to the free queue. Caller must hold the manager
	// lock; the line must be unlocked and have no dirty segments.
	__ASSERT_DEBUG(iManagerLocked, Fault(EManagerNotLocked));
	aCacheLine.iClient = NULL;
	__ASSERT_ALWAYS( (aCacheLine.iLockCount == 0), Fault(EFreeingLockedCacheLine));
	__ASSERT_ALWAYS( (aCacheLine.iDirtySegments == 0), Fault(EFreeingDirtyCacheLine));

	// Release the backing RAM and reset the line's bookkeeping
	Decommit(aCacheLine.iAddr, aCacheLine.iAllocatedSegments);
	aCacheLine.iAllocatedSegments = 0;
	aCacheLine.iFilledSegments = 0;
	aCacheLine.iLockedSegmentStart = 0;
	aCacheLine.iLockedSegmentCount = 0;

	// Remove from used queue
	TInt usedIndex = iUsedQueue.FindInAddressOrder(&aCacheLine);
	__ASSERT_ALWAYS(usedIndex != KErrNotFound, Fault(ESegmentNotFound));
	iUsedQueue.Remove(usedIndex);

	// put back on free queue
	TInt err = iFreeQueue.InsertInAddressOrder(&aCacheLine);
	__ASSERT_ALWAYS(err == KErrNone, Fault(EAppendToFreeQueueFailed));

	__CACHE_PRINT2(_L("CACHEMAN: FreeCacheLine, iFreeQueue %d iUsedQueue %d"), iFreeQueue.Count(), iUsedQueue.Count());
	}
sl@0
   909
sl@0
   910
sl@0
   911
#if defined(_DEBUG) || defined(_DEBUG_RELEASE)
sl@0
   912
// Debug aid: print one cacheline's owner, address, file position, segment
// counters and the first eight bytes of its data.
void CCacheManager::DumpCacheLine(TCacheLine& aCacheLine)
	{
	RDebug::Print(_L("CACHEMAN: Cacheline client %08X addr %08X pos %d alloc %d filled %d dirty %d lock %d \tData: %02X %02X %02X %02X %02X %02X %02X %02X "), 
		aCacheLine.iClient,
		aCacheLine.iAddr,
		I64LOW(aCacheLine.iPos),	// low word only - fine for diagnostics

		aCacheLine.iAllocatedSegments,
		aCacheLine.iFilledSegments,
		aCacheLine.iDirtySegments,
		aCacheLine.iLockCount,

		aCacheLine.iAddr[0], 
		aCacheLine.iAddr[1], 
		aCacheLine.iAddr[2], 
		aCacheLine.iAddr[3], 
		aCacheLine.iAddr[4], 
		aCacheLine.iAddr[5], 
		aCacheLine.iAddr[6], 
		aCacheLine.iAddr[7]
		);
	}
sl@0
   934
sl@0
   935
void CCacheManager::DumpCache()
sl@0
   936
	{
sl@0
   937
	CacheLock();
sl@0
   938
	
sl@0
   939
	RPointerArray<CCacheClient> clients;
sl@0
   940
sl@0
   941
	RDebug::Print(_L("**** CACHEMAN: CacheLines ****\n"));
sl@0
   942
	TInt usedQueueSize = iUsedQueue.Count();
sl@0
   943
	TInt n;
sl@0
   944
	for (n=0; n<usedQueueSize; n++)
sl@0
   945
		{
sl@0
   946
		TCacheLine& cacheLine = *iUsedQueue[n];
sl@0
   947
		DumpCacheLine(cacheLine);
sl@0
   948
sl@0
   949
		clients.InsertInAddressOrder(cacheLine.iClient);
sl@0
   950
		}
sl@0
   951
sl@0
   952
	TInt clientCount = clients.Count();
sl@0
   953
sl@0
   954
	for (n=0; n<clientCount; n++)
sl@0
   955
		{
sl@0
   956
		RDebug::Print(_L("**** CACHEMAN: CacheClient #%d ****\n"), n);
sl@0
   957
		clients[n]->DumpCache();
sl@0
   958
		}
sl@0
   959
sl@0
   960
	clients.Close();
sl@0
   961
sl@0
   962
	CacheUnlock();
sl@0
   963
	}
sl@0
   964
#endif
sl@0
   965
sl@0
   966
sl@0
   967
sl@0
   968
sl@0
   969
TUint8* CCacheManager::Base()
	{
	// Base address of the cache chunk; cacheline addresses are offsets from here.
	return iChunk.Base();
	}
sl@0
   973
sl@0
   974
sl@0
   975
// Pin aSegmentCount segments starting at aAddr so they cannot be paged out.
// Returns KErrNone on success; KErrNoMemory or KErrNotFound are the only
// expected failures (anything else faults a debug build). On success the
// global locked-segment count is increased.
TInt CCacheManager::Lock(TUint8* const aAddr, TInt aSegmentCount)
	{
	TInt r = iChunk.Lock(aAddr-iBase, aSegmentCount << KSegmentSizeLog2);
//RDebug::Print(_L("RChunk::Lock(%08X, %d) %d"), aAddr-iBase, aSegmentCount << KSegmentSizeLog2, r);
	__ASSERT_DEBUG(r == KErrNone || r == KErrNoMemory || r == KErrNotFound, Fault(EUnexpectedLockFailure));

	__CACHE_PRINT3(_L("CACHEMAN: LOCK %08X %d %d"), aAddr, aSegmentCount, r);

#ifdef  __SIMULATE_LOCK_FAILURES__
	// Debug-only fault injection: periodically pretend the lock failed
	if (SimulatedFailure(iLockFailureCount))
		{
		__CACHE_PRINT(_L("CACHEMAN: simulating lock failure"));
		r = KErrNotFound;
		}
#endif

	if (r == KErrNone)
		{
		iLockedSegmentCount+= aSegmentCount;
		}
	else
		{
		__CACHE_PRINT(_L("CACHEMAN: LOCK FAILED"));
#if defined(_DEBUG) || defined(_DEBUG_RELEASE)
		iStats.iLockFailureCount++;
#endif
		}

	return r;
	}
sl@0
  1005
sl@0
  1006
TInt CCacheManager::Unlock(TUint8* const aAddr, TInt aSegmentCount)
	{
	// Make aSegmentCount segments starting at aAddr available for paging
	// again; on success the global locked-segment count is decreased.
	const TInt byteCount = aSegmentCount << KSegmentSizeLog2;
	TInt r = iChunk.Unlock(aAddr - iBase, byteCount);

	__CACHE_PRINT3(_L("CACHEMAN: UNLOCK %08X %d %d"), aAddr, aSegmentCount, r);
	if (r == KErrNone)
		{
		iLockedSegmentCount -= aSegmentCount;
		}
	else
		{
		__CACHE_PRINT(_L("CACHEMAN: UNLOCK FAILED"));
		}

	return r;
	}
sl@0
  1023
sl@0
  1024
// Commit RAM for aSegmentCount segments starting at aAddr. Committed
// segments start out locked, so the global locked-segment count is
// increased on success. Returns KErrNone or KErrNoMemory (anything else
// faults a debug build).
TInt CCacheManager::Commit(TUint8* const aAddr, TInt aSegmentCount)
	{
#ifdef  __SIMULATE_LOCK_FAILURES__
	// Debug-only fault injection: periodically pretend the commit failed
	if (SimulatedFailure(iCommitFailureCount))
		{
		__CACHE_PRINT(_L("CACHEMAN: simulating commit failure "));
		return KErrNoMemory;
		}
#endif

	TInt r = iChunk.Commit(aAddr-iBase, aSegmentCount << KSegmentSizeLog2);
//RDebug::Print(_L("RChunk::Commit(%08X, %d) %d, "), aAddr-iBase, aSegmentCount << KSegmentSizeLog2, r);
	
	__CACHE_PRINT3(_L("CACHEMAN: COMMIT: %08X %d %d, "), aAddr, aSegmentCount, r);
	if (r == KErrNone)
		{
		iLockedSegmentCount+= aSegmentCount;
#if defined(_DEBUG) || defined(_DEBUG_RELEASE)
		iStats.iAllocatedSegmentCount+= aSegmentCount;
#endif
		}
	else
		{
		__CACHE_PRINT(_L("CACHEMAN: COMMIT FAILED"));
#if defined(_DEBUG) || defined(_DEBUG_RELEASE)
		iStats.iCommitFailureCount++;
#endif
		}


	__ASSERT_DEBUG(r == KErrNone || r == KErrNoMemory, Fault(EUnexpectedCommitFailure));

	return r;
	}
sl@0
  1058
sl@0
  1059
// Release the RAM backing aSegmentCount segments starting at aAddr.
// Note: unlike Commit()/Lock(), this does not adjust iLockedSegmentCount;
// callers are expected to have unlocked the segments first.
TInt CCacheManager::Decommit(TUint8* const aAddr, TInt aSegmentCount)
	{
	TInt r = iChunk.Decommit(aAddr-iBase, aSegmentCount << KSegmentSizeLog2);
//RDebug::Print(_L("RChunk::Decommit(%08X, %d), %d"), aAddr-iBase, aSegmentCount << KSegmentSizeLog2, r);

	__CACHE_PRINT3(_L("CACHEMAN: DECOMMIT: %08X %d %d, "), aAddr, aSegmentCount, r);
	if (r == KErrNone)
		{
#if defined(_DEBUG) || defined(_DEBUG_RELEASE)
		iStats.iAllocatedSegmentCount-= aSegmentCount;
#endif
		}
	else
		{
		__CACHE_PRINT(_L("CACHEMAN: DECOMMIT FAILED"));
		}

	return r;
	}
sl@0
  1078
sl@0
  1079
#if defined(_DEBUG) || defined(_DEBUG_RELEASE)
sl@0
  1080
sl@0
  1081
void CCacheManager::SimulateLockFailureMode(TBool aEnable)
	{
	// Test hook: enables/disables periodic simulated Lock()/Commit() failures
	iSimulateLockFailureMode = aEnable;
	__CACHE_PRINT1(_L("CACHEMAN: SimulateLockFailureMode: %d, "), iSimulateLockFailureMode);
	}
sl@0
  1086
sl@0
  1087
void CCacheManager::AllocateMaxSegments(TBool aEnable)
	{
	// Test hook setter for the allocate-maximum-segments mode
	iAllocateMaxSegments = aEnable;
	__CACHE_PRINT1(_L("CACHEMAN: iAllocateMaxSegments: %d, "), iAllocateMaxSegments);
	}
sl@0
  1092
sl@0
  1093
TBool CCacheManager::AllocateMaxSegments()
	{
	// Test hook getter for the allocate-maximum-segments mode
	return iAllocateMaxSegments;
	}
sl@0
  1097
sl@0
  1098
void CCacheManager::SimulateWriteFailure()
sl@0
  1099
	{
sl@0
  1100
	iSimulateWriteFailure = ETrue;
sl@0
  1101
	}
sl@0
  1102
sl@0
  1103
TBool CCacheManager::SimulateWriteFailureEnabled()
	{
	// One-shot flag: reading it also clears it
	const TBool wasArmed = iSimulateWriteFailure;
	iSimulateWriteFailure = EFalse;
	return wasArmed;
	}
sl@0
  1109
sl@0
  1110
// Decide whether to inject a simulated failure. Increments the caller's
// per-operation counter and returns ETrue once the counter reaches the
// current threshold, then resets it. Three compile-time schemes select the
// threshold: random (note the historical "__RAMDOM_..." spelling of the
// macro, kept for consistency with its #define), a fixed pseudo-random
// sequence, or a constant. Only active when iSimulateLockFailureMode is set.
TBool CCacheManager::SimulatedFailure(TInt& aFailureCount)
	{
#ifdef  __SIMULATE_LOCK_FAILURES__
	if (iSimulateLockFailureMode)
		{
#if defined  (__RAMDOM_LOCK_FAILURES__)
		// threshold is re-randomized after each injected failure
		static TInt FailCount = 15;
		if (++aFailureCount >= FailCount)
#elif defined (__PSEUDO_RANDOM_FAILURES__)
		// fixed cycle of thresholds gives repeatable "random" failures
		const TInt FailCounts[] = {15,2,0,21,1,12,24};
		const TInt FailCountSize = sizeof(FailCounts) / sizeof(TInt);
		static TInt FailCountIndex = 0; 
		if (++aFailureCount >= FailCounts[FailCountIndex])
#else
		// fail every KFailCount-th call
		const TInt KFailCount = 15;
		if (++aFailureCount >= KFailCount)
#endif
			{
			aFailureCount = 0;
#if defined (__RAMDOM_LOCK_FAILURES__)
			FailCount = Math::Random() & 0x1F;
			__CACHE_PRINT1(_L("FailCount %d"), FailCount);
#endif

#if defined (__PSEUDO_RANDOM_FAILURES__)
			FailCountIndex = (FailCountIndex +1) % FailCountSize ;
			__CACHE_PRINT1(_L("FailCount %d"), FailCounts[FailCountIndex]);
#endif

			return ETrue;
			}
		}
#endif
	return EFalse;
	}
sl@0
  1145
#endif	// defined(_DEBUG) || defined(_DEBUG_RELEASE)
sl@0
  1146
sl@0
  1147
//************************************
sl@0
  1148
// TGlobalFileCacheSettings
sl@0
  1149
//************************************
sl@0
  1150
sl@0
  1151
//TGlobalCacheFlags TGlobalFileCacheSettings::iFlags = TGlobalCacheFlags(ECacheEnabled);
sl@0
  1152
// Compiled-in defaults for the global file cache; sizes are stored in bytes
// (the K* defaults are in kilobytes, hence the << KByteToByteShift).
// ReadPropertiesFile() may override any of these at boot.
TInt32 TGlobalFileCacheSettings::iCacheSize	= KDefaultGlobalCacheSize << KByteToByteShift;
TInt32 TGlobalFileCacheSettings::iMaxLockedSize	= KDefaultGlobalCacheMaxLockedSize << KByteToByteShift;
TInt32 TGlobalFileCacheSettings::iLowMemoryThreshold = KDefaultLowMemoryThreshold;
TBool TGlobalFileCacheSettings::iEnabled	= KDefaultGlobalCacheEnabled; 

// Section of the F32 properties file holding the settings read below
_LIT8(KLitSectionNameFileCache,"FileCache");
sl@0
  1158
sl@0
  1159
void TGlobalFileCacheSettings::ReadPropertiesFile()
sl@0
  1160
	{
sl@0
  1161
	F32Properties::GetBool(KLitSectionNameFileCache, _L8("GlobalCacheEnabled"), iEnabled);
sl@0
  1162
	
sl@0
  1163
	// Get size of cache in kilobytes
sl@0
  1164
	TInt32 cacheSizeInKBytes;
sl@0
  1165
	if (F32Properties::GetInt(KLitSectionNameFileCache, _L8("GlobalCacheSize"), cacheSizeInKBytes))
sl@0
  1166
		iCacheSize = cacheSizeInKBytes << KByteToByteShift;
sl@0
  1167
sl@0
  1168
	// Get maximum amount of locked data i.e data unavailable for paging
sl@0
  1169
	TInt32 maxLockedSize;
sl@0
  1170
	if (F32Properties::GetInt(KLitSectionNameFileCache, _L8("GlobalCacheMaxLockedSize"), maxLockedSize))
sl@0
  1171
		iMaxLockedSize = maxLockedSize << KByteToByteShift;
sl@0
  1172
sl@0
  1173
	// Get low memory threshold
sl@0
  1174
	TInt32 lowMemoryThreshold;
sl@0
  1175
	if (F32Properties::GetInt(KLitSectionNameFileCache, _L8("LowMemoryThreshold"), lowMemoryThreshold))
sl@0
  1176
		iLowMemoryThreshold = lowMemoryThreshold;
sl@0
  1177
sl@0
  1178
	}
sl@0
  1179
sl@0
  1180
sl@0
  1181
TBool TGlobalFileCacheSettings::Enabled()
	{
	// Whether the global file cache is enabled
	return iEnabled;
	}
sl@0
  1185
sl@0
  1186
TInt TGlobalFileCacheSettings::CacheSize()
	{
	// Configured global cache size, in bytes
	return iCacheSize;
	}
sl@0
  1190
TInt TGlobalFileCacheSettings::MaxLockedSize()
	{
	// Maximum amount of locked (unpageable) cache data, in bytes
	return iMaxLockedSize;
	}
sl@0
  1194
sl@0
  1195
TInt TGlobalFileCacheSettings::LowMemoryThreshold()
	{
	// Configured low-memory threshold (units as read from the properties file)
	return iLowMemoryThreshold;
	}