// Copyright (c) 2006-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// f32\sfile\sf_cache_man.cpp
//
#include <e32std_private.h>
#include <e32svr.h>
#include <e32hal.h>
#include <hal.h>
#include "sf_std.h"
#include "sf_cache_man.h"
#include "sf_cache_client.h"
#include "sf_file_cache.h"
#include "sf_file_cache_defs.h"
//#undef __SIMULATE_LOCK_FAILURES__
//#define __RANDOM_LOCK_FAILURES__
//#define __PSEUDO_RANDOM_FAILURES__
//#define __LOCK_ENTIRE_CACHELINE__

#ifdef __RANDOM_LOCK_FAILURES__
#include <e32math.h>
#endif
#define CACHE_NUM_TO_ADDR(n) (iBase + ((n) << iCacheLineSizeLog2))
#define ADDR_TO_CACHELINE_ADDR(a) (((((a) - iBase) >> iCacheLineSizeLog2) << iCacheLineSizeLog2) + iBase)
#define ADDR_TO_INDEX(a) (((a) - iBase) >> iCacheLineSizeLog2)
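
// Worked example of the mapping macros above, assuming the default geometry
// defined below (iCacheLineSizeLog2 == 17, i.e. 128K cachelines): cacheline
// index 3 maps to iBase + 0x60000, and any address within that cacheline,
// e.g. iBase + 0x60FFF, maps back to index 3 via ADDR_TO_INDEX;
// ADDR_TO_CACHELINE_ADDR rounds it down to the cacheline base, iBase + 0x60000.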

#ifdef _DEBUG
#define __CACHELINE_INVARIANT(aCacheLine)							\
	__ASSERT_DEBUG(													\
		aCacheLine.iDirtySegments <= aCacheLine.iFilledSegments &&	\
		aCacheLine.iFilledSegments <= aCacheLine.iAllocatedSegments,\
		Fault(EInvalidCacheLine));
#else
#define __CACHELINE_INVARIANT(aCacheLine)
#endif

const TInt KSegmentSize = 4096;
const TInt KSegmentSizeLog2 = 12;
const TInt64 KSegmentSizeMask = MAKE_TINT64(0x7FFFFFFF,0xFFFFF000);

const TInt KCacheLineSizeLog2 = 17;	// 128K
const TInt KCacheLineSize = (1 << KCacheLineSizeLog2);
const TInt KCacheLineSizeInSegments = (KCacheLineSize >> KSegmentSizeLog2);
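
// With these defaults a cacheline spans KCacheLineSize / KSegmentSize
// = 128K / 4K = 32 segments, so KCacheLineSizeInSegments == 32 and a
// TUint8 is wide enough for every per-cacheline segment count.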

CCacheManager* CCacheManagerFactory::iCacheManager = NULL;

LOCAL_C void Fault(TCacheManagerFault aFault)
//
// Report a fault in the cache manager
//
	{
	User::Panic(_L("FSCACHEMAN"), aFault);
	}

/**
Indicates if a number passed in is a power of two

@param aNum number to be tested
@return Flag to indicate the result of the test
*/
LOCAL_C TBool IsPowerOfTwo(TInt aNum)
	{
	return (aNum != 0 && (aNum & -aNum) == aNum);
	}
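
// The expression above relies on two's-complement arithmetic: (aNum & -aNum)
// isolates the lowest set bit, which equals aNum itself only when exactly one
// bit is set. For example, 4096 & -4096 == 4096 (a power of two), whereas
// 4100 & -4100 == 4, which is != 4100.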

//********************
// CCacheManagerFactory
//********************
void CCacheManagerFactory::CreateL()
	{
	__ASSERT_ALWAYS(iCacheManager == NULL, Fault(ECacheAlreadyCreated));

	if (TGlobalFileCacheSettings::Enabled())
		{
		iCacheManager = CCacheManager::NewCacheL(TGlobalFileCacheSettings::CacheSize());
		}
	}

CCacheManager* CCacheManagerFactory::CacheManager()
	{
	return iCacheManager;
	}

TInt CCacheManagerFactory::Destroy()
	{
	delete iCacheManager;
	iCacheManager = NULL;
	return KErrNone;
	}
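
// Typical lifecycle of the factory, as used by the file server at boot and
// shutdown (a minimal sketch; the surrounding error handling is omitted):
//
//   CCacheManagerFactory::CreateL();                           // once, at startup
//   CCacheManager* manager = CCacheManagerFactory::CacheManager();
//   if (manager)    // NULL when the global cache is disabled
//       {
//       CCacheClient* client = manager->CreateClientL();
//       // ... per-file caching via the client ...
//       }
//   CCacheManagerFactory::Destroy();                           // once, at shutdown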

//********************
// CCacheManager
//********************
CCacheManager* CCacheManager::NewCacheL(TInt aCacheSize)
	{
	CCacheManager* cacheManager = new (ELeave) CCacheManager(aCacheSize);
	CleanupStack::PushL(cacheManager);
	cacheManager->ConstructL();
	CleanupStack::Pop(1, cacheManager);
	return cacheManager;
	}

CCacheManager::~CCacheManager()
	{
	iFreeQueue.Close();
	iUsedQueue.Close();
	delete [] iCacheLines;
	iChunk.Close();
	iLock.Close();
	}

CCacheManager::CCacheManager(TUint aCacheSize)
	{
	// Get the page size; fall back to the default segment size if the HAL call fails
	TInt pageSize;
	TInt r = HAL::Get(HAL::EMemoryPageSize, pageSize);
	if (r != KErrNone)
		pageSize = KSegmentSize;
	__ASSERT_ALWAYS(IsPowerOfTwo(pageSize), Fault(EIllegalPageSize));

	// For the time being we only support a page size of 4K and we assume
	// that page size = segment size, since the extra overhead of supporting
	// variable page sizes now is non-trivial.
	// If a processor with a different page size needs to be supported
	// in the future, then we need to either rework this code to be able to
	// divide up pages into smaller segments or analyze the impact of having
	// a different page size on performance.
	__ASSERT_ALWAYS(pageSize == KSegmentSize, Fault(EIllegalPageSize));

	iCacheLineSize = KCacheLineSize;
	iCacheLineSizeLog2 = KCacheLineSizeLog2;

	iCacheSize = aCacheSize;

	iSegmentsPerCacheLine = 1 << (iCacheLineSizeLog2 - KSegmentSizeLog2);

	iMaxLockedSegments = TGlobalFileCacheSettings::MaxLockedSize() >> KSegmentSizeLog2;

#ifdef __SIMULATE_LOCK_FAILURES__
	iSimulateLockFailureMode = ETrue;
#endif
	}

void CCacheManager::ConstructL()
	{
	// calculate the low-memory threshold below which we fail any attempt to allocate memory
	TMemoryInfoV1Buf meminfo;
	TInt r = UserHal::MemoryInfo(meminfo);
	__ASSERT_ALWAYS(r == KErrNone, Fault(EMemoryInfoFailed));

	iLowMemoryThreshold = (meminfo().iTotalRamInBytes * TGlobalFileCacheSettings::LowMemoryThreshold()) / 100;
	__CACHE_PRINT4(_L("CACHEMAN: totalRAM %d freeRAM %d KDefaultLowMemoryThresholdPercent %d iLowMemoryThreshold %d"),
		meminfo().iTotalRamInBytes, meminfo().iFreeRamInBytes, KDefaultLowMemoryThreshold, iLowMemoryThreshold);
	__CACHE_PRINT1(_L("CACHEMAN: iCacheSize %d"), iCacheSize);

	TChunkCreateInfo createInfo;
	createInfo.SetCache(iCacheSize);
	createInfo.SetOwner(EOwnerProcess);
	r = iChunk.Create(createInfo);
	User::LeaveIfError(r);

	TInt mm = UserSvr::HalFunction(EHalGroupKernel, EKernelHalMemModelInfo, 0, 0) & EMemModelTypeMask;
	if (mm < EMemModelTypeFlexible)
		{
		// On memory models earlier than the flexible one, the file server has to register
		// chunks that will be DMA-ed. On the flexible memory model, local media uses the
		// new physical-pinning kernel interface, which doesn't require registration of the chunk.
		UserSvr::RegisterTrustedChunk(iChunk.Handle());
		}

	__ASSERT_ALWAYS(iCacheSize > KSegmentSize, Fault(EIllegalCacheSize));

	r = iLock.CreateLocal();
	User::LeaveIfError(r);

	iNumOfCacheLines = iCacheSize >> iCacheLineSizeLog2;
	iBase = Base();

	iCacheLines = new (ELeave) TCacheLine[iNumOfCacheLines];

	for (TInt n = 0; n < iNumOfCacheLines; n++)
		{
		// coverity[var_decl]
		TCacheLine cacheLine;
		cacheLine.iAddr = CACHE_NUM_TO_ADDR(n);
		cacheLine.iAllocatedSegments = 0;
		cacheLine.iFilledSegments = 0;
		cacheLine.iDirtySegments = 0;
		cacheLine.iLockCount = 0;
		cacheLine.iLockedSegmentStart = 0;
		cacheLine.iLockedSegmentCount = 0;
		cacheLine.iClient = NULL;
		// coverity[uninit_use]
		iCacheLines[n] = cacheLine;

		r = iFreeQueue.Append(&iCacheLines[n]);
		User::LeaveIfError(r);
		}
	}
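
// Illustrative geometry: with an 8MB cache (iCacheSize == 0x800000) and 128K
// cachelines, iNumOfCacheLines == 64. ConstructL creates all 64 TCacheLine
// descriptors up front and parks them on the free queue, while the backing
// RChunk memory is only committed cacheline-by-cacheline on demand.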

CCacheClient* CCacheManager::CreateClientL()
	{
	CCacheClient* client = CCacheClient::NewL(*this);
	return client;
	}

void CCacheManager::RegisterClient(CCacheClient& /*aClient*/)
	{
#if defined(_DEBUG) || defined(_DEBUG_RELEASE)
#endif
	}

void CCacheManager::DeregisterClient(CCacheClient& /*aClient*/)
	{
#if defined(_DEBUG) || defined(_DEBUG_RELEASE)
#endif
	}

void CCacheManager::FreeMemoryChanged(TBool aMemoryLow)
	{
	iMemoryLow = aMemoryLow;
	}

TBool CCacheManager::TooManyLockedSegments()
	{
	return (iLockedSegmentCount >= iMaxLockedSegments) ? (TBool)ETrue : (TBool)EFalse;
	}

TInt CCacheManager::SegmentSize()
	{
	return KSegmentSize;
	}

TInt CCacheManager::SegmentSizeLog2()
	{
	return KSegmentSizeLog2;
	}

TInt64 CCacheManager::SegmentSizeMask()
	{
	return KSegmentSizeMask;
	}

TInt CCacheManager::CacheLineSize()
	{
	return KCacheLineSize;
	}

TInt CCacheManager::CacheLineSizeInSegments()
	{
	return KCacheLineSizeInSegments;
	}

TInt CCacheManager::CacheLineSizeLog2()
	{
	return KCacheLineSizeLog2;
	}

#if defined(_DEBUG) || defined(_DEBUG_RELEASE)
TFileCacheStats& CCacheManager::Stats()
	{
	iStats.iFreeCount = iFreeQueue.Count();
	iStats.iUsedCount = iUsedQueue.Count();
	iStats.iLockedSegmentCount = iLockedSegmentCount;
	iStats.iFilesOnClosedQueue = TClosedFileUtils::Count();
	__ASSERT_DEBUG(iStats.iFreeCount >= 0, Fault(EInvalidStats));
	__ASSERT_DEBUG(iStats.iUsedCount >= 0, Fault(EInvalidStats));
	__ASSERT_DEBUG(iStats.iLockedSegmentCount >= 0, Fault(EInvalidStats));
	__ASSERT_DEBUG(iStats.iFilesOnClosedQueue >= 0, Fault(EInvalidStats));
	return iStats;
	}
#endif // #if defined(_DEBUG) || defined(_DEBUG_RELEASE)

void CCacheManager::CacheLock()
	{
	iLock.Wait();
#if defined(_DEBUG) || defined(_DEBUG_RELEASE)
	iManagerLocked = ETrue;
#endif
	}

void CCacheManager::CacheUnlock()
	{
#if defined(_DEBUG) || defined(_DEBUG_RELEASE)
	iManagerLocked = EFalse;
#endif
	iLock.Signal();
	}

TInt CCacheManager::AllocateAndLockCacheLine(CCacheClient* aClient, TInt64 aPos, const TCacheLine*& aCacheLine, TInt aSegmentCount)
	{
	__ASSERT_DEBUG(aSegmentCount <= iSegmentsPerCacheLine, Fault(EInvalidAllocCount));

	// check for low system memory
	TMemoryInfoV1Buf meminfo;
	TInt r = UserHal::MemoryInfo(meminfo);
	__ASSERT_ALWAYS(r == KErrNone, Fault(EMemoryInfoFailed));
	if (iMemoryLow || (meminfo().iFreeRamInBytes < iLowMemoryThreshold))
		{
		__CACHE_PRINT(_L("CACHEMAN: free RAM below threshold !!!"));
		return KErrNoMemory;
		}

	CacheLock();

	__CACHE_PRINT1(_L("CACHEMAN: Allocate %d segments"), aSegmentCount);
	__CACHE_PRINT2(_L("CACHEMAN: iFreeQueue %d iUsedQueue %d"), iFreeQueue.Count(), iUsedQueue.Count());

#ifdef __SIMULATE_LOCK_FAILURES__
	if (SimulatedFailure(iAllocFailureCount))
		{
		__CACHE_PRINT(_L("CACHEMAN: simulating allocation failure"));
		CacheUnlock();
		return KErrNoMemory;
		}
#endif

	// any cachelines free ?
	TInt freeCacheLines = iFreeQueue.Count();
	TCacheLine* cacheLine = NULL;
	if (freeCacheLines == 0 && !StealCacheLine(aClient))
		{
		CacheUnlock();
		return KErrNoMemory;
		}

	cacheLine = iFreeQueue[0];
	__CACHELINE_INVARIANT((*cacheLine));
	__ASSERT_DEBUG(cacheLine->iAllocatedSegments == 0, Fault(EInvalidAllocCount));
	__ASSERT_DEBUG(cacheLine->iFilledSegments == 0, Fault(EInvalidFillCount));
	__ASSERT_DEBUG(cacheLine->iDirtySegments == 0, Fault(EInvalidFillCount));
	__ASSERT_DEBUG(cacheLine->iLockCount == 0, Fault(EInvalidLockCount));
	__ASSERT_DEBUG(cacheLine->iLockedSegmentStart == 0, Fault(EInvalidLockedPageStart));
	__ASSERT_DEBUG(cacheLine->iLockedSegmentCount == 0, Fault(EInvalidLockedPageCount));
	__ASSERT_DEBUG(cacheLine->iClient == NULL, Fault(EInvalidClient));

	TUint8* addr = cacheLine->iAddr;

	// Commit the memory for this cacheline
	r = Commit(addr, aSegmentCount);
	if (r != KErrNone)
		{
		CacheUnlock();
		return r;
		}

	cacheLine->iAllocatedSegments = (TUint8) aSegmentCount;
	cacheLine->iLockedSegmentStart = 0;
	cacheLine->iLockedSegmentCount = (TUint8) aSegmentCount;

	// Add to the used queue
	r = iUsedQueue.InsertInAddressOrder(cacheLine);
	if (r != KErrNone)
		{
		Decommit(addr, aSegmentCount);
		CacheUnlock();
		return r;
		}

	cacheLine->iClient = aClient;
	cacheLine->iPos = aPos;

	// Remove from free queue
	iFreeQueue.Remove(0);

	// RChunk will lock segments initially unless explicitly unlocked
	cacheLine->iLockCount++;
	__CACHE_PRINT2(_L("CACHEMAN: LockCount for %08X is %d"), cacheLine->iAddr, cacheLine->iLockCount);

	aCacheLine = cacheLine;
	CacheUnlock();
	return KErrNone;
	}
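
// Note on the lock-count protocol above: RChunk::Commit returns the new
// segments in the locked state, so a freshly allocated cacheline starts life
// with iLockCount == 1 and iLockedSegmentCount == aSegmentCount; the client
// is expected to balance this with UnlockCacheLine once it has filled the
// segments (or with FreeCacheLine on failure).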

/**
CCacheManager::ExtendCacheLine()

Attempts to extend the length of an existing cacheline by committing extra segments.
The cacheline must be owned and locked already.

@param aCacheLine A reference to a locked, owned cacheline
@param aSegmentCount The new length of the cacheline (including the existing segments).
					 This must be greater than the existing length.

@return KErrNone if successful
		or other system wide error code.
*/
TInt CCacheManager::ExtendCacheLine(CCacheClient* aClient, const TCacheLine& aCacheLine, TInt aSegmentCount)
	{
	__ASSERT_DEBUG(aSegmentCount > 0, Fault(EInvalidSegmentCount));

	TCacheLine& cacheLine = const_cast<TCacheLine&>(aCacheLine);
	__CACHELINE_INVARIANT((cacheLine));
	__ASSERT_DEBUG(aSegmentCount <= iSegmentsPerCacheLine, Fault(EInvalidSegmentCount));
	__ASSERT_DEBUG(aSegmentCount > cacheLine.iAllocatedSegments, Fault(EInvalidSegmentCount));
	__ASSERT_DEBUG(cacheLine.iLockCount > 0, Fault(EInvalidLockCount));	// must be locked already
	__ASSERT_ALWAYS(cacheLine.iClient == aClient, Fault(EExtendingUnownedCacheline));

	// ensure all pages in the cacheline are locked
	__ASSERT_DEBUG(cacheLine.iLockedSegmentStart == 0, Fault(EInvalidLockRange));
	__ASSERT_DEBUG(cacheLine.iLockedSegmentCount == cacheLine.iAllocatedSegments, Fault(EInvalidLockRange));

	__CACHE_PRINT2(_L("CACHEMAN: ExtendCacheLine(%08X, %d)"), cacheLine.iAddr, aSegmentCount);

	// Commit the new segments
	TUint8* addrNewSegment = cacheLine.iAddr + (cacheLine.iAllocatedSegments << KSegmentSizeLog2);
	TInt segmentCountNew = aSegmentCount - cacheLine.iAllocatedSegments;

	TInt r = Commit(addrNewSegment, segmentCountNew);
	if (r == KErrNone)
		cacheLine.iAllocatedSegments = cacheLine.iLockedSegmentCount = (TUint8) aSegmentCount;

	return r;
	}

/**
CCacheManager::ReAllocateAndLockCacheLine()

Attempts to lock and then extend or shrink an already owned cacheline, ready for re-use.
If successful the cacheline is returned locked and all segments are marked as empty.
The cacheline must initially be unlocked.

@param aCacheLine A reference to an unlocked cacheline
@param aSegmentCount The new length of the cacheline;
					 this may be greater or less than the existing length.

@return KErrNone if successful
		or other system wide error code.
*/
TInt CCacheManager::ReAllocateAndLockCacheLine(CCacheClient* aClient, TInt64 aPos, const TCacheLine& aCacheLine, TInt aSegmentCount)
	{
	TCacheLine& cacheLine = const_cast<TCacheLine&>(aCacheLine);
	__CACHELINE_INVARIANT((cacheLine));
	__ASSERT_DEBUG(aSegmentCount > 0, Fault(EInvalidSegmentCount));
	__ASSERT_DEBUG(aSegmentCount <= iSegmentsPerCacheLine, Fault(EInvalidSegmentCount));

	__CACHE_PRINT2(_L("CACHEMAN: ReAllocateAndLockCacheLine(%08X, %d)"), cacheLine.iAddr, aSegmentCount);

	TInt r;
	// old cacheline same size or bigger ?
	if (cacheLine.iAllocatedSegments >= aSegmentCount)
		{
		r = LockCacheLine(aClient, aCacheLine, 0, aSegmentCount);
		if (r != KErrNone)
			return r;
		if (cacheLine.iAllocatedSegments > aSegmentCount)
			{
			cacheLine.iFilledSegments = (TUint8) aSegmentCount;
			RemoveEmptySegments(aClient, aCacheLine);
			}
		}
	// old cacheline smaller
	else
		{
		r = LockCacheLine(aClient, aCacheLine, 0, cacheLine.iAllocatedSegments);
		if (r != KErrNone)
			return r;
		r = ExtendCacheLine(aClient, aCacheLine, aSegmentCount);
		if (r != KErrNone)
			{
			UnlockCacheLine(aClient, aCacheLine);
			return r;
			}
		}

	cacheLine.iFilledSegments = 0;
	cacheLine.iPos = aPos;

	return KErrNone;
	}

/**
CCacheManager::LockCacheLine()

@return KErrNone on success
		or other system wide error code.
*/
TInt CCacheManager::LockCacheLine(
	CCacheClient* aClient,
	const TCacheLine& aCacheLine,
	TInt aLockedPageStart,
	TInt aLockedPageCount)
	{
	CacheLock();

	TCacheLine& cacheLine = const_cast<TCacheLine&>(aCacheLine);
	__CACHELINE_INVARIANT((cacheLine));

	// has the cacheline been stolen ?
	if (cacheLine.iClient != aClient)
		{
		__CACHE_PRINT(_L("CCacheManager::LockCacheLine(), Cacheline Stolen !\n"));
		CacheUnlock();
		return KErrNotFound;
		}

	// validate lock range
	__ASSERT_DEBUG(aLockedPageStart >= 0, Fault(EInvalidLockRange));
	__ASSERT_DEBUG(aLockedPageStart + aLockedPageCount <= cacheLine.iAllocatedSegments, Fault(EInvalidLockRange));
	__ASSERT_DEBUG(aLockedPageCount <= iSegmentsPerCacheLine, Fault(EInvalidLockRange));
	// if already locked, don't allow lock range to grow down or up (to simplify code)
	__ASSERT_DEBUG(cacheLine.iLockCount == 0 ||
				   aLockedPageStart >= cacheLine.iLockedSegmentStart,
				   Fault(EInvalidLockRange));
	__ASSERT_DEBUG(cacheLine.iLockCount == 0 ||
				   aLockedPageStart + aLockedPageCount <= cacheLine.iLockedSegmentStart + cacheLine.iLockedSegmentCount,
				   Fault(EInvalidLockRange));

	__CACHE_PRINT2(_L("CACHEMAN: LockCacheLine(%08X, %d)"), cacheLine.iAddr, aLockedPageCount);

	if (InUse(aCacheLine))
		{
		__CACHE_PRINT(_L("CCacheManager::LockCacheLine(), Cacheline in use !\n"));
		CacheUnlock();
		return KErrInUse;
		}

	TInt lockErr = KErrNone;

	// increment the lock count;
	// if not already locked, lock the requested segments
	if (cacheLine.iLockCount++ == 0)
		{
#ifdef __LOCK_ENTIRE_CACHELINE__
		lockErr = Lock(cacheLine.iAddr, cacheLine.iAllocatedSegments);
#else
		__ASSERT_DEBUG(cacheLine.iDirtySegments == 0, Fault(ELockingAndAlreadyDirty));
		lockErr = Lock(cacheLine.iAddr + (aLockedPageStart << KSegmentSizeLog2), aLockedPageCount);
#endif
		if (lockErr == KErrNone)
			{
			cacheLine.iLockedSegmentStart = (TUint8) aLockedPageStart;
			cacheLine.iLockedSegmentCount = (TUint8) aLockedPageCount;
			}
		else	// if (lockErr != KErrNone)
			{
			__CACHE_PRINT2(_L("CACHEMAN: LockCacheLine(%08X) failure %d"), cacheLine.iAddr, lockErr);
			cacheLine.iLockCount--;
			// NB a lock failure implies the segment is decommitted
			FreeCacheLine(cacheLine);
			}
		}
	// if already locked (because the cacheline is dirty), ensure the lock range
	// isn't growing (this isn't allowed, to keep the code simpler)
	else
		{
		__ASSERT_DEBUG(cacheLine.iLockedSegmentStart == 0, Fault(EInvalidLockRange));
		__ASSERT_DEBUG(cacheLine.iLockedSegmentCount >= aLockedPageStart + aLockedPageCount, Fault(EInvalidLockRange));
		}

	CacheUnlock();
	return lockErr;
	}

TBool CCacheManager::UnlockCacheLine(CCacheClient* aClient, const TCacheLine& aCacheLine)
	{
	CacheLock();

	TCacheLine& cacheLine = const_cast<TCacheLine&>(aCacheLine);
	__CACHELINE_INVARIANT((cacheLine));
	__ASSERT_DEBUG(cacheLine.iLockCount > 0, Fault(EInvalidLockCount));
	__ASSERT_ALWAYS(cacheLine.iClient == aClient, Fault(EUnlockingUnownedCacheline));

	__CACHE_PRINT2(_L("CACHEMAN: UnlockCacheLine(%08X, %d)"), cacheLine.iAddr, cacheLine.iAllocatedSegments);

	// decrement the lock count
	TBool success = ETrue;
	if (cacheLine.iLockCount > 1)
		{
		cacheLine.iLockCount--;
		}
	else
		{
		if (cacheLine.iDirtySegments == 0)
			{
			cacheLine.iLockCount--;
#ifdef __LOCK_ENTIRE_CACHELINE__
			Unlock(cacheLine.iAddr, cacheLine.iAllocatedSegments);
#else
			Unlock(
				cacheLine.iAddr + (cacheLine.iLockedSegmentStart << KSegmentSizeLog2),
				cacheLine.iLockedSegmentCount);
#endif
			cacheLine.iLockedSegmentStart = cacheLine.iLockedSegmentCount = 0;
			}
		else
			{
			__CACHE_PRINT(_L("CACHEMAN: UnlockCacheLine - not unlocking, segment dirty"));
			success = EFalse;
			}
		}

	CacheUnlock();
	return success;
	}

TBool CCacheManager::StealCacheLine(CCacheClient* aClient)
	{
	__ASSERT_DEBUG(iManagerLocked, Fault(EManagerNotLocked));

	TInt usedQueueSize = iUsedQueue.Count();

#define INC_INDEX(x) (x++, x = (x >= usedQueueSize ? 0 : x))
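
	// INC_INDEX treats the used queue as a circular buffer: the loop below
	// starts one past the previous victim (iNextCacheLineToSteal) and sweeps
	// once round the queue, which spreads steals across clients and cachelines
	// instead of repeatedly evicting the same entry.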

	__CACHE_PRINT2(_L("CACHEMAN: StealCacheLine, iNextCacheLineToSteal %d used count %d"), iNextCacheLineToSteal, iUsedQueue.Count());

	TInt initIndex = iNextCacheLineToSteal;

	// Loop through all used cachelines, starting at the last stolen
	// cacheline index + 1 and ending when we find a suitable cacheline
	// to steal or we have looped back to the start
	for (INC_INDEX(iNextCacheLineToSteal);
		 iNextCacheLineToSteal != initIndex;
		 INC_INDEX(iNextCacheLineToSteal))
		{
		TCacheLine& cacheLine = *iUsedQueue[iNextCacheLineToSteal];
		if (cacheLine.iLockCount == 0 && cacheLine.iClient != aClient)
			{
			__CACHE_PRINT3(_L("CACHEMAN: StealCacheLine, stealing %d from %08X to %08X"),
				iNextCacheLineToSteal, cacheLine.iClient, aClient);
			FreeCacheLine(cacheLine);
			return ETrue;
			}
		}

	return EFalse;
	}

TBool CCacheManager::FreeCacheLine(CCacheClient* aClient, const TCacheLine& aCacheLine)
	{
	CacheLock();

	TCacheLine& cacheLine = const_cast<TCacheLine&>(aCacheLine);
	__CACHELINE_INVARIANT((cacheLine));

	// Has the cacheline been stolen ? (Assume success if it has)
	if (cacheLine.iClient != aClient)
		{
		__CACHE_PRINT(_L("CCacheManager::FreeCacheLine(), Cacheline Stolen !!!!\n"));
		CacheUnlock();
		return ETrue;
		}

	// Is the cacheline locked ?
	if (cacheLine.iLockCount > 0)
		{
		__CACHE_PRINT(_L("CCacheManager::FreeCacheLine(), attempt to free locked cacheline\n"));
		CacheUnlock();
		return EFalse;
		}

	FreeCacheLine(cacheLine);

	CacheUnlock();
	return ETrue;
	}

TInt CCacheManager::LockCount(CCacheClient* aClient, const TCacheLine& aCacheLine)
	{
	TCacheLine& cacheLine = const_cast<TCacheLine&>(aCacheLine);
	__CACHELINE_INVARIANT((cacheLine));

	// cacheline stolen ?
	if (cacheLine.iClient != aClient)
		return 0;

	return cacheLine.iLockCount;
	}

TInt CCacheManager::FillCount(CCacheClient* aClient, const TCacheLine& aCacheLine)
	{
	TCacheLine& cacheLine = const_cast<TCacheLine&>(aCacheLine);
	__CACHELINE_INVARIANT((cacheLine));

	// cacheline stolen ?
	if (cacheLine.iClient != aClient)
		return 0;

	return cacheLine.iFilledSegments;
	}

TInt CCacheManager::DirtyCount(CCacheClient* aClient, const TCacheLine& aCacheLine)
	{
	TCacheLine& cacheLine = const_cast<TCacheLine&>(aCacheLine);
	__CACHELINE_INVARIANT((cacheLine));

	// cacheline stolen ?
	if (cacheLine.iClient != aClient)
		return 0;

	return cacheLine.iDirtySegments;
	}

TBool CCacheManager::InUse(const TCacheLine& aCacheLine)
	{
	TCacheLine& cacheLine = const_cast<TCacheLine&>(aCacheLine);
	__CACHELINE_INVARIANT((cacheLine));

	// busy if lock count >= 1 and there are no dirty segments
	// or if lock count >= 2 and there are dirty segments
	return (cacheLine.iLockCount -
		(cacheLine.iDirtySegments ? 1 : 0)) > 0 ? (TBool)ETrue : (TBool)EFalse;
	}
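
// Example of the accounting above: a clean cacheline with iLockCount == 1 is
// in use (a client holds it), but a dirty cacheline with iLockCount == 1 is
// NOT considered in use, because dirty data holds one background lock until
// it is flushed; only a second, client-held lock makes a dirty cacheline busy.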

void CCacheManager::SetFilled(CCacheClient* aClient, const TCacheLine& aCacheLine, TInt aSegmentCount)
	{
	TCacheLine& cacheLine = const_cast<TCacheLine&>(aCacheLine);
	__CACHELINE_INVARIANT((cacheLine));
	__ASSERT_DEBUG(aSegmentCount <= iSegmentsPerCacheLine, Fault(EInvalidSegmentCount));
	__ASSERT_DEBUG(aSegmentCount <= cacheLine.iAllocatedSegments, Fault(EInvalidDirtyCount));
	__ASSERT_DEBUG(cacheLine.iLockCount > 0, Fault(ESetFilledNotLocked));
	__ASSERT_ALWAYS(cacheLine.iClient == aClient, Fault(EExtendingUnownedCacheline));

	cacheLine.iFilledSegments = Max(cacheLine.iFilledSegments, (TUint8) aSegmentCount);

	__ASSERT_DEBUG(cacheLine.iFilledSegments >= cacheLine.iDirtySegments, Fault(EInvalidDirtyCount));
	}

void CCacheManager::SetDirty(CCacheClient* aClient, const TCacheLine& aCacheLine, TInt aSegmentCount)
	{
	TCacheLine& cacheLine = const_cast<TCacheLine&>(aCacheLine);
	__CACHELINE_INVARIANT((cacheLine));
	__ASSERT_DEBUG(aSegmentCount <= iSegmentsPerCacheLine, Fault(EInvalidSegmentCount));
	__ASSERT_DEBUG(aSegmentCount <= cacheLine.iAllocatedSegments, Fault(EInvalidDirtyCount));
	__ASSERT_DEBUG(cacheLine.iLockCount > 0, Fault(ESetDirtyNotLocked));
	__ASSERT_ALWAYS(cacheLine.iClient == aClient, Fault(EExtendingUnownedCacheline));

	// ensure that the lock range is valid - we insist on dirty
	// cachelines having all dirty pages locked up to the last dirty page
	__ASSERT_DEBUG(cacheLine.iLockedSegmentStart == 0, Fault(ESetDirtyInvalidLockRange));
	__ASSERT_DEBUG(cacheLine.iLockedSegmentCount >= aSegmentCount, Fault(ESetDirtyInvalidLockRange));

	cacheLine.iFilledSegments = Max(cacheLine.iFilledSegments, (TUint8) aSegmentCount);
	cacheLine.iDirtySegments = Max(cacheLine.iDirtySegments, (TUint8) aSegmentCount);

	__ASSERT_DEBUG(cacheLine.iFilledSegments >= cacheLine.iDirtySegments, Fault(EInvalidDirtyCount));
	}

void CCacheManager::ClearDirty(CCacheClient* aClient, const TCacheLine& aCacheLine)
	{
	TCacheLine& cacheLine = const_cast<TCacheLine&>(aCacheLine);
	__CACHELINE_INVARIANT((cacheLine));
	__ASSERT_DEBUG(cacheLine.iLockCount > 0, Fault(EClearDirtyNotLocked));
	__ASSERT_ALWAYS(cacheLine.iClient == aClient, Fault(EExtendingUnownedCacheline));

	TInt dirtySegments = cacheLine.iDirtySegments;
	cacheLine.iDirtySegments = 0;
	SetFilled(aClient, cacheLine, dirtySegments);
	UnlockCacheLine(aClient, cacheLine);
	}

void CCacheManager::RemoveEmptySegments(CCacheClient* aClient, const TCacheLine& aCacheLine)
	{
	CacheLock();

	TCacheLine& cacheLine = const_cast<TCacheLine&>(aCacheLine);
	__CACHELINE_INVARIANT((cacheLine));

	// has the cacheline been stolen ?
	if (cacheLine.iClient != aClient)
		{
		__CACHE_PRINT(_L("CCacheManager::RemoveEmptySegments(), Cacheline Stolen !\n"));
		CacheUnlock();
		return;
		}

	__ASSERT_DEBUG(cacheLine.iLockCount > 0, Fault(ERemovingEmptyUnlocked));

	// Unlock any locked segments past the last filled segment
	TInt filledSegmentCount = cacheLine.iFilledSegments;
	TInt firstSegmentToUnlock;
	TInt segmentsToUnlock;
#ifdef __LOCK_ENTIRE_CACHELINE__
	firstSegmentToUnlock = filledSegmentCount;
	segmentsToUnlock = cacheLine.iAllocatedSegments - filledSegmentCount;
#else
	TInt firstLockedSegment = cacheLine.iLockedSegmentStart;
	if (firstLockedSegment <= filledSegmentCount)
		{
		firstSegmentToUnlock = filledSegmentCount;
		segmentsToUnlock = firstLockedSegment + cacheLine.iLockedSegmentCount - filledSegmentCount;
		}
	else
		{
		firstSegmentToUnlock = firstLockedSegment;
		segmentsToUnlock = cacheLine.iLockedSegmentCount;
		}
#endif

	TUint8* addrFirstSegmentToUnlock =
		cacheLine.iAddr + (firstSegmentToUnlock << KSegmentSizeLog2);
	if (segmentsToUnlock > 0)
		{
		Unlock(addrFirstSegmentToUnlock, segmentsToUnlock);
		cacheLine.iLockedSegmentCount =
			(TUint8) (cacheLine.iLockedSegmentCount - segmentsToUnlock);
		}

	// Decommit any segments past the last filled segment
	Decommit(
		cacheLine.iAddr + (filledSegmentCount << KSegmentSizeLog2),
		cacheLine.iAllocatedSegments - filledSegmentCount);
	cacheLine.iAllocatedSegments = (TUint8) filledSegmentCount;

	CacheUnlock();
	}

void CCacheManager::FreeCacheLine(TCacheLine& aCacheLine)
	{
	__ASSERT_DEBUG(iManagerLocked, Fault(EManagerNotLocked));
	aCacheLine.iClient = NULL;
	__ASSERT_ALWAYS(aCacheLine.iLockCount == 0, Fault(EFreeingLockedCacheLine));
	__ASSERT_ALWAYS(aCacheLine.iDirtySegments == 0, Fault(EFreeingDirtyCacheLine));

	Decommit(aCacheLine.iAddr, aCacheLine.iAllocatedSegments);
	aCacheLine.iAllocatedSegments = 0;
	aCacheLine.iFilledSegments = 0;
	aCacheLine.iLockedSegmentStart = 0;
	aCacheLine.iLockedSegmentCount = 0;

	// Remove from used queue
	TInt index = iUsedQueue.FindInAddressOrder(&aCacheLine);
	__ASSERT_ALWAYS(index != KErrNotFound, Fault(ESegmentNotFound));
	iUsedQueue.Remove(index);

	// put back on free queue
	TInt r = iFreeQueue.InsertInAddressOrder(&aCacheLine);
	__ASSERT_ALWAYS(r == KErrNone, Fault(EAppendToFreeQueueFailed));

	__CACHE_PRINT2(_L("CACHEMAN: FreeCacheLine, iFreeQueue %d iUsedQueue %d"), iFreeQueue.Count(), iUsedQueue.Count());
	}

#if defined(_DEBUG) || defined(_DEBUG_RELEASE)
void CCacheManager::DumpCacheLine(TCacheLine& aCacheLine)
	{
	RDebug::Print(_L("CACHEMAN: Cacheline client %08X addr %08X pos %d alloc %d filled %d dirty %d lock %d \tData: %02X %02X %02X %02X %02X %02X %02X %02X "),
		aCacheLine.iClient,
		aCacheLine.iAddr,
		I64LOW(aCacheLine.iPos),
		aCacheLine.iAllocatedSegments,
		aCacheLine.iFilledSegments,
		aCacheLine.iDirtySegments,
		aCacheLine.iLockCount,
		aCacheLine.iAddr[0], aCacheLine.iAddr[1], aCacheLine.iAddr[2], aCacheLine.iAddr[3],
		aCacheLine.iAddr[4], aCacheLine.iAddr[5], aCacheLine.iAddr[6], aCacheLine.iAddr[7]);
	}

void CCacheManager::DumpCache()
	{
	CacheLock();

	RPointerArray<CCacheClient> clients;

	RDebug::Print(_L("**** CACHEMAN: CacheLines ****\n"));
	TInt usedQueueSize = iUsedQueue.Count();
	TInt n;
	for (n = 0; n < usedQueueSize; n++)
		{
		TCacheLine& cacheLine = *iUsedQueue[n];
		DumpCacheLine(cacheLine);

		clients.InsertInAddressOrder(cacheLine.iClient);
		}

	TInt clientCount = clients.Count();
	for (n = 0; n < clientCount; n++)
		{
		RDebug::Print(_L("**** CACHEMAN: CacheClient #%d ****\n"), n);
		clients[n]->DumpCache();
		}

	clients.Close();

	CacheUnlock();
	}
#endif	// defined(_DEBUG) || defined(_DEBUG_RELEASE)

TUint8* CCacheManager::Base()
	{
	return iChunk.Base();
	}

TInt CCacheManager::Lock(TUint8* const aAddr, TInt aSegmentCount)
	{
	TInt r = iChunk.Lock(aAddr-iBase, aSegmentCount << KSegmentSizeLog2);
	//RDebug::Print(_L("RChunk::Lock(%08X, %d) %d"), aAddr-iBase, aSegmentCount << KSegmentSizeLog2, r);
	__ASSERT_DEBUG(r == KErrNone || r == KErrNoMemory || r == KErrNotFound, Fault(EUnexpectedLockFailure));

	__CACHE_PRINT3(_L("CACHEMAN: LOCK %08X %d %d"), aAddr, aSegmentCount, r);

#ifdef __SIMULATE_LOCK_FAILURES__
	if (SimulatedFailure(iLockFailureCount))
		{
		__CACHE_PRINT(_L("CACHEMAN: simulating lock failure"));
		r = KErrNotFound;
		}
#endif

	if (r == KErrNone)
		{
		iLockedSegmentCount += aSegmentCount;
		}
	else
		{
		__CACHE_PRINT(_L("CACHEMAN: LOCK FAILED"));
#if defined(_DEBUG) || defined(_DEBUG_RELEASE)
		iStats.iLockFailureCount++;
#endif
		}

	return r;
	}
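
// KErrNotFound is an expected failure mode here: for memory that has been
// unlocked in a cache chunk, RChunk::Lock reports KErrNotFound when the
// kernel has already reclaimed the pages, i.e. the cached data has been
// thrown away and the cacheline contents must be treated as lost (hence the
// FreeCacheLine on lock failure in LockCacheLine above).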

TInt CCacheManager::Unlock(TUint8* const aAddr, TInt aSegmentCount)
	{
	TInt r = iChunk.Unlock(aAddr-iBase, aSegmentCount << KSegmentSizeLog2);
	//RDebug::Print(_L("RChunk::Unlock(%08X, %d) %d"), aAddr-iBase, aSegmentCount << KSegmentSizeLog2, r);

	__CACHE_PRINT3(_L("CACHEMAN: UNLOCK %08X %d %d"), aAddr, aSegmentCount, r);

	if (r == KErrNone)
		{
		iLockedSegmentCount -= aSegmentCount;
		}
	else
		{
		__CACHE_PRINT(_L("CACHEMAN: UNLOCK FAILED"));
		}

	return r;
	}

TInt CCacheManager::Commit(TUint8* const aAddr, TInt aSegmentCount)
	{
#ifdef __SIMULATE_LOCK_FAILURES__
	if (SimulatedFailure(iCommitFailureCount))
		{
		__CACHE_PRINT(_L("CACHEMAN: simulating commit failure "));
		return KErrNoMemory;
		}
#endif

	TInt r = iChunk.Commit(aAddr-iBase, aSegmentCount << KSegmentSizeLog2);
	//RDebug::Print(_L("RChunk::Commit(%08X, %d) %d"), aAddr-iBase, aSegmentCount << KSegmentSizeLog2, r);

	__CACHE_PRINT3(_L("CACHEMAN: COMMIT: %08X %d %d, "), aAddr, aSegmentCount, r);

	if (r == KErrNone)
		{
		iLockedSegmentCount += aSegmentCount;
#if defined(_DEBUG) || defined(_DEBUG_RELEASE)
		iStats.iAllocatedSegmentCount += aSegmentCount;
#endif
		}
	else
		{
		__CACHE_PRINT(_L("CACHEMAN: COMMIT FAILED"));
#if defined(_DEBUG) || defined(_DEBUG_RELEASE)
		iStats.iCommitFailureCount++;
#endif
		}

	__ASSERT_DEBUG(r == KErrNone || r == KErrNoMemory, Fault(EUnexpectedCommitFailure));

	return r;
	}

TInt CCacheManager::Decommit(TUint8* const aAddr, TInt aSegmentCount)
	{
	TInt r = iChunk.Decommit(aAddr-iBase, aSegmentCount << KSegmentSizeLog2);
	//RDebug::Print(_L("RChunk::Decommit(%08X, %d), %d"), aAddr-iBase, aSegmentCount << KSegmentSizeLog2, r);

	__CACHE_PRINT3(_L("CACHEMAN: DECOMMIT: %08X %d %d, "), aAddr, aSegmentCount, r);

	if (r == KErrNone)
		{
#if defined(_DEBUG) || defined(_DEBUG_RELEASE)
		iStats.iAllocatedSegmentCount -= aSegmentCount;
#endif
		}
	else
		{
		__CACHE_PRINT(_L("CACHEMAN: DECOMMIT FAILED"));
		}

	return r;
	}

#if defined(_DEBUG) || defined(_DEBUG_RELEASE)

void CCacheManager::SimulateLockFailureMode(TBool aEnable)
	{
	iSimulateLockFailureMode = aEnable;
	__CACHE_PRINT1(_L("CACHEMAN: SimulateLockFailureMode: %d, "), iSimulateLockFailureMode);
	}

void CCacheManager::AllocateMaxSegments(TBool aEnable)
	{
	iAllocateMaxSegments = aEnable;
	__CACHE_PRINT1(_L("CACHEMAN: iAllocateMaxSegments: %d, "), iAllocateMaxSegments);
	}

TBool CCacheManager::AllocateMaxSegments()
	{
	return iAllocateMaxSegments;
	}

void CCacheManager::SimulateWriteFailure()
	{
	iSimulateWriteFailure = ETrue;
	}

TBool CCacheManager::SimulateWriteFailureEnabled()
	{
	TBool b = iSimulateWriteFailure;
	iSimulateWriteFailure = EFalse;
	return b;
	}

TBool CCacheManager::SimulatedFailure(TInt& aFailureCount)
	{
#ifdef __SIMULATE_LOCK_FAILURES__
	if (iSimulateLockFailureMode)
		{
#if defined (__RANDOM_LOCK_FAILURES__)
		static TInt FailCount = 15;
		if (++aFailureCount >= FailCount)
#elif defined (__PSEUDO_RANDOM_FAILURES__)
		const TInt FailCounts[] = {15,2,0,21,1,12,24};
		const TInt FailCountSize = sizeof(FailCounts) / sizeof(TInt);
		static TInt FailCountIndex = 0;
		if (++aFailureCount >= FailCounts[FailCountIndex])
#else
		const TInt KFailCount = 15;
		if (++aFailureCount >= KFailCount)
#endif
			{
			aFailureCount = 0;
#if defined (__RANDOM_LOCK_FAILURES__)
			FailCount = Math::Random() & 0x1F;
			__CACHE_PRINT1(_L("FailCount %d"), FailCount);
#endif
#if defined (__PSEUDO_RANDOM_FAILURES__)
			FailCountIndex = (FailCountIndex + 1) % FailCountSize;
			__CACHE_PRINT1(_L("FailCount %d"), FailCounts[FailCountIndex]);
#endif
			return ETrue;
			}
		}
#endif	// __SIMULATE_LOCK_FAILURES__
	return EFalse;
	}

#endif	// defined(_DEBUG) || defined(_DEBUG_RELEASE)

//************************************
// TGlobalFileCacheSettings
//************************************

//TGlobalCacheFlags TGlobalFileCacheSettings::iFlags = TGlobalCacheFlags(ECacheEnabled);
TInt32 TGlobalFileCacheSettings::iCacheSize = KDefaultGlobalCacheSize << KByteToByteShift;
TInt32 TGlobalFileCacheSettings::iMaxLockedSize = KDefaultGlobalCacheMaxLockedSize << KByteToByteShift;
TInt32 TGlobalFileCacheSettings::iLowMemoryThreshold = KDefaultLowMemoryThreshold;
TBool TGlobalFileCacheSettings::iEnabled = KDefaultGlobalCacheEnabled;

_LIT8(KLitSectionNameFileCache,"FileCache");

void TGlobalFileCacheSettings::ReadPropertiesFile()
	{
	F32Properties::GetBool(KLitSectionNameFileCache, _L8("GlobalCacheEnabled"), iEnabled);

	// Get size of cache in kilobytes
	TInt32 cacheSizeInKBytes;
	if (F32Properties::GetInt(KLitSectionNameFileCache, _L8("GlobalCacheSize"), cacheSizeInKBytes))
		iCacheSize = cacheSizeInKBytes << KByteToByteShift;

	// Get maximum amount of locked data, i.e. data unavailable for paging
	TInt32 maxLockedSize;
	if (F32Properties::GetInt(KLitSectionNameFileCache, _L8("GlobalCacheMaxLockedSize"), maxLockedSize))
		iMaxLockedSize = maxLockedSize << KByteToByteShift;

	// Get low memory threshold
	TInt32 lowMemoryThreshold;
	if (F32Properties::GetInt(KLitSectionNameFileCache, _L8("LowMemoryThreshold"), lowMemoryThreshold))
		iLowMemoryThreshold = lowMemoryThreshold;
	}
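
// The values above come from the [FileCache] section of the F32 properties
// file (typically estart.txt). An illustrative section (the values shown are
// examples only, not defaults):
//
//   [FileCache]
//   GlobalCacheEnabled 1
//   GlobalCacheSize 8192             // kilobytes
//   GlobalCacheMaxLockedSize 1024    // kilobytes
//   LowMemoryThreshold 10            // percent of total RAM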

TBool TGlobalFileCacheSettings::Enabled()
	{
	return iEnabled;
	}

TInt TGlobalFileCacheSettings::CacheSize()
	{
	return iCacheSize;
	}

TInt TGlobalFileCacheSettings::MaxLockedSize()
	{
	return iMaxLockedSize;
	}

TInt TGlobalFileCacheSettings::LowMemoryThreshold()
	{
	return iLowMemoryThreshold;
	}