os/kernelhwsrv/kerneltest/e32test/mmu/d_cache.cpp
author sl
Tue, 10 Jun 2014 14:32:02 +0200
changeset 1 260cb5ec6c19
permissions -rw-r--r--
Update contrib.
sl@0
     1
// Copyright (c) 2006-2009 Nokia Corporation and/or its subsidiary(-ies).
sl@0
     2
// All rights reserved.
sl@0
     3
// This component and the accompanying materials are made available
sl@0
     4
// under the terms of the License "Eclipse Public License v1.0"
sl@0
     5
// which accompanies this distribution, and is available
sl@0
     6
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
sl@0
     7
//
sl@0
     8
// Initial Contributors:
sl@0
     9
// Nokia Corporation - initial contribution.
sl@0
    10
//
sl@0
    11
// Contributors:
sl@0
    12
//
sl@0
    13
// Description:
sl@0
    14
// e32test\debug\d_cache.cpp
sl@0
    15
// See e32test\mmu\t_cache.cpp for details
sl@0
    16
// 
sl@0
    17
//
sl@0
    18
sl@0
    19
#include "d_cache.h"
sl@0
    20
#include <kernel/kern_priv.h>
sl@0
    21
#include <kernel/cache.h>
sl@0
    22
sl@0
    23
extern TUint32 GetCacheType();
sl@0
    24
extern void TestCodeFunc();
sl@0
    25
extern TInt TestCodeFuncSize();
sl@0
    26
extern void DataSegmetTestFunct(void* aBase, TInt aSize);
sl@0
    27
sl@0
    28
#ifdef __XSCALE_L2_CACHE__
sl@0
    29
extern TUint32 L2CacheTypeReg();
sl@0
    30
#endif
sl@0
    31
sl@0
    32
#if defined(__CPU_MEMORY_TYPE_REMAPPING)
sl@0
    33
extern TUint32 CtrlRegister();
sl@0
    34
extern TUint32 PRRRRegister();
sl@0
    35
extern TUint32 NRRRRegister();
sl@0
    36
extern void SetPRRR(TUint32);
sl@0
    37
extern void SetNRRR(TUint32);
sl@0
    38
#endif
sl@0
    39
sl@0
    40
sl@0
    41
typedef void(CodeTest) ();
sl@0
    42
sl@0
    43
// Kernel-side logical channel implementing the cache test driver.
// One request at a time is dispatched synchronously from Request();
// see e32test\mmu\t_cache.cpp for the user-side counterpart.
class DCacheTest : public DLogicalChannelBase
	{
public:
	DCacheTest();
	~DCacheTest();
protected:
	// DLogicalChannelBase interface
	virtual TInt DoCreate(TInt aUnit, const TDesC8* anInfo, const TVersion& aVer);
	virtual TInt Request(TInt aFunction, TAny* a1, TAny* a2);
private:
	// Request handlers. 'a1' is always a user-side pointer to the
	// request-specific structure, copied in/out with kumemget/kumemput.
	TInt GetCacheInfo(TAny* a1);
	TInt TestDataChunk(TAny* a1);
	TInt TestCodeChunk(TAny* a1);
	TInt TestWriteBackMode(TAny* a1, TBool aWriteAlloc);
	TInt TestL2Maintenance();
	TInt GetThreshold(TAny* a1);
	TInt SetThreshold(TAny* a1);
	TInt TestUseCase(TAny* a1);
	void LoopTestCodeFunc(CodeTest* f);


	// Fills 'info' with outer (L2) cache parameters, if built with one.
	void GetExternalCacheInfo(RCacheTestDevice::TCacheInfo& info);

	// Memory-remap (PRRR/NRRR) inspection and configuration helpers.
	void CheckRemapping(RCacheTestDevice::TCacheInfo& info);
	void Remap(RCacheTestDevice::TCacheAttr aCacheAttr);

	// Use-case timing scenarios run against the shared chunk and kernel heap.
	TInt UseCase_ReadFromChunk(RCacheTestDevice::TChunkTest& info);
	TInt UseCase_ReadFromChunk_ReadFromHeap(RCacheTestDevice::TChunkTest& info);
	TInt UseCase_WriteToChunk(RCacheTestDevice::TChunkTest& info);
	TInt UseCase_WriteToChunk_ReadFromHeap(RCacheTestDevice::TChunkTest& info);


	//Phys. memory and shared chunk alloc/dealloc primitives
	TInt AllocPhysicalRam(TInt aSize);
	void FreePhysicalRam();
	TInt CreateSharedChunk(TInt aMapAttr, TUint32& aActualMapAttr);
	void CloseSharedChunk();

private:
	DChunk* 	iSharedChunk;	// Shared chunk used in the test
	TPhysAddr 	iPhysAddr;		// Physical address of the allocated memory assigned to the chunk
	TUint 		iSize;			// The size of the allocated memory.
	TLinAddr 	iChunkBase;		// Base linear address of the shared chunk.

	TInt* iHeap1;				// Scratch heap buffers used by the use-case tests.
	TInt* iHeap2;
	TUint32 iDummy;				// Sink for reads that must not be optimised away.
	};
sl@0
    90
sl@0
    91
// Singleton channel pointer; cleared by the destructor so the factory
// can detect whether a channel is already open.
DCacheTest* CacheTestDriver;

DCacheTest::DCacheTest() 	{}

DCacheTest::~DCacheTest()	{CacheTestDriver = NULL;}

/**Creates the channel*/
TInt DCacheTest::DoCreate(TInt /*aUnit*/, const TDesC8* /*anInfo*/, const TVersion& /*aVer*/) {return KErrNone;}
sl@0
    99
sl@0
   100
/** Allocates physical memory and sets iPhysAddr & iSize accordingly.*/
sl@0
   101
TInt DCacheTest::AllocPhysicalRam(TInt aSize)
sl@0
   102
	{
sl@0
   103
	iSize = aSize;
sl@0
   104
	NKern::ThreadEnterCS();
sl@0
   105
	TInt r = Epoc::AllocPhysicalRam(aSize, iPhysAddr, 0); //Allocate physical RAM. This will set iPhysAddr
sl@0
   106
	NKern::ThreadLeaveCS();
sl@0
   107
	return r;
sl@0
   108
	}
sl@0
   109
sl@0
   110
/** Frees physical memory.
Returns the block previously obtained by AllocPhysicalRam (iPhysAddr/iSize)
to the system; runs inside a critical section so the free cannot be aborted. */
void DCacheTest::FreePhysicalRam()
	{
	NKern::ThreadEnterCS();
	Epoc::FreePhysicalRam(iPhysAddr, iSize);
	NKern::ThreadLeaveCS();
	}
sl@0
   117
sl@0
   118
/**
sl@0
   119
Creates shared chunks with allocated physical memory and sets iChunkBase accordingly.
sl@0
   120
@pre Physical memory is allocated (iPhysAddr & iSize are set accordingly).
sl@0
   121
*/
sl@0
   122
TInt DCacheTest::CreateSharedChunk(TInt aMapAttr, TUint32& aActualMapAttr)
sl@0
   123
	{
sl@0
   124
	TInt r;
sl@0
   125
    TChunkCreateInfo chunkInfo;
sl@0
   126
    chunkInfo.iType         = TChunkCreateInfo::ESharedKernelSingle;
sl@0
   127
    chunkInfo.iMaxSize      = iSize;
sl@0
   128
    chunkInfo.iMapAttr      = aMapAttr;
sl@0
   129
    chunkInfo.iOwnsMemory   = EFalse;
sl@0
   130
    chunkInfo.iDestroyedDfc = NULL;
sl@0
   131
sl@0
   132
	NKern::ThreadEnterCS();
sl@0
   133
    if (KErrNone != (r = Kern::ChunkCreate(chunkInfo, iSharedChunk, iChunkBase, aActualMapAttr)))
sl@0
   134
		{
sl@0
   135
		FreePhysicalRam();
sl@0
   136
		NKern::ThreadLeaveCS();
sl@0
   137
		return r;
sl@0
   138
		}
sl@0
   139
	r = Kern::ChunkCommitPhysical(iSharedChunk,0,iSize, iPhysAddr);
sl@0
   140
    if(r!=KErrNone)
sl@0
   141
        {
sl@0
   142
		CloseSharedChunk();
sl@0
   143
		FreePhysicalRam();
sl@0
   144
		NKern::ThreadLeaveCS();
sl@0
   145
		return r;
sl@0
   146
		}
sl@0
   147
	NKern::ThreadLeaveCS();
sl@0
   148
	return KErrNone;
sl@0
   149
	}
sl@0
   150
sl@0
   151
/** Closes shared chunk.
Chunk destruction is asynchronous; the supervisor-barrier HAL call blocks
until the deferred deletion has actually completed, so the chunk's mappings
are guaranteed gone when this returns. */
void DCacheTest::CloseSharedChunk()
	{
	NKern::ThreadEnterCS();
	Kern::ChunkClose(iSharedChunk);
	Kern::HalFunction(EHalGroupKernel, EKernelHalSupervisorBarrier, 0, 0);	// make sure async close has happened
	NKern::ThreadLeaveCS();
	}
sl@0
   159
sl@0
   160
sl@0
   161
#if defined(__CPU_ARMV7)
sl@0
   162
extern TUint32 CacheTypeRegister();
sl@0
   163
extern TUint32 CacheLevelIDRegister();
sl@0
   164
extern TUint32 CacheSizeIdRegister(TUint32 aType/*0-1*/, TUint32 aLevel/*0-7*/);
sl@0
   165
sl@0
   166
void ParseCacheLevelInfo(TInt aCacheSizeIDReg, RCacheTestDevice::TCacheSingle& aCS)
sl@0
   167
	{
sl@0
   168
	aCS.iSets = ((aCacheSizeIDReg>>13)& 0x7fff)+1;
sl@0
   169
	aCS.iWays =   ((aCacheSizeIDReg>>3)& 0x3ff)+1;
sl@0
   170
	aCS.iLineSize =1<<((aCacheSizeIDReg & 0x7)+4);//+2 (and +2 as we count in bytes)
sl@0
   171
	aCS.iSize = aCS.iSets * aCS.iWays * aCS.iLineSize;
sl@0
   172
	}
sl@0
   173
#endif
sl@0
   174
sl@0
   175
sl@0
   176
/**
printf-style append to a descriptor using the kernel formatter.
@param aDes Descriptor to append to (existing content is kept).
@param aFmt Kern::AppendFormat format string.
*/
void AppendTo(TDes8& aDes, const char* aFmt, ...)
	{
	VA_LIST list;
	VA_START(list,aFmt);
	Kern::AppendFormat(aDes,aFmt,list);
	VA_END(list);	// every VA_START must be paired with VA_END (non-trivial on some ABIs)
	}
sl@0
   182
sl@0
   183
/** Checks Memory Remap settings (both memory type and access permission remapping).*/
sl@0
   184
void DCacheTest::CheckRemapping(RCacheTestDevice::TCacheInfo& info)
sl@0
   185
	{
sl@0
   186
#if defined(__CPU_MEMORY_TYPE_REMAPPING)
sl@0
   187
	TUint32 cr = CtrlRegister();
sl@0
   188
	TUint32 prrr =PRRRRegister();
sl@0
   189
	TUint32 nrrr =NRRRRegister();
sl@0
   190
	AppendTo(info.iDesc,"Memory Remapping: CtrlReg:%xH, PRRR:%xH NRRR:%xH\n", cr, prrr, nrrr);
sl@0
   191
sl@0
   192
	if ( (cr&0x30000000) == 0x30000000)
sl@0
   193
		info.iMemoryRemapping = 1;
sl@0
   194
	else
sl@0
   195
		AppendTo(info.iDesc,"Error:Memory Remapping is OFF \n");
sl@0
   196
#endif
sl@0
   197
	}
sl@0
   198
sl@0
   199
//Remaps aCacheAttr memory type into EMemAttKernelInternal4
sl@0
   200
void DCacheTest::Remap(RCacheTestDevice::TCacheAttr aCacheAttr)
sl@0
   201
	{
sl@0
   202
#if defined(__CPU_MEMORY_TYPE_REMAPPING)
sl@0
   203
	TInt inner, outer;
sl@0
   204
	switch(aCacheAttr)
sl@0
   205
		{
sl@0
   206
		case RCacheTestDevice::E_InnerWT_Remapped: 	inner=2;outer=0;break;
sl@0
   207
		case RCacheTestDevice::E_InnerWBRA_Remapped:inner=3;outer=0;break;
sl@0
   208
		case RCacheTestDevice::E_InnerWB_Remapped:	inner=1;outer=0;break;
sl@0
   209
		case RCacheTestDevice::E_OuterWT_Remapped:	inner=0;outer=2;break;
sl@0
   210
		case RCacheTestDevice::E_OuterWBRA_Remapped:inner=0;outer=3;break;
sl@0
   211
		case RCacheTestDevice::E_OuterWB_Remapped:	inner=0;outer=1;break;
sl@0
   212
		case RCacheTestDevice::E_InOutWT_Remapped:	inner=2;outer=2;break;
sl@0
   213
		case RCacheTestDevice::E_InOutWBRA_Remapped:inner=3;outer=3;break;
sl@0
   214
		case RCacheTestDevice::E_InOutWB_Remapped:	inner=1;outer=1;break;
sl@0
   215
		default:Kern::PanicCurrentThread(_L("d_cache driver error"),0);return;
sl@0
   216
		}
sl@0
   217
sl@0
   218
	TUint32 prrr =PRRRRegister();
sl@0
   219
	TUint32 nrrr =NRRRRegister();
sl@0
   220
	prrr &= ~(3<<8);	// Clear EMemAttKernelInternal4 setting for memory type
sl@0
   221
	nrrr &= ~(3<<8); 	// Clear EMemAttKernelInternal4 setting for normal memory type, inner cache
sl@0
   222
	nrrr &= ~(3<<24);	// Clear EMemAttKernelInternal4 setting for normal memory type, outer cache
sl@0
   223
	prrr |= 2 <<8; 		// Set EMemAttKernelInternal4 as normal memory
sl@0
   224
	nrrr |= inner <<8;	// Set inner cache for EMemAttKernelInternal4 
sl@0
   225
	nrrr |= outer << 24;// Set outer cache for EMemAttKernelInternal4 
sl@0
   226
sl@0
   227
	SetPRRR(prrr);
sl@0
   228
	SetNRRR(nrrr);
sl@0
   229
#endif
sl@0
   230
	}
sl@0
   231
sl@0
   232
sl@0
   233
sl@0
   234
/** Fills in info structure with external cache parameters.
Reads the L2 controller's Cache ID and Cache Type registers directly from
the controller base address published in the super page, decodes way count
and way size from the Type register, and appends one TCacheSingle entry
(level 2, unified) to info. No-op unless built with an external cache. */
void DCacheTest::GetExternalCacheInfo(RCacheTestDevice::TCacheInfo& info)
	{
#if defined(__HAS_EXTERNAL_CACHE__)
	info.iOuterCache=1;

#if defined(__ARM_L210_CACHE__)
	AppendTo(info.iDesc,"Built as L210 Cache;\n");
#elif defined(__ARM_L220_CACHE__)
	AppendTo(info.iDesc,"Built as L220 Cache:\n");
#elif defined(__ARM_PL310_CACHE__)
	AppendTo(info.iDesc,"Built as PL310 Cache:\n");
#endif

	// Base address of the L2 cache controller, published by the bootstrap.
	TInt cacheController = Kern::SuperPage().iArmL2CacheBase;
	if (!cacheController)
		{
		AppendTo(info.iDesc,"Warning:No CCB Address in Super Page?\n");
		return;
		}
		
	TInt rawData = *(TInt*)(cacheController);   //reg 0 in controller is Cache ID Register
	AppendTo(info.iDesc,"L2 ID Reg:%xH\n", rawData);

	rawData = *(TInt*)(cacheController+4); //reg 4 in controller is Cache Type Register
	AppendTo(info.iDesc,"L2 Type Reg:%xH\n", rawData);

	RCacheTestDevice::TCacheSingle& cs = info.iCache[info.iCacheCount];

	cs.iLineSize=32; //always
	// Way count: L210/L220 carry it in bits 6:3 (capped at 8);
	// PL310 has a single 8-vs-16-way flag in bit 6.
#if defined(__ARM_L210_CACHE__) || defined(__ARM_L220_CACHE__)
	cs.iWays = (rawData>>3)&0x0f;	if (cs.iWays > 8) cs.iWays = 8;
#elif defined(__ARM_PL310_CACHE__)
	cs.iWays = (rawData&0x40) ? 16:8;
#endif
	// Way size: encoded in bits 10:8; the maximum encoding differs per controller.
	TInt waySize;
	switch((rawData>>8)&7)
		{
		case 0:		waySize = 0x4000;  break;
		case 1:		waySize = 0x4000;  break;
		case 2:		waySize = 0x8000;  break;
		case 3:		waySize = 0x10000; break;
		case 4:		waySize = 0x20000; break;
#if defined(__ARM_L210_CACHE__) || defined(__ARM_L220_CACHE__)
		default:	waySize = 0x40000; break;
#elif defined(__ARM_PL310_CACHE__)
		case 5:		waySize = 0x40000; break;
		default:	waySize = 0x80000; break;
#endif
		}
	cs.iSize = waySize * cs.iWays;
	cs.iSets = waySize >> 5; // = waySize / lineLen 


	cs.iLevel = 2;
	cs.iCode = 1;
	cs.iData = 1;
	cs.iDesc.SetLength(0);
	AppendTo(cs.iDesc,"Outer Unified PAPT");

	info.iMaxCacheSize = Max(info.iMaxCacheSize, cs.iSize);
	info.iCacheCount++;
#endif //defined(__HAS_EXTERNAL_CACHE__)
	}
sl@0
   298
sl@0
   299
sl@0
   300
/** Passes cache configuration parameters to the user side.
Builds a TCacheInfo describing every cache level the CPU reports:
- ARMv7: walks the Cache Level ID register up to the Level-of-Coherency,
  decoding each level's I/D/unified Cache Size ID register.
- Older CPUs with a cache type register: decodes the split/unified L1
  geometry from the single Cache Type register.
- Otherwise returns KErrNotSupported (outer-cache/remap info still added).
The completed structure is copied back to the user-side address a1. */
TInt DCacheTest::GetCacheInfo(TAny* a1)
	{
	TInt ret = KErrNone;
	RCacheTestDevice::TCacheInfo info;

	info.iDesc.SetLength(0);
	info.iCacheCount=0;
	info.iMaxCacheSize=0;
	info.iMemoryRemapping=0;
	info.iOuterCache=0;

////////////////////////
#if defined(__CPU_ARMV7)
////////////////////////
	info.iOuterCache=1;

	TUint32 ctr=CacheTypeRegister();
	TUint32 clr=CacheLevelIDRegister();
	TInt LoC = (clr>>24)&7;	//The number of levels to be purged/clean to Point-to-Coherency
	TInt LoU = (clr>>27)&7;	//The number of levels to be purged/clean to Point-to-Unification
	AppendTo(info.iDesc,"ARMv7 cache - CTR:%xH CLR:%xH LoC:%d LoU:%d\n", ctr, clr, LoC, LoU);
	
	// Walk each architected cache level; a level can contribute up to two
	// entries (separate I and D) or one (unified).
	// NOTE(review): assumes info.iCache[] has room for every entry up to
	// LoC levels - confirm against RCacheTestDevice::TCacheInfo's array size.
	RCacheTestDevice::TCacheSingle* cs = &info.iCache[info.iCacheCount];
	TInt level;
	for (level=0;level<LoC;level++)
		{
		TInt type = (clr >> (level*3)) & 7; //000:NoCache 001:ICache 010:DCache 011:Both 100:Unified
		
		if (type==0)		// No Cache. Also no cache below this level
			break;
		
		if(type & 1) 	// Instruction Cache
			{
			TInt csr = CacheSizeIdRegister(1,level);	// 1 selects the I-side CSSELR view
			ParseCacheLevelInfo(csr, *cs);
			cs->iLevel = level+1;
			cs->iCode = 1;
			cs->iData = 0;
			AppendTo(cs->iDesc,"ICache CSR:%xH",csr);
			info.iMaxCacheSize = Max(info.iMaxCacheSize, cs->iSize);
			cs = &info.iCache[++info.iCacheCount];
			}
			
		if(type & 2) 	// Data Cache
			{
			TInt csr = CacheSizeIdRegister(0,level);
			ParseCacheLevelInfo(csr, *cs);
			cs->iLevel = level+1;
			cs->iCode = 0;
			cs->iData = 1;
			AppendTo(cs->iDesc,"DCache CSR:%xH",csr);
			info.iMaxCacheSize = Max(info.iMaxCacheSize, cs->iSize);
			cs = &info.iCache[++info.iCacheCount];
			}

		if(type & 4) 	// Unified Cache
			{
			TInt csr = CacheSizeIdRegister(0,level);	// unified caches are read via the D-side selector
			ParseCacheLevelInfo(csr, *cs);
			cs->iLevel = level+1;
			cs->iCode = 1;
			cs->iData = 1;
			AppendTo(cs->iDesc,"Unified CSR:%xH",csr);
			info.iMaxCacheSize = Max(info.iMaxCacheSize, cs->iSize);
			cs = &info.iCache[++info.iCacheCount];
			}
		}

///////////////////////////////////
#elif defined(__CPU_HAS_CACHE_TYPE_REGISTER)
///////////////////////////////////

	TInt rawData=GetCacheType();
	TInt splitCache=rawData&0x01000000;	// bit 24: separate I and D caches
	AppendTo(info.iDesc,"L1 Cache TypeReg=%xH\n", rawData);

	//Cache #1 (D-cache if split, otherwise the unified cache)
	// NOTE(review): the len field occupies bits [1:0], so the mask below
	// would normally be (s&3); (s&2) only matches when len==2 (32-byte
	// lines), which holds on the CPUs this branch targets - confirm before
	// reusing on other cores.
	TUint32 s=(rawData>>12)&0xfff;  		//s = P[11]:0:size[9:5]:assoc[5:3]:M[2]:len[1:0] 
	info.iCache[info.iCacheCount].iLineSize = 1 << ((s&2) + 3); 							//1<<(len+3)
	info.iCache[info.iCacheCount].iWays = (2 + ((s>>2)&1)) << (((s>>3)&0x7) - 1);			//(2+M) << (assoc-1)
	info.iCache[info.iCacheCount].iSize = (2 + ((s>>2)&1)) << (((s>>6)&0xf) + 8);			//(2+M) << (size+8)
	info.iCache[info.iCacheCount].iSets = 1 << (((s>>6)&0xf) + 6 - ((s>>3)&0x7) - (s&2));	//(2+M) <<(size + 6 -assoc - len)
	info.iCache[info.iCacheCount].iData = 1;
	info.iCache[info.iCacheCount].iLevel = 1;

	if (splitCache)
		{
		info.iCache[info.iCacheCount].iCode = 0;
		info.iCache[info.iCacheCount].iDesc.SetLength(0);
		AppendTo(info.iCache[info.iCacheCount].iDesc,"Inner DCache");

		// ARMv6 D-caches are virtually indexed/physically tagged; older cores are VIVT.
		#if defined(__CPU_ARMV6)
		AppendTo(info.iCache[info.iCacheCount].iDesc," VAPT");
		#else
		AppendTo(info.iCache[info.iCacheCount].iDesc," VAVT");
		#endif		
		info.iMaxCacheSize = Max(info.iMaxCacheSize, info.iCache[info.iCacheCount].iSize);
		info.iCacheCount++;

		// Cache #2 (I-cache) - same field layout in the low 12 bits.
		s=rawData&0xfff;  		//s = P[11]:0:size[9:5]:assoc[5:3]:M[2]:len[1:0] 
		info.iCache[info.iCacheCount].iLineSize = 1 << ((s&2) + 3); 							//1<<(len+3)
		info.iCache[info.iCacheCount].iWays = (2 + ((s>>2)&1)) << (((s>>3)&0x7) - 1);			//(2+M) << (assoc-1)
		info.iCache[info.iCacheCount].iSize = (2 + ((s>>2)&1)) << (((s>>6)&0xf) + 8);			//(2+M) << (size+8)
		info.iCache[info.iCacheCount].iSets = 1 << (((s>>6)&0xf) + 6 - ((s>>3)&0x7) - (s&2));	//(2+M) <<(size + 6 -assoc - len)
		info.iCache[info.iCacheCount].iLevel = 1;
		info.iCache[info.iCacheCount].iCode = 1;
		info.iCache[info.iCacheCount].iData = 0;
		info.iCache[info.iCacheCount].iDesc.SetLength(0);
		AppendTo(info.iCache[info.iCacheCount].iDesc,"Inner ICache");
		#if defined(__CPU_ARMV6)
		AppendTo(info.iCache[info.iCacheCount].iDesc," VAPT");
		#else
		AppendTo(info.iCache[info.iCacheCount].iDesc," VAVT");
		#endif		
		}
	else
	{
		info.iCache[info.iCacheCount].iCode = 1;
		info.iCache[info.iCacheCount].iDesc.SetLength(0);
		AppendTo(info.iCache[info.iCacheCount].iDesc,"Inner Unified");
		#if defined(__CPU_ARMV6)
		AppendTo(info.iCache[info.iCacheCount].iDesc," VAPT");
		#else
		AppendTo(info.iCache[info.iCacheCount].iDesc," VAVT");
		#endif		
	}		
	info.iMaxCacheSize = Max(info.iMaxCacheSize, info.iCache[info.iCacheCount].iSize);
	info.iCacheCount++;

/////
#else
/////

	ret = KErrNotSupported;

#endif

	GetExternalCacheInfo(info); // Get ARMl210/20 info
	CheckRemapping(info);		// Get memory remapping info

	info.iDmaBufferAlignment = Cache::DmaBufferAlignment();
	kumemput(a1,&info,sizeof(info));
	return ret;
	}
sl@0
   446
sl@0
   447
/** Get cache thresholds.*/
sl@0
   448
TInt DCacheTest::GetThreshold(TAny* a1)
sl@0
   449
	{
sl@0
   450
	RCacheTestDevice::TThresholdInfo info;
sl@0
   451
	kumemget(&info,a1,sizeof(info));
sl@0
   452
sl@0
   453
	TCacheThresholds thresholds;
sl@0
   454
	TInt r = Cache::GetThresholds(thresholds, info.iCacheType);
sl@0
   455
	if (r==KErrNone)
sl@0
   456
		{
sl@0
   457
		info.iPurge = thresholds.iPurge;	
sl@0
   458
		info.iClean = thresholds.iClean;	
sl@0
   459
		info.iFlush = thresholds.iFlush;	
sl@0
   460
		kumemput(a1,&info,sizeof(info));
sl@0
   461
		}
sl@0
   462
	return r;
sl@0
   463
	}
sl@0
   464
sl@0
   465
/** Set cache thresholds.*/
sl@0
   466
TInt DCacheTest::SetThreshold(TAny* a1)
sl@0
   467
	{
sl@0
   468
	RCacheTestDevice::TThresholdInfo info;
sl@0
   469
	kumemget(&info,a1,sizeof(info));
sl@0
   470
sl@0
   471
	TCacheThresholds thresholds;
sl@0
   472
	thresholds.iPurge = info.iPurge;
sl@0
   473
	thresholds.iClean = info.iClean;
sl@0
   474
	thresholds.iFlush = info.iFlush;
sl@0
   475
	return Cache::SetThresholds(thresholds, info.iCacheType);
sl@0
   476
	}
sl@0
   477
sl@0
   478
// Runs DataSegmetTestFunct against data from a chunk.
// Chunk cache attributes and its size are specified in input arguments.
// Measures and returns the time spent.
//
// For E_Default the buffer comes from the kernel heap and no chunk is
// created; for every other attribute a shared chunk is mapped over freshly
// allocated physical RAM with the requested cache policy. The legacy
// EMapAttr* attributes are OR-ed into chunkAttr directly; the newer
// TMappingAttributes2 forms are placement-new'd over the same TUint32
// (TMappingAttributes2 is a wrapper whose raw bits live in that word).
TInt DCacheTest::TestDataChunk(TAny* a1)
	{
	TInt r = KErrNone;
	TInt time;
	
	RCacheTestDevice::TChunkTest info;
	kumemget(&info,a1,sizeof(info));


	TUint32 chunkAttr = EMapAttrSupRw;
	if (info.iShared) chunkAttr |= EMapAttrShared;
	// On SMP, cacheable mappings must be shared to be coherent, so force it.
#ifdef __SMP__
	TUint32 force_shared = EMapAttrShared;
#else
	TUint32 force_shared = 0;
#endif

	switch (info.iCacheAttr)
		{

		case RCacheTestDevice::E_FullyBlocking:	chunkAttr |= EMapAttrFullyBlocking; break;
		case RCacheTestDevice::E_Buffered_NC:	chunkAttr |= EMapAttrBufferedNC; break;
		case RCacheTestDevice::E_Buffered_C:	chunkAttr |= EMapAttrBufferedC; break;

		case RCacheTestDevice::E_InnerWT:		chunkAttr |= EMapAttrCachedWTRA|force_shared; break;
		case RCacheTestDevice::E_InnerWBRA:		chunkAttr |= EMapAttrCachedWBRA|force_shared; break;
		case RCacheTestDevice::E_InnerWB:		chunkAttr |= EMapAttrCachedWBWA|force_shared; break;

		case RCacheTestDevice::E_OuterWT:		chunkAttr |= EMapAttrL2CachedWTRA; break;
		case RCacheTestDevice::E_OuterWBRA:		chunkAttr |= EMapAttrL2CachedWBRA; break;
		case RCacheTestDevice::E_OuterWB:		chunkAttr |= EMapAttrL2CachedWBWA; break;

		case RCacheTestDevice::E_InOutWT:		chunkAttr |= EMapAttrCachedWTRA|EMapAttrL2CachedWTRA|force_shared; break;
		case RCacheTestDevice::E_InOutWBRA:		chunkAttr |= EMapAttrCachedWBRA|EMapAttrL2CachedWBRA|force_shared; break;
		case RCacheTestDevice::E_InOutWB:		chunkAttr |= EMapAttrCachedWBWA|EMapAttrL2CachedWBWA|force_shared; break;

		case RCacheTestDevice::E_StronglyOrder:
			new (&chunkAttr) TMappingAttributes2(EMemAttStronglyOrdered,EFalse,ETrue,EFalse,info.iShared?ETrue:EFalse);
			break;
		case RCacheTestDevice::E_Device:
			new (&chunkAttr) TMappingAttributes2(EMemAttDevice,EFalse,ETrue,EFalse,info.iShared?ETrue:EFalse);
			break;
		case RCacheTestDevice::E_Normal_Uncached:
			new (&chunkAttr) TMappingAttributes2(EMemAttNormalUncached,EFalse,ETrue,EFalse,info.iShared?ETrue:EFalse);
			break;
		case RCacheTestDevice::E_Normal_Cached:
			new (&chunkAttr) TMappingAttributes2(EMemAttNormalCached,EFalse,ETrue,EFalse,(info.iShared|force_shared)?ETrue:EFalse);
			break;
		case RCacheTestDevice::E_KernelInternal4:
			new (&chunkAttr) TMappingAttributes2(EMemAttKernelInternal4,EFalse,ETrue,ETrue,(info.iShared|force_shared)?ETrue:EFalse);
			break;
		case RCacheTestDevice::E_PlatformSpecific5:
			new (&chunkAttr) TMappingAttributes2(EMemAttPlatformSpecific5,EFalse,ETrue,ETrue,(info.iShared|force_shared)?ETrue:EFalse);
			break;
		case RCacheTestDevice::E_PlatformSpecific6:
			new (&chunkAttr) TMappingAttributes2(EMemAttPlatformSpecific6,EFalse,ETrue,ETrue,(info.iShared|force_shared)?ETrue:EFalse);
			break;
		case RCacheTestDevice::E_PlatformSpecific7:
			new (&chunkAttr) TMappingAttributes2(EMemAttPlatformSpecific7,EFalse,ETrue,ETrue,(info.iShared|force_shared)?ETrue:EFalse);
			break;

#if defined(__CPU_MEMORY_TYPE_REMAPPING)
		// Remapped types: Remap() programs EMemAttKernelInternal4's PRRR/NRRR
		// entry with the requested policy, then the chunk is mapped with it.
		case RCacheTestDevice::E_InnerWT_Remapped:
		case RCacheTestDevice::E_InnerWBRA_Remapped:
		case RCacheTestDevice::E_InnerWB_Remapped:
		case RCacheTestDevice::E_InOutWT_Remapped:
		case RCacheTestDevice::E_InOutWBRA_Remapped:
		case RCacheTestDevice::E_InOutWB_Remapped:
#ifdef __SMP__
			info.iShared = ETrue;	// inner-cacheable on SMP must be shared
#endif
		// deliberate fall-through: outer-only types need no forced sharing
		case RCacheTestDevice::E_OuterWT_Remapped:
		case RCacheTestDevice::E_OuterWBRA_Remapped:
		case RCacheTestDevice::E_OuterWB_Remapped:
			Remap(info.iCacheAttr);
			new (&chunkAttr) TMappingAttributes2(EMemAttKernelInternal4,EFalse,ETrue,EFalse,info.iShared?ETrue:EFalse);
			break;
#endif
			
		case RCacheTestDevice::E_Default:
			{
			// Run the test against memory from kernel heap (no need for extra memory chunks)
			NKern::ThreadEnterCS();
			TLinAddr bufferBase = (TLinAddr)Kern::Alloc(info.iSize);
			NKern::ThreadLeaveCS();
			if (!bufferBase)
					return KErrNoMemory;
		
			//You can't purge  allocated heap memory as it will invalidate other data from the same cache line.
			//Cache::SyncMemoryAfterDmaRead((TLinAddr)bufferBase, info.iSize);

			// Execute the test
			time = NKern::TickCount();
			DataSegmetTestFunct((void*)bufferBase, info.iSize);
			info.iTime = NKern::TickCount() - time;
			info.iActualMapAttr = 0;	// no chunk mapping involved
			kumemput(a1,&info,sizeof(info));

			NKern::ThreadEnterCS();
			Kern::Free((TAny*)bufferBase);
			NKern::ThreadLeaveCS();

			return KErrNone;
			}
		default:
			return KErrArgument;		
		}

	// Run the test against chunk with cache attributes as specified in info.iCacheState.
	if (KErrNone!=(r=AllocPhysicalRam(Kern::RoundToPageSize(info.iSize)))) return r;
	if (KErrNone!=(r=CreateSharedChunk(chunkAttr, info.iActualMapAttr))) return r;
	
	Cache::SyncMemoryAfterDmaRead(iChunkBase, info.iSize); // Invalidate (aka purge) cache.

	time = NKern::TickCount();
	DataSegmetTestFunct((void*)iChunkBase, info.iSize);
	info.iTime = NKern::TickCount() - time;

	CloseSharedChunk();
	FreePhysicalRam();

	kumemput(a1,&info,sizeof(info));
	return KErrNone;
	}
sl@0
   605
sl@0
   606
// Invokes the supplied test function 5000 times back-to-back, so the timing
// measured by the caller is dominated by instruction-fetch behaviour.
void DCacheTest::LoopTestCodeFunc(CodeTest* f)
	{
	TInt remaining = 5000;
	while (remaining--)
		(*f)();
	}
sl@0
   611
sl@0
   612
// Runs TestCodeFunc (contains nops with ret at the end) from a chunk.
sl@0
   613
// Chunk cache attributes and the size of function are specified in input arguments
sl@0
   614
// Measures and returns the time spent.
sl@0
   615
TInt DCacheTest::TestCodeChunk(TAny* a1)
sl@0
   616
	{
sl@0
   617
	TInt r = KErrNone;
sl@0
   618
	TInt time;
sl@0
   619
	
sl@0
   620
	RCacheTestDevice::TChunkTest info;
sl@0
   621
	kumemget(&info,a1,sizeof(info));
sl@0
   622
sl@0
   623
sl@0
   624
	info.iActualMapAttr = EMapAttrSupRwx;
sl@0
   625
	if (info.iShared) info.iActualMapAttr |= EMapAttrShared;
sl@0
   626
#ifdef __SMP__
sl@0
   627
	TUint32 force_shared = EMapAttrShared;
sl@0
   628
#else
sl@0
   629
	TUint32 force_shared = 0;
sl@0
   630
#endif
sl@0
   631
sl@0
   632
	switch (info.iCacheAttr)
sl@0
   633
		{
sl@0
   634
		case RCacheTestDevice::E_FullyBlocking:	info.iActualMapAttr |= EMapAttrFullyBlocking; break;
sl@0
   635
		case RCacheTestDevice::E_Buffered_NC:	info.iActualMapAttr |= EMapAttrBufferedNC; break;
sl@0
   636
		case RCacheTestDevice::E_Buffered_C:	info.iActualMapAttr |= EMapAttrBufferedC; break;
sl@0
   637
sl@0
   638
		case RCacheTestDevice::E_InnerWT:		info.iActualMapAttr |= EMapAttrCachedWTRA|force_shared; break;
sl@0
   639
		case RCacheTestDevice::E_InnerWBRA:		info.iActualMapAttr |= EMapAttrCachedWBRA|force_shared; break;
sl@0
   640
		case RCacheTestDevice::E_InnerWB:		info.iActualMapAttr |= EMapAttrCachedWBWA|force_shared; break;
sl@0
   641
sl@0
   642
		case RCacheTestDevice::E_OuterWT:		info.iActualMapAttr |= EMapAttrL2CachedWTRA; break;
sl@0
   643
		case RCacheTestDevice::E_OuterWBRA:		info.iActualMapAttr |= EMapAttrL2CachedWBRA; break;
sl@0
   644
		case RCacheTestDevice::E_OuterWB:		info.iActualMapAttr |= EMapAttrL2CachedWBWA; break;
sl@0
   645
sl@0
   646
		case RCacheTestDevice::E_InOutWT:		info.iActualMapAttr |= EMapAttrCachedWTRA|EMapAttrL2CachedWTRA|force_shared; break;
sl@0
   647
		case RCacheTestDevice::E_InOutWBRA:		info.iActualMapAttr |= EMapAttrCachedWBRA|EMapAttrL2CachedWBRA|force_shared; break;
sl@0
   648
		case RCacheTestDevice::E_InOutWB:		info.iActualMapAttr |= EMapAttrCachedWBWA|EMapAttrL2CachedWBWA|force_shared; break;
sl@0
   649
sl@0
   650
		case RCacheTestDevice::E_StronglyOrder:
sl@0
   651
			new (&info.iActualMapAttr) TMappingAttributes2(EMemAttStronglyOrdered,EFalse,ETrue,ETrue,info.iShared?ETrue:EFalse);
sl@0
   652
			break;
sl@0
   653
		case RCacheTestDevice::E_Device:
sl@0
   654
			new (&info.iActualMapAttr) TMappingAttributes2(EMemAttDevice,EFalse,ETrue,ETrue,info.iShared?ETrue:EFalse);
sl@0
   655
			break;
sl@0
   656
		case RCacheTestDevice::E_Normal_Uncached:
sl@0
   657
			new (&info.iActualMapAttr) TMappingAttributes2(EMemAttNormalUncached,EFalse,ETrue,ETrue,info.iShared?ETrue:EFalse);
sl@0
   658
			break;
sl@0
   659
		case RCacheTestDevice::E_Normal_Cached:
sl@0
   660
			new (&info.iActualMapAttr) TMappingAttributes2(EMemAttNormalCached,EFalse,ETrue,ETrue,info.iShared?ETrue:EFalse);
sl@0
   661
			break;
sl@0
   662
sl@0
   663
#if defined(__CPU_MEMORY_TYPE_REMAPPING)
sl@0
   664
		case RCacheTestDevice::E_InnerWT_Remapped:
sl@0
   665
		case RCacheTestDevice::E_InnerWBRA_Remapped:
sl@0
   666
		case RCacheTestDevice::E_InnerWB_Remapped:
sl@0
   667
		case RCacheTestDevice::E_InOutWT_Remapped:
sl@0
   668
		case RCacheTestDevice::E_InOutWBRA_Remapped:
sl@0
   669
		case RCacheTestDevice::E_InOutWB_Remapped:
sl@0
   670
#ifdef __SMP__
sl@0
   671
			info.iShared = ETrue;
sl@0
   672
#endif
sl@0
   673
		case RCacheTestDevice::E_OuterWT_Remapped:
sl@0
   674
		case RCacheTestDevice::E_OuterWBRA_Remapped:
sl@0
   675
		case RCacheTestDevice::E_OuterWB_Remapped:
sl@0
   676
			Remap(info.iCacheAttr);
sl@0
   677
			new (&info.iActualMapAttr) TMappingAttributes2(EMemAttKernelInternal4,EFalse,ETrue,ETrue,info.iShared?ETrue:EFalse);
sl@0
   678
			break;
sl@0
   679
#endif
sl@0
   680
			
sl@0
   681
		case RCacheTestDevice::E_Default:
sl@0
   682
			{
sl@0
   683
			// Run the test against test function from rom image (no need for extra memory chunks)
sl@0
   684
			if (info.iSize > TestCodeFuncSize())
sl@0
   685
				return KErrNoMemory; // TestCodeFunc is not big enough to conduct the test.
sl@0
   686
			
sl@0
   687
			TInt startAddr = (TInt)TestCodeFunc + TestCodeFuncSize() - info.iSize;
sl@0
   688
			
sl@0
   689
			// This will invalidate (aka purge) test function from L2 cache.
sl@0
   690
			Cache::SyncMemoryAfterDmaRead((TLinAddr)startAddr, info.iSize); 
sl@0
   691
sl@0
   692
			// Execute the test
sl@0
   693
			time = NKern::TickCount();
sl@0
   694
			LoopTestCodeFunc((CodeTest*)startAddr);
sl@0
   695
			info.iTime = NKern::TickCount() - time;
sl@0
   696
sl@0
   697
			info.iActualMapAttr = 0; //Not relevant.
sl@0
   698
			kumemput(a1,&info,sizeof(info));
sl@0
   699
			return KErrNone;
sl@0
   700
			}
sl@0
   701
		default:
sl@0
   702
			return KErrArgument;		
sl@0
   703
		}
sl@0
   704
sl@0
   705
	// Run the test against test function from memory chunk with cache attributes as specified in info.iCacheState.
sl@0
   706
	// As we need a chunk with eXecutable permission attribute, can't use shared chunk. Take HwChunk instead.
sl@0
   707
	DPlatChunkHw* chunk;
sl@0
   708
	TPhysAddr physBase;		// This will be base physical address of the chunk
sl@0
   709
    TLinAddr linearBase;	// This will be base linear address of the chunk
sl@0
   710
	NKern::ThreadEnterCS();
sl@0
   711
	r = Epoc::AllocPhysicalRam(Kern::RoundToPageSize(info.iSize), physBase, 0);//Allocate RAM. This will set aPhysAddr
sl@0
   712
	if (r)
sl@0
   713
		{
sl@0
   714
		NKern::ThreadLeaveCS();
sl@0
   715
		return r;
sl@0
   716
		}
sl@0
   717
	r = DPlatChunkHw::New (chunk, physBase, Kern::RoundToPageSize(info.iSize), info.iActualMapAttr);//Create chunk
sl@0
   718
	if (r)
sl@0
   719
		{
sl@0
   720
		Epoc::FreePhysicalRam(physBase, Kern::RoundToPageSize(info.iSize));
sl@0
   721
		NKern::ThreadLeaveCS();
sl@0
   722
		return r;
sl@0
   723
		}
sl@0
   724
	NKern::ThreadLeaveCS();
sl@0
   725
sl@0
   726
	linearBase = chunk->LinearAddress();
sl@0
   727
sl@0
   728
	// Create nop,nop,...,nop,ret sequence at the start of the chunk with size = info.iSize
sl@0
   729
	TInt nopInstr = ((TInt*)TestCodeFunc)[0]; 						// NOP is the first instruction from TestCodeFunc
sl@0
   730
	TInt retInstr = ((TInt*)TestCodeFunc)[TestCodeFuncSize()/4-1];	// RET is the last instruction in TestCodeFunc 	
sl@0
   731
	for (TInt i = 0; i < (info.iSize/4-1) ; i++)  	// Put all NOPs...
sl@0
   732
		((TInt*)linearBase)[i] = nopInstr;			// ...
sl@0
   733
	((TInt*)linearBase)[info.iSize/4-1] = retInstr;	// ... and add RET at the end.
sl@0
   734
sl@0
   735
	Cache::IMB_Range((TLinAddr)linearBase, info.iSize); 			// Sync L1 Instruction & Data cache
sl@0
   736
	//Flush the memory from which the test function executes. This will give a fair chance to all test cases.
sl@0
   737
	Cache::SyncMemoryBeforeDmaWrite(linearBase, info.iSize);		// This will clean L1&L2 cache.
sl@0
   738
	Cache::SyncMemoryAfterDmaRead(linearBase, info.iSize);			// This will invalidate (aka purge) L1&L2 cache.
sl@0
   739
sl@0
   740
	// Execute the test
sl@0
   741
	time = NKern::TickCount();
sl@0
   742
	LoopTestCodeFunc((CodeTest*)linearBase);
sl@0
   743
	info.iTime = NKern::TickCount() - time;
sl@0
   744
sl@0
   745
	kumemput(a1,&info,sizeof(info));
sl@0
   746
sl@0
   747
	NKern::ThreadEnterCS();
sl@0
   748
	chunk->Close(NULL);
sl@0
   749
	Epoc::FreePhysicalRam(physBase, Kern::RoundToPageSize(info.iSize));
sl@0
   750
	NKern::ThreadLeaveCS();
sl@0
   751
	return KErrNone;
sl@0
   752
	}
sl@0
   753
sl@0
   754
/**
sl@0
   755
Tests WriteBack mode:
sl@0
   756
	(1)Writes down data into BW cached memory.
sl@0
   757
	(2)Purge the cache.
sl@0
   758
	(3)Counts the bytes that reach the main memory.
sl@0
   759
@param aWriteAlloc True if WriteAllocate to test, EFalse if ReadAllocate
sl@0
   760
*/
sl@0
   761
TInt DCacheTest::TestWriteBackMode(TAny* a1, TBool aWriteAlloc)
sl@0
   762
	{
sl@0
   763
	TInt r, cacheAttr = EMapAttrSupRw;
sl@0
   764
	TUint i, counter = 0;
sl@0
   765
	const TInt pattern = 0xabcdef12;
sl@0
   766
sl@0
   767
	RCacheTestDevice::TChunkTest info;
sl@0
   768
	kumemget(&info,a1,sizeof(info));
sl@0
   769
#ifdef __SMP__
sl@0
   770
	TUint32 force_shared = EMapAttrShared;
sl@0
   771
#else
sl@0
   772
	TUint32 force_shared = 0;
sl@0
   773
#endif
sl@0
   774
sl@0
   775
	switch (info.iCacheAttr)
sl@0
   776
		{
sl@0
   777
#if defined(__CPU_MEMORY_TYPE_REMAPPING)
sl@0
   778
		case RCacheTestDevice::E_InnerWBRA_Remapped:
sl@0
   779
		case RCacheTestDevice::E_InnerWB_Remapped:
sl@0
   780
		case RCacheTestDevice::E_OuterWBRA_Remapped:
sl@0
   781
		case RCacheTestDevice::E_OuterWB_Remapped:
sl@0
   782
			Remap(info.iCacheAttr);
sl@0
   783
			new (&cacheAttr) TMappingAttributes2(EMemAttKernelInternal4,EFalse,ETrue,ETrue,force_shared);
sl@0
   784
			break;
sl@0
   785
#endif
sl@0
   786
		case RCacheTestDevice::E_InnerWBRA:	cacheAttr |= EMapAttrCachedWBRA|force_shared; 	break;
sl@0
   787
		case RCacheTestDevice::E_InnerWB:	cacheAttr |= EMapAttrCachedWBWA|force_shared; 	break;
sl@0
   788
		case RCacheTestDevice::E_OuterWBRA:	cacheAttr |= EMapAttrL2CachedWBRA|force_shared;	break;
sl@0
   789
		case RCacheTestDevice::E_OuterWB:	cacheAttr |= EMapAttrL2CachedWBWA|force_shared;	break;
sl@0
   790
		default: return KErrArgument;
sl@0
   791
		}
sl@0
   792
	// Create chunk
sl@0
   793
	if (KErrNone!=(r=AllocPhysicalRam(info.iSize))) return r;
sl@0
   794
	if (KErrNone!=(r=CreateSharedChunk(cacheAttr, info.iActualMapAttr))) return r;
sl@0
   795
	
sl@0
   796
	for (i=0; i<(iSize>>2) ; i++) ((TInt*)iChunkBase)[i] = 0;   //Zero-fill cache and...
sl@0
   797
	Cache::SyncMemoryBeforeDmaWrite(iChunkBase, iSize);			//... clean the cache down to memory
sl@0
   798
sl@0
   799
	Cache::SyncMemoryAfterDmaRead(iChunkBase, iSize);			//Invalidate (aka purge).
sl@0
   800
sl@0
   801
	// Fill in cached region with the pattern.
sl@0
   802
	for (i=0; i<(iSize>>2); i++)
sl@0
   803
	 	{
sl@0
   804
	 	if (!aWriteAlloc) iDummy = ((TInt*)iChunkBase)[i]; 		// Don't read if WriteAllocate is tested
sl@0
   805
	 	((TInt*)iChunkBase)[i] = pattern;
sl@0
   806
	 	}
sl@0
   807
		
sl@0
   808
	Cache::SyncMemoryAfterDmaRead(iChunkBase, iSize);	//Invalidate (aka purge) cache. Data in cache should be destroyed
sl@0
   809
	CloseSharedChunk();									// Close cached chunk.
sl@0
   810
	
sl@0
   811
	//Create non-cached chunk over the same physical memory
sl@0
   812
	if (KErrNone!=(r=CreateSharedChunk(EMapAttrSupRw , iDummy))) return r;
sl@0
   813
sl@0
   814
	// Counts out how many bytes have reached RAM
sl@0
   815
	for (i=0; i<(iSize>>2); i++) if (((TInt*)iChunkBase)[i] == pattern) counter++;
sl@0
   816
sl@0
   817
	info.iSize = counter<<2; //Return the number of bytes that reached the main memory
sl@0
   818
	CloseSharedChunk();
sl@0
   819
	FreePhysicalRam();
sl@0
   820
	
sl@0
   821
	kumemput(a1,&info,sizeof(info));
sl@0
   822
	return r;
sl@0
   823
	}
sl@0
   824
sl@0
   825
/**
sl@0
   826
Exercises SyncMemoryBeforeDmaWrite & SyncMemoryAfterDmaRead (that call L1/L2 Cache Clean & Purge methods)
sl@0
   827
This just ensures that they do not panic (doesn't do any functional test).
sl@0
   828
*/
sl@0
   829
TInt DCacheTest::TestL2Maintenance()
sl@0
   830
	{
sl@0
   831
	// Create 20000h big chunk with the the memory commited as:
sl@0
   832
	// |0| NotCommited |64K| Commited |128K| NotCommited |192K| Commited |256K| 
sl@0
   833
#ifdef __SMP__
sl@0
   834
	TUint32 force_shared = EMapAttrShared;
sl@0
   835
#else
sl@0
   836
	TUint32 force_shared = 0;
sl@0
   837
#endif
sl@0
   838
	TInt r;
sl@0
   839
	TChunkCreateInfo info;
sl@0
   840
    info.iType         = TChunkCreateInfo::ESharedKernelSingle;
sl@0
   841
	info.iMaxSize      = 0x40000;
sl@0
   842
	info.iMapAttr      = EMapAttrSupRw | EMapAttrCachedWBWA | EMapAttrL2CachedWBWA | force_shared;
sl@0
   843
	info.iOwnsMemory   = ETrue; // Use memory from system's free pool
sl@0
   844
	info.iDestroyedDfc = NULL;
sl@0
   845
sl@0
   846
    TLinAddr chunkAddr;
sl@0
   847
    TUint32 mapAttr;
sl@0
   848
    DChunk* chunk;
sl@0
   849
	TInt pageSize = 0x1000; //4K
sl@0
   850
sl@0
   851
	NKern::ThreadEnterCS();
sl@0
   852
    if (KErrNone != (r = Kern::ChunkCreate(info, chunk, chunkAddr, mapAttr)))
sl@0
   853
		{
sl@0
   854
		NKern::ThreadLeaveCS();
sl@0
   855
		return r;
sl@0
   856
		}
sl@0
   857
	r = Kern::ChunkCommit(chunk,0x10000,0x10000);
sl@0
   858
    if(r!=KErrNone)
sl@0
   859
        {
sl@0
   860
		Kern::ChunkClose(chunk);
sl@0
   861
		NKern::ThreadLeaveCS();
sl@0
   862
		return r;
sl@0
   863
		}
sl@0
   864
	r = Kern::ChunkCommit(chunk,0x30000,0x10000);
sl@0
   865
    if(r!=KErrNone)
sl@0
   866
        {
sl@0
   867
		Kern::ChunkClose(chunk);
sl@0
   868
		NKern::ThreadLeaveCS();
sl@0
   869
		return r;
sl@0
   870
		}
sl@0
   871
sl@0
   872
	NKern::ThreadLeaveCS();
sl@0
   873
sl@0
   874
	TInt valid = chunkAddr+0x10000;
sl@0
   875
sl@0
   876
	#if defined(__ARM_L220_CACHE__) || defined(__ARM_L210_CACHE__)
sl@0
   877
	// Check L2 cache maintenance for invalid addresses.
sl@0
   878
	// On ARMv6, clean/purge L1 cache of the region with invalid addresses panics.
sl@0
   879
	// However, cleaning/purging a large region above the threshold will clean/purge entire L1 cache(which doesn't panic).
sl@0
   880
	// That is why the following calls run against 256KB. 
sl@0
   881
	//We cannot do that on XScale L2 cache as it would generate page walk data abort.
sl@0
   882
	TInt invalid = chunkAddr;
sl@0
   883
	Cache::SyncMemoryBeforeDmaWrite(invalid+20, 0x40000-20);
sl@0
   884
	Cache::SyncMemoryAfterDmaRead(invalid+100,0x40000-101);
sl@0
   885
	#endif
sl@0
   886
	
sl@0
   887
	
sl@0
   888
	// The following calls operate against valid memory regions.
sl@0
   889
	Cache::SyncMemoryAfterDmaRead(valid+1, 0);
sl@0
   890
	Cache::SyncMemoryAfterDmaRead(valid+32, 12);
sl@0
   891
	Cache::SyncMemoryAfterDmaRead(valid+1, 0);
sl@0
   892
	Cache::SyncMemoryBeforeDmaWrite(valid+2, 1);
sl@0
   893
	Cache::SyncMemoryAfterDmaRead(valid+3, 2);
sl@0
   894
	Cache::SyncMemoryBeforeDmaWrite(valid+4, 3);
sl@0
   895
	Cache::SyncMemoryAfterDmaRead(valid+5, 4);
sl@0
   896
	Cache::SyncMemoryBeforeDmaWrite(valid+6, 5);
sl@0
   897
	Cache::SyncMemoryAfterDmaRead(valid+7, 6);
sl@0
   898
	Cache::SyncMemoryBeforeDmaWrite(valid+8, 7);
sl@0
   899
	Cache::SyncMemoryAfterDmaRead(valid+9, 8);
sl@0
   900
	Cache::SyncMemoryBeforeDmaWrite(valid+10, 9);
sl@0
   901
	Cache::SyncMemoryAfterDmaRead(valid+11, 10);
sl@0
   902
	Cache::SyncMemoryBeforeDmaWrite(valid+12, 11);
sl@0
   903
	Cache::SyncMemoryAfterDmaRead(valid+13, 12);
sl@0
   904
	Cache::SyncMemoryBeforeDmaWrite(valid+14, 13);
sl@0
   905
	Cache::SyncMemoryAfterDmaRead(valid+15, 14);
sl@0
   906
sl@0
   907
	TLinAddr page = (valid+2*pageSize);
sl@0
   908
	Cache::SyncMemoryBeforeDmaWrite(page, 0);
sl@0
   909
	Cache::SyncMemoryAfterDmaRead(page, 0);
sl@0
   910
	Cache::SyncMemoryBeforeDmaWrite(page-1, 2);
sl@0
   911
	Cache::SyncMemoryAfterDmaRead(page-2, 4);
sl@0
   912
	Cache::SyncMemoryBeforeDmaWrite(page-3, 6);
sl@0
   913
	Cache::SyncMemoryAfterDmaRead(page-4, 8);
sl@0
   914
	Cache::SyncMemoryBeforeDmaWrite(page-5, 10);
sl@0
   915
	Cache::SyncMemoryAfterDmaRead(page-6, 12);
sl@0
   916
sl@0
   917
	Cache::SyncMemoryBeforeDmaWrite(page, 2*pageSize);
sl@0
   918
	Cache::SyncMemoryAfterDmaRead(page-1, 2*pageSize);
sl@0
   919
	Cache::SyncMemoryBeforeDmaWrite(page+1, 2*pageSize);
sl@0
   920
	Cache::SyncMemoryAfterDmaRead(page+3, 2*pageSize);
sl@0
   921
	Cache::SyncMemoryBeforeDmaWrite(page-3, 2*pageSize);
sl@0
   922
sl@0
   923
	Cache::SyncMemoryBeforeDmaWrite(valid, 64, EMapAttrCachedMax);
sl@0
   924
	Cache::SyncMemoryBeforeDmaRead(valid, 64, EMapAttrCachedMax);
sl@0
   925
	Cache::SyncMemoryAfterDmaRead(valid, 64, EMapAttrCachedMax);
sl@0
   926
sl@0
   927
	
sl@0
   928
	Cache::IMB_Range(0, 0xffffffff);//will cause: Clean all DCache & Purge all ICache
sl@0
   929
	// Close the chunk
sl@0
   930
	NKern::ThreadEnterCS();
sl@0
   931
	Kern::ChunkClose(chunk);
sl@0
   932
	NKern::ThreadLeaveCS();
sl@0
   933
sl@0
   934
sl@0
   935
	//Check maintenance functions against entire cache (we need memory region >=8*cache size)
sl@0
   936
    info.iType         = TChunkCreateInfo::ESharedKernelSingle;
sl@0
   937
	info.iMaxSize      = 0x100000; //1MB will do
sl@0
   938
	info.iMapAttr      = EMapAttrSupRw | EMapAttrCachedWBWA | EMapAttrL2CachedWBWA | force_shared;
sl@0
   939
	info.iOwnsMemory   = ETrue; // Use memory from system's free pool
sl@0
   940
	info.iDestroyedDfc = NULL;
sl@0
   941
sl@0
   942
	NKern::ThreadEnterCS();
sl@0
   943
    if (KErrNone != (r = Kern::ChunkCreate(info, chunk, chunkAddr, mapAttr)))
sl@0
   944
		{
sl@0
   945
		NKern::ThreadLeaveCS();
sl@0
   946
		return r;
sl@0
   947
		}
sl@0
   948
	r = Kern::ChunkCommit(chunk,0x0,0x100000);
sl@0
   949
    if(r!=KErrNone)
sl@0
   950
        {
sl@0
   951
		Kern::ChunkClose(chunk);
sl@0
   952
		NKern::ThreadLeaveCS();
sl@0
   953
		return r;
sl@0
   954
		}
sl@0
   955
	NKern::ThreadLeaveCS();
sl@0
   956
sl@0
   957
	Cache::SyncMemoryBeforeDmaWrite(chunkAddr, 0x100000);
sl@0
   958
	Cache::SyncMemoryAfterDmaRead(chunkAddr, 0x100000);
sl@0
   959
sl@0
   960
	// Close the chunk
sl@0
   961
	NKern::ThreadEnterCS();
sl@0
   962
	Kern::ChunkClose(chunk);
sl@0
   963
	NKern::ThreadLeaveCS();
sl@0
   964
sl@0
   965
	return KErrNone;
sl@0
   966
	}
sl@0
   967
sl@0
   968
sl@0
   969
/**
Runs one of the DMA-driver use-case simulations (see UseCase_* methods) against
a chunk mapped with the requested cache attributes and measures its duration.
@param a1 User-side pointer to RCacheTestDevice::TChunkTest (input/output). On
		  return, iTime holds the test duration in nanokernel ticks.
@return KErrNone on success, KErrArgument for an unsupported cache attribute or
		use case, or a system-wide error code if allocation fails.
*/
TInt DCacheTest::TestUseCase(TAny* a1)
	{
	TInt r = KErrNone;
	TInt time;
	
	RCacheTestDevice::TChunkTest info;
	kumemget(&info,a1,sizeof(info));

	TUint32 chunkAttr = EMapAttrSupRw;
#ifdef __SMP__
	TUint32 force_shared = EMapAttrShared;
#else
	TUint32 force_shared = 0;
#endif
	if (info.iShared) chunkAttr |= EMapAttrShared;

	// Translate the requested cache attribute into mapping attributes.
	switch (info.iCacheAttr)
		{
		case RCacheTestDevice::E_StronglyOrder:
			new (&chunkAttr) TMappingAttributes2(EMemAttStronglyOrdered,EFalse,ETrue,EFalse,info.iShared?ETrue:EFalse);
			break;
		case RCacheTestDevice::E_Device:
			new (&chunkAttr) TMappingAttributes2(EMemAttDevice,EFalse,ETrue,EFalse,info.iShared?ETrue:EFalse);
			break;
		case RCacheTestDevice::E_Normal_Uncached:
			new (&chunkAttr) TMappingAttributes2(EMemAttNormalUncached,EFalse,ETrue,EFalse,info.iShared?ETrue:EFalse);
			break;
		case RCacheTestDevice::E_Normal_Cached:
			new (&chunkAttr) TMappingAttributes2(EMemAttNormalCached,EFalse,ETrue,EFalse,info.iShared?ETrue:EFalse);
			break;
		#if defined(__CPU_MEMORY_TYPE_REMAPPING)
		case RCacheTestDevice::E_InOutWT_Remapped:
			Remap(info.iCacheAttr);
			new (&chunkAttr) TMappingAttributes2(EMemAttKernelInternal4,EFalse,ETrue,EFalse,(info.iShared|force_shared)?ETrue:EFalse);
		#else
		case RCacheTestDevice::E_InOutWT:		chunkAttr |= EMapAttrCachedWTRA|EMapAttrL2CachedWTRA|force_shared;
		#endif
			break;
		default:
			return KErrArgument;		
		}

	// Create chunk
	if (KErrNone!=(r=AllocPhysicalRam(Kern::RoundToPageSize(info.iSize)))) return r;
	if (KErrNone!=(r=CreateSharedChunk(chunkAttr, info.iActualMapAttr)))
		{
		FreePhysicalRam();	// Fix: don't leak the physical RAM if chunk creation fails.
		return r;
		}
	
	//Alloc from the heap
	NKern::ThreadEnterCS();
	iHeap1 = (TInt*)Kern::Alloc(Max(info.iSize,0x8000));
	if (iHeap1==NULL)
		{
		NKern::ThreadLeaveCS();
		CloseSharedChunk();	// Fix: release chunk & physical RAM on failure.
		FreePhysicalRam();
		return KErrNoMemory;
		}
	iHeap2 = (TInt*)Kern::Alloc(0x8000);
	if (iHeap2==NULL)
		{
		Kern::Free((TAny*)iHeap1);
		NKern::ThreadLeaveCS();
		CloseSharedChunk();	// Fix: release chunk & physical RAM on failure.
		FreePhysicalRam();
		return KErrNoMemory;
		}
	NKern::ThreadLeaveCS();
	
	Cache::SyncMemoryAfterDmaRead(iChunkBase, info.iSize); // Invalidate (aka purge) cache.
	time = NKern::TickCount();
	switch(info.iUseCase)
		{
		case 0:  r = UseCase_ReadFromChunk(info);break;
		case 1:  r = UseCase_ReadFromChunk_ReadFromHeap(info);break;
		case 2:  r = UseCase_WriteToChunk(info);break;
		case 3:  r = UseCase_WriteToChunk_ReadFromHeap(info);break;
		default: r = KErrArgument;
		}
	info.iTime = NKern::TickCount() - time;

	NKern::ThreadEnterCS();
	Kern::Free((TAny*)iHeap1);
	Kern::Free((TAny*)iHeap2);
	NKern::ThreadLeaveCS();
	
	CloseSharedChunk();
	FreePhysicalRam();

	kumemput(a1,&info,sizeof(info));
	return r;
	}
sl@0
  1046
sl@0
  1047
// Use case 0: repeatedly receive "DMA" data from the chunk and consume it.
TInt DCacheTest::UseCase_ReadFromChunk(RCacheTestDevice::TChunkTest& info)
	{
	for (TInt rep = 0; rep < info.iLoops; rep++)
		{
		// Simulate an incoming DMA transfer - evict the chunk from the cache.
		Cache::SyncMemoryBeforeDmaRead(iChunkBase, info.iSize, info.iActualMapAttr); // Invalidate (aka purge) cache.

		// Consume the DMA data by copying it onto the heap.
		memcpy((TAny*)iHeap1, (const TAny*)iChunkBase, info.iSize);
		}
	return KErrNone;
	}
sl@0
  1061
sl@0
  1062
// Use case 1: as UseCase_ReadFromChunk, but with interleaved heap reads to
// simulate other kernel activity between DMA transfers.
TInt DCacheTest::UseCase_ReadFromChunk_ReadFromHeap(RCacheTestDevice::TChunkTest& info)
	{
	for (TInt rep = 0; rep < info.iLoops; rep++)
		{
		// Simulate an incoming DMA transfer - evict the chunk from the cache.
		Cache::SyncMemoryBeforeDmaRead(iChunkBase, info.iSize, info.iActualMapAttr); // Invalidate (aka purge) cache.

		// Consume the DMA data by copying it onto the heap.
		memcpy((TAny*)iHeap1, (const TAny*)iChunkBase, info.iSize);

		// Simulate kernel activities - read from the second heap buffer.
		memcpy((TAny*)iHeap1, (const TAny*)iHeap2, 0x8000);
		}
	return KErrNone;
	}
sl@0
  1078
sl@0
  1079
// Use case 2: repeatedly produce outgoing "DMA" data in the chunk.
TInt DCacheTest::UseCase_WriteToChunk(RCacheTestDevice::TChunkTest& info)
	{
	for (TInt rep = 0; rep < info.iLoops; rep++)
		{
		// Evict the chunk from the cache first.
		Cache::SyncMemoryBeforeDmaRead(iChunkBase, info.iSize, info.iActualMapAttr); // Invalidate (aka purge) cache.

		// Produce the outgoing data, then clean it down to main memory.
		memcpy((TAny*)iChunkBase, (const TAny*)iHeap1, info.iSize);
		Cache::SyncMemoryBeforeDmaWrite(iChunkBase, info.iSize, info.iActualMapAttr); // Clean cache.
		}
	return KErrNone;
	}
sl@0
  1094
sl@0
  1095
// Use case 3: as UseCase_WriteToChunk, but with interleaved heap reads to
// simulate other kernel activity between DMA transfers.
TInt DCacheTest::UseCase_WriteToChunk_ReadFromHeap(RCacheTestDevice::TChunkTest& info)
	{
	for (TInt rep = 0; rep < info.iLoops; rep++)
		{
		// Evict the chunk from the cache first.
		Cache::SyncMemoryBeforeDmaRead(iChunkBase, info.iSize, info.iActualMapAttr); // Invalidate (aka purge) cache.

		// Produce the outgoing data, then clean it down to main memory.
		memcpy((TAny*)iChunkBase, (const TAny*)iHeap1, info.iSize);
		Cache::SyncMemoryBeforeDmaWrite(iChunkBase, info.iSize, info.iActualMapAttr); // Clean cache.

		// Simulate kernel activities - read from the second heap buffer.
		memcpy((TAny*)iHeap1, (const TAny*)iHeap2, 0x8000);
		}
	return KErrNone;
	}
sl@0
  1112
sl@0
  1113
sl@0
  1114
// Entry point
sl@0
  1115
TInt DCacheTest::Request(TInt aFunction, TAny* a1, TAny* a2)
sl@0
  1116
	{
sl@0
  1117
	TInt r = KErrNone;
sl@0
  1118
#ifdef __SMP__
sl@0
  1119
	TUint32 affinity = NKern::ThreadSetCpuAffinity(NKern::CurrentThread(), 0);
sl@0
  1120
#endif
sl@0
  1121
	switch (aFunction)
sl@0
  1122
	{
sl@0
  1123
		case RCacheTestDevice::EGetCacheInfo:				r = GetCacheInfo(a1);		break;
sl@0
  1124
		case RCacheTestDevice::ETestDataChunk:				r = TestDataChunk(a1);		break;
sl@0
  1125
		case RCacheTestDevice::ETestCodeChunk:				r = TestCodeChunk(a1);		break;
sl@0
  1126
		case RCacheTestDevice::ETestWriteBackReadAllocate:	r = TestWriteBackMode(a1, EFalse);	break;
sl@0
  1127
		case RCacheTestDevice::ETestWriteBackWriteAllocate:	r = TestWriteBackMode(a1, ETrue);	break;
sl@0
  1128
		case RCacheTestDevice::ETesL2Maintenance:			r = TestL2Maintenance();	break;
sl@0
  1129
		case RCacheTestDevice::EGetThreshold:				r = GetThreshold(a1);		break;
sl@0
  1130
		case RCacheTestDevice::ESetThreshold:				r = SetThreshold(a1);		break;
sl@0
  1131
		case RCacheTestDevice::ETestUseCase:				r = TestUseCase(a1);		break;
sl@0
  1132
		default:											r=KErrNotSupported;
sl@0
  1133
		}
sl@0
  1134
#ifdef __SMP__
sl@0
  1135
	NKern::ThreadSetCpuAffinity(NKern::CurrentThread(), affinity);
sl@0
  1136
#endif
sl@0
  1137
	return r;
sl@0
  1138
	}
sl@0
  1139
sl@0
  1140
//////////////////////////////////////////
// Factory (LDD) object for the cache test driver: creates DCacheTest channels.
class DTestFactory : public DLogicalDevice
	{
public:
	DTestFactory();
	// from DLogicalDevice
	virtual TInt Install();
	virtual void GetCaps(TDes8& aDes) const;
	virtual TInt Create(DLogicalChannelBase*& aChannel);
	};
sl@0
  1150
sl@0
  1151
DTestFactory::DTestFactory()
    {
    // Clients must specify a unit number when opening a channel.
    iParseMask = KDeviceAllowUnit;
    // Mask 0x3: units 0 and 1 are valid.
    iUnitsMask = 0x3;
    }
sl@0
  1156
sl@0
  1157
// Creates a logical channel for a client and records it in the global
// CacheTestDriver pointer.
TInt DTestFactory::Create(DLogicalChannelBase*& aChannel)
    {
	DCacheTest* channel = new DCacheTest;
	CacheTestDriver = channel;
	aChannel = channel;
	if (channel == NULL)
		return KErrNoMemory;
	return KErrNone;
    }
sl@0
  1163
sl@0
  1164
TInt DTestFactory::Install()
    {
    // Register the device under its published name so user-side code can open it.
    return SetName(&KCacheTestDriverName);
    }
sl@0
  1168
sl@0
  1169
void DTestFactory::GetCaps(TDes8& /*aDes*/) const
    {
    // No capabilities reported by this test driver.
    }
sl@0
  1172
sl@0
  1173
// Standard LDD entry point: returns the factory object for this driver.
DECLARE_STANDARD_LDD()
	{
    return new DTestFactory;
	}