os/kernelhwsrv/kerneltest/e32test/demandpaging/t_datapaging.cpp
changeset 0 bde4ae8d615e
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/os/kernelhwsrv/kerneltest/e32test/demandpaging/t_datapaging.cpp	Fri Jun 15 03:10:57 2012 +0200
     1.3 @@ -0,0 +1,1257 @@
     1.4 +// Copyright (c) 2008-2009 Nokia Corporation and/or its subsidiary(-ies).
     1.5 +// All rights reserved.
     1.6 +// This component and the accompanying materials are made available
     1.7 +// under the terms of the License "Eclipse Public License v1.0"
     1.8 +// which accompanies this distribution, and is available
     1.9 +// at the URL "http://www.eclipse.org/legal/epl-v10.html".
    1.10 +//
    1.11 +// Initial Contributors:
    1.12 +// Nokia Corporation - initial contribution.
    1.13 +//
    1.14 +// Contributors:
    1.15 +//
    1.16 +// Description:
    1.17 +// e32test\demandpaging\t_datapaging.cpp
    1.18 +// Functional tests for data paging.
    1.19 +// 002 Test UserHeap::ChunkHeap data paging attributes
    1.20 +// 003 Test RThread::Create data paging attributes
    1.21 +// 
    1.22 +//
    1.23 +
    1.24 +//! @SYMTestCaseID			KBASE-T_DATAPAGING
    1.25 +//! @SYMTestType			UT
    1.26 +//! @SYMPREQ				PREQ1954
    1.27 +//! @SYMTestCaseDesc		Data Paging functional tests.
    1.28 +//! @SYMTestActions			001 Test RChunk data paging attributes
    1.29 +//! @SYMTestExpectedResults All tests should pass.
    1.30 +//! @SYMTestPriority        High
    1.31 +//! @SYMTestStatus          Implemented
    1.32 +
    1.33 +#define __E32TEST_EXTENSION__
    1.34 +#include <e32test.h>
    1.35 +#include <dptest.h>
    1.36 +#include <e32hal.h>
    1.37 +#include <u32exec.h>
    1.38 +#include <e32svr.h>
    1.39 +#include <e32panic.h>
    1.40 +#include "u32std.h"
    1.41 +#include <e32msgqueue.h>
    1.42 +#include <e32atomics.h>
    1.43 +#include <e32math.h>
    1.44 +
    1.45 +#include "t_dpcmn.h"
    1.46 +#include "../mmu/mmudetect.h"
    1.47 +#include "../mmu/d_memorytest.h"
    1.48 +#include "../mmu/paging_info.h"
    1.49 +
    1.50 +RTest test(_L("T_DATAPAGING"));
    1.51 +
    1.52 +_LIT(KChunkName, "t_datapaging chunk");
    1.53 +
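          +// Simple pseudo-random number generator (32-bit linear congruential), seeded from the
          +// current thread id and the nanokernel tick count.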
    1.54 +class TRandom
    1.55 +	{
    1.56 +public:
    1.57 +	TRandom();
    1.58 +	TUint32 Next();
    1.59 +
    1.60 +private:
    1.61 +	enum
    1.62 +		{
    1.63 +		KA = 1664525,
    1.64 +		KB = 1013904223
    1.65 +		};
    1.66 +	TUint32 iV;
    1.67 +	};
    1.68 +
    1.69 +TRandom::TRandom()
    1.70 +	{
    1.71 +	iV = RThread().Id() + User::NTickCount() + 23;
    1.72 +	}
    1.73 +
    1.74 +TUint32 TRandom::Next()
    1.75 +	{
    1.76 +	iV = KA * iV + KB;
    1.77 +	return iV;
    1.78 +	}
    1.79 +
    1.80 +void CreatePagedChunk(TInt aSizeInPages, TInt aWipeByte = -1)
    1.81 +	{
    1.82 +	test_Equal(0,gChunk.Handle());
    1.83 +	
    1.84 +	TChunkCreateInfo createInfo;
    1.85 +	TInt size = aSizeInPages * gPageSize;
    1.86 +	createInfo.SetNormal(size, size);
    1.87 +	createInfo.SetPaging(TChunkCreateInfo::EPaged);
    1.88 +	createInfo.SetOwner(EOwnerProcess);
    1.89 +	createInfo.SetGlobal(KChunkName);
    1.90 +	if (aWipeByte != -1)
    1.91 +		createInfo.SetClearByte(aWipeByte);
    1.92 +	test_KErrNone(gChunk.Create(createInfo));
    1.93 +	test(gChunk.IsPaged()); // this is only ever called if data paging is supported
    1.94 +	}
    1.95 +
     1.96 +// The contents of a page are described by a type from the enum below ORed with a byte value
    1.97 +enum TPageContent
    1.98 +	{
    1.99 +	ETypeUniform    = 0 << 8,
   1.100 +	ETypeIncreasing = 1 << 8,
   1.101 +
   1.102 +	EContentValueMask = 255,
   1.103 +	EContentTypeMask  = 255 << 8
   1.104 +	};
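          +// For example, (ETypeUniform | 0xed) describes a page filled entirely with 0xed, while
          +// (ETypeIncreasing | 0x1a) describes a page whose bytes run 0x1a, 0x1b, 0x1c, ...
          +// wrapping around at 255.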
   1.105 +
   1.106 +// Write to a page to page it in and verify its previous contents
   1.107 +void WritePage(TInt aIndex, TUint aExpectedContents, TUint aNewContents)
   1.108 +	{
   1.109 +	test.Printf(_L("  %3d Write %x\n"), aIndex, aNewContents);
   1.110 +	
   1.111 +	TUint oldType = aExpectedContents & EContentTypeMask;
   1.112 +	TUint oldValue = aExpectedContents & EContentValueMask;
   1.113 +	
   1.114 +	TUint type = aNewContents & EContentTypeMask;
   1.115 +	TUint value = aNewContents & EContentValueMask;
   1.116 +	
   1.117 +	TUint8* page = gChunk.Base() + (gPageSize * aIndex);
   1.118 +
   1.119 +	// write first byte first so page is paged in or rejuvenated with write permissions
   1.120 +	page[0] = 0;
   1.121 +	
   1.122 +	for (TInt i = 0 ; i < gPageSize ; ++i)
   1.123 +		{
   1.124 +		if (i != 0)
   1.125 +			test_Equal(oldValue, page[i]);
   1.126 +		if (oldType == ETypeIncreasing)
   1.127 +			oldValue = (oldValue + 1) & 255;
   1.128 +		
   1.129 +		page[i] = value;
   1.130 +		if (type == ETypeIncreasing)
   1.131 +			value = (value + 1) & 255;
   1.132 +		}
   1.133 +	}
   1.134 +
   1.135 +// Read a page and verify its contents
   1.136 +void ReadPage(TInt aIndex, TUint aExpectedContents)
   1.137 +	{
   1.138 +	test.Printf(_L("  %3d Read  %x\n"), aIndex, aExpectedContents);
   1.139 +	TUint type = aExpectedContents & EContentTypeMask;
   1.140 +	TUint value = aExpectedContents & EContentValueMask;
   1.141 +	TUint8* page = gChunk.Base() + (gPageSize * aIndex);
   1.142 +	for (TInt i = 0 ; i < gPageSize ; ++i)
   1.143 +		{
   1.144 +		test_Equal(value, page[i]);
   1.145 +		if (type == ETypeIncreasing)
   1.146 +			value = (value + 1) & 255;
   1.147 +		}
   1.148 +	}
   1.149 +
   1.150 +void PageOut()
   1.151 +	{
   1.152 +	test.Printf(_L("      PageOut\n"));
   1.153 +	DPTest::FlushCache();
   1.154 +	}
   1.155 +
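          +// Walk a single paged page through its read/write/page-out transitions, verifying the
          +// contents at each step.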
   1.156 +void TestOnePage()
   1.157 +	{
   1.158 +	CreatePagedChunk(1, 0xed);
   1.159 +
   1.160 +	// Test initial contents (read)
   1.161 +	ReadPage(0, ETypeUniform | 0xed);
   1.162 +
    1.163 +	// Test reading the initial contents after a flush (the page may or may not actually have been paged out)
   1.164 +	PageOut();
   1.165 +	ReadPage(0, ETypeUniform | 0xed);
   1.166 +
   1.167 +	// Test page out / page in (read) of dirty contents
   1.168 +	WritePage(0, ETypeUniform | 0xed, ETypeIncreasing | 0x1a);
   1.169 +	PageOut();
   1.170 +	ReadPage(0, ETypeIncreasing | 0x1a);
   1.171 +
   1.172 +	// Test page out / page in (read) of clean contents
   1.173 +	PageOut();
   1.174 +	ReadPage(0, ETypeIncreasing | 0x1a);
   1.175 + 
   1.176 +	// Test page out / page in (write) of dirty contents
   1.177 +	WritePage(0, ETypeIncreasing | 0x1a, ETypeIncreasing | 0x23);
   1.178 +	PageOut();
   1.179 +	WritePage(0, ETypeIncreasing | 0x23, ETypeIncreasing | 0x45);
   1.180 +
   1.181 +	CLOSE_AND_WAIT(gChunk);
   1.182 +	CreatePagedChunk(1, 0x0d);
   1.183 +
   1.184 +	// Test initial contents (write)
   1.185 +	WritePage(0, ETypeUniform | 0x0d, ETypeIncreasing | 0x1a);
   1.186 +
   1.187 +	// Test page out / page in (read) of dirty contents
   1.188 +	PageOut();
   1.189 +	ReadPage(0, ETypeIncreasing | 0x1a);
   1.190 +	
   1.191 +	CLOSE_AND_WAIT(gChunk);
   1.192 +	}
   1.193 +
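          +// Thread function which repeatedly pages the given page in by reading it after each
          +// cache flush; used by TestKillThread to kill a thread in the middle of a page-in.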
   1.194 +TInt PageInThreadFunc(TAny* aArg)
   1.195 +	{
   1.196 +	TUint8* page = (TUint8*)aArg;
   1.197 +	for (;;)
   1.198 +		{
   1.199 +		DPTest::FlushCache();
   1.200 +		RDebug::Printf("Start page in...");
   1.201 +		volatile TInt i = page[0];
   1.202 +		(void)i;
   1.203 +		RDebug::Printf("  done.");
   1.204 +		}
   1.205 +	}
   1.206 +
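          +// Thread function which repeatedly dirties the given page and then flushes the paging
          +// cache, forcing a page-out; used by TestKillThread to kill a thread in the middle of a
          +// page-out.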
   1.207 +TInt PageOutThreadFunc(TAny* aArg)
   1.208 +	{
   1.209 +	TUint8* page = (TUint8*)aArg;
   1.210 +	for (;;)
   1.211 +		{
   1.212 +		page[0] = 1;  // make page dirty
   1.213 +		RDebug::Printf("Start page out...");
   1.214 +		DPTest::FlushCache();
   1.215 +		RDebug::Printf("  done.");
   1.216 +		}
   1.217 +	}
   1.218 +
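          +// Repeatedly run aFunc against a single paged page in a worker thread, killing the
          +// thread after a random delay each time, to check that the kernel cleans up correctly
          +// whatever paging operation the thread is performing when it dies.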
   1.219 +void TestKillThread(TThreadFunction aFunc, TInt aIterations)
   1.220 +	{
   1.221 +	__KHEAP_MARK;
   1.222 +	TRandom random;
   1.223 +	CreatePagedChunk(1);
   1.224 +	TUint8* page = gChunk.Base();
   1.225 +	page[0] = 0;  // make page dirty
   1.226 +	DPTest::FlushCache();
   1.227 +	for (TInt i = 0 ; i < aIterations ; ++i)
   1.228 +		{
   1.229 +		RThread thread;
   1.230 +		test_KErrNone(thread.Create(KNullDesC, aFunc, gPageSize, NULL, page));
   1.231 +		TRequestStatus status;
   1.232 +		thread.Logon(status);
   1.233 +		thread.Resume();
   1.234 +		User::AfterHighRes((random.Next() % 50 + 1) * 1000);
   1.235 +		thread.Kill(123);
   1.236 +		User::WaitForRequest(status);
   1.237 +		test_Equal(123, status.Int());
   1.238 +		CLOSE_AND_WAIT(thread);
   1.239 +		}
   1.240 +	CLOSE_AND_WAIT(gChunk);
   1.241 +	User::After(1000000);
   1.242 +	__KHEAP_MARKEND;
   1.243 +	}
   1.244 +
   1.245 +struct SSoakTestArgs
   1.246 +	{
   1.247 +	TInt iThreadIndex;
   1.248 +	TInt iPages;
   1.249 +	};
   1.250 +
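          +// Layout of each page during the soak test: word 0 holds a tag identifying the page
          +// (see PageTag), and each worker thread owns the two words starting at word index
          +// (threadIndex * 2 + 1), accessed via PageDataPtr.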
   1.251 +TUint32* PageBasePtr(TInt aPage)
   1.252 +	{
   1.253 +	return (TUint32*)(gChunk.Base() + (gPageSize * aPage));
   1.254 +	}
   1.255 +
   1.256 +TUint32* PageDataPtr(TInt aPage, TInt aThreadIndex)
   1.257 +	{
   1.258 +	return (TUint32*)((TUint8*)PageBasePtr(aPage) + ((aThreadIndex * 2 + 1) * sizeof(TUint32)));
   1.259 +	}
   1.260 +
   1.261 +TUint32 PageTag(TInt aPage)
   1.262 +	{
   1.263 +	return 0x80000000 | aPage;
   1.264 +	}	
   1.265 +
   1.266 +void StopSoakTest(RMsgQueue<TInt> aMsgQueue)
   1.267 +	{
   1.268 +	while(aMsgQueue.Send(0) != KErrOverflow)
   1.269 +		;
   1.270 +	}
   1.271 +
   1.272 +TBool ContinueSoakTest(RMsgQueue<TInt> aMsgQueue)
   1.273 +	{
   1.274 +	TInt msg;
   1.275 +	return aMsgQueue.Receive(msg) == KErrUnderflow;
   1.276 +	}
   1.277 +
   1.278 +_LIT(KMsgQueueName, "t_datapaging_queue");
   1.279 +
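          +// Thread function which repeatedly pins and unpins a random range of pages in the
          +// shared chunk while the other soak-test threads are accessing it.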
   1.280 +TInt PinPagesFunc(TAny* aArg)
   1.281 +	{
   1.282 +	SSoakTestArgs* args = (SSoakTestArgs*)aArg;
   1.283 +
   1.284 +	RMemoryTestLdd ldd;
   1.285 +	TInt r = ldd.Open();
   1.286 +	if (r != KErrNone)
   1.287 +		return r;
   1.288 +	r = ldd.CreateVirtualPinObject();
   1.289 +	if (r != KErrNone)
   1.290 +		return r;
   1.291 +
   1.292 +	RMsgQueue<TInt> msgQueue;
   1.293 +	r = msgQueue.OpenGlobal(KMsgQueueName, EOwnerThread);
   1.294 +	if (r != KErrNone)
   1.295 +		return r;
   1.296 +
   1.297 +	TInt i = 0;
   1.298 +	TRandom random;
   1.299 +	while (ContinueSoakTest(msgQueue))
   1.300 +		{
   1.301 +		TInt count = 1 + random.Next() % (args->iPages / 4);
   1.302 +		TInt start = random.Next() % (args->iPages - count);
   1.303 +		TInt sleepInMs = 1 + random.Next() % 20;
   1.304 +		TUint32* ptr = PageBasePtr(start);
   1.305 +
   1.306 +		r = ldd.PinVirtualMemory((TLinAddr)ptr, count * gPageSize);
   1.307 +		if (r != KErrNone)
   1.308 +			return r;
   1.309 +
   1.310 +		User::AfterHighRes(sleepInMs * 1000);
   1.311 +
   1.312 +		r = ldd.UnpinVirtualMemory();
   1.313 +		if (r != KErrNone)
   1.314 +			return r;
   1.315 +	
   1.316 +		++i;
   1.317 +		}
   1.318 +
   1.319 +	msgQueue.Close();
   1.320 +
   1.321 +	r = ldd.DestroyVirtualPinObject();
   1.322 +	if (r != KErrNone)
   1.323 +		return r;
   1.324 +	ldd.Close();
   1.325 +					
   1.326 +	RDebug::Printf("  thread %d performed %d iterations (pinning)", args->iThreadIndex, i);
   1.327 +	return KErrNone;
   1.328 +	}
   1.329 +
   1.330 +TBool TestReadWord(TUint32* aPtr, TUint32 aExpected, TInt aThread, TInt aPage, TInt aIteration, TInt aLine, RMsgQueue<TInt> aMsgQueue)
   1.331 +	{
    1.332 +	TUint32 actual = *aPtr;
    1.333 +	if (actual != aExpected)
    1.334 +		{
    1.335 +		StopSoakTest(aMsgQueue);
    1.336 +		RDebug::Printf("  thread %d failure reading page %d at iteration %d address %08x (line %d): expected %08x but got %08x",
    1.337 +					   aThread, aPage, aIteration, aPtr, aLine, aExpected, actual);
   1.338 +		return EFalse;
   1.339 +		}
   1.340 +	return ETrue;
   1.341 +	}
   1.342 +
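          +// Worker thread for the soak test: on each iteration it picks a random page and either
          +// verifies the page tag and both of its own words, or writes a new value to its words,
          +// checking the tag and the not-yet-updated word between the two writes.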
   1.343 +TInt SoakTestFunc(TAny* aArg)
   1.344 +	{
   1.345 +	SSoakTestArgs* args = (SSoakTestArgs*)aArg;
   1.346 +
   1.347 +	
   1.348 +	RMsgQueue<TInt> msgQueue;
   1.349 +	TInt r = msgQueue.OpenGlobal(KMsgQueueName, EOwnerThread);
   1.350 +	if (r != KErrNone)
   1.351 +		return r;
   1.352 +
   1.353 +	TUint32* contents = new TUint32[args->iPages];
   1.354 +	if (contents == NULL)
   1.355 +		return KErrNoMemory;
   1.356 +	Mem::Fill(contents, args->iPages * sizeof(TUint32), 0);
   1.357 +
   1.358 +	TInt i = 0;
   1.359 +	TRandom random;
   1.360 +	while (ContinueSoakTest(msgQueue))
   1.361 +		{
   1.362 +		TUint32 rand = random.Next();
   1.363 +		TInt page = rand % args->iPages;
   1.364 +		TUint32* ptr = PageDataPtr(page, args->iThreadIndex);
   1.365 +		TInt action = rand >> 31;
   1.366 +		if (action == 0)
   1.367 +			{
   1.368 +			if (!TestReadWord(PageBasePtr(page), PageTag(page), args->iThreadIndex, page, i, __LINE__, msgQueue))
   1.369 +				return KErrGeneral;
   1.370 +			if (!TestReadWord(&ptr[0], contents[page], args->iThreadIndex, page, i, __LINE__, msgQueue))
   1.371 +				return KErrGeneral;
   1.372 +			if (!TestReadWord(&ptr[1], contents[page], args->iThreadIndex, page, i, __LINE__, msgQueue))
   1.373 +				return KErrGeneral;
   1.374 +			}
   1.375 +		else
   1.376 +			{
   1.377 +			TUint newContents = args->iThreadIndex+0x100+(contents[page]&~0xff);
   1.378 +			ptr[0] = newContents;
   1.379 +			if (!TestReadWord(PageBasePtr(page), PageTag(page), args->iThreadIndex, page, i, __LINE__, msgQueue))
   1.380 +				return KErrGeneral;
   1.381 +			if (!TestReadWord(&ptr[1], contents[page], args->iThreadIndex, page, i, __LINE__, msgQueue))
   1.382 +				return KErrGeneral;
   1.383 +			ptr[1] = newContents;
   1.384 +			contents[page] = newContents;
   1.385 +			}
   1.386 +		++i;
   1.387 +		}
   1.388 +	
   1.389 +	for (TInt j = 0 ; j < args->iPages ; ++j)
   1.390 +		{
   1.391 +		TUint32* ptr = PageDataPtr(j, args->iThreadIndex);
   1.392 +		if (!TestReadWord(PageBasePtr(j), PageTag(j), args->iThreadIndex, j, i, __LINE__, msgQueue))
   1.393 +			return KErrGeneral;
   1.394 +		if (!TestReadWord(&ptr[0], contents[j], args->iThreadIndex, j, i, __LINE__, msgQueue))
   1.395 +			return KErrGeneral;
   1.396 +		if (!TestReadWord(&ptr[1], contents[j], args->iThreadIndex, j, i, __LINE__, msgQueue))
   1.397 +			return KErrGeneral;
   1.398 +		}
   1.399 +
   1.400 +	delete [] contents;
   1.401 +	msgQueue.Close();
   1.402 +
   1.403 +	RDebug::Printf("  thread %d performed %d iterations", args->iThreadIndex, i);
   1.404 +	return KErrNone;
   1.405 +	}
   1.406 +
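          +// Run one soak-test process: open the shared chunk and start aThreads worker threads
          +// (plus an optional pinning thread), then wait for them all to exit cleanly.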
   1.407 +TInt SoakProcess(TInt aProcessIndex, TInt aThreads, TInt aPages, TBool aPinPages)
   1.408 +	{
   1.409 +	TInt pinThreadIndex = aPinPages ? aThreads++ : -1;
   1.410 +
   1.411 +	test_KErrNone(gChunk.OpenGlobal(KChunkName, EFalse));
   1.412 +	
   1.413 +	SSoakTestArgs* testArgs = new SSoakTestArgs[aThreads];
   1.414 +	test_NotNull(testArgs);
   1.415 +			
   1.416 +	RThread* threads = new RThread[aThreads];
   1.417 +	test_NotNull(threads);
   1.418 +	
   1.419 +	TRequestStatus* statuses = new TRequestStatus[aThreads];
   1.420 +	test_NotNull(statuses);
   1.421 +	
   1.422 +	TInt i;
   1.423 +	for (i = 0 ; i < aThreads ; ++i)
   1.424 +		{
   1.425 +		testArgs[i].iThreadIndex = aProcessIndex * aThreads + i;
   1.426 +		testArgs[i].iPages = aPages;
   1.427 +		TThreadFunction func = i == pinThreadIndex ? PinPagesFunc : SoakTestFunc;
   1.428 +		test_KErrNone(threads[i].Create(KNullDesC, func, gPageSize, NULL, &testArgs[i]));
   1.429 +		threads[i].Logon(statuses[i]);
   1.430 +		}
   1.431 +
   1.432 +	// todo: rendezvous here?
   1.433 +	
   1.434 +	for (i = 0 ; i < aThreads ; ++i)
   1.435 +		threads[i].Resume();
   1.436 +	
   1.437 +	TBool ok = ETrue;		
   1.438 +	for (i = 0 ; i < aThreads ; ++i)
   1.439 +		{
   1.440 +		User::WaitForRequest(statuses[i]);
   1.441 +		if (threads[i].ExitType() != EExitKill || statuses[i].Int() != KErrNone)
   1.442 +			ok = EFalse;
   1.443 +		threads[i].Close();
   1.444 +		}
   1.445 +	
   1.446 +	delete [] testArgs;
   1.447 +	delete [] threads;
   1.448 +	delete [] statuses;
   1.449 +	gChunk.Close();
   1.450 +	
   1.451 +	return ok ? KErrNone : KErrGeneral;
   1.452 +	}
   1.453 +
   1.454 +TInt RunSoakProcess()
   1.455 +	{
   1.456 +	TBuf<80> buf;
   1.457 +	if (User::CommandLineLength() > buf.MaxLength())
   1.458 +		return KErrArgument;
   1.459 +	User::CommandLine(buf);
   1.460 +	TLex lex(buf);
   1.461 +
   1.462 +	TInt index;
   1.463 +	TInt r = lex.Val(index);
   1.464 +	if (r != KErrNone)
   1.465 +		return r;
   1.466 +	lex.SkipSpace();
   1.467 +
   1.468 +	TInt threads;
   1.469 +	r = lex.Val(threads);
   1.470 +	if (r != KErrNone)
   1.471 +		return r;
   1.472 +	lex.SkipSpace();
   1.473 +	
   1.474 +	TInt pages;
   1.475 +	r = lex.Val(pages);
   1.476 +	if (r != KErrNone)
   1.477 +		return r;
   1.478 +	lex.SkipSpace();
   1.479 +	
   1.480 +	TBool pinPages;
   1.481 +	r = lex.Val(pinPages);
   1.482 +	if (r != KErrNone)
   1.483 +		return r;
   1.484 +	
   1.485 +	return SoakProcess(index, threads, pages, pinPages);
   1.486 +	}
   1.487 +
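          +// Top-level soak test: create the shared paged chunk, tag each page, spawn aProcesses
          +// copies of this executable (each running SoakProcess), let them run for the requested
          +// time, then signal them to stop via the message queue and check the results.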
   1.488 +void SoakTest(TInt aProcesses, TInt aThreads, TInt aPages, TBool aPinPages, TInt aDurationInSeconds)
   1.489 +	{
   1.490 +	RDebug::Printf("Soak test: %d processes, %d threads, %d pages, %s pinning for %d seconds",
   1.491 +				   aProcesses, aThreads, aPages, (aPinPages ? "with" : "without"), aDurationInSeconds);
   1.492 +	DPTest::FlushCache();
   1.493 +	
   1.494 +	TInt totalThreads = (aThreads + (aPinPages ? 1 : 0)) * aProcesses;
   1.495 +	test(totalThreads < 512); // each thread uses two words in a page
   1.496 +
   1.497 +	TMediaPagingStats dummy=EMediaPagingStatsRomAndCode;
   1.498 +	PagingInfo::ResetBenchmarks(-1, dummy);	// Don't worry about locmedia stats.
   1.499 +
   1.500 +	RMsgQueue<TInt> msgQueue;
   1.501 +	test_KErrNone(msgQueue.CreateGlobal(KMsgQueueName, totalThreads, EOwnerThread));
   1.502 +
   1.503 +	CreatePagedChunk(aPages, 0);
   1.504 +	TInt i;
   1.505 +	for (i = 0 ; i < aPages ; ++i)
   1.506 +		*PageBasePtr(i) = PageTag(i);
   1.507 +			
   1.508 +	RProcess* processes = new RProcess[aProcesses];
   1.509 +	TRequestStatus* statuses = new TRequestStatus[aProcesses];
   1.510 +	for (i = 0 ; i < aProcesses ; ++i)
   1.511 +		{
   1.512 +		TBuf<80> args;
   1.513 +		args.AppendFormat(_L("%d %d %d %d"), i, aThreads, aPages, aPinPages);
   1.514 +		test_KErrNone(processes[i].Create(_L("t_datapaging"), args));
   1.515 +		processes[i].Logon(statuses[i]);
   1.516 +		}
   1.517 +
   1.518 +	RThread().SetPriority(EPriorityMore); // so we don't get starved of CPU by worker threads
   1.519 +
   1.520 +	for (i = 0 ; i < aProcesses ; ++i)
   1.521 +		processes[i].Resume();
   1.522 +
   1.523 +	User::After(aDurationInSeconds * 1000000);
   1.524 +	StopSoakTest(msgQueue);
   1.525 +	
   1.526 +	TBool ok = ETrue;		
   1.527 +	for (i = 0 ; i < aProcesses ; ++i)
   1.528 +		{
   1.529 +		User::WaitForRequest(statuses[i]);
   1.530 +		if (processes[i].ExitType() != EExitKill || statuses[i].Int() != KErrNone)
   1.531 +			{
   1.532 +			ok = EFalse;
   1.533 +			RDebug::Printf("  process %i died with %d,%d", i, processes[i].ExitType(), statuses[i].Int());
   1.534 +			}
   1.535 +		processes[i].Close();
   1.536 +		}
   1.537 +
   1.538 +	RThread().SetPriority(EPriorityNormal);
   1.539 +
   1.540 +	if (!ok)
   1.541 +		{
   1.542 +		for (i = 0 ; i < aPages ; ++i)
   1.543 +			{
   1.544 +			test.Printf(_L("%3d %08x"), i, *PageBasePtr(i));
   1.545 +			for (TInt j = 0 ; j < totalThreads ; ++j)
   1.546 +				{
   1.547 +				TUint32* ptr = PageDataPtr(i, j);
   1.548 +				test.Printf(_L(" %08x,%08x"), ptr[0], ptr[1]);
   1.549 +				}
    1.550 +			test.Printf(_L("\n"));
   1.551 +			}
   1.552 +		}
   1.553 +	test(ok);	
   1.554 +
   1.555 +	gChunk.Close();
   1.556 +	
   1.557 +	User::After(1000000);
   1.558 +	RDebug::Printf("  done");
   1.559 +	RDebug::Printf("\n");
   1.560 +	
   1.561 +	msgQueue.Close();
   1.562 +	delete [] processes;
   1.563 +	delete [] statuses;
   1.564 +
   1.565 +	PagingInfo::PrintBenchmarks(-1, dummy);	// Don't worry about locmedia stats.
   1.566 +	}
   1.567 +
   1.568 +void CommitPage(RChunk chunk, TInt aPageIndex)
   1.569 +	{
   1.570 +	test_KErrNone(chunk.Commit(aPageIndex * gPageSize, gPageSize));
   1.571 +	}
   1.572 +
   1.573 +void DecommitPage(RChunk chunk, TInt aPageIndex)
   1.574 +	{
   1.575 +	test_KErrNone(chunk.Decommit(aPageIndex * gPageSize, gPageSize));
   1.576 +	}
   1.577 +
   1.578 +void WaitForNotifiers()
   1.579 +	{
    1.580 +	// wait until the notifiers have had a chance to signal us...
   1.581 +	UserSvr::HalFunction(EHalGroupKernel, EKernelHalSupervisorBarrier, 0, 0);
   1.582 +	}
   1.583 +
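          +// Check that EVMHalGetSwapInfo reports swap usage correctly as paged memory is
          +// committed and decommitted, and that EVMHalSetSwapThresholds validates its arguments
          +// and triggers the free-memory / low-memory change notifications as expected.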
   1.584 +void TestSwapHal()
   1.585 +	{
   1.586 +	test.Next(_L("Test EVMHalGetSwapInfo"));
   1.587 +
   1.588 +	TChunkCreateInfo createInfo;
   1.589 +	createInfo.SetDisconnected(0, 0, 256 * gPageSize);
   1.590 +	createInfo.SetPaging(TChunkCreateInfo::EPaged);
   1.591 +	RChunk chunk;
   1.592 +	test_KErrNone(chunk.Create(createInfo));
   1.593 +	if (gDataPagingSupported)
   1.594 +		test(chunk.IsPaged());
   1.595 +	
   1.596 +	SVMSwapInfo swapInfo;
   1.597 +	test_KErrNone(UserSvr::HalFunction(EHalGroupVM, EVMHalGetSwapInfo, &swapInfo, 0));
   1.598 +	test(swapInfo.iSwapFree <= swapInfo.iSwapSize);
   1.599 +	test.Printf(_L("  Swap size == 0x%x bytes\n"), swapInfo.iSwapSize);
   1.600 +	test.Printf(_L("  Swap free == 0x%x bytes\n"), swapInfo.iSwapFree);
   1.601 +	if (!gDataPagingSupported)
   1.602 +		{
   1.603 +		test_Equal(0, swapInfo.iSwapSize);
   1.604 +		}
   1.605 +	else
   1.606 +		{
   1.607 +		test(swapInfo.iSwapSize != 0);
   1.608 +		
   1.609 +		CommitPage(chunk, 0);
   1.610 +		SVMSwapInfo swapInfo2;
   1.611 +		test_KErrNone(UserSvr::HalFunction(EHalGroupVM, EVMHalGetSwapInfo, &swapInfo2, 0));
   1.612 +		test_Equal(swapInfo.iSwapSize, swapInfo2.iSwapSize);
   1.613 +		test_Equal(swapInfo.iSwapFree - gPageSize, swapInfo2.iSwapFree);
   1.614 +		
   1.615 +		DecommitPage(chunk, 0);
   1.616 +		test_KErrNone(UserSvr::HalFunction(EHalGroupVM, EVMHalGetSwapInfo, &swapInfo2, 0));
   1.617 +		test_Equal(swapInfo.iSwapSize, swapInfo2.iSwapSize);
   1.618 +		test_Equal(swapInfo.iSwapFree, swapInfo2.iSwapFree);
   1.619 +
   1.620 +		// Test that closing the chunk releases the swap page.
   1.621 +		CommitPage(chunk, 0);
   1.622 +		test_KErrNone(UserSvr::HalFunction(EHalGroupVM, EVMHalGetSwapInfo, &swapInfo2, 0));
   1.623 +		test_Equal(swapInfo.iSwapSize, swapInfo2.iSwapSize);
   1.624 +		test_Equal(swapInfo.iSwapFree - gPageSize, swapInfo2.iSwapFree);
   1.625 +		
   1.626 +		chunk.Close();
   1.627 +		test_KErrNone(UserSvr::HalFunction(EHalGroupVM, EVMHalGetSwapInfo, &swapInfo2, 0));
   1.628 +		test_Equal(swapInfo.iSwapSize, swapInfo2.iSwapSize);
   1.629 +		test_Equal(swapInfo.iSwapFree, swapInfo2.iSwapFree);
   1.630 +
    1.631 +		// The chunk must be re-created for the rest of the test.
   1.632 +		test_KErrNone(chunk.Create(createInfo));
   1.633 +		if (gDataPagingSupported)
   1.634 +			test(chunk.IsPaged());
   1.635 +		}
   1.636 +	
   1.637 +	//	EVMHalSetSwapThresholds,
   1.638 +	test.Next(_L("Test EVMHalSetSwapThresholds"));
   1.639 +	SVMSwapThresholds thresholds;
   1.640 +	thresholds.iLowThreshold = 1;
   1.641 +	thresholds.iGoodThreshold = 0;
   1.642 +	test_Equal(KErrArgument, UserSvr::HalFunction(EHalGroupVM, EVMHalSetSwapThresholds, &thresholds, 0));
   1.643 +	thresholds.iLowThreshold = swapInfo.iSwapSize + 1;
   1.644 +	thresholds.iGoodThreshold = swapInfo.iSwapSize + 1;
   1.645 +	test_Equal(KErrArgument, UserSvr::HalFunction(EHalGroupVM, EVMHalSetSwapThresholds, &thresholds, 0));
   1.646 +	thresholds.iLowThreshold = 0;
   1.647 +	thresholds.iGoodThreshold = 0;
   1.648 +	test_KErrNone(UserSvr::HalFunction(EHalGroupVM, EVMHalSetSwapThresholds, &thresholds, 0));
   1.649 +	thresholds.iLowThreshold = swapInfo.iSwapSize;
   1.650 +	thresholds.iGoodThreshold = swapInfo.iSwapSize;
   1.651 +	test_KErrNone(UserSvr::HalFunction(EHalGroupVM, EVMHalSetSwapThresholds, &thresholds, 0));
   1.652 +
   1.653 +	// test thresholds trigger ok
   1.654 +	
   1.655 +	RChangeNotifier changes;
   1.656 +	test_KErrNone(changes.Create());
   1.657 +	TRequestStatus status;
   1.658 +	test_KErrNone(changes.Logon(status));
   1.659 +	User::WaitForRequest(status);
   1.660 +	test_KErrNone(changes.Logon(status));
   1.661 +	test_Equal(KRequestPending, status.Int());
   1.662 +	
   1.663 +	thresholds.iLowThreshold = swapInfo.iSwapFree - 2 * gPageSize;
   1.664 +	thresholds.iGoodThreshold = swapInfo.iSwapFree - gPageSize;
   1.665 +	test_KErrNone(UserSvr::HalFunction(EHalGroupVM, EVMHalSetSwapThresholds, &thresholds, 0));
   1.666 +
   1.667 +	CommitPage(chunk, 0);
   1.668 +	CommitPage(chunk, 1);
   1.669 +	WaitForNotifiers();
   1.670 +	test_Equal(KRequestPending, status.Int());
   1.671 +	CommitPage(chunk, 2);
   1.672 +	WaitForNotifiers();
   1.673 +	test_Equal(EChangesFreeMemory | EChangesLowMemory, status.Int());
   1.674 +	User::WaitForRequest(status);
   1.675 +	
   1.676 +	test_KErrNone(changes.Logon(status));
   1.677 +	DecommitPage(chunk, 2);
   1.678 +	WaitForNotifiers();
   1.679 +	test_Equal(KRequestPending, status.Int());
   1.680 +	DecommitPage(chunk, 1);
   1.681 +	WaitForNotifiers();
   1.682 +	test_Equal(EChangesFreeMemory, status.Int());
   1.683 +	User::WaitForRequest(status);
   1.684 +	DecommitPage(chunk, 0);
   1.685 +	
   1.686 +	CLOSE_AND_WAIT(changes);
   1.687 +
   1.688 +	// leave some sensible thresholds set
   1.689 +	thresholds.iLowThreshold = (10 * swapInfo.iSwapSize) / 100;
   1.690 +	thresholds.iGoodThreshold = (20 * swapInfo.iSwapSize) / 100;
   1.691 +	test_KErrNone(UserSvr::HalFunction(EHalGroupVM, EVMHalSetSwapThresholds, &thresholds, 0));
   1.692 +
   1.693 +	CLOSE_AND_WAIT(chunk);
   1.694 +	}
   1.695 +
   1.696 +void TestSwapHalNotSupported()
   1.697 +	{
   1.698 +	test_Equal(KErrNotSupported, UserSvr::HalFunction(EHalGroupVM, EVMHalGetSwapInfo, 0, 0));
   1.699 +	test_Equal(KErrNotSupported, UserSvr::HalFunction(EHalGroupVM, EVMHalSetSwapThresholds, 0, 0));
   1.700 +	}
   1.701 +
   1.702 +void TestHal()
   1.703 +	{
   1.704 +	if (gDataPagingSupported)
   1.705 +		TestSwapHal();
   1.706 +	else
   1.707 +		TestSwapHalNotSupported();
   1.708 +	}
   1.709 +
   1.710 +
   1.711 +TBool gStealEnable = false;
   1.712 +
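          +// Thread which repeatedly dirties every page in the chunk, decommits all of its memory
          +// (while StealThread flushes the paging cache) and then recommits it, exercising the
          +// race between decommit and page stealing.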
   1.713 +TInt DecommitThread(TAny*)
   1.714 +	{
   1.715 +	RThread().SetPriority(EPriorityLess); // so this thread gets pre-empted by StealThread
   1.716 +	TUint8* base = gChunk.Base();
   1.717 +	TInt size = gChunk.MaxSize();
   1.718 +	for(;;)
   1.719 +		{
   1.720 +		// dirty all pages
   1.721 +		for(TInt i=0; i<size; i+=gPageSize)
   1.722 +			base[i] = 0;
   1.723 +		// free pages...
   1.724 +		gStealEnable = true;
   1.725 +		gChunk.Adjust(0);
   1.726 +		gStealEnable = false;
   1.727 +		// recommit pages...
   1.728 +		TInt r = gChunk.Adjust(size);
   1.729 +		if(r!=KErrNone)
   1.730 +			return r; // error
   1.731 +		}
   1.732 +	}
   1.733 +
   1.734 +
   1.735 +TInt StealThread(TAny*)
   1.736 +	{
   1.737 +	for(;;)
   1.738 +		{
   1.739 +		while(!gStealEnable)
   1.740 +			User::AfterHighRes(0);
   1.741 +		DPTest::FlushCache();
   1.742 +		}
   1.743 +	}
   1.744 +
   1.745 +
   1.746 +void TestDecommitAndStealInteraction(TInt aSeconds)
   1.747 +	{
   1.748 +	__KHEAP_MARK;
   1.749 +
   1.750 +	CreatePagedChunk(256);
   1.751 +
   1.752 +	RThread thread1;
   1.753 +	test_KErrNone(thread1.Create(_L("DecommitThread"), DecommitThread, gPageSize, NULL, 0));
   1.754 +	TRequestStatus status1;
   1.755 +	thread1.Logon(status1);
   1.756 +
   1.757 +	RThread thread2;
   1.758 +	test_KErrNone(thread2.Create(_L("StealThread"), StealThread, gPageSize, NULL, 0));
   1.759 +	TRequestStatus status2;
    1.760 +	thread2.Logon(status2);
   1.761 +
   1.762 +	RTimer timer;
   1.763 +	test_KErrNone(timer.CreateLocal());
   1.764 +	TRequestStatus timeoutStatus;
   1.765 +	timer.After(timeoutStatus,aSeconds*1000000);
   1.766 +
   1.767 +	thread1.Resume();
   1.768 +	thread2.Resume();
   1.769 +	User::WaitForAnyRequest();
   1.770 +
   1.771 +	thread1.Kill(123);
   1.772 +	User::WaitForRequest(status1);
   1.773 +	test_Equal(123, status1.Int());
   1.774 +	CLOSE_AND_WAIT(thread1);
   1.775 +
   1.776 +	thread2.Kill(123);
   1.777 +	User::WaitForRequest(status2);
   1.778 +	test_Equal(123, status2.Int());
   1.779 +	CLOSE_AND_WAIT(thread2);
   1.780 +
   1.781 +	CLOSE_AND_WAIT(timer);
   1.782 +	test_KErrNone(timeoutStatus.Int());
   1.783 +	
   1.784 +	CLOSE_AND_WAIT(gChunk);
   1.785 +	__KHEAP_MARKEND;
   1.786 +	}
   1.787 +
   1.788 +TInt ThreadAtomic64Flush(TAny*)
   1.789 +	{
   1.790 +	TInt64 seed = 0x33333333;
   1.791 +	FOREVER
   1.792 +		{
   1.793 +		DPTest::FlushCache();
   1.794 +		User::After(Math::Rand(seed) & 0x48);
   1.795 +		}
   1.796 +	}
   1.797 +
   1.798 +enum TAtomic64Test
   1.799 +	{
   1.800 +	EAtomic64Add,
   1.801 +	EAtomic64Logic,
   1.802 +	EAtomic64Cas,
   1.803 +	EAtomic64Steps,
   1.804 +	};
   1.805 +
   1.806 +struct SAtomic64Args
   1.807 +	{
   1.808 +	TUint iIters;
   1.809 +	TUint64* iData;
   1.810 +	TInt iIncs;
   1.811 +	TUint iClears[64];
   1.812 +	TUint iSets[64];
   1.813 +	};
   1.814 +
   1.815 +
   1.816 +TInt ThreadAtomic64Cas(TAny* aArgs)
   1.817 +	{
   1.818 +	SAtomic64Args& args = *(SAtomic64Args*)aArgs;
   1.819 +	for (TUint i = 0; i < args.iIters; i++)
   1.820 +		{
   1.821 +		TUint64 setMask = UI64LIT(0xffffffffffffffff);
   1.822 +		TUint64 clrMask = 0;
   1.823 +		if (__e32_atomic_cas_ord64(args.iData, &setMask, clrMask))
   1.824 +			args.iClears[0]++;
   1.825 +		// Undo any clearing of setMask which will happen if iData is 0.
   1.826 +		setMask = UI64LIT(0xffffffffffffffff);
   1.827 +		if (__e32_atomic_cas_ord64(args.iData, &clrMask, setMask))
   1.828 +			args.iSets[0]++;
   1.829 +		}
   1.830 +	return KErrNone;
   1.831 +	}
   1.832 +
   1.833 +
   1.834 +TInt ThreadAtomic64Logic(TAny* aArgs)
   1.835 +	{
   1.836 +	TInt r = KErrNone;
   1.837 +	SAtomic64Args& args = *(SAtomic64Args*)aArgs;
   1.838 +	for(TUint i = 0; i < args.iIters; i++)
   1.839 +		{
   1.840 +		TUint bitNo = (i & 0x3f);
   1.841 +		TUint64 bitMask = ((TUint64)1) << bitNo;
   1.842 +		TUint64 andMask = ~bitMask;
   1.843 +
   1.844 +		TUint64 old = __e32_atomic_and_ord64(args.iData, andMask);
   1.845 +		if (old & bitMask)
   1.846 +			args.iClears[bitNo]++;
   1.847 +
   1.848 +		old = __e32_atomic_ior_ord64(args.iData, bitMask);
   1.849 +		if (!(old & bitMask))
   1.850 +			args.iSets[bitNo]++;
   1.851 +
   1.852 +		old = __e32_atomic_xor_ord64(args.iData, bitMask);
   1.853 +		if (old & bitMask)
   1.854 +			args.iClears[bitNo]++;
   1.855 +		else
   1.856 +			args.iSets[bitNo]++;
   1.857 +
   1.858 +		old = __e32_atomic_axo_ord64(args.iData, UI64LIT(0xffffffffffffffff), bitMask);
   1.859 +		if (old & bitMask)
   1.860 +			args.iClears[bitNo]++;
   1.861 +		else
   1.862 +			args.iSets[bitNo]++;
   1.863 +		
   1.864 +		}
   1.865 +	return r;
   1.866 +	}
   1.867 +
   1.868 +
   1.869 +TInt ThreadAtomic64Add(TAny* aArgs)
   1.870 +	{
   1.871 +	TInt r = KErrNone;
   1.872 +	SAtomic64Args& args = *(SAtomic64Args*)aArgs;
   1.873 +	for(TUint i = 0; i < args.iIters; i++)
   1.874 +		{
   1.875 +		TUint64 old = __e32_atomic_add_ord64(args.iData, 1);
   1.876 +		args.iIncs += 1;
   1.877 +		old = __e32_atomic_tau_ord64(args.iData, 1000, 1, 2);
   1.878 +		args.iIncs += (old >= 1000)? 1 : 2;
   1.879 +		old = __e32_atomic_tas_ord64(args.iData, 1000, 1, -1);
   1.880 +		args.iIncs += (old >= 1000)? 1 : -1;
   1.881 +		}
   1.882 +	return r;
   1.883 +	}
   1.884 +
   1.885 +
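          +// Check that 64-bit atomic operations remain atomic when the word they operate on lives
          +// in a data-paged chunk which is concurrently flushed out of the paging cache by a
          +// high-priority thread (ThreadAtomic64Flush).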
   1.886 +void TestAtomic64()
   1.887 +	{
   1.888 +	CreatePagedChunk(sizeof(TUint64));
   1.889 +	TUint64* data = (TUint64*)gChunk.Base();
   1.890 +
   1.891 +	const TUint KThreads = 25;
   1.892 +	RThread threads[KThreads];
   1.893 +	TRequestStatus stats[KThreads];
   1.894 +	SAtomic64Args* args = new SAtomic64Args[KThreads];
   1.895 +	test_NotNull(args);
   1.896 +
   1.897 +	for (TInt testStep = EAtomic64Add; testStep < EAtomic64Steps; testStep++)
   1.898 +		{
   1.899 +		switch (testStep)
   1.900 +			{
   1.901 +			case EAtomic64Add:
   1.902 +				test.Next(_L("Test 64-bit atomic addition operations"));
    1.903 +				break;
   1.904 +			case EAtomic64Logic:
   1.905 +				test.Next(_L("Test 64-bit atomic logic operations"));
   1.906 +				break;
   1.907 +			case EAtomic64Cas:
   1.908 +				test.Next(_L("Test 64-bit atomic cas operations"));
   1.909 +				break;
   1.910 +			}
   1.911 +		*data = 0;
   1.912 +		RThread threadFlush;
   1.913 +		test_KErrNone(threadFlush.Create(_L("ThreadAtomicFlush"), ThreadAtomic64Flush, gPageSize, NULL, NULL));
   1.914 +		TRequestStatus status1;
   1.915 +		threadFlush.Logon(status1);
   1.916 +		threadFlush.SetPriority(EPriorityAbsoluteHigh);
   1.917 +
   1.918 +		memclr(args, sizeof(SAtomic64Args)*KThreads);
   1.919 +		TUint i = 0;
   1.920 +		for (; i < KThreads; i++)
   1.921 +			{
   1.922 +			args[i].iIters = 10000;
   1.923 +			args[i].iData = data;
   1.924 +			switch (testStep)
   1.925 +				{
   1.926 +				case EAtomic64Add:
   1.927 +					test_KErrNone(threads[i].Create(KNullDesC, ThreadAtomic64Add, gPageSize, NULL, (TAny*)&args[i]));
   1.928 +					break;
   1.929 +				case EAtomic64Logic:
   1.930 +					test_KErrNone(threads[i].Create(KNullDesC, ThreadAtomic64Logic, gPageSize, NULL, (TAny*)&args[i]));
   1.931 +					break;
   1.932 +				case EAtomic64Cas:
   1.933 +					test_KErrNone(threads[i].Create(KNullDesC, ThreadAtomic64Cas, gPageSize, NULL, (TAny*)&args[i]));
   1.934 +					break;
   1.935 +				}
   1.936 +			threads[i].Logon(stats[i]);
   1.937 +			}
   1.938 +		threadFlush.Resume();
   1.939 +		for (i = 0; i < KThreads; i++)
   1.940 +			{
   1.941 +			threads[i].Resume();
   1.942 +			}
   1.943 +
    1.944 +		// Wait for the worker threads to complete, then kill the flushing thread.
   1.945 +		for (i = 0; i < KThreads; i++)
   1.946 +			{
   1.947 +			User::WaitForRequest(stats[i]);
   1.948 +			test_KErrNone(stats[i].Int());
   1.949 +			}
   1.950 +		threadFlush.Kill(KErrNone);
   1.951 +		User::WaitForRequest(status1);
   1.952 +		test_KErrNone(status1.Int());
   1.953 +		TInt64 expected = 0;
   1.954 +		switch (testStep)
   1.955 +			{
   1.956 +			case EAtomic64Add:
   1.957 +				{
   1.958 +				for (TUint i = 0; i < KThreads; i++)
   1.959 +					{
   1.960 +					threads[i].Close();
   1.961 +					expected += args[i].iIncs;
   1.962 +					}
   1.963 +				break;
   1.964 +				}
   1.965 +			case EAtomic64Logic:
   1.966 +				{
   1.967 +				TUint totalSets[64];
   1.968 +				TUint totalClears[64];
   1.969 +				memclr(totalSets, sizeof(TUint)*64);
   1.970 +				memclr(totalClears, sizeof(TUint)*64);
   1.971 +				for (TUint i = 0; i < KThreads; i++)
   1.972 +					{
   1.973 +					threads[i].Close();
   1.974 +					for (TUint j = 0; j < 64; j++)
   1.975 +						{
   1.976 +						totalSets[j] += args[i].iSets[j];
   1.977 +						totalClears[j] += args[i].iClears[j];
   1.978 +						}
   1.979 +					}
   1.980 +				for (TUint j = 0; j < 64; j++)
   1.981 +					{
    1.982 +					TUint64 bitMask = ((TUint64)1) << j;
   1.983 +					if (totalSets[j] > totalClears[j])
   1.984 +						{
   1.985 +						test_Equal(totalSets[j] - 1, totalClears[j]);
   1.986 +						expected |= bitMask;
   1.987 +						}
   1.988 +					else
   1.989 +						{// Can only clear a bit if it was previously set.
   1.990 +						test_Equal(totalClears[j], totalSets[j]);
   1.991 +						}
   1.992 +					}
   1.993 +				break;
   1.994 +				}
   1.995 +			case EAtomic64Cas:
   1.996 +				{
   1.997 +				TUint totalSets = 0;
   1.998 +				TUint totalClears = 0;
   1.999 +				for (TUint i = 0; i < KThreads; i++)
  1.1000 +					{
  1.1001 +					threads[i].Close();
  1.1002 +					totalSets += args[i].iSets[0];
  1.1003 +					totalClears += args[i].iClears[0];
  1.1004 +					}
  1.1005 +				if (totalSets > totalClears)
  1.1006 +					{
  1.1007 +					test_Equal(totalSets - 1, totalClears);
  1.1008 +					expected = UI64LIT(0xffffffffffffffff);
  1.1009 +					}
  1.1010 +				else
  1.1011 +					{// Can only clear a word if it was previously set.
  1.1012 +					test_Equal(totalClears, totalSets);
  1.1013 +					}
  1.1014 +				break;
  1.1015 +				}
  1.1016 +			}
  1.1017 +		test_Equal(expected, *data);
  1.1018 +		CLOSE_AND_WAIT(threadFlush);
  1.1019 +		}
  1.1020 +	delete[] args;
  1.1021 +	CLOSE_AND_WAIT(gChunk);
  1.1022 +	}
  1.1023 +
  1.1024 +
  1.1025 +//
  1.1026 +// soak test for writeable paged code...
  1.1027 +//
  1.1028 +
   1.1029 +const TUint KCodeStride = 20; // spacing in bytes between generated code fragments
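          +// CodeStart/CodeStep/CodeEnd emit small machine-code fragments, each at most KCodeStride
          +// bytes, which chain together into a single function that accumulates a checksum and
          +// finally returns it.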
  1.1030 +
  1.1031 +void CodeStart(TUint8* aCode, TUint8* aTarget, TUint32 aInit)
  1.1032 +	{
  1.1033 +#if defined(__CPU_X86)
  1.1034 +	aCode[0] = 0xb8; *(TUint32*)&(aCode[1]) = aInit;				// mov eax,aInit
  1.1035 +	aCode[5] = 0xe9; *(TUint32*)&(aCode[6]) = aTarget-(aCode+10);	// jmp aTarget
  1.1036 +	__ASSERT_COMPILE(KCodeStride>=10);
  1.1037 +
  1.1038 +#elif defined(__CPU_ARM)
  1.1039 +	*(TUint32*)&(aCode[0]) = 0xe59f0000;			// ldr r0, [pc, #0]
  1.1040 +	TInt32 offset = (aTarget-aCode-4-8)/4;
  1.1041 +	if(offset&0xff000000u)
  1.1042 +		{
  1.1043 +		offset ^= 0xff000000u;
  1.1044 +		test_Equal(0,offset&0xff000000u);
  1.1045 +		}
  1.1046 +	*(TUint32*)&(aCode[4]) = 0xea000000|offset;		// b aTarget
  1.1047 +	*(TUint32*)&(aCode[8]) = aInit;					// dcd aInit
  1.1048 +	__ASSERT_COMPILE(KCodeStride>=12);
  1.1049 +
  1.1050 +#else
  1.1051 +#error Unknown CPU
  1.1052 +#endif
  1.1053 +	}
  1.1054 +
  1.1055 +
  1.1056 +void CodeStep(TUint8* aCode, TUint8* aTarget, TUint32 aAdd)
  1.1057 +	{
  1.1058 +#if defined(__CPU_X86)
  1.1059 +	aCode[0] = 0xd1; aCode[1] = 0xc0;								// rol eax, 1
  1.1060 +	aCode[2] = 0x05; *(TUint32*)&(aCode[3]) = aAdd;					// add eax, aAdd
  1.1061 +	aCode[7] = 0xe9; *(TUint32*)&(aCode[8]) = aTarget-(aCode+12);	// jmp aTarget
  1.1062 +	__ASSERT_COMPILE(KCodeStride>=12);
  1.1063 +
  1.1064 +#elif defined(__CPU_ARM)
  1.1065 +	*(TUint32*)&(aCode[0]) = 0xe1a00fe0;			// ror r0, r0, #31
  1.1066 +	*(TUint32*)&(aCode[4]) = 0xe59f1004;			// ldr r1, [pc, #4]
  1.1067 +	*(TUint32*)&(aCode[8]) = 0xe0800001;			// add r0, r0, r1
  1.1068 +	TInt32 offset = (aTarget-aCode-12-8)/4;
  1.1069 +	if(offset&0xff000000u)
  1.1070 +		{
  1.1071 +		offset ^= 0xff000000u;
  1.1072 +		test_Equal(0,offset&0xff000000u);
  1.1073 +		}
  1.1074 +	*(TUint32*)&(aCode[12]) = 0xea000000|offset;	// b aTarget
  1.1075 +	*(TUint32*)&(aCode[16]) = aAdd;					// dcd aAdd
  1.1076 +	__ASSERT_COMPILE(KCodeStride>=20);
  1.1077 +
  1.1078 +#else
  1.1079 +#error Unknown CPU
  1.1080 +#endif
  1.1081 +	}
  1.1082 +
  1.1083 +
  1.1084 +void CodeEnd(TUint8* aCode)
  1.1085 +	{
  1.1086 +#if defined(__CPU_X86)
  1.1087 +	aCode[0] = 0xc3;						// ret
  1.1088 +	__ASSERT_COMPILE(KCodeStride>=1);
  1.1089 +
  1.1090 +#elif defined(__CPU_ARM)
  1.1091 +	*(TUint32*)&(aCode[0]) = 0xe12fff1e;	// bx lr
  1.1092 +	__ASSERT_COMPILE(KCodeStride>=4);
  1.1093 +
  1.1094 +#else
  1.1095 +#error Unknown CPU
  1.1096 +#endif
  1.1097 +	}
  1.1098 +
  1.1099 +
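          +// Build a chain of generated code fragments scattered randomly across a large paged code
          +// chunk, execute it and check the returned checksum; the chunk is sized at twice the
          +// maximum paging cache, so running the code forces its pages to be paged in and out.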
  1.1100 +void TestExecutableMemory()
  1.1101 +	{
  1.1102 +	__KHEAP_MARK;
  1.1103 +
  1.1104 +#if defined(__CPU_ARM)
  1.1105 +	const TUint KMaxChunkSize = 31*1024*1024; // ARM branch instruction limit
  1.1106 +#else
  1.1107 +	const TUint KMaxChunkSize = 1024*1024*1024; // 1GB
  1.1108 +#endif
  1.1109 +	const TUint KMaxPages = KMaxChunkSize/gPageSize;
  1.1110 +	TUint sizeInPages = gMaxCacheSize*2;
  1.1111 +	if(sizeInPages>KMaxPages)
  1.1112 +		sizeInPages = KMaxPages;
  1.1113 +
  1.1114 +	// create code chunk...
  1.1115 +	test.Start(_L("Create code chunk"));
  1.1116 +	TChunkCreateInfo createInfo;
  1.1117 +	TInt size = sizeInPages * gPageSize;
  1.1118 +	createInfo.SetCode(size, size);
  1.1119 +	createInfo.SetPaging(TChunkCreateInfo::EPaged);
  1.1120 +	createInfo.SetClearByte(0);
  1.1121 +	RChunk chunk;
  1.1122 +	test_KErrNone(chunk.Create(createInfo));
  1.1123 +	test(chunk.IsPaged()); // this is only ever called if data paging is supported
  1.1124 +	TUint8* base = chunk.Base();
  1.1125 +
   1.1126 +	// create a code path through the pages in the chunk, with a quadratically increasing number of steps per page...
  1.1127 +	test.Next(_L("Weave path"));
  1.1128 +	TInt pathLength = 0;
  1.1129 +	const TUint maxStepsPerPage = gPageSize/KCodeStride;
  1.1130 +	const TInt maxPathLength = sizeInPages*maxStepsPerPage;
  1.1131 +	TUint8** path = (TUint8**)User::Alloc(maxPathLength*sizeof(TUint8*));
  1.1132 +	test(path!=0);
  1.1133 +	for(TUint page=0; page<sizeInPages; ++page)
  1.1134 +		{
  1.1135 +		TUint step = (maxStepsPerPage-1)*(page*page)/(sizeInPages*sizeInPages)+1;
  1.1136 +		do path[pathLength++] = base+page*gPageSize+step*KCodeStride;
  1.1137 +		while(--step);
  1.1138 +		}
  1.1139 +	TUint32 rand = 0x12345678;
  1.1140 +	for(TUint scramble=pathLength*4; scramble>0; --scramble)
  1.1141 +		{
  1.1142 +		// swap random pair of entries on path...
  1.1143 +		TUint i = (TUint)(TUint64(TUint64(rand)*TUint64(pathLength))>>32);
  1.1144 +		rand = rand*69069+1;
  1.1145 +		TUint j = (TUint)(TUint64(TUint64(rand)*TUint64(pathLength))>>32);
  1.1146 +		rand = rand*69069+1;
  1.1147 +		TUint8* t = path[i];
  1.1148 +		path[i] = path[j];
  1.1149 +		path[j] = t;
  1.1150 +		}
  1.1151 +
  1.1152 +	// write code to generated path...
  1.1153 +	test.Next(_L("Write code"));
  1.1154 +	TUint32 a = 0;
  1.1155 +	TUint32 (*code)() = (TUint32 (*)())path[pathLength-1];
  1.1156 +	CodeStart(path[pathLength-1],path[pathLength-2],a);
  1.1157 +	while(--pathLength>1)
  1.1158 +		{
  1.1159 +		rand = rand*69069+1;
  1.1160 +		CodeStep(path[pathLength-1],path[pathLength-2],rand);
  1.1161 +		a = (a<<1)+(a>>31);
  1.1162 +		a += rand;
  1.1163 +		}
  1.1164 +	CodeEnd(path[0]);
  1.1165 +	--pathLength;
  1.1166 +	test_Equal(0,pathLength);
  1.1167 +	test.Next(_L("IMB"));
  1.1168 +	User::IMB_Range(base,base+chunk.Size());
  1.1169 +
  1.1170 +	// run code...
  1.1171 +	TMediaPagingStats dummy=EMediaPagingStatsRomAndCode;
  1.1172 +	PagingInfo::ResetBenchmarks(-1, dummy);	// Don't worry about locmedia stats.
  1.1173 +	test.Next(_L("Execute code"));
  1.1174 +	TUint32 result = code();
  1.1175 +	test_Equal(a,result);
  1.1176 +	PagingInfo::PrintBenchmarks(-1, dummy);	// Don't worry about locmedia stats.
  1.1177 +
  1.1178 +	// cleanup...
  1.1179 +	test.Next(_L("Cleanup"));
  1.1180 +	User::Free(path);
  1.1181 +	CLOSE_AND_WAIT(chunk);
  1.1182 +
  1.1183 +	test.End();
  1.1184 +
  1.1185 +	UserSvr::HalFunction(EHalGroupKernel, EKernelHalSupervisorBarrier, 0, 0);
  1.1186 +	__KHEAP_MARKEND;
  1.1187 +	}
  1.1188 +
  1.1189 +
  1.1190 +
  1.1191 +TInt E32Main()
  1.1192 +	{
  1.1193 +	test_KErrNone(UserHal::PageSizeInBytes(gPageSize));
  1.1194 +	
  1.1195 +	if (User::CommandLineLength() != 0)
  1.1196 +		return RunSoakProcess();
  1.1197 +	
  1.1198 +	test.Title();
  1.1199 +	test_KErrNone(GetGlobalPolicies());
  1.1200 +
  1.1201 +	test.Start(_L("Test HAL APIs"));
  1.1202 +	TestHal();
  1.1203 +
  1.1204 +	if (gDataPagingSupported)
  1.1205 +		{
  1.1206 +		test.Next(_L("Test reading and writing to a single page"));
  1.1207 +		TestOnePage();
  1.1208 +
  1.1209 +		test.Next(_L("Test 64-bit atomic operations are atomic with paged out data"));
  1.1210 +		TestAtomic64();
  1.1211 +
  1.1212 +		test.Next(_L("Test interaction between decommit and steal"));
  1.1213 +		TestDecommitAndStealInteraction(10);
  1.1214 +
  1.1215 +		test.Next(_L("Test killing a thread while it's paging in"));
  1.1216 +		TestKillThread(PageInThreadFunc, 200);
  1.1217 +				
  1.1218 +		test.Next(_L("Test killing a thread while it's paging out"));
  1.1219 +		TestKillThread(PageOutThreadFunc, 200);
  1.1220 +		
  1.1221 +		test.Next(_L("Test executable memory"));
  1.1222 +		TestExecutableMemory();
  1.1223 +
  1.1224 +		test.Next(_L("Soak tests"));
  1.1225 +		DPTest::FlushCache();
  1.1226 +
  1.1227 +		test.Next(_L("Soak test: change maximum cache size to minimal"));
  1.1228 +		TUint cacheOriginalMin = 0;
  1.1229 +		TUint cacheOriginalMax = 0;
  1.1230 +		TUint cacheCurrentSize = 0;
   1.1231 +		// store the original paging cache size settings
  1.1232 +		DPTest::CacheSize(cacheOriginalMin, cacheOriginalMax, cacheCurrentSize);
  1.1233 +		gMaxCacheSize = 256;
  1.1234 +		gMinCacheSize = 64;
  1.1235 +		test_KErrNone(DPTest::SetCacheSize(gMinCacheSize * gPageSize, gMaxCacheSize * gPageSize));
  1.1236 +
  1.1237 +		for (TUint totalThreads = 1 ; totalThreads <= 64 ; totalThreads *= 4)
  1.1238 +			{
  1.1239 +			for (TUint processes = 1 ; processes <= 16 && processes <= totalThreads ; processes *= 4)
  1.1240 +				{
  1.1241 +				TUint threads = totalThreads / processes;
  1.1242 +				for (TUint pages = gMaxCacheSize / 2 ; pages <= gMaxCacheSize * 2 ; pages *= 2)
  1.1243 +					{
  1.1244 +					for (TUint pin = 0 ; pin <= 1 ; ++pin)
  1.1245 +						{
  1.1246 +						test.Printf(_L("processes=%d threads=%d pages=%d maxcachesize=%d pin=%d\r\n"),processes, threads, pages, gMaxCacheSize,pin);
  1.1247 +						SoakTest(processes, threads, pages, pin, 3);
  1.1248 +						}
  1.1249 +					}
  1.1250 +				}
  1.1251 +			}
  1.1252 +
   1.1253 +		// Reset the cache size to normal
   1.1254 +		test.Next(_L("Soak test: Reset cache size to normal"));
   1.1255 +		test_KErrNone(DPTest::SetCacheSize(cacheOriginalMin, cacheOriginalMax));
  1.1256 +		}
  1.1257 +
  1.1258 +	test.End();
  1.1259 +	return 0;
  1.1260 +	}