Update contrib.
1 // Copyright (c) 2008-2009 Nokia Corporation and/or its subsidiary(-ies).
2 // All rights reserved.
3 // This component and the accompanying materials are made available
4 // under the terms of the License "Eclipse Public License v1.0"
5 // which accompanies this distribution, and is available
6 // at the URL "http://www.eclipse.org/legal/epl-v10.html".
8 // Initial Contributors:
9 // Nokia Corporation - initial contribution.
14 // e32test\demandpaging\t_datapaging.cpp
15 // Functional tests for data paging.
16 // 002 Test UserHeap::ChunkHeap data paging attributes
17 // 003 Test RThread::Create data paging attributes
21 //! @SYMTestCaseID KBASE-T_DATAPAGING
24 //! @SYMTestCaseDesc Data Paging functional tests.
25 //! @SYMTestActions 001 Test RChunk data paging attributes
26 //! @SYMTestExpectedResults All tests should pass.
27 //! @SYMTestPriority High
28 //! @SYMTestStatus Implemented
30 #define __E32TEST_EXTENSION__
38 #include <e32msgqueue.h>
39 #include <e32atomics.h>
43 #include "../mmu/mmudetect.h"
44 #include "../mmu/d_memorytest.h"
45 #include "../mmu/paging_info.h"
// Global RTest console object and the name of the global paged chunk shared
// between the processes spawned by the soak tests.
47 RTest test(_L("T_DATAPAGING"));
49 _LIT(KChunkName, "t_datapaging chunk");
// NOTE(review): intermediate lines are missing from this excerpt; the statement
// below appears to seed a TRandom instance from the thread id plus the nanokernel
// tick count (plus a constant) so each thread gets a distinct seed -- confirm
// against the full file.
68 iV = RThread().Id() + User::NTickCount() + 23;
// Pseudo-random number generator step; body not visible in this excerpt.
71 TUint32 TRandom::Next()
// Create a global, data-paged chunk of aSizeInPages pages and leave it open in
// the global gChunk handle.  aWipeByte sets the byte used to clear new memory;
// the default of -1 presumably keeps the kernel's default clear byte -- confirm
// against TChunkCreateInfo::SetClearByte documentation.
77 void CreatePagedChunk(TInt aSizeInPages, TInt aWipeByte = -1)
// Guard against a leaked handle from a previous test leg: gChunk must be closed.
79 test_Equal(0,gChunk.Handle());
81 TChunkCreateInfo createInfo;
82 TInt size = aSizeInPages * gPageSize;
// Committed size == max size, i.e. the whole chunk is committed up front.
83 createInfo.SetNormal(size, size);
84 createInfo.SetPaging(TChunkCreateInfo::EPaged);
85 createInfo.SetOwner(EOwnerProcess);
// Global so the soak-test child processes can OpenGlobal() it by name.
86 createInfo.SetGlobal(KChunkName);
88 createInfo.SetClearByte(aWipeByte);
89 test_KErrNone(gChunk.Create(createInfo));
90 test(gChunk.IsPaged()); // this is only ever called if data paging is supported
93 // The contents of a page is represented as type from enum below ORed with a byte value
// Layout: bits 0-7 carry the starting byte value, bits 8+ carry the fill type.
// ETypeUniform = every byte of the page equals the value; ETypeIncreasing =
// bytes increase by one per position, wrapping at 255 (see WritePage/ReadPage).
96 ETypeUniform = 0 << 8,
97 ETypeIncreasing = 1 << 8,
99 EContentValueMask = 255,
100 EContentTypeMask = 255 << 8
103 // Write to a page to page it in and verify its previous contents
// aExpectedContents describes what the page should currently hold and
// aNewContents what to fill it with, both encoded as type|value (see the
// content enum above).  Verification failures abort via test_Equal.
104 void WritePage(TInt aIndex, TUint aExpectedContents, TUint aNewContents)
106 test.Printf(_L(" %3d Write %x\n"), aIndex, aNewContents);
108 TUint oldType = aExpectedContents & EContentTypeMask;
109 TUint oldValue = aExpectedContents & EContentValueMask;
111 TUint type = aNewContents & EContentTypeMask;
112 TUint value = aNewContents & EContentValueMask;
114 TUint8* page = gChunk.Base() + (gPageSize * aIndex);
116 // write first byte first so page is paged in or rejuvenated with write permissions
// For each byte: check the old value, then (in lines not visible in this
// excerpt) store the new one; both sequences wrap at 255 for ETypeIncreasing.
119 for (TInt i = 0 ; i < gPageSize ; ++i)
122 test_Equal(oldValue, page[i]);
123 if (oldType == ETypeIncreasing)
124 oldValue = (oldValue + 1) & 255;
127 if (type == ETypeIncreasing)
128 value = (value + 1) & 255;
132 // Read a page and verify its contents
// aExpectedContents is encoded as type|value (see the content enum above).
// Reading page[i] also forces the page in if it was paged out.
133 void ReadPage(TInt aIndex, TUint aExpectedContents)
135 test.Printf(_L(" %3d Read %x\n"), aIndex, aExpectedContents);
136 TUint type = aExpectedContents & EContentTypeMask;
137 TUint value = aExpectedContents & EContentValueMask;
138 TUint8* page = gChunk.Base() + (gPageSize * aIndex);
139 for (TInt i = 0 ; i < gPageSize ; ++i)
141 test_Equal(value, page[i]);
142 if (type == ETypeIncreasing)
// ETypeIncreasing pages hold value, value+1, ... wrapping at 255.
143 value = (value + 1) & 255;
// Helper (enclosing signature not visible in this excerpt): flush the paging
// cache so the single test page is paged out -- or at least eligible to be.
149 test.Printf(_L(" PageOut\n"));
150 DPTest::FlushCache();
// Single-page functional test body: exercises every combination of
// clean/dirty contents crossing a page-out/page-in boundary, first via reads
// then via writes.  Uses a one-page paged chunk wiped to a known byte.
155 CreatePagedChunk(1, 0xed);
157 // Test initial contents (read)
158 ReadPage(0, ETypeUniform | 0xed);
160 // Test read initial contents after flush (may or may not actually been paged out)
162 ReadPage(0, ETypeUniform | 0xed);
164 // Test page out / page in (read) of dirty contents
165 WritePage(0, ETypeUniform | 0xed, ETypeIncreasing | 0x1a);
167 ReadPage(0, ETypeIncreasing | 0x1a);
169 // Test page out / page in (read) of clean contents
171 ReadPage(0, ETypeIncreasing | 0x1a);
173 // Test page out / page in (write) of dirty contents
174 WritePage(0, ETypeIncreasing | 0x1a, ETypeIncreasing | 0x23);
176 WritePage(0, ETypeIncreasing | 0x23, ETypeIncreasing | 0x45);
178 CLOSE_AND_WAIT(gChunk);
// Second pass with a different wipe byte, this time writing first.
179 CreatePagedChunk(1, 0x0d);
181 // Test initial contents (write)
182 WritePage(0, ETypeUniform | 0x0d, ETypeIncreasing | 0x1a);
184 // Test page out / page in (read) of dirty contents
186 ReadPage(0, ETypeIncreasing | 0x1a);
188 CLOSE_AND_WAIT(gChunk);
// Thread function: repeatedly flushes the paging cache then touches the page at
// aArg so the thread is likely to be mid page-IN when TestKillThread kills it.
191 TInt PageInThreadFunc(TAny* aArg)
193 TUint8* page = (TUint8*)aArg;
195 DPTest::FlushCache();
196 RDebug::Printf("Start page in...");
// volatile read forces the access; the page-in happens inside this load.
198 volatile TInt i = page[0];
200 RDebug::Printf(" done.");
// Thread function: dirties the page then flushes, so the thread is likely to
// be mid page-OUT (writing the dirty page to swap) when it is killed.
204 TInt PageOutThreadFunc(TAny* aArg)
206 TUint8* page = (TUint8*)aArg;
209 page[0] = 1; // make page dirty
210 RDebug::Printf("Start page out...");
211 DPTest::FlushCache();
212 RDebug::Printf(" done.");
// Repeatedly runs aFunc in a worker thread and kills the thread after a random
// 1-50ms delay, aiming to catch it in the middle of a paging operation.
// aFunc is PageInThreadFunc or PageOutThreadFunc.
216 void TestKillThread(TThreadFunction aFunc, TInt aIterations)
221 TUint8* page = gChunk.Base();
222 page[0] = 0; // make page dirty
223 DPTest::FlushCache();
224 for (TInt i = 0 ; i < aIterations ; ++i)
227 test_KErrNone(thread.Create(KNullDesC, aFunc, gPageSize, NULL, page));
228 TRequestStatus status;
229 thread.Logon(status);
// Random sleep (1-50 ms) before the kill, so the kill lands at varying points
// of the paging operation.  The kill itself is in lines not shown here.
231 User::AfterHighRes((random.Next() % 50 + 1) * 1000);
233 User::WaitForRequest(status);
// 123 is presumably the kill reason passed to Thread.Kill() in the elided
// code -- confirm against the full file.
234 test_Equal(123, status.Int());
235 CLOSE_AND_WAIT(thread);
237 CLOSE_AND_WAIT(gChunk);
238 User::After(1000000);
// Address of the first word of page aPage in the shared soak-test chunk.
// Word 0 of each page holds the page tag (see PageTag).
248 TUint32* PageBasePtr(TInt aPage)
250 return (TUint32*)(gChunk.Base() + (gPageSize * aPage));
// Each soak thread owns two consecutive words per page; this returns the first
// of thread aThreadIndex's pair, starting after the tag word (hence the +1).
253 TUint32* PageDataPtr(TInt aPage, TInt aThreadIndex)
255 return (TUint32*)((TUint8*)PageBasePtr(aPage) + ((aThreadIndex * 2 + 1) * sizeof(TUint32)));
// Distinctive constant value written into word 0 of each page so corruption
// or misplaced writes are detectable.
258 TUint32 PageTag(TInt aPage)
260 return 0x80000000 | aPage;
// Signal all soak workers to stop by filling the message queue; workers poll
// with ContinueSoakTest below.
263 void StopSoakTest(RMsgQueue<TInt> aMsgQueue)
265 while(aMsgQueue.Send(0) != KErrOverflow)
// Returns true while the queue is still empty, i.e. no stop signal yet.
269 TBool ContinueSoakTest(RMsgQueue<TInt> aMsgQueue)
272 return aMsgQueue.Receive(msg) == KErrUnderflow;
// Name of the global message queue used as the soak-test stop flag.
275 _LIT(KMsgQueueName, "t_datapaging_queue");
// Soak worker: repeatedly pins a random run of pages of the shared chunk via
// the memory-test LDD, sleeps briefly, then unpins -- stressing the
// interaction between pinning and paging while other threads read/write.
277 TInt PinPagesFunc(TAny* aArg)
279 SSoakTestArgs* args = (SSoakTestArgs*)aArg;
285 r = ldd.CreateVirtualPinObject();
289 RMsgQueue<TInt> msgQueue;
290 r = msgQueue.OpenGlobal(KMsgQueueName, EOwnerThread);
296 while (ContinueSoakTest(msgQueue))
// Pin between 1 and iPages/4 pages starting at a random page that keeps the
// run inside the chunk; sleep 1-20 ms while pinned.
298 TInt count = 1 + random.Next() % (args->iPages / 4);
299 TInt start = random.Next() % (args->iPages - count);
300 TInt sleepInMs = 1 + random.Next() % 20;
301 TUint32* ptr = PageBasePtr(start);
303 r = ldd.PinVirtualMemory((TLinAddr)ptr, count * gPageSize);
307 User::AfterHighRes(sleepInMs * 1000);
309 r = ldd.UnpinVirtualMemory();
318 r = ldd.DestroyVirtualPinObject();
323 RDebug::Printf(" thread %d performed %d iterations (pinning)", args->iThreadIndex, i);
// Reads *aPtr and compares against aExpected; on mismatch stops the whole soak
// test and logs the failure context.  Presumably returns EFalse on mismatch
// (the return statements are not visible in this excerpt).  aThread/aPage/
// aIteration/aLine are diagnostic context only.
327 TBool TestReadWord(TUint32* aPtr, TUint32 aExpected, TInt aThread, TInt aPage, TInt aIteration, TInt aLine, RMsgQueue<TInt> aMsgQueue)
// NOTE(review): 'aActual' is a local, not an argument -- the 'a' prefix is
// misleading under the Symbian naming convention.
329 TUint32 aActual = *aPtr;
330 if (aActual != aExpected)
332 StopSoakTest(aMsgQueue);
333 RDebug::Printf(" thread %d failure reading page %d at iteration %d address %08x: expected %08x but got %08x",
334 aThread, aPage, aIteration, aPtr, aExpected, aActual);
// Soak worker: each iteration picks a random page and either verifies the
// page tag plus this thread's two data words, or writes a fresh value into
// both words (checking the tag in between).  'contents' mirrors what this
// thread last wrote per page, so stale or lost writes are detected.
340 TInt SoakTestFunc(TAny* aArg)
342 SSoakTestArgs* args = (SSoakTestArgs*)aArg;
345 RMsgQueue<TInt> msgQueue;
346 TInt r = msgQueue.OpenGlobal(KMsgQueueName, EOwnerThread);
350 TUint32* contents = new TUint32[args->iPages];
351 if (contents == NULL)
// Initial contents are all zero to match the zero wipe byte used by SoakTest's
// CreatePagedChunk(aPages, 0).
353 Mem::Fill(contents, args->iPages * sizeof(TUint32), 0);
357 while (ContinueSoakTest(msgQueue))
359 TUint32 rand = random.Next();
360 TInt page = rand % args->iPages;
361 TUint32* ptr = PageDataPtr(page, args->iThreadIndex);
// Top bit of the random word selects read-check vs write action.
362 TInt action = rand >> 31;
365 if (!TestReadWord(PageBasePtr(page), PageTag(page), args->iThreadIndex, page, i, __LINE__, msgQueue))
367 if (!TestReadWord(&ptr[0], contents[page], args->iThreadIndex, page, i, __LINE__, msgQueue))
369 if (!TestReadWord(&ptr[1], contents[page], args->iThreadIndex, page, i, __LINE__, msgQueue))
// New value encodes the writing thread in the low bits so cross-thread
// corruption is attributable; high bits come from the previous value.
374 TUint newContents = args->iThreadIndex+0x100+(contents[page]&~0xff);
375 ptr[0] = newContents;
376 if (!TestReadWord(PageBasePtr(page), PageTag(page), args->iThreadIndex, page, i, __LINE__, msgQueue))
378 if (!TestReadWord(&ptr[1], contents[page], args->iThreadIndex, page, i, __LINE__, msgQueue))
380 ptr[1] = newContents;
381 contents[page] = newContents;
// Final pass after the stop signal: verify every page one last time.
386 for (TInt j = 0 ; j < args->iPages ; ++j)
388 TUint32* ptr = PageDataPtr(j, args->iThreadIndex);
389 if (!TestReadWord(PageBasePtr(j), PageTag(j), args->iThreadIndex, j, i, __LINE__, msgQueue))
391 if (!TestReadWord(&ptr[0], contents[j], args->iThreadIndex, j, i, __LINE__, msgQueue))
393 if (!TestReadWord(&ptr[1], contents[j], args->iThreadIndex, j, i, __LINE__, msgQueue))
400 RDebug::Printf(" thread %d performed %d iterations", args->iThreadIndex, i);
// Body of one soak-test child process: opens the shared chunk, spawns aThreads
// SoakTestFunc workers (plus one PinPagesFunc worker if aPinPages), then waits
// for them all and reports overall success.
404 TInt SoakProcess(TInt aProcessIndex, TInt aThreads, TInt aPages, TBool aPinPages)
// If pinning is enabled the extra pin thread takes the last index (note the
// post-increment of aThreads).
406 TInt pinThreadIndex = aPinPages ? aThreads++ : -1;
408 test_KErrNone(gChunk.OpenGlobal(KChunkName, EFalse));
410 SSoakTestArgs* testArgs = new SSoakTestArgs[aThreads];
411 test_NotNull(testArgs);
413 RThread* threads = new RThread[aThreads];
414 test_NotNull(threads);
416 TRequestStatus* statuses = new TRequestStatus[aThreads];
417 test_NotNull(statuses);
420 for (i = 0 ; i < aThreads ; ++i)
// Thread indices are globally unique across processes so each thread's word
// pair in a page (see PageDataPtr) is distinct.
422 testArgs[i].iThreadIndex = aProcessIndex * aThreads + i;
423 testArgs[i].iPages = aPages;
424 TThreadFunction func = i == pinThreadIndex ? PinPagesFunc : SoakTestFunc;
425 test_KErrNone(threads[i].Create(KNullDesC, func, gPageSize, NULL, &testArgs[i]));
426 threads[i].Logon(statuses[i]);
429 // todo: rendezvous here?
431 for (i = 0 ; i < aThreads ; ++i)
435 for (i = 0 ; i < aThreads ; ++i)
437 User::WaitForRequest(statuses[i]);
// Any panic or non-KErrNone exit marks the whole process as failed.
438 if (threads[i].ExitType() != EExitKill || statuses[i].Int() != KErrNone)
448 return ok ? KErrNone : KErrGeneral;
// Entry point when t_datapaging is launched as a soak child: parses
// "<index> <threads> <pages> <pin>" from the process command line (written by
// SoakTest via AppendFormat) and runs SoakProcess with those values.
451 TInt RunSoakProcess()
454 if (User::CommandLineLength() > buf.MaxLength())
456 User::CommandLine(buf);
460 TInt r = lex.Val(index);
466 r = lex.Val(threads);
478 r = lex.Val(pinPages);
482 return SoakProcess(index, threads, pages, pinPages);
// Multi-process soak test driver: creates the shared paged chunk and message
// queue, spawns aProcesses copies of this executable (each running
// RunSoakProcess), lets them hammer the chunk for aDurationInSeconds, then
// stops them, checks their exit status and dumps the final page contents.
485 void SoakTest(TInt aProcesses, TInt aThreads, TInt aPages, TBool aPinPages, TInt aDurationInSeconds)
487 RDebug::Printf("Soak test: %d processes, %d threads, %d pages, %s pinning for %d seconds",
488 aProcesses, aThreads, aPages, (aPinPages ? "with" : "without"), aDurationInSeconds);
489 DPTest::FlushCache();
491 TInt totalThreads = (aThreads + (aPinPages ? 1 : 0)) * aProcesses;
492 test(totalThreads < 512); // each thread uses two words in a page
494 TMediaPagingStats dummy=EMediaPagingStatsRomAndCode;
495 PagingInfo::ResetBenchmarks(-1, dummy); // Don't worry about locmedia stats.
// Queue capacity == totalThreads so StopSoakTest can fill it and every worker
// sees the stop signal.
497 RMsgQueue<TInt> msgQueue;
498 test_KErrNone(msgQueue.CreateGlobal(KMsgQueueName, totalThreads, EOwnerThread));
// Zero wipe byte matches the workers' initial 'contents' mirror of zero.
500 CreatePagedChunk(aPages, 0);
502 for (i = 0 ; i < aPages ; ++i)
503 *PageBasePtr(i) = PageTag(i);
505 RProcess* processes = new RProcess[aProcesses];
506 TRequestStatus* statuses = new TRequestStatus[aProcesses];
507 for (i = 0 ; i < aProcesses ; ++i)
// Command line parsed back by RunSoakProcess in the child.
510 args.AppendFormat(_L("%d %d %d %d"), i, aThreads, aPages, aPinPages);
511 test_KErrNone(processes[i].Create(_L("t_datapaging"), args));
512 processes[i].Logon(statuses[i]);
515 RThread().SetPriority(EPriorityMore); // so we don't get starved of CPU by worker threads
517 for (i = 0 ; i < aProcesses ; ++i)
518 processes[i].Resume();
520 User::After(aDurationInSeconds * 1000000);
521 StopSoakTest(msgQueue);
524 for (i = 0 ; i < aProcesses ; ++i)
526 User::WaitForRequest(statuses[i]);
527 if (processes[i].ExitType() != EExitKill || statuses[i].Int() != KErrNone)
530 RDebug::Printf(" process %i died with %d,%d", i, processes[i].ExitType(), statuses[i].Int());
532 processes[i].Close();
535 RThread().SetPriority(EPriorityNormal);
// Diagnostic dump of the tag word and every thread's word pair for each page.
539 for (i = 0 ; i < aPages ; ++i)
541 test.Printf(_L("%3d %08x"), i, *PageBasePtr(i));
542 for (TInt j = 0 ; j < totalThreads ; ++j)
544 TUint32* ptr = PageDataPtr(i, j);
545 test.Printf(_L(" %08x,%08x"), ptr[0], ptr[1]);
547 test.Printf(_L("\n"), i);
554 User::After(1000000);
555 RDebug::Printf(" done");
556 RDebug::Printf("\n");
562 PagingInfo::PrintBenchmarks(-1, dummy); // Don't worry about locmedia stats.
// Commit one page of a disconnected chunk at the given page index.
// RChunk is passed by value; Symbian R-handles are small handle wrappers, so
// this copies only the handle, not the chunk.
565 void CommitPage(RChunk chunk, TInt aPageIndex)
567 test_KErrNone(chunk.Commit(aPageIndex * gPageSize, gPageSize));
// Decommit one page of a disconnected chunk at the given page index.
570 void DecommitPage(RChunk chunk, TInt aPageIndex)
572 test_KErrNone(chunk.Decommit(aPageIndex * gPageSize, gPageSize));
// Drain pending kernel-side notifier activity before checking notifier results.
575 void WaitForNotifiers()
577 // wait until notifiers have had chance to signal us...
578 UserSvr::HalFunction(EHalGroupKernel, EKernelHalSupervisorBarrier, 0, 0);
// Swap HAL test body (the enclosing function signature is not visible in this
// excerpt).  Verifies EVMHalGetSwapInfo accounting as pages are committed/
// decommitted, and EVMHalSetSwapThresholds validation plus the low/good
// threshold change notifications.
583 test.Next(_L("Test EVMHalGetSwapInfo"));
585 TChunkCreateInfo createInfo;
// Disconnected chunk: nothing committed up front so swap usage is controlled
// one page at a time via CommitPage/DecommitPage.
586 createInfo.SetDisconnected(0, 0, 256 * gPageSize);
587 createInfo.SetPaging(TChunkCreateInfo::EPaged);
589 test_KErrNone(chunk.Create(createInfo));
590 if (gDataPagingSupported)
591 test(chunk.IsPaged());
593 SVMSwapInfo swapInfo;
594 test_KErrNone(UserSvr::HalFunction(EHalGroupVM, EVMHalGetSwapInfo, &swapInfo, 0));
595 test(swapInfo.iSwapFree <= swapInfo.iSwapSize);
596 test.Printf(_L(" Swap size == 0x%x bytes\n"), swapInfo.iSwapSize);
597 test.Printf(_L(" Swap free == 0x%x bytes\n"), swapInfo.iSwapFree);
598 if (!gDataPagingSupported)
600 test_Equal(0, swapInfo.iSwapSize);
604 test(swapInfo.iSwapSize != 0);
// Committing a paged page reserves one page of swap...
606 CommitPage(chunk, 0);
607 SVMSwapInfo swapInfo2;
608 test_KErrNone(UserSvr::HalFunction(EHalGroupVM, EVMHalGetSwapInfo, &swapInfo2, 0));
609 test_Equal(swapInfo.iSwapSize, swapInfo2.iSwapSize);
610 test_Equal(swapInfo.iSwapFree - gPageSize, swapInfo2.iSwapFree);
// ...and decommitting releases it again.
612 DecommitPage(chunk, 0);
613 test_KErrNone(UserSvr::HalFunction(EHalGroupVM, EVMHalGetSwapInfo, &swapInfo2, 0));
614 test_Equal(swapInfo.iSwapSize, swapInfo2.iSwapSize);
615 test_Equal(swapInfo.iSwapFree, swapInfo2.iSwapFree);
617 // Test that closing the chunk releases the swap page.
618 CommitPage(chunk, 0);
619 test_KErrNone(UserSvr::HalFunction(EHalGroupVM, EVMHalGetSwapInfo, &swapInfo2, 0));
620 test_Equal(swapInfo.iSwapSize, swapInfo2.iSwapSize);
621 test_Equal(swapInfo.iSwapFree - gPageSize, swapInfo2.iSwapFree);
624 test_KErrNone(UserSvr::HalFunction(EHalGroupVM, EVMHalGetSwapInfo, &swapInfo2, 0));
625 test_Equal(swapInfo.iSwapSize, swapInfo2.iSwapSize);
626 test_Equal(swapInfo.iSwapFree, swapInfo2.iSwapFree);
628 // Chunk must be created for rest of testing.
629 test_KErrNone(chunk.Create(createInfo));
630 if (gDataPagingSupported)
631 test(chunk.IsPaged());
634 // EVMHalSetSwapThresholds,
635 test.Next(_L("Test EVMHalSetSwapThresholds"));
636 SVMSwapThresholds thresholds;
// Invalid: low threshold above good threshold must be rejected.
637 thresholds.iLowThreshold = 1;
638 thresholds.iGoodThreshold = 0;
639 test_Equal(KErrArgument, UserSvr::HalFunction(EHalGroupVM, EVMHalSetSwapThresholds, &thresholds, 0));
// Invalid: thresholds beyond the swap size must be rejected.
640 thresholds.iLowThreshold = swapInfo.iSwapSize + 1;
641 thresholds.iGoodThreshold = swapInfo.iSwapSize + 1;
642 test_Equal(KErrArgument, UserSvr::HalFunction(EHalGroupVM, EVMHalSetSwapThresholds, &thresholds, 0));
// Boundary values (0 and iSwapSize) are valid.
643 thresholds.iLowThreshold = 0;
644 thresholds.iGoodThreshold = 0;
645 test_KErrNone(UserSvr::HalFunction(EHalGroupVM, EVMHalSetSwapThresholds, &thresholds, 0));
646 thresholds.iLowThreshold = swapInfo.iSwapSize;
647 thresholds.iGoodThreshold = swapInfo.iSwapSize;
648 test_KErrNone(UserSvr::HalFunction(EHalGroupVM, EVMHalSetSwapThresholds, &thresholds, 0));
650 // test thresholds trigger ok
652 RChangeNotifier changes;
653 test_KErrNone(changes.Create());
654 TRequestStatus status;
// First logon completes immediately with the initial change set; re-logon to
// wait for genuine changes.
655 test_KErrNone(changes.Logon(status));
656 User::WaitForRequest(status);
657 test_KErrNone(changes.Logon(status));
658 test_Equal(KRequestPending, status.Int());
// Place the low threshold two pages below current free swap so committing a
// third page crosses it.
660 thresholds.iLowThreshold = swapInfo.iSwapFree - 2 * gPageSize;
661 thresholds.iGoodThreshold = swapInfo.iSwapFree - gPageSize;
662 test_KErrNone(UserSvr::HalFunction(EHalGroupVM, EVMHalSetSwapThresholds, &thresholds, 0));
664 CommitPage(chunk, 0);
665 CommitPage(chunk, 1);
667 test_Equal(KRequestPending, status.Int());
// Third page crosses the low threshold -> low-memory notification.
668 CommitPage(chunk, 2);
670 test_Equal(EChangesFreeMemory | EChangesLowMemory, status.Int());
671 User::WaitForRequest(status);
673 test_KErrNone(changes.Logon(status));
674 DecommitPage(chunk, 2);
676 test_Equal(KRequestPending, status.Int());
// Freeing back past the good threshold -> free-memory notification only.
677 DecommitPage(chunk, 1);
679 test_Equal(EChangesFreeMemory, status.Int());
680 User::WaitForRequest(status);
681 DecommitPage(chunk, 0);
683 CLOSE_AND_WAIT(changes);
685 // leave some sensible thresholds set
686 thresholds.iLowThreshold = (10 * swapInfo.iSwapSize) / 100;
687 thresholds.iGoodThreshold = (20 * swapInfo.iSwapSize) / 100;
688 test_KErrNone(UserSvr::HalFunction(EHalGroupVM, EVMHalSetSwapThresholds, &thresholds, 0));
690 CLOSE_AND_WAIT(chunk);
// When data paging is not supported the swap HAL functions must report
// KErrNotSupported rather than returning bogus data.
693 void TestSwapHalNotSupported()
695 test_Equal(KErrNotSupported, UserSvr::HalFunction(EHalGroupVM, EVMHalGetSwapInfo, 0, 0));
696 test_Equal(KErrNotSupported, UserSvr::HalFunction(EHalGroupVM, EVMHalSetSwapThresholds, 0, 0));
// Dispatcher fragment: run the supported-path swap HAL test when data paging
// is available, otherwise the not-supported checks above.
701 if (gDataPagingSupported)
704 TestSwapHalNotSupported();
// Flag (shared, unsynchronised by design) telling StealThread when it should
// flush the paging cache to force page stealing during the decommit loop.
708 TBool gStealEnable = false;
// Thread: repeatedly dirties pages of gChunk then shrinks/regrows it, racing
// against StealThread's cache flushes to exercise decommit vs page-steal.
710 TInt DecommitThread(TAny*)
712 RThread().SetPriority(EPriorityLess); // so this thread gets pre-empted by StealThread
713 TUint8* base = gChunk.Base();
714 TInt size = gChunk.MaxSize();
718 for(TInt i=0; i<size; i+=gPageSize)
723 gStealEnable = false;
// Recommit the whole chunk for the next round.
725 TInt r = gChunk.Adjust(size);
// Thread: busy loop that yields then flushes the paging cache (stealing pages)
// whenever DecommitThread enables it.
732 TInt StealThread(TAny*)
737 User::AfterHighRes(0);
738 DPTest::FlushCache();
// Runs DecommitThread and StealThread against the same paged chunk for
// aSeconds seconds, then stops both (kill reason 123, in code elided from this
// excerpt) and checks everything shut down cleanly.  Exercises the race
// between chunk decommit and paging-cache page stealing.
743 void TestDecommitAndStealInteraction(TInt aSeconds)
747 CreatePagedChunk(256);
750 test_KErrNone(thread1.Create(_L("DecommitThread"), DecommitThread, gPageSize, NULL, 0));
751 TRequestStatus status1;
752 thread1.Logon(status1);
755 test_KErrNone(thread2.Create(_L("StealThread"), StealThread, gPageSize, NULL, 0));
756 TRequestStatus status2;
// BUG FIX: was 'thread1.Logon(status2)' -- thread1 is already logged on via
// status1 above; status2 is waited on below as thread2's exit status, so the
// logon must be against thread2.
757 thread2.Logon(status2);
758 test_KErrNone(timer.CreateLocal());
761 TRequestStatus timeoutStatus;
762 timer.After(timeoutStatus,aSeconds*1000000);
766 User::WaitForAnyRequest();
// Both threads are expected to exit with reason 123 (the kill reason used by
// the elided shutdown code).
769 User::WaitForRequest(status1);
770 test_Equal(123, status1.Int());
771 CLOSE_AND_WAIT(thread1);
774 User::WaitForRequest(status2);
775 test_Equal(123, status2.Int());
776 CLOSE_AND_WAIT(thread2);
778 CLOSE_AND_WAIT(timer);
779 test_KErrNone(timeoutStatus.Int());
781 CLOSE_AND_WAIT(gChunk);
// High-priority thread: repeatedly flushes the paging cache at random short
// intervals so the 64-bit word under test keeps getting paged out while the
// worker threads operate on it atomically.
785 TInt ThreadAtomic64Flush(TAny*)
787 TInt64 seed = 0x33333333;
790 DPTest::FlushCache();
791 User::After(Math::Rand(seed) & 0x48);
// Worker: toggles the whole 64-bit word between all-ones and zero using
// compare-and-swap, recording sets/clears for the final consistency check.
813 TInt ThreadAtomic64Cas(TAny* aArgs)
815 SAtomic64Args& args = *(SAtomic64Args*)aArgs;
816 for (TUint i = 0; i < args.iIters; i++)
818 TUint64 setMask = UI64LIT(0xffffffffffffffff);
820 if (__e32_atomic_cas_ord64(args.iData, &setMask, clrMask))
// CAS failure writes the observed value into setMask, so restore it.
822 // Undo any clearing of setMask which will happen if iData is 0.
823 setMask = UI64LIT(0xffffffffffffffff);
824 if (__e32_atomic_cas_ord64(args.iData, &clrMask, setMask))
// Worker: sets/clears/toggles individual bits of the shared word with atomic
// and/or/xor/axo, counting per-bit sets and clears.
831 TInt ThreadAtomic64Logic(TAny* aArgs)
834 SAtomic64Args& args = *(SAtomic64Args*)aArgs;
835 for(TUint i = 0; i < args.iIters; i++)
836 TUint bitNo = (i & 0x3f);
// 64-bit-safe single-bit mask (cast before shifting, bitNo can be up to 63).
838 TUint64 bitMask = ((TUint64)1) << bitNo;
839 TUint64 andMask = ~bitMask;
841 TUint64 old = __e32_atomic_and_ord64(args.iData, andMask);
843 args.iClears[bitNo]++;
845 old = __e32_atomic_ior_ord64(args.iData, bitMask);
846 if (!(old & bitMask))
849 old = __e32_atomic_xor_ord64(args.iData, bitMask);
851 args.iClears[bitNo]++;
855 old = __e32_atomic_axo_ord64(args.iData, UI64LIT(0xffffffffffffffff), bitMask);
857 args.iClears[bitNo]++;
// Worker: exercises atomic add plus threshold-add (tau) and threshold-store
// (tas), accumulating the expected net increment in iIncs.
866 TInt ThreadAtomic64Add(TAny* aArgs)
869 SAtomic64Args& args = *(SAtomic64Args*)aArgs;
870 for(TUint i = 0; i < args.iIters; i++)
872 TUint64 old = __e32_atomic_add_ord64(args.iData, 1);
874 old = __e32_atomic_tau_ord64(args.iData, 1000, 1, 2);
875 args.iIncs += (old >= 1000)? 1 : 2;
876 old = __e32_atomic_tas_ord64(args.iData, 1000, 1, -1);
877 args.iIncs += (old >= 1000)? 1 : -1;
// TestAtomic64 body (enclosing signature elided from this excerpt): places a
// single 64-bit word in a paged chunk, runs 25 worker threads per test step
// (add / logic / cas) against it while a high-priority thread keeps flushing
// the paging cache, then cross-checks the per-thread counters against the
// word's final value.
885 CreatePagedChunk(sizeof(TUint64));
886 TUint64* data = (TUint64*)gChunk.Base();
888 const TUint KThreads = 25;
889 RThread threads[KThreads];
890 TRequestStatus stats[KThreads];
891 SAtomic64Args* args = new SAtomic64Args[KThreads];
894 for (TInt testStep = EAtomic64Add; testStep < EAtomic64Steps; testStep++)
899 test.Next(_L("Test 64-bit atomic addition operations"));
902 test.Next(_L("Test 64-bit atomic logic operations"));
905 test.Next(_L("Test 64-bit atomic cas operations"));
// The flusher runs at absolute high priority so cache flushes pre-empt the
// workers mid-operation.
910 test_KErrNone(threadFlush.Create(_L("ThreadAtomicFlush"), ThreadAtomic64Flush, gPageSize, NULL, NULL));
911 TRequestStatus status1;
912 threadFlush.Logon(status1);
913 threadFlush.SetPriority(EPriorityAbsoluteHigh);
915 memclr(args, sizeof(SAtomic64Args)*KThreads);
917 for (; i < KThreads; i++)
919 args[i].iIters = 10000;
920 args[i].iData = data;
924 test_KErrNone(threads[i].Create(KNullDesC, ThreadAtomic64Add, gPageSize, NULL, (TAny*)&args[i]));
927 test_KErrNone(threads[i].Create(KNullDesC, ThreadAtomic64Logic, gPageSize, NULL, (TAny*)&args[i]));
930 test_KErrNone(threads[i].Create(KNullDesC, ThreadAtomic64Cas, gPageSize, NULL, (TAny*)&args[i]));
933 threads[i].Logon(stats[i]);
935 threadFlush.Resume();
936 for (i = 0; i < KThreads; i++)
941 // Wait for add threads to complete and kill flushing thread.
942 for (i = 0; i < KThreads; i++)
944 User::WaitForRequest(stats[i]);
945 test_KErrNone(stats[i].Int());
947 threadFlush.Kill(KErrNone);
948 User::WaitForRequest(status1);
949 test_KErrNone(status1.Int());
// Add step: final value must equal the sum of every thread's recorded
// increments.
955 for (TUint i = 0; i < KThreads; i++)
958 expected += args[i].iIncs;
// Logic step: per-bit set/clear counts from all threads must be consistent
// with the bit's final state.
965 TUint totalClears[64];
966 memclr(totalSets, sizeof(TUint)*64);
967 memclr(totalClears, sizeof(TUint)*64);
968 for (TUint i = 0; i < KThreads; i++)
971 for (TUint j = 0; j < 64; j++)
973 totalSets[j] += args[i].iSets[j];
974 totalClears[j] += args[i].iClears[j];
977 for (TUint j = 0; j < 64; j++)
// BUG FIX: was 'TUint64 bitMask = 1 << j;' -- shifting the int literal 1 by
// j >= 32 is undefined behaviour and yields the wrong mask for the upper 32
// bits.  Cast to TUint64 before shifting, matching ThreadAtomic64Logic above.
979 TUint64 bitMask = ((TUint64)1) << j;
980 if (totalSets[j] > totalClears[j])
// A bit left set must have exactly one more set than clear.
982 test_Equal(totalSets[j] - 1, totalClears[j]);
986 {// Can only clear a bit if it was previously set.
987 test_Equal(totalClears[j], totalSets[j]);
// Cas step: whole-word set/clear counts must match the final value.
995 TUint totalClears = 0;
996 for (TUint i = 0; i < KThreads; i++)
999 totalSets += args[i].iSets[0];
1000 totalClears += args[i].iClears[0];
1002 if (totalSets > totalClears)
1004 test_Equal(totalSets - 1, totalClears);
1005 expected = UI64LIT(0xffffffffffffffff);
1008 {// Can only clear a word if it was previously set.
1009 test_Equal(totalClears, totalSets);
1014 test_Equal(expected, *data);
1015 CLOSE_AND_WAIT(threadFlush);
1018 CLOSE_AND_WAIT(gChunk);
1023 // soak test for writeable paged code...
1026 const TUint KCodeStride = 20; // spacing between generated code
// Emit the entry step of the generated code chain at aCode: load aInit into
// the accumulator register and branch to aTarget.  Per-architecture machine
// code; each step must fit within KCodeStride bytes (checked at compile time).
1028 void CodeStart(TUint8* aCode, TUint8* aTarget, TUint32 aInit)
1030 #if defined(__CPU_X86)
1031 aCode[0] = 0xb8; *(TUint32*)&(aCode[1]) = aInit; // mov eax,aInit
1032 aCode[5] = 0xe9; *(TUint32*)&(aCode[6]) = aTarget-(aCode+10); // jmp aTarget
1033 __ASSERT_COMPILE(KCodeStride>=10);
1035 #elif defined(__CPU_ARM)
1036 *(TUint32*)&(aCode[0]) = 0xe59f0000; // ldr r0, [pc, #0]
// ARM B-instruction offset is a signed 24-bit word count; check the branch
// distance fits (the sign-extension bits must be uniform).
1037 TInt32 offset = (aTarget-aCode-4-8)/4;
1038 if(offset&0xff000000u)
1040 offset ^= 0xff000000u;
1041 test_Equal(0,offset&0xff000000u);
1043 *(TUint32*)&(aCode[4]) = 0xea000000|offset; // b aTarget
1044 *(TUint32*)&(aCode[8]) = aInit; // dcd aInit
1045 __ASSERT_COMPILE(KCodeStride>=12);
// Emit one intermediate step of the generated code chain at aCode: rotate the
// accumulator, add aAdd, then branch to aTarget (the next step).
1053 void CodeStep(TUint8* aCode, TUint8* aTarget, TUint32 aAdd)
1055 #if defined(__CPU_X86)
1056 aCode[0] = 0xd1; aCode[1] = 0xc0; // rol eax, 1
1057 aCode[2] = 0x05; *(TUint32*)&(aCode[3]) = aAdd; // add eax, aAdd
1058 aCode[7] = 0xe9; *(TUint32*)&(aCode[8]) = aTarget-(aCode+12); // jmp aTarget
1059 __ASSERT_COMPILE(KCodeStride>=12);
1061 #elif defined(__CPU_ARM)
1062 *(TUint32*)&(aCode[0]) = 0xe1a00fe0; // ror r0, r0, #31
1063 *(TUint32*)&(aCode[4]) = 0xe59f1004; // ldr r1, [pc, #4]
1064 *(TUint32*)&(aCode[8]) = 0xe0800001; // add r0, r0, r1
// Same signed 24-bit branch-range check as in CodeStart.
1065 TInt32 offset = (aTarget-aCode-12-8)/4;
1066 if(offset&0xff000000u)
1068 offset ^= 0xff000000u;
1069 test_Equal(0,offset&0xff000000u);
1071 *(TUint32*)&(aCode[12]) = 0xea000000|offset; // b aTarget
1072 *(TUint32*)&(aCode[16]) = aAdd; // dcd aAdd
1073 __ASSERT_COMPILE(KCodeStride>=20);
// Emit the final step of the generated code chain: return to the caller with
// the accumulated value in the return register.
1081 void CodeEnd(TUint8* aCode)
1083 #if defined(__CPU_X86)
1084 aCode[0] = 0xc3; // ret
1085 __ASSERT_COMPILE(KCodeStride>=1);
1087 #elif defined(__CPU_ARM)
1088 *(TUint32*)&(aCode[0]) = 0xe12fff1e; // bx lr
1089 __ASSERT_COMPILE(KCodeStride>=4);
// Tests executing code from writeable paged memory: fills a paged code chunk
// with a randomly-shuffled chain of generated code steps spanning every page
// (so execution keeps paging code in), runs it, and checks the computed value
// matches a software replay of the same chain.
1097 void TestExecutableMemory()
1101 #if defined(__CPU_ARM)
1102 const TUint KMaxChunkSize = 31*1024*1024; // ARM branch instruction limit
1104 const TUint KMaxChunkSize = 1024*1024*1024; // 1GB
1106 const TUint KMaxPages = KMaxChunkSize/gPageSize;
// Twice the paging cache size guarantees the chain cannot be fully resident.
1107 TUint sizeInPages = gMaxCacheSize*2;
1108 if(sizeInPages>KMaxPages)
1109 sizeInPages = KMaxPages;
1111 // create code chunk...
1112 test.Start(_L("Create code chunk"));
1113 TChunkCreateInfo createInfo;
1114 TInt size = sizeInPages * gPageSize;
1115 createInfo.SetCode(size, size);
1116 createInfo.SetPaging(TChunkCreateInfo::EPaged);
1117 createInfo.SetClearByte(0);
1119 test_KErrNone(chunk.Create(createInfo));
1120 test(chunk.IsPaged()); // this is only ever called if data paging is supported
1121 TUint8* base = chunk.Base();
1123 // create code path through the pages in the chunk with quadratic distribution...
1124 test.Next(_L("Weave path"));
1125 TInt pathLength = 0;
1126 const TUint maxStepsPerPage = gPageSize/KCodeStride;
1127 const TInt maxPathLength = sizeInPages*maxStepsPerPage;
1128 TUint8** path = (TUint8**)User::Alloc(maxPathLength*sizeof(TUint8*));
1130 for(TUint page=0; page<sizeInPages; ++page)
// Later pages get more steps (quadratic ramp), at least one step per page.
1132 TUint step = (maxStepsPerPage-1)*(page*page)/(sizeInPages*sizeInPages)+1;
1133 do path[pathLength++] = base+page*gPageSize+step*KCodeStride;
// Shuffle the path with a linear-congruential PRNG so consecutive steps land
// on unrelated pages.
1136 TUint32 rand = 0x12345678;
1137 for(TUint scramble=pathLength*4; scramble>0; --scramble)
1139 // swap random pair of entries on path...
1140 TUint i = (TUint)(TUint64(TUint64(rand)*TUint64(pathLength))>>32);
1141 rand = rand*69069+1;
1142 TUint j = (TUint)(TUint64(TUint64(rand)*TUint64(pathLength))>>32);
1143 rand = rand*69069+1;
1144 TUint8* t = path[i];
1149 // write code to generated path...
1150 test.Next(_L("Write code"));
// The chain is written back-to-front: last path entry is the entry point.
1152 TUint32 (*code)() = (TUint32 (*)())path[pathLength-1];
1153 CodeStart(path[pathLength-1],path[pathLength-2],a);
1154 while(--pathLength>1)
1156 rand = rand*69069+1;
1157 CodeStep(path[pathLength-1],path[pathLength-2],rand);
1163 test_Equal(0,pathLength);
1164 test.Next(_L("IMB"));
// Instruction memory barrier: required before executing freshly-written code.
1165 User::IMB_Range(base,base+chunk.Size());
1168 TMediaPagingStats dummy=EMediaPagingStatsRomAndCode;
1169 PagingInfo::ResetBenchmarks(-1, dummy); // Don't worry about locmedia stats.
1170 test.Next(_L("Execute code"));
// 'a' holds the expected result accumulated by the elided replay loop.
1171 TUint32 result = code();
1172 test_Equal(a,result);
1173 PagingInfo::PrintBenchmarks(-1, dummy); // Don't worry about locmedia stats.
1176 test.Next(_L("Cleanup"));
1178 CLOSE_AND_WAIT(chunk);
1182 UserSvr::HalFunction(EHalGroupKernel, EKernelHalSupervisorBarrier, 0, 0);
// E32Main body fragment: dispatches to RunSoakProcess when launched as a soak
// child (non-empty command line), otherwise runs the full functional test
// sequence followed by the soak-test matrix under a minimal paging cache.
1190 test_KErrNone(UserHal::PageSizeInBytes(gPageSize));
// A non-empty command line means we are a child spawned by SoakTest.
1192 if (User::CommandLineLength() != 0)
1193 return RunSoakProcess();
1196 test_KErrNone(GetGlobalPolicies());
1198 test.Start(_L("Test HAL APIs"));
1201 if (gDataPagingSupported)
1203 test.Next(_L("Test reading and writing to a single page"));
1206 test.Next(_L("Test 64-bit atomic operations are atomic with paged out data"));
1209 test.Next(_L("Test interaction between decommit and steal"));
1210 TestDecommitAndStealInteraction(10);
1212 test.Next(_L("Test killing a thread while it's paging in"));
1213 TestKillThread(PageInThreadFunc, 200);
1215 test.Next(_L("Test killing a thread while it's paging out"));
1216 TestKillThread(PageOutThreadFunc, 200);
1218 test.Next(_L("Test executable memory"));
1219 TestExecutableMemory();
1221 test.Next(_L("Soak tests"));
1222 DPTest::FlushCache();
1224 test.Next(_L("Soak test: change maximum cache size to minimal"));
1225 TUint cacheOriginalMin = 0;
1226 TUint cacheOriginalMax = 0;
1227 TUint cacheCurrentSize = 0;
1228 //store original values
1229 DPTest::CacheSize(cacheOriginalMin, cacheOriginalMax, cacheCurrentSize);
// Shrink the cache to 256 pages so paging pressure is high during the soak.
1230 gMaxCacheSize = 256;
1232 test_KErrNone(DPTest::SetCacheSize(gMinCacheSize * gPageSize, gMaxCacheSize * gPageSize));
// Soak matrix: total threads 1..64, split across 1..16 processes, with page
// counts straddling the cache size, with and without pinning, 3s each.
1234 for (TUint totalThreads = 1 ; totalThreads <= 64 ; totalThreads *= 4)
1236 for (TUint processes = 1 ; processes <= 16 && processes <= totalThreads ; processes *= 4)
1238 TUint threads = totalThreads / processes;
1239 for (TUint pages = gMaxCacheSize / 2 ; pages <= gMaxCacheSize * 2 ; pages *= 2)
1241 for (TUint pin = 0 ; pin <= 1 ; ++pin)
1243 test.Printf(_L("processes=%d threads=%d pages=%d maxcachesize=%d pin=%d\r\n"),processes, threads, pages, gMaxCacheSize,pin);
1244 SoakTest(processes, threads, pages, pin, 3);
1250 //Reset the cache size to normal
1251 test.Next(_L("Soak test: Reset cache size to normal"));
1252 test_KErrNone(DPTest::SetCacheSize(cacheOriginalMin, cacheOriginalMax));