// Copyright (c) 2005-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32test\misc\t_cputime.cpp
// Tests User::FastCounter() and RThread::GetCpuTime()
// Note: This test only works on the emulator when run in textshell mode. The
// reason for this is that it assumes that it will be able to use 100% of CPU
// time, but when techview is starting up there are many other threads consuming
// CPU time.
//
//

#include <e32test.h>
#include <e32svr.h>
#include <u32hal.h>
#include <hal.h>
#ifdef __WINS__
#include <e32wins.h>
#endif

RTest test(_L("T_CPUTIME"));

_LIT(KUp, "up");
_LIT(KDown, "down");

const TInt KLongWait = 3000000;   // 3 seconds
const TInt KShortWait = 100000;   // 0.1 seconds
const TInt KTolerance = 1000;     // 1 ms
const TInt numCpus = UserSvr::HalFunction(EHalGroupKernel, EKernelHalNumLogicalCpus, 0, 0);

#define FailIfError(EXPR) \
	{ \
	TInt aErr = (EXPR); \
	if (aErr != KErrNone) \
		{ \
		test.Printf(_L("Return code == %d\n"), aErr); \
		test(EFalse); \
		} \
	}

class TThreadParam
	{
public:
	TInt iCpu;
	RSemaphore iSem;
	};

TBool GetCpuTimeIsSupported()
	{
	RThread thread;
	TTimeIntervalMicroSeconds time;
	TInt err = thread.GetCpuTime(time);
	test(err == KErrNone || err == KErrNotSupported);
	return err == KErrNone;
	}

TInt SetCpuAffinity(TInt aCore)
	{
	TInt r = UserSvr::HalFunction(EHalGroupKernel, EKernelHalLockThreadToCpu, (TAny *)aCore, 0);
	test(r==KErrNone);
	return r;
	}
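
// Illustrative sketch of the conversion that TestFastCounter() below applies
// to a fast counter delta. The names startCount/endCount/fcCountsUp/countFreq
// stand for the values the test reads via User::FastCounter() and HAL::Get();
// the 64-bit intermediate avoids overflow when the counter frequency is high.
//
//	TInt countDiff = fcCountsUp ? TInt(endCount - startCount)
//	                            : TInt(startCount - endCount);
//	TInt elapsedCountUs = TInt((TInt64(1000000) * countDiff) / countFreq);
//
// The result is compared against the nanokernel tick count converted with
// HAL::ENanoTickPeriod, and the two are expected to agree to within 1%.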

//! @SYMTestCaseID t_cputime_0
//! @SYMTestType CT
//! @SYMTestCaseDesc Fast counter tests
//! @SYMREQ CR RFID-66JJKX
//! @SYMTestActions Compares the high res timer against the nanokernel microsecond tick
//! @SYMTestExpectedResults The difference measured should be < 1%
//! @SYMTestPriority High
//! @SYMTestStatus Defined
void TestFastCounter()
	{
	test.Start(_L("Comparing NTickCount with FastCounter"));

	TInt tickPeriod = 0;
	FailIfError(HAL::Get(HAL::ENanoTickPeriod, tickPeriod));
	test.Printf(_L(" tick period == %d\n"), tickPeriod);

	TInt countFreq = 0;
	FailIfError(HAL::Get(HAL::EFastCounterFrequency, countFreq));
	test.Printf(_L(" count freq == %d\n"), countFreq);

	TBool fcCountsUp = 0;
	FailIfError(HAL::Get(HAL::EFastCounterCountsUp, fcCountsUp));
	test.Printf(_L(" count dir == %S\n"), fcCountsUp ? &KUp : &KDown);

	TUint startTick = User::NTickCount();
	TUint startCount = User::FastCounter();

	User::After(KLongWait);

	TUint endTick = User::NTickCount();
	TUint endCount = User::FastCounter();

	TInt tickDiff = endTick - startTick;
	TInt countDiff = fcCountsUp ? (endCount - startCount) : (startCount - endCount);

	test.Printf(_L(" tick difference == %d\n"), tickDiff);
	test.Printf(_L(" fast count difference == %d\n"), countDiff);

	TInt elapsedTickUs = tickDiff * tickPeriod;
	TInt elapsedCountUs = (TInt)(((TInt64)1000000 * countDiff) / countFreq);

	test.Printf(_L(" tick time == %d\n"), elapsedTickUs);
	test.Printf(_L(" count time == %d\n"), elapsedCountUs);

	TReal diff = (100.0 * Abs(elapsedCountUs - elapsedTickUs)) / elapsedTickUs;

	test.Printf(_L(" %% difference == %f\n"), diff);
	test(diff < 1.0);
	test.End();
	}

TInt ThreadFunction(TAny* aParam)
	{
	if (numCpus > 1)
		{
		TInt& core = (static_cast<TThreadParam*>(aParam))->iCpu;
		FailIfError(SetCpuAffinity(core));
		}

	RSemaphore& semaphore = (static_cast<TThreadParam*>(aParam))->iSem;
	semaphore.Wait();
	for (;;)
		{
		// Spin
		}
	}

void EnsureSystemIdle()
	{
	// This test assumes 100% cpu resource is available, so it can fail on
	// windows builds if something else is running in the background. This
	// function attempts to wait for the system to become idle.

#ifdef __WINS__

	const TInt KMaxWait = 60 * 1000000;
	const TInt KSampleTime = 1 * 1000000;
	const TInt KWaitTime = 5 * 1000000;

	test.Start(_L("Waiting for system to become idle"));
	TInt totalTime = 0;
	TBool idle;
	do
		{
		test(totalTime < KMaxWait);

		TThreadParam threadParam;
		FailIfError((threadParam.iSem).CreateLocal(0));
		threadParam.iCpu = 1;

		RThread thread;
		FailIfError(thread.Create(_L("Thread"), ThreadFunction, 1024, NULL, &threadParam));
		thread.SetPriority(EPriorityLess);
		thread.Resume();

		User::After(KShortWait); // Pause to allow thread setup

		(threadParam.iSem).Signal();
		User::After(KSampleTime);
		thread.Suspend();

		TTimeIntervalMicroSeconds time;
		FailIfError(thread.GetCpuTime(time));
		TReal error = (100.0 * Abs(time.Int64() - KSampleTime)) / KSampleTime;
		test.Printf(_L(" time == %ld, error == %f%%\n"), time, error);

		idle = error < 2.0;

		thread.Kill(KErrNone);
		TRequestStatus status;
		thread.Logon(status);
		User::WaitForRequest(status);
		test(status == KErrNone);
		CLOSE_AND_WAIT(thread);

		(threadParam.iSem).Close();

		if (!idle)
			User::After(KWaitTime); // Allow system to finish whatever it's doing

		totalTime += KShortWait + KSampleTime + KWaitTime;
		}
	while (!idle);

	test.End();

#endif
	}
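
// Illustrative sketch of the basic RThread::GetCpuTime() usage pattern that
// TestThreadCpuTime() below exercises against a second thread; here it is
// shown against the calling thread (a default-constructed RThread refers to
// the current thread). DoSomeWork() is a hypothetical workload, not a
// function defined in this file.
//
//	TTimeIntervalMicroSeconds before, after;
//	FailIfError(RThread().GetCpuTime(before));
//	DoSomeWork();
//	FailIfError(RThread().GetCpuTime(after));
//	TInt64 consumedUs = after.Int64() - before.Int64();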

//! @SYMTestCaseID t_cputime_1
//! @SYMTestType CT
//! @SYMTestCaseDesc Thread CPU time tests
//! @SYMREQ CR RFID-66JJKX
//! @SYMTestActions Tests cpu time when a thread is put through the various states
//! @SYMTestExpectedResults Reported cpu time increases only when the thread is running
//! @SYMTestPriority High
//! @SYMTestStatus Defined
void TestThreadCpuTime()
	{
	test.Start(_L("CPU thread time unit tests"));

	TThreadParam threadParam;
	FailIfError((threadParam.iSem).CreateLocal(0));
	threadParam.iCpu = 0; // Later tests will exercise other CPUs

	RThread thread;
	RUndertaker u;
	TInt h;
	TRequestStatus s;
	FailIfError(thread.Create(_L("Thread"), ThreadFunction, 1024, NULL, &threadParam));
	thread.SetPriority(EPriorityLess);
	FailIfError(u.Create());
	FailIfError(u.Logon(s,h));
	test(s==KRequestPending);

	TTimeIntervalMicroSeconds time, time2;

	// Test time is initially zero
	FailIfError(thread.GetCpuTime(time));
	test(time == 0);

	// Test not increased while waiting on semaphore
	thread.Resume();
	User::After(KShortWait);
	FailIfError(thread.GetCpuTime(time));
	test(time < KTolerance); // wait happens in less than KTolerance (1ms)

	// Test increases when thread allowed to run
	(threadParam.iSem).Signal();
	User::After(KShortWait);
	FailIfError(thread.GetCpuTime(time));
	test(time > (KShortWait - KTolerance));

	// Test not increased while suspended
	thread.Suspend();
	FailIfError(thread.GetCpuTime(time));
	User::After(KShortWait);
	FailIfError(thread.GetCpuTime(time2));
	test(time == time2);
	thread.Resume();

	// Test not increased while dead
	thread.Kill(KErrNone);
	User::WaitForRequest(s); // wait on undertaker since that completes in supervisor thread
	FailIfError(thread.GetCpuTime(time));
	User::After(KShortWait);
	FailIfError(thread.GetCpuTime(time2));
	test(time == time2);

	RThread t;
	t.SetHandle(h);
	test(t.Id()==thread.Id());
	t.Close();
	u.Close();
	thread.Close();
	(threadParam.iSem).Close();
	test.End();
	}
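
// Worked example of the tolerance check in DoTestThreadCpuTime2() below:
// with 4 equal-priority spinning threads sampled over KLongWait (3 seconds),
// the expected share per thread is 3000000 / 4 = 750000us. A measured value
// of, say, 762000us gives an error of 100 * |762000 - 750000| / 750000 = 1.6%,
// which is inside the 5% tolerance the test allows (the figures here are
// illustrative, not measured results).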

//! @SYMTestCaseID t_cputime_2
//! @SYMTestType CT
//! @SYMTestCaseDesc Thread CPU time tests
//! @SYMREQ CR RFID-66JJKX
//! @SYMTestActions Tests cpu time when multiple threads are running
//! @SYMTestExpectedResults Total time is divided evenly among running threads
//! @SYMTestPriority High
//! @SYMTestStatus Defined

TBool DoTestThreadCpuTime2() // Returns ETrue if test passed
	{
	test.Start(_L("Testing time shared between threads"));

	if (numCpus > 1)
		{
		test.Printf(_L("** SMP system detected - not testing time shared between threads until load balancing optimized **\n"));
		test.End();
		return ETrue;
		}

	const TInt KMaxThreads = 4;

	TThreadParam threadParam;

	RThread* threads = NULL;
	threads = new(ELeave) RThread[numCpus*KMaxThreads];
	FailIfError((threadParam.iSem).CreateLocal(0));

	TBool pass = ETrue;
	for (TInt numThreads = 1 ; pass && numThreads <= KMaxThreads ; ++numThreads)
		{
		test.Printf(_L(" testing with %d threads on each of %d CPUs:\n"), numThreads, numCpus);

		TInt i, j, k;
		for (i = 0 ; i < numThreads ; ++i)
			{
			for (j = 0 ; j < numCpus ; ++j)
				{
				TBuf<16> name;
				name.AppendFormat(_L("Thread%d%d"), i, j);
				threadParam.iCpu = j;
				k = i+j*KMaxThreads;
				FailIfError(threads[k].Create(name, ThreadFunction, 1024, NULL, &threadParam));
				threads[k].SetPriority(EPriorityLess);
				threads[k].Resume();
				}
			}

		User::After(KShortWait); // Pause to allow thread setup

		(threadParam.iSem).Signal(numThreads*numCpus);
		User::After(KLongWait);
		for (i = 0 ; i < numThreads ; ++i)
			for (j = 0 ; j < numCpus ; ++j)
				threads[i+j*KMaxThreads].Suspend();

		TInt expected = KLongWait / numThreads;
		for (i = 0 ; i < numThreads ; ++i)
			{
			for (j = 0 ; j < numCpus ; ++j)
				{
				k = i+j*KMaxThreads;
				TTimeIntervalMicroSeconds time;
				FailIfError(threads[k].GetCpuTime(time));

				TReal error = (100.0 * Abs(time.Int64() - expected)) / expected;

				test.Printf(_L(" %d%d: time == %ld, error == %d%%\n"), i, j, time.Int64(), TInt(error));

				if (error >= 5.0)
					pass = EFalse;

				threads[k].Kill(KErrNone);
				TRequestStatus status;
				threads[k].Logon(status);
				User::WaitForRequest(status);
				test(status == KErrNone);
				CLOSE_AND_WAIT(threads[k]);
				}
			}
		}

	(threadParam.iSem).Close();
	test.End();

	return pass;
	}

void TestThreadCpuTime2()
	{
#ifdef __WINS__
	TBool pass = EFalse;
	for (TInt retry = 0 ; !pass && retry < 5 ; ++retry)
		{
		if (retry > 0)
			{
			test.Printf(_L("Test failed, retrying...\n"));
			EnsureSystemIdle();
			}
		pass = DoTestThreadCpuTime2();
		}
	test(pass);
#else
	test(DoTestThreadCpuTime2());
#endif
	}

TInt ThreadFunction2(TAny* aParam)
	{
	TTimeIntervalMicroSeconds& time = *(TTimeIntervalMicroSeconds*)aParam;
	RThread thread;
	return thread.GetCpuTime(time);
	}
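
// The ARM-only cases below check that the kernel validates the user-supplied
// address before writing the CPU time back to it: a bad pointer should panic
// the offending thread (exit reason 3, consistent with a KERN-EXEC 3 panic)
// rather than allow a write through an invalid address. A hypothetical
// user-side misuse would look like:
//
//	TTimeIntervalMicroSeconds* bad = NULL;	// invalid destination
//	RThread().GetCpuTime(*bad);				// expected to panic the thread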

#ifdef __MARM__

void DoTestThreadCpuTime3(TAny* aParam, TExitType aExpectedExitType, TInt aExpectedExitReason)
	{
	RThread thread;
	FailIfError(thread.Create(_L("TestThread"), ThreadFunction2, 1024, NULL, aParam));
	thread.Resume();
	TRequestStatus status;
	thread.Logon(status);
	User::WaitForRequest(status);

	TExitCategoryName exitCat = thread.ExitCategory();
	test.Printf(_L("Thread exit with type == %d, reason == %d, cat == %S\n"),
				thread.ExitType(), thread.ExitReason(), &exitCat);

	test(thread.ExitType() == aExpectedExitType);
	test(thread.ExitReason() == aExpectedExitReason);
	CLOSE_AND_WAIT(thread);
	}

void TestThreadCpuTime3()
	{
	// Test kernel writes the return value back to user-space with the correct permissions
	TTimeIntervalMicroSeconds time;
	DoTestThreadCpuTime3(&time, EExitKill, 0);              // ok
	DoTestThreadCpuTime3((TAny*)0, EExitPanic, 3);          // null pointer
	DoTestThreadCpuTime3((TAny*)0x64000000, EExitPanic, 3); // start of kernel data on moving memory model
	DoTestThreadCpuTime3((TAny*)0xc8000000, EExitPanic, 3); // start of kernel data on multiple memory model
	}

#endif

GLDEF_C TInt E32Main()
	{
	test.Title();
	test.Start(_L("T_CPUTIME"));

	if (numCpus > 1)
		FailIfError(SetCpuAffinity(0));

	TestFastCounter();
	if (GetCpuTimeIsSupported())
		{
		EnsureSystemIdle();
		TestThreadCpuTime();
		TestThreadCpuTime2();
#ifdef __MARM__
		TestThreadCpuTime3();
#endif
		}
	test.End();
	return 0;
	}