// Copyright (c) 2008-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// f32test\demandpaging\t_wdpstress.cpp
// Data Paging Stress Tests
// Common command lines:
// t_wdpstress lowmem
// debug        - switch on debugging information
// silent       - no output to the screen or serial port
// single       - run the tests in a single thread
// multiple <n> - run the tests in multiple threads, where <n> is the number of threads (max 50 simultaneous threads)
// interleave   - force thread interleaving
// prio         - each thread reschedules in between each function call, causing lots of context changes
// media        - perform media access during the tests, very stressful
// lowmem       - low memory tests
// stack        - perform autotest only with stack paging tests
// chunk        - perform autotest only with chunk paging tests
// commit       - perform autotest only with committing and decommitting paging tests
// ipc          - perform autotest only with ipc pinning tests
// all          - perform autotest with all paging tests (ipc, stack, chunk and commit)
// badserver    - perform ipc pinning tests with a dead server
// iters <n>    - the number of times to loop
//
//

//! @SYMTestCaseID				KBASE-T_WDPSTRESS-xxx
//! @SYMTestType				UT
//! @SYMPREQ					PREQ1954
//! @SYMTestCaseDesc			Writable Data Paging Stress Tests
//! @SYMTestActions
//! @SYMTestExpectedResults		All tests should pass.
//! @SYMTestPriority			High
//! @SYMTestStatus				Implemented
//----------------------------------------------------------------------------------------------
//
#define __E32TEST_EXTENSION__
#include <e32test.h>
#include <dptest.h>
RTest test(_L("T_WDPSTRESS"));

#include <e32rom.h>
#include <u32hal.h>
#include <f32file.h>
#include <f32dbg.h>
#include <e32msgqueue.h>
#include <e32math.h>
#include <e32svr.h>
#include <hal.h>
#include "testdefs.h"

#ifdef __X86__
#define TEST_ON_UNPAGED
#endif

#include "t_pagestress.h"

TBool TestDebug = EFalse;
TBool TestSilent = EFalse;
TBool TestExit = EFalse;


TInt gPerformTestLoop = 10;                 // Number of times to perform the test on a thread
const TUint KMaxTestThreads = 20;           // The maximum number of threads allowed to run simultaneously
TInt gNumTestThreads = KMaxTestThreads;     // The number of threads to run simultaneously

#define TEST_INTERLEAVE_PRIO EPriorityMore

TBool TestWeAreTheTestBase = EFalse;

#define TEST_NONE   0x0
#define TEST_IPC    0x1
#define TEST_STACK  0x2
#define TEST_CHUNK  0x4
#define TEST_COMMIT 0x8
#define TEST_ALL    (TEST_COMMIT | TEST_CHUNK | TEST_STACK | TEST_IPC)

TUint32 gSetTests = TEST_ALL;
TUint32 gTestWhichTests = gSetTests;
TBuf<32> gTestNameBuffer;
TBool gTestPrioChange = EFalse;
TBool gTestStopMedia = EFalse;
TBool gTestMediaAccess = EFalse;
TBool gTestInterleave = EFalse;
TBool gTestBadServer = EFalse;

#define TEST_LM_NUM_FREE    0
#define TEST_LM_BLOCKSIZE   1
#define TEST_LM_BLOCKS_FREE 4

RPageStressTestLdd Ldd;
RSemaphore TestMultiSem;
RMsgQueue<TBuf<64> > TestMsgQueue;

TBool gIsDemandPaged = ETrue;
TBool gTestRunning = EFalse;        // To control when to stop flushing
TBool gMaxChunksReached = EFalse;   // On the moving memory model, the number of chunks per process is capped

TInt gPageSize;                     // The number of bytes per page
TUint gPageShift;
TUint gChunksAllocd = 0;            // The total number of chunks that have been allocated
TUint gMaxChunks = 0;               // The maximum number of chunks, after which KErrOverflow will be returned
RHeap* gThreadHeap = NULL;
RHeap* gStackHeap = NULL;

TInt gTestType = -1;                // The type of test that is to be performed

#define TEST_NEXT(__args) \
	if (!TestSilent)\
		test.Next __args;

#define RDBGD_PRINT(__args)\
	if (TestDebug)\
		RDebug::Printf __args ;\

#define RDBGS_PRINT(__args)\
	if (!TestSilent)\
		RDebug::Printf __args ;\

#define DEBUG_PRINT(__args)\
if (!TestSilent)\
	{\
	if (aTestArguments.iMsgQueue && aTestArguments.iBuffer && aTestArguments.iTheSem)\
		{\
		aTestArguments.iBuffer->Zero();\
		aTestArguments.iBuffer->Format __args ;\
		aTestArguments.iTheSem->Wait();\
		aTestArguments.iMsgQueue->SendBlocking(*aTestArguments.iBuffer);\
		aTestArguments.iTheSem->Signal();\
		}\
	else\
		{\
		test.Printf __args ;\
		}\
	}

#define RUNTEST(__test, __error)\
	if (!TestSilent)\
		test(__test == __error);\
	else\
		__test;

#define RUNTEST1(__test)\
	if (!TestSilent)\
		test(__test);

#define DEBUG_PRINT1(__args)\
	if (TestDebug)\
		{\
		DEBUG_PRINT(__args)\
		}

#define DOTEST(__operation, __condition)\
	if (aLowMem) \
		{\
		__operation;\
		while (!__condition)\
			{\
			Ldd.DoReleaseSomeRam(TEST_LM_BLOCKS_FREE);\
			__operation;\
			}\
		RUNTEST1(__condition);\
		}\
	else\
		{\
		__operation;\
		RUNTEST1(__condition);\
		}

#define DOTEST1(__operation, __condition)\
	if (aTestArguments.iLowMem) \
		{\
		__operation;\
		while (!__condition)\
			{\
			Ldd.DoReleaseSomeRam(TEST_LM_BLOCKS_FREE);\
			__operation;\
			}\
		RUNTEST1(__condition);\
		}\
	else\
		{\
		__operation;\
		RUNTEST1(__condition);\
		}

struct SThreadExitResults
	{
	TInt iExitType;
	TInt iExitReason;
	};
SThreadExitResults* gResultsArray;
const TInt KExitTypeReset = -1;

struct SPerformTestArgs
	{
	TInt iThreadIndex;
	RMsgQueue<TBuf<64> > *iMsgQueue;
	TBuf<64> *iBuffer;
	RSemaphore *iTheSem;
	TBool iLowMem;
	TInt iTestType;
	};


TInt DoTest(TInt gTestType, TBool aLowMem = EFalse);
enum
	{
	ETestSingle,
	ETestMultiple,
	ETestMedia,
	ETestLowMem,
	ETestInterleave,
	ETestCommit,
	ETestTypes,
	// This is at the moment manual
	ETestBadServer,
	ETestTypeEnd,
	};

TInt FreeRam()
	{
	// wait for any async cleanup in the supervisor to finish first...
	UserSvr::HalFunction(EHalGroupKernel, EKernelHalSupervisorBarrier, 0, 0);

	TMemoryInfoV1Buf meminfo;
	TInt r = UserHal::MemoryInfo(meminfo);
	test_KErrNone(r);
	return meminfo().iFreeRamInBytes;
	}

const TUint KStackSize = 20 * 4096;
TUint stackLimit = 150;	//*** NEED TO WORK OUT HOW MUCH STACK WE HAVE***

/**
Recursive function
*/
void CallRecFunc(TUint aNum, TInt aThreadIndex)
	{
	RDBGD_PRINT(("ThreadId %d CallRecFunc, aNum = %d\n", aThreadIndex, aNum));
	if (aNum >= stackLimit)
		{// To avoid a stack overflow
		return;
		}
	else
		{
		CallRecFunc(++aNum, aThreadIndex);
		User::After(0);
		}
	RDBGD_PRINT(("ThreadId %d CRF(%d)Returning...", aThreadIndex, aNum));
	return;
	}

/**
Thread that calls a recursive function
*/
TInt ThreadFunc(TAny* aThreadIndex)
	{
	for (TUint i=0; i<1; i++)
		{
		CallRecFunc(0, (TInt)aThreadIndex);
		}
	RDBGD_PRINT(("ThreadId %d ThreadFunc Returning...", (TInt)aThreadIndex));
	return KErrNone;
	}

/**
Thread continuously flushes the paging cache
*/
TInt FlushFunc(TAny* /*aPtr*/)
	{
	RThread().SetPriority(EPriorityMore);
	while(gTestRunning)
		{
		DPTest::FlushCache();
		User::After((Math::Random()&0xfff)*10);
		}
	return KErrNone;
	}


//
// TestStackPaging
//
// Create a paged thread which calls a recursive function.
// Calls to the function will be placed on the stack, which is data paged
//

TInt TestStackPaging(SPerformTestArgs& aTestArguments)
	{
	RDBGD_PRINT(("Creating test thread"));
	TBuf<16> runThreadName;
	runThreadName = _L("");
	TThreadCreateInfo threadCreateInfo(runThreadName, ThreadFunc, KStackSize, (TAny*) aTestArguments.iThreadIndex);
	threadCreateInfo.SetCreateHeap(KMinHeapSize, KMinHeapSize);
	//threadCreateInfo.SetUseHeap(NULL);
	threadCreateInfo.SetPaging(TThreadCreateInfo::EPaged);

	RThread testThread;
	TInt r;
	for(;;)
		{
		r = testThread.Create(threadCreateInfo);
		if(r != KErrNoMemory)
			break;
		if(!aTestArguments.iLowMem)
			break;
		if(Ldd.DoReleaseSomeRam(TEST_LM_BLOCKS_FREE) != KErrNone)
			break;
		RDBGD_PRINT(("TestStackPaging released some RAM\n"));
		}

	RDBGD_PRINT(("TID(%d) TestStackPaging create r = %d freeRam = %d\n", aTestArguments.iThreadIndex, r, FreeRam()));
	if (r != KErrNone)
		return r;

	TRequestStatus threadStatus;
	testThread.Logon(threadStatus);

	RDBGD_PRINT(("resuming test thread"));
	testThread.Resume();

	RDBGD_PRINT(("waiting for threadstatus"));
	User::WaitForRequest(threadStatus);

	RDBGD_PRINT(("Killing threads\n"));
	testThread.Close();

	return KErrNone;
	}

//--------------------------Server Pinning stuff-----------------------------------------------------
_LIT(KTestServer,"CTestServer");
const TUint KSemServer = 0;

class CTestServer : public CServer2
	{
public:
	CTestServer(TInt aPriority);
protected:
	//override the pure virtual functions:
	virtual CSession2* NewSessionL(const TVersion& aVersion,const RMessage2& aMessage) const;
	};


class CTestSession : public CSession2
	{
public:
	enum TTestMode
		{
		EStop,
		ERead,
		EWrite,
		EReadWrite,
		};
	//Override pure virtual
	IMPORT_C virtual void ServiceL(const RMessage2& aMessage);
private:
	TInt ReadWrite(const RMessage2& aMessage, TBool aRead, TBool aWrite);
	TBool iClientDied;
	};


class CMyActiveScheduler : public CActiveScheduler
	{
public:
	virtual void Error(TInt anError) const; //override pure virtual error function
	};


class RSession : public RSessionBase
	{
public:
	TInt PublicSendReceive(TInt aFunction, const TIpcArgs &aPtr)
		{
		return (SendReceive(aFunction, aPtr));
		}
	TInt PublicCreateSession(const TDesC& aServer,TInt aMessageSlots)
		{
		return (CreateSession(aServer,User::Version(),aMessageSlots));
		}
	};

struct SServerArgs
	{
	TBool iBadServer;
	RSemaphore iSemArray;
	};

SServerArgs gServerArgsArray[KMaxTestThreads];

CTestServer::CTestServer(TInt aPriority)
//
// Constructor - sets name
//
	: CServer2(aPriority)
	{}

CSession2* CTestServer::NewSessionL(const TVersion& aVersion,const RMessage2& /*aMessage*/) const
//
// Virtual fn - checks version supported and creates a CTestSession
//
	{
	TVersion version(KE32MajorVersionNumber,KE32MinorVersionNumber,KE32BuildVersionNumber);
	if (User::QueryVersionSupported(version,aVersion)==EFalse)
		User::Leave(KErrNotSupported);
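	// Version is supported - allocate the session object; panic (rather than leave) on OOM so the test fails loudly.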
	CTestSession* newCTestSession = new CTestSession;
	if (newCTestSession==NULL)
		User::Panic(_L("NewSessionL failure"), KErrNoMemory);
	return(newCTestSession);
	}

TInt CTestSession::ReadWrite(const RMessage2& aMessage, TBool aRead, TBool aWrite)
	{
	TInt r = KErrNone;
	for (TUint argIndex = 0; argIndex < 4; argIndex++)
		{
		// Get the length of the descriptor and verify it is as expected.
		TInt length = aMessage.GetDesLength(argIndex);
		if (length < KErrNone)
			{
			RDebug::Printf(" Error getting descriptor length %d", length);
			return length;
			}

		if (aRead)
			{
			// Now read the descriptor
			HBufC8* des = HBufC8::New(length);
			if (!des)
				return KErrNoMemory;
			TPtr8 desPtr = des->Des();
			r = aMessage.Read(argIndex, desPtr);
			if (r != KErrNone)
				{
				delete des;
				return r;
				}
			//TODO: Verify the descriptor
			delete des;
			}

		if (aWrite)
			{
			// Now write to the maximum length of the descriptor.
			TInt max = length;
			HBufC8* argTmp = HBufC8::New(max);
			if (!argTmp)
				return KErrNoMemory;

			TPtr8 argPtr = argTmp->Des();
			argPtr.SetLength(max);
			for (TInt i = 0; i < max; i++)
				argPtr[i] = (TUint8)argIndex;
			r = aMessage.Write(argIndex, argPtr);
			delete argTmp;
			if (r != KErrNone)
				return r;
			}
		}

	return KErrNone;
	}


void CTestSession::ServiceL(const RMessage2& aMessage)
//
// Virtual message-handler
//
	{
	TInt r = KErrNone;
	iClientDied = EFalse;
	switch (aMessage.Function())
		{
		case EStop:
			RDBGD_PRINT(("Stopping server"));
			CActiveScheduler::Stop();
			break;

		case ERead:
			r = ReadWrite(aMessage, ETrue, EFalse);
			break;
		case EWrite:
			r = ReadWrite(aMessage, EFalse, ETrue);
			break;
		case EReadWrite:
			r = ReadWrite(aMessage, ETrue, ETrue);
			break;

		default:
			r = KErrNotSupported;

		}
	aMessage.Complete(r);

	// If descriptors aren't as expected then panic so the test will fail.
	if (r != KErrNone)
		User::Panic(_L("ServiceL failure"), r);
	}

// CTestSession functions

void CMyActiveScheduler::Error(TInt anError) const
//
// Virtual error handler
//
	{
	User::Panic(_L("CMyActiveScheduer::Error"), anError);
	}


TInt ServerThread(TAny* aThreadIndex)
//
// Passed as the server thread in 2 tests - sets up and runs CTestServer
//
	{
	RDBGD_PRINT(("ServerThread"));
	TUint threadIndex = (TUint)aThreadIndex;

	TBuf<16> serverName;
	serverName = _L("ServerName_");
	serverName.AppendNum(threadIndex);


	CMyActiveScheduler* pScheduler = new CMyActiveScheduler;
	if (pScheduler == NULL)
		{
		gServerArgsArray[threadIndex].iBadServer = ETrue;
		gServerArgsArray[threadIndex].iSemArray.Signal();
		return KErrNoMemory;
		}

	CActiveScheduler::Install(pScheduler);

	CTestServer* pServer = new CTestServer(0);
	if (pServer == NULL)
		{
		gServerArgsArray[threadIndex].iBadServer = ETrue;
		gServerArgsArray[threadIndex].iSemArray.Signal();
		delete pScheduler;
		return KErrNoMemory;
		}

	//Starting a CServer2 also Adds it to the ActiveScheduler
	TInt r = pServer->Start(serverName);
	if (r != KErrNone)
		{
		gServerArgsArray[threadIndex].iBadServer = ETrue;
		gServerArgsArray[threadIndex].iSemArray.Signal();
		delete pScheduler;
		delete pServer;
		return r;
		}

	RDBGD_PRINT(("Start ActiveScheduler and signal to client"));
	RDBGD_PRINT(("There might be something going on beneath this window\n"));
	gServerArgsArray[threadIndex].iSemArray.Signal();
	CActiveScheduler::Start();

	delete pScheduler;
	delete pServer;

	return KErrNone;
	}

TInt BadServerThread(TAny* /*aThreadIndex*/)
//
// Passed as the server thread in 2 tests - sets up and runs CTestServer
//
	{
	RDBGD_PRINT(("BadServerThread"));
	CMyActiveScheduler* pScheduler = new CMyActiveScheduler;
	if (pScheduler == NULL)
		{
		RDBGD_PRINT(("BST:Fail1"));
		gServerArgsArray[KSemServer].iBadServer = ETrue;
		gServerArgsArray[KSemServer].iSemArray.Signal();
		return KErrNoMemory;
		}

	CActiveScheduler::Install(pScheduler);

	CTestServer* pServer = new CTestServer(0);
	if (pServer == NULL)
		{
		RDBGD_PRINT(("BST:Fail2"));
		gServerArgsArray[KSemServer].iBadServer = ETrue;
		gServerArgsArray[KSemServer].iSemArray.Signal();
		delete pScheduler;
		return KErrNoMemory;
		}

	//pServer->SetPinClientDescriptors(ETrue);


	//Starting a CServer2 also Adds it to the ActiveScheduler
	TInt r = pServer->Start(KTestServer);
	if (r != KErrNone)
		{
		RDBGD_PRINT(("BST:Fail3"));
		gServerArgsArray[KSemServer].iBadServer = ETrue;
		gServerArgsArray[KSemServer].iSemArray.Signal();
		delete pScheduler;
		delete pServer;
		return r;
		}

	RDBGD_PRINT(("Start ActiveScheduler and signal to client"));
	RDBGD_PRINT(("There might be something going on beneath this window\n"));
	gServerArgsArray[KSemServer].iSemArray.Signal();
	CActiveScheduler::Start();

	delete pScheduler;
	delete pServer;
	RDBGD_PRINT(("BST:Pass1"));
	return KErrNone;
	}

TInt SendMessages(TUint aIters, TUint aSize, TDesC& aServerName, TInt aIndex, TBool aLowMem = EFalse)
//
// Passed as the first client thread - signals the server to do several tests
//
	{
	HBufC8* argTmp1;
	HBufC8* argTmp2;
	HBufC8* argTmp3;
	HBufC8* argTmp4;

	DOTEST((argTmp1 = HBufC8::New(aSize)), (argTmp1 != NULL));
	*argTmp1 = (const TUint8*)"argTmp1";
	TPtr8 ptr1 = argTmp1->Des();

	DOTEST((argTmp2 = HBufC8::New(aSize)), (argTmp2 != NULL));
	*argTmp2 = (const TUint8*)"argTmp2";
	TPtr8 ptr2 = argTmp2->Des();

	DOTEST((argTmp3 = HBufC8::New(aSize)), (argTmp3 != NULL));
	*argTmp3 = (const TUint8*)"argTmp3";
	TPtr8 ptr3 = argTmp3->Des();

	DOTEST((argTmp4 = HBufC8::New(aSize)), (argTmp4 != NULL));
	*argTmp4 = (const TUint8*)"argTmp4";
	TPtr8 ptr4 = argTmp4->Des();

	RSession session;
	TInt r = KErrNone;
	if(gTestBadServer)
		{//Don't do bad server tests with lowmem
		r = session.PublicCreateSession(aServerName,5);
		}
	else
		{
		DOTEST((r = session.PublicCreateSession(aServerName,5)), (r != KErrNoMemory));
		}
	if (r != KErrNone)
		{
		RDBGD_PRINT(("SendMessages[%d] failed to create session r = %d", aIndex, r));
		return r;
		}

	if(gTestBadServer)
		{
		RThread::Rendezvous(KErrNone);
		RDBGD_PRINT(("Wait on sem %d", aIndex));
		//gServerArgsArray[KSemCliSessStarted].iSemArray.Wait();
		}

	RDBGD_PRINT(("ID (%d)ReadWrite", aIndex));
	for (TUint i = 0; i < aIters; i++)
		{
		TUint mode = (i&0x3) + CTestSession::ERead;
		switch(mode)
			{
			case CTestSession::ERead:
				DOTEST((r = session.PublicSendReceive(CTestSession::ERead, TIpcArgs(&ptr1, &ptr2, &ptr3, &ptr4).PinArgs())),
						(r != KErrNoMemory));
				if (r != KErrNone)
					return r;
				break;

			case CTestSession::EWrite:
				DOTEST((r = session.PublicSendReceive(CTestSession::EWrite, TIpcArgs(&ptr1, &ptr2, &ptr3, &ptr4).PinArgs())),
						(r != KErrNoMemory));
				if (r != KErrNone)
					return r;
				break;
			case CTestSession::EReadWrite:
				DOTEST((r = session.PublicSendReceive(CTestSession::EReadWrite, TIpcArgs(&ptr1, &ptr2, &ptr3, &ptr4).PinArgs())),
						(r != KErrNoMemory));
				if (r != KErrNone)
					return r;
				break;

			}
		}
	RDBGD_PRINT(("ID(%d) Closing session", aIndex));
	session.Close();
	return r;
	}

TInt TestIPCPinning(SPerformTestArgs& aTestArguments)
	{
	TInt r = KErrNone;
	// Create the server thread; it needs to have an unpaged stack and heap.
	TBuf<16> serverThreadName;
	serverThreadName = _L("ServerThread_");
	serverThreadName.AppendNum(aTestArguments.iThreadIndex);
	TThreadCreateInfo serverInfo(serverThreadName, ServerThread, KDefaultStackSize, (TAny *) aTestArguments.iThreadIndex);
	serverInfo.SetUseHeap(NULL);

	gServerArgsArray[aTestArguments.iThreadIndex].iBadServer = EFalse;

	// Create the semaphores for the IPC pinning tests
	DOTEST1((r = gServerArgsArray[aTestArguments.iThreadIndex].iSemArray.CreateLocal(0)), (r != KErrNoMemory));
	if (r != KErrNone)
		{
		RDBGD_PRINT(("Failed to create semaphore[%d] r = %d", aTestArguments.iThreadIndex, r));
		return r;
		}

	RThread serverThread;
	TInt r1 = KErrNone;
	DOTEST1((r1 = serverThread.Create(serverInfo)), (r1 != KErrNoMemory));
	if (r1 != KErrNone)
		{
		RDBGD_PRINT(("Failed to create server thread[%d] r1 = %d", aTestArguments.iThreadIndex, r1));
		return r1;
		}
	TRequestStatus serverStat;
	serverThread.Logon(serverStat);
	serverThread.Resume();

	// Wait for the server to start and then create a session to it.
	TBuf<16> serverName;
	serverName = _L("ServerName_");
	serverName.AppendNum(aTestArguments.iThreadIndex);

	gServerArgsArray[aTestArguments.iThreadIndex].iSemArray.Wait();

	// First check that the server started successfully
	if (gServerArgsArray[aTestArguments.iThreadIndex].iBadServer)
		return KErrServerTerminated;

	RSession session;
	DOTEST1((r1 = session.PublicCreateSession(serverName,5)), (r1 != KErrNoMemory));
	if (r1 != KErrNone)
		{
		RDBGD_PRINT(("Failed to create session[%d] r1 = %d", aTestArguments.iThreadIndex, r1));
		return r1;
		}

	r1 = SendMessages(50, 10, serverName, aTestArguments.iThreadIndex, aTestArguments.iLowMem);
	if (r1 != KErrNone)
		{
		RDBGD_PRINT(("SendMessages[%d] r1 = %d", aTestArguments.iThreadIndex, r1));
		return r1;
		}
	TInt r2 = KErrNone;

	// Signal to stop ActiveScheduler and wait for server to stop.
	session.PublicSendReceive(CTestSession::EStop, TIpcArgs());
	session.Close();

	User::WaitForRequest(serverStat);
	if (serverThread.ExitType() == EExitKill &&
		serverThread.ExitReason() != KErrNone)
		{
		r2 = serverThread.ExitReason();
		}
	if (serverThread.ExitType() != EExitKill)
		{
		RDBGD_PRINT(("Server thread panic'd"));
		r2 = KErrGeneral;
		}

	serverThread.Close();
	gServerArgsArray[aTestArguments.iThreadIndex].iSemArray.Close();

	if (r1 != KErrNone)
		return r1;

	return r2;
	}

TInt ClientThread(TAny* aClientThread)
	{
	TInt r = KErrNone;

	TBuf<16> serverName;
	serverName = KTestServer;
	RDBGD_PRINT(("CT(%d):Sending Messages", aClientThread));
	r = SendMessages(500, 10, serverName, (TInt) aClientThread);
	if (r != KErrNone)
		{
		RDBGD_PRINT(("SendMessages[%d] r = %d", (TInt) aClientThread, r));
		return r;
		}
	return r;
	}

TInt TestIPCBadServer(SPerformTestArgs& aTestArguments)
	{
	TInt cliRet = KErrNone;
	TInt serRet = KErrNone;

	// Create the server thread; it needs to have an unpaged stack and heap.
	TBuf<16> serverThreadName;
	serverThreadName = _L("BadServerThread");
	TThreadCreateInfo serverInfo(serverThreadName, BadServerThread, KDefaultStackSize, NULL);
	serverInfo.SetUseHeap(NULL);

	// Create the semaphores for the IPC pinning tests
	DOTEST1((serRet = gServerArgsArray[KSemServer].iSemArray.CreateLocal(0)), (serRet != KErrNoMemory));
	if (serRet != KErrNone)
		{
		RDBGD_PRINT(("Failed to create semaphore[%d] serRet = %d", KSemServer, serRet));
		return serRet;
		}

	RThread serverThread;
	DOTEST1((serRet = serverThread.Create(serverInfo)), (serRet != KErrNoMemory));
	if (serRet != KErrNone)
		{
		RDBGD_PRINT(("Failed to create server thread serRet = %d", serRet));
		return serRet;
		}
	TRequestStatus serverStat;
	serverThread.Logon(serverStat);
	serverThread.Resume();

	// Wait for the server to start and then create a session to it.
	gServerArgsArray[KSemServer].iSemArray.Wait();

	// First check that the server started successfully
	if (gServerArgsArray[KSemServer].iBadServer)
		return KErrServerTerminated;


	//create client threads
	const TUint KNumClientThreads = 50;
	RThread clientThreads[KNumClientThreads];
	TRequestStatus clientStarted[KNumClientThreads];
	TRequestStatus clientStats[KNumClientThreads];

	// Create the client threads
	TBuf<16> clientThreadName;
	TUint i;
	for (i = 0; i < KNumClientThreads; i++)
		{
		clientThreadName = _L("clientThread_");
		clientThreadName.AppendNum(i);
		TThreadCreateInfo clientInfo(clientThreadName, ClientThread, KDefaultStackSize, (TAny*)i);
		clientInfo.SetPaging(TThreadCreateInfo::EPaged);
		clientInfo.SetCreateHeap(KMinHeapSize, KMinHeapSize);
		cliRet = clientThreads[i].Create(clientInfo);
		if (cliRet != KErrNone)
			{
			RDBGD_PRINT(("Failed to create client thread [%d] cliRet = %d", i, cliRet));
			return cliRet;
			}
		clientThreads[i].Rendezvous(clientStarted[i]);
		clientThreads[i].Logon(clientStats[i]);
		clientThreads[i].Resume();
		}

	// Wait for creation of the client thread sessions
	for (i = 0; i < KNumClientThreads; i++)
		{
		User::WaitForRequest(clientStarted[i]);
		if (clientStarted[i].Int() != KErrNone)
			return clientStarted[i].Int();
		}


	// Once the messages are being sent, create a session to the
	// same server and signal to stop ActiveScheduler
	RSession session;
	serRet = session.PublicCreateSession(KTestServer,5);
	if (serRet != KErrNone)
		{
		RDBGD_PRINT(("Failed to create session serRet = %d", serRet));
		return serRet;
		}
	session.PublicSendReceive(CTestSession::EStop, TIpcArgs());
	session.Close();

	// Wait for the client threads to end.
	cliRet = KErrNone;
	for (i = 0; i < KNumClientThreads; i++)
		{
		User::WaitForRequest(clientStats[i]);
		RDBGD_PRINT(("Thread complete clientStats[%d] = %d", i, clientStats[i].Int()));
		if (clientStats[i].Int() != KErrNone &&
			clientStats[i].Int() != KErrServerTerminated)
			{
			cliRet = clientStats[i].Int();
			}
		}

	// Check that the server ended correctly
	serRet = KErrNone;
	User::WaitForRequest(serverStat);
	if (serverThread.ExitType() == EExitKill &&
		serverThread.ExitReason() != KErrNone)
		{
		serRet = serverThread.ExitReason();
		}
	if (serverThread.ExitType() != EExitKill)
		{
		RDBGD_PRINT(("Server thread panic'd"));
		serRet = KErrGeneral;
		}

	// Close all the server thread and client threads
	for (i = 0; i < KNumClientThreads; i++)
		{
		clientThreads[i].Close();
		}
	serverThread.Close();

	if (cliRet != KErrNone)
		return cliRet;

	return serRet;
	}


//
// RemoveChunkAlloc
//
// Remove ALL chunks allocated
//
// @param aChunkArray The array that stores a reference to the chunks created.
// @param aChunkArraySize The size of aChunkArray.
//
void RemoveChunkAlloc(RChunk*& aChunkArray, TUint aChunkArraySize)
	{
	if (aChunkArray == NULL)
		{// The chunk array has already been deleted.
		return;
		}

	for (TUint i = 0; i < aChunkArraySize; i++)
		{
		if (aChunkArray[i].Handle() != NULL)
			{
			aChunkArray[i].Close();
			gChunksAllocd--;
			if (gChunksAllocd < gMaxChunks)
				gMaxChunksReached = EFalse;
			}
		}
	delete[] aChunkArray;
	aChunkArray = NULL;
	}

TInt WriteToChunk(RChunk* aChunkArray, TUint aChunkArraySize)
	{
	for (TUint j = 0; j < aChunkArraySize; j++)
		{
		if (aChunkArray[j].Handle() != NULL)
			{
			TUint32* base = (TUint32*)aChunkArray[j].Base();
			TUint32* end = (TUint32*)(aChunkArray[j].Base() + aChunkArray[j].Size());
			for (TUint32 k = 0; base < end; k++)
				{
				*base++ = k; // write index to the chunk
				}
			}
		}
	return KErrNone;
	}

TUint32 ReadByte(volatile TUint32* aPtr)
	{
	return *aPtr;
	}

TInt ReadChunk(RChunk* aChunkArray, TUint aChunkArraySize)
	{
	for (TUint j=0; j < aChunkArraySize; j++) //Read all open chunks
		{
		if (aChunkArray[j].Handle() != NULL)
			{
			TUint32* base = (TUint32*)aChunkArray[j].Base();
			TUint32* end = (TUint32*)(aChunkArray[j].Base() + aChunkArray[j].Size());
			for (TUint32 k = 0; base < end; k++)
				{
				TUint value = ReadByte((volatile TUint32*)base++);
				if (value != k)
					{
					RDBGS_PRINT(("Read value incorrect expected 0x%x got 0x%x", k, value));
					return KErrGeneral;
					}
				}
			}
		}
	return KErrNone;
	}


TInt CreateChunks(SPerformTestArgs& aTestArguments, RChunk*& aChunkArray, TUint aChunkArraySize)
	{
	TInt r = KErrNone;

	TUint chunkSize = 1 << gPageShift;

	// Allocate as many chunks as is specified, either with the default chunk size or a specified chunk size
	if (aChunkArray == NULL)
		{
		DOTEST1((aChunkArray = new RChunk[aChunkArraySize]), (aChunkArray != NULL));
		if (aChunkArray == NULL)
			return KErrNoMemory;
		}

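	// Each chunk is a single page of committed, data-paged memory.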
	TChunkCreateInfo createInfo;
	createInfo.SetNormal(chunkSize, chunkSize);
	createInfo.SetPaging(TChunkCreateInfo::EPaged);


	// Create chunks for each RChunk with a NULL handle.
	for (TUint i = 0; i < aChunkArraySize; i++)
		{
		DOTEST1((r = aChunkArray[i].Create(createInfo)), (r != KErrNoMemory));
		if (r != KErrNone)
			{
			if (r == KErrOverflow)
				{
				gMaxChunks = gChunksAllocd;
				RDBGD_PRINT(("Max Chunks Allowed = %d", gMaxChunks));
				gMaxChunksReached = ETrue;
				}
			return r;
			}
		gChunksAllocd++;
		RDBGD_PRINT(("TID(%d) aChunkArray[%d], r = %d", aTestArguments.iThreadIndex, i, r));
		}
	RDBGD_PRINT(("TID(%d) created chunks r = %d", aTestArguments.iThreadIndex, r));

	return KErrNone;
	}
//
// TestChunkPaging
//
// Create a number of chunks and write to them,
// then read the chunks back to ensure the values are correct
//

TInt TestChunkPaging(SPerformTestArgs& aTestArguments)
	{
	TInt r = KErrNone;
	const TUint KNumChunks = 10;


	if(gMaxChunksReached)
		{// We can't create any more chunks as the max number has been reached
		return KErrNone;
		}

	RChunk* chunkArray = NULL;
	r = CreateChunks(aTestArguments, chunkArray, KNumChunks);
	if (r != KErrNone)
		{
		if (r == KErrOverflow)
			{
			RDBGD_PRINT(("Max number of chunks reached"));
			RemoveChunkAlloc(chunkArray, KNumChunks);
			return KErrNone;
			}
		RDBGD_PRINT(("TID(%d) CreateChunks r = %d", aTestArguments.iThreadIndex, r));
		return r;
		}

	r = WriteToChunk(chunkArray, KNumChunks);
	if (r != KErrNone)
		{
		RemoveChunkAlloc(chunkArray, KNumChunks);
		RDBGD_PRINT(("TID(%d) WriteToChunk r = %d", aTestArguments.iThreadIndex, r));
		return r;
		}

	r = ReadChunk(chunkArray, KNumChunks);
	if (r != KErrNone)
		{
		RemoveChunkAlloc(chunkArray, KNumChunks);
		RDBGD_PRINT(("TID(%d) ReadChunk r = %d", aTestArguments.iThreadIndex, r));
		return r;
		}
	RemoveChunkAlloc(chunkArray, KNumChunks);
	return KErrNone;
	}


//
// TestChunkCommit
//
// Create a chunk,
// commit a page at a time, write to that page and then decommit the page
//

TInt TestChunkCommit(SPerformTestArgs& aTestArguments)
	{
	TInt r = KErrNone;
	RChunk testChunk;

	TUint chunkSize = 70 << gPageShift;

	TChunkCreateInfo createInfo;
	createInfo.SetDisconnected(0, 0, chunkSize);
	createInfo.SetPaging(TChunkCreateInfo::EPaged);
	DOTEST1((r = testChunk.Create(createInfo)), (r != KErrNoMemory));
	if (r != KErrNone)
		{
		return r;
		}
	TUint offset = 0;
	while(offset < chunkSize)
		{
		// Commit a page
		DOTEST1((r = testChunk.Commit(offset,gPageSize)), (r != KErrNoMemory));
		if (r != KErrNone)
			{
			return r;
			}

		// Write to the page
		TUint8* pageStart = testChunk.Base() + offset;
		*pageStart = 0xed;


		// Decommit the page
		r = testChunk.Decommit(offset, gPageSize);
		if (r != KErrNone)
			{
			return r;
			}

		offset += gPageSize;
		}


	testChunk.Close();
	return r;
	}

//
// PerformTestThread
//
// This is the function that actually does the work.
// It is complicated a little because test.Printf can only be called from the first thread that calls it,
// so if we are using multiple threads we need to use a message queue to pass the debug info from the
// child threads back to the parent for the parent to then call printf.
//
//

LOCAL_C TInt PerformTestThread(SPerformTestArgs& aTestArguments)
	{
	TInt r = KErrNone;
	TUint start = User::TickCount();

	DEBUG_PRINT1((_L("%S : thread Starting %d\n"), &gTestNameBuffer, aTestArguments.iThreadIndex));
	// now select how we do the test...
	TInt iterIndex = 0;


	if (TEST_ALL == (gTestWhichTests & TEST_ALL))
		{
#define LOCAL_ORDER_INDEX1 6
#define LOCAL_ORDER_INDEX2 4
		TInt order[LOCAL_ORDER_INDEX1][LOCAL_ORDER_INDEX2] = { {TEST_STACK, TEST_CHUNK, TEST_COMMIT, TEST_IPC},
															   {TEST_STACK, TEST_COMMIT, TEST_CHUNK, TEST_IPC},
															   {TEST_CHUNK, TEST_STACK, TEST_COMMIT, TEST_IPC},
															   {TEST_CHUNK, TEST_COMMIT, TEST_STACK, TEST_IPC},
															   {TEST_COMMIT, TEST_STACK, TEST_CHUNK, TEST_IPC},
															   {TEST_COMMIT, TEST_CHUNK, TEST_STACK, TEST_IPC}};
		TInt whichOrder = 0;
		iterIndex = 0;
		for (iterIndex = 0; iterIndex < gPerformTestLoop; iterIndex ++)
			{
			DEBUG_PRINT1((_L("iterIndex = %d\n"), iterIndex));
			TInt selOrder = ((aTestArguments.iThreadIndex + 1) * (iterIndex + 1)) % LOCAL_ORDER_INDEX1;
			for (whichOrder = 0; whichOrder < LOCAL_ORDER_INDEX2; whichOrder ++)
				{
				DEBUG_PRINT1((_L("whichOrder = %d\n"), whichOrder));
				switch (order[selOrder][whichOrder])
					{
					case TEST_STACK:
						DEBUG_PRINT1((_L("%S : %d Iter %d Stack\n"), &gTestNameBuffer, aTestArguments.iThreadIndex, iterIndex));
						r = TestStackPaging(aTestArguments);
						DEBUG_PRINT1((_L("ThreadId %d Finished TestStackPaging() r = %d\n"), aTestArguments.iThreadIndex, r));
						if (r != KErrNone)
							return r;
						break;

					case TEST_CHUNK:
						DEBUG_PRINT1((_L("%S : %d Iter %d Chunk\n"), &gTestNameBuffer, aTestArguments.iThreadIndex, iterIndex));
						r = TestChunkPaging(aTestArguments);
						DEBUG_PRINT1((_L("ThreadId %d Finished TestChunkPaging() r = %d\n"), aTestArguments.iThreadIndex, r));
						if (r != KErrNone)
							return r;
						break;

					case TEST_COMMIT:
						DEBUG_PRINT1((_L("%S : %d Iter %d Commit\n"), &gTestNameBuffer, aTestArguments.iThreadIndex, iterIndex));
						r = TestChunkCommit(aTestArguments);
						DEBUG_PRINT1((_L("ThreadId %d Finished TestChunkCommit() r = %d\n"), aTestArguments.iThreadIndex, r));
						if (r != KErrNone)
							return r;
						break;

					case TEST_IPC:

						if (gTestBadServer)
							{
							DEBUG_PRINT1((_L("%S : %d Iter %d IPC-BadServer\n"), &gTestNameBuffer, aTestArguments.iThreadIndex, iterIndex));
							r = TestIPCBadServer(aTestArguments);
							DEBUG_PRINT1((_L("ThreadId %d Finished TestIPCBadServer() r = %d\n"), aTestArguments.iThreadIndex, r));
							}
						else
							{
							DEBUG_PRINT1((_L("%S : %d Iter %d IPC\n"), &gTestNameBuffer, aTestArguments.iThreadIndex, iterIndex));
							// Limit the IPC pinning stuff to 2 loops else it will take a long time to run
							if (gNumTestThreads > 1 && gPerformTestLoop > 2)
								break;
							r = TestIPCPinning(aTestArguments);
							DEBUG_PRINT1((_L("ThreadId %d Finished TestIPCPinning() r = %d\n"), aTestArguments.iThreadIndex, r));
							if (r != KErrNone)
								return r;
							}
						break;

					default: // this is really an error.
						break;
					}
				iterIndex++;
				}
			}
		}
	else
		{
		if (gTestWhichTests & TEST_STACK)
			{
			for (iterIndex = 0; iterIndex < gPerformTestLoop; iterIndex ++)
				{
				DEBUG_PRINT1((_L("%S : %d Iter %d Stack\n"), &gTestNameBuffer, aTestArguments.iThreadIndex, iterIndex));
				r = TestStackPaging(aTestArguments);
				DEBUG_PRINT1((_L("ThreadId %d Finished TestStackPaging() r = %d\n"), aTestArguments.iThreadIndex, r));
				if (r != KErrNone)
					return r;
				}
			}

		if (gTestWhichTests & TEST_CHUNK)
			{
			for (iterIndex = 0; iterIndex < gPerformTestLoop; iterIndex ++)
				{
				DEBUG_PRINT1((_L("%S : %d Iter %d Chunk\n"), &gTestNameBuffer, aTestArguments.iThreadIndex, iterIndex));
				r = TestChunkPaging(aTestArguments);
				DEBUG_PRINT1((_L("ThreadId %d Finished TestChunkPaging() r = %d\n"), aTestArguments.iThreadIndex, r));
				if (r != KErrNone)
					return r;
				}
			}

		if (gTestWhichTests & TEST_COMMIT)
			{
			for (iterIndex = 0; iterIndex < gPerformTestLoop; iterIndex ++)
				{
				DEBUG_PRINT1((_L("%S : %d Iter %d Commit\n"), &gTestNameBuffer, aTestArguments.iThreadIndex, iterIndex));
				r = TestChunkCommit(aTestArguments);
				DEBUG_PRINT1((_L("ThreadId %d Finished TestChunkCommit() r = %d\n"), aTestArguments.iThreadIndex, r));
				if (r != KErrNone)
					return r;
				}
			}

		if (gTestWhichTests & TEST_IPC)
			{
			// In the multiple thread case limit the IPC test to 2 loops else it will take a long time
			TInt loops = (gPerformTestLoop <= 2 && gNumTestThreads) ? gPerformTestLoop : 2;
			for (iterIndex = 0; iterIndex < loops; iterIndex ++)
				{
				if (gTestBadServer)
					{
					r = TestIPCBadServer(aTestArguments);
					DEBUG_PRINT1((_L("ThreadId %d Finished TestIPCBadServer() r = %d\n"), aTestArguments.iThreadIndex, r));
					}
				else
					{
					DEBUG_PRINT1((_L("%S : %d Iter %d IPC\n"), &gTestNameBuffer, aTestArguments.iThreadIndex, iterIndex));
					r = TestIPCPinning(aTestArguments);
					DEBUG_PRINT1((_L("ThreadId %d Finished TestIPCPinning() r = %d\n"), aTestArguments.iThreadIndex, r));
					if (r != KErrNone)
						return r;
					}
				}
			}
		}

	DEBUG_PRINT1((_L("%S : thread Exiting %d (tickcount %u)\n"), &gTestNameBuffer, aTestArguments.iThreadIndex, (User::TickCount() - start)));
	return r;
	}


//
// MultipleTestThread
//
// Thread function, one created for each thread in a multiple thread test.
//

LOCAL_C TInt MultipleTestThread(TAny* aTestArgs)
	{
	TInt r = KErrNone;
	TBuf<64> localBuffer;

	if (gTestInterleave)
		{
		RThread thisThread;
		thisThread.SetPriority((TThreadPriority) TEST_INTERLEAVE_PRIO);
		}

	SPerformTestArgs& testArgs = *(SPerformTestArgs*)aTestArgs;
	testArgs.iBuffer = &localBuffer;

	RDBGD_PRINT(("Performing test thread ThreadID(%d)\n", testArgs.iThreadIndex));
	r = PerformTestThread(testArgs);

	return r;
	}



//
// FindMMCDriveNumber
//
// Find the first read write drive.
//

TInt FindMMCDriveNumber(RFs& aFs)
	{
	TDriveInfo driveInfo;
	for (TInt drvNum = 0; drvNum < KMaxDrives; drvNum++)
		{
		if (aFs.Drive(driveInfo, drvNum) >= 0)
			{
			if (driveInfo.iType == EMediaHardDisk)
				return (drvNum);
			}
		}
	return -1;
	}

//
// PerformRomAndFileSystemAccess
//
// Access the rom and dump it out to one of the writeable partitions...
// really just to make the media server a little busy during the test.
//

TInt PerformRomAndFileSystemAccessThread(SPerformTestArgs& aTestArguments)
	{
	TUint maxBytes = KMaxTUint;
	TInt startTime = User::TickCount();

	RFs fs;
	RFile file;
	if (KErrNone != fs.Connect())
		{
		DEBUG_PRINT(_L("PerformRomAndFileSystemAccessThread : Can't connect to the FS\n"));
		return KErrGeneral;
		}

	// get info about the ROM...
	TRomHeader* romHeader = (TRomHeader*)UserSvr::RomHeaderAddress();
	TUint8* start;
	TUint8* end;
	if(romHeader->iPageableRomStart)
		{
		start = (TUint8*)romHeader + romHeader->iPageableRomStart;
		end = start + romHeader->iPageableRomSize;
		}
	else
		{
		start = (TUint8*)romHeader;
		end = start + romHeader->iUncompressedSize;
		}
	if (end <= start)
		return KErrGeneral;

	// read all ROM pages in a random order...and write out to file in ROFs,
	TUint size = end - start - gPageSize;
	if(size > maxBytes)
		size = maxBytes;

	TUint32 random = 1;
	TPtrC8 rom;
	TUint8 *theAddr;

	//TInt drvNum = TestBootedFromMmc ? FindMMCDriveNumber(fs) : FindFsNANDDrive(fs);
	TInt drvNum = FindMMCDriveNumber(fs);
	TBuf<32> filename = _L("d:\\Pageldrtst.tmp");
	if (drvNum >= 0)
		{
		filename[0] = (TUint16)('a' + drvNum);
		DEBUG_PRINT1((_L("%S : Filename %S\n"), &gTestNameBuffer, &filename));
		}
	else
		DEBUG_PRINT((_L("PerformRomAndFileSystemAccessThread : error getting drive num\n")));

	for(TInt i = (size >> gPageShift); i > 0; --i)
		{
		DEBUG_PRINT1((_L("%S : Opening the file\n"), &gTestNameBuffer));
		if (KErrNone != file.Replace(fs, filename, EFileWrite))
			{
			DEBUG_PRINT1((_L("%S : Opening the file Failed!\n"), &gTestNameBuffer));
			}

		random = random * 69069 + 1;
		theAddr = (TUint8*)(start+((TInt64(random)*TInt64(size))>>32));
		if (theAddr + gPageSize > end)
			{
			DEBUG_PRINT1((_L("%S : address is past the end 0x%x / 0x%x\n"), &gTestNameBuffer, (TInt)theAddr, (TInt)end));
			}
		rom.Set(theAddr,gPageSize);
		DEBUG_PRINT1((_L("%S : Writing the file\n"), &gTestNameBuffer));
		TInt ret = file.Write(rom);
		if (ret != KErrNone)
			{
			DEBUG_PRINT1((_L("%S : Write returned error %d\n"), &gTestNameBuffer, ret));
			}
		DEBUG_PRINT1((_L("%S : Closing the file\n"), &gTestNameBuffer));
		file.Close();

		DEBUG_PRINT1((_L("%S : Deleting the file\n"), &gTestNameBuffer));
		ret = fs.Delete(filename);
		if (KErrNone != ret)
			{
			DEBUG_PRINT1((_L("%S : Delete returned error %d\n"), &gTestNameBuffer, ret));
			}
		if (gTestStopMedia)
			break;
		}
	fs.Close();
	DEBUG_PRINT1((_L("Done in %d ticks\n"), User::TickCount() - startTime));
	return KErrNone;
	}


//
// PerformRomAndFileSystemAccess
//
// Thread function, kicks off the file system access.
//

LOCAL_C TInt PerformRomAndFileSystemAccess(TAny* aTestArgs)
	{
	TBuf<64> localBuffer;

	SPerformTestArgs& testArgs = *(SPerformTestArgs*)aTestArgs;
	testArgs.iBuffer = &localBuffer;

	PerformRomAndFileSystemAccessThread(testArgs);

	return KErrNone;
	}




//
// StartFlushing
//
// Create a thread that will continuously flush the paging cache
//
void StartFlushing(TRequestStatus &aStatus, RThread &aFlushThread, TBool aLowMem = EFalse)
	{
	TInt ret;
	gTestRunning = ETrue;

	TThreadCreateInfo flushThreadInfo(_L("FlushThread"), FlushFunc, KDefaultStackSize, NULL);
	flushThreadInfo.SetCreateHeap(KMinHeapSize, KMinHeapSize);

	if (!aLowMem)
		{
		test_KErrNone(aFlushThread.Create(flushThreadInfo));
		}
	else
		{
		DOTEST((ret = aFlushThread.Create(flushThreadInfo)), (ret != KErrNoMemory));
		test_KErrNone(ret);
		}


	aFlushThread.Logon(aStatus);

	aFlushThread.Resume();
	}

//
// FinishFlushing
//
// Close the thread flushing the paging cache
//
void FinishFlushing(TRequestStatus &aStatus, RThread &aFlushThread)
	{
	gTestRunning = EFalse;
	User::WaitForRequest(aStatus);
	// TO DO: Check exit type
	CLOSE_AND_WAIT(aFlushThread);
	}


//
// ResetResults
//
// Clear the previous results from the results array
//
TInt ResetResults()
	{
	for (TUint i = 0; i < KMaxTestThreads; i++)
		{
		gResultsArray[i].iExitType = KExitTypeReset;
		gResultsArray[i].iExitReason = KErrNone;
		}
	return KErrNone;
	}


//
// CheckResults
//
// Check that the results are as expected
//
TInt CheckResults()
	{
	TUint i;
	for (i = 0; i < KMaxTestThreads; i++)
		{
		if (gResultsArray[i].iExitType == KExitTypeReset)
			continue;
		RDBGD_PRINT(("%S : Thread %d ExitType(%d) ExitReason(%d)...\n",
					&gTestNameBuffer, i, gResultsArray[i].iExitType, gResultsArray[i].iExitReason));
		}

	for (i = 0; i < KMaxTestThreads; i++)
		{
		if (gResultsArray[i].iExitType == KExitTypeReset)
			continue;

		if (gResultsArray[i].iExitType != EExitKill)
			{
			RDBGS_PRINT(("Thread %d ExitType(%d) Expected(%d)\n", i, gResultsArray[i].iExitType, EExitKill));
			return KErrGeneral;
			}

		// Allow for No Memory as we can run out of memory due to the high number of threads, and
		// Overflow as the number of chunks that can be created on the moving memory model is capped
		if (gResultsArray[i].iExitReason != KErrNone &&
			gResultsArray[i].iExitReason != KErrNoMemory &&
			gResultsArray[i].iExitReason != KErrOverflow)
			{
			RDBGS_PRINT(("Thread %d ExitReason(%d) Expected either %d, %d or %d\n",
						i, gResultsArray[i].iExitReason, KErrNone, KErrNoMemory, KErrOverflow));
			return KErrGeneral;
			}
		}
	return KErrNone;
	}


//
// PrintOptions
//
// Print out the options of the test
//
void PrintOptions()
	{
	SVMCacheInfo tempPages;
	if (gIsDemandPaged)
		{
		UserSvr::HalFunction(EHalGroupVM,EVMHalGetCacheSize,&tempPages,0);
		test.Printf(_L("PerformAutoTest : Start cache info: iMinSize 0x%x iMaxSize 0x%x iCurrentSize 0x%x iMaxFreeSize 0x%x\n"),
					tempPages.iMinSize, tempPages.iMaxSize, tempPages.iCurrentSize, tempPages.iMaxFreeSize);
		}

	test.Printf(_L("Loops (%d), Threads (%d), Tests: "), gPerformTestLoop, gNumTestThreads);
	if (TEST_ALL == (gTestWhichTests & TEST_ALL))
		{
		test.Printf(_L("All, "));
		}
	else if (gTestWhichTests & TEST_STACK)
		{
		test.Printf(_L("Stack, "));
		}
	else if (gTestWhichTests & TEST_CHUNK)
		{
		test.Printf(_L("Chunk, "));
		}
	else if (gTestWhichTests & TEST_COMMIT)
		{
		test.Printf(_L("Commit, "));
		}
	else if (gTestWhichTests & TEST_IPC)
		{
		test.Printf(_L("IPC Pinning, "));
		}
	else
		{
		test.Printf(_L("?, "));
		}
	test.Printf(_L("\nOptions: "));

	if(gTestInterleave)
		test.Printf(_L("Interleave "));
	if(gTestPrioChange)
		test.Printf(_L("Priority "));
	if(gTestMediaAccess)
		test.Printf(_L("Media"));
	if(gTestBadServer)
		test.Printf(_L("BadServer"));
	test.Printf(_L("\n"));
	}

// DoMultipleTest
//
// Perform the multiple thread test, spawning a number of threads.
// It is complicated a little because test.Printf can only be called from the first thread that calls it,
// so if we are using multiple threads we need to use a message queue to pass the debug info from the
// child threads back to the parent for the parent to then call printf.
//
TInt DoMultipleTest(TBool aLowMem = EFalse)
	{
	SVMCacheInfo tempPages;
	memset(&tempPages, 0, sizeof(tempPages));

	if (gIsDemandPaged)
		{
		// get the old cache info
		UserSvr::HalFunction(EHalGroupVM,EVMHalGetCacheSize,&tempPages,0);
		// set the cache to our test value
		UserSvr::HalFunction(EHalGroupVM,EVMHalSetCacheSize,(TAny*)tempPages.iMinSize,(TAny*)(tempPages.iMaxSize * gNumTestThreads));
		}

	if (!TestSilent)
		PrintOptions();

	TUint startTime = User::TickCount();
	TInt index;
	TInt ret = KErrNone;
	TBuf<16> multiThreadName;
	TBuf<16> rerunThreadName;

	ResetResults();

	TRequestStatus flushStatus;
	RThread flushThread;
	StartFlushing(flushStatus, flushThread, aLowMem);

	DOTEST((gThreadHeap = User::ChunkHeap(NULL, 0x1000, 0x1000)), (gThreadHeap != NULL));
	test_NotNull(gThreadHeap);

	DOTEST((gStackHeap = User::ChunkHeap(NULL, 0x1000, 0x1000)), (gStackHeap != NULL));
	test_NotNull(gStackHeap);

	TThreadCreateInfo *pThreadCreateInfo = (TThreadCreateInfo *)User::AllocZ(sizeof(TThreadCreateInfo) * gNumTestThreads);
	RThread *pTheThreads = (RThread *)User::AllocZ(sizeof(RThread) * gNumTestThreads);
	TInt *pThreadInUse = (TInt *)User::AllocZ(sizeof(TInt) * gNumTestThreads);

	TRequestStatus mediaStatus;
	RThread mediaThread;


	DOTEST((ret = TestMsgQueue.CreateLocal(gNumTestThreads * 10, EOwnerProcess)),
			(KErrNone == ret));

	DOTEST((ret = TestMultiSem.CreateLocal(1)),
			(KErrNone == ret));

	// make sure we have a priority higher than that of the threads we spawn...
	RThread thisThread;
	TThreadPriority savedThreadPriority = thisThread.Priority();
	const TThreadPriority KMainThreadPriority = EPriorityMuchMore;
	__ASSERT_COMPILE(KMainThreadPriority>TEST_INTERLEAVE_PRIO);
	thisThread.SetPriority(KMainThreadPriority);

	SPerformTestArgs mediaArgs;
	mediaArgs.iMsgQueue = &TestMsgQueue;
	mediaArgs.iTheSem = &TestMultiSem;
	mediaArgs.iLowMem = aLowMem;

	if (gTestMediaAccess)
		{
		TThreadCreateInfo mediaInfo(_L(""),PerformRomAndFileSystemAccess,KDefaultStackSize,(TAny*)&mediaArgs);
		mediaInfo.SetUseHeap(NULL);
		mediaInfo.SetPaging(TThreadCreateInfo::EPaged);
		gTestStopMedia = EFalse;
		ret = mediaThread.Create(mediaInfo);
		if (ret != KErrNone)
			return ret;
		mediaThread.Logon(mediaStatus);
		RUNTEST1(mediaStatus == KRequestPending);
		mediaThread.Resume();
		}

	TThreadCreateInfo** infoPtrs = new TThreadCreateInfo*[gNumTestThreads];
	if (infoPtrs == NULL)
		return KErrNoMemory;

	SPerformTestArgs *testArgs = new SPerformTestArgs[gNumTestThreads];
	if (testArgs == NULL)
		return KErrNoMemory;

	Mem::FillZ(testArgs, gNumTestThreads * sizeof(SPerformTestArgs));

	for (index = 0; index < gNumTestThreads; index++)
		{
		RDBGD_PRINT(("%S : Starting thread.%d!\n", &gTestNameBuffer, index));
		multiThreadName = _L("TestThread_");
		multiThreadName.AppendNum(index);

		testArgs[index].iThreadIndex = index;
		testArgs[index].iMsgQueue = &TestMsgQueue;
		testArgs[index].iTheSem = &TestMultiSem;
		testArgs[index].iLowMem = aLowMem;

		RDBGD_PRINT(("Creating thread.%d!\n", index));
		infoPtrs[index] = new TThreadCreateInfo(multiThreadName, MultipleTestThread, KDefaultStackSize, (TAny*)&testArgs[index]);
		if (infoPtrs[index] == NULL)
			continue;
		infoPtrs[index]->SetCreateHeap(KMinHeapSize, KMinHeapSize);
		infoPtrs[index]->SetPaging(TThreadCreateInfo::EPaged);
		//infoPtrs[index]->SetUseHeap(gThreadHeap);
		DOTEST((ret = pTheThreads[index].Create(*infoPtrs[index])), (ret != KErrNoMemory));
		if (ret != KErrNone)
			continue;
		pTheThreads[index].Resume();
		pThreadInUse[index] = 1;
		}

	// now process any messages sent from the child threads.
	TBool anyUsed = ETrue;
	TBuf<64> localBuffer;
	while(anyUsed)
		{
		anyUsed = EFalse;
		// check the message queue and call printf if we get a message.
		while (KErrNone == TestMsgQueue.Receive(localBuffer))
			{
			if (!TestSilent)
				test.Printf(localBuffer);
			}

		// walk through the thread list to check which are still alive.
		for (index = 0; index < gNumTestThreads; index++)
			{
			if (pThreadInUse[index])
				{
				if (pTheThreads[index].ExitType() != EExitPending)
					{
					if (aLowMem &&
						pTheThreads[index].ExitType() == EExitKill &&
						pTheThreads[index].ExitReason() == KErrNoMemory &&
						Ldd.DoReleaseSomeRam(TEST_LM_BLOCKS_FREE) == KErrNone)
						{// If the thread was killed with no memory in a low mem scenario
						// then release some RAM and restart the thread again
						anyUsed = ETrue;
						RDBGD_PRINT(("Thread index %d EExitKill KErrNoMemory\n", index));
						CLOSE_AND_WAIT(pTheThreads[index]);

						RDBGD_PRINT(("Re-running Thread index %d\n", index));
						rerunThreadName = _L("RRTestThread_");
						rerunThreadName.AppendNum(index);

						delete infoPtrs[index];
						infoPtrs[index] = new TThreadCreateInfo(rerunThreadName, MultipleTestThread, KDefaultStackSize, (TAny*)&testArgs[index]);
						if (infoPtrs[index] == NULL)
							continue;
						infoPtrs[index]->SetCreateHeap(KMinHeapSize, KMinHeapSize);
						infoPtrs[index]->SetPaging(TThreadCreateInfo::EPaged);
						//infoPtrs[index]->SetUseHeap(gThreadHeap);
						ret = pTheThreads[index].Create(*infoPtrs[index]);
						if (ret != KErrNone)
							continue;
						pTheThreads[index].Resume();
						pThreadInUse[index] = 1;
						continue;
						}
					if (pTheThreads[index].ExitType() == EExitPanic)
						{
						RDBGD_PRINT(("%S : Thread Panic'd %d...\n", &gTestNameBuffer, index));
						}

					//Store the results but let all the threads finish
					gResultsArray[index].iExitType = pTheThreads[index].ExitType();
					gResultsArray[index].iExitReason = pTheThreads[index].ExitReason();

					pThreadInUse[index] = EFalse;
					pTheThreads[index].Close();
					}
				else
					{
					anyUsed = ETrue;
					}
				}
			}

		User::AfterHighRes(50000);
		}

	if (gTestMediaAccess)
		{
		gTestStopMedia = ETrue;
		RDBGD_PRINT(("%S : Waiting for media thread to exit...\n", &gTestNameBuffer));
		User::WaitForRequest(mediaStatus);
		mediaThread.Close();
		}

	TestMsgQueue.Close();
	TestMultiSem.Close();

	// cleanup the resources and exit.
	User::Free(pTheThreads);
	User::Free(pThreadInUse);
	User::Free(pThreadCreateInfo);
	delete[] infoPtrs;
	delete[] testArgs;


	FinishFlushing(flushStatus, flushThread);
	gThreadHeap->Close();
	gStackHeap->Close();
	thisThread.SetPriority(savedThreadPriority);
	ret = CheckResults();
	RDBGS_PRINT(("Test Complete (%u ticks)\n", User::TickCount() - startTime));

	if (gIsDemandPaged)
		{
		// put the cache back to the original values.
		UserSvr::HalFunction(EHalGroupVM,EVMHalSetCacheSize,(TAny*)tempPages.iMinSize,(TAny*)tempPages.iMaxSize);
		}
	return ret;
	}


//
// DoSingleTest
//
// Perform the single thread test.
//

LOCAL_C TInt DoSingleTest(TBool aLowMem = EFalse)
	{
	TUint origThreadCount = gNumTestThreads;
	gNumTestThreads = 1;
	TInt r = DoMultipleTest(aLowMem);
	gNumTestThreads = origThreadCount;
	return r;
	}



//
// ParseCommandLine
//
// Read the arguments passed from the command line and set global variables to
// control the tests.
//

TBool ParseCommandLine()
    {
    TBuf<256> args;
    User::CommandLine(args);
    TLex lex(args);
    TBool retVal = ETrue;

    // Initially test for arguments, then parse them; if none are found apply some sensible defaults.
    TBool foundArgs = EFalse;

    FOREVER
        {
        TPtrC token = lex.NextToken();
        if (token.Length() != 0)
            {
            if ((token == _L("help")) || (token == _L("-h")) || (token == _L("-?")))
                {
                RDBGS_PRINT(("\nUsage: %S\n", &gTestNameBuffer));
                RDBGS_PRINT(("\ndebug: Prints out tracing in the test"));
                RDBGS_PRINT(("\n[single | multiple <numThreads>] : Specify to run in a single thread or multiple threads and how many"));
                RDBGS_PRINT(("\n[ipc | stack | chunk | commit | all | badserver] : which type of test to run"));
                RDBGS_PRINT(("\n-> ipc: IPC Pinning tests"));
                RDBGS_PRINT(("\n-> stack: Stack paging tests"));
                RDBGS_PRINT(("\n-> chunk: Chunk paging tests"));
                RDBGS_PRINT(("\n-> commit: Chunk committing tests"));
                RDBGS_PRINT(("\n-> all: All the above tests"));
                RDBGS_PRINT(("\n-> badserver: IPC Pinning tests with a dead server"));
                RDBGS_PRINT(("\n[iters <count>] : Number of loops each test should perform"));
                RDBGS_PRINT(("\n[media] : Perform multiple test with media activity in the background"));
                RDBGS_PRINT(("\n[lowmem] : Perform testing in low memory situations"));
                RDBGS_PRINT(("\n[interleave] : Perform test with thread interleaving\n\n"));
                test.Getch();
                TestExit = ETrue;
                break;
                }
            else if (token == _L("debug"))
                {
                if (!TestSilent)
                    {
                    TestDebug = ETrue;
                    gTestPrioChange = ETrue;
                    }
                }
            else if (token == _L("silent"))
                {
                TestSilent = ETrue;
                TestDebug = EFalse;
                }
            else if (token == _L("single"))
                {
                gTestType = ETestSingle;
                }
            else if (token == _L("multiple"))
                {
                TPtrC val = lex.NextToken();
                TLex lexv(val);
                TInt value;

                if (lexv.Val(value) == KErrNone)
                    {
                    if ((value <= 0) || (value > (TInt)KMaxTestThreads))
                        {
                        gNumTestThreads = KMaxTestThreads;
                        }
                    else
                        {
                        gNumTestThreads = value;
                        }
                    }
                else
                    {
                    RDBGS_PRINT(("Bad value for thread count '%S' was ignored.\n", &val));
                    }
                gTestType = ETestMultiple;
                }
            else if (token == _L("prio"))
                {
                gTestPrioChange = !gTestPrioChange;
                }
            else if (token == _L("lowmem"))
                {
                gTestType = ETestLowMem;
                }
            else if (token == _L("media"))
                {
                gTestType = ETestMedia;
                }
            else if (token == _L("stack"))
                {
                gSetTests = TEST_STACK;
                }
            else if (token == _L("chunk"))
                {
                gSetTests = TEST_CHUNK;
                }
            else if (token == _L("commit"))
                {
                gTestType = ETestCommit;
                gSetTests = TEST_COMMIT;
                }
            else if (token == _L("ipc"))
                {
                gSetTests = TEST_IPC;
                }
            else if (token == _L("badserver"))
                {
                gTestType = ETestBadServer;
                }
            else if (token == _L("all"))
                {
                gSetTests = TEST_ALL;
                }
            else if (token == _L("iters"))
                {
                TPtrC val = lex.NextToken();
                TLex lexv(val);
                TInt value;

                if (lexv.Val(value) == KErrNone)
                    {
                    gPerformTestLoop = value;
                    }
                else
                    {
                    RDBGS_PRINT(("Bad value for loop count '%S' - aborting argument parsing.\n", &val));
                    retVal = EFalse;
                    break;
                    }
                }
            else if (token == _L("interleave"))
                {
                gTestType = ETestInterleave;
                }
            else
                {
                if ((foundArgs == EFalse) && (token.Length() == 1))
                    {
                    // Single letter argument...only run on 'd'
                    if (token.CompareF(_L("d")) == 0)
                        {
                        break;
                        }
                    else
                        {
                        if (!TestSilent)
                            {
                            test.Title();
                            test.Start(_L("Skipping non drive 'd' - Test Exiting."));
                            test.End();
                            }
                        foundArgs = ETrue;
                        TestExit = ETrue;
                        break;
                        }
                    }
                RDBGS_PRINT(("Unknown argument '%S' was ignored.\n", &token));
                break;
                }
            foundArgs = ETrue;
            }
        else
            {
            break;
            }
        }
    if (!foundArgs)
        {
        retVal = EFalse;
        }
    return retVal;
    }

//
// AreWeTheTestBase
//
// Test whether we are the root of the tests.
//

void AreWeTheTestBase(void)
    {
    if (!TestSilent)
        {
        TFileName filename(RProcess().FileName());

        TParse myParse;
        myParse.Set(filename, NULL, NULL);
        gTestNameBuffer.Zero();
        gTestNameBuffer.Append(myParse.Name());
        gTestNameBuffer.Append(_L(".exe"));

        TestWeAreTheTestBase = !gTestNameBuffer.Compare(_L("t_wdpstress.exe"));
        }
    else
        {
        gTestNameBuffer.Zero();
        gTestNameBuffer.Append(_L("t_wdpstress.exe"));
        }
    }

//
// PerformAutoTest
//
// Perform the autotest
//
TInt PerformAutoTest()
    {
    TInt r = KErrNone;

    // Run all the different types of test
    for (TUint testType = 0; testType < ETestTypes; testType++)
        {
        r = DoTest(testType);
        if (r != KErrNone)
            return r;
        }

    return r;
    }

//
// DoLowMemTest
//
// Low Memory Test
//

TInt DoLowMemTest()
    {
    TInt r = User::LoadLogicalDevice(KPageStressTestLddName);
    RUNTEST1(r == KErrNone || r == KErrAlreadyExists);
    RUNTEST(Ldd.Open(), KErrNone);

    SVMCacheInfo tempPages;
    memset(&tempPages, 0, sizeof(tempPages));

    if (gIsDemandPaged)
        {
        // get the old cache info
        UserSvr::HalFunction(EHalGroupVM, EVMHalGetCacheSize, &tempPages, 0);
        TInt minSize = 8 << gPageShift;
        TInt maxSize = 256 << gPageShift;
        UserSvr::HalFunction(EHalGroupVM, EVMHalSetCacheSize, (TAny*)minSize, (TAny*)maxSize);
        }

    // First load some pages onto the page cache
    gPerformTestLoop = 1;
    r = DoTest(ETestSingle);
    test_KErrNone(r);

    Ldd.DoConsumeRamSetup(TEST_LM_NUM_FREE, TEST_LM_BLOCKSIZE);
    TEST_NEXT((_L("Single thread with Low memory.")));
    gNumTestThreads = KMaxTestThreads / 2;
    gPerformTestLoop = 20;

    r = DoTest(ETestSingle, ETrue);
    Ldd.DoConsumeRamFinish();
    test_KErrNone(r);

    TEST_NEXT((_L("Multiple thread with Low memory.")));
    // First load some pages onto the page cache
    gPerformTestLoop = 1;
    r = DoTest(ETestSingle);
    test_KErrNone(r);

    Ldd.DoConsumeRamSetup(TEST_LM_NUM_FREE, TEST_LM_BLOCKSIZE);

    gPerformTestLoop = 10;
    gNumTestThreads = KMaxTestThreads / 2;
    r = DoTest(ETestMultiple, ETrue);
    Ldd.DoConsumeRamFinish();
    test_KErrNone(r);

    TEST_NEXT((_L("Multiple thread with Low memory, with starting free ram.")));
    // First load some pages onto the page cache
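    // (a single pass of the single-threaded test pre-loads pages into the paging
    // cache before the driver is asked to consume the remaining free RAM)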
    gPerformTestLoop = 1;
    r = DoTest(ETestSingle);
    test_KErrNone(r);

    Ldd.DoConsumeRamSetup(32, TEST_LM_BLOCKSIZE);

    gPerformTestLoop = 10;
    gNumTestThreads = KMaxTestThreads / 2;
    r = DoTest(ETestMultiple, ETrue);
    Ldd.DoConsumeRamFinish();
    test_KErrNone(r);

    TEST_NEXT((_L("Close test driver")));
    Ldd.Close();
    RUNTEST(User::FreeLogicalDevice(KPageStressTestLddName), KErrNone);
    if (gIsDemandPaged)
        {
        TInt minSize = tempPages.iMinSize;
        TInt maxSize = tempPages.iMaxSize;
        UserSvr::HalFunction(EHalGroupVM, EVMHalSetCacheSize, (TAny*)minSize, (TAny*)maxSize);
        }

    return r;
    }

void RestoreDefaults()
    {
    gPerformTestLoop = 10;
    gNumTestThreads = KMaxTestThreads;

    gTestInterleave = EFalse;

    gTestWhichTests = gSetTests;
    gTestPrioChange = EFalse;
    gTestStopMedia = EFalse;
    gTestMediaAccess = EFalse;
    }



TInt DoTest(TInt aTestType, TBool aLowMem)
    {
    TInt r = KErrNone;

    switch (aTestType)
        {
        case ETestSingle:
            TEST_NEXT((_L("Single thread")));
            r = DoSingleTest(aLowMem);
            break;

        case ETestMultiple:
            TEST_NEXT((_L("Multiple thread")));
            r = DoMultipleTest(aLowMem);
            break;

        case ETestLowMem:
            TEST_NEXT((_L("Low Memory Tests")));
            r = DoLowMemTest();
            break;

        case ETestMedia:
            TEST_NEXT((_L("Background Media Activity Tests")));
            gTestMediaAccess = ETrue;
            gPerformTestLoop = 2;
            gNumTestThreads = KMaxTestThreads / 2;
            r = DoMultipleTest(aLowMem);
            break;

        case ETestCommit:
            TEST_NEXT((_L("Committing and Decommitting Tests")));
            gTestWhichTests = TEST_COMMIT;
            r = DoSingleTest(aLowMem);
            break;

        case ETestInterleave:
            TEST_NEXT((_L("Testing multiple with thread interleaving")));
            gTestInterleave = ETrue;
            r = DoMultipleTest(aLowMem);
            break;

        case ETestBadServer:
            TEST_NEXT((_L("IPC pinning tests with a dead server")));
            gTestBadServer = ETrue;
            gTestWhichTests = TEST_IPC;
            r = DoSingleTest(aLowMem);
            break;
        }
    RestoreDefaults();
    return r;
    }

//
// E32Main
//
// Main entry point.
//

TInt E32Main()
    {
#ifndef TEST_ON_UNPAGED
    TRomHeader* romHeader = (TRomHeader*)UserSvr::RomHeaderAddress();
    if (!romHeader->iPageableRomStart)
        {
        gIsDemandPaged = EFalse;
        }
#endif
    TUint start = User::TickCount();

    gResultsArray = (SThreadExitResults*)User::AllocZ(sizeof(SThreadExitResults) * KMaxTestThreads);
    if (gResultsArray == NULL)
        return KErrNoMemory;

    AreWeTheTestBase();
    RestoreDefaults();

    TBool parseResult = ParseCommandLine();

    if (TestExit)
        {
        return KErrNone;
        }

    // Retrieve the page size and use it to determine the page shift (assumes a 32-bit system).
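    // For example, a 4096-byte page gives gPageShift == 12 (4096 == 1 << 12); the
    // paging cache limits below are then page counts scaled up to byte sizes,
    // e.g. 8 << gPageShift and 64 << gPageShift.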
    TInt r = HAL::Get(HAL::EMemoryPageSize, gPageSize);
    if (r != KErrNone)
        {
        RDBGS_PRINT(("Cannot obtain the page size\n"));
        return r;
        }
    else
        {
        RDBGS_PRINT(("page size = %d\n", gPageSize));
        }

    TUint32 pageMask = gPageSize;
    TUint i = 0;
    for (; i < 32; i++)
        {
        if (pageMask & 1)
            {
            if (pageMask & ~1u)
                {
                test.Printf(_L("ERROR - page size not a power of 2"));
                return KErrNotSupported;
                }
            gPageShift = i;
            break;
            }
        pageMask >>= 1;
        }

    TInt minSize = 8 << gPageShift;
    TInt maxSize = 64 << gPageShift;
    SVMCacheInfo tempPages;
    memset(&tempPages, 0, sizeof(tempPages));
    if (gIsDemandPaged)
        {
        // get the old cache info
        UserSvr::HalFunction(EHalGroupVM, EVMHalGetCacheSize, &tempPages, 0);
        // set the cache to our test value
        UserSvr::HalFunction(EHalGroupVM, EVMHalSetCacheSize, (TAny*)minSize, (TAny*)maxSize);
        }

    if (!TestSilent)
        {
        test.Title();
        test.Start(_L("Writable Data Paging stress tests..."));
        test.Printf(_L("%S\n"), &gTestNameBuffer);
        }

    if (parseResult)
        {
        if (!TestSilent)
            {
            extern TInt* CheckLdmiaInstr(void);
            test.Printf(_L("%S : CheckLdmiaInstr\n"), &gTestNameBuffer);
            TInt* theAddr = CheckLdmiaInstr();
            test.Printf(_L("%S : CheckLdmiaInstr complete 0x%x...\n"), &gTestNameBuffer, (TInt)theAddr);
            }

        if (gTestType < 0 || gTestType >= ETestTypeEnd)
            {
            r = PerformAutoTest();
            test_KErrNone(r);
            }
        else
            {
            r = DoTest(gTestType);
            test_KErrNone(r);
            }
        }
    else
        {
        r = PerformAutoTest();
        test_KErrNone(r);
        }

    if (gIsDemandPaged)
        {
        minSize = tempPages.iMinSize;
        maxSize = tempPages.iMaxSize;
        // put the cache back to the original values.
        UserSvr::HalFunction(EHalGroupVM, EVMHalSetCacheSize, (TAny*)minSize, (TAny*)maxSize);
        }

    if (!TestSilent)
        {
        test.Printf(_L("%S : Complete (%u ticks)\n"), &gTestNameBuffer, User::TickCount() - start);
        test.End();
        }

    User::Free(gResultsArray);
    gResultsArray = NULL;

    return 0;
    }