sl@0: // Copyright (c) 2009 Nokia Corporation and/or its subsidiary(-ies). sl@0: // All rights reserved. sl@0: // This component and the accompanying materials are made available sl@0: // under the terms of the License "Eclipse Public License v1.0" sl@0: // which accompanies this distribution, and is available sl@0: // at the URL "http://www.eclipse.org/legal/epl-v10.html". sl@0: // sl@0: // Initial Contributors: sl@0: // Nokia Corporation - initial contribution. sl@0: // sl@0: // Contributors: sl@0: // sl@0: // Description: sl@0: // e32test\demandpaging\t_pagetable_limit.cpp sl@0: // Tests to expose the limit of page table virtual address space. sl@0: // sl@0: // sl@0: sl@0: //! @SYMTestCaseID KBASE-T_PAGETABLE_LIMIT sl@0: //! @SYMTestType UT sl@0: //! @SYMPREQ PREQ1490 sl@0: //! @SYMTestCaseDesc Tests to expose the limit of page table virtual address space. sl@0: //! @SYMTestActions Test that a paged page table can always be acquired. sl@0: //! @SYMTestExpectedResults All tests should pass. sl@0: //! @SYMTestPriority High sl@0: //! @SYMTestStatus Implemented sl@0: sl@0: #define __E32TEST_EXTENSION__ sl@0: #include sl@0: #include sl@0: #include sl@0: #include sl@0: #include sl@0: sl@0: #include "t_dpcmn.h" sl@0: sl@0: RTest test(_L("T_PAGETABLE_LIMIT")); sl@0: sl@0: sl@0: _LIT(KClientPtServerName, "CClientPtServer"); sl@0: _LIT(KClientProcessName, "T_PAGETABLE_LIMIT"); sl@0: sl@0: enum TClientMsgType sl@0: { sl@0: EClientConnect = -1, sl@0: EClientDisconnect = -2, sl@0: EClientGetChunk = 0, sl@0: EClientReadChunks = 1, sl@0: }; sl@0: sl@0: class RDataPagingSession : public RSessionBase sl@0: { sl@0: public: sl@0: TInt CreateSession(const TDesC& aServerName, TInt aMsgSlots) sl@0: { sl@0: return RSessionBase::CreateSession(aServerName,User::Version(),aMsgSlots); sl@0: } sl@0: TInt PublicSendReceive(TInt aFunction, const TIpcArgs &aPtr) sl@0: { sl@0: return (SendReceive(aFunction, aPtr)); sl@0: } sl@0: TInt PublicSend(TInt aFunction, const TIpcArgs &aPtr) sl@0: { sl@0: return (Send(aFunction, aPtr)); sl@0: } sl@0: }; sl@0: sl@0: sl@0: TInt ClientProcess(TInt aLen) sl@0: { sl@0: // Read the command line to get the number of chunk to map and whether or sl@0: // not to access their data. sl@0: HBufC* buf = HBufC::New(aLen); sl@0: test(buf != NULL); sl@0: TPtr ptr = buf->Des(); sl@0: User::CommandLine(ptr); sl@0: sl@0: TLex lex(ptr); sl@0: TInt chunkCount; sl@0: TInt r = lex.Val(chunkCount); sl@0: test_KErrNone(r); sl@0: lex.SkipSpace(); sl@0: sl@0: TBool accessData; sl@0: r = lex.Val(accessData); sl@0: test_KErrNone(r); sl@0: sl@0: sl@0: RDataPagingSession session; sl@0: test_KErrNone(session.CreateSession(KClientPtServerName, 1)); sl@0: sl@0: RChunk* chunks = new RChunk[chunkCount]; sl@0: for (TInt i = 0; i < chunkCount; i++) sl@0: { sl@0: TInt r = chunks[i].SetReturnedHandle(session.PublicSendReceive(EClientGetChunk, TIpcArgs(i))); sl@0: if (r != KErrNone) sl@0: { sl@0: test.Printf(_L("Failed to create a handle to the server's chunk r=%d\n"), r); sl@0: for (TInt j = 0; j < i; j++) sl@0: chunks[j].Close(); sl@0: session.Close(); sl@0: return r; sl@0: } sl@0: test_Value(chunks[i].Size(), chunks[i].Size() >= gPageSize); sl@0: } sl@0: if (!accessData) sl@0: { sl@0: // Touch the 1st page of each of the chunks. sl@0: for (TInt i = 0; i < chunkCount; i++) sl@0: { sl@0: // Write the chunk data from top to bottom of the chunk's first page. sl@0: TUint8* base = chunks[i].Base(); sl@0: TUint8* end = base + gPageSize - 1; sl@0: *base = *end; sl@0: } sl@0: // Tell parent we've touched each chunk. sl@0: TInt r = (TThreadId)session.PublicSendReceive(EClientReadChunks,TIpcArgs()); // Assumes id is only 32-bit. sl@0: test_KErrNone(r); sl@0: for(;;) sl@0: {// Wake up every 100ms to be killed by the main process. sl@0: User::After(100000); sl@0: } sl@0: } sl@0: else sl@0: { sl@0: for (;;) sl@0: { sl@0: TInt offset = 0; sl@0: for (TInt i = 0; i < chunkCount; i++) sl@0: { sl@0: // Write the chunk data from top to bottom of the chunk's first page. sl@0: TUint8* base = chunks[i].Base(); sl@0: TUint8* end = base + gPageSize - 1; sl@0: *(base + offset) = *(end - offset); sl@0: } sl@0: if (++offset >= (gPageSize >> 1)) sl@0: offset = 0; sl@0: } sl@0: } sl@0: } sl@0: sl@0: sl@0: void TestMaxPt() sl@0: { sl@0: // Flexible memory model reserves 0xF800000-0xFFF00000 for page tables sl@0: // this allows 130,048 pages tables. Therefore mapping 1000 one sl@0: // page chunks into 256 processes would require 256,000 page tables, i.e. sl@0: // more than enough to hit the limit. So that the limit is reached in the middle, sl@0: // map 500 unpaged and 500 paged chunks in each process. sl@0: const TUint KNumChunks = 1000; sl@0: const TUint KPagedChunksStart = (KNumChunks >> 1); sl@0: const TUint KNumProcesses = 256; sl@0: const TInt KMinFreeRam = (1000 * gPageSize) + (130048 * (gPageSize>>2)); sl@0: TInt freeRam; sl@0: HAL::Get(HALData::EMemoryRAMFree, freeRam); sl@0: if (freeRam < KMinFreeRam) sl@0: { sl@0: test.Printf(_L("Only 0x%x bytes of free RAM not enough to perform the test. Skipping test.\n"), freeRam); sl@0: return; sl@0: } sl@0: sl@0: // Remove the maximum limit on the cache size as the test requires that it can sl@0: // allocate as many page tables as possible but without stealing any pages as sl@0: // stealing pages may indirectly steal paged page table pages. sl@0: TUint minCacheSize, maxCacheSize, currentCacheSize; sl@0: DPTest::CacheSize(minCacheSize,maxCacheSize,currentCacheSize); sl@0: test_KErrNone(DPTest::SetCacheSize(minCacheSize, KMaxTUint)); sl@0: sl@0: RServer2 ptServer; sl@0: TInt r = ptServer.CreateGlobal(KClientPtServerName); sl@0: test_KErrNone(r); sl@0: sl@0: // Create the global unpaged chunks. They have one page committed sl@0: // but have a maximum size large enough to prevent their page tables being sl@0: // shared between the chunks. On arm with 4KB pages each page table maps 1MB sl@0: // so make chunk 1MB+4KB so chunk requires 2 page tables and is not aligned on sl@0: // a 1MB boundary so it is a fine memory object. sl@0: const TUint KChunkSize = (1024 * 1024) + gPageSize; sl@0: RChunk* chunks = new RChunk[KNumChunks]; sl@0: TChunkCreateInfo createInfo; sl@0: createInfo.SetNormal(gPageSize, KChunkSize); sl@0: createInfo.SetGlobal(KNullDesC); sl@0: createInfo.SetPaging(TChunkCreateInfo::EUnpaged); sl@0: TUint i = 0; sl@0: for (; i < KPagedChunksStart; i++) sl@0: { sl@0: r = chunks[i].Create(createInfo); sl@0: test_KErrNone(r); sl@0: } sl@0: // Create paged chunks. sl@0: createInfo.SetPaging(TChunkCreateInfo::EPaged); sl@0: for (; i< KNumChunks; i++) sl@0: { sl@0: r = chunks[i].Create(createInfo); sl@0: test_KErrNone(r); sl@0: } sl@0: sl@0: // Start remote processes, giving each process handles to each chunk. sl@0: RProcess* processes = new RProcess[KNumProcesses]; sl@0: RMessage2 ptMessage; sl@0: TUint processIndex = 0; sl@0: TUint processLimit = 0; sl@0: for (; processIndex < KNumProcesses; processIndex++) sl@0: { sl@0: // Start the process. sl@0: test.Printf(_L("Creating process %d\n"), processIndex); sl@0: TBuf<80> args; sl@0: args.AppendFormat(_L("%d %d"), KNumChunks, EFalse); sl@0: r = processes[processIndex].Create(KClientProcessName, args); sl@0: test_KErrNone(r); sl@0: TRequestStatus s; sl@0: processes[processIndex].Logon(s); sl@0: test_Equal(KRequestPending, s.Int()); sl@0: processes[processIndex].Resume(); sl@0: sl@0: ptServer.Receive(ptMessage); sl@0: test_Equal(EClientConnect, ptMessage.Function()); sl@0: ptMessage.Complete(KErrNone); sl@0: TInt func = EClientGetChunk; sl@0: TUint chunkIndex = 0; sl@0: for (; chunkIndex < KNumChunks && func == EClientGetChunk; chunkIndex++) sl@0: {// Pass handles to all the unpaged chunks to the new process. sl@0: ptServer.Receive(ptMessage); sl@0: func = ptMessage.Function(); sl@0: if (func == EClientGetChunk) sl@0: { sl@0: TUint index = ptMessage.Int0(); sl@0: ptMessage.Complete(chunks[index]); sl@0: } sl@0: } sl@0: if (func != EClientGetChunk) sl@0: { sl@0: // Should hit the limit of page tables and this process instance should exit sl@0: // sending a disconnect message in the process. sl@0: test_Equal(EClientDisconnect, func); sl@0: // Should only fail when mapping unpaged chunks. sl@0: test_Value(chunkIndex, chunkIndex < (KNumChunks >> 1)); sl@0: break; sl@0: } sl@0: // Wait for the process to access all the chunks and therefore sl@0: // allocate the paged page tables before moving onto the next process. sl@0: ptServer.Receive(ptMessage); sl@0: func = ptMessage.Function(); sl@0: test_Equal(EClientReadChunks, func); sl@0: ptMessage.Complete(KErrNone); sl@0: sl@0: // Should have mapped all the required chunks. sl@0: test_Equal(KNumChunks, chunkIndex); sl@0: } sl@0: // Should hit page table limit before KNumProcesses have been created. sl@0: test_Value(processIndex, processIndex < KNumProcesses - 1); sl@0: processLimit = processIndex; sl@0: sl@0: // Now create more processes to access paged data even though the page table sl@0: // address space has been exhausted. Limit to 10 more processes as test takes sl@0: // long enough already. sl@0: processIndex++; sl@0: TUint excessProcesses = KNumProcesses - processIndex; sl@0: TUint pagedIndexEnd = (excessProcesses > 10)? processIndex + 10 : processIndex + excessProcesses; sl@0: for (; processIndex < pagedIndexEnd; processIndex++) sl@0: { sl@0: // Start the process. sl@0: test.Printf(_L("Creating process %d\n"), processIndex); sl@0: TBuf<80> args; sl@0: args.AppendFormat(_L("%d %d"), KNumChunks-KPagedChunksStart, ETrue); sl@0: r = processes[processIndex].Create(KClientProcessName, args); sl@0: if (r != KErrNone) sl@0: {// Have hit the limit of processes. sl@0: processIndex--; sl@0: // Should have created at least one more process. sl@0: test_Value(processIndex, processIndex > processLimit); sl@0: break; sl@0: } sl@0: TRequestStatus s; sl@0: processes[processIndex].Logon(s); sl@0: test_Equal(KRequestPending, s.Int()); sl@0: processes[processIndex].Resume(); sl@0: sl@0: ptServer.Receive(ptMessage); sl@0: test_Equal(EClientConnect, ptMessage.Function()); sl@0: ptMessage.Complete(KErrNone); sl@0: sl@0: TInt func = EClientGetChunk; sl@0: TUint chunkIndex = KPagedChunksStart; sl@0: for (; chunkIndex < KNumChunks && func == EClientGetChunk; chunkIndex++) sl@0: {// Pass handles to all the unpaged chunks to the new process. sl@0: ptServer.Receive(ptMessage); sl@0: func = ptMessage.Function(); sl@0: if (func == EClientGetChunk) sl@0: { sl@0: TUint index = ptMessage.Int0() + KPagedChunksStart; sl@0: ptMessage.Complete(chunks[index]); sl@0: } sl@0: } sl@0: if (func != EClientGetChunk) sl@0: {// Reached memory limits so exit. sl@0: test_Equal(EClientDisconnect, func); sl@0: // Should have created at least one more process. sl@0: test_Value(processIndex, processIndex > processLimit+1); sl@0: break; sl@0: } sl@0: sl@0: // Should have mapped all the required chunks. sl@0: test_Equal(KNumChunks, chunkIndex); sl@0: } sl@0: // If we reached the end of then ensure that we kill only the running processes. sl@0: if (processIndex == pagedIndexEnd) sl@0: processIndex--; sl@0: // Kill all the remote processes sl@0: for(TInt j = processIndex; j >= 0; j--) sl@0: { sl@0: test.Printf(_L("killing process %d\n"), j); sl@0: TRequestStatus req; sl@0: processes[j].Logon(req); sl@0: if (req == KRequestPending) sl@0: { sl@0: processes[j].Kill(KErrNone); sl@0: User::WaitForRequest(req); sl@0: } sl@0: processes[j].Close(); sl@0: } sl@0: delete[] processes; sl@0: // Close the chunks. sl@0: for (TUint k = 0; k < KNumChunks; k++) sl@0: chunks[k].Close(); sl@0: delete[] chunks; sl@0: sl@0: test_KErrNone(DPTest::SetCacheSize(minCacheSize, maxCacheSize)); sl@0: } sl@0: sl@0: sl@0: TInt E32Main() sl@0: { sl@0: test_KErrNone(UserHal::PageSizeInBytes(gPageSize)); sl@0: sl@0: TUint len = User::CommandLineLength(); sl@0: if (len > 0) sl@0: { sl@0: return ClientProcess(len); sl@0: } sl@0: sl@0: test.Title(); sl@0: test_KErrNone(GetGlobalPolicies()); sl@0: sl@0: if (!gDataPagingSupported) sl@0: { sl@0: test.Printf(_L("Data paging not enabled so skipping test...\n")); sl@0: return KErrNone; sl@0: } sl@0: sl@0: test.Start(_L("Test the system can always acquire a paged page table")); sl@0: TestMaxPt(); sl@0: sl@0: test.End(); sl@0: return KErrNone; sl@0: }