// Copyright (c) 2004-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32test\mmu\t_sharedchunk.cpp
// Overview:
// Test sharing an RChunk with a logical device.
// API Information:
// RChunk
// Details:
// - Load and open the logical device driver ("D_SHAREDCHUNK"). Verify
//   results.
// - Test and verify results of creating shared chunks under OOM conditions. Also verify
//   that creating a chunk with a bad type, a bad size, or a size that is too large fails as
//   expected.
// - Test and verify that opening and closing chunk user handles work as expected.
// - Test and verify that thread-local and process-local handles work as expected.
// - Test and verify that restrictions on an RChunk created as a shared chunk are as expected.
// - Test and verify memory access for multiply and singly shared chunks
//   is as expected, including IPC, kernel, DFC and ISR reads & writes.
// - Test and verify discontinuous memory commits for multiply and singly
//   shared chunks are as expected.
// - Test and verify continuous memory commits for multiply and singly shared
//   chunks are as expected.
// - Test and verify discontinuous and continuous physical memory commits for
//   multiply and singly shared chunks are as expected.
// - Test Kern::OpenSharedChunk for multiply and singly shared chunks. Verify
//   results are as expected.
// - Test that physical memory can be freed immediately after the chunk that mapped
//   it has been closed.
// Platforms/Drives/Compatibility:
// All.
// Assumptions/Requirement/Pre-requisites:
// Failures and causes:
// Base Port information:
//
//

//! @file
//! @SYMTestCaseID KBASE-T_SHAREDCHUNK
//! @SYMREQ 3699
//! @SYMTestPriority High
//! @SYMTestActions Check creation, memory allocation and access to Shared Chunks
//! @SYMTestExpectedResults Test runs until this message is emitted: RTEST: SUCCESS : T_SHAREDCHUNK test completed O.K.
//! @SYMTestType UT

#define __E32TEST_EXTENSION__

#include "d_sharedchunk.h"
#include "d_gobble.h"
// Note: the angle-bracket header names were lost from this copy of the file; the headers
// below are inferred from the APIs used (RTest, UserSvr/UserHal, the kernel HAL
// enumerations, RDebug and RLoader) and may not match the original list exactly.
#include <e32test.h>
#include <e32svr.h>
#include "u32std.h"
#include <u32hal.h>
#include <e32def.h>
#include <e32def_private.h>
#include <e32debug.h>
#include <e32ldr.h>
#include <e32ldr_private.h>
#include "freeram.h"

enum TSlaveCommand
    {
    ESlaveCheckChunk,
    ESlaveCreateChunk,
    };

//
// Global data
//

RSharedChunkLdd Ldd;
RChunk TheChunk;
TInt PageSize;
TUint32 MemModelAttributes;
TBool PhysicalCommitSupported;
TBool CachingAttributesSupported;
TUint8 BssMem[100];

const TUint ChunkSize = 0x400000;   // 4 meg reserved space for test chunk

_LIT(KSecondProcessName,"t_sharedchunk");
#ifdef __T_SHAREDCHUNKF__
_LIT(KOtherProcessName,"t_sharedchunk");
LOCAL_D RTest test(_L("T_SHAREDCHUNKF"));
#else
_LIT(KOtherProcessName,"t_sharedchunkf");
LOCAL_D RTest test(_L("T_SHAREDCHUNK"));
#endif

_LIT8(KTestString, "lks4b7qeyfcea5fyaifyaefyi4flwdysuxanabxa");
_LIT8(KTestString2,"jhfcalurnhfirlxszhrvcvduhrvndrucxnshxcsx");
const TUint32 KTestValue = 0x12345678;
const TUint32 KTestValue2 = KTestValue^0x0f0f0f0f;

//
// Utilities for use by tests
//

_LIT(KLitKernExec, "KERN-EXEC");

void CheckFailMessage(const char* n,const char* c,const char* f,TInt r)
    {
    TPtrC8 nn((const TUint8*)n);
    TPtrC8 cc((const TUint8*)c);
    TPtrC8 ff((const TUint8*)f);
    RBuf8 buf;
    buf.Create((nn.Size()+cc.Size()+ff.Size()+64)*2);
    buf.AppendFormat(_L8("\nCHECK failed: %S == 0x%x but was tested for %S%S\n"),&ff,r,&cc,&nn);
    test.Printf(buf.Expand());
    buf.Close();
    }

#define CHECK(n,c,f) \
    { \
    TInt _r=(TInt)(f); \
    if(!((TInt)(n)c(_r))) \
        { \
        CheckFailMessage(#n,#c,#f,_r); \
        test(0); \
        } \
    }

#define KCHECK_MEMORY(result,offset) \
    { \
    /* test.Printf(_L("check offset 0x%08x\r"),offset); */ \
    CHECK(result,==,Ldd.CheckMemory(offset)); \
    }

#define KWRITE_MEMORY(offset,value) \
    { \
    CHECK(KErrNone,==,Ldd.WriteMemory(offset,value)); \
    }

#define UREAD_MEMORY(offset,value) \
    { \
    CHECK(value,==,*(TUint*)(Base+offset)); \
    }

inline TUint32 Tag(TUint32 offset)
    { return (69069u*(offset*4+1)); }

TInt MemoryAccessThread(TAny* aAddress)
    {
    TInt r = *(volatile TUint8*)aAddress; // read from aAddress
    (void)r;
    return 0;
    }

TInt CheckUMemory(TAny* aAddress)
    {
    RThread thread;
    thread.Create(KNullDesC,MemoryAccessThread,PageSize,&User::Heap(),aAddress);
    TRequestStatus status;
    thread.Logon(status);
    TBool jit = User::JustInTime();
    User::SetJustInTime(EFalse);
    thread.Resume();
    User::WaitForRequest(status);
    User::SetJustInTime(jit);
    TInt r;
    if(thread.ExitType()==EExitKill && thread.ExitReason()==0)
        r = 1;  // Memory access pass
    else if(thread.ExitType()==EExitPanic && thread.ExitCategory()==KLitKernExec && thread.ExitReason()==3 )
        r = 0;  // Memory access failed
    else
        r = -1; // Unexpected result
    CLOSE_AND_WAIT(thread);
    return r;
    }

#define UCHECK_MEMORY(result,offset) \
    { \
    /* test.Printf(_L("ucheck offset 0x%08x\r"),offset); */ \
    CHECK(result,==,CheckUMemory(Base+offset)); \
    }

TInt CheckPlatSecPanic(TThreadFunction aThreadFunction,TInt aThreadArgument)
    {
    RThread thread;
    thread.Create(KNullDesC,aThreadFunction,PageSize,&User::Heap(),(TAny*)aThreadArgument);
    TRequestStatus status;
    thread.Logon(status);
    TBool jit = User::JustInTime();
    User::SetJustInTime(EFalse);
    thread.Resume();
    User::WaitForRequest(status);
    User::SetJustInTime(jit);
    TInt r;
    if(thread.ExitType()==EExitPanic && thread.ExitCategory()==KLitKernExec && thread.ExitReason()==46 )
        r = 1;  // PlatSec panic
    else if(thread.ExitType()==EExitKill && thread.ExitReason()==0)
        r = 0;  // Exit without error
    else
        r = -1; // Unexpected result
    CLOSE_AND_WAIT(thread);
    return r;
    }

//
// The tests
//

void CreateWithOomCheck(TInt aCreateFlags)
    {
    TInt failResult=KErrGeneral;
    for(TInt failCount=1; failCount<1000; failCount++)
        {
        test.Printf(_L("alloc fail count = %d\n"),failCount);
        User::__DbgSetBurstAllocFail(ETrue,RAllocator::EFailNext,failCount,1000);
        __KHEAP_MARK;
        failResult = Ldd.CreateChunk(aCreateFlags);
        if(failResult==KErrNone)
            break;
        CHECK(KErrNoMemory,==,failResult);
        Ldd.IsDestroyed(); // This includes delay to let idle thread do cleanup
        __KHEAP_MARKEND;
        }
    User::__DbgSetAllocFail(ETrue,RAllocator::ENone,0);
    __KHEAP_RESET;
    CHECK(KErrNone,==,failResult);
    }

void TestCreate()
    {
    test.Start(_L("Creating chunk type Single,OwnsMemory"));
    CreateWithOomCheck(ChunkSize|ESingle|EOwnsMemory);
    CHECK(0,==,Ldd.IsDestroyed());

    test.Next(_L("Close kernel handle"));
    CHECK(1,==,Ldd.CloseChunk()); // 1==DObject::EObjectDeleted
    CHECK(1,==,Ldd.IsDestroyed());

    test.Next(_L("Creating chunk type Multiple,OwnsMemory"));
    CreateWithOomCheck(ChunkSize|EMultiple|EOwnsMemory);
    CHECK(0,==,Ldd.IsDestroyed());

    test.Next(_L("Close kernel handle"));
    CHECK(1,==,Ldd.CloseChunk()); // 1==DObject::EObjectDeleted
    CHECK(1,==,Ldd.IsDestroyed());

    if(PhysicalCommitSupported)
        {
        test.Next(_L("Creating chunk type Single,!OwnsMemory"));
        CreateWithOomCheck(ChunkSize|ESingle);
        CHECK(0,==,Ldd.IsDestroyed());

        test.Next(_L("Close kernel handle"));
        CHECK(1,==,Ldd.CloseChunk()); // 1==DObject::EObjectDeleted
        CHECK(1,==,Ldd.IsDestroyed());

        test.Next(_L("Creating chunk type Multiple,!OwnsMemory"));
        CreateWithOomCheck(ChunkSize|EMultiple);
        CHECK(0,==,Ldd.IsDestroyed());

        test.Next(_L("Close kernel handle"));
        CHECK(1,==,Ldd.CloseChunk()); // 1==DObject::EObjectDeleted
        CHECK(1,==,Ldd.IsDestroyed());
        }
    else
        {
        test.Next(_L("Try creating unsupported chunk type Single,!OwnsMemory"));
        CHECK(KErrNotSupported,==,Ldd.CreateChunk(ChunkSize|ESingle));

        test.Next(_L("Try creating unsupported chunk type Multiple,!OwnsMemory"));
        CHECK(KErrNotSupported,==,Ldd.CreateChunk(ChunkSize|EMultiple));
        }

    test.Next(_L("__KHEAP_MARK"));
    __KHEAP_MARK;

    test.Next(_L("Creating chunk (bad type)"));
    CHECK(KErrArgument,==,Ldd.CreateChunk(EBadType|EOwnsMemory|ChunkSize));

    test.Next(_L("Creating chunk (bad size)"));
    CHECK(KErrArgument,==,Ldd.CreateChunk(ESingle|EOwnsMemory|0xffffff00));

    test.Next(_L("Creating chunk (size too big)"));
    CHECK(KErrNoMemory,==,Ldd.CreateChunk(ESingle|EOwnsMemory|0x7fffff00));

    test.Next(_L("__KHEAP_MARKEND"));
    __KHEAP_MARKEND;

    test.End();
    }


void OpenWithOomCheck(RChunk& aChunk)
    {
    TInt failResult=KErrGeneral;
    for(TInt failCount=1; failCount<1000; failCount++)
        {
        test.Printf(_L("alloc fail count = %d\n"),failCount);
        User::__DbgSetBurstAllocFail(ETrue,RAllocator::EFailNext,failCount,1000);
        __KHEAP_MARK;
        failResult = Ldd.GetChunkHandle(aChunk);
        if(failResult==KErrNone)
            break;
        CHECK(KErrNoMemory,==,failResult);
        Ldd.IsDestroyed(); // This includes delay to let idle thread do cleanup
        __KHEAP_MARKEND;
        }
    User::__DbgSetAllocFail(ETrue,RAllocator::ENone,0);
    __KHEAP_RESET;
    CHECK(KErrNone,==,failResult);
    }

void TestHandles()
    {
    TUint ChunkAttribs = ChunkSize|ESingle|EOwnsMemory;

    test.Start(_L("Create chunk"));
    CHECK(KErrNone,==,Ldd.CreateChunk(ChunkAttribs));

    test.Next(_L("Open user handle"));
    OpenWithOomCheck(TheChunk);

    test.Next(_L("Close user handle"));
    TheChunk.Close();

    test.Next(_L("Check chunk not destroyed"));
    CHECK(0,==,Ldd.IsDestroyed());

    test.Next(_L("Close kernel handle"));
    CHECK(1,==,Ldd.CloseChunk()); // 1==DObject::EObjectDeleted

    test.Next(_L("Check chunk destroyed"));
    CHECK(1,==,Ldd.IsDestroyed());

    // Another chunk - closing handles in reverse order

    test.Next(_L("Create chunk"));
    CHECK(KErrNone,==,Ldd.CreateChunk(ChunkAttribs));

    test.Next(_L("Open user handle"));
    OpenWithOomCheck(TheChunk);

    test.Next(_L("Close kernel handle"));
    CHECK(KErrNone,==,Ldd.CloseChunk());

    test.Next(_L("Check chunk not destroyed"));
    CHECK(0,==,Ldd.IsDestroyed());

    test.Next(_L("Using user handle to check chunk info"));
    if((MemModelAttributes&EMemModelTypeMask)!=EMemModelTypeDirect)
        {
        CHECK(0,==,TheChunk.Size());
        }
    CHECK(ChunkSize,==,TheChunk.MaxSize());

    test.Next(_L("Close user handle"));
    TheChunk.Close();

    test.Next(_L("Check chunk destroyed"));
    CHECK(1,==,Ldd.IsDestroyed());

    test.End();
    }

TInt HandleOwnershipThread(TAny* aArg)
    {
    // Use existing handle and attempt to read from chunk
    TInt handle = (TInt) aArg;
    RChunk chunk;
    chunk.SetHandle(handle);
    TInt r = *(volatile TUint8*)chunk.Base();
    (void)r;
    CLOSE_AND_WAIT(chunk);
    return KErrNone;
    }

void TestHandleOwnership()
    {
    TUint ChunkAttribs = ChunkSize|ESingle|EOwnsMemory;
    RThread thread;
    TRequestStatus rs;

    test.Start(_L("Create chunk"));
    CHECK(KErrNone,==,Ldd.CreateChunk(ChunkAttribs));

    test.Next(_L("Commit page to chunk"));
    CHECK(KErrNone,==,Ldd.CommitMemory(EDiscontiguous,PageSize));

    test.Next(_L("Check can access memory kernel side"));
    KCHECK_MEMORY(ETrue, 0);

    // Handle is thread-owned
    test.Next(_L("Open user handle (thread-owned)"));
    CHECK(0,<=,Ldd.GetChunkHandle(TheChunk, ETrue));

    test.Next(_L("Get memory size info"));
    if((MemModelAttributes&EMemModelTypeMask)!=EMemModelTypeDirect)
        {
        CHECK(PageSize,==,TheChunk.Size());
        }
    CHECK(ChunkSize,==,TheChunk.MaxSize());
    TUint8* Base = TheChunk.Base();
    CHECK(Base,!=,0);

    test.Next(_L("Check can access memory user side"));
    UCHECK_MEMORY(ETrue, 0);

    test.Next(_L("Use handle in a new thread"));
    CHECK(KErrNone,==,thread.Create(_L("thread1"), HandleOwnershipThread, KDefaultStackSize, KMinHeapSize, KMinHeapSize, (TAny*)TheChunk.Handle()));
    thread.Logon(rs);
    thread.Resume();
    User::WaitForRequest(rs);
    CHECK(EExitPanic,==,thread.ExitType());
    CHECK(0,==,thread.ExitReason()); // KERN-EXEC 0
    CLOSE_AND_WAIT(thread);

    test.Next(_L("Close user handle"));
    TheChunk.Close();

    // Handle is process-owned
    test.Next(_L("Open user handle (process-owned)"));
    CHECK(0,<=,Ldd.GetChunkHandle(TheChunk, EFalse));

    test.Next(_L("Check can access memory user side"));
    UCHECK_MEMORY(ETrue, 0);

    test.Next(_L("Close kernel handle"));
    CHECK(KErrNone,==,Ldd.CloseChunk());

    test.Next(_L("Check chunk not destroyed"));
    CHECK(0,==,Ldd.IsDestroyed());

    test.Next(_L("Use handle in a new thread"));
    CHECK(KErrNone,==,thread.Create(_L("thread2"), HandleOwnershipThread, KDefaultStackSize, KMinHeapSize, KMinHeapSize, (TAny*)TheChunk.Handle()));
    thread.Logon(rs);
    thread.Resume();
    User::WaitForRequest(rs);
    CHECK(EExitKill,==,thread.ExitType());
    CHECK(KErrNone,==,thread.ExitReason());
    CLOSE_AND_WAIT(thread);

    test.Next(_L("Check chunk destroyed"));
    CHECK(1,==,Ldd.IsDestroyed()); // Object was deleted

    test.End();
    }

void SetCreateFlags(TUint& aCreateFlags,TCommitType aCommitType)
    {
    if(!((TInt)aCommitType&EPhysicalMask))
        aCreateFlags |= EOwnsMemory;
    else
        aCreateFlags &= ~EOwnsMemory;
    }


void TestAccess(TUint aCreateFlags,TCommitType aCommitType)
    {
    const TUint32 offset = 0;
    const TUint32 size = PageSize;

    SetCreateFlags(aCreateFlags,aCommitType);

    test.Start(_L("Create chunk"));
    TUint8* kernelAddress;
    CHECK(KErrNone,==,Ldd.CreateChunk(aCreateFlags|ChunkSize,(TAny**)&kernelAddress));

    if((MemModelAttributes&TUint32(EMemModelAttrNonExProt)) && (MemModelAttributes&EMemModelTypeMask)!=EMemModelTypeDirect)
        {
        test.Next(_L("Check can't access memory"));
        KCHECK_MEMORY(EFalse,offset);
        }

    test.Next(_L("Commit page to chunk"));
    CHECK(KErrNone,==,Ldd.CommitMemory(aCommitType|offset,size));

    test.Next(_L("Check can access memory kernel side"));
    KCHECK_MEMORY(ETrue, offset);

    if((MemModelAttributes&EMemModelAttrKernProt) && (MemModelAttributes&EMemModelTypeMask)!=EMemModelTypeDirect)
        {
        test.Next(_L("Check user side can't access kernel memory"));
        TUint8* Base = kernelAddress;
        UCHECK_MEMORY(EFalse, offset);
        }

    test.Next(_L("Open user handle"));
    CHECK(KErrNone,==,Ldd.GetChunkHandle(TheChunk));

    test.Next(_L("Get memory size info"));
    if((MemModelAttributes&EMemModelTypeMask)!=EMemModelTypeDirect)
        {
        CHECK(PageSize,==,TheChunk.Size());
        }
    CHECK(ChunkSize,==,TheChunk.MaxSize());
    TUint8* Base = TheChunk.Base();
    CHECK(Base,!=,0);

    test.Next(_L("Check can access memory user side"));
    UCHECK_MEMORY(ETrue, offset);

    test.Next(_L("Check user and kernel access same memory"));
    KWRITE_MEMORY(offset,~Tag(offset));
    UREAD_MEMORY(offset,~Tag(offset));
    KWRITE_MEMORY(offset,Tag(offset));
    UREAD_MEMORY(offset,Tag(offset));

    test.Next(_L("Close user handle"));
    CHECK(0,==,Ldd.CloseChunkHandle(TheChunk));
    CHECK(0,==,Ldd.IsDestroyed());

    if((MemModelAttributes&EMemModelAttrKernProt) && (MemModelAttributes&EMemModelTypeMask)!=EMemModelTypeDirect)
        {
        test.Next(_L("Check can no longer access memory user side"));
        UCHECK_MEMORY(EFalse,offset);
        }

    test.Next(_L("Check can still access memory kernel side"));
    KCHECK_MEMORY(ETrue, offset);

    test.Next(_L("Open user handle again"));
    CHECK(KErrNone,==,Ldd.GetChunkHandle(TheChunk));

    test.Next(_L("Check can access chunk user side again"));
    CHECK(Base,==,TheChunk.Base());
    CHECK(ChunkSize,==,TheChunk.MaxSize());
    if((MemModelAttributes&EMemModelTypeMask)!=EMemModelTypeDirect)
        {
        CHECK(size,==,TheChunk.Size());
        }
    UREAD_MEMORY(offset,Tag(offset));

    test.Next(_L("Close kernel handle"));
    CHECK(0,==,Ldd.CloseChunk());
    CHECK(0,==,Ldd.IsDestroyed());

    test.Next(_L("Check can still access chunk user side"));
    CHECK(Base,==,TheChunk.Base());
    CHECK(ChunkSize,==,TheChunk.MaxSize());
    if((MemModelAttributes&EMemModelTypeMask)!=EMemModelTypeDirect)
        {
        CHECK(size,==,TheChunk.Size());
        }
    UREAD_MEMORY(offset,Tag(offset));

    test.Next(_L("Close user handle"));
    TheChunk.Close();
    CHECK(1,==,Ldd.IsDestroyed());

    test.Next(_L("Create chunk in another process"));

    // Create test server
    RServer2 server;
    RMessage2 message;
    TRequestStatus status;
    CHECK(KErrNone,==,server.CreateGlobal(KSecondProcessName));
    server.Receive(message,status);

    // Launch slave process
    RProcess process;
    CHECK(KErrNone,==,process.Create(KSecondProcessName,KNullDesC));
    CHECK(KErrNone,==,process.SetParameter(1,ESlaveCreateChunk));
    CHECK(KErrNone,==,process.SetParameter(2,(RBusLogicalChannel&)Ldd));
    CHECK(KErrNone,==,process.SetParameter(3,ChunkSize|aCreateFlags));
    CHECK(KErrNone,==,process.SetParameter(4,aCommitType));
    CHECK(KErrNone,==,process.SetParameter(5,PageSize));
    TRequestStatus logon;
    process.Logon(logon);
    process.Resume();

    // Wait for slave to connect to test server
    User::WaitForRequest(logon,status);
    CHECK(KRequestPending,==,logon.Int())
    CHECK(KErrNone,==,status.Int());
    CHECK(RMessage2::EConnect,==,message.Function());
    message.Complete(KErrNone);
    server.Receive(message,status);

    // Wait for message
    User::WaitForRequest(logon,status);
    CHECK(KRequestPending,==,logon.Int())
    CHECK(KErrNone,==,status.Int());
    CHECK(0,==,message.Function());

    test.Next(_L("Check IPC read/write"));
    RBuf8 buf;
    buf.Create(KTestString().Size());
    CHECK(KErrNone,==,message.Read(0,buf));
    CHECK(ETrue,==,buf==KTestString());
    CHECK(KErrNone,==,message.Write(0,KTestString2));
    CHECK(KErrNone,==,message.Read(0,buf));
    CHECK(ETrue,==,buf==KTestString2());

    test.Next(_L("Check Kernel read/write"));
    TInt n;
    TUint32 value;
    // [A span of the original file is missing here and could not be recovered: the body
    // of the kernel read/write loop above, the remaining DFC and ISR access checks and
    // clean-up in TestAccess, the TestRestrictions() test, and the definitions of the
    // CommitList region table and its SetTags/CheckTags/CheckOldTags/CheckRegion
    // helpers used below.]
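// The original definition of the commit-region descriptor was lost with the span noted
// above. The struct below is a reconstruction inferred from how the surviving code uses
// it: member names come from that code, offsets and sizes are in pages, and the third
// member is the expected commit result (defaulting to KErrNone); the exact original
// layout is an assumption.
struct TCommitRegion
    {
    TInt iOffset;         // region start within the chunk, in pages
    TInt iSize;           // region size, in pages
    TInt iExpectedResult; // expected return code when committing this region
    };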
void CheckCommitedContents(TInt aIndex)
    {
    // Loop head reconstructed: walk back over the regions committed so far.
    while(--aIndex>=0)
        if(CommitList[aIndex].iExpectedResult==KErrNone)
            CheckTags(CommitList[aIndex]);
    };

void CheckCommitState(TInt aIndex)
    {
    TInt page=0;
    TInt lastPage=TheChunk.MaxSize()/PageSize;
    // The heads of the two loops below were lost and are reconstructed from the surviving
    // loop bodies: scan every page of the chunk and search the first aIndex entries of
    // CommitList for a successfully committed region containing that page.
    while(page<lastPage)
        {
        TInt i=aIndex;
        while(--i>=0)
            if(CommitList[i].iExpectedResult==KErrNone)
                if((TUint)(page-CommitList[i].iOffset) < (TUint)CommitList[i].iSize)
                    break;
        TInt offset = page*PageSize;
        if(i>=0)
            {
            KCHECK_MEMORY(1,offset); // Check page exists
            }
        else
            {
            KCHECK_MEMORY(0,offset); // Check page doesn't exist
            }
        ++page;
        }
    };


void TestCommit(TUint aCreateFlags,TCommitType aCommitType)
    {
    SetCreateFlags(aCreateFlags,aCommitType);
    TUint ChunkAttribs = ChunkSize|aCreateFlags;

    test.Start(_L("Create chunk"));
    CHECK(KErrNone,==,Ldd.CreateChunk(ChunkAttribs));

    test.Next(_L("Check wrong commit type"));
    CHECK(KErrNotSupported,==,Ldd.CommitMemory((aCommitType^EPhysicalMask)|0,PageSize));
    CHECK(KErrNotSupported,==,Ldd.CommitMemory((aCommitType^EPhysicalMask^EContiguous)|0,PageSize));

    if((TInt)aCommitType&EPhysicalMask)
        {
        test.Next(_L("Check commit with bad physical address"));
        CHECK(KErrArgument,==,Ldd.CommitMemory((aCommitType|EBadPhysicalAddress)|0,PageSize));
        }

    test.Next(_L("Open user handle"));
    CHECK(KErrNone,==,Ldd.GetChunkHandle(TheChunk));

    const TCommitRegion* list = CommitList;
    for(;list->iOffset>=0; ++list)
        {
        TInt offset = list->iOffset*PageSize;
        TInt size = list->iSize*PageSize;
        TInt expectedResult = list->iExpectedResult;
        if((MemModelAttributes&EMemModelTypeMask)==EMemModelTypeDirect && expectedResult==KErrAlreadyExists)
            continue;
        TBuf<100> text;
        text.AppendFormat(_L("Commit pages: offset=%08x size=%08x expectedResult=%d"),offset,size,expectedResult);
        test.Next(text);

        test.Start(_L("Do the Commit"));
        CHECK(expectedResult,==,Ldd.CommitMemory(aCommitType|offset,size));

        if(expectedResult==KErrNone)
            {
            test.Next(_L("Check new memory has been committed"));
            CheckRegion(*list,!(aCommitType&EPhysicalMask));
            }

        if((MemModelAttributes&EMemModelTypeMask)!=EMemModelTypeDirect)
            {
            test.Next(_L("Check commit state of all pages in chunk"));
            CheckCommitState(list-CommitList+1);
            }

        test.Next(_L("Check contents of previously committed regions are unchanged"));
        CheckCommitedContents(list-CommitList);

        if(expectedResult==KErrNone)
            {
            test.Next(_L("Mark new memory"));
            SetTags(*list);
            }
        test.End();
        }

    if((aCreateFlags&EMultiple) && (MemModelAttributes&EMemModelTypeMask)!=EMemModelTypeDirect)
        {
        test.Next(_L("Check another process sees same chunk state"));
        TInt regionCount = list-CommitList;

        // create another process
        RProcess process;
        CHECK(KErrNone,==,process.Create(KOtherProcessName,KNullDesC));
        CHECK(KErrNone,==,process.SetParameter(1,ESlaveCheckChunk));
        CHECK(KErrNone,==,process.SetParameter(2,(RBusLogicalChannel&)Ldd));
        CHECK(KErrNone,==,process.SetParameter(3,(RChunk&)TheChunk));
        CHECK(KErrNone,==,process.SetParameter(4,regionCount));
        TRequestStatus status;
        process.Logon(status);
        process.Resume();

        // Check chunk again in this process, concurrently with other process
        CheckCommitedContents(regionCount);
        CheckCommitState(regionCount);

        // wait for other process to finish
        User::WaitForRequest(status);
        CHECK(EExitKill,==,process.ExitType());
        CHECK(0,==,process.ExitReason());
        CLOSE_AND_WAIT(process);
        }

    test.Next(_L("Close handles"));
    TheChunk.Close();
    CHECK(1,==,Ldd.CloseChunk());

    if(aCommitType&EPhysicalMask)
        {
        // For Physical commit tests, check correct allocation by creating a new chunk
        // and checking that the pages committed contain the old TAGs placed there by the
        // tests above.

        test.Next(_L("Check commit uses correct physical pages"));

        test.Start(_L("Create chunk"));
        CHECK(KErrNone,==,Ldd.CreateChunk(ChunkAttribs));

        test.Next(_L("Open user handle"));
        CHECK(KErrNone,==,Ldd.GetChunkHandle(TheChunk));

        TInt offset = 0;
        for(list=CommitList; list->iOffset>=0; ++list)
            {
            if(list->iExpectedResult!=KErrNone)
                continue;

            TInt size = list->iSize*PageSize;
            TBuf<100> text;
            text.AppendFormat(_L("Commit pages: offset=%08x size=%08x"),offset,size);
            test.Next(text);
            CHECK(KErrNone,==,Ldd.CommitMemory(aCommitType|offset,size));

            test.Next(_L("Check RAM contents preserved from previous usage"));
            CheckOldTags(*list,offset);
            offset += size;
            }

        test.Next(_L("Close handles"));
        TheChunk.Close();
        CHECK(1,==,Ldd.CloseChunk());

        test.End();
        }
    else
        {
        // We don't do these OOM tests for Physical commit because we can't do it reliably
        // (as only a couple of page tables come from the free pool, not the whole memory
        // to be committed)
        test.Next(_L("Check Out Of Memory conditions"));

        // Make sure any clean up has happened, otherwise the amount of free RAM may change.
        UserSvr::HalFunction(EHalGroupKernel, EKernelHalSupervisorBarrier, 0, 0);

        test.Start(_L("Gobble up most of RAM"));
        test.Next(_L("Load gobbler LDD"));
        TInt r = User::LoadLogicalDevice(KGobblerLddFileName);
        test(r==KErrNone || r==KErrAlreadyExists);
        RGobbler gobbler;
        r = gobbler.Open();
        test(r==KErrNone);
        TUint32 taken = gobbler.GobbleRAM(2*1024*1024);
        test.Printf(_L("Gobbled: %dK\n"), taken/1024);
        test.Printf(_L("Free RAM 0x%08X bytes\n"),FreeRam());

        test.Next(_L("Get baseline free memory"));
        __KHEAP_MARK;
        TInt freeRam1 = FreeRam();

        test.Next(_L("Create shared chunk"));
        CHECK(KErrNone,==,Ldd.CreateChunk(ChunkAttribs));
        TInt freeRam2 = FreeRam();

        test.Next(_L("Commit memory which will cause OOM"));
        CHECK(KErrNoMemory,==,Ldd.CommitMemory(aCommitType,4096*1024));

        test.Next(_L("Check free RAM unchanged"));
        CHECK(freeRam2,==,FreeRam());

        test.Next(_L("Check OOM during ChunkCommit"));
        TInt failResult=KErrGeneral;
        for(TInt failCount=1; failCount<1000; failCount++)
            {
            User::__DbgSetAllocFail(ETrue,RAllocator::EFailNext,failCount);
            failResult = Ldd.CommitMemory(aCommitType,1);
            if(failResult==KErrNone)
                break;
            CHECK(KErrNoMemory,==,failResult);
            }
        User::__DbgSetAllocFail(ETrue,RAllocator::ENone,0);
        CHECK(KErrNone,==,failResult);

        test.Next(_L("Destroy shared chunk"));
        CHECK(1,==,Ldd.CloseChunk());
        CHECK(1,==,Ldd.IsDestroyed());

        test.Next(_L("Check free memory returns to baseline"));
        CHECK(freeRam1,==,FreeRam());
        __KHEAP_MARKEND;

        test.Next(_L("Free gobbled RAM"));
        gobbler.Close();

        test.End();
        }

    test.End();
    }


void TestOpenSharedChunk(TUint aCreateFlags,TCommitType aCommitType)
    {
    SetCreateFlags(aCreateFlags,aCommitType);
    TUint ChunkAttribs = ChunkSize|aCreateFlags;

    test.Start(_L("Create chunk"));
    CHECK(KErrNone,==,Ldd.CreateChunk(ChunkAttribs));

    test.Next(_L("Open user handle"));
    CHECK(KErrNone,==,Ldd.GetChunkHandle(TheChunk));

    test.Next(_L("Commit some memory"));
    CHECK(KErrNone,==,Ldd.CommitMemory(aCommitType|1*PageSize,PageSize));
    CHECK(KErrNone,==,Ldd.CommitMemory(aCommitType|2*PageSize,PageSize));
    CHECK(KErrNone,==,Ldd.CommitMemory(aCommitType|4*PageSize,PageSize));

    test.Next(_L("Check OpenSharedChunk with handle"));
    CHECK(KErrNone,==,Ldd.TestOpenHandle(TheChunk.Handle()));

    test.Next(_L("Check OpenSharedChunk with wrong chunk handle"));
    RChunk testChunk;
    CHECK(KErrNone,==,testChunk.CreateLocal(PageSize,PageSize));
    CHECK(KErrNotFound,==,Ldd.TestOpenHandle(testChunk.Handle()));
    testChunk.Close();

    test.Next(_L("Check OpenSharedChunk with wrong handle type"));
    CHECK(KErrNotFound,==,Ldd.TestOpenHandle(RThread().Handle()));

    test.Next(_L("Check OpenSharedChunk with bad handle"));
    CHECK(KErrNotFound,==,Ldd.TestOpenHandle(0));

    test.Next(_L("Check OpenSharedChunk with address"));
    TUint8* Base = TheChunk.Base();
    CHECK(KErrNone,==,Ldd.TestOpenAddress(Base));
    CHECK(KErrNone,==,Ldd.TestOpenAddress(Base+ChunkSize-1));

    test.Next(_L("Check OpenSharedChunk with bad address"));
    CHECK(KErrNotFound,==,Ldd.TestOpenAddress(Base-1));
    CHECK(KErrNotFound,==,Ldd.TestOpenAddress(Base+ChunkSize));
    CHECK(KErrNotFound,==,Ldd.TestOpenAddress(0));
    CHECK(KErrNotFound,==,Ldd.TestOpenAddress((TAny*)~0));

    test.Next(_L("Check OpenSharedChunk with stack memory address"));
    TUint8 stackMem[100];
    CHECK(KErrNotFound,==,Ldd.TestOpenAddress(stackMem));

    test.Next(_L("Check OpenSharedChunk with heap memory address"));
    TUint8* heapMem = new TUint8[100];
    CHECK(0,!=,heapMem);
    CHECK(KErrNotFound,==,Ldd.TestOpenAddress(heapMem));
    delete [] heapMem;

    test.Next(_L("Check OpenSharedChunk with BSS memory address"));
    CHECK(KErrNotFound,==,Ldd.TestOpenAddress(BssMem));

    test.Next(_L("Check OpenSharedChunk with code memory address"));
    CHECK(KErrNotFound,==,Ldd.TestOpenAddress((TAny*)&TestOpenSharedChunk));

    test.Next(_L("Check OpenSharedChunk with NULL address"));
    CHECK(KErrNotFound,==,Ldd.TestOpenAddress(0));

    test.Next(_L("Check ChunkAddress for given memory region"));
    static const TCommitRegion regions[] =
        {
        {0,1,KErrNotFound},
        {0,2,KErrNotFound},
        {0,3,KErrNotFound},
        {1,1},
        {1,2},
        {1,3,KErrNotFound},
        {2,1},
        {2,2,KErrNotFound},
        {2,3,KErrNotFound},
        {3,1,KErrNotFound},
        {3,2,KErrNotFound},
        {3,3,KErrNotFound},
        {4,1},
        {4,2,KErrNotFound},
        {4,3,KErrNotFound},
        {0,10240,KErrArgument}, // too big
        {1,0,KErrArgument},     // bad size
        {1,-1,KErrArgument},    // bad size
        {10240,1,KErrArgument}, // bad offset
        {-2,2,KErrArgument},    // bad offset
        {-1}
        };
    const TCommitRegion* region = regions;
    for(;region->iOffset!=-1; ++region)
        {
        TUint32 offset = region->iOffset*PageSize;
        TUint32 size = region->iSize*PageSize;
        TInt expectedResult = region->iExpectedResult;
        if((MemModelAttributes&EMemModelTypeMask)==EMemModelTypeDirect && expectedResult==KErrNotFound)
            continue;
        TBuf<100> text;
        text.AppendFormat(_L("Memory region: offset=%08x size=%08x expectedResult=%d"),offset,size,expectedResult);
        test.Next(text);
        CHECK(expectedResult,==,Ldd.TestAddress(offset,size));
        }

    test.Next(_L("Close handles"));
    TheChunk.Close();
    CHECK(1,==,Ldd.CloseChunk());

    test.End();
    }


void AccessSpeed(TCreateFlags aCreateFlags, TInt& aRead,TInt& aWrite)
    {
//  test.Start(_L("Create chunk"));
    CHECK(KErrNone,==,Ldd.CreateChunk(ChunkSize|ESingle|EOwnsMemory|aCreateFlags));

//  test.Next(_L("Commit some memory"));
    if((MemModelAttributes&EMemModelTypeMask)==EMemModelTypeDirect)
        {
        CHECK(KErrNone,==,Ldd.CommitMemory(EDiscontiguous|0*PageSize,PageSize));
        }
    else
        {
        // Allocate contiguous memory when possible so that the
        // Cache::SyncMemoryBeforeXxxx calls in the test driver get exercised
        CHECK(KErrNone,==,Ldd.CommitMemory(EContiguous|0*PageSize,PageSize));
        }

//  test.Next(_L("Open user handle"));
    CHECK(KErrNone,==,Ldd.GetChunkHandle(TheChunk));
    volatile TUint32* p = (TUint32*)TheChunk.Base();

    TUint32 time;
    TInt itterCount=128;
    do
        {
        itterCount *= 2;
        TUint32 lastCount=User::NTickCount();
        for(TInt i=itterCount; i>0; --i)
            {
            TUint32 x=p[0]; x=p[1]; x=p[2]; x=p[3]; x=p[4]; x=p[5]; x=p[6]; x=p[7];
            }
        time = User::NTickCount()-lastCount;
        }
    while(time<200);
    aRead = itterCount*8/time;

    itterCount=128;
    do
        {
        itterCount *= 2;
        TUint32 lastCount=User::NTickCount();
        for(TInt i=itterCount; i>0; --i)
            {
            p[0]=i; p[1]=i; p[2]=i; p[3]=i; p[4]=i; p[5]=i; p[6]=i; p[7]=i;
            }
        time = User::NTickCount()-lastCount;
        }
    while(time<200);
    aWrite = itterCount*8/time;

    TBuf<100> text;
    text.AppendFormat(_L("Read speed=%7d Write speed=%7d\n"),aRead,aWrite);
    test.Printf(text);

//  test.Next(_L("Close handles"));
    TheChunk.Close();
    CHECK(1,==,Ldd.CloseChunk());

//  test.End();
    }

void TestMappingAttributes()
    {
    test.Start(_L("Fully Blocking"));
    TInt blockedRead;
    TInt blockedWrite;
    AccessSpeed(EBlocking,blockedRead,blockedWrite);

    TInt read;
    TInt write;

    test.Next(_L("Write Buffered"));
    AccessSpeed(EBuffered,read,write);
    CHECK(2*blockedRead,>,read);
//  CHECK(2*blockedWrite,<,write); // Write buffering doesn't seem to work when cache disabled (?)

    test.Next(_L("Fully Cached"));
    AccessSpeed(ECached,read,write);
    CHECK(2*blockedRead,<,read);
#ifndef __X86__ // X86 seems to always do write buffering
    // Following check disabled because most dev boards only seem to be a bit faster
    // and asserting a particular speed improvement is unreliable
//  CHECK(2*blockedWrite,<,write);
#endif

    test.End();
    }

class RSession : public RSessionBase
    {
public:
    inline TInt CreateSession(const TDesC& aServer,const TVersion& aVersion)
        { return RSessionBase::CreateSession(aServer,aVersion); }
    inline TInt SendReceive(TInt aFunction,const TIpcArgs& aArgs) const
        { return RSessionBase::SendReceive(aFunction,aArgs); }
    };

TInt SlaveCommand(TSlaveCommand aCommand)
    {
    RDebug::Print(_L("Slave Process - Command %d\n"),aCommand);
    CHECK(KErrNone,==,UserHal::PageSizeInBytes(PageSize));
    CHECK(KErrNone,==,((RBusLogicalChannel&)Ldd).Open(2,EOwnerProcess));
    switch(aCommand)
        {
    case ESlaveCheckChunk:
        {
        RDebug::Print(_L("Slave Process - TheChunk.Open()\n"));
        CHECK(KErrNone,==,TheChunk.Open(3));
        RDebug::Print(_L("Slave Process - Get Region Count\n"));
        TInt regionCount;
        CHECK(KErrNone,==,User::GetTIntParameter(4,regionCount));
        RDebug::Print(_L("Slave Process - CheckCommitedContents(%d)\n"),regionCount);
        CheckCommitedContents(regionCount);
        RDebug::Print(_L("Slave Process - CheckCommitState(%d)\n"),regionCount);
        CheckCommitState(regionCount);
        RDebug::Print(_L("Slave Process - Done\n"));
        return 0;
        }

    case ESlaveCreateChunk:
        {
        RDebug::Print(_L("Slave Process - Get parameters\n"));
        TInt createFlags;
        TInt commitType;
        TInt commitSize;
        CHECK(KErrNone,==,User::GetTIntParameter(3,createFlags));
        CHECK(KErrNone,==,User::GetTIntParameter(4,commitType));
        CHECK(KErrNone,==,User::GetTIntParameter(5,commitSize));

        RDebug::Print(_L("Slave Process - Create Chunk\n"));
        CHECK(KErrNone,==,Ldd.CreateChunk(createFlags));
        CHECK(KErrNone,==,Ldd.CommitMemory(commitType,commitSize));
        CHECK(KErrNone,==,Ldd.GetChunkHandle(TheChunk));
        TUint8* chunkBase=TheChunk.Base();
        memcpy(chunkBase,KTestString().Ptr(),KTestString().Size());

        RDebug::Print(_L("Slave Process - Connecting to test server\n"));
        RSession session;
        CHECK(KErrNone,==,session.CreateSession(KSecondProcessName,TVersion()));

        RDebug::Print(_L("Slave Process - Sending message\n"));
        TPtr8 ptr(chunkBase,commitSize,commitSize);
        session.SendReceive(0,TIpcArgs(&ptr));

        RDebug::Print(_L("Slave Process - Destroy Chunk\n"));
        TheChunk.Close();
        CHECK(1,==,Ldd.CloseChunk()); // 1==DObject::EObjectDeleted
        CHECK(1,==,Ldd.IsDestroyed());
        return 0;
        }

    default:
        RDebug::Print(_L("Slave Process - Bad Command\n"));
        return KErrArgument;
        }
    }

void TestChunkUserBase()
    {
    TUint ChunkAttribs = ChunkSize|ESingle|EOwnsMemory;

    test.Start(_L("Create chunk"));
    CHECK(KErrNone,==,Ldd.CreateChunk(ChunkAttribs));

    test.Next(_L("Open user handle"));
    CHECK(KErrNone,==,Ldd.GetChunkHandle(TheChunk));

    test.Next(_L("Commit some memory"));
    CHECK(KErrNone,==,Ldd.CommitMemory(EDiscontiguous|1*PageSize,PageSize));

    test.Next(_L("Check OpenSharedChunk with handle"));
    CHECK(KErrNone,==,Ldd.TestOpenHandle(TheChunk.Handle()));

    test.Next(_L("Get Kernel's user base"));
    TAny *kernelUserAddress;
    CHECK(KErrNone,==,Ldd.GetChunkUserBase(&kernelUserAddress));
    TAny *userAddress = TheChunk.Base();
    test(kernelUserAddress == userAddress);

    TheChunk.Close();
    CHECK(1,==,Ldd.CloseChunk());

    test.End();
    }


TInt E32Main()
    {
    // Running as slave?
    TInt slaveCommand;
    if(User::GetTIntParameter(1,slaveCommand)==KErrNone)
        return SlaveCommand((TSlaveCommand)slaveCommand);

    // Turn off lazy dll unloading
    RLoader l;
    test(l.Connect()==KErrNone);
    test(l.CancelLazyDllUnload()==KErrNone);
    l.Close();

    test.Title();

    MemModelAttributes=UserSvr::HalFunction(EHalGroupKernel, EKernelHalMemModelInfo, NULL, NULL);
    TUint mm=MemModelAttributes&EMemModelTypeMask;
#ifdef __T_SHAREDCHUNKF__
    if(mm!=EMemModelTypeMoving)
        {
        test.Start(_L("TESTS NOT RUN - Only valid on Moving Memory Model"));
        test.End();
        return 0;
        }
#endif

    test.Start(_L("Initialise"));
    CHECK(KErrNone,==,UserHal::PageSizeInBytes(PageSize));
    PhysicalCommitSupported = mm!=EMemModelTypeDirect && mm!=EMemModelTypeEmul;
    CachingAttributesSupported = mm!=EMemModelTypeDirect && mm!=EMemModelTypeEmul;


    test.Next(_L("Loading test driver"));
    TInt r = User::LoadLogicalDevice(KSharedChunkLddName);
    test(r==KErrNone || r==KErrAlreadyExists);

    test.Next(_L("Opening channel"));
    CHECK(KErrNone,==,Ldd.Open());

    // now 'unload' test driver, however, it will remain loaded whilst
    // we still have a channel open with it...
    User::FreeLogicalDevice(KSharedChunkLddName);

    test.Next(_L("Test chunk create"));
    TestCreate();

    test.Next(_L("Test handles"));
    TestHandles();

    test.Next(_L("Test handle ownership"));
    TestHandleOwnership();

    test.Next(_L("Test restrictions for multiply shared chunks"));
    TestRestrictions(EMultiple);
    test.Next(_L("Test restrictions for singly shared chunks"));
    TestRestrictions(ESingle);

    test.Next(_L("Test memory access for multiply shared chunks"));
    TestAccess(EMultiple|EOwnsMemory,EDiscontiguous);
    test.Next(_L("Test memory access for singly shared chunks"));
    TestAccess(ESingle|EOwnsMemory,EDiscontiguous);

    test.Next(_L("Test Discontiguous memory commit for multiply shared chunks"));
    TestCommit(EMultiple,EDiscontiguous);
    test.Next(_L("Test Discontiguous memory commit for singly shared chunks"));
    TestCommit(ESingle,EDiscontiguous);

    if((MemModelAttributes&EMemModelTypeMask)!=EMemModelTypeDirect)
        {
        test.Next(_L("Test Contiguous memory commit for multiply shared chunks"));
        TestCommit(EMultiple,EContiguous);
        test.Next(_L("Test Contiguous memory commit for singly shared chunks"));
        TestCommit(ESingle,EContiguous);
        }

    if(PhysicalCommitSupported)
        {
        test.Next(_L("Test Discontiguous Physical commit for multiply shared chunks"));
        TestCommit(EMultiple,EDiscontiguousPhysical);
        test.Next(_L("Test Discontiguous Physical commit for singly shared chunks"));
        TestCommit(ESingle,EDiscontiguousPhysical);

        test.Next(_L("Test Contiguous Physical commit for multiply shared chunks"));
        TestCommit(EMultiple,EContiguousPhysical);
        test.Next(_L("Test Contiguous Physical commit for singly shared chunks"));
        TestCommit(ESingle,EContiguousPhysical);
        }

    test.Next(_L("Test Kern::OpenSharedChunk for multiply shared chunks"));
    TestOpenSharedChunk(EMultiple,EDiscontiguous);
    test.Next(_L("Test Kern::OpenSharedChunk for singly shared chunks"));
    TestOpenSharedChunk(ESingle,EDiscontiguous);

    if(CachingAttributesSupported)
        {
        test.Next(_L("Test Mapping Attributes"));
        TestMappingAttributes();
        }

    test.Next(_L("Testing Kern::ChunkUserBase for shared chunks"));
    TestChunkUserBase();

    if (PhysicalCommitSupported)
        {
        test.Next(_L("Testing Kern::ChunkClose allows immediate freeing of physical ram"));
        test_KErrNone(Ldd.TestChunkCloseAndFree());
        }

    test.Next(_L("Close test driver"));
    Ldd.Close();

    test.End();

    return 0;
    }