sl@0: // Copyright (c) 2005-2009 Nokia Corporation and/or its subsidiary(-ies). sl@0: // All rights reserved. sl@0: // This component and the accompanying materials are made available sl@0: // under the terms of the License "Eclipse Public License v1.0" sl@0: // which accompanies this distribution, and is available sl@0: // at the URL "http://www.eclipse.org/legal/epl-v10.html". sl@0: // sl@0: // Initial Contributors: sl@0: // Nokia Corporation - initial contribution. sl@0: // sl@0: // Contributors: sl@0: // sl@0: // Description: sl@0: // e32test\mmu\d_demandpaging.cpp sl@0: // sl@0: // sl@0: sl@0: #include sl@0: #include sl@0: #include "d_demandpaging.h" sl@0: sl@0: /// Page attributes, cut-n-paste'd from mmubase.h sl@0: enum TType sl@0: { sl@0: // EInvalid=0, // No physical RAM exists for this page sl@0: // EFixed=1, // RAM fixed at boot time sl@0: // EUnused=2, // Page is unused sl@0: // EChunk=3, sl@0: // ECodeSeg=4, sl@0: // EHwChunk=5, sl@0: // EPageTable=6, sl@0: // EPageDir=7, sl@0: // EPtInfo=8, sl@0: // EShadow=9, sl@0: sl@0: EPagedROM=10, sl@0: EPagedCode=11, sl@0: EPagedData=12, sl@0: EPagedCache=13, sl@0: EPagedFree=14, sl@0: }; sl@0: sl@0: enum TState sl@0: { sl@0: EStateNormal = 0, // no special state sl@0: EStatePagedYoung = 1, sl@0: EStatePagedOld = 2, sl@0: EStatePagedDead = 3, sl@0: EStatePagedLocked = 4 sl@0: }; sl@0: sl@0: // sl@0: // Class definitions sl@0: // sl@0: sl@0: class DDemandPagingTestFactory : public DLogicalDevice sl@0: { sl@0: public: sl@0: ~DDemandPagingTestFactory(); sl@0: virtual TInt Install(); sl@0: virtual void GetCaps(TDes8& aDes) const; sl@0: virtual TInt Create(DLogicalChannelBase*& aChannel); sl@0: }; sl@0: sl@0: class DDemandPagingTestChannel : public DLogicalChannelBase sl@0: { sl@0: public: sl@0: DDemandPagingTestChannel(); sl@0: ~DDemandPagingTestChannel(); sl@0: virtual TInt DoCreate(TInt aUnit, const TDesC8* anInfo, const TVersion& aVer); sl@0: virtual TInt Request(TInt aFunction, TAny* a1, TAny* a2); sl@0: TInt 
LockTest(const TAny* aBuffer, TInt aSize); sl@0: TInt LockTest2(); sl@0: TInt DoConsumeContiguousRamTest(TInt aAlign, TInt aPages); sl@0: TInt DoCreatePlatHwChunk(TInt aSize, TAny* aLinAddr); sl@0: TInt DoDestroyPlatHwChunk(); sl@0: TInt ReadHoldingMutexTest(TAny* aDest); sl@0: sl@0: TBool CheckPagedIn(TLinAddr aAddress); sl@0: TBool CheckPagedOut(TLinAddr aAddress); sl@0: TBool CheckLocked(TLinAddr aAddress); sl@0: sl@0: TInt FreeRam(); sl@0: public: sl@0: DDemandPagingTestFactory* iFactory; sl@0: DDemandPagingLock iLock; sl@0: sl@0: DPlatChunkHw* iHwChunk; sl@0: TInt iChunkSize; sl@0: TPhysAddr iPhysBase; // This will be base physical address of the chunk sl@0: TLinAddr iLinearBase; // This will be base linear address of the chunk sl@0: }; sl@0: sl@0: // sl@0: // DDemandPagingTestFactory sl@0: // sl@0: sl@0: TInt DDemandPagingTestFactory::Install() sl@0: { sl@0: return SetName(&KDemandPagingTestLddName); sl@0: } sl@0: sl@0: DDemandPagingTestFactory::~DDemandPagingTestFactory() sl@0: { sl@0: } sl@0: sl@0: void DDemandPagingTestFactory::GetCaps(TDes8& /*aDes*/) const sl@0: { sl@0: // Not used but required as DLogicalDevice::GetCaps is pure virtual sl@0: } sl@0: sl@0: TInt DDemandPagingTestFactory::Create(DLogicalChannelBase*& aChannel) sl@0: { sl@0: aChannel = NULL; sl@0: DDemandPagingTestChannel* channel=new DDemandPagingTestChannel; sl@0: if(!channel) sl@0: return KErrNoMemory; sl@0: channel->iFactory = this; sl@0: aChannel = channel; sl@0: return KErrNone; sl@0: } sl@0: sl@0: DECLARE_STANDARD_LDD() sl@0: { sl@0: return new DDemandPagingTestFactory; sl@0: } sl@0: sl@0: // sl@0: // DDemandPagingTestChannel sl@0: // sl@0: sl@0: TInt DDemandPagingTestChannel::DoCreate(TInt /*aUnit*/, const TDesC8* /*aInfo*/, const TVersion& /*aVer*/) sl@0: { sl@0: return KErrNone; sl@0: } sl@0: sl@0: DDemandPagingTestChannel::DDemandPagingTestChannel() sl@0: { sl@0: } sl@0: sl@0: DDemandPagingTestChannel::~DDemandPagingTestChannel() sl@0: { sl@0: DoDestroyPlatHwChunk(); sl@0: } 

// Single dispatch point for all user-side test requests on this channel.
// aFunction selects the test; a1/a2 carry per-function arguments (addresses
// and sizes cast through TAny*).
TInt DDemandPagingTestChannel::Request(TInt aFunction, TAny* a1, TAny* a2)
	{
	switch(aFunction)
		{
	case RDemandPagingTestLdd::ELockTest:
		{
		// a1 = buffer address, a2 = buffer size (see LockTest's signature)
		TInt r = LockTest(a1,(TInt)a2);
		if (r == KErrNone)
			r = LockTest2();
		return r;
		}

	case RDemandPagingTestLdd::ESetRealtimeTrace:
		{
#if defined(_DEBUG)
		// Atomically clear-then-set the KREALTIME trace bit in the superpage
		// debug mask: a1 non-zero enables the trace, zero disables it.
		TUint32 bit = TUint32(1<<(KREALTIME&31));
		__e32_atomic_axo_ord32(&Kern::SuperPage().iDebugMask[KREALTIME>>5], ~bit, a1?bit:0);
#if 0 // can enable this to help debugging
		bit = (1<<(KPAGING&31));
		__e32_atomic_axo_ord32(&Kern::SuperPage().iDebugMask[KPAGING>>5], ~bit, a1?bit:0);
#endif
#endif //_DEBUG
		}
		return KErrNone;

	case RDemandPagingTestLdd::EDoConsumeContiguousRamTest:
		{
		// a1 = alignment (power-of-two shift), a2 = block size in bytes
		return DDemandPagingTestChannel::DoConsumeContiguousRamTest((TInt)a1, (TInt)a2);
		}

	case RDemandPagingTestLdd::ECreatePlatHwChunk:
		{
		// a1 = requested size, a2 = user-side address receiving the linear base
		return DDemandPagingTestChannel::DoCreatePlatHwChunk((TInt)a1, a2);
		}

	case RDemandPagingTestLdd::EDestroyPlatHwChunk:
		{
		return DDemandPagingTestChannel::DoDestroyPlatHwChunk();
		}

	case RDemandPagingTestLdd::ELock:
		{
		// Pin the a2 bytes at address a1 using the channel's paging lock;
		// the reservation must succeed before the lock is taken.
		TInt r=iLock.Alloc((TInt)a2);
		if(r!=KErrNone)
			return r;
		return iLock.Lock(&Kern::CurrentThread(),(TLinAddr)a1,(TInt)a2);
		}

	case RDemandPagingTestLdd::EUnlock:
		{
		iLock.Free();
		return KErrNone;
		}

	case RDemandPagingTestLdd::EReadHoldingMutexTest:
		return ReadHoldingMutexTest((TAny*)a1);

	default:
		return KErrNotSupported;
		}
	}

//
// DDemandPagingTestChannel::DoCreatePlatHwChunk
//
// For some of the tests of IPC from demand-paged memory, we need a writable
// globally-mapped buffer; so this function creates a suitable chunk and
// returns its (global, virtual) address to the
// userland caller. The caller
// should call DoDestroyPlatHwChunk() to release the memory when the tests
// are finished.
//
// On success the chunk's global linear base address is copied to the
// user-side address aLinAddr; only one chunk may exist at a time.
TInt DDemandPagingTestChannel::DoCreatePlatHwChunk(TInt aSize, TAny* aLinAddr)
	{
	TInt mapAttr = EMapAttrUserRw;	// Supervisor and user both have read/write permissions

	NKern::ThreadEnterCS();
	if (iHwChunk)	// Only one chunk at a atime
		{
		NKern::ThreadLeaveCS();
		return KErrAlreadyExists;
		}

	iChunkSize = Kern::RoundToPageSize(aSize);

	Kern::Printf("*** Attempting to allocate contiguous physical RAM ***");
	TInt free = Kern::FreeRamInBytes();
	Kern::Printf(" requested: %08x", iChunkSize);
	Kern::Printf(" total free: %08x", free);

	TInt r = Epoc::AllocPhysicalRam(iChunkSize, iPhysBase, 0);	// Allocate RAM; result in iPhysBase
	if (r)
		{
		NKern::ThreadLeaveCS();
		Kern::Printf(" failed with error %d", r);
		return r;
		}
	else
		Kern::Printf(" success");

	r = DPlatChunkHw::New(iHwChunk, iPhysBase, iChunkSize, mapAttr);	// Create chunk
	if (r)
		{
		// Chunk creation failed: give the physical RAM back before bailing out.
		Epoc::FreePhysicalRam(iPhysBase, iChunkSize);
		iHwChunk = 0;
		NKern::ThreadLeaveCS();
		return r;
		}
	NKern::ThreadLeaveCS();

	// Return the virtual address to userland
	iLinearBase = iHwChunk->LinearAddress();
	kumemput(aLinAddr, &iLinearBase, sizeof(iLinearBase));

	Kern::Printf("CreatePlatHwChunk@%08x: iLinearBase %08x, iPhysBase %08x, size %d",
		iHwChunk, iLinearBase, iPhysBase, iChunkSize);

	return KErrNone;
	}

// Tear down the chunk made by DoCreatePlatHwChunk() and free its physical
// RAM. Safe to call when no chunk exists (the destructor relies on this).
TInt DDemandPagingTestChannel::DoDestroyPlatHwChunk()
	{
	Kern::Printf("DestroyPlatHwChunk@%08x: iLinearBase %08x, iPhysBase %08x, size %d",
		iHwChunk, iLinearBase, iPhysBase, iChunkSize);
	NKern::ThreadEnterCS();
	if (iHwChunk)
		{
		iHwChunk->Close(NULL);
		Epoc::FreePhysicalRam(iPhysBase, iChunkSize);
		iPhysBase = 0;
		iChunkSize = 0;
		iHwChunk = 0;
		}
	NKern::ThreadLeaveCS();
	return KErrNone;
	}

//
// DDemandPagingTestChannel::DoConsumeContiguousRamTest
//
// This test attempts to consume all available Contiguous Ram until we need to ask the
// demand paging code to release memory for it.
//
// On completion free all the memory allocated.
//
// Non-fatal check: logs the failing line and latches it into retVal (later
// failures overwrite earlier ones), but execution carries on.
#define CHECK(c) { if(!(c)) { Kern::Printf("Fail %d", __LINE__); ; retVal = __LINE__;} }

TInt DDemandPagingTestChannel::DoConsumeContiguousRamTest(TInt aAlign, TInt aSize)
	{
	TInt retVal = KErrNone;
	TInt initialFreeRam = FreeRam();
	TInt totalBlocks = initialFreeRam/aSize;

	NKern::ThreadEnterCS();
	TPhysAddr* pAddrArray = (TPhysAddr *)Kern::Alloc(sizeof(TPhysAddr) * totalBlocks);
	NKern::ThreadLeaveCS();
	CHECK(pAddrArray);
	if(!pAddrArray)
		return retVal;

	SVMCacheInfo tempPages;

	// get the initial free ram again as the heap may have grabbed a page during the alloc
	initialFreeRam = FreeRam();
	Kern::Printf("ConsumeContiguousRamTest: align %d size %d initialFreeRam %d", aAlign, aSize, initialFreeRam);

	CHECK(Kern::HalFunction(EHalGroupVM,EVMHalGetCacheSize,&tempPages,0) == KErrNone);
	Kern::Printf("Start cache info: iMinSize %d iMaxSize %d iCurrentSize %d iMaxFreeSize %d",
		tempPages.iMinSize, tempPages.iMaxSize, tempPages.iCurrentSize ,tempPages.iMaxFreeSize);

	TInt initialFreePages = tempPages.iMaxFreeSize;
	CHECK(initialFreePages != 0);

	// allocate blocks to use up RAM until we fail to allocate any further...
	TBool freedPagesToAlloc = EFalse;
	TInt index;
	TUint32 alignMask = (1 << aAlign) - 1;	// aAlign is a power-of-two shift
	for (index = 0; index < totalBlocks; )
		{
		// Sample the paging cache's releasable size before the allocation so
		// we can tell whether the allocation stole pages from it.
		CHECK(Kern::HalFunction(EHalGroupVM,EVMHalGetCacheSize,&tempPages,0) == KErrNone);
		TInt beforePages = tempPages.iMaxFreeSize;

		NKern::ThreadEnterCS();
		TInt r = Epoc::AllocPhysicalRam(aSize, pAddrArray[index], aAlign);
		if(r==KErrNone)
			{
			// check the alignment of the returned pages
			CHECK((pAddrArray[index] & alignMask) == 0);
			++index;
			}
		NKern::ThreadLeaveCS();
		if(r!=KErrNone)
			{
			break;
			}
		CHECK(Kern::HalFunction(EHalGroupVM,EVMHalGetCacheSize,&tempPages,0) == KErrNone);
		TInt afterPages = tempPages.iMaxFreeSize;

		if (afterPages != beforePages)
			freedPagesToAlloc = ETrue;	// the alloc reclaimed memory from the paging cache
		}

	if (!index)
		Kern::Printf("WARNING : DoConsumeContiguousRamTest no allocations were successful");
	// free the memory we allocated...
	while(--index>=0)
		{
		NKern::ThreadEnterCS();
		TInt r = Epoc::FreePhysicalRam(pAddrArray[index], aSize);
		NKern::ThreadLeaveCS();
		CHECK(r==KErrNone);
		}

	// Everything was returned, so the free-RAM level should be back where it started.
	CHECK(FreeRam() == initialFreeRam);

	NKern::ThreadEnterCS();
	Kern::Free(pAddrArray);
	NKern::ThreadLeaveCS();

	CHECK(Kern::HalFunction(EHalGroupVM,EVMHalGetCacheSize,&tempPages,0) == KErrNone);
	Kern::Printf("End cache info: iMinSize %d iMaxSize %d iCurrentSize %d iMaxFreeSize %d",
		tempPages.iMinSize, tempPages.iMaxSize, tempPages.iCurrentSize ,tempPages.iMaxFreeSize);

	if (!freedPagesToAlloc)
		Kern::Printf("WARNING : DoConsumeContiguousRamTest freedPagesToAlloc was eFalse");
	//CHECK(freedPagesToAlloc);

	return retVal;
	}
#undef CHECK


// Reads one byte through a volatile pointer — the volatile access forces a
// real memory read, so touching a paged-out address takes a paging fault.
TUint8 ReadByte(volatile TUint8* aPtr)
	{
	return *aPtr;
	}

// Fatal check for the tests below: bail out returning the failing line number.
#define CHECK(c) { if(!(c)) return __LINE__; }

#define READ(a) ReadByte((volatile TUint8*)(a))

// Exercises DDemandPagingLock against a demand-paged user buffer: page the
// buffer in by reading it, then check the paged/locked state transitions.
// Returns KErrNone on success, or the failing line number.
TInt DDemandPagingTestChannel::LockTest(const TAny* aBuffer, TInt aSize)
	{
	// Get page size info
	TInt pageSize = 0;
	CHECK(Kern::HalFunction(EHalGroupKernel,EKernelHalPageSizeInBytes,&pageSize,0)==KErrNone);
	TInt pageMask = pageSize-1;

	// See if were running of the Flexible Memory Model
	TUint32 memModelAttrib = (TUint32)Kern::HalFunction(EHalGroupKernel,EKernelHalMemModelInfo,0,0);
	TBool fmm = (memModelAttrib&EMemModelTypeMask)==EMemModelTypeFlexible;

	// Round buffer to page boundaries
	TLinAddr start = ((TLinAddr)aBuffer+pageMask)&~pageMask;
	TLinAddr end = ((TLinAddr)aBuffer+aSize)&~pageMask;
	aSize = end-start;
	Kern::Printf("Test buffer is %08x, %x\n",start,aSize);
	CHECK(aSize>pageSize*2);

	// Flush all paged memory
	Kern::HalFunction(EHalGroupVM,EVMHalFlushCache,0,0);

	TInt initialFreeRam;
	TInt freeRam1;
	TInt freeRam2;
	TLinAddr addr;
	TUint lockBytesUsed = fmm ? 0 : 0; // free ram change on locking (zero or aSize depending on implementation)

	{ // this brace is essential for correctness
	DDemandPagingLock lock2; // construct a lock;

	Kern::Printf("Check reading from buffer pages it in\n");
	// NOTE(review): a large span of this function — and the whole opening of
	// LockTest2() below, including its signature, local declarations of 'r'
	// and 'lock', and the redefinition of CHECK that jumps to the 'cleanup:'
	// label — was destroyed by markup-mangling (text between '<' and '>' was
	// swallowed; the loop bound after 'addr' and the template argument of
	// RPointerArray, presumably RPointerArray<DDemandPagingLock>, are gone).
	// The fragment below is preserved exactly as found; recover the missing
	// code from the original source tree before building.
	for(addr=start; addr lockArray;

	const TInt KLockMax = 1000; // make this a bit bigger than current min page count?
	TInt i;

	NKern::ThreadEnterCS();
	// Keep allocating one-page lock reservations until the demand-paging live
	// list has to grow (observed as a drop in free RAM).
	for (i = 0 ; i < KLockMax ; ++i)
		{
		lock = new DDemandPagingLock;
		CHECK(lock);
		CHECK(lockArray.Append(lock) == KErrNone);
		lock = NULL;

		TInt initialFreeRam = FreeRam();
		CHECK(lockArray[i]->Alloc(1) == KErrNone);
		if (FreeRam() < initialFreeRam)
			{
			Kern::Printf("Live list size increased after %d locks allocated", i + 1);
			break;
			}
		}

	// Fail if we never saw the live list grow within KLockMax attempts.
	CHECK(i < KLockMax);

cleanup:

	delete lock;
	lock = NULL;
	for (i = 0 ; i < lockArray.Count() ; ++i)
		{
		delete lockArray[i];
		lockArray[i] = NULL;
		}
	lockArray.Reset();

	NKern::ThreadLeaveCS();

	return r;
	}

// Returns the current free RAM in bytes, first draining pending supervisor
// work so the figure is stable; also traces it for the test logs.
TInt DDemandPagingTestChannel::FreeRam()
	{
	Kern::HalFunction(EHalGroupKernel, EKernelHalSupervisorBarrier, 0, 0);
	TInt freeRam = Kern::FreeRamInBytes();
	Kern::Printf("...free RAM: %x\n",freeRam);
	return freeRam;
	}


// Queries the kernel's paging state word for the page containing aAddress
// (byte 1 holds a TState value — see the enum at the top of the file).
TUint32 PageState(TLinAddr aAddress)
	{
	TUint32 state = Kern::HalFunction(EHalGroupVM, EVMPageState, (TAny*)aAddress, 0);
	Kern::Printf("PageState: %08x=%08x",aAddress,state);
	return state;
	}


// True if the page is live and young (freshly paged in).
TBool DDemandPagingTestChannel::CheckPagedIn(TLinAddr aAddress)
	{
	TUint32 state = PageState(aAddress);
	return (state&0xff00) == (EStatePagedYoung<<8);
	}


// True if the page has no paging type/state at all (fully paged out).
TBool DDemandPagingTestChannel::CheckPagedOut(TLinAddr aAddress)
	{
	TUint32 state = PageState(aAddress);
	return (state&0xffff) == 0;
	}


// True if the page is currently pinned by a demand-paging lock.
// NOTE(review): declared as TBool in the class but defined as TInt here —
// benign on Symbian where TBool is a typedef of TInt, but worth confirming.
TInt DDemandPagingTestChannel::CheckLocked(TLinAddr aAddress)
	{
	TUint32 state = PageState(aAddress);
	return (state&0xff00) == (EStatePagedLocked<<8);
	}


// Performs thread reads/writes while holding a mutex whose order is below the
// demand-paging mutex. All source data is deliberately unpaged (unpaged ROM,
// stack, kernel heap) so no paging fault can occur; if any access does fault,
// the kernel faults and the test fails (see the comment on the return).
TInt DDemandPagingTestChannel::ReadHoldingMutexTest(TAny* aDest)
	{
	_LIT(KMutexName, "DPTestMutex");

	NKern::ThreadEnterCS();

	DMutex* mutex;
	TInt r = Kern::MutexCreate(mutex, KMutexName, KMutexOrdDebug); // Mutex order < demand paging
	if (r != KErrNone)
		{
		NKern::ThreadLeaveCS();
		return r;
		}
	Kern::MutexWait(*mutex);

	// Work out the extent of the unpaged part of ROM.
	const TRomHeader& romHeader = Epoc::RomHeader();
	TLinAddr unpagedRomStart = (TLinAddr)&romHeader;
	TLinAddr unpagedRomEnd;
	if (romHeader.iPageableRomStart)
		unpagedRomEnd = unpagedRomStart + romHeader.iPageableRomStart;
	else
		unpagedRomEnd = unpagedRomStart + romHeader.iUncompressedSize;

	const TInt length = 16;
	TUint8 localBuf[length];
	if(!aDest)
		aDest = localBuf;		// no user buffer supplied: write to kernel stack
	Kern::Printf("Local buffer at %08x", aDest);

	// First and last 16 bytes of the unpaged ROM region.
	TAny* src1 = (TAny*)unpagedRomStart;
	TAny* src2 = (TAny*)(unpagedRomEnd - length);

	DThread* thread = &Kern::CurrentThread();

	Kern::Printf("Attempting to access %08x", src1);
	Kern::ThreadRawWrite(thread, aDest, src1, length);
	Kern::Printf("Attempting to access %08x", src2);
	Kern::ThreadRawWrite(thread, aDest, src2, length);

	TUint8 stackData[length];
	Kern::Printf("Attempting to access %08x", stackData);
	Kern::ThreadRawWrite(thread, aDest, stackData, length);

	TAny* heapData = Kern::Alloc(length);
	if (heapData)
		{
		Kern::Printf("Attempting to access %08x", heapData);
		Kern::ThreadRawWrite(thread, aDest, heapData, length);
		Kern::Free(heapData);
		}
	else
		r = KErrNoMemory;

	Kern::MutexSignal(*mutex);
	mutex->Close(NULL);

	NKern::ThreadLeaveCS();

	return r; // a kernel fault indicates that the test failed
	}