// Copyright (c) 2004-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32test\mmu\d_sharedchunk.cpp
//
//

#include <kernel/kern_priv.h>
#include <kernel/cache.h>
#include "d_sharedchunk.h"

TBool PhysicalCommitSupported = ETrue;

#ifdef __EPOC32__
#define TEST_PHYSICAL_COMMIT
#endif

static volatile TInt ChunkDestroyedCount=1; // Test counter

//
// Class definitions
//

class DSharedChunkFactory : public DLogicalDevice
	{
public:
	~DSharedChunkFactory();
	virtual TInt Install();
	virtual void GetCaps(TDes8& aDes) const;
	virtual TInt Create(DLogicalChannelBase*& aChannel);
	TInt ClaimMemory();
	void ReleaseMemory();
	TInt AllocMemory(TInt aSize, TUint32& aPhysAddr);
	void FreeMemory(TInt aSize,TUint32 aPhysAddr);
	void LockWait();
	void LockSignal();
private:
	NFastMutex iLock;
public:
	TBool iMemoryInUse;
	TUint32 iPhysBase;
	TUint32 iPhysEnd;
	TUint32 iPhysNext;
	TInt* iDummyCell;
	};

class DSharedChunkChannel : public DLogicalChannelBase
	{
public:
	DSharedChunkChannel();
	~DSharedChunkChannel();
	virtual TInt DoCreate(TInt aUnit, const TDesC8* anInfo, const TVersion& aVer);
	virtual TInt Request(TInt aFunction, TAny* a1, TAny* a2);
	DChunk* OpenChunk(TLinAddr* aKernelAddr=0, TInt* aMaxSize=0);
	inline void LockWait()
		{ iFactory->LockWait(); }
	inline void LockSignal()
		{ iFactory->LockSignal(); }
	TUint32 DfcReadWrite(TUint32* aPtr, TUint32 aValue);
	TUint32 IsrReadWrite(TUint32* aPtr, TUint32 aValue);
public:
	DSharedChunkFactory* iFactory;
	DChunk* iChunk;
	TLinAddr iKernelAddress;
	TInt iMaxSize;
	};

class TChunkCleanup : public TDfc
	{
public:
	TChunkCleanup(DSharedChunkFactory* aFactory,TBool aReleasePhysicalMemory);
	~TChunkCleanup();
	static void ChunkDestroyed(TChunkCleanup* aSelf);
	void Cancel();
public:
	DSharedChunkFactory* iFactory;
	TBool iReleasePhysicalMemory;
	};

//
// TChunkCleanup
//

TChunkCleanup::TChunkCleanup(DSharedChunkFactory* aFactory,TBool aReleasePhysicalMemory)
	: TDfc((TDfcFn)TChunkCleanup::ChunkDestroyed,this,Kern::SvMsgQue(),0)
	, iFactory(0), iReleasePhysicalMemory(aReleasePhysicalMemory)
	{
	aFactory->Open();
	iFactory = aFactory;
	}

TChunkCleanup::~TChunkCleanup()
	{
	if(iFactory)
		iFactory->Close(0);
	}

void TChunkCleanup::ChunkDestroyed(TChunkCleanup* aSelf)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("D_SHAREDCHUNK ChunkDestroyed DFC\n"));
	DSharedChunkFactory* factory = aSelf->iFactory;
	if(factory)
		{
		factory->LockWait();
		if(aSelf->iReleasePhysicalMemory)
			factory->ReleaseMemory();
		factory->LockSignal();
		__e32_atomic_add_ord32(&ChunkDestroyedCount, 1);
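		// ChunkDestroyedCount is read back by the EIsDestroyed request (after the
		// system has gone idle), so user-side test code can tell that this DFC has run.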
		__KTRACE_OPT(KMMU,Kern::Printf("D_SHAREDCHUNK ChunkDestroyedCount=%d\n",ChunkDestroyedCount));
		}
	delete aSelf;
	}

void TChunkCleanup::Cancel()
	{
	if(iFactory)
		{
		iFactory->Close(0);
		iFactory = 0;
		}
	}

//
// DSharedChunkFactory
//

TInt DSharedChunkFactory::Install()
	{
	TUint mm=Kern::HalFunction(EHalGroupKernel,EKernelHalMemModelInfo,0,0)&EMemModelTypeMask;
	PhysicalCommitSupported = mm!=EMemModelTypeDirect && mm!=EMemModelTypeEmul;
#ifdef __EPOC32__
	if(PhysicalCommitSupported)
		{
		TInt physSize = 4096*1024;
		TInt r=Epoc::AllocPhysicalRam(physSize, iPhysBase);
		if(r!=KErrNone)
			return r;
		iPhysNext = iPhysBase;
		iPhysEnd = iPhysBase+physSize;
		iMemoryInUse = EFalse;
		}
#endif
	// Make sure there is enough space on the kernel heap so that the heap doesn't need
	// to expand when allocating objects. (Required for OOM and memory leak testing.)
	TAny* expandHeap = Kern::Alloc(16*1024);
	iDummyCell = new TInt;
	Kern::Free(expandHeap);

	return SetName(&KSharedChunkLddName);
	}

DSharedChunkFactory::~DSharedChunkFactory()
	{
#ifdef __EPOC32__
	if(PhysicalCommitSupported)
		Epoc::FreePhysicalRam(iPhysBase, iPhysEnd-iPhysBase);
#endif
	delete iDummyCell;
	}

void DSharedChunkFactory::GetCaps(TDes8& /*aDes*/) const
	{
	// Not used but required as DLogicalDevice::GetCaps is pure virtual
	}

TInt DSharedChunkFactory::Create(DLogicalChannelBase*& aChannel)
	{
	aChannel = NULL;
	DSharedChunkChannel* channel=new DSharedChunkChannel;
	if(!channel)
		return KErrNoMemory;
	channel->iFactory = this;
	aChannel = channel;
	return KErrNone;
	}

void DSharedChunkFactory::LockWait()
	{
	NKern::FMWait(&iLock);
	}

void DSharedChunkFactory::LockSignal()
	{
	NKern::FMSignal(&iLock);
	}

TInt DSharedChunkFactory::AllocMemory(TInt aSize, TUint32& aPhysAddr)
	{
	if(!PhysicalCommitSupported)
		aSize = 0;
	TInt r=KErrNone;
	aSize = Kern::RoundToPageSize(aSize);
	LockWait();
	if(iPhysNext+aSize>iPhysEnd)
		r = KErrNoMemory;
	else
		{
		aPhysAddr = iPhysNext;
		iPhysNext += aSize;
		}
	LockSignal();
	return r;
	}

TInt DSharedChunkFactory::ClaimMemory()
	{
	if (__e32_atomic_swp_ord32(&iMemoryInUse, 1))
		return KErrInUse;
	iPhysNext = iPhysBase;	// reset allocation pointer
	return KErrNone;
	}

void DSharedChunkFactory::ReleaseMemory()
	{
	iMemoryInUse=EFalse;
	}

void DSharedChunkFactory::FreeMemory(TInt aSize,TUint32 aPhysAddr)
	{
	if(!PhysicalCommitSupported)
		aSize = 0;
	aSize = Kern::RoundToPageSize(aSize);
	if(iPhysNext!=aPhysAddr+aSize)
		{ FAULT(); }	// Only support freeing from the end
	LockWait();
	iPhysNext -= aSize;
	LockSignal();
	}

DECLARE_STANDARD_LDD()
	{
	return new DSharedChunkFactory;
	}

//
// DSharedChunkChannel
//

TInt DSharedChunkChannel::DoCreate(TInt /*aUnit*/, const TDesC8* /*aInfo*/, const TVersion& /*aVer*/)
	{
	return KErrNone;
	}

DSharedChunkChannel::DSharedChunkChannel()
	{
	}
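// The channel holds a single reference on its shared chunk in iChunk (together with
// the chunk's kernel address and maximum size); the destructor drops that reference.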
DSharedChunkChannel::~DSharedChunkChannel()
	{
	if(iChunk)
		iChunk->Close(0);
	}


void DoDfcReadWrite(TUint32* aArgs)
	{
	TUint32* ptr = (TUint32*)aArgs[0];
	TUint32 value = aArgs[1];
	aArgs[1] = *ptr;
	*ptr = value;
	NKern::FSSignal((NFastSemaphore*)aArgs[2]);
	}

TUint32 DSharedChunkChannel::DfcReadWrite(TUint32* aPtr, TUint32 aValue)
	{
	NFastSemaphore sem;
	NKern::FSSetOwner(&sem,0);

	TUint32 args[3];
	args[0] = (TUint32)aPtr;
	args[1] = aValue;
	args[2] = (TUint32)&sem;

	TDfc dfc((TDfcFn)DoDfcReadWrite,&args,Kern::SvMsgQue(),0);
	dfc.Enque();
	NKern::FSWait(&sem);

	return args[1];
	}


void DoIsrReadWrite(TUint32* aArgs)
	{
	TUint32* ptr = (TUint32*)aArgs[0];
	TUint32 value = aArgs[1];
	aArgs[1] = *ptr;
	*ptr = value;
	((TDfc*)aArgs[2])->Add();
	}

void DoIsrReadWriteDfcCallback(TUint32* aArgs)
	{
	NKern::FSSignal((NFastSemaphore*)aArgs);
	}

TUint32 DSharedChunkChannel::IsrReadWrite(TUint32* aPtr, TUint32 aValue)
	{
	NFastSemaphore sem;
	NKern::FSSetOwner(&sem,0);

	TDfc dfc((TDfcFn)DoIsrReadWriteDfcCallback,&sem,Kern::SvMsgQue(),0);

	TUint32 args[3];
	args[0] = (TUint32)aPtr;
	args[1] = aValue;
	args[2] = (TUint32)&dfc;

	NTimer timer((NTimerFn)DoIsrReadWrite,&args);
	timer.OneShot(1);

	NKern::FSWait(&sem);
	return args[1];
	}


DChunk* DSharedChunkChannel::OpenChunk(TLinAddr* aKernelAddr,TInt* aMaxSize)
	{
	__ASSERT_CRITICAL	// Thread must be in critical section (to avoid leaking access count on chunk)
	LockWait();
	DChunk* chunk=iChunk;
	if(chunk)
		if(chunk->Open()!=KErrNone)
			chunk = NULL;
	if(aKernelAddr)
		*aKernelAddr = chunk ? iKernelAddress : NULL;
	if(aMaxSize)
		*aMaxSize = chunk ? iMaxSize : 0;
	LockSignal();
	return chunk;
	}


TUint8 ReadByte(volatile TUint8* aPtr)
	{
	return *aPtr;
	}

void signal_sem(TAny* aPtr)
	{
	NKern::FSSignal((NFastSemaphore*)aPtr);
	}

TInt WaitForIdle()
	{
	NFastSemaphore s(0);
	TDfc idler(&signal_sem, &s, Kern::SvMsgQue(), 0);	// supervisor thread, priority 0, so will run after destroyed DFC
	NTimer timer(&signal_sem, &s);
	idler.QueueOnIdle();
	timer.OneShot(NKern::TimerTicks(5000), ETrue);	// runs in DFCThread1
	NKern::FSWait(&s);	// wait for either idle DFC or timer
	TBool timeout = idler.Cancel();	// cancel idler, return TRUE if it hadn't run
	TBool tmc = timer.Cancel();	// cancel timer, return TRUE if it hadn't expired
	if (!timeout && !tmc)
		NKern::FSWait(&s);	// both the DFC and the timer went off - wait for the second one
	if (timeout)
		return KErrTimedOut;
	return KErrNone;
	}


TInt WaitForIdle2()
	{
	TInt r = WaitForIdle();	// wait for chunk async delete
	if(r==KErrNone)
		r = WaitForIdle();	// wait for chunk destroyed notification DFC
	return r;
	}


TInt DSharedChunkChannel::Request(TInt aFunction, TAny* a1, TAny* a2)
	{
	TInt i1 = (TInt)a1;
	TInt i2 = (TInt)a2;

	TInt r=KErrNotSupported;

	switch(aFunction)
		{

	case RSharedChunkLdd::ECreateChunk:
		{
		NKern::ThreadEnterCS();
		if (__e32_atomic_load_acq32(&ChunkDestroyedCount)==0)
			{
			WaitForIdle2();	// Go idle for a while to let the chunk cleanup DFCs run
			}

		// Create cleanup item
		TBool chunkUsesPhysicalMemory = (i1&EOwnsMemory)==0;

		TChunkCleanup* cleanup = new TChunkCleanup(this->iFactory,chunkUsesPhysicalMemory);
		if(!cleanup)
			{
			NKern::ThreadLeaveCS();
			return KErrNoMemory;
			}

		// Try and create chunk...
		DChunk* chunk;
		TChunkCreateInfo info;

		info.iType = (i1&EMultiple)
			? TChunkCreateInfo::ESharedKernelMultiple
			: TChunkCreateInfo::ESharedKernelSingle;

		info.iMaxSize = i1&~ECreateFlagsMask;
#ifdef __EPOC32__
		info.iMapAttr = (i1&ECached) ? EMapAttrCachedMax
			: (i1&EBuffered) ? EMapAttrBufferedC
			: EMapAttrFullyBlocking;
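		// The three mapping attributes give the test different memory types to exercise:
		// EMapAttrFullyBlocking is a strongly-ordered (uncached, unbuffered) mapping,
		// EMapAttrBufferedC allows write buffering but no caching, and EMapAttrCachedMax
		// requests the most cached mapping the platform supports.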
#endif
		info.iOwnsMemory = (i1&EOwnsMemory)!=0;

		info.iDestroyedDfc = cleanup;

		if(i1&EBadType) *(TUint8*)&info.iType = 0xff;

		TUint32 mapAttr;
		TUint32 kernAddr;
		r = Kern::ChunkCreate(info, chunk, kernAddr, mapAttr);
		if(r!=KErrNone)
			{
			delete cleanup;
			NKern::ThreadLeaveCS();
			return r;
			}

		// Setup data members
		LockWait();
		if(iChunk)
			r = KErrAlreadyExists;
		else
			{
			if(chunkUsesPhysicalMemory)
				r = iFactory->ClaimMemory();
			if(r==KErrNone)
				{
				iChunk = chunk;
				iKernelAddress = kernAddr;
				iMaxSize = info.iMaxSize;
				__e32_atomic_store_ord32(&ChunkDestroyedCount,0);
				}
			}
		LockSignal();

		if(r!=KErrNone)
			{
			// There was an error, so discard created chunk
			cleanup->Cancel();
			Kern::ChunkClose(chunk);
			NKern::ThreadLeaveCS();
			return r;
			}

		NKern::ThreadLeaveCS();

		// Write back kernel address of chunk
		if(a2)
			kumemput32(a2,(TAny*)&kernAddr,4);

		return KErrNone;
		}


	case RSharedChunkLdd::EGetChunkHandle:
		{
		TInt isThreadLocal = (TInt)a1;
		TOwnerType ownertype;
		if (isThreadLocal)
			ownertype = EOwnerThread;
		else
			ownertype = EOwnerProcess;

		NKern::ThreadEnterCS();
		DChunk* chunk=OpenChunk();
		if(chunk)
			{
			r = Kern::MakeHandleAndOpen(0,chunk,ownertype);
			chunk->Close(0);
			}
		else
			r = KErrNotFound;
		NKern::ThreadLeaveCS();
		return r;
		}


	case RSharedChunkLdd::ECloseChunkHandle:
		{
		NKern::ThreadEnterCS();
		r = Kern::CloseHandle(0,i1);
		NKern::ThreadLeaveCS();
		return r;
		}


	case RSharedChunkLdd::ECommitMemory:
		{
		NKern::ThreadEnterCS();
		TUint32 chunkKernelAddress;
		DChunk* chunk=OpenChunk(&chunkKernelAddress);
		if(chunk)
			{
			TInt type = i1&ECommitTypeMask;
			i1 &= ~ECommitTypeMask;
			switch(type)
				{
			case EDiscontiguous:
				r = Kern::ChunkCommit(chunk,i1,i2);
				break;

			case EContiguous:
				{
				TUint32 physAddr=~0u;
				r = Kern::ChunkCommitContiguous(chunk,i1,i2,physAddr);
				if(r!=KErrNone || i2==0)
					break;
				if(physAddr==~0u)
					{ r=KErrGeneral; break; }

				// Check that ChunkPhysicalAddress returns addresses consistent with the commit
				TUint32 kernAddr;
				TUint32 mapAttr;
				TUint32 physAddr2;
				r = Kern::ChunkPhysicalAddress(chunk, i1, i2, kernAddr, mapAttr, physAddr2);
				if(r==KErrNone)
					if(kernAddr!=chunkKernelAddress+i1 || physAddr2!=physAddr)
						r=KErrGeneral;

				if(r==KErrNone)
					{
					// Exercise memory sync functions
					Cache::SyncMemoryBeforeDmaRead(kernAddr, i2, mapAttr);
					Cache::SyncMemoryBeforeDmaWrite(kernAddr, i2, mapAttr);
					}
				}
				break;

			case EDiscontiguousPhysical|EBadPhysicalAddress:
			case EDiscontiguousPhysical:
				{
				TUint32 physAddr;
				r = iFactory->AllocMemory(i2,physAddr);
				if(r!=KErrNone)
					break;

				TInt pageSize = Kern::RoundToPageSize(1);
				TInt numPages = Kern::RoundToPageSize(i2)/pageSize;
				TUint32* physAddrList = new TUint32[numPages];
				TInt i;
				for(i=0; i<numPages; i++)
					physAddrList[i] = physAddr+i*pageSize;
				if((type&EBadPhysicalAddress) && numPages)
					physAddrList[numPages-1] |= 1;	// corrupt one entry so the commit sees a bad (unaligned) physical address
				r = Kern::ChunkCommitPhysical(chunk,i1,i2,physAddrList);
				delete[] physAddrList;
				if(r!=KErrNone || i2==0)
					{
					iFactory->FreeMemory(i2,physAddr);
					break;
					}

				// Check that ChunkPhysicalAddress returns the same addresses we used in the commit
				TUint32 kernAddr;
				TUint32 mapAttr;
				TUint32 physAddr2;
				TUint32* physAddrList2 = new TUint32[numPages];
				r = Kern::ChunkPhysicalAddress(chunk, i1, i2, kernAddr, mapAttr, physAddr2, physAddrList2);
				if(r==KErrNone)
					{
					if(kernAddr!=chunkKernelAddress+i1 || physAddr2!=physAddr)
						r=KErrGeneral;
					else
						{
						for(i=0; i<numPages; i++)
							if(physAddrList2[i]!=physAddr+i*pageSize)
								r = KErrGeneral;
						}
					}
				delete[] physAddrList2;

				if(r==KErrNone)
					{
					// Exercise memory sync functions
					Cache::SyncMemoryBeforeDmaRead(kernAddr, i2, mapAttr);
					Cache::SyncMemoryBeforeDmaWrite(kernAddr, i2, mapAttr);
					}
				}
				break;

			case EContiguousPhysical|EBadPhysicalAddress:
			case EContiguousPhysical:
				{
				TUint32 physAddr;
				r = iFactory->AllocMemory(i2,physAddr);
				if(r==KErrNone)
					{
					if(type&EBadPhysicalAddress)
						r = Kern::ChunkCommitPhysical(chunk,i1,i2,physAddr|1);
					else
						r = Kern::ChunkCommitPhysical(chunk,i1,i2,physAddr);
					}
				if(r!=KErrNone || i2==0)
					{
					iFactory->FreeMemory(i2,physAddr);
					break;
					}

				// Check that ChunkPhysicalAddress returns the same addresses we used in the commit
				TUint32 kernAddr;
				TUint32 mapAttr;
				TUint32 physAddr2;
				r = Kern::ChunkPhysicalAddress(chunk, i1, i2, kernAddr, mapAttr, physAddr2);
				if(r==KErrNone)
					if(kernAddr!=chunkKernelAddress+i1 || physAddr2!=physAddr)
						r=KErrGeneral;

				if(r==KErrNone)
					{
					// Exercise memory sync functions
					Cache::SyncMemoryBeforeDmaRead(kernAddr, i2, mapAttr);
					Cache::SyncMemoryBeforeDmaWrite(kernAddr, i2, mapAttr);
					}
				}
				break;

			default:
				r = KErrNotSupported;
				break;

				}
			chunk->Close(0);
			}
		else
			r = KErrNotFound;
		NKern::ThreadLeaveCS();
		return r;
		}


	case RSharedChunkLdd::EIsDestroyed:
		{
		NKern::ThreadEnterCS();
		TInt r = WaitForIdle2();
		NKern::ThreadLeaveCS();
		if (r==KErrNone)
			return __e32_atomic_load_acq32(&ChunkDestroyedCount);
		return 0;	// never went idle so can't have been destroyed
		}


	case RSharedChunkLdd::ECloseChunk:
		{
		NKern::ThreadEnterCS();

		// Claim ownership of the chunk
		LockWait();
		DChunk* chunk=iChunk;
		iChunk = 0;
		LockSignal();

		// Close the chunk
		if(chunk)
			r = Kern::ChunkClose(chunk);
		else
			r = KErrNotFound;

		NKern::ThreadLeaveCS();
		return r;
		}


	case RSharedChunkLdd::ECheckMemory:
	case RSharedChunkLdd::EReadMemory:
	case RSharedChunkLdd::EWriteMemory:
		{
		TUint32 value=0;

		NKern::ThreadEnterCS();
		TLinAddr kernAddr;
		TInt maxSize;
		DChunk* chunk=OpenChunk(&kernAddr,&maxSize);
		if(chunk)
			{
			if((TUint)i1>=(TUint)maxSize)
				r = KErrArgument;
			else
				{
				TInt addr = kernAddr+i1;
#ifdef _DEBUG
				TInt debugMask = Kern::CurrentThread().iDebugMask;
				Kern::CurrentThread().iDebugMask = debugMask&~(1<<KPANIC);
#endif
				// Access the chunk memory inside an exception trap so that a fault on
				// uncommitted memory is reported as an error rather than a panic
				XTRAP(r, XT_DEFAULT,
					if(aFunction==RSharedChunkLdd::ECheckMemory)
						ReadByte((volatile TUint8*)addr);
					else if(aFunction==RSharedChunkLdd::EReadMemory)
						value = *(volatile TUint32*)addr;
					else
						*(volatile TUint32*)addr = value;
					);
#ifdef _DEBUG
				Kern::CurrentThread().iDebugMask = debugMask;
#endif
				if(aFunction==RSharedChunkLdd::ECheckMemory)
					r = (r==KErrNone);	// for ECheckMemory, report whether the address was accessible
				}
			chunk->Close(0);
			}
		else
			r = KErrNotFound;

		NKern::ThreadLeaveCS();

		if(aFunction==RSharedChunkLdd::EReadMemory)
			kumemput32(a2,&value,sizeof(value));

		return r;
		}


	case RSharedChunkLdd::EDfcReadWrite:
	case RSharedChunkLdd::EIsrReadWrite:
		{
		TUint32 value=0;
		kumemget32(&value,a2,sizeof(value));

		NKern::ThreadEnterCS();
		TLinAddr kernAddr;
		TInt maxSize;
		DChunk* chunk=OpenChunk(&kernAddr,&maxSize);
		if(chunk)
			{
			if((TUint)i1>=(TUint)maxSize)
				r = KErrArgument;
			else
				{
				TInt addr = kernAddr+i1;
				if(aFunction==RSharedChunkLdd::EDfcReadWrite)
					value = DfcReadWrite((TUint32*)addr,value);
				else if(aFunction==RSharedChunkLdd::EIsrReadWrite)
					value = IsrReadWrite((TUint32*)addr,value);
				r = KErrNone;
				}
			chunk->Close(0);
			}
		else
			r = KErrNotFound;
		NKern::ThreadLeaveCS();

		kumemput32(a2,&value,sizeof(value));
		return r;
		}


	case RSharedChunkLdd::ETestOpenAddress:
		{
		NKern::ThreadEnterCS();

		TLinAddr kernAddr;
		DChunk* chunk=OpenChunk(&kernAddr);
		if(!chunk)
			{
			NKern::ThreadLeaveCS();
			return KErrNotReady;
			}

		TInt offset;
		DChunk* chunk2 = Kern::OpenSharedChunk(0,a1,EFalse,offset);
		if(chunk2)
			{
			if(chunk2!=chunk)
				r = KErrGeneral;
			else
				r = KErrNone;
			chunk2->Close(0);
			}
		else
			r = KErrNotFound;

		chunk->Close(0);

		NKern::ThreadLeaveCS();
		return r;
		}

	case RSharedChunkLdd::ETestOpenHandle:
		{
		NKern::ThreadEnterCS();

		TLinAddr kernAddr;
		DChunk* chunk=OpenChunk(&kernAddr);
		if(!chunk)
			{
			NKern::ThreadLeaveCS();
			return KErrNotReady;
			}

		DChunk* chunk2 = Kern::OpenSharedChunk(0,i1,EFalse);
		if(chunk2)
			{
			if(chunk2==chunk)
				r = KErrNone;
			else
				r = KErrGeneral;
			chunk2->Close(0);
			}
		else
			r = KErrNotFound;

		chunk->Close(0);

		NKern::ThreadLeaveCS();
		return r;
		}

	case RSharedChunkLdd::ETestAddress:
		{
		NKern::ThreadEnterCS();

		TLinAddr kernAddr;
		DChunk* chunk=OpenChunk(&kernAddr);
		if(!chunk)
			{
			NKern::ThreadLeaveCS();
			return KErrNotReady;
			}

		TLinAddr kernAddr2;
		r = Kern::ChunkAddress(chunk,i1,i2,kernAddr2);
		if(r==KErrNone)
			if(kernAddr2!=kernAddr+i1)
				r = KErrGeneral;

		chunk->Close(0);

		NKern::ThreadLeaveCS();
		return r;
		}

	case RSharedChunkLdd::EChunkUserBase:
		{
		NKern::ThreadEnterCS();

		DChunk* chunk=OpenChunk();
		if(!chunk)
			{
			NKern::ThreadLeaveCS();
			return KErrNotReady;
			}

		TUint8* baseAddress = Kern::ChunkUserBase(chunk, &Kern::CurrentThread());

		chunk->Close(0);
		if(a1)
			kumemput32(a1,(TAny*)&baseAddress,4);

		NKern::ThreadLeaveCS();
		return KErrNone;
		}

	case RSharedChunkLdd::EChunkCloseAndFree:
		{
#ifdef __EPOC32__
		// Allocate and then commit some physical ram to a chunk
		NKern::ThreadEnterCS();
		const TUint KPhysPages = 5;
		TUint pageSize = Kern::RoundToPageSize(1);
		TUint physBytes = KPhysPages * pageSize;
		TPhysAddr addrArray[KPhysPages];
		TLinAddr linAddr;
		TUint32 mapAttr;
		DChunk* chunk;

		TChunkCreateInfo chunkInfo;
		chunkInfo.iType = TChunkCreateInfo::ESharedKernelSingle;
		chunkInfo.iMaxSize = physBytes;
		chunkInfo.iMapAttr = EMapAttrFullyBlocking;
		chunkInfo.iOwnsMemory = EFalse;

		r = Kern::ChunkCreate(chunkInfo, chunk, linAddr, mapAttr);
		if (r != KErrNone)
			{
			NKern::ThreadLeaveCS();
			return r;
			}
		r = Epoc::AllocPhysicalRam(KPhysPages, addrArray);
		if (r != KErrNone)
			{
			Kern::ChunkClose(chunk);
			NKern::ThreadLeaveCS();
			return r;
			}
		r = Kern::ChunkCommitPhysical(chunk, 0, physBytes, addrArray);
		if (r != KErrNone)
			{
			Kern::ChunkClose(chunk);
			r = Epoc::FreePhysicalRam(KPhysPages, addrArray);
			NKern::ThreadLeaveCS();
			return r;
			}
		// Now attempt to free the physical ram immediately after the chunk
		// has been closed.
		Kern::ChunkClose(chunk);
		r = Epoc::FreePhysicalRam(KPhysPages, addrArray);
		NKern::ThreadLeaveCS();
		return r;
#endif
		}

	default:
		return KErrNotSupported;
		}
	}
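
//
// For reference, a minimal sketch of how user-side test code is expected to drive this
// LDD, assuming the RSharedChunkLdd wrapper declared in d_sharedchunk.h provides helpers
// such as CreateChunk(), GetChunkHandle() and CloseChunk() over the request numbers
// handled above (those helper names are illustrative, not taken from this file):
//
//   RSharedChunkLdd ldd;
//   User::LoadLogicalDevice(KSharedChunkLddName);
//   ldd.Open();
//   ldd.CreateChunk(0x100000|EMultiple);     // ECreateChunk: 1MB chunk openable by multiple processes
//   RChunk chunk;
//   ldd.GetChunkHandle(chunk);               // EGetChunkHandle: user-side handle on the same chunk
//   // ... commit memory via ECommitMemory, then access it through chunk.Base() ...
//   chunk.Close();
//   ldd.CloseChunk();                        // ECloseChunk: drop the driver's reference
//   ldd.Close();
//   User::FreeLogicalDevice(KSharedChunkLddName);
//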