Update contrib.
1 // Copyright (c) 2004-2009 Nokia Corporation and/or its subsidiary(-ies).
2 // All rights reserved.
3 // This component and the accompanying materials are made available
4 // under the terms of the License "Eclipse Public License v1.0"
5 // which accompanies this distribution, and is available
6 // at the URL "http://www.eclipse.org/legal/epl-v10.html".
8 // Initial Contributors:
9 // Nokia Corporation - initial contribution.
14 // e32test\mmu\d_sharedchunk.cpp
18 #include <kernel/kern_priv.h>
19 #include <kernel/cache.h>
20 #include "d_sharedchunk.h"
// Whether physical-RAM commit APIs are usable on this memory model.
// Defaults to true; Install() recomputes it from EKernelHalMemModelInfo
// (false on the direct and emulator memory models).
22 TBool PhysicalCommitSupported = ETrue;
25 #define TEST_PHYSICAL_COMMIT
// Test counter bumped (with ordered atomics) by the chunk-destroyed DFC and
// polled by ECreateChunk/EIsDestroyed. Starts non-zero so the first
// ECreateChunk does not wait for a previous chunk's cleanup.
28 static volatile TInt ChunkDestroyedCount=1; // Test counter
// Logical device factory for the shared-chunk test LDD. Besides the standard
// DLogicalDevice overrides it owns a contiguous physical RAM reservation
// (iPhysBase..iPhysEnd, allocated in Install()) handed out linearly by
// AllocMemory/FreeMemory, guarded by a fast mutex (LockWait/LockSignal).
// NOTE(review): this listing is elided -- access specifiers, the remaining
// member declarations (iLock, iPhysBase, iPhysNext, iPhysEnd, iMemoryInUse,
// iDummyCell, ClaimMemory/ReleaseMemory) are used later in the file but not
// visible here; confirm against the full source.
34 class DSharedChunkFactory : public DLogicalDevice
37 ~DSharedChunkFactory();
38 virtual TInt Install();
39 virtual void GetCaps(TDes8& aDes) const;
40 virtual TInt Create(DLogicalChannelBase*& aChannel);
43 TInt AllocMemory(TInt aSize, TUint32& aPhysAddr);
44 void FreeMemory(TInt aSize,TUint32 aPhysAddr);
// Per-client channel of the test LDD. Request() implements the test commands
// (create/commit/close chunk, memory probes, DFC/ISR read-write exercises).
// Holds a back-pointer to the factory (for the shared lock and the physical
// RAM pool) plus the cached kernel address of the currently open chunk.
// NOTE(review): members iChunk and iMaxSize are referenced later in the file
// but their declarations are elided from this listing.
57 class DSharedChunkChannel : public DLogicalChannelBase
60 DSharedChunkChannel();
61 ~DSharedChunkChannel();
62 virtual TInt DoCreate(TInt aUnit, const TDesC8* anInfo, const TVersion& aVer);
63 virtual TInt Request(TInt aFunction, TAny* a1, TAny* a2);
64 DChunk* OpenChunk(TLinAddr* aKernelAddr=0, TInt* aMaxSize=0);
// Thin forwards to the factory's fast-mutex lock.
65 inline void LockWait()
66 { iFactory->LockWait(); }
67 inline void LockSignal()
68 { iFactory->LockSignal(); }
69 TUint32 DfcReadWrite(TUint32* aPtr, TUint32 aValue);
70 TUint32 IsrReadWrite(TUint32* aPtr, TUint32 aValue);
72 DSharedChunkFactory* iFactory;
74 TLinAddr iKernelAddress;
// DFC installed as TChunkCreateInfo::iDestroyedDfc: runs on the supervisor
// DFC queue when the shared chunk is finally destroyed, optionally returning
// the test's physical RAM to the factory pool.
78 class TChunkCleanup : public TDfc
81 TChunkCleanup(DSharedChunkFactory* aFactory,TBool aReleasePhysicalMemory);
// Static DFC callback; receives 'this' as the DFC argument.
83 static void ChunkDestroyed(TChunkCleanup* aSelf);
86 DSharedChunkFactory* iFactory;
87 TBool iReleasePhysicalMemory;
// Construct the cleanup DFC bound to ChunkDestroyed, queued on the
// supervisor message queue at priority 0.
94 TChunkCleanup::TChunkCleanup(DSharedChunkFactory* aFactory,TBool aReleasePhysicalMemory)
95 : TDfc((TDfcFn)TChunkCleanup::ChunkDestroyed,this,Kern::SvMsgQue(),0)
// NOTE(review): iFactory is initialised to 0 here even though aFactory is
// passed in, yet ChunkDestroyed() dereferences iFactory. An elided line
// (e.g. a body statement or a Cancel/SetFactory pairing) presumably stores
// aFactory -- confirm against the full source before assuming a bug.
96 , iFactory(0), iReleasePhysicalMemory(aReleasePhysicalMemory)
// Destructor body elided in this listing.
102 TChunkCleanup::~TChunkCleanup()
// DFC run when the chunk is destroyed: returns the physical RAM to the
// factory (if this chunk committed it), releases the factory lock and
// advertises destruction by bumping ChunkDestroyedCount.
108 void TChunkCleanup::ChunkDestroyed(TChunkCleanup* aSelf)
110 __KTRACE_OPT(KMMU,Kern::Printf("D_SHAREDCHUNK ChunkDestroyed DFC\n"));
111 DSharedChunkFactory* factory = aSelf->iFactory;
// NOTE(review): the matching factory->LockWait() and a null-check of
// 'factory' are elided from this listing; LockSignal() below implies the
// lock is taken on an elided line.
115 if(aSelf->iReleasePhysicalMemory)
116 factory->ReleaseMemory();
117 factory->LockSignal();
118 __e32_atomic_add_ord32(&ChunkDestroyedCount, 1);
119 __KTRACE_OPT(KMMU,Kern::Printf("D_SHAREDCHUNK ChunkDestroyedCount=%d\n",ChunkDestroyedCount));
// Cancel the DFC; body elided in this listing.
124 void TChunkCleanup::Cancel()
134 // DSharedChunkFactory
// Second-phase construction of the LDD: decide whether physical commit is
// testable on this memory model, reserve a 4MB contiguous physical RAM pool,
// pre-expand the kernel heap (so later allocations don't grow it during OOM
// and leak testing), and publish the driver name.
137 TInt DSharedChunkFactory::Install()
139 TUint mm=Kern::HalFunction(EHalGroupKernel,EKernelHalMemModelInfo,0,0)&EMemModelTypeMask;
// Direct and emulator memory models cannot commit arbitrary physical RAM.
140 PhysicalCommitSupported = mm!=EMemModelTypeDirect && mm!=EMemModelTypeEmul;
142 if(PhysicalCommitSupported)
144 TInt physSize = 4096*1024;
// NOTE(review): the error-handling for r (and the braces of this if) are
// elided from this listing.
145 TInt r=Epoc::AllocPhysicalRam(physSize, iPhysBase);
148 iPhysNext = iPhysBase;
149 iPhysEnd = iPhysBase+physSize;
150 iMemoryInUse = EFalse;
153 // Make sure there is enough space on kernel heap to that heap doesn't need
154 // to expand when allocating objects. (Required for OOM and memory leak testing.)
155 TAny* expandHeap = Kern::Alloc(16*1024);
// iDummyCell is intentionally leaked until the factory is destroyed.
156 iDummyCell = new TInt;
157 Kern::Free(expandHeap);
159 return SetName(&KSharedChunkLddName);
// Return the physical RAM pool reserved by Install(). (Freeing of iDummyCell
// is elided from this listing.)
162 DSharedChunkFactory::~DSharedChunkFactory()
165 if(PhysicalCommitSupported)
166 Epoc::FreePhysicalRam(iPhysBase, iPhysEnd-iPhysBase);
// No capabilities to report; stub satisfies the pure-virtual.
171 void DSharedChunkFactory::GetCaps(TDes8& /*aDes*/) const
173 // Not used but required as DLogicalDevice::GetCaps is pure virtual
// Create a channel and give it a back-pointer to this factory.
// NOTE(review): the NULL check on 'channel' and the return are elided here.
176 TInt DSharedChunkFactory::Create(DLogicalChannelBase*& aChannel)
179 DSharedChunkChannel* channel=new DSharedChunkChannel;
182 channel->iFactory = this;
// Acquire/release the factory's fast mutex; serialises access to the
// physical RAM pool and chunk bookkeeping.
187 void DSharedChunkFactory::LockWait()
189 NKern::FMWait(&iLock);
192 void DSharedChunkFactory::LockSignal()
194 NKern::FMSignal(&iLock);
// Linear (bump-pointer) allocation from the reserved physical pool.
// Returns the physical address via aPhysAddr.
197 TInt DSharedChunkFactory::AllocMemory(TInt aSize, TUint32& aPhysAddr)
// On models without physical commit the pool doesn't exist; elided lines
// presumably return early here.
199 if(!PhysicalCommitSupported)
// NOTE(review): the page-rounded result is discarded as written --
// presumably an elided line assigns it to aSize; confirm in the full source.
202 Kern::RoundToPageSize(aSize);
204 if(iPhysNext+aSize>iPhysEnd)
208 aPhysAddr = iPhysNext;
// Claim exclusive use of the physical pool for one chunk: atomically set
// the in-use flag (failing if already set) and reset the bump pointer.
215 TInt DSharedChunkFactory::ClaimMemory()
217 if (__e32_atomic_swp_ord32(&iMemoryInUse, 1))
219 iPhysNext = iPhysBase; // reset allocation pointer
// Counterpart to ClaimMemory(); body elided in this listing.
223 void DSharedChunkFactory::ReleaseMemory()
// Return memory to the bump allocator. Because allocation is linear, only
// freeing the most recent allocation (from the end) is supported.
228 void DSharedChunkFactory::FreeMemory(TInt aSize,TUint32 aPhysAddr)
230 if(!PhysicalCommitSupported)
232 if(iPhysNext!=aPhysAddr+aSize)
233 { FAULT(); } // Only support freeing from the end
// NOTE(review): as in AllocMemory, the rounded size is discarded here;
// an elided assignment is presumed -- confirm against the full source.
234 Kern::RoundToPageSize(aSize);
// Standard LDD entry point: hand the kernel a new factory instance.
240 DECLARE_STANDARD_LDD()
242 return new DSharedChunkFactory;
246 // DSharedChunkChannel
// Channel second-phase construction / ctor / dtor. Bodies are elided in
// this listing; no unit, info or version validation is visible here.
249 TInt DSharedChunkChannel::DoCreate(TInt /*aUnit*/, const TDesC8* /*aInfo*/, const TVersion& /*aVer*/)
254 DSharedChunkChannel::DSharedChunkChannel()
258 DSharedChunkChannel::~DSharedChunkChannel()
// DFC body for DfcReadWrite: args[0]=address, args[1]=value to write,
// args[2]=semaphore to signal when done. (The read/write of *ptr between
// these lines is elided from this listing.)
265 void DoDfcReadWrite(TUint32* aArgs)
267 TUint32* ptr = (TUint32*)aArgs[0];
268 TUint32 value = aArgs[1];
271 NKern::FSSignal((NFastSemaphore*)aArgs[2]);
// Exercise chunk memory access from DFC context: queue DoDfcReadWrite on
// the supervisor queue and block on a fast semaphore until it has run.
274 TUint32 DSharedChunkChannel::DfcReadWrite(TUint32* aPtr, TUint32 aValue)
277 NKern::FSSetOwner(&sem,0);
280 args[0] = (TUint32)aPtr;
282 args[2] = (TUint32)&sem;
// NOTE(review): declarations of 'sem'/'args', the dfc.Enque() and the
// FSWait/return are elided from this listing.
284 TDfc dfc((TDfcFn)DoDfcReadWrite,&args,Kern::SvMsgQue(),0);
// Timer callback (ISR context) for IsrReadWrite: touches the chunk memory
// then queues a DFC (args[2]) to signal completion, since a fast semaphore
// cannot be signalled directly from an ISR.
292 void DoIsrReadWrite(TUint32* aArgs)
294 TUint32* ptr = (TUint32*)aArgs[0];
295 TUint32 value = aArgs[1];
298 ((TDfc*)aArgs[2])->Add();
// DFC stage: signal the waiting thread's fast semaphore.
301 void DoIsrReadWriteDfcCallback(TUint32* aArgs)
303 NKern::FSSignal((NFastSemaphore*)aArgs);
// Exercise chunk memory access from ISR context via an NTimer one-shot;
// completion is relayed ISR -> DFC -> fast semaphore.
306 TUint32 DSharedChunkChannel::IsrReadWrite(TUint32* aPtr, TUint32 aValue)
309 NKern::FSSetOwner(&sem,0);
311 TDfc dfc((TDfcFn)DoIsrReadWriteDfcCallback,&sem,Kern::SvMsgQue(),0);
314 args[0] = (TUint32)aPtr;
316 args[2] = (TUint32)&dfc;
// NOTE(review): 'sem'/'args' declarations, timer.OneShot(), FSWait and the
// return of the observed value are elided from this listing.
318 NTimer timer((NTimerFn)DoIsrReadWrite,&args);
// Take an access-counted reference on the channel's chunk, optionally
// returning its cached kernel address and max size. Returns NULL (and zeroed
// outputs) if there is no chunk or Open() fails. Caller must be in a
// critical section and must balance with Close().
326 DChunk* DSharedChunkChannel::OpenChunk(TLinAddr* aKernelAddr,TInt* aMaxSize)
328 __ASSERT_CRITICAL // Thread must be in critical section (to avoid leaking access count on chunk)
// NOTE(review): the factory lock acquisition/release around iChunk and the
// null-checks are elided from this listing.
330 DChunk* chunk=iChunk;
332 if(chunk->Open()!=KErrNone)
335 *aKernelAddr = chunk ? iKernelAddress : NULL;
337 *aMaxSize = chunk ? iMaxSize : 0;
// Force a read access through a volatile pointer (used by ECheckMemory to
// probe whether an address is mapped); body elided in this listing.
343 TUint8 ReadByte(volatile TUint8* aPtr)
// Generic callback: signal the fast semaphore passed as context. Used as
// both a DFC function and an NTimer callback below.
348 void signal_sem(TAny* aPtr)
350 NKern::FSSignal((NFastSemaphore*)aPtr);
// NOTE(review): the enclosing function signatures (WaitForIdle and a second
// helper, presumably WaitForIdle2 given the calls in Request) are elided
// from this listing; these lines are interior fragments.
// Race an idle DFC against a 5s timer: whichever fires first wakes us, then
// both are cancelled (waiting again if both managed to fire).
356 TDfc idler(&signal_sem, &s, Kern::SvMsgQue(), 0); // supervisor thread, priority 0, so will run after destroyed DFC
357 NTimer timer(&signal_sem, &s);
359 timer.OneShot(NKern::TimerTicks(5000), ETrue); // runs in DFCThread1
360 NKern::FSWait(&s); // wait for either idle DFC or timer
361 TBool timeout = idler.Cancel(); // cancel idler, return TRUE if it hadn't run
362 TBool tmc = timer.Cancel(); // cancel timer, return TRUE if it hadn't expired
363 if (!timeout && !tmc)
364 NKern::FSWait(&s); // both the DFC and the timer went off - wait for the second one
// Second helper: wait twice so both the async chunk delete and the
// destroyed-notification DFC have run.
373 TInt r = WaitForIdle(); // wait for chunk async delete
375 r = WaitForIdle(); // wait for chunk destroyed notification DFC
// Synchronous request dispatcher for all test commands. a1/a2 are the
// user-side arguments; many commands pack flags and sizes into them.
// NOTE(review): this listing is heavily elided -- the switch statement,
// the i1/i2 locals (presumably TInt copies of a1/a2), many braces, error
// checks and 'break's are not visible; comments below describe only what
// the visible lines establish.
380 TInt DSharedChunkChannel::Request(TInt aFunction, TAny* a1, TAny* a2)
385 TInt r=KErrNotSupported;
// --- ECreateChunk: create a shared kernel chunk per the flag bits in i1
// (EMultiple/ECached/EBuffered/EOwnsMemory/EBadType), attach the cleanup
// DFC, and return the chunk's kernel address through a2.
390 case RSharedChunkLdd::ECreateChunk:
392 NKern::ThreadEnterCS();
// If a previous chunk is still being destroyed, let its cleanup DFCs run.
393 if (__e32_atomic_load_acq32(&ChunkDestroyedCount)==0)
395 WaitForIdle2(); // Go idle for a while to let chunk cleanup DFCs to be called
398 // Create cleanup item
// A chunk that does not own its memory will have the test's physical RAM
// committed to it, so its cleanup must release that RAM.
399 TBool chunkUsesPhysicalMemory = (i1&EOwnsMemory)==0;
401 TChunkCleanup* cleanup = new TChunkCleanup(this->iFactory,chunkUsesPhysicalMemory);
404 NKern::ThreadLeaveCS();
408 // Try and create chunk...
410 TChunkCreateInfo info;
412 info.iType = (i1&EMultiple)
413 ? TChunkCreateInfo::ESharedKernelMultiple
414 : TChunkCreateInfo::ESharedKernelSingle;
// Low bits of i1 are flags; the rest is the requested max size.
416 info.iMaxSize = i1&~ECreateFlagsMask;
418 info.iMapAttr = (i1&ECached) ? EMapAttrCachedMax
419 : (i1&EBuffered) ? EMapAttrBufferedC
420 : EMapAttrFullyBlocking;
422 info.iOwnsMemory = (i1&EOwnsMemory)!=0;
424 info.iDestroyedDfc = cleanup;
// Deliberately corrupt the type field to test ChunkCreate's validation.
426 if(i1&EBadType) *(TUint8*)&info.iType = 0xff;
430 r = Kern::ChunkCreate(info, chunk, kernAddr, mapAttr);
434 NKern::ThreadLeaveCS();
438 // Setup data members
// Only one chunk per channel is allowed.
441 r = KErrAlreadyExists;
444 if(chunkUsesPhysicalMemory)
445 r = iFactory->ClaimMemory();
449 iKernelAddress = kernAddr;
450 iMaxSize = info.iMaxSize;
// Mark "chunk alive": cleanup DFC will increment this back on destruction.
451 __e32_atomic_store_ord32(&ChunkDestroyedCount,0);
458 // There was an error, so discard created chunk
460 Kern::ChunkClose(chunk);
461 NKern::ThreadLeaveCS();
465 NKern::ThreadLeaveCS();
467 // Write back kernel address of chunk
469 kumemput32(a2,(TAny*)&kernAddr,4);
// --- EGetChunkHandle: open the chunk and create a user-side handle,
// thread- or process-owned per a1.
475 case RSharedChunkLdd::EGetChunkHandle:
477 TInt isThreadLocal = (TInt)a1;
478 TOwnerType ownertype;
480 ownertype = EOwnerThread;
482 ownertype = EOwnerProcess;
484 NKern::ThreadEnterCS();
485 DChunk* chunk=OpenChunk();
// MakeHandleAndOpen adds its own reference; OpenChunk's reference is
// presumably balanced by an elided Close().
488 r = Kern::MakeHandleAndOpen(0,chunk,ownertype);
493 NKern::ThreadLeaveCS();
// --- ECloseChunkHandle: close a user-side handle passed in i1.
498 case RSharedChunkLdd::ECloseChunkHandle:
500 NKern::ThreadEnterCS();
501 r = Kern::CloseHandle(0,i1);
502 NKern::ThreadLeaveCS();
// --- ECommitMemory: commit memory at offset i1 (low bits carry the commit
// type), size i2, using one of three commit flavours, then cross-check
// Kern::ChunkPhysicalAddress and exercise the cache-sync APIs.
507 case RSharedChunkLdd::ECommitMemory:
509 NKern::ThreadEnterCS();
510 TUint32 chunkKernelAddress;
511 DChunk* chunk=OpenChunk(&chunkKernelAddress);
514 TInt type = i1&ECommitTypeMask;
515 i1 &= ~ECommitTypeMask;
// Plain (chunk-owned) commit.
519 r = Kern::ChunkCommit(chunk,i1,i2);
// Contiguous physical commit: kernel allocates contiguous RAM.
524 TUint32 physAddr=~0u;
525 r = Kern::ChunkCommitContiguous(chunk,i1,i2,physAddr);
526 if(r!=KErrNone || i2==0)
529 { r=KErrGeneral; break; }
531 // Check that ChunkPhysicalAddress returns addresses consistant with the commit
535 r = Kern::ChunkPhysicalAddress(chunk, i1, i2, kernAddr, mapAttr, physAddr2);
537 if(kernAddr!=chunkKernelAddress+i1 || physAddr2!=physAddr)
542 // Exercise memory sync functions
543 Cache::SyncMemoryBeforeDmaRead(kernAddr, i2, mapAttr);
544 Cache::SyncMemoryBeforeDmaWrite(kernAddr, i2, mapAttr);
// Discontiguous physical commit from the factory's pool, page list built
// by hand; EBadPhysicalAddress corrupts the last entry to test rejection.
549 case EDiscontiguousPhysical|EBadPhysicalAddress:
550 case EDiscontiguousPhysical:
553 r = iFactory->AllocMemory(i2,physAddr);
557 TInt pageSize = Kern::RoundToPageSize(1);
558 TInt numPages = Kern::RoundToPageSize(i2)/pageSize;
559 TUint32* physAddrList = new TUint32[numPages];
561 for(i=0; i<numPages; i++)
562 physAddrList[i] = physAddr+i*pageSize;
563 if(type&EBadPhysicalAddress)
564 physAddrList[i-1] |= 1;
565 r = Kern::ChunkCommitPhysical(chunk,i1,i2,physAddrList);
566 delete[] physAddrList;
567 if(r!=KErrNone || i2==0)
569 iFactory->FreeMemory(i2,physAddr);
573 // Check that ChunkPhysicalAddress returns the same addresses we used in the commit
577 TUint32* physAddrList2 = new TUint32[numPages];
578 r = Kern::ChunkPhysicalAddress(chunk, i1, i2, kernAddr, mapAttr, physAddr2, physAddrList2);
581 if(kernAddr!=chunkKernelAddress+i1 || physAddr2!=physAddr)
584 for(i=0; i<numPages; i++)
585 if(physAddrList2[i] != physAddr+i*pageSize)
588 delete[] physAddrList2;
592 // Exercise memory sync functions
593 Cache::SyncMemoryBeforeDmaRead(kernAddr, i2, mapAttr);
594 Cache::SyncMemoryBeforeDmaWrite(kernAddr, i2, mapAttr);
// Contiguous physical commit using pool memory; EBadPhysicalAddress
// passes a misaligned address to test rejection.
599 case EContiguousPhysical|EBadPhysicalAddress:
600 case EContiguousPhysical:
603 r = iFactory->AllocMemory(i2,physAddr);
606 if(type&EBadPhysicalAddress)
607 r = Kern::ChunkCommitPhysical(chunk,i1,i2,physAddr|1);
609 r = Kern::ChunkCommitPhysical(chunk,i1,i2,physAddr);
611 if(r!=KErrNone || i2==0)
613 iFactory->FreeMemory(i2,physAddr);
617 // Check that ChunkPhysicalAddress returns the same addresses we used in the commit
621 r = Kern::ChunkPhysicalAddress(chunk, i1, i2, kernAddr, mapAttr, physAddr2);
623 if(kernAddr!=chunkKernelAddress+i1 || physAddr2!=physAddr)
628 // Exercise memory sync functions
629 Cache::SyncMemoryBeforeDmaRead(kernAddr, i2, mapAttr);
630 Cache::SyncMemoryBeforeDmaWrite(kernAddr, i2, mapAttr);
// Unknown commit type.
636 r = KErrNotSupported;
644 NKern::ThreadLeaveCS();
// --- EIsDestroyed: wait for idle, then report whether the chunk's
// destroyed-DFC has run (non-zero counter).
649 case RSharedChunkLdd::EIsDestroyed:
651 NKern::ThreadEnterCS();
652 TInt r = WaitForIdle2();
653 NKern::ThreadLeaveCS();
655 return __e32_atomic_load_acq32(&ChunkDestroyedCount);
656 return 0; // never went idle so can't have been destroyed
// --- ECloseChunk: drop the channel's reference on its chunk.
660 case RSharedChunkLdd::ECloseChunk:
662 NKern::ThreadEnterCS();
664 // Claim ownership of the chunk
666 DChunk* chunk=iChunk;
672 r = Kern::ChunkClose(chunk);
676 NKern::ThreadLeaveCS();
// --- ECheckMemory / EReadMemory / EWriteMemory: probe/read/write the
// chunk at offset i1, with KPANIC tracing suppressed so an expected fault
// does not spam the log. For EReadMemory the value is copied back via a2.
681 case RSharedChunkLdd::ECheckMemory:
682 case RSharedChunkLdd::EReadMemory:
683 case RSharedChunkLdd::EWriteMemory:
687 NKern::ThreadEnterCS();
690 DChunk* chunk=OpenChunk(&kernAddr,&maxSize);
693 if((TUint)i1>=(TUint)maxSize)
697 TInt addr = kernAddr+i1;
// Temporarily mask the KPANIC debug trace bit on the current thread.
699 TInt debugMask = Kern::CurrentThread().iDebugMask;
700 Kern::CurrentThread().iDebugMask = debugMask&~(1<<KPANIC);
703 if(aFunction==RSharedChunkLdd::ECheckMemory)
704 ReadByte((volatile TUint8*)addr);
705 else if(aFunction==RSharedChunkLdd::EReadMemory)
706 value = *(volatile TUint32*)addr;
707 else if(aFunction==RSharedChunkLdd::EWriteMemory)
708 *(volatile TUint32*)addr = i2;
711 Kern::CurrentThread().iDebugMask = debugMask;
713 if(aFunction==RSharedChunkLdd::ECheckMemory)
721 NKern::ThreadLeaveCS();
723 if(aFunction==RSharedChunkLdd::EReadMemory)
724 kumemput32(a2,&value,sizeof(value));
// --- EDfcReadWrite / EIsrReadWrite: exchange a value with chunk memory
// from DFC or ISR context; a2 carries the value in and the old value out.
730 case RSharedChunkLdd::EDfcReadWrite:
731 case RSharedChunkLdd::EIsrReadWrite:
734 kumemget32(&value,a2,sizeof(value));
736 NKern::ThreadEnterCS();
739 DChunk* chunk=OpenChunk(&kernAddr,&maxSize);
742 if((TUint)i1>=(TUint)maxSize)
746 TInt addr = kernAddr+i1;
747 if(aFunction==RSharedChunkLdd::EDfcReadWrite)
748 value = DfcReadWrite((TUint32*)addr,value);
749 else if(aFunction==RSharedChunkLdd::EIsrReadWrite)
750 value = IsrReadWrite((TUint32*)addr,value);
757 NKern::ThreadLeaveCS();
759 kumemput32(a2,&value,sizeof(value));
// --- ETestOpenAddress: open the same chunk again by user address (a1)
// via Kern::OpenSharedChunk; verification lines are elided here.
764 case RSharedChunkLdd::ETestOpenAddress:
766 NKern::ThreadEnterCS();
769 DChunk* chunk=OpenChunk(&kernAddr);
772 NKern::ThreadLeaveCS();
777 DChunk* chunk2 = Kern::OpenSharedChunk(0,a1,EFalse,offset);
791 NKern::ThreadLeaveCS();
// --- ETestOpenHandle: open the chunk again by handle (i1).
795 case RSharedChunkLdd::ETestOpenHandle:
797 NKern::ThreadEnterCS();
800 DChunk* chunk=OpenChunk(&kernAddr);
803 NKern::ThreadLeaveCS();
807 DChunk* chunk2 = Kern::OpenSharedChunk(0,i1,EFalse);
821 NKern::ThreadLeaveCS();
// --- ETestAddress: check Kern::ChunkAddress maps offset i1/size i2 to the
// expected kernel linear address.
825 case RSharedChunkLdd::ETestAddress:
827 NKern::ThreadEnterCS();
830 DChunk* chunk=OpenChunk(&kernAddr);
833 NKern::ThreadLeaveCS();
838 r = Kern::ChunkAddress(chunk,i1,i2,kernAddr2);
840 if(kernAddr2!=kernAddr+i1)
845 NKern::ThreadLeaveCS();
// --- EChunkUserBase: return the chunk's user-side base address for the
// current thread via a1.
849 case RSharedChunkLdd::EChunkUserBase:
851 NKern::ThreadEnterCS();
853 DChunk* chunk=OpenChunk();
856 NKern::ThreadLeaveCS();
860 TUint8* baseAddress = Kern::ChunkUserBase(chunk, &Kern::CurrentThread());
864 kumemput32(a1,(TAny*)&baseAddress,4);
866 NKern::ThreadLeaveCS();
// --- EChunkCloseAndFree: regression test -- commit physical RAM to a
// throwaway chunk, close the chunk, and immediately free the RAM.
870 case RSharedChunkLdd::EChunkCloseAndFree:
873 // Allocate and then commit some physical ram to a chunk
874 NKern::ThreadEnterCS();
875 const TUint KPhysPages = 5;
876 TUint pageSize = Kern::RoundToPageSize(1);
877 TUint physBytes = KPhysPages * pageSize;
878 TPhysAddr addrArray[KPhysPages];
883 TChunkCreateInfo chunkInfo;
884 chunkInfo.iType = TChunkCreateInfo::ESharedKernelSingle;
885 chunkInfo.iMaxSize = physBytes;
886 chunkInfo.iMapAttr = EMapAttrFullyBlocking;
// Chunk does not own memory: we commit externally-allocated physical RAM.
887 chunkInfo.iOwnsMemory = EFalse;
889 r = Kern::ChunkCreate(chunkInfo, chunk, linAddr, mapAttr);
892 NKern::ThreadLeaveCS();
895 r = Epoc::AllocPhysicalRam(KPhysPages, addrArray);
898 Kern::ChunkClose(chunk);
899 NKern::ThreadLeaveCS();
902 r = Kern::ChunkCommitPhysical(chunk, 0, physBytes, addrArray);
// Commit failed: unwind both the chunk and the physical RAM.
905 Kern::ChunkClose(chunk);
906 r = Epoc::FreePhysicalRam(KPhysPages, addrArray);
907 NKern::ThreadLeaveCS();
910 // Now attempt to free the physical ram immediately after the chunk
912 Kern::ChunkClose(chunk);
913 r = Epoc::FreePhysicalRam(KPhysPages, addrArray);
914 NKern::ThreadLeaveCS();
// Fallthrough for unrecognised function numbers.
920 return KErrNotSupported;