1.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
1.2 +++ b/os/kernelhwsrv/kerneltest/e32test/mmu/d_sharedchunk.cpp Fri Jun 15 03:10:57 2012 +0200
1.3 @@ -0,0 +1,923 @@
1.4 +// Copyright (c) 2004-2009 Nokia Corporation and/or its subsidiary(-ies).
1.5 +// All rights reserved.
1.6 +// This component and the accompanying materials are made available
1.7 +// under the terms of the License "Eclipse Public License v1.0"
1.8 +// which accompanies this distribution, and is available
1.9 +// at the URL "http://www.eclipse.org/legal/epl-v10.html".
1.10 +//
1.11 +// Initial Contributors:
1.12 +// Nokia Corporation - initial contribution.
1.13 +//
1.14 +// Contributors:
1.15 +//
1.16 +// Description:
1.17 +// e32test\mmu\d_sharedchunk.cpp
1.18 +//
1.19 +//
1.20 +
1.21 +#include <kernel/kern_priv.h>
1.22 +#include <kernel/cache.h>
1.23 +#include "d_sharedchunk.h"
1.24 +
// ETrue if the current memory model supports committing physical RAM to a
// shared chunk; recomputed in DSharedChunkFactory::Install() from the HAL
// memory model info (EFalse on the direct and emulator memory models).
TBool PhysicalCommitSupported = ETrue;

#ifdef __EPOC32__
#define TEST_PHYSICAL_COMMIT
#endif

// Test counter: set to 0 when a chunk is created and incremented by the
// chunk-destroyed DFC (TChunkCleanup::ChunkDestroyed).  Starts at 1 so the
// "wait for previous cleanup" logic in ECreateChunk is a no-op initially.
static volatile TInt ChunkDestroyedCount=1; // Test counter
1.32 +
1.33 +//
1.34 +// Class definitions
1.35 +//
1.36 +
// Logical device factory for the shared chunk test LDD.  Besides creating
// channels it owns a single contiguous block of physical RAM (EPOC32 builds
// only) which channels claim/release for the "physical commit" test cases.
class DSharedChunkFactory : public DLogicalDevice
	{
public:
	~DSharedChunkFactory();
	virtual TInt Install();
	virtual void GetCaps(TDes8& aDes) const;
	virtual TInt Create(DLogicalChannelBase*& aChannel);
	TInt ClaimMemory();							// claim exclusive use of the physical RAM block
	void ReleaseMemory();						// release the claim made by ClaimMemory()
	TInt AllocMemory(TInt aSize, TUint32& aPhysAddr);	// bump-allocate from the block
	void FreeMemory(TInt aSize,TUint32 aPhysAddr);		// free (only from the end)
	void LockWait();							// acquire iLock
	void LockSignal();							// release iLock
private:
	NFastMutex iLock;		// guards iPhys* allocation state and channel chunk pointers
public:
	TBool iMemoryInUse;		// ETrue while a channel has claimed the physical RAM block
	TUint32 iPhysBase;		// physical address of the RAM block allocated in Install()
	TUint32 iPhysEnd;		// one past the end of the RAM block
	TUint32 iPhysNext;		// bump-allocator high-water mark within the block
	TInt* iDummyCell;		// heap cell kept allocated for OOM/leak-test heap sizing
	};
1.59 +
// A channel on the test LDD.  Each channel owns at most one shared kernel
// chunk (iChunk) created via the ECreateChunk request; the remaining requests
// commit memory to it, map/open it, and probe it from DFC and ISR context.
class DSharedChunkChannel : public DLogicalChannelBase
	{
public:
	DSharedChunkChannel();
	~DSharedChunkChannel();
	virtual TInt DoCreate(TInt aUnit, const TDesC8* anInfo, const TVersion& aVer);
	virtual TInt Request(TInt aFunction, TAny* a1, TAny* a2);
	// Opens iChunk (increments its access count) under the factory lock.
	// Optionally returns the kernel address and max size; NULL if no chunk.
	DChunk* OpenChunk(TLinAddr* aKernelAddr=0, TInt* aMaxSize=0);
	inline void LockWait()
		{ iFactory->LockWait(); }
	inline void LockSignal()
		{ iFactory->LockSignal(); }
	// Atomically exchange *aPtr with aValue from DFC / ISR context; returns
	// the old value.  Used to prove the chunk memory is usable from there.
	TUint32 DfcReadWrite(TUint32* aPtr, TUint32 aValue);
	TUint32 IsrReadWrite(TUint32* aPtr, TUint32 aValue);
public:
	DSharedChunkFactory* iFactory;	// owning factory (set by Create())
	DChunk* iChunk;					// the shared chunk, or NULL
	TLinAddr iKernelAddress;		// kernel-side base address of iChunk
	TInt iMaxSize;					// max size iChunk was created with
	};
1.80 +
// DFC queued by the kernel when the test chunk is finally destroyed
// (installed as TChunkCreateInfo::iDestroyedDfc).  Holds a reference on the
// factory so it can release the claimed physical RAM, and bumps
// ChunkDestroyedCount so the test can observe destruction.
class TChunkCleanup : public TDfc
	{
public:
	TChunkCleanup(DSharedChunkFactory* aFactory,TBool aReleasePhysicalMemory);
	~TChunkCleanup();
	static void ChunkDestroyed(TChunkCleanup* aSelf);	// DFC function; deletes aSelf
	void Cancel();	// drop the factory reference so ChunkDestroyed becomes a no-op
public:
	DSharedChunkFactory* iFactory;			// referenced factory, or NULL after Cancel()
	TBool iReleasePhysicalMemory;			// ETrue if factory memory claim must be released
	};
1.92 +
1.93 +//
1.94 +// TChunkCleanup
1.95 +//
1.96 +
// Construct the cleanup DFC (runs on the supervisor message queue).
// iFactory is initialised to 0 first and only assigned after Open()
// succeeds, so the destructor never closes an un-opened reference.
TChunkCleanup::TChunkCleanup(DSharedChunkFactory* aFactory,TBool aReleasePhysicalMemory)
	: TDfc((TDfcFn)TChunkCleanup::ChunkDestroyed,this,Kern::SvMsgQue(),0)
	, iFactory(0), iReleasePhysicalMemory(aReleasePhysicalMemory)
	{
	aFactory->Open();
	iFactory = aFactory;
	}
1.104 +
1.105 +TChunkCleanup::~TChunkCleanup()
1.106 + {
1.107 + if(iFactory)
1.108 + iFactory->Close(0);
1.109 + }
1.110 +
1.111 +void TChunkCleanup::ChunkDestroyed(TChunkCleanup* aSelf)
1.112 + {
1.113 + __KTRACE_OPT(KMMU,Kern::Printf("D_SHAREDCHUNK ChunkDestroyed DFC\n"));
1.114 + DSharedChunkFactory* factory = aSelf->iFactory;
1.115 + if(factory)
1.116 + {
1.117 + factory->LockWait();
1.118 + if(aSelf->iReleasePhysicalMemory)
1.119 + factory->ReleaseMemory();
1.120 + factory->LockSignal();
1.121 + __e32_atomic_add_ord32(&ChunkDestroyedCount, 1);
1.122 + __KTRACE_OPT(KMMU,Kern::Printf("D_SHAREDCHUNK ChunkDestroyedCount=%d\n",ChunkDestroyedCount));
1.123 + }
1.124 + delete aSelf;
1.125 + }
1.126 +
1.127 +void TChunkCleanup::Cancel()
1.128 + {
1.129 + if(iFactory)
1.130 + {
1.131 + iFactory->Close(0);
1.132 + iFactory = 0;
1.133 + }
1.134 + };
1.135 +
1.136 +//
1.137 +// DSharedChunkFactory
1.138 +//
1.139 +
// Second-phase construction of the device.  Determines whether the memory
// model supports physical commit, pre-allocates a 4MB physical RAM block for
// the physical-commit tests (EPOC32 only), pre-grows the kernel heap, and
// publishes the device name.
TInt DSharedChunkFactory::Install()
	{
	// Physical commit is not possible on the direct or emulator memory models
	TUint mm=Kern::HalFunction(EHalGroupKernel,EKernelHalMemModelInfo,0,0)&EMemModelTypeMask;
	PhysicalCommitSupported = mm!=EMemModelTypeDirect && mm!=EMemModelTypeEmul;
#ifdef __EPOC32__
	if(PhysicalCommitSupported)
		{
		TInt physSize = 4096*1024;
		TInt r=Epoc::AllocPhysicalRam(physSize, iPhysBase);
		if(r!=KErrNone)
			return r;
		iPhysNext = iPhysBase;
		iPhysEnd = iPhysBase+physSize;
		iMemoryInUse = EFalse;
		}
#endif
	// Make sure there is enough space on the kernel heap so that the heap
	// doesn't need to expand when allocating objects. (Required for OOM and
	// memory leak testing.)  The temporary 16KB cell forces the heap to grow;
	// iDummyCell stays allocated until the factory is destroyed.
	TAny* expandHeap = Kern::Alloc(16*1024);
	iDummyCell = new TInt;
	Kern::Free(expandHeap);

	return SetName(&KSharedChunkLddName);
	}
1.164 +
1.165 +DSharedChunkFactory::~DSharedChunkFactory()
1.166 + {
1.167 +#ifdef __EPOC32__
1.168 + if(PhysicalCommitSupported)
1.169 + Epoc::FreePhysicalRam(iPhysBase, iPhysEnd-iPhysBase);
1.170 +#endif
1.171 + delete iDummyCell;
1.172 + }
1.173 +
1.174 +void DSharedChunkFactory::GetCaps(TDes8& /*aDes*/) const
1.175 + {
1.176 + // Not used but required as DLogicalDevice::GetCaps is pure virtual
1.177 + }
1.178 +
1.179 +TInt DSharedChunkFactory::Create(DLogicalChannelBase*& aChannel)
1.180 + {
1.181 + aChannel = NULL;
1.182 + DSharedChunkChannel* channel=new DSharedChunkChannel;
1.183 + if(!channel)
1.184 + return KErrNoMemory;
1.185 + channel->iFactory = this;
1.186 + aChannel = channel;
1.187 + return KErrNone;
1.188 + }
1.189 +
// Acquire the factory's fast mutex (guards allocator state and channel
// chunk pointers).
void DSharedChunkFactory::LockWait()
	{
	NKern::FMWait(&iLock);
	}
1.194 +
// Release the factory's fast mutex.
void DSharedChunkFactory::LockSignal()
	{
	NKern::FMSignal(&iLock);
	}
1.199 +
1.200 +TInt DSharedChunkFactory::AllocMemory(TInt aSize, TUint32& aPhysAddr)
1.201 + {
1.202 + if(!PhysicalCommitSupported)
1.203 + aSize = 0;
1.204 + TInt r=KErrNone;
1.205 + Kern::RoundToPageSize(aSize);
1.206 + LockWait();
1.207 + if(iPhysNext+aSize>iPhysEnd)
1.208 + r = KErrNoMemory;
1.209 + else
1.210 + {
1.211 + aPhysAddr = iPhysNext;
1.212 + iPhysNext += aSize;
1.213 + }
1.214 + LockSignal();
1.215 + return r;
1.216 + }
1.217 +
// Atomically claim exclusive use of the physical RAM block for one chunk.
// Returns KErrInUse if another chunk already holds the claim; otherwise
// resets the bump allocator and returns KErrNone.
TInt DSharedChunkFactory::ClaimMemory()
	{
	// Atomic swap: returns the previous value, so non-zero means it was
	// already claimed.
	if (__e32_atomic_swp_ord32(&iMemoryInUse, 1))
		return KErrInUse;
	iPhysNext = iPhysBase; // reset allocation pointer
	return KErrNone;
	}
1.225 +
// Release the claim taken by ClaimMemory() (called from the chunk-destroyed
// DFC with the factory lock held).
void DSharedChunkFactory::ReleaseMemory()
	{
	iMemoryInUse=EFalse;
	}
1.230 +
1.231 +void DSharedChunkFactory::FreeMemory(TInt aSize,TUint32 aPhysAddr)
1.232 + {
1.233 + if(!PhysicalCommitSupported)
1.234 + aSize = 0;
1.235 + if(iPhysNext!=aPhysAddr+aSize)
1.236 + { FAULT(); } // Only support freeing from the end
1.237 + Kern::RoundToPageSize(aSize);
1.238 + LockWait();
1.239 + iPhysNext -= aSize;
1.240 + LockSignal();
1.241 + }
1.242 +
// Standard LDD entry point: the kernel calls this to create the device
// factory when the LDD is loaded (NULL return is treated as KErrNoMemory).
DECLARE_STANDARD_LDD()
	{
	return new DSharedChunkFactory;
	}
1.247 +
1.248 +//
1.249 +// DSharedChunkChannel
1.250 +//
1.251 +
// Second-phase channel construction; this test channel needs no per-unit
// setup, so accept unconditionally.
TInt DSharedChunkChannel::DoCreate(TInt /*aUnit*/, const TDesC8* /*aInfo*/, const TVersion& /*aVer*/)
	{
	return KErrNone;
	}
1.256 +
// Members are zero-initialised by DBase's operator new, so nothing to do.
DSharedChunkChannel::DSharedChunkChannel()
	{
	}
1.260 +
1.261 +DSharedChunkChannel::~DSharedChunkChannel()
1.262 + {
1.263 + if(iChunk)
1.264 + iChunk->Close(0);
1.265 + }
1.266 +
1.267 +
1.268 +void DoDfcReadWrite(TUint32* aArgs)
1.269 + {
1.270 + TUint32* ptr = (TUint32*)aArgs[0];
1.271 + TUint32 value = aArgs[1];
1.272 + aArgs[1] = *ptr;
1.273 + *ptr = value;
1.274 + NKern::FSSignal((NFastSemaphore*)aArgs[2]);
1.275 + }
1.276 +
// Exchange *aPtr with aValue from DFC context (supervisor message queue) and
// return the old value.  The args array and semaphore live on this thread's
// stack; FSWait guarantees the DFC has finished with them before we return.
TUint32 DSharedChunkChannel::DfcReadWrite(TUint32* aPtr, TUint32 aValue)
	{
	NFastSemaphore sem;
	NKern::FSSetOwner(&sem,0);	// owner 0 = current thread

	TUint32 args[3];
	args[0] = (TUint32)aPtr;
	args[1] = aValue;			// overwritten with the old value by the DFC
	args[2] = (TUint32)&sem;

	TDfc dfc((TDfcFn)DoDfcReadWrite,&args,Kern::SvMsgQue(),0);
	dfc.Enque();
	NKern::FSWait(&sem);		// block until DoDfcReadWrite has run

	return args[1];
	}
1.293 +
1.294 +
1.295 +void DoIsrReadWrite(TUint32* aArgs)
1.296 + {
1.297 + TUint32* ptr = (TUint32*)aArgs[0];
1.298 + TUint32 value = aArgs[1];
1.299 + aArgs[1] = *ptr;
1.300 + *ptr = value;
1.301 + ((TDfc*)aArgs[2])->Add();
1.302 + }
1.303 +
// DFC queued by DoIsrReadWrite: wakes the thread blocked in IsrReadWrite.
void DoIsrReadWriteDfcCallback(TUint32* aArgs)
	{
	NKern::FSSignal((NFastSemaphore*)aArgs);
	}
1.308 +
// Exchange *aPtr with aValue from ISR context (a one-shot NTimer callback)
// and return the old value.  The ISR does the exchange then queues a DFC,
// which signals the semaphore; everything it touches lives on this thread's
// stack, so FSWait must complete before this function returns.
TUint32 DSharedChunkChannel::IsrReadWrite(TUint32* aPtr, TUint32 aValue)
	{
	NFastSemaphore sem;
	NKern::FSSetOwner(&sem,0);	// owner 0 = current thread

	TDfc dfc((TDfcFn)DoIsrReadWriteDfcCallback,&sem,Kern::SvMsgQue(),0);

	TUint32 args[3];
	args[0] = (TUint32)aPtr;
	args[1] = aValue;			// overwritten with the old value in the ISR
	args[2] = (TUint32)&dfc;

	NTimer timer((NTimerFn)DoIsrReadWrite,&args);
	timer.OneShot(1);			// fire on the next tick, in ISR context

	NKern::FSWait(&sem);		// block until ISR + completion DFC have run
	return args[1];
	}
1.327 +
1.328 +
// Open (add a reference to) the channel's chunk under the factory lock.
// Returns the chunk, or NULL if there is none or it is already dying
// (Open() fails).  Optionally returns the kernel base address and max size,
// which are only meaningful when a chunk is returned.  The caller must
// balance a non-NULL return with chunk->Close(0).
DChunk* DSharedChunkChannel::OpenChunk(TLinAddr* aKernelAddr,TInt* aMaxSize)
	{
	__ASSERT_CRITICAL // Thread must be in critical section (to avoid leaking access count on chunk)
	LockWait();
	DChunk* chunk=iChunk;
	if(chunk)
		if(chunk->Open()!=KErrNone)	// fails if the chunk is being destroyed
			chunk = NULL;
	if(aKernelAddr)
		*aKernelAddr = chunk ? iKernelAddress : NULL;
	if(aMaxSize)
		*aMaxSize = chunk ? iMaxSize : 0;
	LockSignal();
	return chunk;
	}
1.344 +
1.345 +
// Touch a single byte.  Kept out-of-line so ECheckMemory can probe an
// address under XTRAP without the compiler optimising the access away.
TUint8 ReadByte(volatile TUint8* aPtr)
	{
	return *aPtr;
	}
1.350 +
// Generic callback (used as both a DFC and timer function) that signals the
// fast semaphore passed as its argument.
void signal_sem(TAny* aPtr)
	{
	NKern::FSSignal((NFastSemaphore*)aPtr);
	}
1.355 +
// Wait until the system goes idle (so queued cleanup DFCs have had a chance
// to run), with a 5 second timeout.  Returns KErrNone if the idle DFC ran,
// KErrTimedOut if the timer fired first.  Handles the race where both fire.
TInt WaitForIdle()
	{
	NFastSemaphore s(0);
	TDfc idler(&signal_sem, &s, Kern::SvMsgQue(), 0); // supervisor thread, priority 0, so will run after destroyed DFC
	NTimer timer(&signal_sem, &s);
	idler.QueueOnIdle();
	timer.OneShot(NKern::TimerTicks(5000), ETrue); // runs in DFCThread1
	NKern::FSWait(&s); // wait for either idle DFC or timer
	TBool timeout = idler.Cancel(); // cancel idler, return TRUE if it hadn't run
	TBool tmc = timer.Cancel(); // cancel timer, return TRUE if it hadn't expired
	if (!timeout && !tmc)
		NKern::FSWait(&s); // both the DFC and the timer went off - wait for the second one
	if (timeout)
		return KErrTimedOut;
	return KErrNone;
	}
1.372 +
1.373 +
1.374 +TInt WaitForIdle2()
1.375 + {
1.376 + TInt r = WaitForIdle(); // wait for chunk async delete
1.377 + if(r==KErrNone)
1.378 + r = WaitForIdle(); // wait for chunk destroyed notification DFC
1.379 + return r;
1.380 + }
1.381 +
1.382 +
1.383 +TInt DSharedChunkChannel::Request(TInt aFunction, TAny* a1, TAny* a2)
1.384 + {
1.385 + TInt i1 = (TInt)a1;
1.386 + TInt i2 = (TInt)a2;
1.387 +
1.388 + TInt r=KErrNotSupported;
1.389 +
1.390 + switch(aFunction)
1.391 + {
1.392 +
1.393 + case RSharedChunkLdd::ECreateChunk:
1.394 + {
1.395 + NKern::ThreadEnterCS();
1.396 + if (__e32_atomic_load_acq32(&ChunkDestroyedCount)==0)
1.397 + {
1.398 + WaitForIdle2(); // Go idle for a while to let chunk cleanup DFCs to be called
1.399 + }
1.400 +
1.401 + // Create cleanup item
1.402 + TBool chunkUsesPhysicalMemory = (i1&EOwnsMemory)==0;
1.403 +
1.404 + TChunkCleanup* cleanup = new TChunkCleanup(this->iFactory,chunkUsesPhysicalMemory);
1.405 + if(!cleanup)
1.406 + {
1.407 + NKern::ThreadLeaveCS();
1.408 + return KErrNoMemory;
1.409 + }
1.410 +
1.411 + // Try and create chunk...
1.412 + DChunk* chunk;
1.413 + TChunkCreateInfo info;
1.414 +
1.415 + info.iType = (i1&EMultiple)
1.416 + ? TChunkCreateInfo::ESharedKernelMultiple
1.417 + : TChunkCreateInfo::ESharedKernelSingle;
1.418 +
1.419 + info.iMaxSize = i1&~ECreateFlagsMask;
1.420 +#ifdef __EPOC32__
1.421 + info.iMapAttr = (i1&ECached) ? EMapAttrCachedMax
1.422 + : (i1&EBuffered) ? EMapAttrBufferedC
1.423 + : EMapAttrFullyBlocking;
1.424 +#endif
1.425 + info.iOwnsMemory = (i1&EOwnsMemory)!=0;
1.426 +
1.427 + info.iDestroyedDfc = cleanup;
1.428 +
1.429 + if(i1&EBadType) *(TUint8*)&info.iType = 0xff;
1.430 +
1.431 + TUint32 mapAttr;
1.432 + TUint32 kernAddr;
1.433 + r = Kern::ChunkCreate(info, chunk, kernAddr, mapAttr);
1.434 + if(r!=KErrNone)
1.435 + {
1.436 + delete cleanup;
1.437 + NKern::ThreadLeaveCS();
1.438 + return r;
1.439 + }
1.440 +
1.441 + // Setup data members
1.442 + LockWait();
1.443 + if(iChunk)
1.444 + r = KErrAlreadyExists;
1.445 + else
1.446 + {
1.447 + if(chunkUsesPhysicalMemory)
1.448 + r = iFactory->ClaimMemory();
1.449 + if(r==KErrNone)
1.450 + {
1.451 + iChunk = chunk;
1.452 + iKernelAddress = kernAddr;
1.453 + iMaxSize = info.iMaxSize;
1.454 + __e32_atomic_store_ord32(&ChunkDestroyedCount,0);
1.455 + }
1.456 + }
1.457 + LockSignal();
1.458 +
1.459 + if(r!=KErrNone)
1.460 + {
1.461 + // There was an error, so discard created chunk
1.462 + cleanup->Cancel();
1.463 + Kern::ChunkClose(chunk);
1.464 + NKern::ThreadLeaveCS();
1.465 + return r;
1.466 + }
1.467 +
1.468 + NKern::ThreadLeaveCS();
1.469 +
1.470 + // Write back kernel address of chunk
1.471 + if(a2)
1.472 + kumemput32(a2,(TAny*)&kernAddr,4);
1.473 +
1.474 + return KErrNone;
1.475 + }
1.476 +
1.477 +
1.478 + case RSharedChunkLdd::EGetChunkHandle:
1.479 + {
1.480 + TInt isThreadLocal = (TInt)a1;
1.481 + TOwnerType ownertype;
1.482 + if (isThreadLocal)
1.483 + ownertype = EOwnerThread;
1.484 + else
1.485 + ownertype = EOwnerProcess;
1.486 +
1.487 + NKern::ThreadEnterCS();
1.488 + DChunk* chunk=OpenChunk();
1.489 + if(chunk)
1.490 + {
1.491 + r = Kern::MakeHandleAndOpen(0,chunk,ownertype);
1.492 + chunk->Close(0);
1.493 + }
1.494 + else
1.495 + r = KErrNotFound;
1.496 + NKern::ThreadLeaveCS();
1.497 + return r;
1.498 + }
1.499 +
1.500 +
1.501 + case RSharedChunkLdd::ECloseChunkHandle:
1.502 + {
1.503 + NKern::ThreadEnterCS();
1.504 + r = Kern::CloseHandle(0,i1);
1.505 + NKern::ThreadLeaveCS();
1.506 + return r;
1.507 + }
1.508 +
1.509 +
1.510 + case RSharedChunkLdd::ECommitMemory:
1.511 + {
1.512 + NKern::ThreadEnterCS();
1.513 + TUint32 chunkKernelAddress;
1.514 + DChunk* chunk=OpenChunk(&chunkKernelAddress);
1.515 + if(chunk)
1.516 + {
1.517 + TInt type = i1&ECommitTypeMask;
1.518 + i1 &= ~ECommitTypeMask;
1.519 + switch(type)
1.520 + {
1.521 + case EDiscontiguous:
1.522 + r = Kern::ChunkCommit(chunk,i1,i2);
1.523 + break;
1.524 +
1.525 + case EContiguous:
1.526 + {
1.527 + TUint32 physAddr=~0u;
1.528 + r = Kern::ChunkCommitContiguous(chunk,i1,i2,physAddr);
1.529 + if(r!=KErrNone || i2==0)
1.530 + break;
1.531 + if(physAddr==~0u)
1.532 + { r=KErrGeneral; break; }
1.533 +
1.534 + // Check that ChunkPhysicalAddress returns addresses consistant with the commit
1.535 + TUint32 kernAddr;
1.536 + TUint32 mapAttr;
1.537 + TUint32 physAddr2;
1.538 + r = Kern::ChunkPhysicalAddress(chunk, i1, i2, kernAddr, mapAttr, physAddr2);
1.539 + if(r==KErrNone)
1.540 + if(kernAddr!=chunkKernelAddress+i1 || physAddr2!=physAddr)
1.541 + r=KErrGeneral;
1.542 +
1.543 + if(r==KErrNone)
1.544 + {
1.545 + // Exercise memory sync functions
1.546 + Cache::SyncMemoryBeforeDmaRead(kernAddr, i2, mapAttr);
1.547 + Cache::SyncMemoryBeforeDmaWrite(kernAddr, i2, mapAttr);
1.548 + }
1.549 + }
1.550 + break;
1.551 +
1.552 + case EDiscontiguousPhysical|EBadPhysicalAddress:
1.553 + case EDiscontiguousPhysical:
1.554 + {
1.555 + TUint32 physAddr;
1.556 + r = iFactory->AllocMemory(i2,physAddr);
1.557 + if(r!=KErrNone)
1.558 + break;
1.559 +
1.560 + TInt pageSize = Kern::RoundToPageSize(1);
1.561 + TInt numPages = Kern::RoundToPageSize(i2)/pageSize;
1.562 + TUint32* physAddrList = new TUint32[numPages];
1.563 + TInt i;
1.564 + for(i=0; i<numPages; i++)
1.565 + physAddrList[i] = physAddr+i*pageSize;
1.566 + if(type&EBadPhysicalAddress)
1.567 + physAddrList[i-1] |= 1;
1.568 + r = Kern::ChunkCommitPhysical(chunk,i1,i2,physAddrList);
1.569 + delete[] physAddrList;
1.570 + if(r!=KErrNone || i2==0)
1.571 + {
1.572 + iFactory->FreeMemory(i2,physAddr);
1.573 + break;
1.574 + }
1.575 +
1.576 + // Check that ChunkPhysicalAddress returns the same addresses we used in the commit
1.577 + TUint32 kernAddr;
1.578 + TUint32 mapAttr;
1.579 + TUint32 physAddr2;
1.580 + TUint32* physAddrList2 = new TUint32[numPages];
1.581 + r = Kern::ChunkPhysicalAddress(chunk, i1, i2, kernAddr, mapAttr, physAddr2, physAddrList2);
1.582 + if(r==KErrNone)
1.583 + {
1.584 + if(kernAddr!=chunkKernelAddress+i1 || physAddr2!=physAddr)
1.585 + r=KErrGeneral;
1.586 + else
1.587 + for(i=0; i<numPages; i++)
1.588 + if(physAddrList2[i] != physAddr+i*pageSize)
1.589 + r = KErrGeneral;
1.590 + }
1.591 + delete[] physAddrList2;
1.592 +
1.593 + if(r==KErrNone)
1.594 + {
1.595 + // Exercise memory sync functions
1.596 + Cache::SyncMemoryBeforeDmaRead(kernAddr, i2, mapAttr);
1.597 + Cache::SyncMemoryBeforeDmaWrite(kernAddr, i2, mapAttr);
1.598 + }
1.599 + }
1.600 + break;
1.601 +
1.602 + case EContiguousPhysical|EBadPhysicalAddress:
1.603 + case EContiguousPhysical:
1.604 + {
1.605 + TUint32 physAddr;
1.606 + r = iFactory->AllocMemory(i2,physAddr);
1.607 + if(r==KErrNone)
1.608 + {
1.609 + if(type&EBadPhysicalAddress)
1.610 + r = Kern::ChunkCommitPhysical(chunk,i1,i2,physAddr|1);
1.611 + else
1.612 + r = Kern::ChunkCommitPhysical(chunk,i1,i2,physAddr);
1.613 + }
1.614 + if(r!=KErrNone || i2==0)
1.615 + {
1.616 + iFactory->FreeMemory(i2,physAddr);
1.617 + break;
1.618 + }
1.619 +
1.620 + // Check that ChunkPhysicalAddress returns the same addresses we used in the commit
1.621 + TUint32 kernAddr;
1.622 + TUint32 mapAttr;
1.623 + TUint32 physAddr2;
1.624 + r = Kern::ChunkPhysicalAddress(chunk, i1, i2, kernAddr, mapAttr, physAddr2);
1.625 + if(r==KErrNone)
1.626 + if(kernAddr!=chunkKernelAddress+i1 || physAddr2!=physAddr)
1.627 + r=KErrGeneral;
1.628 +
1.629 + if(r==KErrNone)
1.630 + {
1.631 + // Exercise memory sync functions
1.632 + Cache::SyncMemoryBeforeDmaRead(kernAddr, i2, mapAttr);
1.633 + Cache::SyncMemoryBeforeDmaWrite(kernAddr, i2, mapAttr);
1.634 + }
1.635 + }
1.636 + break;
1.637 +
1.638 + default:
1.639 + r = KErrNotSupported;
1.640 + break;
1.641 +
1.642 + }
1.643 + chunk->Close(0);
1.644 + }
1.645 + else
1.646 + r = KErrNotFound;
1.647 + NKern::ThreadLeaveCS();
1.648 + return r;
1.649 + }
1.650 +
1.651 +
1.652 + case RSharedChunkLdd::EIsDestroyed:
1.653 + {
1.654 + NKern::ThreadEnterCS();
1.655 + TInt r = WaitForIdle2();
1.656 + NKern::ThreadLeaveCS();
1.657 + if (r==KErrNone)
1.658 + return __e32_atomic_load_acq32(&ChunkDestroyedCount);
1.659 + return 0; // never went idle so can't have been destroyed
1.660 + }
1.661 +
1.662 +
1.663 + case RSharedChunkLdd::ECloseChunk:
1.664 + {
1.665 + NKern::ThreadEnterCS();
1.666 +
1.667 + // Claim ownership of the chunk
1.668 + LockWait();
1.669 + DChunk* chunk=iChunk;
1.670 + iChunk = 0;
1.671 + LockSignal();
1.672 +
1.673 + // Close the chunk
1.674 + if(chunk)
1.675 + r = Kern::ChunkClose(chunk);
1.676 + else
1.677 + r = KErrNotFound;
1.678 +
1.679 + NKern::ThreadLeaveCS();
1.680 + return r;
1.681 + }
1.682 +
1.683 +
1.684 + case RSharedChunkLdd::ECheckMemory:
1.685 + case RSharedChunkLdd::EReadMemory:
1.686 + case RSharedChunkLdd::EWriteMemory:
1.687 + {
1.688 + TUint32 value=0;
1.689 +
1.690 + NKern::ThreadEnterCS();
1.691 + TLinAddr kernAddr;
1.692 + TInt maxSize;
1.693 + DChunk* chunk=OpenChunk(&kernAddr,&maxSize);
1.694 + if(chunk)
1.695 + {
1.696 + if((TUint)i1>=(TUint)maxSize)
1.697 + r = KErrArgument;
1.698 + else
1.699 + {
1.700 + TInt addr = kernAddr+i1;
1.701 +#ifdef _DEBUG
1.702 + TInt debugMask = Kern::CurrentThread().iDebugMask;
1.703 + Kern::CurrentThread().iDebugMask = debugMask&~(1<<KPANIC);
1.704 +#endif
1.705 + XTRAP(r, XT_DEFAULT,
1.706 + if(aFunction==RSharedChunkLdd::ECheckMemory)
1.707 + ReadByte((volatile TUint8*)addr);
1.708 + else if(aFunction==RSharedChunkLdd::EReadMemory)
1.709 + value = *(volatile TUint32*)addr;
1.710 + else if(aFunction==RSharedChunkLdd::EWriteMemory)
1.711 + *(volatile TUint32*)addr = i2;
1.712 + );
1.713 +#ifdef _DEBUG
1.714 + Kern::CurrentThread().iDebugMask = debugMask;
1.715 +#endif
1.716 + if(aFunction==RSharedChunkLdd::ECheckMemory)
1.717 + r = r==KErrNone;
1.718 + }
1.719 + chunk->Close(0);
1.720 + }
1.721 + else
1.722 + r = KErrNotFound;
1.723 +
1.724 + NKern::ThreadLeaveCS();
1.725 +
1.726 + if(aFunction==RSharedChunkLdd::EReadMemory)
1.727 + kumemput32(a2,&value,sizeof(value));
1.728 +
1.729 + return r;
1.730 + }
1.731 +
1.732 +
1.733 + case RSharedChunkLdd::EDfcReadWrite:
1.734 + case RSharedChunkLdd::EIsrReadWrite:
1.735 + {
1.736 + TUint32 value=0;
1.737 + kumemget32(&value,a2,sizeof(value));
1.738 +
1.739 + NKern::ThreadEnterCS();
1.740 + TLinAddr kernAddr;
1.741 + TInt maxSize;
1.742 + DChunk* chunk=OpenChunk(&kernAddr,&maxSize);
1.743 + if(chunk)
1.744 + {
1.745 + if((TUint)i1>=(TUint)maxSize)
1.746 + r = KErrArgument;
1.747 + else
1.748 + {
1.749 + TInt addr = kernAddr+i1;
1.750 + if(aFunction==RSharedChunkLdd::EDfcReadWrite)
1.751 + value = DfcReadWrite((TUint32*)addr,value);
1.752 + else if(aFunction==RSharedChunkLdd::EIsrReadWrite)
1.753 + value = IsrReadWrite((TUint32*)addr,value);
1.754 + r = KErrNone;
1.755 + }
1.756 + chunk->Close(0);
1.757 + }
1.758 + else
1.759 + r = KErrNotFound;
1.760 + NKern::ThreadLeaveCS();
1.761 +
1.762 + kumemput32(a2,&value,sizeof(value));
1.763 + return r;
1.764 + }
1.765 +
1.766 +
1.767 + case RSharedChunkLdd::ETestOpenAddress:
1.768 + {
1.769 + NKern::ThreadEnterCS();
1.770 +
1.771 + TLinAddr kernAddr;
1.772 + DChunk* chunk=OpenChunk(&kernAddr);
1.773 + if(!chunk)
1.774 + {
1.775 + NKern::ThreadLeaveCS();
1.776 + return KErrNotReady;
1.777 + }
1.778 +
1.779 + TInt offset;
1.780 + DChunk* chunk2 = Kern::OpenSharedChunk(0,a1,EFalse,offset);
1.781 + if(chunk2)
1.782 + {
1.783 + if(chunk2!=chunk)
1.784 + r = KErrGeneral;
1.785 + else
1.786 + r = KErrNone;
1.787 + chunk2->Close(0);
1.788 + }
1.789 + else
1.790 + r = KErrNotFound;
1.791 +
1.792 + chunk->Close(0);
1.793 +
1.794 + NKern::ThreadLeaveCS();
1.795 + return r;
1.796 + }
1.797 +
1.798 + case RSharedChunkLdd::ETestOpenHandle:
1.799 + {
1.800 + NKern::ThreadEnterCS();
1.801 +
1.802 + TLinAddr kernAddr;
1.803 + DChunk* chunk=OpenChunk(&kernAddr);
1.804 + if(!chunk)
1.805 + {
1.806 + NKern::ThreadLeaveCS();
1.807 + return KErrNotReady;
1.808 + }
1.809 +
1.810 + DChunk* chunk2 = Kern::OpenSharedChunk(0,i1,EFalse);
1.811 + if(chunk2)
1.812 + {
1.813 + if(chunk2==chunk)
1.814 + r = KErrNone;
1.815 + else
1.816 + r = KErrGeneral;
1.817 + chunk2->Close(0);
1.818 + }
1.819 + else
1.820 + r = KErrNotFound;
1.821 +
1.822 + chunk->Close(0);
1.823 +
1.824 + NKern::ThreadLeaveCS();
1.825 + return r;
1.826 + }
1.827 +
1.828 + case RSharedChunkLdd::ETestAddress:
1.829 + {
1.830 + NKern::ThreadEnterCS();
1.831 +
1.832 + TLinAddr kernAddr;
1.833 + DChunk* chunk=OpenChunk(&kernAddr);
1.834 + if(!chunk)
1.835 + {
1.836 + NKern::ThreadLeaveCS();
1.837 + return KErrNotReady;
1.838 + }
1.839 +
1.840 + TLinAddr kernAddr2;
1.841 + r = Kern::ChunkAddress(chunk,i1,i2,kernAddr2);
1.842 + if(r==KErrNone)
1.843 + if(kernAddr2!=kernAddr+i1)
1.844 + r = KErrGeneral;
1.845 +
1.846 + chunk->Close(0);
1.847 +
1.848 + NKern::ThreadLeaveCS();
1.849 + return r;
1.850 + }
1.851 +
1.852 + case RSharedChunkLdd::EChunkUserBase:
1.853 + {
1.854 + NKern::ThreadEnterCS();
1.855 +
1.856 + DChunk* chunk=OpenChunk();
1.857 + if(!chunk)
1.858 + {
1.859 + NKern::ThreadLeaveCS();
1.860 + return KErrNotReady;
1.861 + }
1.862 +
1.863 + TUint8* baseAddress = Kern::ChunkUserBase(chunk, &Kern::CurrentThread());
1.864 +
1.865 + chunk->Close(0);
1.866 + if(a1)
1.867 + kumemput32(a1,(TAny*)&baseAddress,4);
1.868 +
1.869 + NKern::ThreadLeaveCS();
1.870 + return KErrNone;
1.871 + }
1.872 +
1.873 + case RSharedChunkLdd::EChunkCloseAndFree:
1.874 + {
1.875 +#ifdef __EPOC32__
1.876 + // Allocate and then commit some physical ram to a chunk
1.877 + NKern::ThreadEnterCS();
1.878 + const TUint KPhysPages = 5;
1.879 + TUint pageSize = Kern::RoundToPageSize(1);
1.880 + TUint physBytes = KPhysPages * pageSize;
1.881 + TPhysAddr addrArray[KPhysPages];
1.882 + TLinAddr linAddr;
1.883 + TUint32 mapAttr;
1.884 + DChunk* chunk;
1.885 +
1.886 + TChunkCreateInfo chunkInfo;
1.887 + chunkInfo.iType = TChunkCreateInfo::ESharedKernelSingle;
1.888 + chunkInfo.iMaxSize = physBytes;
1.889 + chunkInfo.iMapAttr = EMapAttrFullyBlocking;
1.890 + chunkInfo.iOwnsMemory = EFalse;
1.891 +
1.892 + r = Kern::ChunkCreate(chunkInfo, chunk, linAddr, mapAttr);
1.893 + if (r != KErrNone)
1.894 + {
1.895 + NKern::ThreadLeaveCS();
1.896 + return r;
1.897 + }
1.898 + r = Epoc::AllocPhysicalRam(KPhysPages, addrArray);
1.899 + if (r != KErrNone)
1.900 + {
1.901 + Kern::ChunkClose(chunk);
1.902 + NKern::ThreadLeaveCS();
1.903 + return r;
1.904 + }
1.905 + r = Kern::ChunkCommitPhysical(chunk, 0, physBytes, addrArray);
1.906 + if (r != KErrNone)
1.907 + {
1.908 + Kern::ChunkClose(chunk);
1.909 + r = Epoc::FreePhysicalRam(KPhysPages, addrArray);
1.910 + NKern::ThreadLeaveCS();
1.911 + return r;
1.912 + }
1.913 + // Now attempt to free the physical ram immediately after the chunk
1.914 + // has been closed.
1.915 + Kern::ChunkClose(chunk);
1.916 + r = Epoc::FreePhysicalRam(KPhysPages, addrArray);
1.917 + NKern::ThreadLeaveCS();
1.918 + return r;
1.919 +#endif
1.920 + }
1.921 +
1.922 + default:
1.923 + return KErrNotSupported;
1.924 + }
1.925 + }
1.926 +