// First public contribution.
1 // Copyright (c) 2002-2009 Nokia Corporation and/or its subsidiary(-ies).
2 // All rights reserved.
3 // This component and the accompanying materials are made available
4 // under the terms of the License "Eclipse Public License v1.0"
5 // which accompanies this distribution, and is available
6 // at the URL "http://www.eclipse.org/legal/epl-v10.html".
8 // Initial Contributors:
9 // Nokia Corporation - initial contribution.
14 // e32test\dma\d_dma.cpp
19 #include <kernel/kern_priv.h>
20 #include <drivers/dma.h>
// Panic category used for every client panic raised by this test driver.
_LIT(KClientPanicCat, "D_DMA");
// Name and priority of the dedicated DFC thread created when a channel is opened.
_LIT(KDFCThreadName,"D_DMA_DFC_THREAD");
const TInt KDFCThreadPriority=26;
27 //////////////////////////////////////////////////////////////////////////////
30 // Class abstracting the way DMA buffers are created and destroyed to
31 // allow tests to run both on WINS and hardware.
// Allocate buffer slot aIdx with aSize bytes; KErrNone or KErrNoMemory.
TInt Alloc(TInt aIdx, TInt aSize);
// Linear (virtual) address of an allocated buffer; panics the client in
// debug builds if the slot is out of range or empty.
TUint8* Addr(TInt aIdx) const;
// Physical address of an allocated buffer (hardware variant only).
TPhysAddr PhysAddr(TInt aIdx) const;
// Size in bytes recorded for the buffer at Alloc() time.
TInt Size(TInt aIdx) const;
// Emulator variant: buffers are plain kernel-heap cells.
// NOTE(review): the #ifdef __WINS__ / #else / #endif lines appear elided in
// this view; the two iBufs declarations belong to mutually exclusive builds.
struct {TUint8* iPtr; TInt iSize;} iBufs[KMaxBuf];
// Hardware variant: physically contiguous RAM mapped through a DPlatChunkHw.
struct {DPlatChunkHw* iChunk; TInt iSize;} iBufs[KMaxBuf];
53 TUint8* TBufferMgr::Addr(TInt aIdx) const
55 __ASSERT_DEBUG(0 <= aIdx && aIdx < KMaxBuf, Kern::PanicCurrentThread(KClientPanicCat, __LINE__));
56 __ASSERT_DEBUG(iBufs[aIdx].iPtr != NULL, Kern::PanicCurrentThread(KClientPanicCat, __LINE__));
57 return iBufs[aIdx].iPtr;
61 TInt TBufferMgr::Size(TInt aIdx) const
63 __ASSERT_DEBUG(0 <= aIdx && aIdx < KMaxBuf, Kern::PanicCurrentThread(KClientPanicCat, __LINE__));
64 __ASSERT_DEBUG(iBufs[aIdx].iPtr != NULL, Kern::PanicCurrentThread(KClientPanicCat, __LINE__));
65 return iBufs[aIdx].iSize;
69 TInt TBufferMgr::Alloc(TInt aIdx, TInt aSize)
71 __ASSERT_DEBUG(0 <= aIdx && aIdx < KMaxBuf, Kern::PanicCurrentThread(KClientPanicCat, __LINE__));
72 __ASSERT_DEBUG(iBufs[aIdx].iPtr == NULL, Kern::PanicCurrentThread(KClientPanicCat, __LINE__));
73 NKern::ThreadEnterCS();
74 iBufs[aIdx].iPtr = new TUint8[aSize];
75 NKern::ThreadLeaveCS();
76 iBufs[aIdx].iSize = aSize;
77 return iBufs[aIdx].iPtr ? KErrNone : KErrNoMemory;
// Release every emulator-side buffer and reset the slots.
// NOTE(review): the loop body (presumably delete[] of iBufs[i].iPtr and
// nulling the slot) appears elided in this view of the file — confirm
// against the full source before relying on the exact cleanup steps.
void TBufferMgr::FreeAll()
// Critical section: the heap frees must not be interrupted by thread kill.
NKern::ThreadEnterCS();
for (TInt i=0; i<KMaxBuf; ++i)
NKern::ThreadLeaveCS();
94 TUint8* TBufferMgr::Addr(TInt aIdx) const
96 __ASSERT_DEBUG(0 <= aIdx && aIdx < KMaxBuf, Kern::PanicCurrentThread(KClientPanicCat, __LINE__));
97 __ASSERT_DEBUG(iBufs[aIdx].iChunk != NULL, Kern::PanicCurrentThread(KClientPanicCat, __LINE__));
98 return (TUint8*)iBufs[aIdx].iChunk->LinearAddress();
102 TPhysAddr TBufferMgr::PhysAddr(TInt aIdx) const
104 __ASSERT_DEBUG(0 <= aIdx && aIdx < KMaxBuf, Kern::PanicCurrentThread(KClientPanicCat, __LINE__));
105 __ASSERT_DEBUG(iBufs[aIdx].iChunk != NULL, Kern::PanicCurrentThread(KClientPanicCat, __LINE__));
106 return iBufs[aIdx].iChunk->PhysicalAddress();
110 TInt TBufferMgr::Size(TInt aIdx) const
112 __ASSERT_DEBUG(0 <= aIdx && aIdx < KMaxBuf, Kern::PanicCurrentThread(KClientPanicCat, __LINE__));
113 __ASSERT_DEBUG(iBufs[aIdx].iChunk != NULL, Kern::PanicCurrentThread(KClientPanicCat, __LINE__));
114 return iBufs[aIdx].iSize;
// Allocate a physically contiguous RAM block and map it (supervisor r/w,
// fully blocking i.e. uncached/unbuffered) so it can be used as a DMA buffer.
// NOTE(review): the declaration of 'phys' and the intermediate error checks
// between these statements appear elided in this view; the FreePhysicalRam
// call below is evidently the failure path of DPlatChunkHw::New.
TInt TBufferMgr::Alloc(TInt aIdx, TInt aSize)
__ASSERT_DEBUG(0 <= aIdx && aIdx < KMaxBuf, Kern::PanicCurrentThread(KClientPanicCat, __LINE__));
__ASSERT_DEBUG(iBufs[aIdx].iChunk == NULL, Kern::PanicCurrentThread(KClientPanicCat, __LINE__));
// Critical section: RAM is owned but not yet tracked by a chunk object.
NKern::ThreadEnterCS();
TInt r = Epoc::AllocPhysicalRam(aSize, phys);
// Map the physical RAM; EMapAttrFullyBlocking gives an uncached mapping.
r = DPlatChunkHw::New(iBufs[aIdx].iChunk, phys, aSize, EMapAttrSupRw | EMapAttrFullyBlocking);
// Failure path: give the physical RAM back if the mapping could not be made.
Epoc::FreePhysicalRam(phys, aSize);
iBufs[aIdx].iSize = aSize;
__KTRACE_OPT(KDMA, Kern::Printf("TBufferMgr::Alloc buffer %d linAddr=0x%08x, physAddr=0x%08x, size=%d",
aIdx, Addr(aIdx), PhysAddr(aIdx), Size(aIdx)));
NKern::ThreadLeaveCS();
// Close every hardware chunk and return its physical RAM to the system.
// NOTE(review): the per-slot guard (presumably 'if (iBufs[i].iChunk)')
// appears elided in this view — the statements below only make sense for
// occupied slots.
void TBufferMgr::FreeAll()
for (TInt i=0; i<KMaxBuf; ++i)
// Capture base/size before Close() destroys the chunk object.
TPhysAddr base = iBufs[i].iChunk->PhysicalAddress();
TInt size = iBufs[i].iSize;
// The manager must hold the only reference, otherwise freeing the RAM
// underneath another user would be unsafe.
__ASSERT_DEBUG(iBufs[i].iChunk->AccessCount() == 1,
Kern::PanicCurrentThread(KClientPanicCat, __LINE__));
NKern::ThreadEnterCS();
iBufs[i].iChunk->Close(NULL);
iBufs[i].iChunk = NULL;
Epoc::FreePhysicalRam(base, size);
NKern::ThreadLeaveCS();
// Count a request's fragments by walking its descriptor-header linked list
// (iFirstHdr -> iNext -> ... -> NULL).
// NOTE(review): the counter declaration, loop body and return statement
// appear elided in this view of the file.
static TInt FragmentCount(DDmaRequest* aRequest)
for (SDmaDesHdr* pH = aRequest->iFirstHdr; pH != NULL; pH = pH->iNext)
173 //////////////////////////////////////////////////////////////////////////////
// Logical channel through which the user-side DMA test harness exercises
// TDmaChannel / DDmaRequest.
// NOTE(review): access specifiers and several members referenced elsewhere
// in this file (e.g. iCookie, iBufMgr, iClient) appear elided in this view.
class DDmaTestChannel : public DLogicalChannelBase
virtual ~DDmaTestChannel();
// from DLogicalChannelBase
virtual TInt DoCreate(TInt aUnit, const TDesC8* anInfo, const TVersion& aVer);
virtual TInt Request(TInt aFunction, TAny* a1, TAny* a2);
virtual TInt RequestUserHandle(DThread* aThread, TOwnerType aType);
// Interpret a command descriptor sent via RTestDma::EExecute.
TInt Execute(const TDesC8& aDes);
// DFC callback completing the client request that matches the iMap slot.
static void Dfc(DDmaRequest::TResult aResult, TAny* aArg);
// Copy the PSL test configuration to user memory (EGetInfo open path).
TInt DoGetInfo(TAny* aInfo);
// The DMA channel under test; opened in DoCreate.
TDmaChannel* iChannel;
enum { KMaxRequests = 8 };
// One pre-created transfer request per slot.
DDmaRequest* iRequests[KMaxRequests];
// Asynchronous completion objects paired 1:1 with iRequests.
TClientRequest* iClientRequests[KMaxRequests];
// Each slot points back at this channel; &iMap[i] is the Dfc() cookie,
// from which the request index is recovered by pointer arithmetic.
DDmaTestChannel* iMap[KMaxRequests];
// PSL cookie for memory-to-memory transfers, from DmaTestInfo().
TUint32 iMemMemPslInfo;
// Dedicated DFC queue (dynamically created, one per channel).
TDynamicDfcQue* iDfcQ;
// Restrict handle duplication: only the thread that opened the channel may
// own a handle to it, and only with thread ownership.
// NOTE(review): the success return (presumably 'return KErrNone;') appears
// elided in this view of the file.
TInt DDmaTestChannel::RequestUserHandle(DThread* aThread, TOwnerType aType)
if (aType!=EOwnerThread || aThread!=iClient)
return KErrAccessDenied;
209 TInt DDmaTestChannel::DoGetInfo(TAny* aInfo)
211 RTestDma::TInfo uinfo;
212 const TDmaTestInfo& kinfo = DmaTestInfo();
213 uinfo.iMaxTransferSize = kinfo.iMaxTransferSize;
214 uinfo.iMemAlignMask = kinfo.iMemAlignMask;
215 uinfo.iMaxSbChannels = kinfo.iMaxSbChannels;
216 memcpy(&(uinfo.iSbChannels), kinfo.iSbChannels, 4 * kinfo.iMaxSbChannels);
217 uinfo.iMaxDbChannels = kinfo.iMaxDbChannels;
218 memcpy(&(uinfo.iDbChannels), kinfo.iDbChannels, 4 * kinfo.iMaxDbChannels);
219 uinfo.iMaxSgChannels = kinfo.iMaxSgChannels;
220 memcpy(&(uinfo.iSgChannels), kinfo.iSgChannels, 4 * kinfo.iMaxSgChannels);
222 XTRAPD(r, XT_DEFAULT, kumemput(aInfo, &uinfo, sizeof(RTestDma::TInfo)));
223 return r == KErrNone ? KErrDied : KErrGeneral;
227 // called in thread critical section
// Second-phase construction, run in thread critical section.
// Reads a TOpenInfo from the client: either an EGetInfo query (answered by
// DoGetInfo, which fails the open by design) or a real open that creates a
// DFC queue, opens the DMA channel and pre-builds KMaxRequests requests.
// NOTE(review): the error checks after each step (and the #endif matching
// the CPU_AFFINITY_ANY block) appear elided in this view of the file.
TInt DDmaTestChannel::DoCreate(TInt /*aUnit*/, const TDesC8* aInfo, const TVersion& /*aVer*/)
TPckgBuf<RTestDma::TOpenInfo> infoBuf;
TInt r=Kern::ThreadDesRead(&Kern::CurrentThread(), aInfo, infoBuf, 0, KChunkShiftBy0);
// Info query: deliver configuration and deliberately fail the open.
if (infoBuf().iWhat == RTestDma::TOpenInfo::EGetInfo)
return DoGetInfo(infoBuf().U.iInfo);
// Real open: create a dedicated DFC thread for this channel's callbacks.
r = Kern::DynamicDfcQCreate(iDfcQ, KDFCThreadPriority, KDFCThreadName);
#ifdef CPU_AFFINITY_ANY
NKern::ThreadSetCpuAffinity((NThread*)(iDfcQ->iThread), KCpuAffinityAny);
iMemMemPslInfo = DmaTestInfo().iMemMemPslInfo;
iCookie = infoBuf().U.iOpen.iId;
TDmaChannel::SCreateInfo info;
info.iCookie = iCookie;
info.iDfcPriority = 3;
info.iDesCount = infoBuf().U.iOpen.iDesCount;
r = TDmaChannel::Open(info, iChannel);
iClient = &Kern::CurrentThread();
// Pre-create one client request and one DMA request per slot.
for (TInt i=0; i<KMaxRequests; ++i)
r = Kern::CreateClientRequest(iClientRequests[i]);
TInt max = infoBuf().U.iOpen.iMaxTransferSize;
// Exercise request with custom limit
iRequests[i] = new DDmaRequest(*iChannel, Dfc, iMap+i, max);
// Exercise request with default limit
iRequests[i] = new DDmaRequest(*iChannel, Dfc, iMap+i);
// Tear down the channel: stop outstanding transfers, then destroy the
// per-slot request objects.
// NOTE(review): the loop bodies (presumably deleting iRequests[i]) and the
// channel/DFC-queue/buffer cleanup appear elided in this view of the file.
DDmaTestChannel::~DDmaTestChannel()
// Make sure no transfer is in flight before destroying requests.
iChannel->CancelAll();
for (i=0; i<KMaxRequests; ++i)
for (i=0; i<KMaxRequests; ++i)
Kern::DestroyClientRequest(iClientRequests[i]);
// Dispatch a synchronous client request.
// NOTE(review): the switch braces, several case-local loops/returns and the
// #ifdef DMASIM / #else / #endif lines around the EFragment address setup
// appear elided in this view of the file.
TInt DDmaTestChannel::Request(TInt aFunction, TAny* a1, TAny* a2)
// a1 = buffer index, a2 = size in bytes.
case RTestDma::EAllocBuffer:
return iBufMgr.Alloc((TInt)a1, (TInt)a2);
case RTestDma::EFreeAllBuffers:
// Fill buffer a1 with byte value a2.
case RTestDma::EFillBuffer:
TUint8 val = (TUint8)(TUint)a2;
memset(iBufMgr.Addr(i), val, iBufMgr.Size(i));
// Verify buffer a1 contains only byte value a2; trace the first mismatch.
case RTestDma::ECheckBuffer:
TUint8 val = (TUint8)(TUint)a2;
TUint8* p = iBufMgr.Addr(i);
TUint8* end = p + iBufMgr.Size(i);
// p was post-incremented by the (elided) scan loop, so the offending
// byte is at p-1.
const TUint8 prevValue = *(p-1);
__KTRACE_OPT(KDMA, Kern::Printf("Check DMA buffer number %d failed at offset: %d value: %d(%c)",
i, p-iBufMgr.Addr(i)-1, prevValue, prevValue));
// Fragment a memory-to-memory transfer described by a TFragmentInfo.
case RTestDma::EFragment:
RTestDma::TFragmentInfo info;
kumemget(&info, a1, sizeof info);
__ASSERT_DEBUG(iBufMgr.Size(info.iSrcBufIdx) == iBufMgr.Size(info.iDestBufIdx),
Kern::PanicCurrentThread(KClientPanicCat, __LINE__));
__ASSERT_DEBUG(info.iSize <= iBufMgr.Size(info.iSrcBufIdx),
Kern::PanicCurrentThread(KClientPanicCat, __LINE__));
__ASSERT_DEBUG(0 <= info.iRequestIdx && info.iRequestIdx < KMaxRequests,
Kern::PanicCurrentThread(KClientPanicCat, __LINE__));
// DMASIM doesn't use physical addresses
TUint32 src = (TUint32)iBufMgr.Addr(info.iSrcBufIdx);
TUint32 dest = (TUint32)iBufMgr.Addr(info.iDestBufIdx);
TUint KFlags = KDmaMemSrc | KDmaIncSrc | KDmaMemDest | KDmaIncDest;
// Hardware build: program the controller with physical addresses.
TUint32 src = iBufMgr.PhysAddr(info.iSrcBufIdx);
TUint32 dest = iBufMgr.PhysAddr(info.iDestBufIdx);
TUint KFlags = KDmaMemSrc | KDmaIncSrc | KDmaPhysAddrSrc |
KDmaMemDest | KDmaIncDest | KDmaPhysAddrDest | KDmaAltTransferLen;
TInt r = iRequests[info.iRequestIdx]->Fragment(src, dest, info.iSize, KFlags, iMemMemPslInfo);
// Arm the asynchronous completion only if fragmentation succeeded.
if (r == KErrNone && info.iRs)
r = iClientRequests[info.iRequestIdx]->SetStatus(info.iRs);
// Run a command string (see Execute()).
case RTestDma::EExecute:
return Execute(*(TDesC8*)a1);
// Fault-injection hooks on the channel under test.
case RTestDma::EFailNext:
return iChannel->FailNext((TInt)a1);
case RTestDma::EFragmentCount:
TInt reqIdx = (TInt)a1;
__ASSERT_DEBUG(0 <= reqIdx && reqIdx < KMaxRequests, Kern::PanicCurrentThread(KClientPanicCat, __LINE__));
return iRequests[reqIdx]->FragmentCount();
return FragmentCount(iRequests[reqIdx]);
case RTestDma::EMissInterrupts:
return iChannel->MissNextInterrupts((TInt)a1);
// Unknown function: programming error on the client side.
Kern::PanicCurrentThread(KClientPanicCat, __LINE__);
return KErrNone; // work-around spurious warning
// Interpret a command string copied from user space: each opcode letter is
// read in turn, 'Q<digit>' queues request <digit>, another opcode cancels
// all transfers, anything else panics the client.
// NOTE(review): the command-loop and switch structure (braces, the 'while'
// over [p, pEnd), the opcode case labels and the final return) appear
// elided in this view of the file.
TInt DDmaTestChannel::Execute(const TDesC8& aDes)
Kern::KUDesGet(cmd, aDes);
__KTRACE_OPT(KDMA, Kern::Printf("DDmaTestChannel::Execute cmd=%S", &cmd));
const TText8* p = cmd.Ptr();
const TText8* pEnd = p + cmd.Length();
TText8 opcode = *p++;
// Queue: the next character is the request slot as an ASCII digit.
TInt arg = *p++ - '0';
__ASSERT_DEBUG(0 <= arg && arg < KMaxRequests, Kern::PanicCurrentThread(KClientPanicCat, __LINE__));
iRequests[arg]->Queue();
// Cancel: abort everything outstanding on the channel.
iChannel->CancelAll();
// Unknown opcode: client programming error.
Kern::PanicCurrentThread(KClientPanicCat, __LINE__);
// Static DFC callback. aArg points at one slot of the channel's iMap array,
// so dereferencing it yields the channel and pointer arithmetic against
// iMap yields the request index; the matching client request is completed
// with KErrNone or KErrGeneral.
// NOTE(review): one line appears elided between computing 'r' and the
// completion call — possibly a guard (e.g. on req's readiness); confirm
// against the full source.
void DDmaTestChannel::Dfc(DDmaRequest::TResult aResult, TAny* aArg)
DDmaTestChannel** ppC = (DDmaTestChannel**)aArg;
DDmaTestChannel* pC = *ppC;
// Index of the request this callback belongs to.
TInt i = ppC - pC->iMap;
TClientRequest* req = pC->iClientRequests[i];
TInt r = (aResult==DDmaRequest::EOk) ? KErrNone : KErrGeneral;
Kern::QueueRequestComplete(pC->iClient, req, r);
431 //////////////////////////////////////////////////////////////////////////////
// LDD factory object for the DMA test driver.
// NOTE(review): the constructor declaration, access specifiers and closing
// brace appear elided in this view (a constructor is defined below).
class DDmaTestFactory : public DLogicalDevice
// from DLogicalDevice
virtual TInt Install();
virtual void GetCaps(TDes8& aDes) const;
virtual TInt Create(DLogicalChannelBase*& aChannel);
444 DDmaTestFactory::DDmaTestFactory()
446 iVersion = TestDmaLddVersion();
447 iParseMask = KDeviceAllowUnit; // no info, no PDD
448 // iUnitsMask = 0; // Only one thing
452 TInt DDmaTestFactory::Create(DLogicalChannelBase*& aChannel)
454 aChannel=new DDmaTestChannel;
455 return aChannel ? KErrNone : KErrNoMemory;
459 TInt DDmaTestFactory::Install()
461 return SetName(&KTestDmaLddName);
465 void DDmaTestFactory::GetCaps(TDes8& /*aDes*/) const
469 //////////////////////////////////////////////////////////////////////////////
471 DECLARE_STANDARD_LDD()
473 return new DDmaTestFactory;