Update contrib.
1 // Copyright (c) 2002-2009 Nokia Corporation and/or its subsidiary(-ies).
2 // All rights reserved.
3 // This component and the accompanying materials are made available
4 // under the terms of "Eclipse Public License v1.0"
5 // which accompanies this distribution, and is available
6 // at the URL "http://www.eclipse.org/legal/epl-v10.html".
8 // Initial Contributors:
9 // Nokia Corporation - initial contribution.
14 // Test driver for DMA V2 framework
18 #include <kernel/kern_priv.h>
19 #include <drivers/dma.h>
22 _LIT(KClientPanicCat, "D_DMA2");
23 _LIT(KDFCThreadName,"D_DMA_DFC_THREAD");
24 _LIT(KIsrCbDfcThreadName,"D_DMA_IsrCb_thread");
25 const TInt KDFCThreadPriority=26;
35 {iStart = NKern::FastCounter();}
39 iStop = NKern::FastCounter();
41 __KTRACE_OPT(KDMA, Kern::Printf(">TStopwatch::Stop FastCounter ticks: iStart=0x%lx iStop=0x%lx", iStart, iStop));
44 TUint64 ReadMicroSecs() const
50 diff = (KMaxTUint64 - iStart) + iStop;
54 diff = iStop - iStart;
56 return FastCountToMicroSecs(diff);
58 //TODO On SMP it is possible for the value returned from
59 //NKern::FastCounter to depend on the current CPU (i.e.
62 //One solution would be to tie DFCs and ISRs to the same
63 //core as the client, but this would reduce the usefulness of
70 TUint64 FastCountToMicroSecs(TUint64 aCount) const
72 const TUint64 countsPerS = NKern::FastCounterFrequency();
74 TUint64 timeuS = (aCount*1000000)/countsPerS;
75 __KTRACE_OPT(KDMA, Kern::Printf(">TStopwatch::FastCountToMicroSecs FastCounter ticks: aCount=0x%lx countsPerS=0x%lx time=0x%lx", aCount, countsPerS, timeuS));
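// A minimal sketch of the mitigation hinted at in the TODO above: pinning a
// DFC queue's thread to a single CPU so that FastCounter readings taken in
// DFC context come from the same per-CPU counter as those taken by the
// client. It reuses the NKern::ThreadSetCpuAffinity() call that DoCreate()
// below already makes with KCpuAffinityAny; treating a plain CPU index as a
// valid affinity value is an assumption made for illustration, and this
// helper is not called anywhere in the test driver.
static void PinDfcQToCpu(TDynamicDfcQue* aDfcQ, TUint32 aCpuIndex)
	{
	// Restrict the queue's DFC thread to aCpuIndex (e.g. the CPU on which
	// the client thread is expected to run).
	NKern::ThreadSetCpuAffinity((NThread*)(aDfcQ->iThread), aCpuIndex);
	}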
83 //////////////////////////////////////////////////////////////////////////////
85 class DClientDmaRequest;
87 Driver channel. Only accessible by a single client thread
89 class DDmaTestSession : public DLogicalChannelBase
93 virtual ~DDmaTestSession();
95 // from DLogicalChannelBase
96 virtual TInt DoCreate(TInt aUnit, const TDesC8* anInfo, const TVersion& aVer);
97 virtual TInt Request(TInt aFunction, TAny* a1, TAny* a2);
98 virtual TInt RequestUserHandle(DThread* aThread, TOwnerType aType);
100 TInt DoGetInfo(TAny* aInfo);
102 TInt OpenDmaChannel(TUint aPslCookie, TUint& aDriverCookie);
103 TInt CloseDmaChannelByCookie(TUint aDriverCookie);
104 TInt PauseDmaChannelByCookie(TUint aDriverCookie);
105 TInt ResumeDmaChannelByCookie(TUint aDriverCookie);
106 TInt GetChannelCapsByCookie(TUint aDriverCookie, SDmacCaps& aChannelCaps);
107 TInt GetChannelCapsByCookie(TUint aDriverCookie, TDmacTestCaps& aChannelCaps);
108 TInt CancelAllByCookie(TUint aDriverCookie);
109 TInt IsrRedoRequestByCookie(TUint aDriverCookie,TUint32 aSrcAddr,TUint32 aDstAddr,TInt aTransferCount,TUint32 aPslRequestInfo,TBool aIsrCb);
110 TInt IsQueueEmptyByCookie(TUint aDriverCookie, TBool& aQueueEmpty);
111 TInt ChannelIsOpenedByCookie(TUint aDriverCookie, TBool& aChannelOpen);
112 void CloseDmaChannelByIndex(TInt aIndex);
113 void CancelAllByIndex(TInt aIndex);
114 TInt PauseDmaChannelByIndex(TInt aIndex);
115 TInt ResumeDmaChannelByIndex(TInt aIndex);
116 TInt IsrRedoRequestByIndex(TInt aIndex,TUint32 aSrcAddr,TUint32 aDstAddr,TInt aTransferCount,TUint32 aPslRequestInfo,TBool aIsrCb);
117 TInt CreateSharedChunk();
118 TUint OpenSharedChunkHandle();
121 Creates a new kernel-side DMA request object, associated with a previously
124 @param aChannelCookie - A channel cookie as returned by OpenDmaChannel
125 @param aRequestCookie - On success, a cookie by which the DMA request can be referred to
126 @param aNewCallback - If true, a new-style DMA callback will be used
128 TInt CreateDmaRequest(TUint aChannelCookie, TUint& aRequestCookie, TBool aNewCallback = EFalse, TInt aMaxFragmentSizeBytes=0);
130 //TODO what happens if a client closes a channel that
131 //it still has dma requests associated with?
134 Destroys a previously created DMA request object
136 TInt DestroyDmaRequestByCookie(TUint aRequestCookie);
138 void DestroyDmaRequestByIndex(TInt aIndex);
141 TInt CookieToChannelIndex(TUint aDriverCookie) const;
142 TInt CookieToRequestIndex(TUint aRequestCookie) const;
144 void FixupTransferArgs(TDmaTransferArgs& aTransferArgs) const;
145 TInt FragmentRequest(TUint aRequestCookie, const TDmaTransferArgs& aTransferArgs, TBool aLegacy=ETrue);
147 TInt QueueRequest(TUint aRequestCookie, TRequestStatus* aStatus, TCallbackRecord* aRecord, TUint64* aDurationMicroSecs);
148 DClientDmaRequest* RequestFromCookie(TUint aRequestCookie) const;
149 TInt RequestFragmentCount(TUint aRequestCookie);
151 TDmaV2TestInfo ConvertTestInfo(const TDmaTestInfo& aOldInfo) const;
154 TDynamicDfcQue* iDfcQ;
155 TDynamicDfcQue* iIsrCallbackDfcQ; // Will be used by requests which complete with an ISR callback
156 static const TInt KMaxChunkSize = 8 * KMega;
160 RPointerArray<TDmaChannel> iChannels;
161 RPointerArray<DClientDmaRequest> iClientDmaReqs;
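// A note on the cookie scheme used by the methods above: a "cookie" handed
// back to user side is simply the kernel-side object pointer reinterpreted
// as a TUint, and it is validated on every call by looking that pointer up
// in the owning RPointerArray (see OpenDmaChannel() and
// CookieToChannelIndex() below). A minimal sketch of the idea, using the
// driver's own members but hypothetical surrounding code:
//
//   aDriverCookie = reinterpret_cast<TUint>(channel);   // handed to user side
//   ...
//   TInt index = iChannels.Find(reinterpret_cast<TDmaChannel*>(aDriverCookie));
//   if (index < 0)
//       return KErrNotFound;   // stale or invalid cookie - reject the request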
166 Allows a TClientRequest to be associated with a DDmaRequest
168 class DClientDmaRequest : public DDmaRequest
171 static DClientDmaRequest* Construct(DThread* aClient, TDfcQue* const aDfcQ, TDmaChannel& aChannel, TBool aNewStyle=EFalse, TInt aMaxTransferSize=0);
172 ~DClientDmaRequest();
174 TInt Queue(TRequestStatus* aRequestStatus, TCallbackRecord* aRecord, TUint64* aDurationMicroSecs);
175 void AddRequeArgs(const TIsrRequeArgsSet& aRequeArgSet);
177 TUint64 GetDuration()
178 {return iStopwatch.ReadMicroSecs();}
182 /** Construct with old style callback */
183 DClientDmaRequest(DThread* aClient, TDfcQue* const aDfcQ, TDmaChannel& aChannel, TInt aMaxTransferSize);
185 /** Construct with new style callback */
186 DClientDmaRequest(DThread* aClient, TDfcQue* const aDfcQ, TDmaChannel& aChannel, TBool aNewStyle, TInt aMaxTransferSize);
189 static void CallbackOldStyle(TResult aResult, TAny* aRequest);
190 static void Callback(TUint, TDmaResult, TAny*, SDmaDesHdr*);
191 static void CompleteCallback(TAny* aRequest);
193 void DoCallback(TUint, TDmaResult);
196 //!< Used to return a TCallbackRecord and transfer time
197 TClientDataRequest2<TCallbackRecord, TUint64>* iClientDataRequest;
199 DThread* const iClient;
200 TDfcQue* const iDfcQ; //!< Use the DDmaTestSession's dfc queue
203 TStopwatch iStopwatch;
204 TIsrRequeArgsSet iIsrRequeArgSet;
207 DClientDmaRequest* DClientDmaRequest::Construct(DThread* aClient, TDfcQue* const aDfcQ, TDmaChannel& aChannel, TBool aNewStyle, TInt aMaxTransferSize)
209 DClientDmaRequest* dmaRequest = NULL;
213 dmaRequest = new DClientDmaRequest(aClient, aDfcQ, aChannel, aNewStyle, aMaxTransferSize);
215 TEST_FAULT; // If a new-style DMA request was requested it should have been caught earlier
220 dmaRequest = new DClientDmaRequest(aClient, aDfcQ, aChannel, aMaxTransferSize);
223 if(dmaRequest == NULL)
228 const TInt r = dmaRequest->Create();
237 DClientDmaRequest::DClientDmaRequest(DThread* aClient, TDfcQue* const aDfcQ, TDmaChannel& aChannel, TInt aMaxFragmentSize)
238 :DDmaRequest(aChannel, &CallbackOldStyle, this, aMaxFragmentSize),
239 iClientDataRequest(NULL),
242 iDfc(CompleteCallback,NULL, iDfcQ, KMaxDfcPriority)
246 DClientDmaRequest::DClientDmaRequest(DThread* aClient, TDfcQue* const aDfcQ, TDmaChannel& aChannel, TBool /*aNewStyle*/, TInt aMaxFragmentSize)
247 :DDmaRequest(aChannel, &Callback, this, aMaxFragmentSize),
248 iClientDataRequest(NULL),
251 iDfc(CompleteCallback,NULL, iDfcQ, KMaxDfcPriority)
256 TInt DClientDmaRequest::Create()
258 return Kern::CreateClientDataRequest2(iClientDataRequest);
261 DClientDmaRequest::~DClientDmaRequest()
263 __KTRACE_OPT(KDMA, Kern::Printf(">DClientDmaRequest::~DClientDmaRequest"));
264 if(iClientDataRequest)
266 Kern::DestroyClientRequest(iClientDataRequest);
271 Queue the DClientDmaRequest.
273 @param aRequestStatus Pointer to the client's request status
274 @param aRecord Pointer to the user's TCallbackRecord; must not be null
276 -KErrInUse The client request is in use
279 TInt DClientDmaRequest::Queue(TRequestStatus* aRequestStatus, TCallbackRecord* aRecord, TUint64* aDurationMicroSecs)
281 __NK_ASSERT_ALWAYS(aRecord);
282 __NK_ASSERT_ALWAYS(aDurationMicroSecs);
284 //erase results from last transfer
285 iClientDataRequest->Data1().Reset();
286 iClientDataRequest->SetDestPtr1(aRecord);
288 iClientDataRequest->SetDestPtr2(aDurationMicroSecs);
291 TInt r = iClientDataRequest->SetStatus(aRequestStatus);
299 r = DDmaRequest::Queue();
301 // The old version of Queue() did not return an error code
302 DDmaRequest::Queue();
309 void DClientDmaRequest::AddRequeArgs(const TIsrRequeArgsSet& aRequeArgSet)
311 iIsrRequeArgSet = aRequeArgSet;
315 If a transfer complete callback in ISR context is received, this will be
316 called to redo the request with the first entry in the array
318 @return ETrue if the redo was successful - indicates that another callback is coming
320 TBool DClientDmaRequest::RedoRequest()
322 TIsrRequeArgs args = iIsrRequeArgSet.GetArgs();
323 const TInt r = args.Call(iChannel);
324 TCallbackRecord& record = iClientDataRequest->Data1();
325 record.IsrRedoResult(r);
326 return (r == KErrNone);
331 Calls TDmaChannel::IsrRedoRequest on aChannel
332 with this object's parameters
334 TInt TIsrRequeArgs::Call(TDmaChannel& aChannel)
337 return aChannel.IsrRedoRequest(iSrcAddr, iDstAddr, iTransferCount, iPslRequestInfo, iIsrCb);
340 return KErrNotSupported;
345 Check that both source and destination of ISR reque args will
346 lie within the range specified by aStart and aSize.
348 @param aStart The linear base address of the region
349 @param aSize The size of the region
351 TBool TIsrRequeArgs::CheckRange(TLinAddr aStart, TUint aSize) const
353 TUint physStart = Epoc::LinearToPhysical(aStart);
354 TEST_ASSERT(physStart != KPhysAddrInvalid);
356 TAddrRange chunk(physStart, aSize);
357 TBool sourceOk = (iSrcAddr == KPhysAddrInvalid) ? ETrue : chunk.Contains(SourceRange());
359 TBool destOk = (iDstAddr == KPhysAddrInvalid) ? ETrue : chunk.Contains(DestRange());
361 return sourceOk && destOk;
364 TBool TIsrRequeArgsSet::CheckRange(TLinAddr aAddr, TUint aSize) const
366 for(TInt i=0; i<iCount; i++)
368 if(!iRequeArgs[i].CheckRange(aAddr, aSize))
375 Translate an old-style DMA callback to a new-style one
377 void DClientDmaRequest::CallbackOldStyle(TResult aResult, TAny* aArg)
379 __KTRACE_OPT(KDMA, Kern::Printf(">DClientDmaRequest::CallBackOldStyle: TResult result=%d", aResult));
380 TEST_ASSERT(aResult != EBadResult);
381 //translate result code
382 const TDmaResult result = (aResult == EOk) ? EDmaResultOK : EDmaResultError;
384 //call the new-style callback
385 Callback(EDmaCallbackRequestCompletion, result, aArg, NULL);
390 The new-style callback called by the DMA framework.
391 It may be called in either thread or ISR context
393 void DClientDmaRequest::Callback(TUint aCallbackType, TDmaResult aResult, TAny* aArg, SDmaDesHdr* aHdr)
395 const TInt context = NKern::CurrentContext();
396 __KTRACE_OPT(KDMA, Kern::Printf(">DClientDmaRequest::CallBack: TDmaResult result = %d, NKern::TContext context = %d", aResult, context));
398 DClientDmaRequest& self = *reinterpret_cast<DClientDmaRequest*>(aArg);
399 self.DoCallback(aCallbackType, aResult);
401 // decide if callback is complete
402 const TBool transferComplete = aCallbackType & EDmaCallbackRequestCompletion;
403 if(!transferComplete)
408 // If there are reque args then redo this request;
409 // another callback would then be expected.
410 // Requests can only be re-queued in ISR context, but we
411 // do not check that here as it is up to the client to get
412 // it right - also, we want to test that the PIL catches this
414 if(!self.iIsrRequeArgSet.IsEmpty())
416 // If the redo call was successful, return and wait for the next callback
417 if(self.RedoRequest())
425 CompleteCallback(aArg);
428 case NKern::EInterrupt:
430 self.iDfc.iPtr = aArg;
434 case NKern::EIDFC: //fall-through
435 case NKern::EEscaped:
442 Log results of callback. May be called in either thread or ISR context
444 void DClientDmaRequest::DoCallback(TUint aCallbackType, TDmaResult aResult)
446 iStopwatch.Stop(); //successive calls will simply overwrite the stop time
448 // This will always be done whether the client requested a
449 // callback record or not
450 TCallbackRecord& record = iClientDataRequest->Data1();
451 record.ProcessCallback(aCallbackType, aResult);
455 This function may either be called directly or queued as a DFC
457 void DClientDmaRequest::CompleteCallback(TAny* aArg)
459 __KTRACE_OPT(KDMA, Kern::Printf(">DClientDmaRequest::CompleteCallBack thread %O", &Kern::CurrentThread()));
462 DClientDmaRequest& self = *reinterpret_cast<DClientDmaRequest*>(aArg);
464 self.iClientDataRequest->Data2() = self.iStopwatch.ReadMicroSecs();
466 //Assert that we called SetRequestStatus on this object before
468 __NK_ASSERT_DEBUG(self.iClientDataRequest->IsReady());
470 // This is an inelegant, temporary solution to the following problem:
472 // If a DMA request completes with an ISR callback the test
473 // framework will queue this function as a DFC which
474 // will then signal the user-side client. As a consequence of
475 // this the user-side client may then decide to destroy this
476 // request. However, until the DMA framework's DFC has run
477 // and called OnDeque() on this request, it is still considered as
478 // queued. Since it is possible that this DFC could run
479 // before the DMA fw's DFC, this request could get destroyed while
480 // it is still queued, triggering a PIL assertion.
482 // The real fix is likely to be for the PIL to call the callback
483 // twice, but with different arguments: once to announce the
484 // ISR and again to announce the dequeue.
486 // Here we poll and wait for this request to be dequeued. Note,
487 // this DFC is currently run on a separate DFC queue, otherwise
488 // it could get deadlocked. An alternative to polling would be
489 // to use DCondVar, but that would require PIL modification
491 if(NKern::CurrentThread() == self.iDfcQ->iThread)
493 // Only need to poll if we aren't on the channel's DFC queue
496 // once the request has been unqueued it
497 // can only be queued again by the client
498 const TBool queued = __e32_atomic_load_acq32(&self.iQueued);
501 __KTRACE_OPT(KDMA, Kern::Printf("Waiting for request to be dequeued"));
507 // If we are on the channel's DFCQ the request should already be dequeued
509 __NK_ASSERT_DEBUG(!__e32_atomic_load_acq32(&self.iQueued));
512 // We can always complete with KErrNone; the actual DMA result is
513 // logged in the TCallbackRecord
514 Kern::QueueRequestComplete(self.iClient, self.iClientDataRequest, KErrNone);
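// A minimal sketch of the polling wait described in the comment block above,
// for the case where this DFC is not running on the channel's own DFC
// thread. The 1-tick NKern::Sleep() period is an assumption made for
// illustration; the driver's actual loop body is not reproduced in full here.
//
//   while (__e32_atomic_load_acq32(&self.iQueued))
//       {
//       __KTRACE_OPT(KDMA, Kern::Printf("Waiting for request to be dequeued"));
//       NKern::Sleep(1);
//       }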
517 const TInt DDmaTestSession::KMaxChunkSize;
519 TInt DDmaTestSession::RequestUserHandle(DThread* aThread, TOwnerType aType)
521 if (aType!=EOwnerThread || aThread!=iClient)
522 return KErrAccessDenied;
526 DDmaTestSession::DDmaTestSession()
527 : iClient(NULL), iDfcQ(NULL), iIsrCallbackDfcQ(NULL), iChunkBase(0), iChunk(NULL)
530 // called in thread critical section
531 TInt DDmaTestSession::DoCreate(TInt /*aUnit*/, const TDesC8* aInfo, const TVersion& /*aVer*/)
533 __NK_ASSERT_ALWAYS(iDfcQ == NULL);
534 __NK_ASSERT_ALWAYS(iIsrCallbackDfcQ == NULL);
536 TInt r = Kern::DynamicDfcQCreate(iDfcQ, KDFCThreadPriority, KDFCThreadName);
539 NKern::ThreadSetCpuAffinity((NThread*)(iDfcQ->iThread), KCpuAffinityAny);
541 r = Kern::DynamicDfcQCreate(iIsrCallbackDfcQ, KDFCThreadPriority, KIsrCbDfcThreadName);
544 NKern::ThreadSetCpuAffinity((NThread*)(iIsrCallbackDfcQ->iThread), KCpuAffinityAny);
546 iClient = &Kern::CurrentThread();
548 r = CreateSharedChunk();
552 DDmaTestSession::~DDmaTestSession()
554 //Destroy requests before channels
555 //or we will trigger an assertion
556 while(iClientDmaReqs.Count())
558 DestroyDmaRequestByIndex(0);
560 iClientDmaReqs.Close();
562 while(iChannels.Count())
564 CloseDmaChannelByIndex(0);
574 if (iIsrCallbackDfcQ)
576 iIsrCallbackDfcQ->Destroy();
581 Kern::ChunkClose(iChunk);
586 TInt DDmaTestSession::Request(TInt aFunction, TAny* a1, TAny* a2)
588 __NK_ASSERT_DEBUG(&Kern::CurrentThread() == iClient);
592 case RDmaSession::EOpenChannel:
594 TUint pslCookie = (TUint)a1;
595 TUint driverCookie = 0;
596 TInt r = OpenDmaChannel(pslCookie, driverCookie);
597 umemput32(a2, &driverCookie, sizeof(TAny*));
600 case RDmaSession::ECloseChannel:
602 TUint driverCookie = reinterpret_cast<TUint>(a1);
603 TInt r = CloseDmaChannelByCookie(driverCookie);
606 case RDmaSession::EChannelCaps:
608 TUint driverCookie = reinterpret_cast<TUint>(a1);
609 TPckgBuf<TDmacTestCaps> capsBuf;
610 TInt r = GetChannelCapsByCookie(driverCookie, capsBuf());
611 Kern::KUDesPut(*reinterpret_cast<TDes8*>(a2), capsBuf);
614 case RDmaSession::EPauseChannel:
616 TUint driverCookie = reinterpret_cast<TUint>(a1);
617 TInt r = PauseDmaChannelByCookie(driverCookie);
620 case RDmaSession::EResumeChannel:
622 TUint driverCookie = reinterpret_cast<TUint>(a1);
623 TInt r = ResumeDmaChannelByCookie(driverCookie);
626 case RDmaSession::EFragmentCount:
628 TUint requestCookie = reinterpret_cast<TUint>(a1);
629 TInt r = RequestFragmentCount(requestCookie);
632 case RDmaSession::ERequestOpen:
634 RDmaSession::TRequestCreateArgs createArgs(0, EFalse, 0);
635 TPckg<RDmaSession::TRequestCreateArgs> package(createArgs);
636 Kern::KUDesGet(package, *reinterpret_cast<TDes8*>(a1));
638 const TUint channelCookie = createArgs.iChannelCookie;
639 TUint requestCookie = 0;
641 TInt r = CreateDmaRequest(channelCookie, requestCookie, createArgs.iNewStyle, createArgs.iMaxFragmentSize);
643 umemput32(a2, &requestCookie, sizeof(TAny*));
646 case RDmaSession::ERequestClose:
648 const TUint requestCookie = reinterpret_cast<TUint>(a1);
649 return DestroyDmaRequestByCookie(requestCookie);
651 case RDmaSession::EFragmentLegacy:
652 case RDmaSession::EFragment:
654 TPckgBuf<RDmaSession::TFragmentArgs> argsBuff;
655 Kern::KUDesGet(argsBuff, *reinterpret_cast<TDes8*>(a1));
656 const TUint requestCookie = argsBuff().iRequestCookie;
658 //must remove constness as we actually need to
659 //convert the src and dst offsets to addresses
660 TDmaTransferArgs& transferArgs = const_cast<TDmaTransferArgs&>(argsBuff().iTransferArgs);
662 //convert address offsets into kernel virtual addresses
663 FixupTransferArgs(transferArgs);
665 TEST_ASSERT((TAddressParms(transferArgs).CheckRange(iChunkBase, iChunk->Size())));
667 TInt r = KErrGeneral;
673 case RDmaSession::EFragmentLegacy:
674 r = FragmentRequest(requestCookie, transferArgs, ETrue); break;
675 case RDmaSession::EFragment:
676 r = FragmentRequest(requestCookie, transferArgs, EFalse); break;
682 const TUint64 time = clock.ReadMicroSecs();
684 TUint64* const timePtr = argsBuff().iDurationMicroSecs;
687 umemput(timePtr, &time, sizeof(time));
691 case RDmaSession::EQueueRequest:
693 TPckgBuf<RDmaSession::TQueueArgs> argsBuff;
694 Kern::KUDesGet(argsBuff, *reinterpret_cast<TDes8*>(a1));
696 //this is an asynchronous request
697 const TUint requestCookie = argsBuff().iRequestCookie;
698 TRequestStatus* requestStatus = argsBuff().iStatus;
699 TCallbackRecord* callbackRec = argsBuff().iCallbackRecord;
700 TUint64* duration = argsBuff().iDurationMicroSecs;
702 TInt r = QueueRequest(requestCookie, requestStatus, callbackRec, duration);
705 Kern::RequestComplete(requestStatus, r);
709 case RDmaSession::EQueueRequestWithReque:
711 //TODO can common code with EQueueRequest be extracted?
712 TPckgBuf<RDmaSession::TQueueArgsWithReque> argsBuff;
713 Kern::KUDesGet(argsBuff, *reinterpret_cast<TDes8*>(a1));
715 //this is an asynchronous request
716 const TUint requestCookie = argsBuff().iRequestCookie;
717 TRequestStatus* requestStatus = argsBuff().iStatus;
718 TCallbackRecord* callbackRec = argsBuff().iCallbackRecord;
719 TUint64* duration = argsBuff().iDurationMicroSecs;
721 TInt r = KErrNotFound;
723 DClientDmaRequest* const request = RequestFromCookie(requestCookie);
726 argsBuff().iRequeSet.Fixup(iChunkBase);
727 //TODO reque args must be substituted in order to
728 //check the range. The original transfer args are not
729 //available when queue is called; they could,
730 //however, be stored within DClientDmaRequest
731 //TEST_ASSERT((argsBuff().iRequeSet.CheckRange(iChunkBase, iChunk->Size())));
732 request->AddRequeArgs(argsBuff().iRequeSet);
734 r = QueueRequest(requestCookie, requestStatus, callbackRec, duration);
739 Kern::RequestComplete(requestStatus, r);
743 case RDmaSession::EIsrRedoRequest:
745 TPckgBuf<RDmaSession::TIsrRedoReqArgs> argsBuff;
746 Kern::KUDesGet(argsBuff, *reinterpret_cast<TDes8*>(a1));
748 const TUint driverCookie = argsBuff().iDriverCookie;
749 const TUint32 srcAddr = argsBuff().iSrcAddr;
750 const TUint32 dstAddr = argsBuff().iDstAddr;
751 const TInt transferCount = argsBuff().iTransferCount;
752 const TUint32 pslRequestInfo = argsBuff().iPslRequestInfo;
753 const TBool isrCb = argsBuff().iIsrCb;
755 TInt r = IsrRedoRequestByCookie(driverCookie,srcAddr,dstAddr,transferCount,pslRequestInfo,isrCb);
758 case RDmaSession::EIsOpened:
760 TUint driverCookie = (TUint)a1;
761 TBool channelOpen = EFalse;
762 TInt r = ChannelIsOpenedByCookie(driverCookie,channelOpen);
763 umemput32(a2, &channelOpen, sizeof(TAny*));
766 case RDmaSession::EIsQueueEmpty:
768 TUint driverCookie = (TUint)a1;
769 TBool queueEmpty = EFalse;
770 TInt r = IsQueueEmptyByCookie(driverCookie,queueEmpty);
771 umemput32(a2, &queueEmpty, sizeof(TAny*));
774 case RDmaSession::ECancelAllChannel:
776 TUint driverCookie = reinterpret_cast<TUint>(a1);
777 TInt r = CancelAllByCookie(driverCookie);
780 case RDmaSession::EOpenSharedChunk:
782 return OpenSharedChunkHandle();
784 case RDmaSession::EGetTestInfo:
787 TPckgC<TDmaV2TestInfo> package(DmaTestInfoV2());
789 TPckgC<TDmaV2TestInfo> package(ConvertTestInfo(DmaTestInfo()));
791 Kern::KUDesPut(*reinterpret_cast<TDes8*>(a1), package);
795 Kern::PanicCurrentThread(KClientPanicCat, __LINE__);
800 TInt DDmaTestSession::OpenDmaChannel(TUint aPslCookie, TUint& aDriverCookie )
802 TDmaChannel::SCreateInfo info;
803 info.iCookie = aPslCookie;
805 info.iDfcPriority = 3;
806 info.iDesCount = 128;
808 TDmaChannel* channel = NULL;
810 //cs so thread can't be killed between
811 //opening channel and adding to array
812 NKern::ThreadEnterCS();
813 TInt r = TDmaChannel::Open(info, channel);
816 __NK_ASSERT_ALWAYS(channel);
818 __KTRACE_OPT(KDMA, Kern::Printf("OpenDmaChannel: channel@ 0x%08x", channel));
821 TInt err = iChannels.Append(channel);
824 aDriverCookie = reinterpret_cast<TUint>(channel);
832 NKern::ThreadLeaveCS();
837 TInt DDmaTestSession::CookieToChannelIndex(TUint aDriverCookie) const
839 const TInt r = iChannels.Find(reinterpret_cast<TDmaChannel*>(aDriverCookie));
843 __KTRACE_OPT(KDMA, Kern::Printf("CookieToChannelIndex: cookie 0x%08x not found!", aDriverCookie));
848 TInt DDmaTestSession::CookieToRequestIndex(TUint aRequestCookie) const
850 const TInt r = iClientDmaReqs.Find(reinterpret_cast<DClientDmaRequest*>(aRequestCookie));
854 __KTRACE_OPT(KDMA, Kern::Printf("CookieToRequestIndex: cookie 0x%08x not found!", aRequestCookie));
859 void DDmaTestSession::CloseDmaChannelByIndex(TInt aIndex)
861 __KTRACE_OPT(KDMA, Kern::Printf("CloseDmaChannelByIndex: %d", aIndex));
862 __NK_ASSERT_DEBUG(aIndex < iChannels.Count());
863 // cs so client thread can't be killed between removing channel from
864 // array and closing it.
865 NKern::ThreadEnterCS();
866 TDmaChannel* channel = iChannels[aIndex];
867 iChannels.Remove(aIndex);
869 NKern::ThreadLeaveCS();
872 TInt DDmaTestSession::CloseDmaChannelByCookie(TUint aDriverCookie)
874 __KTRACE_OPT(KDMA, Kern::Printf("CloseDmaChannelByCookie: 0x%08x", aDriverCookie));
875 const TInt index = CookieToChannelIndex(aDriverCookie);
879 CloseDmaChannelByIndex(index);
888 TInt DDmaTestSession::CancelAllByCookie(TUint aDriverCookie)
890 __KTRACE_OPT(KDMA, Kern::Printf("CancelAllByCookie: 0x%08x", aDriverCookie));
891 const TInt index = CookieToChannelIndex(aDriverCookie);
895 CancelAllByIndex(index);
904 void DDmaTestSession::CancelAllByIndex(TInt aIndex)
906 __KTRACE_OPT(KDMA, Kern::Printf("CancelAllByIndex: %d", aIndex));
907 __NK_ASSERT_DEBUG(aIndex < iChannels.Count());
909 TDmaChannel* channel = iChannels[aIndex];
910 iChannels.Remove(aIndex);
911 channel->CancelAll();
914 TInt DDmaTestSession::PauseDmaChannelByIndex(TInt aIndex)
916 __KTRACE_OPT(KDMA, Kern::Printf("PauseDmaChannelByIndex: %d", aIndex));
917 __NK_ASSERT_DEBUG(aIndex < iChannels.Count());
920 TDmaChannel* channel = iChannels[aIndex];
921 return channel->Pause();
923 return KErrNotSupported;
927 TInt DDmaTestSession::PauseDmaChannelByCookie(TUint aDriverCookie)
929 __KTRACE_OPT(KDMA, Kern::Printf("PauseDmaChannelByCookie: 0x%08x", aDriverCookie));
930 const TInt index = CookieToChannelIndex(aDriverCookie);
934 TInt r = PauseDmaChannelByIndex(index);
943 TInt DDmaTestSession::ResumeDmaChannelByIndex(TInt aIndex)
945 __KTRACE_OPT(KDMA, Kern::Printf("ResumeDmaChannelByIndex: %d", aIndex));
946 __NK_ASSERT_DEBUG(aIndex < iChannels.Count());
949 TDmaChannel* channel = iChannels[aIndex];
950 return channel->Resume();
952 return KErrNotSupported;
956 TInt DDmaTestSession::ResumeDmaChannelByCookie(TUint aDriverCookie)
958 __KTRACE_OPT(KDMA, Kern::Printf("ResumeDmaChannelByCookie: 0x%08x", aDriverCookie));
959 const TInt index = CookieToChannelIndex(aDriverCookie);
963 TInt r = ResumeDmaChannelByIndex(index);
972 TInt DDmaTestSession::IsrRedoRequestByCookie(TUint aDriverCookie,TUint32 aSrcAddr,TUint32 aDstAddr,TInt aTransferCount,TUint32 aPslRequestInfo,TBool aIsrCb)
974 __KTRACE_OPT(KDMA, Kern::Printf("IsrRedoRequestByCookie: 0x%08x", aDriverCookie));
975 const TInt index = CookieToChannelIndex(aDriverCookie);
979 TInt r = IsrRedoRequestByIndex(index,aSrcAddr,aDstAddr,aTransferCount,aPslRequestInfo,aIsrCb);
988 TInt DDmaTestSession::IsrRedoRequestByIndex(TInt aIndex,TUint32 aSrcAddr,TUint32 aDstAddr,TInt aTransferCount,TUint32 aPslRequestInfo,TBool aIsrCb)
990 __KTRACE_OPT(KDMA, Kern::Printf("IsrRedoRequestByIndex: %d", aIndex));
991 __NK_ASSERT_DEBUG(aIndex < iChannels.Count());
994 TDmaChannel* channel = iChannels[aIndex];
995 return channel->IsrRedoRequest(aSrcAddr,aDstAddr,aTransferCount,aPslRequestInfo,aIsrCb);
997 return KErrNotSupported;
1002 aChannelCaps will be set to "NULL" values
1004 TInt DDmaTestSession::GetChannelCapsByCookie(TUint aDriverCookie, TDmacTestCaps& aChannelCaps)
1006 SDmacCaps caps = {0,}; //initialise with NULL values
1007 TInt r = GetChannelCapsByCookie(aDriverCookie, caps);
1009 if(r == KErrNotSupported)
1011 //If we cannot query caps it means
1012 //that we are using the v1 driver.
1013 //We construct an empty TDmacTestCaps
1014 //but with an iPILVersion of 1
1015 const TDmacTestCaps nullCapsV1(caps, 1);
1016 aChannelCaps = nullCapsV1;
1019 else if(r == KErrNone)
1021 const TDmacTestCaps capsV2(caps, 2);
1022 aChannelCaps = capsV2;
1029 Will return the capabilities of the DMA channel.
1030 Querying SDmacCaps is not possible on V1 of the DMA framework.
1031 In that case an error of KErrNotSupported will be returned
1033 TInt DDmaTestSession::GetChannelCapsByCookie(TUint aDriverCookie, SDmacCaps& aChannelCaps)
1035 __KTRACE_OPT(KDMA, Kern::Printf("GetChannelCapsByCookie: 0x%08x", aDriverCookie));
1036 const TInt index = CookieToChannelIndex(aDriverCookie);
1040 aChannelCaps = iChannels[index]->DmacCaps();
1043 return KErrNotSupported;
1048 return KErrNotFound;
1052 TInt DDmaTestSession::IsQueueEmptyByCookie(TUint aDriverCookie, TBool& aQueueEmpty)
1054 __KTRACE_OPT(KDMA, Kern::Printf("IsQueueEmptyByCookie: 0x%08x", aDriverCookie));
1055 const TInt index = CookieToChannelIndex(aDriverCookie);
1059 aQueueEmpty=iChannels[index]->IsQueueEmpty();
1064 return KErrNotFound;
1068 TInt DDmaTestSession::ChannelIsOpenedByCookie(TUint aDriverCookie, TBool& aChannelOpen)
1070 __KTRACE_OPT(KDMA, Kern::Printf("ChannelIsOpenedByCookie: 0x%08x", aDriverCookie));
1071 const TInt index = CookieToChannelIndex(aDriverCookie);
1075 aChannelOpen=iChannels[index]->IsOpened();
1080 return KErrNotFound;
1084 TInt DDmaTestSession::CreateDmaRequest(TUint aChannelCookie, TUint& aRequestCookie, TBool aNewCallback, TInt aMaxFragmentSizeBytes)
1088 return KErrNotSupported;
1091 TInt channelIndex = CookieToChannelIndex(aChannelCookie);
1092 if(channelIndex < 0)
1093 return channelIndex;
1095 NKern::ThreadEnterCS();
1096 DClientDmaRequest* request = DClientDmaRequest::Construct(iClient, iIsrCallbackDfcQ, *iChannels[channelIndex], aNewCallback, aMaxFragmentSizeBytes);
1099 NKern::ThreadLeaveCS();
1100 return KErrNoMemory;
1103 TInt r = iClientDmaReqs.Append(request);
1106 aRequestCookie = reinterpret_cast<TUint>(request);
1112 NKern::ThreadLeaveCS();
1117 TInt DDmaTestSession::DestroyDmaRequestByCookie(TUint aRequestCookie)
1119 TInt requestIndex = CookieToRequestIndex(aRequestCookie);
1120 if(requestIndex < 0)
1121 return requestIndex;
1123 DestroyDmaRequestByIndex(requestIndex);
1128 void DDmaTestSession::DestroyDmaRequestByIndex(TInt aIndex)
1130 __KTRACE_OPT(KDMA, Kern::Printf("DestroyDmaRequestByIndex: %d", aIndex));
1131 __NK_ASSERT_DEBUG(aIndex < iClientDmaReqs.Count());
1132 NKern::ThreadEnterCS();
1134 DClientDmaRequest* request = iClientDmaReqs[aIndex];
1135 iClientDmaReqs.Remove(aIndex);
1138 NKern::ThreadLeaveCS();
1141 TInt DDmaTestSession::CreateSharedChunk()
1143 // Enter critical section so we can't die and leak the objects we are creating
1144 // I.e. the TChunkCleanup and DChunk (Shared Chunk)
1145 NKern::ThreadEnterCS();
1148 TChunkCreateInfo info;
1149 info.iType = TChunkCreateInfo::ESharedKernelSingle;
1150 info.iMaxSize = KMaxChunkSize;
1151 info.iMapAttr = EMapAttrFullyBlocking | EMapAttrUserRw;
1152 info.iOwnsMemory = ETrue;
1153 info.iDestroyedDfc = NULL;
1157 TInt r = Kern::ChunkCreate(info, chunk, iChunkBase, mapAttr);
1160 NKern::ThreadLeaveCS();
1164 // Commit contiguous physical RAM to the chunk (at offset 0)
1165 TUint32 physicalAddr;
1166 r = Kern::ChunkCommitContiguous(chunk,0,KMaxChunkSize, physicalAddr);
1169 // Commit failed so tidy-up...
1170 Kern::ChunkClose(chunk);
1177 // Can leave critical section now that we have saved pointers to created objects
1178 NKern::ThreadLeaveCS();
1183 TUint DDmaTestSession::OpenSharedChunkHandle()
1185 NKern::ThreadEnterCS();
1186 const TInt r = Kern::MakeHandleAndOpen(NULL, iChunk);
1187 NKern::ThreadLeaveCS();
1191 void DDmaTestSession::FixupTransferArgs(TDmaTransferArgs& aTransferArgs) const
1193 aTransferArgs.iSrcConfig.iAddr += iChunkBase;
1194 aTransferArgs.iDstConfig.iAddr += iChunkBase;
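// Worked example (illustrative values only): if the shared chunk is mapped
// at iChunkBase = 0xC8000000 and the client supplies iSrcConfig.iAddr = 0x0
// and iDstConfig.iAddr = 0x40000 as offsets into that chunk, the fixed-up
// arguments become 0xC8000000 and 0xC8040000 - kernel linear addresses that
// the TAddressParms(...).CheckRange(iChunkBase, iChunk->Size()) assertion in
// Request() can then validate.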
1198 static TInt FragmentCount(DDmaRequest* aRequest)
1201 for (SDmaDesHdr* pH = aRequest->iFirstHdr; pH != NULL; pH = pH->iNext)
1207 TInt DDmaTestSession::RequestFragmentCount(TUint aRequestCookie)
1209 TInt requestIndex = CookieToRequestIndex(aRequestCookie);
1210 if(requestIndex < 0)
1211 return requestIndex;
1213 TInt r = iClientDmaReqs[requestIndex]->FragmentCount();
1215 TInt r = FragmentCount(iClientDmaReqs[requestIndex]);
1221 TInt DDmaTestSession::FragmentRequest(TUint aRequestCookie, const TDmaTransferArgs& aTransferArgs, TBool aLegacy)
1223 __KTRACE_OPT(KDMA, Kern::Printf(">FragmentRequest: cookie=0x%08x, legacy=%d", aRequestCookie, aLegacy));
1224 TInt requestIndex = CookieToRequestIndex(aRequestCookie);
1225 if(requestIndex < 0)
1226 return requestIndex;
1228 TInt r = KErrNotSupported;
1231 // TODO we can extract the required info from the struct to
1233 TUint flags = KDmaMemSrc | KDmaIncSrc | KDmaMemDest | KDmaIncDest;
1235 const TUint src = aTransferArgs.iSrcConfig.iAddr;
1236 const TUint dst = aTransferArgs.iDstConfig.iAddr;
1237 r = iClientDmaReqs[requestIndex]->Fragment(src, dst, aTransferArgs.iTransferCount, flags, NULL);
1242 r = iClientDmaReqs[requestIndex]->Fragment(aTransferArgs);
1244 r = KErrNotSupported;
1251 Queue the request referred to by aRequestCookie
1253 @param aRequestCookie Client identifier for the DDmaRequest
1254 @param aStatus Pointer to the client's TRequestStatus
1255 @param aRecord Pointer to the client's TCallbackRecord
1257 - KErrNotFound - aRequestCookie was invalid
1258 - KErrNone - Success
1260 TInt DDmaTestSession::QueueRequest(TUint aRequestCookie, TRequestStatus* aStatus, TCallbackRecord* aRecord, TUint64* aDurationMicroSecs)
1262 __KTRACE_OPT(KDMA, Kern::Printf(">QueueRequest: 0x%08x", aRequestCookie));
1264 DClientDmaRequest* request = RequestFromCookie(aRequestCookie);
1266 return KErrNotFound;
1268 return request->Queue(aStatus, aRecord, aDurationMicroSecs);
1271 DClientDmaRequest* DDmaTestSession::RequestFromCookie(TUint aRequestCookie) const
1273 TInt requestIndex = CookieToRequestIndex(aRequestCookie);
1274 if(requestIndex < 0)
1277 return (iClientDmaReqs[requestIndex]);
1280 TDmaV2TestInfo DDmaTestSession::ConvertTestInfo(const TDmaTestInfo& aOldInfo) const
1282 TDmaV2TestInfo newInfo;
1283 newInfo.iMaxTransferSize = aOldInfo.iMaxTransferSize;
1284 newInfo.iMemAlignMask = aOldInfo.iMemAlignMask;
1285 newInfo.iMemMemPslInfo = aOldInfo.iMemMemPslInfo;
1287 newInfo.iMaxSbChannels = aOldInfo.iMaxSbChannels;
1288 for(TInt i=0; i<aOldInfo.iMaxSbChannels; i++)
1289 newInfo.iSbChannels[i] = aOldInfo.iSbChannels[i];
1291 newInfo.iMaxDbChannels = aOldInfo.iMaxDbChannels;
1292 for(TInt i=0; i<aOldInfo.iMaxDbChannels; i++)
1293 newInfo.iDbChannels[i] = aOldInfo.iDbChannels[i];
1295 newInfo.iMaxSgChannels = aOldInfo.iMaxSgChannels;
1296 for(TInt i=0; i<aOldInfo.iMaxSgChannels; i++)
1297 newInfo.iSgChannels[i] = aOldInfo.iSgChannels[i];
1299 //TODO will want to add initialisation for Asym channels
1300 //when these are available
1304 //////////////////////////////////////////////////////////////////////////////
1306 class DDmaTestFactory : public DLogicalDevice
1310 // from DLogicalDevice
1311 virtual ~DDmaTestFactory()
1313 __KTRACE_OPT(KDMA, Kern::Printf(">DDmaTestFactory::~DDmaTestFactory"));
1315 virtual TInt Install();
1316 virtual void GetCaps(TDes8& aDes) const;
1317 virtual TInt Create(DLogicalChannelBase*& aChannel);
1321 DDmaTestFactory::DDmaTestFactory()
1323 iVersion = TestDmaLddVersion();
1324 iParseMask = KDeviceAllowUnit; // no info, no PDD
1325 // iUnitsMask = 0; // Only one thing
1329 TInt DDmaTestFactory::Create(DLogicalChannelBase*& aChannel)
1331 aChannel=new DDmaTestSession;
1332 return aChannel ? KErrNone : KErrNoMemory;
1336 TInt DDmaTestFactory::Install()
1338 return SetName(&KTestDmaLddName);
1342 void DDmaTestFactory::GetCaps(TDes8& /*aDes*/) const
1346 //////////////////////////////////////////////////////////////////////////////
1348 DECLARE_STANDARD_LDD()
1350 return new DDmaTestFactory;