// Copyright (c) 2002-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// Test driver for DMA V2 framework
//
//

// Header names below were lost in extraction; <kernel/kern_priv.h> and
// <drivers/dma.h> are assumed.
#include <kernel/kern_priv.h>
#include <drivers/dma.h>
#include "d_dma2.h"

_LIT(KClientPanicCat, "D_DMA2");
_LIT(KDFCThreadName,"D_DMA_DFC_THREAD");
_LIT(KIsrCbDfcThreadName,"D_DMA_IsrCb_thread");
const TInt KDFCThreadPriority=26;

class TStopwatch
	{
public:
	TStopwatch()
		:iStart(0), iStop(0)
		{}

	void Start()
		{iStart = NKern::FastCounter();}

	void Stop()
		{
		iStop = NKern::FastCounter();

		__KTRACE_OPT(KDMA, Kern::Printf(">TStopwatch::Stop FastCounter ticks: iStart=0x%lx iStop=0x%lx", iStart, iStop));
		}

	TUint64 ReadMicroSecs() const
		{
#ifndef __SMP__
		TUint64 diff = 0;
		if(iStart > iStop)
			{
			diff = (KMaxTUint64 - iStart) + iStop;
			}
		else
			{
			diff = iStop - iStart;
			}
		return FastCountToMicroSecs(diff);
#else
		//TODO On SMP it is possible for the value returned from
		//NKern::FastCounter to depend on the current CPU (ie.
		//NaviEngine)
		//
		//One solution would be to tie DFCs and ISRs to the same
		//core as the client, but this would reduce the usefulness of
		//SMP testing.
		return 0;
#endif
		}
private:

	TUint64 FastCountToMicroSecs(TUint64 aCount) const
		{
		const TUint64 countsPerS = NKern::FastCounterFrequency();

		TUint64 timeuS = (aCount*1000000)/countsPerS;
		__KTRACE_OPT(KDMA, Kern::Printf(">TStopwatch::FastCountToMicroSecs FastCounter ticks: aCount=0x%lx countsPerS=0x%lx time=0x%lx", aCount, countsPerS, timeuS));
		return timeuS;
		}

	TUint64 iStart;
	TUint64 iStop;
	};

//////////////////////////////////////////////////////////////////////////////
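
// Illustrative usage sketch: TStopwatch wraps NKern::FastCounter() and is
// used below to time request fragmentation and transfer completion. A
// hypothetical kernel-side caller would do:
//
//   TStopwatch clock;
//   clock.Start();
//   // ... operation being timed ...
//   clock.Stop();
//   const TUint64 us = clock.ReadMicroSecs(); // returns 0 on SMP builds, see note above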

class DClientDmaRequest;
/**
Driver channel. Only accessible by a single client thread.
*/
class DDmaTestSession : public DLogicalChannelBase
	{
public:
	DDmaTestSession();
	virtual ~DDmaTestSession();
protected:
	// from DLogicalChannelBase
	virtual TInt DoCreate(TInt aUnit, const TDesC8* anInfo, const TVersion& aVer);
	virtual TInt Request(TInt aFunction, TAny* a1, TAny* a2);
	virtual TInt RequestUserHandle(DThread* aThread, TOwnerType aType);
private:
	TInt DoGetInfo(TAny* aInfo);

	TInt OpenDmaChannel(TUint aPslCookie, TUint& aDriverCookie);
	TInt CloseDmaChannelByCookie(TUint aDriverCookie);
	TInt PauseDmaChannelByCookie(TUint aDriverCookie);
	TInt ResumeDmaChannelByCookie(TUint aDriverCookie);
	TInt GetChannelCapsByCookie(TUint aDriverCookie, SDmacCaps& aChannelCaps);
	TInt GetChannelCapsByCookie(TUint aDriverCookie, TDmacTestCaps& aChannelCaps);
	TInt CancelAllByCookie(TUint aDriverCookie);
	TInt IsrRedoRequestByCookie(TUint aDriverCookie,TUint32 aSrcAddr,TUint32 aDstAddr,TInt aTransferCount,TUint32 aPslRequestInfo,TBool aIsrCb);
	TInt IsQueueEmptyByCookie(TUint aDriverCookie, TBool& aQueueEmpty);
	TInt ChannelIsOpenedByCookie(TUint aDriverCookie, TBool& aChannelOpen);
	void CloseDmaChannelByIndex(TInt aIndex);
	void CancelAllByIndex(TInt aIndex);
	TInt PauseDmaChannelByIndex(TInt aIndex);
	TInt ResumeDmaChannelByIndex(TInt aIndex);
	TInt IsrRedoRequestByIndex(TInt aIndex,TUint32 aSrcAddr,TUint32 aDstAddr,TInt aTransferCount,TUint32 aPslRequestInfo,TBool aIsrCb);
	TInt CreateSharedChunk();
	TUint OpenSharedChunkHandle();

	/**
	Creates a new kernel-side DMA request object, associated with a previously
	opened channel.

	@param aChannelCookie A channel cookie as returned by OpenDmaChannel
	@param aRequestCookie On success, a cookie by which the DMA request can be referred to
	@param aNewCallback   If true, a new-style DMA callback will be used
	*/
	TInt CreateDmaRequest(TUint aChannelCookie, TUint& aRequestCookie, TBool aNewCallback = EFalse, TInt aMaxFragmentSizeBytes=0);

	//TODO what happens if a client closes a channel that
	//it still has dma requests associated with?
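
	// Summary of the typical client call sequence handled via Request()
	// (derived from the handlers further below; listed here for orientation):
	//   OpenDmaChannel -> CreateDmaRequest -> FragmentRequest -> QueueRequest
	//   ... -> DestroyDmaRequestByCookie -> CloseDmaChannelByCookie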

	/**
	Destroys a previously created DMA request object
	*/
	TInt DestroyDmaRequestByCookie(TUint aRequestCookie);

	void DestroyDmaRequestByIndex(TInt aIndex);


	TInt CookieToChannelIndex(TUint aDriverCookie) const;
	TInt CookieToRequestIndex(TUint aRequestCookie) const;

	void FixupTransferArgs(TDmaTransferArgs& aTransferArgs) const;
	TInt FragmentRequest(TUint aRequestCookie, const TDmaTransferArgs& aTransferArgs, TBool aLegacy=ETrue);

	TInt QueueRequest(TUint aRequestCookie, TRequestStatus* aStatus, TCallbackRecord* aRecord, TUint64* aDurationMicroSecs);
	DClientDmaRequest* RequestFromCookie(TUint aRequestCookie) const;
	TInt RequestFragmentCount(TUint aRequestCookie);

	TDmaV2TestInfo ConvertTestInfo(const TDmaTestInfo& aOldInfo) const;
private:
	DThread* iClient;
	TDynamicDfcQue* iDfcQ;
	TDynamicDfcQue* iIsrCallbackDfcQ; // Will be used by requests which complete with an ISR callback
	static const TInt KMaxChunkSize = 8 * KMega;
	TLinAddr iChunkBase;
	DChunk* iChunk;

	RPointerArray<TDmaChannel> iChannels;
	RPointerArray<DClientDmaRequest> iClientDmaReqs;
	};


/**
Allows a TClientRequest to be associated with a DDmaRequest
*/
class DClientDmaRequest : public DDmaRequest
	{
public:
	static DClientDmaRequest* Construct(DThread* aClient, TDfcQue* const aDfcQ, TDmaChannel& aChannel, TBool aNewStyle=EFalse, TInt aMaxTransferSize=0);
	~DClientDmaRequest();

	TInt Queue(TRequestStatus* aRequestStatus, TCallbackRecord* aRecord, TUint64* aDurationMicroSecs);
	void AddRequeArgs(const TIsrRequeArgsSet& aRequeArgSet);

	TUint64 GetDuration()
		{return iStopwatch.ReadMicroSecs();}

protected:
	TInt Create();
	/** Construct with old style callback */
	DClientDmaRequest(DThread* aClient, TDfcQue* const aDfcQ, TDmaChannel& aChannel, TInt aMaxTransferSize);

	/** Construct with new style callback */
	DClientDmaRequest(DThread* aClient, TDfcQue* const aDfcQ, TDmaChannel& aChannel, TBool aNewStyle, TInt aMaxTransferSize);

private:
	static void CallbackOldStyle(TResult aResult, TAny* aRequest);
	static void Callback(TUint, TDmaResult, TAny*, SDmaDesHdr*);
	static void CompleteCallback(TAny* aRequest);

	void DoCallback(TUint, TDmaResult);
	TBool RedoRequest();

	//!< Used to return a TCallbackRecord and transfer time
	TClientDataRequest2<TCallbackRecord, TUint64>* iClientDataRequest;

	DThread* const iClient;
	TDfcQue* const iDfcQ; //!< Use the DDmaTestSession's dfc queue
	TDfc iDfc;

	TStopwatch iStopwatch;
	TIsrRequeArgsSet iIsrRequeArgSet;
	};
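
// DClientDmaRequest objects are created via the static Construct() helper
// below: the constructor selects the old- or new-style DDmaRequest callback,
// and the second-phase Create() then allocates the TClientDataRequest2 used
// to complete the client's TRequestStatus.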

DClientDmaRequest* DClientDmaRequest::Construct(DThread* aClient, TDfcQue* const aDfcQ, TDmaChannel& aChannel, TBool aNewStyle, TInt aMaxTransferSize)
	{
	DClientDmaRequest* dmaRequest = NULL;
	if(aNewStyle)
		{
#ifdef DMA_APIV2
		dmaRequest = new DClientDmaRequest(aClient, aDfcQ, aChannel, aNewStyle, aMaxTransferSize);
#else
		TEST_FAULT; // if a new-style dma request was requested it should have been caught earlier
#endif
		}
	else
		{
		dmaRequest = new DClientDmaRequest(aClient, aDfcQ, aChannel, aMaxTransferSize);
		}

	if(dmaRequest == NULL)
		{
		return dmaRequest;
		}

	const TInt r = dmaRequest->Create();
	if(r != KErrNone)
		{
		delete dmaRequest;
		dmaRequest = NULL;
		}
	return dmaRequest;
	}

DClientDmaRequest::DClientDmaRequest(DThread* aClient, TDfcQue* const aDfcQ, TDmaChannel& aChannel, TInt aMaxFragmentSize)
	:DDmaRequest(aChannel, &CallbackOldStyle, this, aMaxFragmentSize),
	iClientDataRequest(NULL),
	iClient(aClient),
	iDfcQ(aDfcQ),
	iDfc(CompleteCallback,NULL, iDfcQ, KMaxDfcPriority)
	{
	}
#ifdef DMA_APIV2
DClientDmaRequest::DClientDmaRequest(DThread* aClient, TDfcQue* const aDfcQ, TDmaChannel& aChannel, TBool /*aNewStyle*/, TInt aMaxFragmentSize)
	:DDmaRequest(aChannel, &Callback, this, aMaxFragmentSize),
	iClientDataRequest(NULL),
	iClient(aClient),
	iDfcQ(aDfcQ),
	iDfc(CompleteCallback,NULL, iDfcQ, KMaxDfcPriority)
	{
	}
#endif

TInt DClientDmaRequest::Create()
	{
	return Kern::CreateClientDataRequest2(iClientDataRequest);
	}

DClientDmaRequest::~DClientDmaRequest()
	{
	__KTRACE_OPT(KDMA, Kern::Printf(">DClientDmaRequest::~DClientDmaRequest"));
	if(iClientDataRequest)
		{
		Kern::DestroyClientRequest(iClientDataRequest);
		}
	}

/**
Queue the DClientDmaRequest.

@param aRequestStatus Pointer to the client's request status
@param aRecord Pointer to the user's TCallbackRecord, may be null
@return
   - KErrInUse The client request is in use
   - KErrNone  Success
*/
TInt DClientDmaRequest::Queue(TRequestStatus* aRequestStatus, TCallbackRecord* aRecord, TUint64* aDurationMicroSecs)
	{
	__NK_ASSERT_ALWAYS(aRecord);
	__NK_ASSERT_ALWAYS(aDurationMicroSecs);

	//erase results from last transfer
	iClientDataRequest->Data1().Reset();
	iClientDataRequest->SetDestPtr1(aRecord);

	iClientDataRequest->SetDestPtr2(aDurationMicroSecs);


	TInt r = iClientDataRequest->SetStatus(aRequestStatus);
	if(r != KErrNone)
		{
		return r;
		}

	iStopwatch.Start();
#ifdef DMA_APIV2
	r = DDmaRequest::Queue();
#else
	// old version of Queue() did not return an error code
	DDmaRequest::Queue();
	r = KErrNone;
#endif

	return r;
	}

void DClientDmaRequest::AddRequeArgs(const TIsrRequeArgsSet& aRequeArgSet)
	{
	iIsrRequeArgSet = aRequeArgSet;
	}

/**
If a transfer-complete callback in ISR context is received this will be
called to redo the request with the first entry in the array.

@return ETrue if the redo was successful - indicates that another callback is coming
*/
TBool DClientDmaRequest::RedoRequest()
	{
	TIsrRequeArgs args = iIsrRequeArgSet.GetArgs();
	const TInt r = args.Call(iChannel);
	TCallbackRecord& record = iClientDataRequest->Data1();
	record.IsrRedoResult(r);
	return (r == KErrNone);
	}


/**
Calls TDmaChannel::IsrRedoRequest on aChannel
with this object's parameters
*/
TInt TIsrRequeArgs::Call(TDmaChannel& aChannel)
	{
#ifdef DMA_APIV2
	return aChannel.IsrRedoRequest(iSrcAddr, iDstAddr, iTransferCount, iPslRequestInfo, iIsrCb);
#else
	TEST_FAULT;
	return KErrNotSupported;
#endif
	}
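
// ISR re-queue flow as exercised by the tests: AddRequeArgs() stores a set of
// TIsrRequeArgs before the request is queued. Each time the transfer completes
// with an ISR callback, RedoRequest() takes the next entry and re-queues it on
// the channel via TDmaChannel::IsrRedoRequest, so one queued request can drive
// a chain of transfers without returning to thread context.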

/**
Check that both source and destination of ISR reque args will
lie within the range specified by aStart and aSize.

@param aStart The linear base address of the region
@param aSize  The size of the region
*/
TBool TIsrRequeArgs::CheckRange(TLinAddr aStart, TUint aSize) const
	{
	TUint physStart = Epoc::LinearToPhysical(aStart);
	TEST_ASSERT(physStart != KPhysAddrInvalid);

	TAddrRange chunk(physStart, aSize);
	TBool sourceOk = (iSrcAddr == KPhysAddrInvalid) ? ETrue : chunk.Contains(SourceRange());

	TBool destOk = (iDstAddr == KPhysAddrInvalid) ? ETrue : chunk.Contains(DestRange());

	return sourceOk && destOk;
	}

TBool TIsrRequeArgsSet::CheckRange(TLinAddr aAddr, TUint aSize) const
	{
	// Check every stored requeue argument (member names iCount/iRequeArgs are
	// assumed from d_dma2.h; the loop body was lost in extraction)
	for(TInt i=0; i<iCount; i++)
		{
		if(!iRequeArgs[i].CheckRange(aAddr, aSize))
			return EFalse;
		}
	return ETrue;
	}

/**
The original, old-style callback. Called in thread context (from a DFC).
*/
void DClientDmaRequest::CallbackOldStyle(TResult aResult, TAny* aArg)
	{
	__KTRACE_OPT(KDMA, Kern::Printf(">DClientDmaRequest::CallBackOldStyle: TResult result=%d", aResult));
	TEST_ASSERT(aResult != EBadResult);
	//translate result code
	const TDmaResult result = (aResult == EOk) ? EDmaResultOK : EDmaResultError;

	//call the new-style callback
	Callback(EDmaCallbackRequestCompletion, result, aArg, NULL);
	}


/**
The new style callback called by the DMA framework.
May be called in either thread or ISR context.
*/
void DClientDmaRequest::Callback(TUint aCallbackType, TDmaResult aResult, TAny* aArg, SDmaDesHdr* aHdr)
	{
	const TInt context = NKern::CurrentContext();
	__KTRACE_OPT(KDMA, Kern::Printf(">DClientDmaRequest::CallBack: TDmaResult result = %d, NKern::TContext context = %d", aResult, context));

	DClientDmaRequest& self = *reinterpret_cast<DClientDmaRequest*>(aArg);
	self.DoCallback(aCallbackType, aResult);

	// decide if callback is complete
	const TBool transferComplete = aCallbackType & EDmaCallbackRequestCompletion;
	if(!transferComplete)
		{
		return;
		}

	// If there are reque args then redo this request;
	// another callback would then be expected.
	// Requests can only be re-queued in ISR context, but we
	// do not check that here as it is up to the client to get
	// it right - also, we want to test that the PIL catches this
	// error
	if(!self.iIsrRequeArgSet.IsEmpty())
		{
		// If the redo call was successful, return and wait for the next callback
		if(self.RedoRequest())
			return;
		}

	switch(context)
		{
	case NKern::EThread:
		{
		CompleteCallback(aArg);
		break;
		}
	case NKern::EInterrupt:
		{
		self.iDfc.iPtr = aArg;
		self.iDfc.Add();
		break;
		}
	case NKern::EIDFC: //fall-through
	case NKern::EEscaped:
	default:
		TEST_FAULT;
		}
	}
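
// Completion path: Callback() runs DoCallback() in whatever context the PIL
// delivers it, but the client is only signalled from thread context - either
// directly (thread context) or by queuing iDfc, which runs CompleteCallback()
// on the session's ISR-callback DFC queue.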

/**
Log the results of the callback. May be called in either thread or ISR context.
*/
void DClientDmaRequest::DoCallback(TUint aCallbackType, TDmaResult aResult)
	{
	iStopwatch.Stop(); //successive calls will simply overwrite the stop time

	// This will always be done whether the client requested a
	// callback record or not
	TCallbackRecord& record = iClientDataRequest->Data1();
	record.ProcessCallback(aCallbackType, aResult);
	}

/**
This function may either be called directly or queued as a DFC
*/
void DClientDmaRequest::CompleteCallback(TAny* aArg)
	{
	__KTRACE_OPT(KDMA, Kern::Printf(">DClientDmaRequest::CompleteCallBack thread %O", &Kern::CurrentThread()));
	__ASSERT_NOT_ISR;

	DClientDmaRequest& self = *reinterpret_cast<DClientDmaRequest*>(aArg);

	self.iClientDataRequest->Data2() = self.iStopwatch.ReadMicroSecs();

	//Assert that we called SetRequestStatus on this object before
	//queueing
	__NK_ASSERT_DEBUG(self.iClientDataRequest->IsReady());

	// This is an inelegant, temporary solution to the following problem:
	//
	// If a dma request completes with an ISR callback the test
	// framework will queue this function as a DFC which
	// will then signal the user-side client. As a consequence of
	// this the user-side client may then decide to destroy this
	// request. However, until the DMA framework's DFC has run
	// and called OnDeque() on this request, it is still considered as
	// queued. Since it is possible that this DFC could run
	// before the DMA fw's DFC, this request could get destroyed while
	// it is still queued, triggering a PIL assertion.
	//
	// The real fix is likely to be for the PIL to call the callback
	// twice, but with different arguments, once to announce the
	// ISR and again to announce the dequeue.
	//
	// Here we poll and wait for this request to be dequeued. Note,
	// this DFC is currently run on a separate DFC queue, otherwise
	// it could get deadlocked.
	// An alternative to polling would be to use DCondVar,
	// but that would require PIL modification.

	if(NKern::CurrentThread() == self.iDfcQ->iThread)
		{
		// Only need to poll if we aren't on the channel's DFC queue
		for(;;)
			{
			// once the request has been unqueued it
			// can only be queued again by the client
			const TBool queued = __e32_atomic_load_acq32(&self.iQueued);
			if(!queued)
				break;
			__KTRACE_OPT(KDMA, Kern::Printf("Waiting for request to be dequeued"));
			NKern::Sleep(10);
			}
		}
	else
		{
		// If we are on the channel's DFCQ we should be dequeued
		// already
		__NK_ASSERT_DEBUG(!__e32_atomic_load_acq32(&self.iQueued));
		}

	// We can always complete with KErrNone, the actual DMA result is
	// logged in the TCallbackRecord
	Kern::QueueRequestComplete(self.iClient, self.iClientDataRequest, KErrNone);
	}

const TInt DDmaTestSession::KMaxChunkSize;

TInt DDmaTestSession::RequestUserHandle(DThread* aThread, TOwnerType aType)
	{
	if (aType!=EOwnerThread || aThread!=iClient)
		return KErrAccessDenied;
	return KErrNone;
	}

DDmaTestSession::DDmaTestSession()
	: iClient(NULL), iDfcQ(NULL), iIsrCallbackDfcQ(NULL), iChunkBase(0), iChunk(NULL)
	{}

// called in thread critical section
TInt DDmaTestSession::DoCreate(TInt /*aUnit*/, const TDesC8* aInfo, const TVersion& /*aVer*/)
	{
	__NK_ASSERT_ALWAYS(iDfcQ == NULL);
	__NK_ASSERT_ALWAYS(iIsrCallbackDfcQ == NULL);

	TInt r = Kern::DynamicDfcQCreate(iDfcQ, KDFCThreadPriority, KDFCThreadName);
	if (r != KErrNone)
		return r;
	NKern::ThreadSetCpuAffinity((NThread*)(iDfcQ->iThread), KCpuAffinityAny);

	r = Kern::DynamicDfcQCreate(iIsrCallbackDfcQ, KDFCThreadPriority, KIsrCbDfcThreadName);
	if (r != KErrNone)
		return r;
	NKern::ThreadSetCpuAffinity((NThread*)(iIsrCallbackDfcQ->iThread), KCpuAffinityAny);

	iClient = &Kern::CurrentThread();

	r = CreateSharedChunk();
	return r;
	}

DDmaTestSession::~DDmaTestSession()
	{
	//Destroy requests before channels
	//or we will trigger an assertion
	while(iClientDmaReqs.Count())
		{
		DestroyDmaRequestByIndex(0);
		}
	iClientDmaReqs.Close();

	while(iChannels.Count())
		{
		CloseDmaChannelByIndex(0);
		}
	iChannels.Close();


	if (iDfcQ)
		{
		iDfcQ->Destroy();
		}

	if (iIsrCallbackDfcQ)
		{
		iIsrCallbackDfcQ->Destroy();
		}

	if(iChunk)
		{
		Kern::ChunkClose(iChunk);
		iChunk = NULL;
		}
	}

TInt DDmaTestSession::Request(TInt aFunction, TAny* a1, TAny* a2)
	{
	__NK_ASSERT_DEBUG(&Kern::CurrentThread() == iClient);

	switch (aFunction)
		{
	case RDmaSession::EOpenChannel:
		{
		TUint pslCookie = (TUint)a1;
		TUint driverCookie = 0;
		TInt r = OpenDmaChannel(pslCookie, driverCookie);
		umemput32(a2, &driverCookie, sizeof(TAny*));
		return r;
		}
	case RDmaSession::ECloseChannel:
		{
		TUint driverCookie = reinterpret_cast<TUint>(a1);
		TInt r = CloseDmaChannelByCookie(driverCookie);
		return r;
		}
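	// The remaining cases follow a common pattern: scalar arguments arrive in
	// a1/a2, structured arguments are copied in from user side with
	// Kern::KUDesGet() into a package buffer, and results are written back
	// with Kern::KUDesPut() or umemput32().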
	case RDmaSession::EChannelCaps:
		{
		TUint driverCookie = reinterpret_cast<TUint>(a1);
		TPckgBuf<TDmacTestCaps> capsBuf;
		TInt r = GetChannelCapsByCookie(driverCookie, capsBuf());
		Kern::KUDesPut(*reinterpret_cast<TDes8*>(a2), capsBuf);
		return r;
		}
	case RDmaSession::EPauseChannel:
		{
		TUint driverCookie = reinterpret_cast<TUint>(a1);
		TInt r = PauseDmaChannelByCookie(driverCookie);
		return r;
		}
	case RDmaSession::EResumeChannel:
		{
		TUint driverCookie = reinterpret_cast<TUint>(a1);
		TInt r = ResumeDmaChannelByCookie(driverCookie);
		return r;
		}
	case RDmaSession::EFragmentCount:
		{
		TUint requestCookie = reinterpret_cast<TUint>(a1);
		TInt r = RequestFragmentCount(requestCookie);
		return r;
		}
	case RDmaSession::ERequestOpen:
		{
		RDmaSession::TRequestCreateArgs createArgs(0, EFalse, 0);
		TPckg<RDmaSession::TRequestCreateArgs> package(createArgs);
		Kern::KUDesGet(package, *reinterpret_cast<TDes8*>(a1));

		const TUint channelCookie = createArgs.iChannelCookie;
		TUint requestCookie = 0;

		TInt r = CreateDmaRequest(channelCookie, requestCookie, createArgs.iNewStyle, createArgs.iMaxFragmentSize);

		umemput32(a2, &requestCookie, sizeof(TAny*));
		return r;
		}
	case RDmaSession::ERequestClose:
		{
		const TUint requestCookie = reinterpret_cast<TUint>(a1);
		return DestroyDmaRequestByCookie(requestCookie);
		}
	case RDmaSession::EFragmentLegacy:
	case RDmaSession::EFragment:
		{
		// Package buffer type assumed from d_dma2.h (RDmaSession::TFragmentArgs)
		TPckgBuf<RDmaSession::TFragmentArgs> argsBuff;
		Kern::KUDesGet(argsBuff, *reinterpret_cast<TDes8*>(a1));
		const TUint requestCookie = argsBuff().iRequestCookie;

		//must remove constness as we actually need to
		//convert the src and dst offsets to addresses
		TDmaTransferArgs& transferArgs = const_cast<TDmaTransferArgs&>(argsBuff().iTransferArgs);

		//convert address offsets in to kernel virtual addresses
		FixupTransferArgs(transferArgs);

		TEST_ASSERT((TAddressParms(transferArgs).CheckRange(iChunkBase, iChunk->Size())));

		TInt r = KErrGeneral;

		TStopwatch clock;
		clock.Start();
		switch (aFunction)
			{
		case RDmaSession::EFragmentLegacy:
			r = FragmentRequest(requestCookie, transferArgs, ETrue); break;
		case RDmaSession::EFragment:
			r = FragmentRequest(requestCookie, transferArgs, EFalse); break;
		default:
			TEST_FAULT;
			}
		clock.Stop();

		const TUint64 time = clock.ReadMicroSecs();

		TUint64* const timePtr = argsBuff().iDurationMicroSecs;
		if(timePtr)
			{
			umemput(timePtr, &time, sizeof(time));
			}
		return r;
		}
	case RDmaSession::EQueueRequest:
		{
		// Package buffer type assumed from d_dma2.h (RDmaSession::TQueueArgs)
		TPckgBuf<RDmaSession::TQueueArgs> argsBuff;
		Kern::KUDesGet(argsBuff, *reinterpret_cast<TDes8*>(a1));

		//this is an asynchronous request
		const TUint requestCookie = argsBuff().iRequestCookie;
		TRequestStatus* requestStatus = argsBuff().iStatus;
		TCallbackRecord* callbackRec = argsBuff().iCallbackRecord;
		TUint64* duration = argsBuff().iDurationMicroSecs;

		TInt r = QueueRequest(requestCookie, requestStatus, callbackRec, duration);
		if(r != KErrNone)
			{
			Kern::RequestComplete(requestStatus, r);
			}
		return r;
		}
	case RDmaSession::EQueueRequestWithReque:
		{
		//TODO can common code with EQueueRequest be extracted?
		// Package buffer type assumed from d_dma2.h (RDmaSession::TQueueArgsWithReque)
		TPckgBuf<RDmaSession::TQueueArgsWithReque> argsBuff;
		Kern::KUDesGet(argsBuff, *reinterpret_cast<TDes8*>(a1));

		//this is an asynchronous request
		const TUint requestCookie = argsBuff().iRequestCookie;
		TRequestStatus* requestStatus = argsBuff().iStatus;
		TCallbackRecord* callbackRec = argsBuff().iCallbackRecord;
		TUint64* duration = argsBuff().iDurationMicroSecs;

		TInt r = KErrNotFound;

		DClientDmaRequest* const request = RequestFromCookie(requestCookie);
		if(request != NULL)
			{
			argsBuff().iRequeSet.Fixup(iChunkBase);
			//TODO reque args must be substituted in order to
			//check the range. The original transfer args are not
			//available when queue is called, they could
			//however be stored within DClientDmaRequest
			//TEST_ASSERT((argsBuff().iRequeSet.CheckRange(iChunkBase, iChunk->Size())));
			request->AddRequeArgs(argsBuff().iRequeSet);

			r = QueueRequest(requestCookie, requestStatus, callbackRec, duration);
			}

		if(r != KErrNone)
			{
			Kern::RequestComplete(requestStatus, r);
			}
		return r;
		}
	case RDmaSession::EIsrRedoRequest:
		{
		// Package buffer type assumed from d_dma2.h (RDmaSession::TIsrRedoReqArgs)
		TPckgBuf<RDmaSession::TIsrRedoReqArgs> argsBuff;
		Kern::KUDesGet(argsBuff, *reinterpret_cast<TDes8*>(a1));

		const TUint driverCookie = argsBuff().iDriverCookie;
		const TUint32 srcAddr = argsBuff().iSrcAddr;
		const TUint32 dstAddr = argsBuff().iDstAddr;
		const TInt transferCount = argsBuff().iTransferCount;
		const TUint32 pslRequestInfo = argsBuff().iPslRequestInfo;
		const TBool isrCb = argsBuff().iIsrCb;

		TInt r = IsrRedoRequestByCookie(driverCookie,srcAddr,dstAddr,transferCount,pslRequestInfo,isrCb);
		return r;
		}
	case RDmaSession::EIsOpened:
		{
		TUint driverCookie = (TUint)a1;
		TBool channelOpen = EFalse;
		TInt r = ChannelIsOpenedByCookie(driverCookie,channelOpen);
		umemput32(a2, &channelOpen, sizeof(TAny*));
		return r;
		}
	case RDmaSession::EIsQueueEmpty:
		{
		TUint driverCookie = (TUint)a1;
		TBool queueEmpty = EFalse;
		TInt r = IsQueueEmptyByCookie(driverCookie,queueEmpty);
		umemput32(a2, &queueEmpty, sizeof(TAny*));
		return r;
		}
	case RDmaSession::ECancelAllChannel:
		{
		TUint driverCookie = reinterpret_cast<TUint>(a1);
		TInt r = CancelAllByCookie(driverCookie);
		return r;
		}
	case RDmaSession::EOpenSharedChunk:
		{
		return OpenSharedChunkHandle();
		}
	case RDmaSession::EGetTestInfo:
		{
#ifdef DMA_APIV2
		TPckgC<TDmaV2TestInfo> package(DmaTestInfoV2());
#else
		TPckgC<TDmaV2TestInfo> package(ConvertTestInfo(DmaTestInfo()));
#endif
		Kern::KUDesPut(*reinterpret_cast<TDes8*>(a1), package);
		return KErrNone;
		}
	default:
		Kern::PanicCurrentThread(KClientPanicCat, __LINE__);
		return KErrGeneral;
		}
	}
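
// Cookie scheme used by the handlers above: a "driver cookie" (or "request
// cookie") handed back to user side is simply the kernel-side address of the
// TDmaChannel / DClientDmaRequest object. CookieToChannelIndex() and
// CookieToRequestIndex() validate a cookie by looking it up in the session's
// pointer arrays rather than dereferencing it directly.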

TInt DDmaTestSession::OpenDmaChannel(TUint aPslCookie, TUint& aDriverCookie)
	{
	TDmaChannel::SCreateInfo info;
	info.iCookie = aPslCookie;
	info.iDfcQ = iDfcQ;
	info.iDfcPriority = 3;
	info.iDesCount = 128;

	TDmaChannel* channel = NULL;

	//cs so thread can't be killed between
	//opening channel and adding to array
	NKern::ThreadEnterCS();
	TInt r = TDmaChannel::Open(info, channel);
	if(KErrNone == r)
		{
		__NK_ASSERT_ALWAYS(channel);

		__KTRACE_OPT(KDMA, Kern::Printf("OpenDmaChannel: channel@ 0x%08x", channel));


		TInt err = iChannels.Append(channel);
		if(KErrNone == err)
			{
			aDriverCookie = reinterpret_cast<TUint>(channel);
			}
		else
			{
			channel->Close();
			r = KErrNoMemory;
			}
		}
	NKern::ThreadLeaveCS();

	return r;
	}

TInt DDmaTestSession::CookieToChannelIndex(TUint aDriverCookie) const
	{
	const TInt r = iChannels.Find(reinterpret_cast<TDmaChannel*>(aDriverCookie));

	if(r < 0)
		{
		__KTRACE_OPT(KDMA, Kern::Printf("CookieToChannelIndex: cookie 0x%08x not found!", aDriverCookie));
		}
	return r;
	}

TInt DDmaTestSession::CookieToRequestIndex(TUint aRequestCookie) const
	{
	const TInt r = iClientDmaReqs.Find(reinterpret_cast<DClientDmaRequest*>(aRequestCookie));

	if(r < 0)
		{
		__KTRACE_OPT(KDMA, Kern::Printf("CookieToRequestIndex: cookie 0x%08x not found!", aRequestCookie));
		}
	return r;
	}

void DDmaTestSession::CloseDmaChannelByIndex(TInt aIndex)
	{
	__KTRACE_OPT(KDMA, Kern::Printf("CloseDmaChannelByIndex: %d", aIndex));
	__NK_ASSERT_DEBUG(aIndex < iChannels.Count());
	// cs so client thread can't be killed between removing channel from
	// array and closing it.
	NKern::ThreadEnterCS();
	TDmaChannel* channel = iChannels[aIndex];
	iChannels.Remove(aIndex);
	channel->Close();
	NKern::ThreadLeaveCS();
	}

TInt DDmaTestSession::CloseDmaChannelByCookie(TUint aDriverCookie)
	{
	__KTRACE_OPT(KDMA, Kern::Printf("CloseDmaChannelByCookie: 0x%08x", aDriverCookie));
	const TInt index = CookieToChannelIndex(aDriverCookie);

	if(index >= 0)
		{
		CloseDmaChannelByIndex(index);
		return KErrNone;
		}
	else
		{
		return KErrNotFound;
		}
	}

TInt DDmaTestSession::CancelAllByCookie(TUint aDriverCookie)
	{
	__KTRACE_OPT(KDMA, Kern::Printf("CancelAllByCookie: 0x%08x", aDriverCookie));
	const TInt index = CookieToChannelIndex(aDriverCookie);

	if(index >= 0)
		{
		CancelAllByIndex(index);
		return KErrNone;
		}
	else
		{
		return KErrNotFound;
		}
	}

void DDmaTestSession::CancelAllByIndex(TInt aIndex)
	{
	__KTRACE_OPT(KDMA, Kern::Printf("CancelAllByIndex: %d", aIndex));
	__NK_ASSERT_DEBUG(aIndex < iChannels.Count());

	TDmaChannel* channel = iChannels[aIndex];
	iChannels.Remove(aIndex);
	channel->CancelAll();
	}

TInt DDmaTestSession::PauseDmaChannelByIndex(TInt aIndex)
	{
	__KTRACE_OPT(KDMA, Kern::Printf("PauseDmaChannelByIndex: %d", aIndex));
	__NK_ASSERT_DEBUG(aIndex < iChannels.Count());

#ifdef DMA_APIV2
	TDmaChannel* channel = iChannels[aIndex];
	return channel->Pause();
#else
	return KErrNotSupported;
#endif
	}

TInt DDmaTestSession::PauseDmaChannelByCookie(TUint aDriverCookie)
	{
	__KTRACE_OPT(KDMA, Kern::Printf("PauseDmaChannelByCookie: 0x%08x", aDriverCookie));
	const TInt index = CookieToChannelIndex(aDriverCookie);

	if(index >= 0)
		{
		TInt r = PauseDmaChannelByIndex(index);
		return r;
		}
	else
		{
		return KErrNotFound;
		}
	}

TInt DDmaTestSession::ResumeDmaChannelByIndex(TInt aIndex)
	{
	__KTRACE_OPT(KDMA, Kern::Printf("ResumeDmaChannelByIndex: %d", aIndex));
	__NK_ASSERT_DEBUG(aIndex < iChannels.Count());

#ifdef DMA_APIV2
	TDmaChannel* channel = iChannels[aIndex];
	return channel->Resume();
#else
	return KErrNotSupported;
#endif
	}

TInt DDmaTestSession::ResumeDmaChannelByCookie(TUint aDriverCookie)
	{
	__KTRACE_OPT(KDMA, Kern::Printf("ResumeDmaChannelByCookie: 0x%08x", aDriverCookie));
	const TInt index = CookieToChannelIndex(aDriverCookie);

	if(index >= 0)
		{
		TInt r = ResumeDmaChannelByIndex(index);
		return r;
		}
	else
		{
		return KErrNotFound;
		}
	}

TInt DDmaTestSession::IsrRedoRequestByCookie(TUint aDriverCookie,TUint32 aSrcAddr,TUint32 aDstAddr,TInt aTransferCount,TUint32 aPslRequestInfo,TBool aIsrCb)
	{
	__KTRACE_OPT(KDMA, Kern::Printf("IsrRedoRequestByCookie: 0x%08x", aDriverCookie));
	const TInt index = CookieToChannelIndex(aDriverCookie);

	if(index >= 0)
		{
		TInt r = IsrRedoRequestByIndex(index,aSrcAddr,aDstAddr,aTransferCount,aPslRequestInfo,aIsrCb);
		return r;
		}
	else
		{
		return KErrNotFound;
		}
	}

TInt DDmaTestSession::IsrRedoRequestByIndex(TInt aIndex,TUint32 aSrcAddr,TUint32 aDstAddr,TInt aTransferCount,TUint32 aPslRequestInfo,TBool aIsrCb)
	{
	__KTRACE_OPT(KDMA, Kern::Printf("IsrRedoRequestByIndex: %d", aIndex));
	__NK_ASSERT_DEBUG(aIndex < iChannels.Count());

#ifdef DMA_APIV2
	TDmaChannel* channel = iChannels[aIndex];
	return channel->IsrRedoRequest(aSrcAddr,aDstAddr,aTransferCount,aPslRequestInfo,aIsrCb);
#else
	return KErrNotSupported;
#endif
	}

/**
aChannelCaps will be set to "NULL" values
*/
TInt DDmaTestSession::GetChannelCapsByCookie(TUint aDriverCookie, TDmacTestCaps& aChannelCaps)
	{
	SDmacCaps caps = {0,}; //initialise with NULL values
	TInt r = GetChannelCapsByCookie(aDriverCookie, caps);

	if(r == KErrNotSupported)
		{
		//If we cannot query the caps it means
		//that we are using the v1 driver;
		//we construct an empty TDmacTestCaps
		//but with an iPILVersion of 1
		const TDmacTestCaps nullCapsV1(caps, 1);
		aChannelCaps = nullCapsV1;
		r = KErrNone;
		}
	else if(r == KErrNone)
		{
		const TDmacTestCaps capsV2(caps, 2);
		aChannelCaps = capsV2;
		}

	return r;
	}

/**
Will return the capabilities of the DMA channel.
Querying SDmacCaps is not possible on V1 of the DMA framework;
in that case an error of KErrNotSupported will be returned.
*/
TInt DDmaTestSession::GetChannelCapsByCookie(TUint aDriverCookie, SDmacCaps& aChannelCaps)
	{
	__KTRACE_OPT(KDMA, Kern::Printf("GetChannelCapsByCookie: 0x%08x", aDriverCookie));
	const TInt index = CookieToChannelIndex(aDriverCookie);
	if(index >= 0)
		{
#ifdef DMA_APIV2
		aChannelCaps = iChannels[index]->DmacCaps();
		return KErrNone;
#else
		return KErrNotSupported;
#endif
		}
	else
		{
		return KErrNotFound;
		}
	}

TInt DDmaTestSession::IsQueueEmptyByCookie(TUint aDriverCookie, TBool& aQueueEmpty)
	{
	__KTRACE_OPT(KDMA, Kern::Printf("IsQueueEmptyByCookie: 0x%08x", aDriverCookie));
	const TInt index = CookieToChannelIndex(aDriverCookie);

	if(index >= 0)
		{
		aQueueEmpty=iChannels[index]->IsQueueEmpty();
		return KErrNone;
		}
	else
		{
		return KErrNotFound;
		}
	}

TInt DDmaTestSession::ChannelIsOpenedByCookie(TUint aDriverCookie, TBool& aChannelOpen)
	{
	__KTRACE_OPT(KDMA, Kern::Printf("ChannelIsOpenedByCookie: 0x%08x", aDriverCookie));
	const TInt index = CookieToChannelIndex(aDriverCookie);

	if(index >= 0)
		{
		aChannelOpen=iChannels[index]->IsOpened();
		return KErrNone;
		}
	else
		{
		return KErrNotFound;
		}
	}

TInt DDmaTestSession::CreateDmaRequest(TUint aChannelCookie, TUint& aRequestCookie, TBool aNewCallback, TInt aMaxFragmentSizeBytes)
	{
#ifndef DMA_APIV2
	if(aNewCallback)
		return KErrNotSupported;
#endif

	TInt channelIndex = CookieToChannelIndex(aChannelCookie);
	if(channelIndex < 0)
		return channelIndex;

	NKern::ThreadEnterCS();
	DClientDmaRequest* request = DClientDmaRequest::Construct(iClient, iIsrCallbackDfcQ, *iChannels[channelIndex], aNewCallback, aMaxFragmentSizeBytes);
	if(request == NULL)
		{
		NKern::ThreadLeaveCS();
		return KErrNoMemory;
		}

	TInt r = iClientDmaReqs.Append(request);
	if(r == KErrNone)
		{
		aRequestCookie = reinterpret_cast<TUint>(request);
		}
	else
		{
		delete request;
		}
	NKern::ThreadLeaveCS();

	return r;
	}

TInt DDmaTestSession::DestroyDmaRequestByCookie(TUint aRequestCookie)
	{
	TInt requestIndex = CookieToRequestIndex(aRequestCookie);
	if(requestIndex < 0)
		return requestIndex;

	DestroyDmaRequestByIndex(requestIndex);

	return KErrNone;
	}

void DDmaTestSession::DestroyDmaRequestByIndex(TInt aIndex)
	{
	__KTRACE_OPT(KDMA, Kern::Printf("DestroyDmaRequestByIndex: %d", aIndex));
	__NK_ASSERT_DEBUG(aIndex < iClientDmaReqs.Count());
	NKern::ThreadEnterCS();

	DClientDmaRequest* request = iClientDmaReqs[aIndex];
	iClientDmaReqs.Remove(aIndex);
	delete request;

	NKern::ThreadLeaveCS();
	}
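
// Shared-chunk scheme: the session commits one physically contiguous shared
// chunk at construction time. User-side transfer arguments carry offsets into
// that chunk, and FixupTransferArgs() below turns them into kernel linear
// addresses by adding iChunkBase before a request is fragmented.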

TInt DDmaTestSession::CreateSharedChunk()
	{
	// Enter critical section so we can't die and leak the objects we are creating,
	// i.e. the TChunkCleanup and DChunk (Shared Chunk)
	NKern::ThreadEnterCS();

	// Create the chunk
	TChunkCreateInfo info;
	info.iType = TChunkCreateInfo::ESharedKernelSingle;
	info.iMaxSize = KMaxChunkSize;
	info.iMapAttr = EMapAttrFullyBlocking | EMapAttrUserRw;
	info.iOwnsMemory = ETrue;
	info.iDestroyedDfc = NULL;

	DChunk* chunk;
	TUint32 mapAttr;
	TInt r = Kern::ChunkCreate(info, chunk, iChunkBase, mapAttr);
	if(r!=KErrNone)
		{
		NKern::ThreadLeaveCS();
		return r;
		}

	// Map our device's memory into the chunk (at offset 0)
	TUint32 physicalAddr;
	r = Kern::ChunkCommitContiguous(chunk,0,KMaxChunkSize, physicalAddr);
	if(r!=KErrNone)
		{
		// Commit failed so tidy-up...
		Kern::ChunkClose(chunk);
		}
	else
		{
		iChunk = chunk;
		}

	// Can leave critical section now that we have saved pointers to created objects
	NKern::ThreadLeaveCS();

	return r;
	}

TUint DDmaTestSession::OpenSharedChunkHandle()
	{
	NKern::ThreadEnterCS();
	const TInt r = Kern::MakeHandleAndOpen(NULL, iChunk);
	NKern::ThreadLeaveCS();
	return r;
	}

void DDmaTestSession::FixupTransferArgs(TDmaTransferArgs& aTransferArgs) const
	{
	aTransferArgs.iSrcConfig.iAddr += iChunkBase;
	aTransferArgs.iDstConfig.iAddr += iChunkBase;
	}

#ifndef DMA_APIV2
static TInt FragmentCount(DDmaRequest* aRequest)
	{
	TInt count = 0;
	for (SDmaDesHdr* pH = aRequest->iFirstHdr; pH != NULL; pH = pH->iNext)
		count++;
	return count;
	}
#endif

TInt DDmaTestSession::RequestFragmentCount(TUint aRequestCookie)
	{
	TInt requestIndex = CookieToRequestIndex(aRequestCookie);
	if(requestIndex < 0)
		return requestIndex;
#ifdef DMA_APIV2
	TInt r = iClientDmaReqs[requestIndex]->FragmentCount();
#else
	TInt r = FragmentCount(iClientDmaReqs[requestIndex]);
#endif

	return r;
	}

TInt DDmaTestSession::FragmentRequest(TUint aRequestCookie, const TDmaTransferArgs& aTransferArgs, TBool aLegacy)
	{
	__KTRACE_OPT(KDMA, Kern::Printf(">FragmentRequest: cookie=0x%08x, legacy=%d", aRequestCookie, aLegacy));
	TInt requestIndex = CookieToRequestIndex(aRequestCookie);
	if(requestIndex < 0)
		return requestIndex;

	TInt r = KErrNotSupported;
	if(aLegacy)
		{
		// TODO we can extract the required info from the struct to
		// set flags
		TUint flags = KDmaMemSrc | KDmaIncSrc | KDmaMemDest | KDmaIncDest;

		const TUint src = aTransferArgs.iSrcConfig.iAddr;
		const TUint dst = aTransferArgs.iDstConfig.iAddr;
		r = iClientDmaReqs[requestIndex]->Fragment(src, dst, aTransferArgs.iTransferCount, flags, NULL);
		}
	else
		{
#ifdef DMA_APIV2
		r = iClientDmaReqs[requestIndex]->Fragment(aTransferArgs);
#else
		r = KErrNotSupported;
#endif
		}
	return r;
	}
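
// Note: the legacy Fragment() path above always uses mem-to-mem,
// address-incrementing flags; transfer-specific flags from TDmaTransferArgs
// are not propagated (see the TODO above).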

/**
Queue the request referred to by aRequestCookie

@param aRequestCookie Client identifier for the DDmaRequest
@param aStatus Pointer to the client's TRequestStatus
@param aRecord Pointer to the client's TCallbackRecord
@return
   - KErrNotFound aRequestCookie was invalid
   - KErrNone     Success
*/
TInt DDmaTestSession::QueueRequest(TUint aRequestCookie, TRequestStatus* aStatus, TCallbackRecord* aRecord, TUint64* aDurationMicroSecs)
	{
	__KTRACE_OPT(KDMA, Kern::Printf(">QueueRequest: 0x%08x", aRequestCookie));

	DClientDmaRequest* request = RequestFromCookie(aRequestCookie);
	if(request == NULL)
		return KErrNotFound;

	return request->Queue(aStatus, aRecord, aDurationMicroSecs);
	}

DClientDmaRequest* DDmaTestSession::RequestFromCookie(TUint aRequestCookie) const
	{
	TInt requestIndex = CookieToRequestIndex(aRequestCookie);
	if(requestIndex < 0)
		return NULL;

	return (iClientDmaReqs[requestIndex]);
	}

TDmaV2TestInfo DDmaTestSession::ConvertTestInfo(const TDmaTestInfo& aOldInfo) const
	{
	TDmaV2TestInfo newInfo;
	newInfo.iMaxTransferSize = aOldInfo.iMaxTransferSize;
	newInfo.iMemAlignMask = aOldInfo.iMemAlignMask;
	newInfo.iMemMemPslInfo = aOldInfo.iMemMemPslInfo;

	// Copy loop bodies below are reconstructed; channel-array field names are
	// assumed from TDmaTestInfo/TDmaV2TestInfo.
	newInfo.iMaxSbChannels = aOldInfo.iMaxSbChannels;
	for(TInt i=0; i<aOldInfo.iMaxSbChannels; i++)
		{
		newInfo.iSbChannels[i] = aOldInfo.iSbChannels[i];
		}

	newInfo.iMaxDbChannels = aOldInfo.iMaxDbChannels;
	for(TInt i=0; i<aOldInfo.iMaxDbChannels; i++)
		{
		newInfo.iDbChannels[i] = aOldInfo.iDbChannels[i];
		}

	newInfo.iMaxSgChannels = aOldInfo.iMaxSgChannels;
	for(TInt i=0; i<aOldInfo.iMaxSgChannels; i++)
		{
		newInfo.iSgChannels[i] = aOldInfo.iSgChannels[i];
		}

	return newInfo;
	}

//////////////////////////////////////////////////////////////////////////////

class DDmaTestFactory : public DLogicalDevice
	{
public:
	DDmaTestFactory();
	~DDmaTestFactory()
		{
		__KTRACE_OPT(KDMA, Kern::Printf(">DDmaTestFactory::~DDmaTestFactory"));
		}
	virtual TInt Install();
	virtual void GetCaps(TDes8& aDes) const;
	virtual TInt Create(DLogicalChannelBase*& aChannel);
	};


DDmaTestFactory::DDmaTestFactory()
	{
	iVersion = TestDmaLddVersion();
	iParseMask = KDeviceAllowUnit; // no info, no PDD
	// iUnitsMask = 0; // Only one thing
	}


TInt DDmaTestFactory::Create(DLogicalChannelBase*& aChannel)
	{
	aChannel=new DDmaTestSession;
	return aChannel ? KErrNone : KErrNoMemory;
	}


TInt DDmaTestFactory::Install()
	{
	return SetName(&KTestDmaLddName);
	}


void DDmaTestFactory::GetCaps(TDes8& /*aDes*/) const
	{
	}

//////////////////////////////////////////////////////////////////////////////

DECLARE_STANDARD_LDD()
	{
	return new DDmaTestFactory;
	}