1.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
1.2 +++ b/os/kernelhwsrv/kerneltest/e32test/dmav2/d_dma2.cpp Fri Jun 15 03:10:57 2012 +0200
1.3 @@ -0,0 +1,1351 @@
1.4 +// Copyright (c) 2002-2009 Nokia Corporation and/or its subsidiary(-ies).
1.5 +// All rights reserved.
1.6 +// This component and the accompanying materials are made available
1.7 +// under the terms of "Eclipse Public License v1.0"
1.8 +// which accompanies this distribution, and is available
1.9 +// at the URL "http://www.eclipse.org/legal/epl-v10.html".
1.10 +//
1.11 +// Initial Contributors:
1.12 +// Nokia Corporation - initial contribution.
1.13 +//
1.14 +// Contributors:
1.15 +//
1.16 +// Description:
1.17 +// Test driver for DMA V2 framework
1.18 +//
1.19 +//
1.20 +
1.21 +#include <kernel/kern_priv.h>
1.22 +#include <drivers/dma.h>
1.23 +#include "d_dma2.h"
1.24 +
1.25 +_LIT(KClientPanicCat, "D_DMA2");
1.26 +_LIT(KDFCThreadName,"D_DMA_DFC_THREAD");
1.27 +_LIT(KIsrCbDfcThreadName,"D_DMA_IsrCb_thread");
1.28 +const TInt KDFCThreadPriority=26;
1.29 +
1.30 +class TStopwatch
1.31 +	{
1.32 +public:
1.33 +	TStopwatch()
1.34 +		:iStart(0), iStop(0)
1.35 +		{}
1.36 +
1.37 +	void Start()
1.38 +		{iStart = NKern::FastCounter();}
1.39 +
1.40 +	void Stop()
1.41 +		{
1.42 +		iStop = NKern::FastCounter();
1.43 +
1.44 +		__KTRACE_OPT(KDMA, Kern::Printf(">TStopwatch::Stop FastCounter ticks: iStart=0x%lx iStop=0x%lx", iStart, iStop));
1.45 +		}
1.46 +
1.47 +	TUint64 ReadMicroSecs() const
1.48 +		{
1.49 +#ifndef __SMP__
1.50 +		// Unsigned modulo-2^64 subtraction yields the correct elapsed
1.51 +		// tick count even if the fast counter wrapped between Start()
1.52 +		// and Stop(). The previous explicit wrap handling computed
1.53 +		// (KMaxTUint64 - iStart) + iStop, which is one tick short:
1.54 +		// KMaxTUint64 is 2^64 - 1, so the true wrapped distance is
1.55 +		// (KMaxTUint64 - iStart) + iStop + 1, i.e. exactly
1.56 +		// iStop - iStart in TUint64 arithmetic.
1.57 +		const TUint64 diff = iStop - iStart;
1.58 +
1.59 +		return FastCountToMicroSecs(diff);
1.60 +#else
1.61 +		//TODO On SMP it is possible for the value returned from
1.62 +		//NKern::FastCounter to depend on the current CPU (ie.
1.63 +		//NaviEngine)
1.64 +		//
1.65 +		//One solution would be to tie DFC's and ISR's to the same
1.66 +		//core as the client, but this would reduce the usefulness of
1.67 +		//SMP testing.
1.68 +		return 0;
1.69 +#endif
1.70 +		}
1.71 +private:
1.72 +
1.73 +	TUint64 FastCountToMicroSecs(TUint64 aCount) const
1.74 +		{
1.75 +		const TUint64 countsPerS = NKern::FastCounterFrequency();
1.76 +
1.77 +		TUint64 timeuS = (aCount*1000000)/countsPerS;
1.78 +		__KTRACE_OPT(KDMA, Kern::Printf(">TStopwatch::FastCountToMicroSecs FastCounter ticks: aCount=0x%lx countsPerS=0x%lx time=0x%lx", aCount, countsPerS, timeuS));
1.79 +		return timeuS;
1.80 +		}
1.81 +
1.82 +	TUint64 iStart;
1.83 +	TUint64 iStop;
1.84 +	};
1.85 +
1.86 +//////////////////////////////////////////////////////////////////////////////
1.87 +
1.88 +class DClientDmaRequest;
1.89 +/**
1.90 +Driver channel. Only accessible by a single client thread
1.91 +*/
1.92 +class DDmaTestSession : public DLogicalChannelBase
1.93 + {
1.94 +public:
1.95 + DDmaTestSession();
1.96 + virtual ~DDmaTestSession();
1.97 +protected:
1.98 + // from DLogicalChannelBase
1.99 + virtual TInt DoCreate(TInt aUnit, const TDesC8* anInfo, const TVersion& aVer);
1.100 + virtual TInt Request(TInt aFunction, TAny* a1, TAny* a2);
1.101 + virtual TInt RequestUserHandle(DThread* aThread, TOwnerType aType);
1.102 +private:
1.103 + TInt DoGetInfo(TAny* aInfo);
1.104 +
1.105 + TInt OpenDmaChannel(TUint aPslCookie, TUint& aDriverCookie);
1.106 + TInt CloseDmaChannelByCookie(TUint aDriverCookie);
1.107 + TInt PauseDmaChannelByCookie(TUint aDriverCookie);
1.108 + TInt ResumeDmaChannelByCookie(TUint aDriverCookie);
1.109 + TInt GetChannelCapsByCookie(TUint aDriverCookie, SDmacCaps& aChannelCaps);
1.110 + TInt GetChannelCapsByCookie(TUint aDriverCookie, TDmacTestCaps& aChannelCaps);
1.111 + TInt CancelAllByCookie(TUint aDriverCookie);
1.112 + TInt IsrRedoRequestByCookie(TUint aDriverCookie,TUint32 aSrcAddr,TUint32 aDstAddr,TInt aTransferCount,TUint32 aPslRequestInfo,TBool aIsrCb);
1.113 + TInt IsQueueEmptyByCookie(TUint aDriverCookie, TBool& aQueueEmpty);
1.114 + TInt ChannelIsOpenedByCookie(TUint aDriverCookie, TBool& aChannelOpen);
1.115 + void CloseDmaChannelByIndex(TInt aIndex);
1.116 + void CancelAllByIndex(TInt aIndex);
1.117 + TInt PauseDmaChannelByIndex(TInt aIndex);
1.118 + TInt ResumeDmaChannelByIndex(TInt aIndex);
1.119 + TInt IsrRedoRequestByIndex(TInt aIndex,TUint32 aSrcAddr,TUint32 aDstAddr,TInt aTransferCount,TUint32 aPslRequestInfo,TBool aIsrCb);
1.120 + TInt CreateSharedChunk();
1.121 + TUint OpenSharedChunkHandle();
1.122 +
1.123 + /**
1.124 + Creates a new kernel-side DMA request object, associated with a previously
1.125 + opened channel
1.126 +
1.127 + @param aChannelCookie - A channel cookie as returned by OpenDmaChannel
1.128 + @param aRequestCookie - On success will be a cookie by which the dma request can be referred to
1.129 + @param aNewCallback - If true, then a new style DMA callback will be used
1.130 + */
1.131 + TInt CreateDmaRequest(TUint aChannelCookie, TUint& aRequestCookie, TBool aNewCallback = EFalse, TInt aMaxFragmentSizeBytes=0);
1.132 +
1.133 + //TODO what happens if a client closes a channel that
1.134 + //it still has dma requests associated with?
1.135 +
1.136 + /**
1.137 + Destroys a previously created dma request object
1.138 + */
1.139 + TInt DestroyDmaRequestByCookie(TUint aRequestCookie);
1.140 +
1.141 + void DestroyDmaRequestByIndex(TInt aIndex);
1.142 +
1.143 +
1.144 + TInt CookieToChannelIndex(TUint aDriverCookie) const;
1.145 + TInt CookieToRequestIndex(TUint aRequestCookie) const;
1.146 +
1.147 + void FixupTransferArgs(TDmaTransferArgs& aTransferArgs) const;
1.148 + TInt FragmentRequest(TUint aRequestCookie, const TDmaTransferArgs& aTransferArgs, TBool aLegacy=ETrue);
1.149 +
1.150 + TInt QueueRequest(TUint aRequestCookie, TRequestStatus* aStatus, TCallbackRecord* aRecord, TUint64* aDurationMicroSecs);
1.151 + DClientDmaRequest* RequestFromCookie(TUint aRequestCookie) const;
1.152 + TInt RequestFragmentCount(TUint aRequestCookie);
1.153 +
1.154 + TDmaV2TestInfo ConvertTestInfo(const TDmaTestInfo& aOldInfo) const;
1.155 +private:
1.156 + DThread* iClient;
1.157 + TDynamicDfcQue* iDfcQ;
1.158 + TDynamicDfcQue* iIsrCallbackDfcQ; // Will be used by requests which complete with an ISR callback
1.159 + static const TInt KMaxChunkSize = 8 * KMega;
1.160 + TLinAddr iChunkBase;
1.161 + DChunk* iChunk;
1.162 +
1.163 + RPointerArray<TDmaChannel> iChannels;
1.164 + RPointerArray<DClientDmaRequest> iClientDmaReqs;
1.165 + };
1.166 +
1.167 +
1.168 +/**
1.169 +Allows a TClientRequest to be associated with a DDmaRequest
1.170 +*/
1.171 +class DClientDmaRequest : public DDmaRequest
1.172 + {
1.173 +public:
1.174 + static DClientDmaRequest* Construct(DThread* aClient, TDfcQue* const aDfcQ, TDmaChannel& aChannel, TBool aNewStyle=EFalse, TInt aMaxTransferSize=0);
1.175 + ~DClientDmaRequest();
1.176 +
1.177 + TInt Queue(TRequestStatus* aRequestStatus, TCallbackRecord* aRecord, TUint64* aDurationMicroSecs);
1.178 + void AddRequeArgs(const TIsrRequeArgsSet& aRequeArgSet);
1.179 +
1.180 + TUint64 GetDuration()
1.181 + {return iStopwatch.ReadMicroSecs();}
1.182 +
1.183 +protected:
1.184 + TInt Create();
1.185 + /** Construct with old style callback */
1.186 + DClientDmaRequest(DThread* aClient, TDfcQue* const aDfcQ, TDmaChannel& aChannel, TInt aMaxTransferSize);
1.187 +
1.188 + /** Construct with new style callback */
1.189 + DClientDmaRequest(DThread* aClient, TDfcQue* const aDfcQ, TDmaChannel& aChannel, TBool aNewStyle, TInt aMaxTransferSize);
1.190 +
1.191 +private:
1.192 + static void CallbackOldStyle(TResult aResult, TAny* aRequest);
1.193 + static void Callback(TUint, TDmaResult, TAny*, SDmaDesHdr*);
1.194 + static void CompleteCallback(TAny* aRequest);
1.195 +
1.196 + void DoCallback(TUint, TDmaResult);
1.197 + TBool RedoRequest();
1.198 +
1.199 + //!< Used to return a TCallbackRecord and transfer time
1.200 + TClientDataRequest2<TCallbackRecord, TUint64>* iClientDataRequest;
1.201 +
1.202 + DThread* const iClient;
1.203 + TDfcQue* const iDfcQ; //!< Use the DDmaTestSession's dfc queue
1.204 + TDfc iDfc;
1.205 +
1.206 + TStopwatch iStopwatch;
1.207 + TIsrRequeArgsSet iIsrRequeArgSet;
1.208 + };
1.209 +
1.210 +DClientDmaRequest* DClientDmaRequest::Construct(DThread* aClient, TDfcQue* const aDfcQ, TDmaChannel& aChannel, TBool aNewStyle, TInt aMaxTransferSize)
1.211 + {
1.212 + DClientDmaRequest* dmaRequest = NULL;
1.213 + if(aNewStyle)
1.214 + {
1.215 +#ifdef DMA_APIV2
1.216 + dmaRequest = new DClientDmaRequest(aClient, aDfcQ, aChannel, aNewStyle, aMaxTransferSize);
1.217 +#else
1.218 + TEST_FAULT; // if a new style dma request was requested it should have been caught earlier
1.219 +#endif
1.220 + }
1.221 + else
1.222 + {
1.223 + dmaRequest = new DClientDmaRequest(aClient, aDfcQ, aChannel, aMaxTransferSize);
1.224 + }
1.225 +
1.226 + if(dmaRequest == NULL)
1.227 + {
1.228 + return dmaRequest;
1.229 + }
1.230 +
1.231 + const TInt r = dmaRequest->Create();
1.232 + if(r != KErrNone)
1.233 + {
1.234 + delete dmaRequest;
1.235 + dmaRequest = NULL;
1.236 + }
1.237 + return dmaRequest;
1.238 + }
1.239 +
1.240 +DClientDmaRequest::DClientDmaRequest(DThread* aClient, TDfcQue* const aDfcQ, TDmaChannel& aChannel, TInt aMaxFragmentSize)
1.241 + :DDmaRequest(aChannel, &CallbackOldStyle, this, aMaxFragmentSize),
1.242 + iClientDataRequest(NULL),
1.243 + iClient(aClient),
1.244 + iDfcQ(aDfcQ),
1.245 + iDfc(CompleteCallback,NULL, iDfcQ, KMaxDfcPriority)
1.246 + {
1.247 + }
1.248 +#ifdef DMA_APIV2
1.249 +DClientDmaRequest::DClientDmaRequest(DThread* aClient, TDfcQue* const aDfcQ, TDmaChannel& aChannel, TBool /*aNewStyle*/, TInt aMaxFragmentSize)
1.250 + :DDmaRequest(aChannel, &Callback, this, aMaxFragmentSize),
1.251 + iClientDataRequest(NULL),
1.252 + iClient(aClient),
1.253 + iDfcQ(aDfcQ),
1.254 + iDfc(CompleteCallback,NULL, iDfcQ, KMaxDfcPriority)
1.255 + {
1.256 + }
1.257 +#endif
1.258 +
1.259 +TInt DClientDmaRequest::Create()
1.260 + {
1.261 + return Kern::CreateClientDataRequest2(iClientDataRequest);
1.262 + }
1.263 +
1.264 +DClientDmaRequest::~DClientDmaRequest()
1.265 + {
1.266 + __KTRACE_OPT(KDMA, Kern::Printf(">DClientDmaRequest::~DClientDmaRequest"));
1.267 + if(iClientDataRequest)
1.268 + {
1.269 + Kern::DestroyClientRequest(iClientDataRequest);
1.270 + }
1.271 + }
1.272 +
1.273 +/**
1.274 +Queue the DClientDmaRequest.
1.275 +
1.276 +@param aRequestStatus Pointer to the client's request status
1.277 +@param aRecord Pointer to the user's TCallbackRecord, may be null
1.278 +@return
1.279 + -KErrInUse The client request is in use
1.280 + -KErrNone success
1.281 +*/
1.282 +TInt DClientDmaRequest::Queue(TRequestStatus* aRequestStatus, TCallbackRecord* aRecord, TUint64* aDurationMicroSecs)
1.283 + {
1.284 + __NK_ASSERT_ALWAYS(aRecord);
1.285 + __NK_ASSERT_ALWAYS(aDurationMicroSecs);
1.286 +
1.287 + //erase results from last transfer
1.288 + iClientDataRequest->Data1().Reset();
1.289 + iClientDataRequest->SetDestPtr1(aRecord);
1.290 +
1.291 + iClientDataRequest->SetDestPtr2(aDurationMicroSecs);
1.292 +
1.293 +
1.294 + TInt r = iClientDataRequest->SetStatus(aRequestStatus);
1.295 + if(r != KErrNone)
1.296 + {
1.297 + return r;
1.298 + }
1.299 +
1.300 + iStopwatch.Start();
1.301 +#ifdef DMA_APIV2
1.302 + r = DDmaRequest::Queue();
1.303 +#else
1.304 + // old version of queue did not return an error code
1.305 + DDmaRequest::Queue();
1.306 + r = KErrNone;
1.307 +#endif
1.308 +
1.309 + return r;
1.310 + }
1.311 +
1.312 +void DClientDmaRequest::AddRequeArgs(const TIsrRequeArgsSet& aRequeArgSet)
1.313 + {
1.314 + iIsrRequeArgSet = aRequeArgSet;
1.315 + }
1.316 +
1.317 +/**
1.318 +If a transfer complete callback in ISR context is received this will be
1.319 +called to redo the request with the first entry in the array.
1.320 +
1.321 +@return ETrue if the redo was successful - indicates that another callback is coming
1.322 +*/
1.323 +TBool DClientDmaRequest::RedoRequest()
1.324 + {
1.325 + TIsrRequeArgs args = iIsrRequeArgSet.GetArgs();
1.326 + const TInt r = args.Call(iChannel);
1.327 + TCallbackRecord& record = iClientDataRequest->Data1();
1.328 + record.IsrRedoResult(r);
1.329 + return (r == KErrNone);
1.330 + }
1.331 +
1.332 +
1.333 +/**
1.334 +Calls TDmaChannel::IsrRedoRequest on aChannel
1.335 +with this object's parameters
1.336 +*/
1.337 +TInt TIsrRequeArgs::Call(TDmaChannel& aChannel)
1.338 + {
1.339 +#ifdef DMA_APIV2
1.340 + return aChannel.IsrRedoRequest(iSrcAddr, iDstAddr, iTransferCount, iPslRequestInfo, iIsrCb);
1.341 +#else
1.342 + TEST_FAULT;
1.343 + return KErrNotSupported;
1.344 +#endif
1.345 + }
1.346 +
1.347 +/**
1.348 +Check that both source and destination of ISR reque args will
1.349 +lie within the range specified by aStart and aSize.
1.350 +
1.351 +@param aStart The linear base address of the region
1.352 +@param aSize The size of the region
1.353 +*/
1.354 +TBool TIsrRequeArgs::CheckRange(TLinAddr aStart, TUint aSize) const
1.355 + {
1.356 + TUint physStart = Epoc::LinearToPhysical(aStart);
1.357 + TEST_ASSERT(physStart != KPhysAddrInvalid);
1.358 +
1.359 + TAddrRange chunk(physStart, aSize);
1.360 + TBool sourceOk = (iSrcAddr == KPhysAddrInvalid) ? ETrue : chunk.Contains(SourceRange());
1.361 +
1.362 + TBool destOk = (iDstAddr == KPhysAddrInvalid) ? ETrue : chunk.Contains(DestRange());
1.363 +
1.364 + return sourceOk && destOk;
1.365 + }
1.366 +
1.367 +TBool TIsrRequeArgsSet::CheckRange(TLinAddr aAddr, TUint aSize) const
1.368 + {
1.369 + for(TInt i=0; i<iCount; i++)
1.370 + {
1.371 + if(!iRequeArgs[i].CheckRange(aAddr, aSize))
1.372 + return EFalse;
1.373 + }
1.374 + return ETrue;
1.375 + }
1.376 +
1.377 +/**
1.378 +Translate an old style dma callback to a new-style one
1.379 +*/
1.380 +void DClientDmaRequest::CallbackOldStyle(TResult aResult, TAny* aArg)
1.381 + {
1.382 + __KTRACE_OPT(KDMA, Kern::Printf(">DClientDmaRequest::CallBackOldStyle: TResult result=%d", aResult));
1.383 + TEST_ASSERT(aResult != EBadResult);
1.384 + //translate result code
1.385 + const TDmaResult result = (aResult == EOk) ? EDmaResultOK : EDmaResultError;
1.386 +
1.387 + //call the new-style callback
1.388 + Callback(EDmaCallbackRequestCompletion, result, aArg, NULL);
1.389 + }
1.390 +
1.391 +
1.392 +/**
1.393 +The new style callback called by the DMA framework
1.394 +may be called in either thread or ISR context
1.395 +*/
1.396 +void DClientDmaRequest::Callback(TUint aCallbackType, TDmaResult aResult, TAny* aArg, SDmaDesHdr* aHdr)
1.397 + {
1.398 + const TInt context = NKern::CurrentContext();
1.399 + __KTRACE_OPT(KDMA, Kern::Printf(">DClientDmaRequest::CallBack: TDmaResult result = %d, NKern::TContext context = %d", aResult, context));
1.400 +
1.401 + DClientDmaRequest& self = *reinterpret_cast<DClientDmaRequest*>(aArg);
1.402 + self.DoCallback(aCallbackType, aResult);
1.403 +
1.404 + // decide if callback is complete
1.405 + const TBool transferComplete = aCallbackType & EDmaCallbackRequestCompletion;
1.406 + if(!transferComplete)
1.407 + {
1.408 + return;
1.409 + }
1.410 +
1.411 + // If there are reque args then redo this request
1.412 + // another callback would then be expected.
1.413 + // Requests can only be re-queued in ISR context, but we
1.414 + // do not check that here as it is up to the client to get
1.415 + // it right - also, we want to test that the PIL catches this
1.416 + // error
1.417 + if(!self.iIsrRequeArgSet.IsEmpty())
1.418 + {
1.419 +		// If redo call was successful, return and wait for next callback
1.420 + if(self.RedoRequest())
1.421 + return;
1.422 + }
1.423 +
1.424 + switch(context)
1.425 + {
1.426 + case NKern::EThread:
1.427 + {
1.428 + CompleteCallback(aArg);
1.429 + break;
1.430 + }
1.431 + case NKern::EInterrupt:
1.432 + {
1.433 + self.iDfc.iPtr = aArg;
1.434 + self.iDfc.Add();
1.435 + break;
1.436 + }
1.437 + case NKern::EIDFC: //fall-through
1.438 + case NKern::EEscaped:
1.439 + default:
1.440 + TEST_FAULT;
1.441 + }
1.442 + }
1.443 +
1.444 +/**
1.445 +Log results of callback. May be called in either thread or ISR context
1.446 +*/
1.447 +void DClientDmaRequest::DoCallback(TUint aCallbackType, TDmaResult aResult)
1.448 + {
1.449 +	iStopwatch.Stop(); //successive calls will simply overwrite the stop time
1.450 +
1.451 + // This will always be done whether the client requested a
1.452 + // callback record or not
1.453 + TCallbackRecord& record = iClientDataRequest->Data1();
1.454 + record.ProcessCallback(aCallbackType, aResult);
1.455 + }
1.456 +
1.457 +/**
1.458 +This function may either be called directly or queued as a DFC
1.459 +*/
1.460 +void DClientDmaRequest::CompleteCallback(TAny* aArg)
1.461 + {
1.462 + __KTRACE_OPT(KDMA, Kern::Printf(">DClientDmaRequest::CompleteCallBack thread %O", &Kern::CurrentThread()));
1.463 + __ASSERT_NOT_ISR;
1.464 +
1.465 + DClientDmaRequest& self = *reinterpret_cast<DClientDmaRequest*>(aArg);
1.466 +
1.467 + self.iClientDataRequest->Data2() = self.iStopwatch.ReadMicroSecs();
1.468 +
1.469 + //Assert that we called SetRequestStatus on this object before
1.470 + //queueing
1.471 + __NK_ASSERT_DEBUG(self.iClientDataRequest->IsReady());
1.472 +
1.473 + // This is an inelegant, temporary, solution to the following problem:
1.474 + //
1.475 + // If a dma request completes with an ISR callback the test
1.476 + // framework will queue this function as a DFC which
1.477 + // will then signal the user-side client. As a consequence of
1.478 + // this the user side client may then decide to destroy this
1.479 +	// request. However, until the DMA framework's DFC has run
1.480 +	// and called OnDeque() on this request, it is still considered as
1.481 +	// queued. Since it is possible that this DFC could run
1.482 +	// before the DMA fw's DFC, this request could get destroyed while
1.483 +	// it is still queued, triggering a PIL assertion.
1.484 +	//
1.485 +	// The real fix is likely to be for the PIL to call the callback
1.486 +	// twice, but with different arguments, once to announce the
1.487 +	// ISR and again to announce the dequeue.
1.488 + //
1.489 + // Here we poll and wait for this request to be dequeued. Note,
1.490 + // this DFC is currently run on a separate DFC queue, otherwise
1.491 + // it could get deadlocked. An alternative to polling would be
1.492 + // to use DCondVar, but that would require PIL modification
1.493 +
1.494 + if(NKern::CurrentThread() == self.iDfcQ->iThread)
1.495 + {
1.496 + // Only need to poll if we aren't on the channel's DFC queue
1.497 + for(;;)
1.498 + {
1.499 + // once the request has been unqueued it
1.500 + // can only be queued again by the client
1.501 + const TBool queued = __e32_atomic_load_acq32(&self.iQueued);
1.502 + if(!queued)
1.503 + break;
1.504 + __KTRACE_OPT(KDMA, Kern::Printf("Waiting for requeuest to be dequeued"));
1.505 + NKern::Sleep(10);
1.506 + }
1.507 + }
1.508 + else
1.509 + {
1.510 + // If we are on the channel's DFCQ we should be dequeued
1.511 + // already
1.512 + __NK_ASSERT_DEBUG(!__e32_atomic_load_acq32(&self.iQueued));
1.513 + }
1.514 +
1.515 + // We can always complete with KErrNone, the actual DMA result is
1.516 + // logged in the TCallbackRecord
1.517 + Kern::QueueRequestComplete(self.iClient, self.iClientDataRequest, KErrNone);
1.518 + }
1.519 +
1.520 +const TInt DDmaTestSession::KMaxChunkSize;
1.521 +
1.522 +TInt DDmaTestSession::RequestUserHandle(DThread* aThread, TOwnerType aType)
1.523 + {
1.524 + if (aType!=EOwnerThread || aThread!=iClient)
1.525 + return KErrAccessDenied;
1.526 + return KErrNone;
1.527 + }
1.528 +
1.529 +DDmaTestSession::DDmaTestSession()
1.530 + : iClient(NULL), iDfcQ(NULL), iIsrCallbackDfcQ(NULL), iChunkBase(0), iChunk(NULL)
1.531 + {}
1.532 +
1.533 +// called in thread critical section
1.534 +TInt DDmaTestSession::DoCreate(TInt /*aUnit*/, const TDesC8* aInfo, const TVersion& /*aVer*/)
1.535 + {
1.536 + __NK_ASSERT_ALWAYS(iDfcQ == NULL);
1.537 + __NK_ASSERT_ALWAYS(iIsrCallbackDfcQ == NULL);
1.538 +
1.539 + TInt r = Kern::DynamicDfcQCreate(iDfcQ, KDFCThreadPriority, KDFCThreadName);
1.540 + if (r != KErrNone)
1.541 + return r;
1.542 + NKern::ThreadSetCpuAffinity((NThread*)(iDfcQ->iThread), KCpuAffinityAny);
1.543 +
1.544 + r = Kern::DynamicDfcQCreate(iIsrCallbackDfcQ, KDFCThreadPriority, KIsrCbDfcThreadName);
1.545 + if (r != KErrNone)
1.546 + return r;
1.547 + NKern::ThreadSetCpuAffinity((NThread*)(iIsrCallbackDfcQ->iThread), KCpuAffinityAny);
1.548 +
1.549 + iClient = &Kern::CurrentThread();
1.550 +
1.551 + r = CreateSharedChunk();
1.552 + return r;
1.553 + }
1.554 +
1.555 +DDmaTestSession::~DDmaTestSession()
1.556 + {
1.557 + //Destroy requests before channels
1.558 + //or we will trigger an assertion
1.559 + while(iClientDmaReqs.Count())
1.560 + {
1.561 + DestroyDmaRequestByIndex(0);
1.562 + }
1.563 + iClientDmaReqs.Close();
1.564 +
1.565 + while(iChannels.Count())
1.566 + {
1.567 + CloseDmaChannelByIndex(0);
1.568 + }
1.569 + iChannels.Close();
1.570 +
1.571 +
1.572 + if (iDfcQ)
1.573 + {
1.574 + iDfcQ->Destroy();
1.575 + }
1.576 +
1.577 + if (iIsrCallbackDfcQ)
1.578 + {
1.579 + iIsrCallbackDfcQ->Destroy();
1.580 + }
1.581 +
1.582 + if(iChunk)
1.583 + {
1.584 + Kern::ChunkClose(iChunk);
1.585 + iChunk = NULL;
1.586 + }
1.587 + }
1.588 +
1.589 +TInt DDmaTestSession::Request(TInt aFunction, TAny* a1, TAny* a2)
1.590 + {
1.591 + __NK_ASSERT_DEBUG(&Kern::CurrentThread() == iClient);
1.592 +
1.593 + switch (aFunction)
1.594 + {
1.595 + case RDmaSession::EOpenChannel:
1.596 + {
1.597 + TUint pslCookie = (TUint)a1;
1.598 + TUint driverCookie = 0;
1.599 + TInt r = OpenDmaChannel(pslCookie, driverCookie);
1.600 + umemput32(a2, &driverCookie, sizeof(TAny*));
1.601 + return r;
1.602 + }
1.603 + case RDmaSession::ECloseChannel:
1.604 + {
1.605 + TUint driverCookie = reinterpret_cast<TUint>(a1);
1.606 + TInt r = CloseDmaChannelByCookie(driverCookie);
1.607 + return r;
1.608 + }
1.609 + case RDmaSession::EChannelCaps:
1.610 + {
1.611 + TUint driverCookie = reinterpret_cast<TUint>(a1);
1.612 + TPckgBuf<TDmacTestCaps> capsBuf;
1.613 + TInt r = GetChannelCapsByCookie(driverCookie, capsBuf());
1.614 + Kern::KUDesPut(*reinterpret_cast<TDes8*>(a2), capsBuf);
1.615 + return r;
1.616 + }
1.617 + case RDmaSession::EPauseChannel:
1.618 + {
1.619 + TUint driverCookie = reinterpret_cast<TUint>(a1);
1.620 + TInt r = PauseDmaChannelByCookie(driverCookie);
1.621 + return r;
1.622 + }
1.623 + case RDmaSession::EResumeChannel:
1.624 + {
1.625 + TUint driverCookie = reinterpret_cast<TUint>(a1);
1.626 + TInt r = ResumeDmaChannelByCookie(driverCookie);
1.627 + return r;
1.628 + }
1.629 + case RDmaSession::EFragmentCount:
1.630 + {
1.631 + TUint requestCookie = reinterpret_cast<TUint>(a1);
1.632 + TInt r = RequestFragmentCount(requestCookie);
1.633 + return r;
1.634 + }
1.635 + case RDmaSession::ERequestOpen:
1.636 + {
1.637 + RDmaSession::TRequestCreateArgs createArgs(0, EFalse, 0);
1.638 + TPckg<RDmaSession::TRequestCreateArgs> package(createArgs);
1.639 + Kern::KUDesGet(package, *reinterpret_cast<TDes8*>(a1));
1.640 +
1.641 + const TUint channelCookie = createArgs.iChannelCookie;
1.642 + TUint requestCookie = 0;
1.643 +
1.644 + TInt r = CreateDmaRequest(channelCookie, requestCookie, createArgs.iNewStyle, createArgs.iMaxFragmentSize);
1.645 +
1.646 + umemput32(a2, &requestCookie, sizeof(TAny*));
1.647 + return r;
1.648 + }
1.649 + case RDmaSession::ERequestClose:
1.650 + {
1.651 + const TUint requestCookie = reinterpret_cast<TUint>(a1);
1.652 + return DestroyDmaRequestByCookie(requestCookie);
1.653 + }
1.654 + case RDmaSession::EFragmentLegacy:
1.655 + case RDmaSession::EFragment:
1.656 + {
1.657 + TPckgBuf<RDmaSession::TFragmentArgs> argsBuff;
1.658 + Kern::KUDesGet(argsBuff, *reinterpret_cast<TDes8*>(a1));
1.659 + const TUint requestCookie = argsBuff().iRequestCookie;
1.660 +
1.661 + //must remove constness as we actually need to
1.662 + //convert the src and dst offsets to addresses
1.663 + TDmaTransferArgs& transferArgs = const_cast<TDmaTransferArgs&>(argsBuff().iTransferArgs);
1.664 +
1.665 + //convert address offsets in to kernel virtual addresses
1.666 + FixupTransferArgs(transferArgs);
1.667 +
1.668 + TEST_ASSERT((TAddressParms(transferArgs).CheckRange(iChunkBase, iChunk->Size())));
1.669 +
1.670 + TInt r = KErrGeneral;
1.671 +
1.672 + TStopwatch clock;
1.673 + clock.Start();
1.674 + switch (aFunction)
1.675 + {
1.676 + case RDmaSession::EFragmentLegacy:
1.677 + r = FragmentRequest(requestCookie, transferArgs, ETrue); break;
1.678 + case RDmaSession::EFragment:
1.679 + r = FragmentRequest(requestCookie, transferArgs, EFalse); break;
1.680 + default:
1.681 + TEST_FAULT;
1.682 + }
1.683 + clock.Stop();
1.684 +
1.685 + const TUint64 time = clock.ReadMicroSecs();
1.686 +
1.687 + TUint64* const timePtr = argsBuff().iDurationMicroSecs;
1.688 + if(timePtr)
1.689 + {
1.690 + umemput(timePtr, &time, sizeof(time));
1.691 + }
1.692 + return r;
1.693 + }
1.694 + case RDmaSession::EQueueRequest:
1.695 + {
1.696 + TPckgBuf<RDmaSession::TQueueArgs> argsBuff;
1.697 + Kern::KUDesGet(argsBuff, *reinterpret_cast<TDes8*>(a1));
1.698 +
1.699 + //this is an Asynchronous request
1.700 + const TUint requestCookie = argsBuff().iRequestCookie;
1.701 + TRequestStatus* requestStatus = argsBuff().iStatus;
1.702 + TCallbackRecord* callbackRec = argsBuff().iCallbackRecord;
1.703 + TUint64* duration = argsBuff().iDurationMicroSecs;
1.704 +
1.705 + TInt r = QueueRequest(requestCookie, requestStatus, callbackRec, duration);
1.706 + if(r != KErrNone)
1.707 + {
1.708 + Kern::RequestComplete(requestStatus, r);
1.709 + }
1.710 + return r;
1.711 + }
1.712 + case RDmaSession::EQueueRequestWithReque:
1.713 + {
1.714 + //TODO can common code with EQueueRequest be extracted?
1.715 + TPckgBuf<RDmaSession::TQueueArgsWithReque> argsBuff;
1.716 + Kern::KUDesGet(argsBuff, *reinterpret_cast<TDes8*>(a1));
1.717 +
1.718 + //this is an Asynchronous request
1.719 + const TUint requestCookie = argsBuff().iRequestCookie;
1.720 + TRequestStatus* requestStatus = argsBuff().iStatus;
1.721 + TCallbackRecord* callbackRec = argsBuff().iCallbackRecord;
1.722 + TUint64* duration = argsBuff().iDurationMicroSecs;
1.723 +
1.724 + TInt r = KErrNotFound;
1.725 +
1.726 + DClientDmaRequest* const request = RequestFromCookie(requestCookie);
1.727 + if(request != NULL)
1.728 + {
1.729 + argsBuff().iRequeSet.Fixup(iChunkBase);
1.730 + //TODO reque args must be substituted in order to
1.731 + //check the range. The original transfer args are not
1.732 + //available when queue is called, they could
1.733 + //however be stored within DClientDmaRequest
1.734 + //TEST_ASSERT((argsBuff().iRequeSet.CheckRange(iChunkBase, iChunk->Size())));
1.735 + request->AddRequeArgs(argsBuff().iRequeSet);
1.736 +
1.737 + r = QueueRequest(requestCookie, requestStatus, callbackRec, duration);
1.738 + }
1.739 +
1.740 + if(r != KErrNone)
1.741 + {
1.742 + Kern::RequestComplete(requestStatus, r);
1.743 + }
1.744 + return r;
1.745 + }
1.746 + case RDmaSession::EIsrRedoRequest:
1.747 + {
1.748 + TPckgBuf<RDmaSession::TIsrRedoReqArgs> argsBuff;
1.749 + Kern::KUDesGet(argsBuff, *reinterpret_cast<TDes8*>(a1));
1.750 +
1.751 + const TUint driverCookie = argsBuff().iDriverCookie;
1.752 + const TUint32 srcAddr = argsBuff().iSrcAddr;
1.753 + const TUint32 dstAddr = argsBuff().iDstAddr;
1.754 + const TInt transferCount = argsBuff().iTransferCount;
1.755 + const TUint32 pslRequestInfo = argsBuff().iPslRequestInfo;
1.756 + const TBool isrCb = argsBuff().iIsrCb;
1.757 +
1.758 + TInt r = IsrRedoRequestByCookie(driverCookie,srcAddr,dstAddr,transferCount,pslRequestInfo,isrCb);
1.759 + return r;
1.760 + }
1.761 + case RDmaSession::EIsOpened:
1.762 + {
1.763 + TUint driverCookie = (TUint)a1;
1.764 + TBool channelOpen = EFalse;;
1.765 + TInt r = ChannelIsOpenedByCookie(driverCookie,channelOpen);
1.766 + umemput32(a2, &channelOpen, sizeof(TAny*));
1.767 + return r;
1.768 + }
1.769 + case RDmaSession::EIsQueueEmpty:
1.770 + {
1.771 + TUint driverCookie = (TUint)a1;
1.772 + TBool queueEmpty = EFalse;;
1.773 + TInt r = IsQueueEmptyByCookie(driverCookie,queueEmpty);
1.774 + umemput32(a2, &queueEmpty, sizeof(TAny*));
1.775 + return r;
1.776 + }
1.777 + case RDmaSession::ECancelAllChannel:
1.778 + {
1.779 + TUint driverCookie = reinterpret_cast<TUint>(a1);
1.780 + TInt r = CancelAllByCookie(driverCookie);
1.781 + return r;
1.782 + }
1.783 + case RDmaSession::EOpenSharedChunk:
1.784 + {
1.785 + return OpenSharedChunkHandle();
1.786 + }
1.787 + case RDmaSession::EGetTestInfo:
1.788 + {
1.789 +#ifdef DMA_APIV2
1.790 + TPckgC<TDmaV2TestInfo> package(DmaTestInfoV2());
1.791 +#else
1.792 + TPckgC<TDmaV2TestInfo> package(ConvertTestInfo(DmaTestInfo()));
1.793 +#endif
1.794 + Kern::KUDesPut(*reinterpret_cast<TDes8*>(a1), package);
1.795 + return KErrNone;
1.796 + }
1.797 + default:
1.798 + Kern::PanicCurrentThread(KClientPanicCat, __LINE__);
1.799 + return KErrGeneral;
1.800 + }
1.801 + }
1.802 +
1.803 +TInt DDmaTestSession::OpenDmaChannel(TUint aPslCookie, TUint& aDriverCookie )
1.804 + {
1.805 + TDmaChannel::SCreateInfo info;
1.806 + info.iCookie = aPslCookie;
1.807 + info.iDfcQ = iDfcQ;
1.808 + info.iDfcPriority = 3;
1.809 + info.iDesCount = 128;
1.810 +
1.811 + TDmaChannel* channel = NULL;
1.812 +
1.813 + //cs so thread can't be killed between
1.814 + //opening channel and adding to array
1.815 + NKern::ThreadEnterCS();
1.816 + TInt r = TDmaChannel::Open(info, channel);
1.817 + if(KErrNone == r)
1.818 + {
1.819 + __NK_ASSERT_ALWAYS(channel);
1.820 +
1.821 + __KTRACE_OPT(KDMA, Kern::Printf("OpenDmaChannel: channel@ 0x%08x", channel));
1.822 +
1.823 +
1.824 + TInt err = iChannels.Append(channel);
1.825 + if(KErrNone == err)
1.826 + {
1.827 + aDriverCookie = reinterpret_cast<TUint>(channel);
1.828 + }
1.829 + else
1.830 + {
1.831 + channel->Close();
1.832 + r = KErrNoMemory;
1.833 + }
1.834 + }
1.835 + NKern::ThreadLeaveCS();
1.836 +
1.837 + return r;
1.838 + }
1.839 +
1.840 +TInt DDmaTestSession::CookieToChannelIndex(TUint aDriverCookie) const
1.841 + {
1.842 + const TInt r = iChannels.Find(reinterpret_cast<TDmaChannel*>(aDriverCookie));
1.843 +
1.844 + if(r < 0)
1.845 + {
1.846 + __KTRACE_OPT(KDMA, Kern::Printf("CookieToChannelIndex: cookie 0x%08x not found!", aDriverCookie));
1.847 + }
1.848 + return r;
1.849 + }
1.850 +
1.851 +TInt DDmaTestSession::CookieToRequestIndex(TUint aRequestCookie) const
1.852 + {
1.853 + const TInt r = iClientDmaReqs.Find(reinterpret_cast<DClientDmaRequest*>(aRequestCookie));
1.854 +
1.855 + if(r < 0)
1.856 + {
1.857 + __KTRACE_OPT(KDMA, Kern::Printf("CookieToRequestIndex: cookie 0x%08x not found!", aRequestCookie));
1.858 + }
1.859 + return r;
1.860 + }
1.861 +
1.862 +void DDmaTestSession::CloseDmaChannelByIndex(TInt aIndex)
1.863 + {
1.864 + __KTRACE_OPT(KDMA, Kern::Printf("CloseDmaChannelByIndex: %d", aIndex));
1.865 + __NK_ASSERT_DEBUG(aIndex < iChannels.Count());
1.866 + // cs so client thread can't be killed between removing channel from
1.867 + // array and closing it.
1.868 + NKern::ThreadEnterCS();
1.869 + TDmaChannel* channel = iChannels[aIndex];
1.870 + iChannels.Remove(aIndex);
1.871 + channel->Close();
1.872 + NKern::ThreadLeaveCS();
1.873 + }
1.874 +
1.875 +TInt DDmaTestSession::CloseDmaChannelByCookie(TUint aDriverCookie)
1.876 + {
1.877 + __KTRACE_OPT(KDMA, Kern::Printf("CloseDmaChannelByCookie: 0x%08x", aDriverCookie));
1.878 + const TInt index = CookieToChannelIndex(aDriverCookie);
1.879 +
1.880 + if(index >= 0)
1.881 + {
1.882 + CloseDmaChannelByIndex(index);
1.883 + return KErrNone;
1.884 + }
1.885 + else
1.886 + {
1.887 + return KErrNotFound;
1.888 + }
1.889 + }
1.890 +
1.891 +TInt DDmaTestSession::CancelAllByCookie(TUint aDriverCookie)
1.892 + {
1.893 + __KTRACE_OPT(KDMA, Kern::Printf("CancelAllByCookie: 0x%08x", aDriverCookie));
1.894 + const TInt index = CookieToChannelIndex(aDriverCookie);
1.895 +
1.896 + if(index >= 0)
1.897 + {
1.898 + CancelAllByIndex(index);
1.899 + return KErrNone;
1.900 + }
1.901 + else
1.902 + {
1.903 + return KErrNotFound;
1.904 + }
1.905 + }
1.906 +
1.907 +void DDmaTestSession::CancelAllByIndex(TInt aIndex)
1.908 +	{
1.909 +	__KTRACE_OPT(KDMA, Kern::Printf("CancelAllByIndex: %d", aIndex));
1.910 +	__NK_ASSERT_DEBUG(aIndex < iChannels.Count());
1.911 +
1.912 +	TDmaChannel* channel = iChannels[aIndex];
1.913 +	// Unlike CloseDmaChannelByIndex, keep the channel in iChannels: cancelling must leave it open and addressable by its cookie (and the destructor only closes channels still present in the array), so Remove() here would leak the open channel.
1.914 +	channel->CancelAll();
1.915 +	}
1.916 +
1.917 +TInt DDmaTestSession::PauseDmaChannelByIndex(TInt aIndex)
1.918 + {
1.919 + __KTRACE_OPT(KDMA, Kern::Printf("PauseDmaChannelByIndex: %d", aIndex));
1.920 + __NK_ASSERT_DEBUG(aIndex < iChannels.Count());
1.921 +
1.922 +#ifdef DMA_APIV2
1.923 + TDmaChannel* channel = iChannels[aIndex];
1.924 + return channel->Pause();
1.925 +#else
1.926 + return KErrNotSupported;
1.927 +#endif
1.928 + }
1.929 +
1.930 +TInt DDmaTestSession::PauseDmaChannelByCookie(TUint aDriverCookie)
1.931 + {
1.932 + __KTRACE_OPT(KDMA, Kern::Printf("PauseDmaChannelByCookie: 0x%08x", aDriverCookie));
1.933 + const TInt index = CookieToChannelIndex(aDriverCookie);
1.934 +
1.935 + if(index >= 0)
1.936 + {
1.937 + TInt r = PauseDmaChannelByIndex(index);
1.938 + return r;
1.939 + }
1.940 + else
1.941 + {
1.942 + return KErrNotFound;
1.943 + }
1.944 + }
1.945 +
1.946 +TInt DDmaTestSession::ResumeDmaChannelByIndex(TInt aIndex)
1.947 + {
1.948 + __KTRACE_OPT(KDMA, Kern::Printf("ResumeDmaChannelByIndex: %d", aIndex));
1.949 + __NK_ASSERT_DEBUG(aIndex < iChannels.Count());
1.950 +
1.951 +#ifdef DMA_APIV2
1.952 + TDmaChannel* channel = iChannels[aIndex];
1.953 + return channel->Resume();
1.954 +#else
1.955 + return KErrNotSupported;
1.956 +#endif
1.957 + }
1.958 +
1.959 +TInt DDmaTestSession::ResumeDmaChannelByCookie(TUint aDriverCookie)
1.960 + {
1.961 + __KTRACE_OPT(KDMA, Kern::Printf("ResumeDmaChannelByCookie: 0x%08x", aDriverCookie));
1.962 + const TInt index = CookieToChannelIndex(aDriverCookie);
1.963 +
1.964 + if(index >= 0)
1.965 + {
1.966 + TInt r = ResumeDmaChannelByIndex(index);
1.967 + return r;
1.968 + }
1.969 + else
1.970 + {
1.971 + return KErrNotFound;
1.972 + }
1.973 + }
1.974 +
1.975 +TInt DDmaTestSession::IsrRedoRequestByCookie(TUint aDriverCookie,TUint32 aSrcAddr,TUint32 aDstAddr,TInt aTransferCount,TUint32 aPslRequestInfo,TBool aIsrCb)
1.976 +{
1.977 + __KTRACE_OPT(KDMA, Kern::Printf("IsrRedoRequestByCookie: 0x%08x", aDriverCookie));
1.978 + const TInt index = CookieToChannelIndex(aDriverCookie);
1.979 +
1.980 + if(index >= 0)
1.981 + {
1.982 + TInt r = IsrRedoRequestByIndex(index,aSrcAddr,aDstAddr,aTransferCount,aPslRequestInfo,aIsrCb);
1.983 + return r;
1.984 + }
1.985 + else
1.986 + {
1.987 + return KErrNotFound;
1.988 + }
1.989 +}
1.990 +
1.991 +TInt DDmaTestSession::IsrRedoRequestByIndex(TInt aIndex,TUint32 aSrcAddr,TUint32 aDstAddr,TInt aTransferCount,TUint32 aPslRequestInfo,TBool aIsrCb)
1.992 + {
1.993 + __KTRACE_OPT(KDMA, Kern::Printf("IsrRedoRequestByIndex: %d", aIndex));
1.994 + __NK_ASSERT_DEBUG(aIndex < iChannels.Count());
1.995 +
1.996 +#ifdef DMA_APIV2
1.997 + TDmaChannel* channel = iChannels[aIndex];
1.998 + return channel->IsrRedoRequest(aSrcAddr,aDstAddr,aTransferCount,aPslRequestInfo,aIsrCb);
1.999 +#else
1.1000 + return KErrNotSupported;
1.1001 +#endif
1.1002 + }
1.1003 +
1.1004 +/**
1.1005 +aChannelCaps will be set to "NULL" values
1.1006 +*/
1.1007 +TInt DDmaTestSession::GetChannelCapsByCookie(TUint aDriverCookie, TDmacTestCaps& aChannelCaps)
1.1008 + {
1.1009 + SDmacCaps caps = {0,}; //initialise with NULL values
1.1010 + TInt r = GetChannelCapsByCookie(aDriverCookie, caps);
1.1011 +
1.1012 + if(r == KErrNotSupported)
1.1013 + {
1.1014 + //If we can not query caps it means
1.1015 + //that we are using the v1 driver
1.1016 + //we construct a empty TDmacTestCaps
1.1017 + //but with an iPILVersion of 1
1.1018 + const TDmacTestCaps nullCapsV1(caps, 1);
1.1019 + aChannelCaps = nullCapsV1;
1.1020 + r = KErrNone;
1.1021 + }
1.1022 + else if(r == KErrNone)
1.1023 + {
1.1024 + const TDmacTestCaps capsV2(caps, 2);
1.1025 + aChannelCaps = capsV2;
1.1026 + }
1.1027 +
1.1028 + return r;
1.1029 + }
1.1030 +
1.1031 +/**
1.1032 +Will return the capabilities of the DMA channel.
1.1033 +Querying SDmacCaps is not possible on V1 of the DMA framework.
1.1034 +In that case an error of KErrNotSupported will be returned
1.1035 +*/
1.1036 +TInt DDmaTestSession::GetChannelCapsByCookie(TUint aDriverCookie, SDmacCaps& aChannelCaps)
1.1037 + {
1.1038 + __KTRACE_OPT(KDMA, Kern::Printf("GetChannelCapsByCookie: 0x%08x", aDriverCookie));
1.1039 + const TInt index = CookieToChannelIndex(aDriverCookie);
1.1040 + if(index >= 0)
1.1041 + {
1.1042 +#ifdef DMA_APIV2
1.1043 + aChannelCaps = iChannels[index]->DmacCaps();
1.1044 + return KErrNone;
1.1045 +#else
1.1046 + return KErrNotSupported;
1.1047 +#endif
1.1048 + }
1.1049 + else
1.1050 + {
1.1051 + return KErrNotFound;
1.1052 + }
1.1053 + }
1.1054 +
1.1055 +TInt DDmaTestSession::IsQueueEmptyByCookie(TUint aDriverCookie, TBool& aQueueEmpty)
1.1056 + {
1.1057 + __KTRACE_OPT(KDMA, Kern::Printf("IsQueueEmptyByCookie: 0x%08x", aDriverCookie));
1.1058 + const TInt index = CookieToChannelIndex(aDriverCookie);
1.1059 +
1.1060 + if(index >= 0)
1.1061 + {
1.1062 + aQueueEmpty=iChannels[index]->IsQueueEmpty();
1.1063 + return KErrNone;
1.1064 + }
1.1065 + else
1.1066 + {
1.1067 + return KErrNotFound;
1.1068 + }
1.1069 + }
1.1070 +
1.1071 +TInt DDmaTestSession::ChannelIsOpenedByCookie(TUint aDriverCookie, TBool& aChannelOpen)
1.1072 + {
1.1073 + __KTRACE_OPT(KDMA, Kern::Printf("ChannelIsOpenedByCookie: 0x%08x", aDriverCookie));
1.1074 + const TInt index = CookieToChannelIndex(aDriverCookie);
1.1075 +
1.1076 + if(index >= 0)
1.1077 + {
1.1078 + aChannelOpen=iChannels[index]->IsOpened();
1.1079 + return KErrNone;
1.1080 + }
1.1081 + else
1.1082 + {
1.1083 + return KErrNotFound;
1.1084 + }
1.1085 + }
1.1086 +
1.1087 +TInt DDmaTestSession::CreateDmaRequest(TUint aChannelCookie, TUint& aRequestCookie, TBool aNewCallback, TInt aMaxFragmentSizeBytes)
1.1088 + {
1.1089 +#ifndef DMA_APIV2
1.1090 + if(aNewCallback)
1.1091 + return KErrNotSupported;
1.1092 +#endif
1.1093 +
1.1094 + TInt channelIndex = CookieToChannelIndex(aChannelCookie);
1.1095 + if(channelIndex < 0)
1.1096 + return channelIndex;
1.1097 +
1.1098 + NKern::ThreadEnterCS();
1.1099 + DClientDmaRequest* request = DClientDmaRequest::Construct(iClient, iIsrCallbackDfcQ, *iChannels[channelIndex], aNewCallback, aMaxFragmentSizeBytes);
1.1100 + if(request == NULL)
1.1101 + {
1.1102 + NKern::ThreadLeaveCS();
1.1103 + return KErrNoMemory;
1.1104 + }
1.1105 +
1.1106 + TInt r = iClientDmaReqs.Append(request);
1.1107 + if(r == KErrNone)
1.1108 + {
1.1109 + aRequestCookie = reinterpret_cast<TUint>(request);
1.1110 + }
1.1111 + else
1.1112 + {
1.1113 + delete request;
1.1114 + }
1.1115 + NKern::ThreadLeaveCS();
1.1116 +
1.1117 + return r;
1.1118 + }
1.1119 +
1.1120 +TInt DDmaTestSession::DestroyDmaRequestByCookie(TUint aRequestCookie)
1.1121 + {
1.1122 + TInt requestIndex = CookieToRequestIndex(aRequestCookie);
1.1123 + if(requestIndex < 0)
1.1124 + return requestIndex;
1.1125 +
1.1126 + DestroyDmaRequestByIndex(requestIndex);
1.1127 +
1.1128 + return KErrNone;
1.1129 + }
1.1130 +
1.1131 +void DDmaTestSession::DestroyDmaRequestByIndex(TInt aIndex)
1.1132 + {
1.1133 + __KTRACE_OPT(KDMA, Kern::Printf("DestroyDmaRequestByIndex: %d", aIndex));
1.1134 + __NK_ASSERT_DEBUG(aIndex < iClientDmaReqs.Count());
1.1135 + NKern::ThreadEnterCS();
1.1136 +
1.1137 + DClientDmaRequest* request = iClientDmaReqs[aIndex];
1.1138 + iClientDmaReqs.Remove(aIndex);
1.1139 + delete request;
1.1140 +
1.1141 + NKern::ThreadLeaveCS();
1.1142 + }
1.1143 +
1.1144 +TInt DDmaTestSession::CreateSharedChunk()
1.1145 + {
1.1146 + // Enter critical section so we can't die and leak the objects we are creating
1.1147 + // I.e. the TChunkCleanup and DChunk (Shared Chunk)
1.1148 + NKern::ThreadEnterCS();
1.1149 +
1.1150 + // Create the chunk
1.1151 + TChunkCreateInfo info;
1.1152 + info.iType = TChunkCreateInfo::ESharedKernelSingle;
1.1153 + info.iMaxSize = KMaxChunkSize;
1.1154 + info.iMapAttr = EMapAttrFullyBlocking | EMapAttrUserRw;
1.1155 + info.iOwnsMemory = ETrue;
1.1156 + info.iDestroyedDfc = NULL;
1.1157 +
1.1158 + DChunk* chunk;
1.1159 + TUint32 mapAttr;
1.1160 + TInt r = Kern::ChunkCreate(info, chunk, iChunkBase, mapAttr);
1.1161 + if(r!=KErrNone)
1.1162 + {
1.1163 + NKern::ThreadLeaveCS();
1.1164 + return r;
1.1165 + }
1.1166 +
1.1167 + // Map our device's memory into the chunk (at offset 0)
1.1168 + TUint32 physicalAddr;
1.1169 + r = Kern::ChunkCommitContiguous(chunk,0,KMaxChunkSize, physicalAddr);
1.1170 + if(r!=KErrNone)
1.1171 + {
1.1172 + // Commit failed so tidy-up...
1.1173 + Kern::ChunkClose(chunk);
1.1174 + }
1.1175 + else
1.1176 + {
1.1177 + iChunk = chunk;
1.1178 + }
1.1179 +
1.1180 + // Can leave critical section now that we have saved pointers to created objects
1.1181 + NKern::ThreadLeaveCS();
1.1182 +
1.1183 + return r;
1.1184 + }
1.1185 +
// Create a user-side handle to the shared chunk for the current thread.
// Handle creation is done inside a critical section.
// NOTE(review): Kern::MakeHandleAndOpen returns a TInt which may be a
// negative error code, but this function's return type is TUint, so an
// error would be seen by the caller as a large positive "handle" -
// confirm callers account for this.
TUint DDmaTestSession::OpenSharedChunkHandle()
	{
	NKern::ThreadEnterCS();
	const TInt r = Kern::MakeHandleAndOpen(NULL, iChunk);
	NKern::ThreadLeaveCS();
	return r;
	}
1.1193 +
// The client supplies src/dst addresses as offsets within the shared
// chunk; rebase them to absolute addresses by adding the chunk's base
// address obtained from Kern::ChunkCreate.
void DDmaTestSession::FixupTransferArgs(TDmaTransferArgs& aTransferArgs) const
	{
	aTransferArgs.iSrcConfig.iAddr += iChunkBase;
	aTransferArgs.iDstConfig.iAddr += iChunkBase;
	}
1.1199 +
1.1200 +#ifndef DMA_APIV2
1.1201 +static TInt FragmentCount(DDmaRequest* aRequest)
1.1202 + {
1.1203 + TInt count = 0;
1.1204 + for (SDmaDesHdr* pH = aRequest->iFirstHdr; pH != NULL; pH = pH->iNext)
1.1205 + count++;
1.1206 + return count;
1.1207 + }
1.1208 +#endif
1.1209 +
1.1210 +TInt DDmaTestSession::RequestFragmentCount(TUint aRequestCookie)
1.1211 + {
1.1212 + TInt requestIndex = CookieToRequestIndex(aRequestCookie);
1.1213 + if(requestIndex < 0)
1.1214 + return requestIndex;
1.1215 +#ifdef DMA_APIV2
1.1216 + TInt r = iClientDmaReqs[requestIndex]->FragmentCount();
1.1217 +#else
1.1218 + TInt r = FragmentCount(iClientDmaReqs[requestIndex]);
1.1219 +#endif
1.1220 +
1.1221 + return r;
1.1222 + }
1.1223 +
1.1224 +TInt DDmaTestSession::FragmentRequest(TUint aRequestCookie, const TDmaTransferArgs& aTransferArgs, TBool aLegacy)
1.1225 + {
1.1226 + __KTRACE_OPT(KDMA, Kern::Printf(">FragmentRequest: cookie=0x%08x, legacy=%d", aRequestCookie, aLegacy));
1.1227 + TInt requestIndex = CookieToRequestIndex(aRequestCookie);
1.1228 + if(requestIndex < 0)
1.1229 + return requestIndex;
1.1230 +
1.1231 + TInt r = KErrNotSupported;
1.1232 + if(aLegacy)
1.1233 + {
1.1234 + // TODO we can extract the required info from the struct to
1.1235 + // set flags
1.1236 + TUint flags = KDmaMemSrc | KDmaIncSrc | KDmaMemDest | KDmaIncDest;
1.1237 +
1.1238 + const TUint src = aTransferArgs.iSrcConfig.iAddr;
1.1239 + const TUint dst = aTransferArgs.iDstConfig.iAddr;
1.1240 + r = iClientDmaReqs[requestIndex]->Fragment(src, dst, aTransferArgs.iTransferCount, flags, NULL);
1.1241 + }
1.1242 + else
1.1243 + {
1.1244 +#ifdef DMA_APIV2
1.1245 + r = iClientDmaReqs[requestIndex]->Fragment(aTransferArgs);
1.1246 +#else
1.1247 + r = KErrNotSupported;
1.1248 +#endif
1.1249 + }
1.1250 + return r;
1.1251 + }
1.1252 +
1.1253 +/**
1.1254 +Queue the request refered to by aRequestCookie
1.1255 +
1.1256 +@param aRequestCookie Client identifier for the DDmaRequest
1.1257 +@param aStatus Pointer to the client's TRequestStatus
1.1258 +@param aRecord Pointer to the client's TCallbackRecord
1.1259 +@return
1.1260 + - KErrNotFound - aRequestCookie was invalid
1.1261 + - KErrNone - Success
1.1262 +*/
1.1263 +TInt DDmaTestSession::QueueRequest(TUint aRequestCookie, TRequestStatus* aStatus, TCallbackRecord* aRecord, TUint64* aDurationMicroSecs)
1.1264 + {
1.1265 + __KTRACE_OPT(KDMA, Kern::Printf(">QueueRequest: 0x%08x", aRequestCookie));
1.1266 +
1.1267 + DClientDmaRequest* request = RequestFromCookie(aRequestCookie);
1.1268 + if(request == NULL)
1.1269 + return KErrNotFound;
1.1270 +
1.1271 + return request->Queue(aStatus, aRecord, aDurationMicroSecs);
1.1272 + }
1.1273 +
1.1274 +DClientDmaRequest* DDmaTestSession::RequestFromCookie(TUint aRequestCookie) const
1.1275 + {
1.1276 + TInt requestIndex = CookieToRequestIndex(aRequestCookie);
1.1277 + if(requestIndex < 0)
1.1278 + return NULL;
1.1279 +
1.1280 + return (iClientDmaReqs[requestIndex]);
1.1281 + }
1.1282 +
1.1283 +TDmaV2TestInfo DDmaTestSession::ConvertTestInfo(const TDmaTestInfo& aOldInfo) const
1.1284 + {
1.1285 + TDmaV2TestInfo newInfo;
1.1286 + newInfo.iMaxTransferSize = aOldInfo.iMaxTransferSize;
1.1287 + newInfo.iMemAlignMask = aOldInfo.iMemAlignMask;
1.1288 + newInfo.iMemMemPslInfo = aOldInfo.iMemMemPslInfo;
1.1289 +
1.1290 + newInfo.iMaxSbChannels = aOldInfo.iMaxSbChannels;
1.1291 + for(TInt i=0; i<aOldInfo.iMaxSbChannels; i++)
1.1292 + newInfo.iSbChannels[i] = aOldInfo.iSbChannels[i];
1.1293 +
1.1294 + newInfo.iMaxDbChannels = aOldInfo.iMaxDbChannels;
1.1295 + for(TInt i=0; i<aOldInfo.iMaxDbChannels; i++)
1.1296 + newInfo.iDbChannels[i] = aOldInfo.iDbChannels[i];
1.1297 +
1.1298 + newInfo.iMaxSgChannels = aOldInfo.iMaxSgChannels;
1.1299 + for(TInt i=0; i<aOldInfo.iMaxSgChannels; i++)
1.1300 + newInfo.iSgChannels[i] = aOldInfo.iSgChannels[i];
1.1301 +
1.1302 + //TODO will want to add initialisation for Asym channels
1.1303 + //when these are available
1.1304 +
1.1305 + return newInfo;
1.1306 + }
1.1307 +//////////////////////////////////////////////////////////////////////////////
1.1308 +
/**
Factory (logical device) object for the DMA test driver.
DDmaTestSession logical channels are created through Create().
*/
class DDmaTestFactory : public DLogicalDevice
	{
public:
	DDmaTestFactory();
	// from DLogicalDevice
	virtual ~DDmaTestFactory()
		{
		__KTRACE_OPT(KDMA, Kern::Printf(">DDmaTestFactory::~DDmaTestFactory"));
		}
	virtual TInt Install();
	virtual void GetCaps(TDes8& aDes) const;
	virtual TInt Create(DLogicalChannelBase*& aChannel);
	};
1.1322 +
1.1323 +
1.1324 +DDmaTestFactory::DDmaTestFactory()
1.1325 + {
1.1326 + iVersion = TestDmaLddVersion();
1.1327 + iParseMask = KDeviceAllowUnit; // no info, no PDD
1.1328 + // iUnitsMask = 0; // Only one thing
1.1329 + }
1.1330 +
1.1331 +
1.1332 +TInt DDmaTestFactory::Create(DLogicalChannelBase*& aChannel)
1.1333 + {
1.1334 + aChannel=new DDmaTestSession;
1.1335 + return aChannel ? KErrNone : KErrNoMemory;
1.1336 + }
1.1337 +
1.1338 +
1.1339 +TInt DDmaTestFactory::Install()
1.1340 + {
1.1341 + return SetName(&KTestDmaLddName);
1.1342 + }
1.1343 +
1.1344 +
// No device capabilities are reported by this test driver.
void DDmaTestFactory::GetCaps(TDes8& /*aDes*/) const
	{
	}
1.1348 +
1.1349 +//////////////////////////////////////////////////////////////////////////////
1.1350 +
// Kernel entry point: create the LDD factory object for this driver.
DECLARE_STANDARD_LDD()
	{
	return new DDmaTestFactory;
	}