os/kernelhwsrv/kerneltest/e32test/dmav2/d_dma2.cpp
     1 // Copyright (c) 2002-2009 Nokia Corporation and/or its subsidiary(-ies).
     2 // All rights reserved.
     3 // This component and the accompanying materials are made available
     4 // under the terms of "Eclipse Public License v1.0"
     5 // which accompanies this distribution, and is available
     6 // at the URL "http://www.eclipse.org/legal/epl-v10.html".
     7 //
     8 // Initial Contributors:
     9 // Nokia Corporation - initial contribution.
    10 //
    11 // Contributors:
    12 //
    13 // Description:
    14 // Test driver for DMA V2 framework
    15 //
    16 //
    17 
    18 #include <kernel/kern_priv.h>
    19 #include <drivers/dma.h>
    20 #include "d_dma2.h"
    21 
    22 _LIT(KClientPanicCat, "D_DMA2");
    23 _LIT(KDFCThreadName,"D_DMA_DFC_THREAD");
    24 _LIT(KIsrCbDfcThreadName,"D_DMA_IsrCb_thread");
    25 const TInt KDFCThreadPriority=26;
    26 
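        // Simple stop-watch built on NKern::FastCounter(); used below to time
        // request fragmentation and transfer completion in fast-counter ticks,
        // converted to microseconds on demand.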
    27 class TStopwatch
    28 	{
    29 public:
    30 	TStopwatch()
    31 		:iStart(0), iStop(0)
    32 		{}
    33 
    34 	void Start()
    35 		{iStart = NKern::FastCounter();}
    36 
    37 	void Stop()
    38 		{
    39 		iStop = NKern::FastCounter();
    40 
    41 		__KTRACE_OPT(KDMA, Kern::Printf(">TStopwatch::Stop FastCounter ticks: iStart=0x%lx iStop=0x%lx", iStart, iStop));
    42 		}
    43 
    44 	TUint64 ReadMicroSecs() const
    45 		{
    46 #ifndef __SMP__
    47 		TUint64 diff = 0;
    48 		if(iStart > iStop)
    49 			{
    50 			diff = (KMaxTUint64 - iStart) + iStop;
    51 			}
    52 		else
    53 			{
    54 			diff = iStop - iStart;
    55 			}
    56 		return FastCountToMicroSecs(diff);
    57 #else
    58 		//TODO On SMP it is possible for the value returned from
    59 		//NKern::FastCounter to depend on the current CPU (ie.
    60 		//NaviEngine)
    61 		//
    62 		//One solution would be to tie DFC's and ISR's to the same
    63 		//core as the client, but this would reduce the usefulness of
    64 		//SMP testing.
    65 		return 0;
    66 #endif
    67 		}
    68 private:
    69 
    70 	TUint64 FastCountToMicroSecs(TUint64 aCount) const
    71 		{
    72 		const TUint64 countsPerS = NKern::FastCounterFrequency();
    73 
    74 		TUint64 timeuS = (aCount*1000000)/countsPerS;
    75 		__KTRACE_OPT(KDMA, Kern::Printf(">TStopwatch::FastCountToMicroSecs FastCounter ticks: aCount=0x%lx countsPerS=0x%lx time=0x%lx", aCount, countsPerS, timeuS));
    76 		return timeuS;
    77 		}
    78 
    79 	TUint64 iStart;
    80 	TUint64 iStop;
    81 	};
    82 
    83 //////////////////////////////////////////////////////////////////////////////
    84 
    85 class DClientDmaRequest;
    86 /**
    87 Driver channel. Only accessible by a single client thread
    88 */
    89 class DDmaTestSession : public DLogicalChannelBase
    90 	{
    91 public:
    92 	DDmaTestSession();
    93 	virtual ~DDmaTestSession();
    94 protected:
    95 	// from DLogicalChannelBase
    96 	virtual TInt DoCreate(TInt aUnit, const TDesC8* anInfo, const TVersion& aVer);
    97 	virtual TInt Request(TInt aFunction, TAny* a1, TAny* a2);
    98 	virtual TInt RequestUserHandle(DThread* aThread, TOwnerType aType);
    99 private:
   100 	TInt DoGetInfo(TAny* aInfo);
   101 
   102 	TInt OpenDmaChannel(TUint aPslCookie, TUint& aDriverCookie);
   103 	TInt CloseDmaChannelByCookie(TUint aDriverCookie);
   104 	TInt PauseDmaChannelByCookie(TUint aDriverCookie);
   105 	TInt ResumeDmaChannelByCookie(TUint aDriverCookie);
   106 	TInt GetChannelCapsByCookie(TUint aDriverCookie, SDmacCaps& aChannelCaps);
   107 	TInt GetChannelCapsByCookie(TUint aDriverCookie, TDmacTestCaps& aChannelCaps);
   108 	TInt CancelAllByCookie(TUint aDriverCookie);
   109 	TInt IsrRedoRequestByCookie(TUint aDriverCookie,TUint32 aSrcAddr,TUint32 aDstAddr,TInt aTransferCount,TUint32 aPslRequestInfo,TBool aIsrCb);
   110 	TInt IsQueueEmptyByCookie(TUint aDriverCookie, TBool& aQueueEmpty);		
   111 	TInt ChannelIsOpenedByCookie(TUint aDriverCookie, TBool& aChannelOpen);		
   112 	void CloseDmaChannelByIndex(TInt aIndex);
   113 	void CancelAllByIndex(TInt aIndex);
   114 	TInt PauseDmaChannelByIndex(TInt aIndex);
   115 	TInt ResumeDmaChannelByIndex(TInt aIndex);		
   116 	TInt IsrRedoRequestByIndex(TInt aIndex,TUint32 aSrcAddr,TUint32 aDstAddr,TInt aTransferCount,TUint32 aPslRequestInfo,TBool aIsrCb);
   117 	TInt CreateSharedChunk();
   118 	TUint OpenSharedChunkHandle();
   119 
   120 	/**
   121 	Creates a new kernel-side DMA request object, associated with a previously
   122 	opened channel
   123 
   124 	@param aChannelCookie - A channel cookie as returned by OpenDmaChannel
    125 	@param aRequestCookie - On success, set to a cookie by which the DMA request can be referred to
   126 	@param aNewCallback - If true, then a new style DMA callback will be used
   127 	*/
   128 	TInt CreateDmaRequest(TUint aChannelCookie, TUint& aRequestCookie, TBool aNewCallback = EFalse, TInt aMaxFragmentSizeBytes=0);
   129 
   130 	//TODO what happens if a client closes a channel that
   131 	//it still has dma requests associated with?
   132 	
   133 	/**
   134 	Destroys a previously created dma request object
   135 	*/
   136 	TInt DestroyDmaRequestByCookie(TUint aRequestCookie);
   137 
   138 	void DestroyDmaRequestByIndex(TInt aIndex);
   139 
   140 
   141 	TInt CookieToChannelIndex(TUint aDriverCookie) const;
   142 	TInt CookieToRequestIndex(TUint aRequestCookie) const;
   143 
   144 	void FixupTransferArgs(TDmaTransferArgs& aTransferArgs) const;
   145 	TInt FragmentRequest(TUint aRequestCookie, const TDmaTransferArgs& aTransferArgs, TBool aLegacy=ETrue);
   146 
   147 	TInt QueueRequest(TUint aRequestCookie, TRequestStatus* aStatus, TCallbackRecord* aRecord, TUint64* aDurationMicroSecs);
   148 	DClientDmaRequest* RequestFromCookie(TUint aRequestCookie) const;
   149 	TInt RequestFragmentCount(TUint aRequestCookie);
   150 
   151 	TDmaV2TestInfo ConvertTestInfo(const TDmaTestInfo& aOldInfo) const;
   152 private:
   153 	DThread* iClient;
   154 	TDynamicDfcQue* iDfcQ;
   155 	TDynamicDfcQue* iIsrCallbackDfcQ; // Will be used by requests which complete with an ISR callback
   156 	static const TInt KMaxChunkSize = 8 * KMega;
   157 	TLinAddr iChunkBase;
   158 	DChunk* iChunk;
   159 
   160 	RPointerArray<TDmaChannel> iChannels;
   161 	RPointerArray<DClientDmaRequest> iClientDmaReqs;
   162 	};
   163 
   164 
   165 /**
   166 Allows a TClientRequest to be associated with a DDmaRequest
   167 */
   168 class DClientDmaRequest : public DDmaRequest
   169 	{
   170 public:
   171 	static DClientDmaRequest* Construct(DThread* aClient, TDfcQue* const aDfcQ, TDmaChannel& aChannel, TBool aNewStyle=EFalse, TInt aMaxTransferSize=0);
   172 	~DClientDmaRequest();
   173 
   174 	TInt Queue(TRequestStatus* aRequestStatus, TCallbackRecord* aRecord, TUint64* aDurationMicroSecs);
   175 	void AddRequeArgs(const TIsrRequeArgsSet& aRequeArgSet);
   176 
   177 	TUint64 GetDuration()
   178 		{return iStopwatch.ReadMicroSecs();}
   179 
   180 protected:
   181 	TInt Create();
   182 	/** Construct with old style callback */
   183 	DClientDmaRequest(DThread* aClient, TDfcQue* const aDfcQ, TDmaChannel& aChannel, TInt aMaxTransferSize);
   184 
   185 	/** Construct with new style callback */
   186 	DClientDmaRequest(DThread* aClient, TDfcQue* const aDfcQ, TDmaChannel& aChannel, TBool aNewStyle, TInt aMaxTransferSize);
   187 
   188 private:
   189 	static void CallbackOldStyle(TResult aResult, TAny* aRequest);
   190 	static void Callback(TUint, TDmaResult, TAny*, SDmaDesHdr*);
   191 	static void CompleteCallback(TAny* aRequest);
   192 
   193 	void DoCallback(TUint, TDmaResult);
   194 	TBool RedoRequest();
   195 
   196 	//!< Used to return a TCallbackRecord and transfer time
   197 	TClientDataRequest2<TCallbackRecord, TUint64>* iClientDataRequest;
   198 
   199 	DThread* const iClient;
    200 	TDfcQue* const iDfcQ; //!< The DDmaTestSession's ISR callback DFC queue
   201 	TDfc iDfc;
   202 
   203 	TStopwatch iStopwatch;
   204 	TIsrRequeArgsSet iIsrRequeArgSet;
   205 	};
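        // Lifecycle: the session Construct()s one of these per user-side request
        // handle, Fragment()s it through the DDmaRequest base class, Queue()s it
        // with the client's TRequestStatus, and the framework callback eventually
        // completes the TClientDataRequest2 back to the user side.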
   206 
   207 DClientDmaRequest* DClientDmaRequest::Construct(DThread* aClient, TDfcQue* const aDfcQ, TDmaChannel& aChannel, TBool aNewStyle, TInt aMaxTransferSize)
   208 	{
   209 	DClientDmaRequest* dmaRequest = NULL;
   210 	if(aNewStyle)
   211 		{
   212 #ifdef DMA_APIV2
   213 		dmaRequest = new DClientDmaRequest(aClient, aDfcQ, aChannel, aNewStyle, aMaxTransferSize);
   214 #else
   215 		TEST_FAULT; // if a new style dma request was requested it should have been caught earlier
   216 #endif
   217 		}
   218 	else
   219 		{
   220 		dmaRequest = new DClientDmaRequest(aClient, aDfcQ, aChannel, aMaxTransferSize);
   221 		}
   222 
   223 	if(dmaRequest == NULL)
   224 		{
   225 		return dmaRequest;
   226 		}
   227 
   228 	const TInt r = dmaRequest->Create();
   229 	if(r != KErrNone)
   230 		{
   231 		delete dmaRequest;
   232 		dmaRequest = NULL;
   233 		}
   234 	return dmaRequest;
   235 	}
   236 
   237 DClientDmaRequest::DClientDmaRequest(DThread* aClient, TDfcQue* const aDfcQ, TDmaChannel& aChannel, TInt aMaxFragmentSize)
   238 	:DDmaRequest(aChannel, &CallbackOldStyle, this, aMaxFragmentSize),
   239 	iClientDataRequest(NULL),
   240 	iClient(aClient),
   241 	iDfcQ(aDfcQ),
   242 	iDfc(CompleteCallback,NULL, iDfcQ, KMaxDfcPriority)
   243 	{
   244 	}
   245 #ifdef DMA_APIV2
   246 DClientDmaRequest::DClientDmaRequest(DThread* aClient, TDfcQue* const aDfcQ, TDmaChannel& aChannel, TBool /*aNewStyle*/, TInt aMaxFragmentSize)
   247 	:DDmaRequest(aChannel, &Callback, this, aMaxFragmentSize),
   248 	iClientDataRequest(NULL),
   249 	iClient(aClient),
   250 	iDfcQ(aDfcQ),
   251 	iDfc(CompleteCallback,NULL, iDfcQ, KMaxDfcPriority)
   252 	{
   253 	}
   254 #endif
   255 
   256 TInt DClientDmaRequest::Create()
   257 	{
   258 	return Kern::CreateClientDataRequest2(iClientDataRequest);
   259 	}
   260 
   261 DClientDmaRequest::~DClientDmaRequest()
   262 	{
   263 	__KTRACE_OPT(KDMA, Kern::Printf(">DClientDmaRequest::~DClientDmaRequest")); 
   264 	if(iClientDataRequest)
   265 		{
   266 		Kern::DestroyClientRequest(iClientDataRequest);
   267 		}
   268 	}
   269 
   270 /**
   271 Queue the DClientDmaRequest.
   272 
   273 @param aRequestStatus Pointer to the client's request status
    274 @param aRecord Pointer to the user's TCallbackRecord; must not be NULL
    275 @return
    276    - KErrInUse The client request is already in use
    277    - KErrNone Success
   278 */
   279 TInt DClientDmaRequest::Queue(TRequestStatus* aRequestStatus, TCallbackRecord* aRecord, TUint64* aDurationMicroSecs)
   280 	{
   281 	__NK_ASSERT_ALWAYS(aRecord);
   282 	__NK_ASSERT_ALWAYS(aDurationMicroSecs);
   283 
   284 	//erase results from last transfer
   285 	iClientDataRequest->Data1().Reset();
   286 	iClientDataRequest->SetDestPtr1(aRecord);
   287 
   288 	iClientDataRequest->SetDestPtr2(aDurationMicroSecs);
   289 
   290 
   291 	TInt r = iClientDataRequest->SetStatus(aRequestStatus);
   292 	if(r != KErrNone)
   293 		{
   294 		return r;
   295 		}
   296 
   297 	iStopwatch.Start();
   298 #ifdef DMA_APIV2
   299 	r = DDmaRequest::Queue();
   300 #else
   301 	// old version of queue did not return an error code
   302 	DDmaRequest::Queue();
   303 	r = KErrNone;
   304 #endif
   305 
   306 	return r;
   307 	}
   308 
   309 void DClientDmaRequest::AddRequeArgs(const TIsrRequeArgsSet& aRequeArgSet)
   310 	{
   311 	iIsrRequeArgSet = aRequeArgSet;
   312 	}
   313 
   314 /**
    315 If a transfer-complete callback is received in ISR context, this will be
    316 called to redo the request with the first entry in the array.
    317 
    318 @return ETrue if the redo was successful - indicates that another callback is coming
   319 */
   320 TBool DClientDmaRequest::RedoRequest()
   321 	{
   322 	TIsrRequeArgs args = iIsrRequeArgSet.GetArgs();
   323 	const TInt r = args.Call(iChannel);
   324 	TCallbackRecord& record = iClientDataRequest->Data1();
   325 	record.IsrRedoResult(r);
   326 	return (r == KErrNone);
   327 	}
   328 
   329 
   330 /**
   331 Calls TDmaChannel::IsrRedoRequest on aChannel
   332 with this object's parameters
   333 */
   334 TInt TIsrRequeArgs::Call(TDmaChannel& aChannel)
   335 	{
   336 #ifdef DMA_APIV2
   337 	return aChannel.IsrRedoRequest(iSrcAddr, iDstAddr, iTransferCount, iPslRequestInfo, iIsrCb);
   338 #else
   339 	TEST_FAULT;
   340 	return KErrNotSupported;
   341 #endif
   342 	}
   343 
   344 /**
   345 Check that both source and destination of ISR reque args will
   346 lie within the range specified by aStart and aSize.
   347 
   348 @param aStart The linear base address of the region
   349 @param aSize The size of the region
   350 */
   351 TBool TIsrRequeArgs::CheckRange(TLinAddr aStart, TUint aSize) const
   352 	{
   353 	TUint physStart = Epoc::LinearToPhysical(aStart);
   354 	TEST_ASSERT(physStart != KPhysAddrInvalid);
   355 
   356 	TAddrRange chunk(physStart, aSize);
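        	// KPhysAddrInvalid indicates that the original transfer address will be
        	// reused, so there is nothing new to range-check for that end.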
   357 	TBool sourceOk = (iSrcAddr == KPhysAddrInvalid) ? ETrue : chunk.Contains(SourceRange());
   358 
   359 	TBool destOk = (iDstAddr == KPhysAddrInvalid) ? ETrue : chunk.Contains(DestRange());
   360 
   361 	return sourceOk && destOk;
   362 	}
   363 
   364 TBool TIsrRequeArgsSet::CheckRange(TLinAddr aAddr, TUint aSize) const
   365 	{
   366 	for(TInt i=0; i<iCount; i++)
   367 		{
   368 		if(!iRequeArgs[i].CheckRange(aAddr, aSize))
   369 			return EFalse;
   370 		}
   371 	return ETrue;
   372 	}
   373 
   374 /**
   375 Translate an old style dma callback to a new-style one
   376 */
   377 void DClientDmaRequest::CallbackOldStyle(TResult aResult, TAny* aArg)
   378 	{
   379 	__KTRACE_OPT(KDMA, Kern::Printf(">DClientDmaRequest::CallBackOldStyle: TResult result=%d", aResult));
   380 	TEST_ASSERT(aResult != EBadResult);
   381 	//translate result code
   382 	const TDmaResult result = (aResult == EOk) ? EDmaResultOK : EDmaResultError;
   383 
   384 	//call the new-style callback
   385 	Callback(EDmaCallbackRequestCompletion, result, aArg, NULL);
   386 	}
   387 
   388 
   389 /**
    390 The new-style callback called by the DMA framework.
    391 May be called in either thread or ISR context.
   392 */
   393 void DClientDmaRequest::Callback(TUint aCallbackType, TDmaResult aResult, TAny* aArg, SDmaDesHdr* aHdr)
   394 	{
   395 	const TInt context = NKern::CurrentContext();
   396 	__KTRACE_OPT(KDMA, Kern::Printf(">DClientDmaRequest::CallBack: TDmaResult result = %d, NKern::TContext context = %d", aResult, context));
   397 	
   398 	DClientDmaRequest& self = *reinterpret_cast<DClientDmaRequest*>(aArg);
   399 	self.DoCallback(aCallbackType, aResult);
   400 
   401 	// decide if callback is complete
   402 	const TBool transferComplete = aCallbackType & EDmaCallbackRequestCompletion;
   403 	if(!transferComplete)
   404 		{
   405 		return;
   406 		}
   407 
   408 	// If there are reque args then redo this request
   409 	// another callback would then be expected.
   410 	// Requests can only be re-queued in ISR context, but we
   411 	// do not check that here as it is up to the client to get
   412 	// it right - also, we want to test that the PIL catches this
   413 	// error
   414 	if(!self.iIsrRequeArgSet.IsEmpty())
   415 		{
    416 		// If the redo call was successful, return and wait for the next callback
   417 		if(self.RedoRequest())
   418 			return;
   419 		}
   420 
   421 	switch(context)
   422 		{
   423 	case NKern::EThread:
   424 		{
   425 		CompleteCallback(aArg);
   426 		break;
   427 		}
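        	// In ISR context we cannot signal the client directly, so queue
        	// CompleteCallback() as a DFC on the session's separate ISR-callback
        	// DFC queue (passed in as aDfcQ at construction time).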
   428 	case NKern::EInterrupt:
   429 		{
   430 		self.iDfc.iPtr = aArg;
   431 		self.iDfc.Add();
   432 		break;
   433 		}
   434 	case NKern::EIDFC: //fall-through
   435 	case NKern::EEscaped:
   436 	default:
   437 		TEST_FAULT;
   438 		}
   439 	}
   440 
   441 /**
   442 Log results of callback. May be called in either thread or ISR context
   443 */
   444 void DClientDmaRequest::DoCallback(TUint aCallbackType, TDmaResult aResult)
   445 	{
    446 	iStopwatch.Stop(); // successive calls will simply overwrite the stop time
   447 
   448 	// This will always be done whether the client requested a
   449 	// callback record or not
   450 	TCallbackRecord& record = iClientDataRequest->Data1();
   451 	record.ProcessCallback(aCallbackType, aResult);
   452 	}
   453 
   454 /**
   455 This function may either be called directly or queued as a DFC
   456 */
   457 void DClientDmaRequest::CompleteCallback(TAny* aArg)
   458 	{
   459 	__KTRACE_OPT(KDMA, Kern::Printf(">DClientDmaRequest::CompleteCallBack thread %O", &Kern::CurrentThread()));
   460 	__ASSERT_NOT_ISR;
   461 
   462 	DClientDmaRequest& self = *reinterpret_cast<DClientDmaRequest*>(aArg);
   463 
   464 	self.iClientDataRequest->Data2() = self.iStopwatch.ReadMicroSecs();
   465 
    466 	// Assert that SetStatus() was called on this object before
    467 	// queueing
   468 	__NK_ASSERT_DEBUG(self.iClientDataRequest->IsReady());
   469 
   470 	// This is an inelegant, temporary, solution to the following problem:
   471 	//
   472 	// If a dma request completes with an ISR callback the test
   473 	// framework will queue this function as a DFC which
   474 	// will then signal the user-side client. As a consequence of
   475 	// this the user side client may then decide to destroy this
    476 	// request. However, until the DMA framework's DFC has run
   477 	// and called OnDeque() on this request, it is still considered as
   478 	// queued. Since it is possible that this DFC could run
   479 	// before the DMA fw's DFC, this request could get destroyed while
    480 	// it is still queued, triggering a PIL assertion.
   481 	//
    482 	// The real fix is likely to be for the PIL to call the callback
    483 	// twice, but with different arguments: once to announce the
   484 	// ISR and again to announce the dequeue.
   485 	//
   486 	// Here we poll and wait for this request to be dequeued. Note,
   487 	// this DFC is currently run on a separate DFC queue, otherwise
   488 	// it could get deadlocked. An alternative to polling would be
   489 	// to use DCondVar, but that would require PIL modification
   490 
   491 	if(NKern::CurrentThread() == self.iDfcQ->iThread)
   492 		{
   493 		// Only need to poll if we aren't on the channel's DFC queue
   494 		for(;;)
   495 			{
   496 			// once the request has been unqueued it
   497 			// can only be queued again by the client
   498 			const TBool queued = __e32_atomic_load_acq32(&self.iQueued);
   499 			if(!queued)
   500 				break;
    501 			__KTRACE_OPT(KDMA, Kern::Printf("Waiting for request to be dequeued"));
   502 			NKern::Sleep(10);
   503 			}
   504 		}
   505 	else
   506 		{
   507 		// If we are on the channel's DFCQ we should be dequeued
   508 		// already
   509 		__NK_ASSERT_DEBUG(!__e32_atomic_load_acq32(&self.iQueued));
   510 		}
   511 
   512 	// We can always complete with KErrNone, the actual DMA result is
   513 	// logged in the TCallbackRecord
   514 	Kern::QueueRequestComplete(self.iClient, self.iClientDataRequest, KErrNone);
   515 	}
   516 
   517 const TInt DDmaTestSession::KMaxChunkSize;
   518 
   519 TInt DDmaTestSession::RequestUserHandle(DThread* aThread, TOwnerType aType)
   520 	{
   521 	if (aType!=EOwnerThread || aThread!=iClient)
   522 		return KErrAccessDenied;
   523 	return KErrNone;
   524 	}
   525 
   526 DDmaTestSession::DDmaTestSession()
   527 	: iClient(NULL), iDfcQ(NULL), iIsrCallbackDfcQ(NULL), iChunkBase(0), iChunk(NULL)
   528 	{}
   529 
   530 // called in thread critical section
   531 TInt DDmaTestSession::DoCreate(TInt /*aUnit*/, const TDesC8* aInfo, const TVersion& /*aVer*/)
   532 	{
   533 	__NK_ASSERT_ALWAYS(iDfcQ == NULL);
   534 	__NK_ASSERT_ALWAYS(iIsrCallbackDfcQ == NULL);
   535 
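        	// Two DFC queues are created: iDfcQ drives the DMA channels themselves,
        	// while iIsrCallbackDfcQ is used only to complete requests that finish
        	// with an ISR callback (see DClientDmaRequest::CompleteCallback for why
        	// this must be a separate queue).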
   536 	TInt r = Kern::DynamicDfcQCreate(iDfcQ, KDFCThreadPriority, KDFCThreadName);
   537 	if (r != KErrNone)
   538 		return r;
   539 	NKern::ThreadSetCpuAffinity((NThread*)(iDfcQ->iThread), KCpuAffinityAny);
   540 
   541 	r = Kern::DynamicDfcQCreate(iIsrCallbackDfcQ, KDFCThreadPriority, KIsrCbDfcThreadName);
   542 	if (r != KErrNone)
   543 		return r;
   544 	NKern::ThreadSetCpuAffinity((NThread*)(iIsrCallbackDfcQ->iThread), KCpuAffinityAny);
   545 
   546 	iClient = &Kern::CurrentThread();
   547 
   548 	r = CreateSharedChunk();
   549 	return r;
   550 	}
   551 
   552 DDmaTestSession::~DDmaTestSession()
   553 	{
   554 	//Destroy requests before channels
   555 	//or we will trigger an assertion
   556 	while(iClientDmaReqs.Count())
   557 		{
   558 		DestroyDmaRequestByIndex(0);
   559 		}
   560 	iClientDmaReqs.Close();
   561 
   562 	while(iChannels.Count())
   563 		{
   564 		CloseDmaChannelByIndex(0);
   565 		}
   566 	iChannels.Close();
   567 
   568 
   569 	if (iDfcQ)
   570 		{
   571 		iDfcQ->Destroy();
   572 		}
   573 
   574 	if (iIsrCallbackDfcQ)
   575 		{
   576 		iIsrCallbackDfcQ->Destroy();
   577 		}
   578 
   579 	if(iChunk)
   580 		{
   581 		Kern::ChunkClose(iChunk);
   582 		iChunk = NULL;
   583 		}
   584 	}
   585 
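        // Single dispatcher for all user-side requests. a1/a2 carry either raw
        // cookies or packaged argument structs living in the client's address
        // space, so they are copied in and out with the kumem/KUDes APIs.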
   586 TInt DDmaTestSession::Request(TInt aFunction, TAny* a1, TAny* a2)
   587 	{
   588 	__NK_ASSERT_DEBUG(&Kern::CurrentThread() == iClient);
   589 
   590 	switch (aFunction)
   591 		{
   592 	case RDmaSession::EOpenChannel:
   593 			{
   594 			TUint pslCookie = (TUint)a1;
   595 			TUint driverCookie = 0;
   596 			TInt r = OpenDmaChannel(pslCookie, driverCookie);	
   597 			umemput32(a2, &driverCookie, sizeof(TAny*));
   598 			return r;
   599 			}
   600 	case RDmaSession::ECloseChannel:
   601 			{
   602 			TUint driverCookie = reinterpret_cast<TUint>(a1);
   603 			TInt r = CloseDmaChannelByCookie(driverCookie);
   604 			return r;
   605 			}
   606 	case RDmaSession::EChannelCaps:
   607 			{
   608 			TUint driverCookie = reinterpret_cast<TUint>(a1);
   609 			TPckgBuf<TDmacTestCaps> capsBuf;
   610 			TInt r = GetChannelCapsByCookie(driverCookie, capsBuf());
   611 			Kern::KUDesPut(*reinterpret_cast<TDes8*>(a2), capsBuf);
   612 			return r;
   613 			}
   614 	case RDmaSession::EPauseChannel:
   615 			{
   616 			TUint driverCookie = reinterpret_cast<TUint>(a1);
   617 			TInt r = PauseDmaChannelByCookie(driverCookie);
   618 			return r;
   619 			}
   620 	case RDmaSession::EResumeChannel:
   621 			{
   622 			TUint driverCookie = reinterpret_cast<TUint>(a1);
   623 			TInt r = ResumeDmaChannelByCookie(driverCookie);
   624 			return r;
   625 			}
   626 	case RDmaSession::EFragmentCount:
   627 			{
   628 			TUint requestCookie = reinterpret_cast<TUint>(a1);
   629 			TInt r = RequestFragmentCount(requestCookie);
   630 			return r;
   631 			}
   632 	case RDmaSession::ERequestOpen:
   633 			{
   634 			RDmaSession::TRequestCreateArgs createArgs(0, EFalse, 0);
   635 			TPckg<RDmaSession::TRequestCreateArgs> package(createArgs);
   636 			Kern::KUDesGet(package, *reinterpret_cast<TDes8*>(a1));
   637 
   638 			const TUint channelCookie = createArgs.iChannelCookie;
   639 			TUint requestCookie = 0;
   640 
   641 			TInt r = CreateDmaRequest(channelCookie, requestCookie, createArgs.iNewStyle, createArgs.iMaxFragmentSize);
   642 
   643 			umemput32(a2, &requestCookie, sizeof(TAny*));
   644 			return r;
   645 			}
   646 	case RDmaSession::ERequestClose:
   647 			{
   648 			const TUint requestCookie = reinterpret_cast<TUint>(a1);
   649 			return DestroyDmaRequestByCookie(requestCookie);
   650 			}
   651 	case RDmaSession::EFragmentLegacy:
   652 	case RDmaSession::EFragment:
   653 			{
   654 			TPckgBuf<RDmaSession::TFragmentArgs> argsBuff;
   655 			Kern::KUDesGet(argsBuff, *reinterpret_cast<TDes8*>(a1));
   656 			const TUint requestCookie = argsBuff().iRequestCookie;
   657 
   658 			//must remove constness as we actually need to
   659 			//convert the src and dst offsets to addresses
   660 			TDmaTransferArgs& transferArgs = const_cast<TDmaTransferArgs&>(argsBuff().iTransferArgs);
   661 
   662 			//convert address offsets in to kernel virtual addresses
   663 			FixupTransferArgs(transferArgs);
   664 
   665 			TEST_ASSERT((TAddressParms(transferArgs).CheckRange(iChunkBase, iChunk->Size())));
   666 
   667 			TInt r = KErrGeneral;
   668 
   669 			TStopwatch clock;
   670 			clock.Start();
   671 			switch (aFunction)
   672 				{
   673 			case RDmaSession::EFragmentLegacy:
   674 				r = FragmentRequest(requestCookie, transferArgs, ETrue); break;
   675 			case RDmaSession::EFragment:
   676 				r = FragmentRequest(requestCookie, transferArgs, EFalse); break;
   677 			default:
   678 				TEST_FAULT;
   679 				}
   680 			clock.Stop();
   681 
   682 			const TUint64 time = clock.ReadMicroSecs();
   683 
   684 			TUint64* const timePtr = argsBuff().iDurationMicroSecs;
   685 			if(timePtr)
   686 				{
   687 				umemput(timePtr, &time, sizeof(time));
   688 				}
   689 			return r;
   690 			}
   691 	case RDmaSession::EQueueRequest:
   692 			{
   693 			TPckgBuf<RDmaSession::TQueueArgs> argsBuff;
   694 			Kern::KUDesGet(argsBuff, *reinterpret_cast<TDes8*>(a1));
   695 
    696 			// This is an asynchronous request
   697 			const TUint requestCookie = argsBuff().iRequestCookie;
   698 			TRequestStatus* requestStatus = argsBuff().iStatus;
   699 			TCallbackRecord* callbackRec = argsBuff().iCallbackRecord;
   700 			TUint64* duration = argsBuff().iDurationMicroSecs;
   701 
   702 			TInt r = QueueRequest(requestCookie, requestStatus, callbackRec, duration);
   703 			if(r != KErrNone)
   704 				{
   705 				Kern::RequestComplete(requestStatus, r);
   706 				}
   707 			return r;
   708 			}	
   709 	case RDmaSession::EQueueRequestWithReque:
   710 			{
   711 			//TODO can common code with EQueueRequest be extracted?
   712 			TPckgBuf<RDmaSession::TQueueArgsWithReque> argsBuff;
   713 			Kern::KUDesGet(argsBuff, *reinterpret_cast<TDes8*>(a1));
   714 
    715 			// This is an asynchronous request
   716 			const TUint requestCookie = argsBuff().iRequestCookie;
   717 			TRequestStatus* requestStatus = argsBuff().iStatus;
   718 			TCallbackRecord* callbackRec = argsBuff().iCallbackRecord;
   719 			TUint64* duration = argsBuff().iDurationMicroSecs;
   720 
   721 			TInt r = KErrNotFound;
   722 
   723 			DClientDmaRequest* const request = RequestFromCookie(requestCookie);
   724 			if(request != NULL)
   725 				{
   726 				argsBuff().iRequeSet.Fixup(iChunkBase);
   727 				//TODO reque args must be substituted in order to
   728 				//check the range. The original transfer args are not
   729 				//available when queue is called, they could
   730 				//however be stored within DClientDmaRequest
   731 				//TEST_ASSERT((argsBuff().iRequeSet.CheckRange(iChunkBase, iChunk->Size())));
   732 				request->AddRequeArgs(argsBuff().iRequeSet);
   733 
   734 				r = QueueRequest(requestCookie, requestStatus, callbackRec, duration);
   735 				}
   736 
   737 			if(r != KErrNone)
   738 				{
   739 				Kern::RequestComplete(requestStatus, r);
   740 				}
   741 			return r;
   742 			}
   743 	case RDmaSession::EIsrRedoRequest:
   744 			{
   745 			TPckgBuf<RDmaSession::TIsrRedoReqArgs> argsBuff;
   746 			Kern::KUDesGet(argsBuff, *reinterpret_cast<TDes8*>(a1));
   747 
   748 			const TUint driverCookie = argsBuff().iDriverCookie;
   749 			const TUint32 srcAddr = argsBuff().iSrcAddr;
   750 			const TUint32 dstAddr = argsBuff().iDstAddr;
   751 			const TInt transferCount = argsBuff().iTransferCount;
   752 			const TUint32 pslRequestInfo = argsBuff().iPslRequestInfo;
   753 			const TBool isrCb = argsBuff().iIsrCb;
   754 
   755 			TInt r = IsrRedoRequestByCookie(driverCookie,srcAddr,dstAddr,transferCount,pslRequestInfo,isrCb);
   756 			return r;
   757 			}
   758 	case RDmaSession::EIsOpened:
   759 			{
   760 			TUint driverCookie = (TUint)a1;
    761 			TBool channelOpen = EFalse;
   762 			TInt r = ChannelIsOpenedByCookie(driverCookie,channelOpen);	
   763 			umemput32(a2, &channelOpen, sizeof(TAny*));
   764 			return r;		
   765 			}
   766 	case RDmaSession::EIsQueueEmpty:
   767 			{
   768 			TUint driverCookie = (TUint)a1;
    769 			TBool queueEmpty = EFalse;
   770 			TInt r = IsQueueEmptyByCookie(driverCookie,queueEmpty);	
   771 			umemput32(a2, &queueEmpty, sizeof(TAny*));
   772 			return r;
   773 			}
   774 	case RDmaSession::ECancelAllChannel:
   775 			{
   776 			TUint driverCookie = reinterpret_cast<TUint>(a1);
   777 			TInt r = CancelAllByCookie(driverCookie);
   778 			return r;
   779 			}
   780 	case RDmaSession::EOpenSharedChunk:
   781 			{
   782 			return OpenSharedChunkHandle();
   783 			}
   784 	case RDmaSession::EGetTestInfo:
   785 			{
   786 #ifdef DMA_APIV2
   787 			TPckgC<TDmaV2TestInfo> package(DmaTestInfoV2());
   788 #else
   789 			TPckgC<TDmaV2TestInfo> package(ConvertTestInfo(DmaTestInfo()));
   790 #endif
   791 			Kern::KUDesPut(*reinterpret_cast<TDes8*>(a1), package);
   792 			return KErrNone;
   793 			}
   794 	default:
   795 		Kern::PanicCurrentThread(KClientPanicCat, __LINE__);
   796 		return KErrGeneral;
   797 		}
   798 	}
   799 
   800 TInt DDmaTestSession::OpenDmaChannel(TUint aPslCookie, TUint& aDriverCookie )
   801 	{
   802 	TDmaChannel::SCreateInfo info;
   803 	info.iCookie = aPslCookie;
   804 	info.iDfcQ = iDfcQ;
   805 	info.iDfcPriority = 3;
   806 	info.iDesCount = 128;
   807 
   808 	TDmaChannel* channel = NULL;
   809 
   810 	//cs so thread can't be killed between
   811 	//opening channel and adding to array
   812 	NKern::ThreadEnterCS();
   813 	TInt r = TDmaChannel::Open(info, channel);
   814 	if(KErrNone == r)
   815 		{
   816 		__NK_ASSERT_ALWAYS(channel);
   817 		
   818 		__KTRACE_OPT(KDMA, Kern::Printf("OpenDmaChannel: channel@ 0x%08x", channel)); 
   819 
   820 
   821 		TInt err = iChannels.Append(channel);
   822 		if(KErrNone == err)
   823 			{
   824 			aDriverCookie = reinterpret_cast<TUint>(channel);
   825 			}
   826 		else
   827 			{
   828 			channel->Close();
   829 			r = KErrNoMemory;
   830 			}
   831 		}
   832 	NKern::ThreadLeaveCS();
   833 
   834 	return r;
   835 	}
   836 
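        // A "cookie" handed out to the user side is simply the kernel-side object
        // pointer cast to a TUint; on the way back in it is validated by looking
        // it up in the owning RPointerArray rather than being dereferenced directly.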
   837 TInt DDmaTestSession::CookieToChannelIndex(TUint aDriverCookie) const
   838 	{
   839 	const TInt r = iChannels.Find(reinterpret_cast<TDmaChannel*>(aDriverCookie));
   840 
   841 	if(r < 0)
   842 		{
   843 		__KTRACE_OPT(KDMA, Kern::Printf("CookieToChannelIndex: cookie 0x%08x not found!", aDriverCookie)); 
   844 		}
   845 	return r;
   846 	}
   847 
   848 TInt DDmaTestSession::CookieToRequestIndex(TUint aRequestCookie) const
   849 	{
   850 	const TInt r = iClientDmaReqs.Find(reinterpret_cast<DClientDmaRequest*>(aRequestCookie));
   851 
   852 	if(r < 0)
   853 		{
   854 		__KTRACE_OPT(KDMA, Kern::Printf("CookieToRequestIndex: cookie 0x%08x not found!", aRequestCookie)); 
   855 		}
   856 	return r;
   857 	}
   858 
   859 void DDmaTestSession::CloseDmaChannelByIndex(TInt aIndex)
   860 	{
   861 	__KTRACE_OPT(KDMA, Kern::Printf("CloseDmaChannelByIndex: %d", aIndex)); 
   862 	__NK_ASSERT_DEBUG(aIndex < iChannels.Count()); 
   863 	// cs so client thread can't be killed between removing channel from
   864 	// array and closing it.
   865 	NKern::ThreadEnterCS();
   866 	TDmaChannel* channel = iChannels[aIndex];
   867 	iChannels.Remove(aIndex);
   868 	channel->Close();
   869 	NKern::ThreadLeaveCS();
   870 	}
   871 
   872 TInt DDmaTestSession::CloseDmaChannelByCookie(TUint aDriverCookie)
   873 	{
   874 	__KTRACE_OPT(KDMA, Kern::Printf("CloseDmaChannelByCookie: 0x%08x", aDriverCookie)); 
   875 	const TInt index = CookieToChannelIndex(aDriverCookie);
   876 	
   877 	if(index >= 0)
   878 		{
   879 		CloseDmaChannelByIndex(index);
   880 		return KErrNone;
   881 		}
   882 	else
   883 		{
   884 		return KErrNotFound;
   885 		}
   886 	}
   887 
   888 TInt DDmaTestSession::CancelAllByCookie(TUint aDriverCookie)
   889 	{
   890 	__KTRACE_OPT(KDMA, Kern::Printf("CancelAllByCookie: 0x%08x", aDriverCookie)); 
   891 	const TInt index = CookieToChannelIndex(aDriverCookie);
   892 	
   893 	if(index >= 0)
   894 		{
   895 		CancelAllByIndex(index);
   896 		return KErrNone;
   897 		}
   898 	else
   899 		{
   900 		return KErrNotFound;
   901 		}
   902 	}
   903 
   904 void DDmaTestSession::CancelAllByIndex(TInt aIndex)
   905 	{
   906 	__KTRACE_OPT(KDMA, Kern::Printf("CancelAllByIndex: %d", aIndex)); 
   907 	__NK_ASSERT_DEBUG(aIndex < iChannels.Count()); 
   908 	
   909 	TDmaChannel* channel = iChannels[aIndex];
   910 	iChannels.Remove(aIndex);
   911 	channel->CancelAll();
   912 	}
   913 
   914 TInt DDmaTestSession::PauseDmaChannelByIndex(TInt aIndex)
   915 	{
   916 	__KTRACE_OPT(KDMA, Kern::Printf("PauseDmaChannelByIndex: %d", aIndex)); 
   917 	__NK_ASSERT_DEBUG(aIndex < iChannels.Count()); 
   918 
   919 #ifdef DMA_APIV2
   920 	TDmaChannel* channel = iChannels[aIndex];
   921 	return channel->Pause();
   922 #else
   923 	return KErrNotSupported;
   924 #endif	
   925 	}
   926 
   927 TInt DDmaTestSession::PauseDmaChannelByCookie(TUint aDriverCookie)
   928 	{
   929 	__KTRACE_OPT(KDMA, Kern::Printf("PauseDmaChannelByCookie: 0x%08x", aDriverCookie)); 
   930 	const TInt index = CookieToChannelIndex(aDriverCookie);
   931 	
   932 	if(index >= 0)
   933 		{
   934 		TInt r = PauseDmaChannelByIndex(index);
   935 		return r;
   936 		}
   937 	else
   938 		{
   939 		return KErrNotFound;
   940 		}
   941 	}
   942 
   943 TInt DDmaTestSession::ResumeDmaChannelByIndex(TInt aIndex)
   944 	{
   945 	__KTRACE_OPT(KDMA, Kern::Printf("ResumeDmaChannelByIndex: %d", aIndex)); 
   946 	__NK_ASSERT_DEBUG(aIndex < iChannels.Count()); 
   947 
   948 #ifdef DMA_APIV2
   949 	TDmaChannel* channel = iChannels[aIndex];
   950 	return channel->Resume();
   951 #else
   952 	return KErrNotSupported;
   953 #endif
   954 	}
   955 
   956 TInt DDmaTestSession::ResumeDmaChannelByCookie(TUint aDriverCookie)
   957 	{
   958 	__KTRACE_OPT(KDMA, Kern::Printf("ResumeDmaChannelByCookie: 0x%08x", aDriverCookie)); 
   959 	const TInt index = CookieToChannelIndex(aDriverCookie);
   960 	
   961 	if(index >= 0)
   962 		{
   963 		TInt r = ResumeDmaChannelByIndex(index);
   964 		return r;
   965 		}
   966 	else
   967 		{
   968 		return KErrNotFound;
   969 		}
   970 	}
   971 
   972 TInt DDmaTestSession::IsrRedoRequestByCookie(TUint aDriverCookie,TUint32 aSrcAddr,TUint32 aDstAddr,TInt aTransferCount,TUint32 aPslRequestInfo,TBool aIsrCb)
   973 {
   974 	__KTRACE_OPT(KDMA, Kern::Printf("IsrRedoRequestByCookie: 0x%08x", aDriverCookie)); 
   975 	const TInt index = CookieToChannelIndex(aDriverCookie);
   976 	
   977 	if(index >= 0)
   978 		{
   979 		TInt r = IsrRedoRequestByIndex(index,aSrcAddr,aDstAddr,aTransferCount,aPslRequestInfo,aIsrCb);
   980 		return r;
   981 		}
   982 	else
   983 		{
   984 		return KErrNotFound;
   985 		}
   986 }
   987 
   988 TInt DDmaTestSession::IsrRedoRequestByIndex(TInt aIndex,TUint32 aSrcAddr,TUint32 aDstAddr,TInt aTransferCount,TUint32 aPslRequestInfo,TBool aIsrCb)
   989 	{
   990 	__KTRACE_OPT(KDMA, Kern::Printf("IsrRedoRequestByIndex: %d", aIndex)); 
   991 	__NK_ASSERT_DEBUG(aIndex < iChannels.Count()); 
   992 
   993 #ifdef DMA_APIV2
   994 	TDmaChannel* channel = iChannels[aIndex];
   995 	return channel->IsrRedoRequest(aSrcAddr,aDstAddr,aTransferCount,aPslRequestInfo,aIsrCb);
   996 #else
   997 	return KErrNotSupported;
   998 #endif
   999 	}
  1000 
  1001 /**
   1002 On a v1 framework aChannelCaps is set to "NULL" values (with iPILVersion 1)
  1003 */
  1004 TInt DDmaTestSession::GetChannelCapsByCookie(TUint aDriverCookie, TDmacTestCaps& aChannelCaps)
  1005 	{
  1006 	SDmacCaps caps = {0,}; //initialise with NULL values
  1007 	TInt r = GetChannelCapsByCookie(aDriverCookie, caps);
  1008 
  1009 	if(r == KErrNotSupported)
  1010 		{
   1011 		// If we cannot query the caps it means
   1012 		// that we are using the v1 driver.
   1013 		// We construct an empty TDmacTestCaps,
   1014 		// but with an iPILVersion of 1.
  1015 		const TDmacTestCaps nullCapsV1(caps, 1);
  1016 		aChannelCaps = nullCapsV1;
  1017 		r = KErrNone;
  1018 		}
  1019 	else if(r == KErrNone)
  1020 		{
  1021 		const TDmacTestCaps capsV2(caps, 2);
  1022 		aChannelCaps = capsV2;
  1023 		}
  1024 
  1025 	return r;
  1026 	}
  1027 
  1028 /**
   1029 Returns the capabilities of the DMA channel.
   1030 Querying SDmacCaps is not possible on V1 of the DMA framework;
   1031 in that case KErrNotSupported is returned.
  1032 */
  1033 TInt DDmaTestSession::GetChannelCapsByCookie(TUint aDriverCookie, SDmacCaps& aChannelCaps)
  1034 	{
  1035 	__KTRACE_OPT(KDMA, Kern::Printf("GetChannelCapsByCookie: 0x%08x", aDriverCookie)); 
  1036 	const TInt index = CookieToChannelIndex(aDriverCookie);
  1037 	if(index >= 0)
  1038 		{
  1039 #ifdef DMA_APIV2
  1040 		aChannelCaps = iChannels[index]->DmacCaps();
  1041 		return KErrNone;
  1042 #else
  1043 		return KErrNotSupported;
  1044 #endif
  1045 		}
  1046 	else
  1047 		{
  1048 		return KErrNotFound;
  1049 		}
  1050 	}
  1051 
  1052 TInt DDmaTestSession::IsQueueEmptyByCookie(TUint aDriverCookie, TBool& aQueueEmpty)
  1053 	{
  1054 	__KTRACE_OPT(KDMA, Kern::Printf("IsQueueEmptyByCookie: 0x%08x", aDriverCookie)); 
  1055 	const TInt index = CookieToChannelIndex(aDriverCookie);
  1056 	
  1057 	if(index >= 0)
  1058 		{
  1059 		aQueueEmpty=iChannels[index]->IsQueueEmpty();
  1060 		return KErrNone;
  1061 		}
  1062 	else
  1063 		{
  1064 		return KErrNotFound;
  1065 		}
  1066 	}
  1067 
  1068 TInt DDmaTestSession::ChannelIsOpenedByCookie(TUint aDriverCookie, TBool& aChannelOpen)
  1069 	{
  1070 	__KTRACE_OPT(KDMA, Kern::Printf("ChannelIsOpenedByCookie: 0x%08x", aDriverCookie)); 
  1071 	const TInt index = CookieToChannelIndex(aDriverCookie);
  1072 	
  1073 	if(index >= 0)
  1074 		{
  1075 		aChannelOpen=iChannels[index]->IsOpened();
  1076 		return KErrNone;
  1077 		}
  1078 	else
  1079 		{
  1080 		return KErrNotFound;
  1081 		}
  1082 	}
  1083 
  1084 TInt DDmaTestSession::CreateDmaRequest(TUint aChannelCookie, TUint& aRequestCookie, TBool aNewCallback, TInt aMaxFragmentSizeBytes)
  1085 	{
  1086 #ifndef DMA_APIV2
  1087 	if(aNewCallback)
  1088 		return KErrNotSupported;
  1089 #endif
  1090 
  1091 	TInt channelIndex = CookieToChannelIndex(aChannelCookie);
  1092 	if(channelIndex < 0)
  1093 		return channelIndex;
  1094 
  1095 	NKern::ThreadEnterCS();
  1096 	DClientDmaRequest* request = DClientDmaRequest::Construct(iClient, iIsrCallbackDfcQ, *iChannels[channelIndex], aNewCallback, aMaxFragmentSizeBytes);
  1097 	if(request == NULL)
  1098 		{
  1099 		NKern::ThreadLeaveCS();
  1100 		return KErrNoMemory;
  1101 		}
  1102 
  1103 	TInt r = iClientDmaReqs.Append(request);
  1104 	if(r == KErrNone)
  1105 		{
  1106 		aRequestCookie = reinterpret_cast<TUint>(request);
  1107 		}
  1108 	else
  1109 		{
  1110 		delete request;
  1111 		}
  1112 	NKern::ThreadLeaveCS();
  1113 	
  1114 	return r;
  1115 	}
  1116 
  1117 TInt DDmaTestSession::DestroyDmaRequestByCookie(TUint aRequestCookie)
  1118 	{
  1119 	TInt requestIndex = CookieToRequestIndex(aRequestCookie);
  1120 	if(requestIndex < 0)
  1121 		return requestIndex;
  1122 
  1123 	DestroyDmaRequestByIndex(requestIndex);
  1124 
  1125 	return KErrNone;
  1126 	}
  1127 
  1128 void DDmaTestSession::DestroyDmaRequestByIndex(TInt aIndex)
  1129 	{
  1130 	__KTRACE_OPT(KDMA, Kern::Printf("DestroyDmaRequestByIndex: %d", aIndex)); 
  1131 	__NK_ASSERT_DEBUG(aIndex < iClientDmaReqs.Count()); 
  1132 	NKern::ThreadEnterCS();
  1133 
  1134 	DClientDmaRequest* request = iClientDmaReqs[aIndex];
  1135 	iClientDmaReqs.Remove(aIndex);
  1136 	delete request;
  1137 
  1138 	NKern::ThreadLeaveCS();
  1139 	}
  1140 
  1141 TInt DDmaTestSession::CreateSharedChunk()
  1142 	{
   1143     // Enter critical section so we can't die and leak the object we are creating,
   1144     // i.e. the DChunk (shared chunk)
  1145     NKern::ThreadEnterCS();
  1146 
  1147     // Create the chunk
  1148     TChunkCreateInfo info;
  1149     info.iType         = TChunkCreateInfo::ESharedKernelSingle;
  1150     info.iMaxSize      = KMaxChunkSize;
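            // A fully-blocking (uncached) mapping means the DMA engine and the CPU
            // see the same data without explicit cache maintenance in the test code;
            // EMapAttrUserRw lets the user-side test read and write the buffers.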
  1151     info.iMapAttr      = EMapAttrFullyBlocking | EMapAttrUserRw;
  1152     info.iOwnsMemory   = ETrue;
  1153     info.iDestroyedDfc = NULL;
  1154 
  1155     DChunk* chunk;
  1156 	TUint32 mapAttr;
  1157     TInt r = Kern::ChunkCreate(info, chunk, iChunkBase, mapAttr);
  1158     if(r!=KErrNone)
  1159         {
  1160         NKern::ThreadLeaveCS();
  1161         return r;
  1162         }
  1163 
   1164     // Commit contiguous physical RAM to the chunk (at offset 0)
  1165 	TUint32 physicalAddr;
  1166 	r = Kern::ChunkCommitContiguous(chunk,0,KMaxChunkSize, physicalAddr);
  1167     if(r!=KErrNone)
  1168         {
  1169         // Commit failed so tidy-up...
  1170         Kern::ChunkClose(chunk);
  1171         }
  1172     else
  1173         {
  1174         iChunk = chunk;
  1175         }
  1176 
  1177     // Can leave critical section now that we have saved pointers to created objects
  1178     NKern::ThreadLeaveCS();
  1179 
  1180     return r;
  1181 	}
  1182 
  1183 TUint DDmaTestSession::OpenSharedChunkHandle()
  1184 	{
  1185 	NKern::ThreadEnterCS();
  1186 	const TInt r = Kern::MakeHandleAndOpen(NULL, iChunk);
  1187 	NKern::ThreadLeaveCS();
  1188 	return r;
  1189 	}
  1190 
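        // The user side supplies transfer addresses as offsets into the shared
        // chunk; convert them to kernel linear addresses by adding the chunk base.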
  1191 void DDmaTestSession::FixupTransferArgs(TDmaTransferArgs& aTransferArgs) const
  1192 	{
  1193 	aTransferArgs.iSrcConfig.iAddr += iChunkBase;
  1194 	aTransferArgs.iDstConfig.iAddr += iChunkBase;
  1195 	}
  1196 
  1197 #ifndef DMA_APIV2
  1198 static TInt FragmentCount(DDmaRequest* aRequest)
  1199 	{
  1200 	TInt count = 0;
  1201 	for (SDmaDesHdr* pH = aRequest->iFirstHdr; pH != NULL; pH = pH->iNext)
  1202 		count++;
  1203 	return count;
  1204 	}
  1205 #endif
  1206 
  1207 TInt DDmaTestSession::RequestFragmentCount(TUint aRequestCookie)
  1208 	{
  1209 	TInt requestIndex = CookieToRequestIndex(aRequestCookie);
  1210 	if(requestIndex < 0)
  1211 		return requestIndex;
  1212 #ifdef DMA_APIV2
  1213 	TInt r = iClientDmaReqs[requestIndex]->FragmentCount();
  1214 #else
  1215 	TInt r = FragmentCount(iClientDmaReqs[requestIndex]);
  1216 #endif
  1217 
  1218 	return r;
  1219 	}
  1220 
  1221 TInt DDmaTestSession::FragmentRequest(TUint aRequestCookie, const TDmaTransferArgs& aTransferArgs, TBool aLegacy)
  1222 	{
  1223 	__KTRACE_OPT(KDMA, Kern::Printf(">FragmentRequest: cookie=0x%08x, legacy=%d", aRequestCookie, aLegacy)); 
  1224 	TInt requestIndex = CookieToRequestIndex(aRequestCookie);
  1225 	if(requestIndex < 0)
  1226 		return requestIndex;
  1227 
  1228 	TInt r = KErrNotSupported;
  1229 	if(aLegacy)
  1230 		{
  1231 		// TODO we can extract the required info from the struct to
  1232 		// set flags
  1233 		TUint flags = KDmaMemSrc | KDmaIncSrc | KDmaMemDest | KDmaIncDest;
  1234 
  1235 		const TUint src = aTransferArgs.iSrcConfig.iAddr;
  1236 		const TUint dst = aTransferArgs.iDstConfig.iAddr;
  1237 		r = iClientDmaReqs[requestIndex]->Fragment(src, dst, aTransferArgs.iTransferCount, flags, NULL);
  1238 		}
  1239 	else
  1240 		{
  1241 #ifdef DMA_APIV2
  1242 		r = iClientDmaReqs[requestIndex]->Fragment(aTransferArgs);
  1243 #else
  1244 		r = KErrNotSupported;
  1245 #endif
  1246 		}
  1247 	return r;
  1248 	}
  1249 
  1250 /**
  1251 Queue the request refered to by aRequestCookie
  1252 
  1253 @param aRequestCookie Client identifier for the DDmaRequest
  1254 @param aStatus Pointer to the client's TRequestStatus
  1255 @param aRecord Pointer to the client's TCallbackRecord
  1256 @return
  1257    - KErrNotFound - aRequestCookie was invalid
  1258    - KErrNone - Success
  1259 */
  1260 TInt DDmaTestSession::QueueRequest(TUint aRequestCookie, TRequestStatus* aStatus, TCallbackRecord* aRecord, TUint64* aDurationMicroSecs)
  1261 	{
  1262 	__KTRACE_OPT(KDMA, Kern::Printf(">QueueRequest: 0x%08x", aRequestCookie)); 
  1263 
  1264 	DClientDmaRequest* request = RequestFromCookie(aRequestCookie);
  1265 	if(request == NULL)
  1266 		return KErrNotFound;
  1267 
  1268 	return request->Queue(aStatus, aRecord, aDurationMicroSecs);
  1269 	}
  1270 
  1271 DClientDmaRequest* DDmaTestSession::RequestFromCookie(TUint aRequestCookie) const
  1272 	{
  1273 	TInt requestIndex = CookieToRequestIndex(aRequestCookie);
  1274 	if(requestIndex < 0)
  1275 		return NULL;
  1276 
  1277 	return (iClientDmaReqs[requestIndex]);
  1278 	}
  1279 
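        // Packs the v1 TDmaTestInfo into a TDmaV2TestInfo so that the user-side
        // test code only has to deal with the v2 structure, whichever version of
        // the framework the driver was built against.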
  1280 TDmaV2TestInfo DDmaTestSession::ConvertTestInfo(const TDmaTestInfo& aOldInfo) const
  1281 	{
  1282 	TDmaV2TestInfo newInfo;
  1283 	newInfo.iMaxTransferSize = aOldInfo.iMaxTransferSize;
  1284 	newInfo.iMemAlignMask = aOldInfo.iMemAlignMask;
  1285 	newInfo.iMemMemPslInfo = aOldInfo.iMemMemPslInfo;
  1286 
  1287 	newInfo.iMaxSbChannels = aOldInfo.iMaxSbChannels;
  1288 	for(TInt i=0; i<aOldInfo.iMaxSbChannels; i++)
  1289 		newInfo.iSbChannels[i] = aOldInfo.iSbChannels[i];
  1290 
  1291 	newInfo.iMaxDbChannels = aOldInfo.iMaxDbChannels;
  1292 	for(TInt i=0; i<aOldInfo.iMaxDbChannels; i++)
  1293 		newInfo.iDbChannels[i] = aOldInfo.iDbChannels[i];
  1294 
  1295 	newInfo.iMaxSgChannels = aOldInfo.iMaxSgChannels;
  1296 	for(TInt i=0; i<aOldInfo.iMaxSgChannels; i++)
  1297 		newInfo.iSgChannels[i] = aOldInfo.iSgChannels[i];
  1298 
  1299 	//TODO will want to add initialisation for Asym channels
  1300 	//when these are available
  1301 
  1302 	return newInfo;
  1303 	}
  1304 //////////////////////////////////////////////////////////////////////////////
  1305 
  1306 class DDmaTestFactory : public DLogicalDevice
  1307 	{
  1308 public:
  1309 	DDmaTestFactory();
  1310 	// from DLogicalDevice
  1311 	virtual ~DDmaTestFactory()
  1312 		{
  1313 		__KTRACE_OPT(KDMA, Kern::Printf(">DDmaTestFactory::~DDmaTestFactory"));
  1314 		}
  1315 	virtual TInt Install();
  1316 	virtual void GetCaps(TDes8& aDes) const;
  1317 	virtual TInt Create(DLogicalChannelBase*& aChannel);
  1318 	};
  1319 
  1320 
  1321 DDmaTestFactory::DDmaTestFactory()
  1322     {
  1323     iVersion = TestDmaLddVersion();
  1324     iParseMask = KDeviceAllowUnit;							// no info, no PDD
  1325     // iUnitsMask = 0;										// Only one thing
  1326     }
  1327 
  1328 
  1329 TInt DDmaTestFactory::Create(DLogicalChannelBase*& aChannel)
  1330     {
  1331 	aChannel=new DDmaTestSession;
  1332 	return aChannel ? KErrNone : KErrNoMemory;
  1333     }
  1334 
  1335 
  1336 TInt DDmaTestFactory::Install()
  1337     {
  1338     return SetName(&KTestDmaLddName);
  1339     }
  1340 
  1341 
  1342 void DDmaTestFactory::GetCaps(TDes8& /*aDes*/) const
  1343     {
  1344     }
  1345 
  1346 //////////////////////////////////////////////////////////////////////////////
  1347 
  1348 DECLARE_STANDARD_LDD()
  1349 	{
  1350     return new DDmaTestFactory;
  1351 	}