// os/kernelhwsrv/kernel/eka/drivers/dma/dma2_pil.cpp
// (hg changeset 0 bde4ae8d615e, sl@SLION-WIN7.fritz.box,
//  Fri, 15 Jun 2012 03:10:57 +0200 -- "First public contribution.")
//
// Copyright (c) 2002-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32/drivers/dma2_pil.cpp
// DMA Platform Independent Layer (PIL)
//
//
#include <drivers/dma.h>
sl@0
    20
#include <drivers/dma_hai.h>
sl@0
    21
sl@0
    22
#include <kernel/kern_priv.h>
sl@0
    23
sl@0
    24
sl@0
    25
// Symbian Min() & Max() are broken, so we have to define them ourselves
sl@0
    26
inline TUint Min(TUint aLeft, TUint aRight)
sl@0
    27
	{return(aLeft < aRight ? aLeft : aRight);}
sl@0
    28
inline TUint Max(TUint aLeft, TUint aRight)
	{return (aRight > aLeft) ? aRight : aLeft;}
sl@0
    30
sl@0
    31
sl@0
    32
// Uncomment the following #define only when freezing the DMA2 export library.
//#define __FREEZE_DMA2_LIB
#ifdef __FREEZE_DMA2_LIB
// Dummy bodies for the PSL entry points and the test hooks, so the export
// library can be frozen without linking against a real PSL.
TInt DmaChannelMgr::StaticExtension(TInt, TAny*) {return 0;}
TDmaChannel* DmaChannelMgr::Open(TUint32, TBool, TUint) {return 0;}
void DmaChannelMgr::Close(TDmaChannel*) {}
EXPORT_C const TDmaTestInfo& DmaTestInfo() {static TDmaTestInfo a; return a;}
EXPORT_C const TDmaV2TestInfo& DmaTestInfoV2() {static TDmaV2TestInfo a; return a;}
#endif	// #ifdef __FREEZE_DMA2_LIB
sl@0
    41
sl@0
    42
sl@0
    43
// Panic category string: "DMA " followed by this source file's name.
static const char KDmaPanicCat[] = "DMA " __FILE__;
sl@0
    44
sl@0
    45
//////////////////////////////////////////////////////////////////////
sl@0
    46
// DmaChannelMgr
sl@0
    47
//
sl@0
    48
// Wait, Signal, and Initialise are defined here in the PIL.
sl@0
    49
// Open, Close and Extension must be defined in the PSL.
sl@0
    50
sl@0
    51
// Fast mutex serialising DmaChannelMgr operations
// (acquired via Wait(), released via Signal()).
NFastMutex DmaChannelMgr::Lock;
sl@0
    52
sl@0
    53
sl@0
    54
void DmaChannelMgr::Wait()
sl@0
    55
	{
sl@0
    56
	NKern::FMWait(&Lock);
sl@0
    57
	}
sl@0
    58
sl@0
    59
sl@0
    60
void DmaChannelMgr::Signal()
sl@0
    61
	{
sl@0
    62
	NKern::FMSignal(&Lock);
sl@0
    63
	}
sl@0
    64
sl@0
    65
sl@0
    66
// Default PIL-side initialisation - nothing to do here.
TInt DmaChannelMgr::Initialise()
	{
	return KErrNone;
	}
sl@0
    70
sl@0
    71
sl@0
    72
class TDmaCancelInfo : public SDblQueLink
sl@0
    73
	{
sl@0
    74
public:
sl@0
    75
	TDmaCancelInfo();
sl@0
    76
	void Signal();
sl@0
    77
public:
sl@0
    78
	NFastSemaphore iSem;
sl@0
    79
	};
sl@0
    80
sl@0
    81
sl@0
    82
TDmaCancelInfo::TDmaCancelInfo()
	: iSem(0)
	{
	// Start out self-linked, i.e. as a circular list of one element.
	iNext = this;
	iPrev = this;
	}
sl@0
    88
sl@0
    89
sl@0
    90
void TDmaCancelInfo::Signal()
sl@0
    91
	{
sl@0
    92
	TDmaCancelInfo* p = this;
sl@0
    93
	FOREVER
sl@0
    94
		{
sl@0
    95
		TDmaCancelInfo* next = (TDmaCancelInfo*)p->iNext;
sl@0
    96
		if (p!=next)
sl@0
    97
			p->Deque();
sl@0
    98
		NKern::FSSignal(&p->iSem);	// Don't dereference p after this
sl@0
    99
		if (p==next)
sl@0
   100
			break;
sl@0
   101
		p = next;
sl@0
   102
		}
sl@0
   103
	}
sl@0
   104
sl@0
   105
sl@0
   106
//////////////////////////////////////////////////////////////////////////////
sl@0
   107
sl@0
   108
#ifdef __DMASIM__
sl@0
   109
#ifdef __WINS__
sl@0
   110
typedef TLinAddr TPhysAddr;
sl@0
   111
#endif
sl@0
   112
static inline TPhysAddr LinToPhys(TLinAddr aLin) {return aLin;}
sl@0
   113
#else
sl@0
   114
static inline TPhysAddr LinToPhys(TLinAddr aLin) {return Epoc::LinearToPhysical(aLin);}
sl@0
   115
#endif
sl@0
   116
sl@0
   117
//
sl@0
   118
// Return minimum of aMaxSize and size of largest physically contiguous block
sl@0
   119
// starting at aLinAddr.
sl@0
   120
//
sl@0
   121
static TInt MaxPhysSize(TLinAddr aLinAddr, const TInt aMaxSize)
sl@0
   122
	{
sl@0
   123
	const TPhysAddr physBase = LinToPhys(aLinAddr);
sl@0
   124
	TLinAddr lin = aLinAddr;
sl@0
   125
	TInt size = 0;
sl@0
   126
	for (;;)
sl@0
   127
		{
sl@0
   128
		// Round up the linear address to the next MMU page boundary
sl@0
   129
		const TLinAddr linBoundary = Kern::RoundToPageSize(lin + 1);
sl@0
   130
		size += linBoundary - lin;
sl@0
   131
		if (size >= aMaxSize)
sl@0
   132
			return aMaxSize;
sl@0
   133
		if ((physBase + size) != LinToPhys(linBoundary))
sl@0
   134
			return size;
sl@0
   135
		lin = linBoundary;
sl@0
   136
		}
sl@0
   137
	}
sl@0
   138
sl@0
   139
sl@0
   140
//////////////////////////////////////////////////////////////////////////////
sl@0
   141
// TDmac
sl@0
   142
sl@0
   143
// First-phase constructor: record the pool configuration supplied by the
// PSL; the actual pools are allocated in the second-phase Create().
TDmac::TDmac(const SCreateInfo& aInfo)
	: iMaxDesCount(aInfo.iDesCount),
	  iAvailDesCount(aInfo.iDesCount),
	  iHdrPool(NULL),
#ifndef __WINS__
	  iHwDesChunk(NULL),
#endif
	  iDesPool(NULL),
	  iDesSize(aInfo.iDesSize),
	  iCapsHwDes(aInfo.iCapsHwDes),
	  iFreeHdr(NULL)
	{
	// The PSL must provide a non-empty pool of non-empty descriptors.
	__DMA_ASSERTD(iMaxDesCount > 0);
	__DMA_ASSERTD(iDesSize > 0);
	}
sl@0
   158
sl@0
   159
sl@0
   160
//
sl@0
   161
// Second-phase c'tor
sl@0
   162
//
sl@0
   163
TInt TDmac::Create(const SCreateInfo& aInfo)
sl@0
   164
	{
sl@0
   165
	iHdrPool = new SDmaDesHdr[iMaxDesCount];
sl@0
   166
	if (iHdrPool == NULL)
sl@0
   167
		{
sl@0
   168
		return KErrNoMemory;
sl@0
   169
		}
sl@0
   170
sl@0
   171
	TInt r = AllocDesPool(aInfo.iDesChunkAttribs);
sl@0
   172
	if (r != KErrNone)
sl@0
   173
		{
sl@0
   174
		return KErrNoMemory;
sl@0
   175
		}
sl@0
   176
sl@0
   177
	// Link all descriptor headers together on the free list
sl@0
   178
	iFreeHdr = iHdrPool;
sl@0
   179
	for (TInt i = 0; i < iMaxDesCount - 1; i++)
sl@0
   180
		iHdrPool[i].iNext = iHdrPool + i + 1;
sl@0
   181
	iHdrPool[iMaxDesCount-1].iNext = NULL;
sl@0
   182
sl@0
   183
	__DMA_INVARIANT();
sl@0
   184
	return KErrNone;
sl@0
   185
	}
sl@0
   186
sl@0
   187
sl@0
   188
TDmac::~TDmac()
	{
	__DMA_INVARIANT();

	// Release the pools allocated by Create().
	FreeDesPool();
	delete[] iHdrPool;
	}
sl@0
   195
sl@0
   196
sl@0
   197
void TDmac::Transfer(const TDmaChannel& /*aChannel*/, const SDmaDesHdr& /*aHdr*/)
sl@0
   198
	{
sl@0
   199
	// TDmac needs to override this function if it has reported the channel
sl@0
   200
	// type for which the PIL calls it.
sl@0
   201
	__DMA_CANT_HAPPEN();
sl@0
   202
	}
sl@0
   203
sl@0
   204
sl@0
   205
void TDmac::Transfer(const TDmaChannel& /*aChannel*/, const SDmaDesHdr& /*aSrcHdr*/,
sl@0
   206
					 const SDmaDesHdr& /*aDstHdr*/)
sl@0
   207
	{
sl@0
   208
	// TDmac needs to override this function if it has reported the channel
sl@0
   209
	// type for which the PIL calls it.
sl@0
   210
	__DMA_CANT_HAPPEN();
sl@0
   211
	}
sl@0
   212
sl@0
   213
sl@0
   214
TInt TDmac::PauseTransfer(const TDmaChannel& /*aChannel*/)
	{
	// Default: pausing not supported. A TDmac that reports support for
	// channel pausing/resuming must override this.
	return KErrNotSupported;
	}
sl@0
   220
sl@0
   221
sl@0
   222
TInt TDmac::ResumeTransfer(const TDmaChannel& /*aChannel*/)
	{
	// Default: resuming not supported. A TDmac that reports support for
	// channel pausing/resuming must override this.
	return KErrNotSupported;
	}
sl@0
   228
sl@0
   229
sl@0
   230
// Allocate the descriptor pool. For hardware descriptors this is a
// physically contiguous, supervisor-mapped chunk; for pseudo descriptors
// it is a plain heap array of transfer-argument structures.
TInt TDmac::AllocDesPool(TUint aAttribs)
	{
	// Calling thread must be in CS
	__ASSERT_CRITICAL;
	TInt r;
	if (iCapsHwDes)
		{
		const TInt poolSize = iMaxDesCount * iDesSize;
#ifdef __WINS__
		// Emulator: ordinary heap memory stands in for the hw chunk.
		(void)aAttribs;
		iDesPool = new TUint8[poolSize];
		r = iDesPool ? KErrNone : KErrNoMemory;
#else
		// Chunk not mapped as supervisor r/w user none? incorrect mask passed by PSL
		__DMA_ASSERTD((aAttribs & EMapAttrAccessMask) == EMapAttrSupRw);
		TPhysAddr physAddr;
		r = Epoc::AllocPhysicalRam(poolSize, physAddr);
		if (r == KErrNone)
			{
			r = DPlatChunkHw::New(iHwDesChunk, physAddr, poolSize, aAttribs);
			if (r == KErrNone)
				{
				iDesPool = (TAny*)iHwDesChunk->LinearAddress();
				__KTRACE_OPT(KDMA, Kern::Printf("descriptor hw chunk created lin=0x%08X phys=0x%08X, size=0x%X",
												iHwDesChunk->iLinAddr, iHwDesChunk->iPhysAddr, poolSize));
				}
			else
				{
				// Chunk creation failed - don't leak the physical RAM.
				Epoc::FreePhysicalRam(physAddr, poolSize);
				}
			}
#endif
		}
	else
		{
		// Pseudo descriptors: the PIL just stores the transfer arguments.
		iDesPool = new TDmaTransferArgs[iMaxDesCount];
		r = iDesPool ? KErrNone : KErrNoMemory;
		}
	return r;
	}
sl@0
   268
sl@0
   269
sl@0
   270
void TDmac::FreeDesPool()
sl@0
   271
	{
sl@0
   272
	// Calling thread must be in CS
sl@0
   273
	__ASSERT_CRITICAL;
sl@0
   274
	if (iCapsHwDes)
sl@0
   275
		{
sl@0
   276
#ifdef __WINS__
sl@0
   277
		delete[] iDesPool;
sl@0
   278
#else
sl@0
   279
		if (iHwDesChunk)
sl@0
   280
			{
sl@0
   281
			const TPhysAddr phys = iHwDesChunk->PhysicalAddress();
sl@0
   282
			const TInt size = iHwDesChunk->iSize;
sl@0
   283
			iHwDesChunk->Close(NULL);
sl@0
   284
			Epoc::FreePhysicalRam(phys, size);
sl@0
   285
			}
sl@0
   286
#endif
sl@0
   287
		}
sl@0
   288
	else
sl@0
   289
		{
sl@0
   290
		Kern::Free(iDesPool);
sl@0
   291
		}
sl@0
   292
	}
sl@0
   293
sl@0
   294
sl@0
   295
//
sl@0
   296
// Prealloc the given number of descriptors.
sl@0
   297
//
sl@0
   298
TInt TDmac::ReserveSetOfDes(TInt aCount)
sl@0
   299
	{
sl@0
   300
	__KTRACE_OPT(KDMA, Kern::Printf("TDmac::ReserveSetOfDes count=%d", aCount));
sl@0
   301
	__DMA_ASSERTD(aCount > 0);
sl@0
   302
	TInt r = KErrTooBig;
sl@0
   303
	Wait();
sl@0
   304
	if (iAvailDesCount - aCount >= 0)
sl@0
   305
		{
sl@0
   306
		iAvailDesCount -= aCount;
sl@0
   307
		r = KErrNone;
sl@0
   308
		}
sl@0
   309
	Signal();
sl@0
   310
	__DMA_INVARIANT();
sl@0
   311
	return r;
sl@0
   312
	}
sl@0
   313
sl@0
   314
sl@0
   315
//
sl@0
   316
// Return the given number of preallocated descriptors to the free pool.
sl@0
   317
//
sl@0
   318
void TDmac::ReleaseSetOfDes(TInt aCount)
sl@0
   319
	{
sl@0
   320
	__DMA_ASSERTD(aCount >= 0);
sl@0
   321
	Wait();
sl@0
   322
	iAvailDesCount += aCount;
sl@0
   323
	Signal();
sl@0
   324
	__DMA_INVARIANT();
sl@0
   325
	}
sl@0
   326
sl@0
   327
sl@0
   328
//
sl@0
   329
// Queue DFC and update word used to communicate with channel DFC.
sl@0
   330
//
sl@0
   331
// Called in interrupt context by PSL.
sl@0
   332
//
sl@0
   333
void TDmac::HandleIsr(TDmaChannel& aChannel, TUint aEventMask, TBool aIsComplete)
sl@0
   334
	{
sl@0
   335
	__KTRACE_OPT(KDMA, Kern::Printf("TDmac::HandleIsr"));
sl@0
   336
sl@0
   337
	// Function needs to be called by PSL in ISR context
sl@0
   338
	__DMA_ASSERTD(NKern::CurrentContext() == NKern::EInterrupt);
sl@0
   339
sl@0
   340
	// First the ISR callback stuff
sl@0
   341
sl@0
   342
	// Is this a transfer completion notification?
sl@0
   343
	if (aEventMask & EDmaCallbackRequestCompletion)
sl@0
   344
		{
sl@0
   345
		// If so, has the client requested an ISR callback?
sl@0
   346
		if (__e32_atomic_load_acq32(&aChannel.iIsrCbRequest))
sl@0
   347
			{
sl@0
   348
			__KTRACE_OPT(KDMA, Kern::Printf("ISR callback"));
sl@0
   349
sl@0
   350
			// Since iIsrCbRequest was set no threads will be
sl@0
   351
			// modifying the request queue.
sl@0
   352
			const DDmaRequest* const req = _LOFF(aChannel.iReqQ.First(), DDmaRequest, iLink);
sl@0
   353
sl@0
   354
			// We expect the request to have requested
sl@0
   355
			// ISR callback
sl@0
   356
			__NK_ASSERT_DEBUG(req->iIsrCb);
sl@0
   357
sl@0
   358
			TDmaCallback const cb = req->iDmaCb;
sl@0
   359
			TAny* const arg = req->iDmaCbArg;
sl@0
   360
			// Execute the client callback
sl@0
   361
			(*cb)(EDmaCallbackRequestCompletion,
sl@0
   362
				  (aIsComplete ? EDmaResultOK : EDmaResultError),
sl@0
   363
				  arg,
sl@0
   364
				  NULL);
sl@0
   365
			// Now let's see if the callback rescheduled the transfer request
sl@0
   366
			// (see TDmaChannel::IsrRedoRequest()).
sl@0
   367
			const TBool redo = aChannel.iRedoRequest;
sl@0
   368
			aChannel.iRedoRequest = EFalse;
sl@0
   369
			const TBool stop = __e32_atomic_load_acq32(&aChannel.iIsrDfc) &
sl@0
   370
				(TUint32)TDmaChannel::KCancelFlagMask;
sl@0
   371
			// There won't be another ISR callback if this callback didn't
sl@0
   372
			// reschedule the request, or the client cancelled all requests, or
sl@0
   373
			// this callback rescheduled the request with a DFC callback.
sl@0
   374
			if (!redo || stop || !req->iIsrCb)
sl@0
   375
				{
sl@0
   376
				__e32_atomic_store_rel32(&aChannel.iIsrCbRequest, EFalse);
sl@0
   377
				}
sl@0
   378
			if (redo && !stop)
sl@0
   379
				{
sl@0
   380
				// We won't queue the channel DFC in this case and just return.
sl@0
   381
				__KTRACE_OPT(KDMA, Kern::Printf("CB rescheduled xfer -> no DFC"));
sl@0
   382
				return;
sl@0
   383
				}
sl@0
   384
			// Not redoing or being cancelled means we've been calling the
sl@0
   385
			// request's ISR callback for the last time. We're going to
sl@0
   386
			// complete the request via the DFC in the usual way.
sl@0
   387
			}
sl@0
   388
		}
sl@0
   389
sl@0
   390
	// Now queue a DFC if necessary. The possible scenarios are:
sl@0
   391
	// a) DFC not queued (orig == 0)              -> update iIsrDfc + queue DFC
sl@0
   392
	// b) DFC queued, not running yet (orig != 0) -> just update iIsrDfc
sl@0
   393
	// c) DFC running / iIsrDfc not reset yet (orig != 0) -> just update iIsrDfc
sl@0
   394
	// d) DFC running / iIsrDfc already reset (orig == 0) -> update iIsrDfc + requeue DFC
sl@0
   395
sl@0
   396
	// Set error flag if necessary.
sl@0
   397
	const TUint32 inc = aIsComplete ? 1u : TUint32(TDmaChannel::KErrorFlagMask) | 1u;
sl@0
   398
sl@0
   399
	// Add 'inc' (interrupt count increment + poss. error flag) to 'iIsrDfc' if
sl@0
   400
	// cancel flag is not set, do nothing otherwise. Assign original value of
sl@0
   401
	// 'iIsrDfc' to 'orig' in any case.
sl@0
   402
	const TUint32 orig = __e32_atomic_tau_ord32(&aChannel.iIsrDfc,
sl@0
   403
												TUint32(TDmaChannel::KCancelFlagMask),
sl@0
   404
												0,
sl@0
   405
												inc);
sl@0
   406
sl@0
   407
	// As transfer should be suspended when an error occurs, we
sl@0
   408
	// should never get there with the error flag already set.
sl@0
   409
	__DMA_ASSERTD((orig & inc & (TUint32)TDmaChannel::KErrorFlagMask) == 0);
sl@0
   410
sl@0
   411
	if (orig == 0)
sl@0
   412
		{
sl@0
   413
		aChannel.iDfc.Add();
sl@0
   414
		}
sl@0
   415
	}
sl@0
   416
sl@0
   417
sl@0
   418
// Initialise one descriptor from the transfer arguments, dispatching to
// the PSL for hardware descriptors.
TInt TDmac::InitDes(const SDmaDesHdr& aHdr, const TDmaTransferArgs& aTransferArgs)
	{
	__KTRACE_OPT(KDMA, Kern::Printf("TDmac::InitDes"));
	TInt r;
	if (iCapsHwDes)
		{
		__KTRACE_OPT(KDMA, Kern::Printf("iCaps.iHwDescriptors"));
		r = InitHwDes(aHdr, aTransferArgs);
		}
	else
		{
		// Pseudo descriptor: just record a copy of the transfer arguments.
		HdrToDes(aHdr) = aTransferArgs;
		r = KErrNone;
		}
	return r;
	}
sl@0
   435
sl@0
   436
sl@0
   437
TInt TDmac::InitHwDes(const SDmaDesHdr& /*aHdr*/, const TDmaTransferArgs& /*aTransferArgs*/)
	{
	// A concrete controller must override this if SDmacCaps::iHwDescriptors
	// is set; reaching this default implementation is a programming error.
	__DMA_CANT_HAPPEN();
	return KErrGeneral;
	}
sl@0
   443
sl@0
   444
sl@0
   445
TInt TDmac::InitSrcHwDes(const SDmaDesHdr& /*aHdr*/, const TDmaTransferArgs& /*aTransferArgs*/)
	{
	// A concrete controller must override this if
	// SDmacCaps::iAsymHwDescriptors is set.
	__DMA_CANT_HAPPEN();
	return KErrGeneral;
	}
sl@0
   451
sl@0
   452
sl@0
   453
TInt TDmac::InitDstHwDes(const SDmaDesHdr& /*aHdr*/, const TDmaTransferArgs& /*aTransferArgs*/)
	{
	// A concrete controller must override this if
	// SDmacCaps::iAsymHwDescriptors is set.
	__DMA_CANT_HAPPEN();
	return KErrGeneral;
	}
sl@0
   459
sl@0
   460
sl@0
   461
// Update selected fields of an already initialised descriptor, dispatching
// to the PSL for hardware descriptors.
TInt TDmac::UpdateDes(const SDmaDesHdr& aHdr, TUint32 aSrcAddr, TUint32 aDstAddr,
					  TUint aTransferCount, TUint32 aPslRequestInfo)
	{
	__KTRACE_OPT(KDMA, Kern::Printf("TDmac::UpdateDes"));
	TInt r;
	if (iCapsHwDes)
		{
		__KTRACE_OPT(KDMA, Kern::Printf("iCaps.iHwDescriptors"));
		r = UpdateHwDes(aHdr, aSrcAddr, aDstAddr, aTransferCount, aPslRequestInfo);
		}
	else
		{
		// Pseudo descriptor: only overwrite the fields the caller actually
		// supplied (KPhysAddrInvalid / zero mean "leave unchanged").
		TDmaTransferArgs& desArgs = HdrToDes(aHdr);
		if (aSrcAddr != KPhysAddrInvalid)
			desArgs.iSrcConfig.iAddr = aSrcAddr;
		if (aDstAddr != KPhysAddrInvalid)
			desArgs.iDstConfig.iAddr = aDstAddr;
		if (aTransferCount)
			desArgs.iTransferCount = aTransferCount;
		if (aPslRequestInfo)
			desArgs.iPslRequestInfo = aPslRequestInfo;
		r = KErrNone;
		}
	return r;
	}
sl@0
   486
sl@0
   487
sl@0
   488
TInt TDmac::UpdateHwDes(const SDmaDesHdr& /*aHdr*/, TUint32 /*aSrcAddr*/, TUint32 /*aDstAddr*/,
						TUint /*aTransferCount*/, TUint32 /*aPslRequestInfo*/)
	{
	// A concrete controller must override this if SDmacCaps::iHwDescriptors
	// is set; reaching this default implementation is a programming error.
	__DMA_CANT_HAPPEN();
	return KErrGeneral;
	}
sl@0
   495
sl@0
   496
sl@0
   497
TInt TDmac::UpdateSrcHwDes(const SDmaDesHdr& /*aHdr*/, TUint32 /*aSrcAddr*/,
						   TUint /*aTransferCount*/, TUint32 /*aPslRequestInfo*/)
	{
	// A concrete controller must override this if
	// SDmacCaps::iAsymHwDescriptors is set.
	__DMA_CANT_HAPPEN();
	return KErrGeneral;
	}
sl@0
   504
sl@0
   505
sl@0
   506
TInt TDmac::UpdateDstHwDes(const SDmaDesHdr& /*aHdr*/, TUint32 /*aDstAddr*/,
						   TUint /*aTransferCount*/, TUint32 /*aPslRequestInfo*/)
	{
	// A concrete controller must override this if
	// SDmacCaps::iAsymHwDescriptors is set.
	__DMA_CANT_HAPPEN();
	return KErrGeneral;
	}
sl@0
   513
sl@0
   514
sl@0
   515
void TDmac::ChainHwDes(const SDmaDesHdr& /*aHdr*/, const SDmaDesHdr& /*aNextHdr*/)
sl@0
   516
	{
sl@0
   517
	// concrete controller must override if SDmacCaps::iHwDescriptors set
sl@0
   518
	__DMA_CANT_HAPPEN();
sl@0
   519
	}
sl@0
   520
sl@0
   521
sl@0
   522
void TDmac::AppendHwDes(const TDmaChannel& /*aChannel*/, const SDmaDesHdr& /*aLastHdr*/,
sl@0
   523
						const SDmaDesHdr& /*aNewHdr*/)
sl@0
   524
	{
sl@0
   525
 	// concrete controller must override if SDmacCaps::iHwDescriptors set
sl@0
   526
	__DMA_CANT_HAPPEN();
sl@0
   527
	}
sl@0
   528
sl@0
   529
sl@0
   530
void TDmac::AppendHwDes(const TDmaChannel& /*aChannel*/,
sl@0
   531
						const SDmaDesHdr& /*aSrcLastHdr*/, const SDmaDesHdr& /*aSrcNewHdr*/,
sl@0
   532
						const SDmaDesHdr& /*aDstLastHdr*/, const SDmaDesHdr& /*aDstNewHdr*/)
sl@0
   533
	{
sl@0
   534
	// concrete controller must override if SDmacCaps::iAsymHwDescriptors set
sl@0
   535
	__DMA_CANT_HAPPEN();
sl@0
   536
	}
sl@0
   537
sl@0
   538
sl@0
   539
void TDmac::UnlinkHwDes(const TDmaChannel& /*aChannel*/, SDmaDesHdr& /*aHdr*/)
sl@0
   540
	{
sl@0
   541
 	// concrete controller must override if SDmacCaps::iHwDescriptors set
sl@0
   542
	__DMA_CANT_HAPPEN();
sl@0
   543
	}
sl@0
   544
sl@0
   545
sl@0
   546
void TDmac::ClearHwDes(const SDmaDesHdr& /*aHdr*/)
	{
	// Default implementation is a no-op; a concrete controller may override.
	}
sl@0
   551
sl@0
   552
sl@0
   553
TInt TDmac::LinkChannels(TDmaChannel& /*a1stChannel*/, TDmaChannel& /*a2ndChannel*/)
	{
	// Default implementation - NOP; a concrete controller may override.
	return KErrNotSupported;
	}
sl@0
   558
sl@0
   559
sl@0
   560
TInt TDmac::UnlinkChannel(TDmaChannel& /*aChannel*/)
	{
	// Default implementation - NOP; a concrete controller may override.
	return KErrNotSupported;
	}
sl@0
   565
sl@0
   566
sl@0
   567
TInt TDmac::FailNext(const TDmaChannel& /*aChannel*/)
	{
	// Default implementation - NOP; a concrete controller may override.
	return KErrNotSupported;
	}
sl@0
   572
sl@0
   573
sl@0
   574
TInt TDmac::MissNextInterrupts(const TDmaChannel& /*aChannel*/, TInt /*aInterruptCount*/)
	{
	// Default implementation - NOP; a concrete controller may override.
	return KErrNotSupported;
	}
sl@0
   579
sl@0
   580
sl@0
   581
TInt TDmac::Extension(TDmaChannel& /*aChannel*/, TInt /*aCmd*/, TAny* /*aArg*/)
	{
	// Default implementation - NOP; a concrete controller may override.
	return KErrNotSupported;
	}
sl@0
   586
sl@0
   587
sl@0
   588
TUint32 TDmac::HwDesNumDstElementsTransferred(const SDmaDesHdr& /*aHdr*/)
	{
	// A concrete controller must override this if SDmacCaps::iHwDescriptors
	// is set; reaching this default implementation is a programming error.
	__DMA_CANT_HAPPEN();
	return 0;
	}
sl@0
   594
sl@0
   595
sl@0
   596
TUint32 TDmac::HwDesNumSrcElementsTransferred(const SDmaDesHdr& /*aHdr*/)
	{
	// A concrete controller must override this if SDmacCaps::iHwDescriptors
	// is set; reaching this default implementation is a programming error.
	__DMA_CANT_HAPPEN();
	return 0;
	}
sl@0
   602
sl@0
   603
sl@0
   604
#ifdef _DEBUG
sl@0
   605
sl@0
   606
void TDmac::Invariant()
sl@0
   607
	{
sl@0
   608
	Wait();
sl@0
   609
	__DMA_ASSERTD(0 <= iAvailDesCount && iAvailDesCount <= iMaxDesCount);
sl@0
   610
	__DMA_ASSERTD(! iFreeHdr || IsValidHdr(iFreeHdr));
sl@0
   611
	for (TInt i = 0; i < iMaxDesCount; i++)
sl@0
   612
		__DMA_ASSERTD(iHdrPool[i].iNext == NULL || IsValidHdr(iHdrPool[i].iNext));
sl@0
   613
	Signal();
sl@0
   614
	}
sl@0
   615
sl@0
   616
sl@0
   617
// A header is valid iff it lies within the allocated header pool.
TBool TDmac::IsValidHdr(const SDmaDesHdr* aHdr)
	{
	return (aHdr >= iHdrPool) && (aHdr < iHdrPool + iMaxDesCount);
	}
sl@0
   621
sl@0
   622
#endif
sl@0
   623
sl@0
   624
sl@0
   625
sl@0
   626
sl@0
   627
//
sl@0
   628
// Internal compat version, used by legacy Fragment()
sl@0
   629
//
sl@0
   630
TDmaTransferConfig::TDmaTransferConfig(TUint32 aAddr, TUint aFlags, TBool aAddrInc)
sl@0
   631
	: iAddr(aAddr),
sl@0
   632
	  iAddrMode(aAddrInc ? KDmaAddrModePostIncrement : KDmaAddrModeConstant),
sl@0
   633
	  iElementSize(0),
sl@0
   634
	  iElementsPerFrame(0),
sl@0
   635
	  iElementsPerPacket(0),
sl@0
   636
	  iFramesPerTransfer(0),
sl@0
   637
	  iElementSkip(0),
sl@0
   638
	  iFrameSkip(0),
sl@0
   639
	  iBurstSize(KDmaBurstSizeAny),
sl@0
   640
	  iFlags(aFlags),
sl@0
   641
	  iSyncFlags(KDmaSyncAuto),
sl@0
   642
	  iPslTargetInfo(0),
sl@0
   643
	  iRepeatCount(0),
sl@0
   644
	  iDelta(~0u),
sl@0
   645
	  iReserved(0)
sl@0
   646
	{
sl@0
   647
	}
sl@0
   648
sl@0
   649
sl@0
   650
sl@0
   651
//
sl@0
   652
// Internal compat version, used by legacy Fragment()
sl@0
   653
//
sl@0
   654
TDmaTransferArgs::TDmaTransferArgs(TUint32 aSrc, TUint32 aDest, TInt aCount,
sl@0
   655
								   TUint aFlags, TUint32 aPslInfo)
sl@0
   656
	: iSrcConfig(aSrc, RequestFlags2SrcConfigFlags(aFlags), (aFlags & KDmaIncSrc)),
sl@0
   657
	  iDstConfig(aDest, RequestFlags2DstConfigFlags(aFlags), (aFlags & KDmaIncDest)),
sl@0
   658
	  iTransferCount(aCount),
sl@0
   659
	  iGraphicsOps(KDmaGraphicsOpNone),
sl@0
   660
	  iColour(0),
sl@0
   661
	  iFlags(0),
sl@0
   662
	  iChannelPriority(KDmaPriorityNone),
sl@0
   663
	  iPslRequestInfo(aPslInfo),
sl@0
   664
	  iDelta(~0u),
sl@0
   665
	  iReserved1(0),
sl@0
   666
	  iChannelCookie(0),
sl@0
   667
	  iReserved2(0)
sl@0
   668
	{
sl@0
   669
	}
sl@0
   670
sl@0
   671
sl@0
   672
//
sl@0
   673
// As DDmaRequest is derived from DBase, the initializations with zero aren't
sl@0
   674
// strictly necessary here, but this way it's nicer.
sl@0
   675
//
sl@0
   676
EXPORT_C DDmaRequest::DDmaRequest(TDmaChannel& aChannel, TCallback aCb,
sl@0
   677
								  TAny* aCbArg, TInt aMaxTransferSize)
sl@0
   678
	: iChannel(aChannel),
sl@0
   679
	  iCb(aCb),
sl@0
   680
	  iCbArg(aCbArg),
sl@0
   681
	  iDmaCb(NULL),
sl@0
   682
	  iDmaCbArg(NULL),
sl@0
   683
	  iIsrCb(EFalse),
sl@0
   684
	  iDesCount(0),
sl@0
   685
	  iFirstHdr(NULL),
sl@0
   686
	  iLastHdr(NULL),
sl@0
   687
	  iSrcDesCount(0),
sl@0
   688
	  iSrcFirstHdr(NULL),
sl@0
   689
	  iSrcLastHdr(NULL),
sl@0
   690
	  iDstDesCount(0),
sl@0
   691
	  iDstFirstHdr(NULL),
sl@0
   692
	  iDstLastHdr(NULL),
sl@0
   693
	  iQueued(EFalse),
sl@0
   694
	  iMaxTransferSize(aMaxTransferSize),
sl@0
   695
	  iTotalNumSrcElementsTransferred(0),
sl@0
   696
	  iTotalNumDstElementsTransferred(0)
sl@0
   697
	{
sl@0
   698
	iChannel.iReqCount++;
sl@0
   699
	__DMA_ASSERTD(0 <= aMaxTransferSize);
sl@0
   700
	__DMA_INVARIANT();
sl@0
   701
	}
sl@0
   702
sl@0
   703
sl@0
   704
//
sl@0
   705
// As DDmaRequest is derived from DBase, the initializations with zero aren't
sl@0
   706
// strictly necessary here, but this way it's nicer.
sl@0
   707
//
sl@0
   708
//
// New-style constructor taking a TDmaCallback. Mutually exclusive with the
// old TCallback form: exactly one of iCb/iDmaCb ends up non-NULL (checked by
// the invariant). Registers the request on the channel atomically.
//
// @param aChannel          Channel this request will be queued on.
// @param aDmaCb            New-style client callback.
// @param aCbArg            Opaque argument passed to the callback.
// @param aMaxTransferSize  Optional per-request cap on fragment size
//                          (0 = no cap beyond the PSL's own limit).
//
EXPORT_C DDmaRequest::DDmaRequest(TDmaChannel& aChannel, TDmaCallback aDmaCb,
								  TAny* aCbArg, TUint aMaxTransferSize)
	: iChannel(aChannel),
	  iCb(NULL),
	  iCbArg(NULL),
	  iDmaCb(aDmaCb),
	  iDmaCbArg(aCbArg),
	  iIsrCb(EFalse),
	  iDesCount(0),
	  iFirstHdr(NULL),
	  iLastHdr(NULL),
	  iSrcDesCount(0),
	  iSrcFirstHdr(NULL),
	  iSrcLastHdr(NULL),
	  iDstDesCount(0),
	  iDstFirstHdr(NULL),
	  iDstLastHdr(NULL),
	  iQueued(EFalse),
	  iMaxTransferSize(aMaxTransferSize),
	  iTotalNumSrcElementsTransferred(0),
	  iTotalNumDstElementsTransferred(0)
	{
	// Atomic: requests may be created/destroyed concurrently per channel.
	__e32_atomic_add_ord32(&iChannel.iReqCount, 1);
	__DMA_INVARIANT();
	}
sl@0
   733
sl@0
   734
sl@0
   735
//
// Destructor. The request must not be queued any more. Returns any
// descriptors still held to the controller's free pool and deregisters the
// request from the channel (atomic decrement matching the constructors).
//
EXPORT_C DDmaRequest::~DDmaRequest()
	{
	__DMA_ASSERTD(!iQueued);
	__DMA_INVARIANT();
	FreeDesList();
	// TUint32(-1) == atomic decrement of the channel's request count.
	__e32_atomic_add_ord32(&iChannel.iReqCount, TUint32(-1));
	}
sl@0
   742
sl@0
   743
sl@0
   744
//
// Legacy fragmentation API: packs the individual arguments into a
// TDmaTransferArgs on the stack and forwards to the common Frag() path.
//
// @return KErrNone on success, KErrArgument for inconsistent arguments, or
//         an error from descriptor allocation / the PSL.
//
EXPORT_C TInt DDmaRequest::Fragment(TUint32 aSrc, TUint32 aDest, TInt aCount,
									TUint aFlags, TUint32 aPslInfo)
	{
	__KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::Fragment thread %O "
									"src=0x%08X dest=0x%08X count=%d flags=0x%X psl=0x%08X",
									&Kern::CurrentThread(), aSrc, aDest, aCount, aFlags, aPslInfo));
	__DMA_ASSERTD(aCount > 0);

	// Local, writable copy - Frag() modifies its argument.
	TDmaTransferArgs args(aSrc, aDest, aCount, aFlags, aPslInfo);

	return Frag(args);
	}
sl@0
   756
sl@0
   757
sl@0
   758
//
// Structured fragmentation API. Takes the client's (const) transfer
// arguments, copies them, and forwards to the common Frag() path.
//
EXPORT_C TInt DDmaRequest::Fragment(const TDmaTransferArgs& aTransferArgs)
	{
	__KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::Fragment thread %O", &Kern::CurrentThread()));

	// Writable temporary working copy of the transfer arguments.
	// We need this because we may have to modify some fields before passing it
	// to the PSL (for example iChannelCookie, iTransferCount,
	// iDstConfig::iAddr, and iSrcConfig::iAddr).
	TDmaTransferArgs args(aTransferArgs);

	return Frag(args);
	}
sl@0
   770
sl@0
   771
sl@0
   772
//
// Determines and validates the total number of bytes to transfer from the
// client's arguments.
//
// If iTransferCount is 0 the count is derived from the source-side
// element/frame geometry and must match the destination side exactly.
// If iTransferCount is non-zero, any geometry the client *did* specify on
// either side must be consistent with it (element size divides the count,
// fully-specified geometry multiplies out to the count, and no partial
// geometry without an element size).
//
// @return The validated byte count, or 0 if the arguments are contradictory
//         or incomplete (caller turns this into KErrArgument).
//
TUint DDmaRequest::GetTransferCount(const TDmaTransferArgs& aTransferArgs)
	{
	const TDmaTransferConfig& src = aTransferArgs.iSrcConfig;
	const TDmaTransferConfig& dst = aTransferArgs.iDstConfig;

	TUint count = aTransferArgs.iTransferCount;
	if (count == 0)
		{
		__KTRACE_OPT(KDMA, Kern::Printf("iTransferCount == 0"));
		// Derive the count from the source geometry...
		count = src.iElementSize * src.iElementsPerFrame *
			src.iFramesPerTransfer;
		const TUint dst_cnt = dst.iElementSize * dst.iElementsPerFrame *
			dst.iFramesPerTransfer;
		// ...and both sides must agree on the total.
		if (count != dst_cnt)
			{
			__KTRACE_OPT(KPANIC, Kern::Printf("Error: (count != dst_cnt)"));
			return 0;
			}
		}
	else
		{
		__KTRACE_OPT(KDMA, Kern::Printf("iTransferCount == %d", count));
		// Client shouldn't specify contradictory or incomplete things
		if (src.iElementSize != 0)
			{
			// Element size must divide the total evenly.
			if ((count % src.iElementSize) != 0)
				{
				__KTRACE_OPT(KPANIC,
							 Kern::Printf("Error: ((count %% src.iElementSize) != 0)"));
				return 0;
				}
			if (src.iElementsPerFrame != 0)
				{
				// Fully-specified geometry must multiply out to the count.
				if ((src.iElementSize * src.iElementsPerFrame * src.iFramesPerTransfer) != count)
					{
					__KTRACE_OPT(KPANIC,
								 Kern::Printf("Error: ((src.iElementSize * "
											  "src.iElementsPerFrame * "
											  "src.iFramesPerTransfer) != count)"));
					return 0;
					}
				}
			}
		else
			{
			// No element size given: no other src geometry may be specified.
			if (src.iElementsPerFrame != 0)
				{
				__KTRACE_OPT(KPANIC,
							 Kern::Printf("Error: (src.iElementsPerFrame != 0)"));
				return 0;
				}
			if (src.iFramesPerTransfer != 0)
				{
				__KTRACE_OPT(KPANIC,
							 Kern::Printf("Error: (src.iFramesPerTransfer != 0)"));
				return 0;
				}
			if (src.iElementsPerPacket != 0)
				{
				__KTRACE_OPT(KPANIC,
							 Kern::Printf("Error: (src.iElementsPerPacket != 0)"));
				return 0;
				}
			}
		// Same checks, destination side.
		if (dst.iElementSize != 0)
			{
			if ((count % dst.iElementSize) != 0)
				{
				__KTRACE_OPT(KPANIC,
							 Kern::Printf("Error: ((count %% dst.iElementSize) != 0)"));
				return 0;
				}
			if (dst.iElementsPerFrame != 0)
				{
				if ((dst.iElementSize * dst.iElementsPerFrame * dst.iFramesPerTransfer) != count)
					{
					__KTRACE_OPT(KPANIC,
								 Kern::Printf("Error: ((dst.iElementSize * "
											  "dst.iElementsPerFrame * "
											  "dst.iFramesPerTransfer) != count)"));
					return 0;
					}
				}
			}
		else
			{
			if (dst.iElementsPerFrame != 0)
				{
				__KTRACE_OPT(KPANIC,
							 Kern::Printf("Error: (dst.iElementsPerFrame != 0)"));
				return 0;
				}
			if (dst.iFramesPerTransfer != 0)
				{
				__KTRACE_OPT(KPANIC,
							 Kern::Printf("Error: (dst.iFramesPerTransfer != 0)"));
				return 0;
				}
			if (dst.iElementsPerPacket != 0)
				{
				__KTRACE_OPT(KPANIC,
							 Kern::Printf("Error: (dst.iElementsPerPacket != 0)"));
				return 0;
				}
			}
		}
	return count;
	}
sl@0
   880
sl@0
   881
sl@0
   882
//
// Common fragmentation path for both Fragment() overloads.
//
// Validates the transfer count, establishes the maximum fragment length
// (PSL limit, optionally capped by the client's iMaxTransferSize), checks
// ISR-callback preconditions, stamps the channel cookie into the arguments
// for the PSL, and then fragments via FragSym() or FragAsym() depending on
// whether the DMAC uses asymmetric hardware descriptors.
//
// @param aTransferArgs  Writable copy of the client's arguments; modified
//                       in place (iChannelCookie, iTransferCount, addresses).
// @return KErrNone, KErrArgument, or a descriptor/PSL error.
//
// Note: a stray ';' after the function's closing brace (a vacuous empty
// declaration) has been removed.
//
TInt DDmaRequest::Frag(TDmaTransferArgs& aTransferArgs)
	{
	__DMA_ASSERTD(!iQueued);

	// Transfer count checks
	const TUint count = GetTransferCount(aTransferArgs);
	if (count == 0)
		{
		return KErrArgument;
		}

	const TDmaTransferConfig& src = aTransferArgs.iSrcConfig;
	const TDmaTransferConfig& dst = aTransferArgs.iDstConfig;

	// Ask the PSL what the maximum length possible for this transfer is
	TUint max_xfer_len = iChannel.MaxTransferLength(src.iFlags, dst.iFlags,
													aTransferArgs.iPslRequestInfo);
	if (iMaxTransferSize)
		{
		// User has set a size cap - it must not exceed the PSL's own limit
		// (max_xfer_len == 0 means the controller imposes none).
		__KTRACE_OPT(KDMA, Kern::Printf("iMaxTransferSize != 0"));
		__DMA_ASSERTA((iMaxTransferSize <= max_xfer_len) || (max_xfer_len == 0));
		max_xfer_len = iMaxTransferSize;
		}
	else
		{
		// User doesn't care about max size
		if (max_xfer_len == 0)
			{
			// No maximum imposed by controller
			max_xfer_len = count;
			}
		}

	// ISR callback requested?
	const TBool isr_cb = (aTransferArgs.iFlags & KDmaRequestCallbackFromIsr);
	if (isr_cb)
		{
		// Requesting an ISR callback w/o supplying one?
		if (!iDmaCb)
			{
			return KErrArgument;
			}
		}

	// Set the channel cookie for the PSL
	aTransferArgs.iChannelCookie = iChannel.PslId();

	// Now the actual fragmentation
	TInt r;
	if (iChannel.iDmacCaps->iAsymHwDescriptors)
		{
		r = FragAsym(aTransferArgs, count, max_xfer_len);
		}
	else
		{
		r = FragSym(aTransferArgs, count, max_xfer_len);
		}

	if (r == KErrNone)
		{
		// Only commit to ISR-callback mode once fragmentation has succeeded.
		iIsrCb = isr_cb;
		}

	__DMA_INVARIANT();
	return r;
	}
sl@0
   949
sl@0
   950
sl@0
   951
//
// Fragments a transfer for DMACs with symmetric descriptors (one descriptor
// covers both source and destination of a fragment).
//
// Each loop iteration allocates one descriptor, computes the largest
// fragment that (a) fits the max transfer length, (b) is physically
// contiguous for any virtual memory endpoint, and (c) leaves the next
// fragment's addresses correctly aligned, then has the PSL initialise the
// descriptor. On any failure the whole list is freed and the error returned.
//
// @param aTransferArgs   Working copy; iTransferCount and the endpoint
//                        addresses are updated per fragment for the PSL.
// @param aCount          Total bytes remaining (decremented per fragment).
// @param aMaxTransferLen Maximum bytes per fragment.
//
TInt DDmaRequest::FragSym(TDmaTransferArgs& aTransferArgs, TUint aCount,
						  TUint aMaxTransferLen)
	{
	TDmaTransferConfig& src = aTransferArgs.iSrcConfig;
	TDmaTransferConfig& dst = aTransferArgs.iDstConfig;

	const TBool mem_src = (src.iFlags & KDmaMemAddr);
	const TBool mem_dst = (dst.iFlags & KDmaMemAddr);

	const TUint align_mask_src = iChannel.AddressAlignMask(src.iFlags,
														   src.iElementSize,
														   aTransferArgs.iPslRequestInfo);
	const TUint align_mask_dst = iChannel.AddressAlignMask(dst.iFlags,
														   dst.iElementSize,
														   aTransferArgs.iPslRequestInfo);
	// Memory buffers must satisfy alignment constraint
	__DMA_ASSERTD(!mem_src || ((src.iAddr & align_mask_src) == 0));
	__DMA_ASSERTD(!mem_dst || ((dst.iAddr & align_mask_dst) == 0));

	// Largest fragment length that keeps BOTH endpoints aligned.
	const TUint max_aligned_len = (aMaxTransferLen &
								   ~(Max(align_mask_src, align_mask_dst)));
	// Client and PSL sane?
	__DMA_ASSERTD(max_aligned_len > 0);

	FreeDesList();			   // revert any previous fragmentation attempt
	TInt r;
	do
		{
		// Allocate fragment
		r = ExpandDesList(/*1*/);
		if (r != KErrNone)
			{
			FreeDesList();
			break;
			}
		// Compute fragment size
		TUint c = Min(aMaxTransferLen, aCount);
		if (mem_src && !(src.iFlags & KDmaPhysAddr))
			{
			__KTRACE_OPT(KDMA, Kern::Printf("mem_src && !(src.iFlags & KDmaPhysAddr)"));
			// @@@ Should also take into account (src.iFlags & KDmaMemIsContiguous)!
			// Clip to the physically contiguous extent at src.iAddr.
			c = MaxPhysSize(src.iAddr, c);
			}
		if (mem_dst && !(dst.iFlags & KDmaPhysAddr))
			{
			__KTRACE_OPT(KDMA, Kern::Printf("mem_dst && !(dst.iFlags & KDmaPhysAddr)"));
			// @@@ Should also take into account (dst.iFlags & KDmaMemIsContiguous)!
			c = MaxPhysSize(dst.iAddr, c);
			}
		if ((mem_src || mem_dst) && (c < aCount) && (c > max_aligned_len))
			{
			// This is not the last fragment of a transfer to/from memory.
			// We must round down the fragment size so the next one is
			// correctly aligned.
			__KTRACE_OPT(KDMA, Kern::Printf("(mem_src || mem_dst) && (c < aCount) && (c > max_aligned_len)"));
			c = max_aligned_len;
			}

		// TODO: Make sure an element or frame on neither src or dst side
		// (which can be of different sizes) never straddles a DMA subtransfer.
		// (This would be a fragmentation error by the PIL.)

		// Set transfer count for the PSL
		aTransferArgs.iTransferCount = c;
		__KTRACE_OPT(KDMA, Kern::Printf("this fragm.: %d (0x%x) total remain.: %d (0x%x)",
										c, c, aCount, aCount));
		// Initialise fragment
		r = iChannel.iController->InitDes(*iLastHdr, aTransferArgs);
		if (r != KErrNone)
			{
			FreeDesList();
			break;
			}
		// Update for next iteration
		aCount -= c;
		if (mem_src)
			src.iAddr += c;
		if (mem_dst)
			dst.iAddr += c;
		}
	while (aCount > 0);

	return r;
	}
sl@0
  1035
sl@0
  1036
sl@0
  1037
//
// Fragments a transfer for DMACs with asymmetric hardware descriptors:
// source and destination sides get independent descriptor lists.
//
// Cleanup is deliberately asymmetric: if the source side fails only the
// source list is freed (the destination list was never built); if the
// destination side fails both lists are freed.
//
TInt DDmaRequest::FragAsym(TDmaTransferArgs& aTransferArgs, TUint aCount,
						   TUint aMaxTransferLen)
	{
	TInt r = FragAsymSrc(aTransferArgs, aCount, aMaxTransferLen);
	if (r != KErrNone)
		{
		FreeSrcDesList();
		return r;
		}
	r = FragAsymDst(aTransferArgs, aCount, aMaxTransferLen);
	if (r != KErrNone)
		{
		FreeSrcDesList();
		FreeDstDesList();
		}
	return r;
	}
sl@0
  1054
sl@0
  1055
sl@0
  1056
//
// Builds the source-side descriptor list for an asymmetric-descriptor DMAC.
// Same fragment-sizing logic as FragSym(), but only the source endpoint's
// alignment and physical contiguity are considered. On error the partially
// built list is left for the caller (FragAsym) to free.
//
TInt DDmaRequest::FragAsymSrc(TDmaTransferArgs& aTransferArgs, TUint aCount,
							  TUint aMaxTransferLen)
	{
	TDmaTransferConfig& src = aTransferArgs.iSrcConfig;

	const TBool mem_src = (src.iFlags & KDmaMemAddr);

	const TUint align_mask = iChannel.AddressAlignMask(src.iFlags,
													   src.iElementSize,
													   aTransferArgs.iPslRequestInfo);
	// Memory buffers must satisfy alignment constraint
	__DMA_ASSERTD(!mem_src || ((src.iAddr & align_mask) == 0));

	const TUint max_aligned_len = (aMaxTransferLen & ~align_mask);
	__DMA_ASSERTD(max_aligned_len > 0);				  // bug in PSL if not true

	FreeSrcDesList();		// revert any previous fragmentation attempt
	TInt r;
	do
		{
		// Allocate fragment
		r = ExpandSrcDesList(/*1*/);
		if (r != KErrNone)
			{
			break;
			}
		// Compute fragment size
		TUint c = Min(aMaxTransferLen, aCount);
		if (mem_src && !(src.iFlags & KDmaPhysAddr))
			{
			__KTRACE_OPT(KDMA, Kern::Printf("mem_src && !(src.iFlags & KDmaPhysAddr)"));
			// Clip to the physically contiguous extent at src.iAddr.
			c = MaxPhysSize(src.iAddr, c);
			}
		if (mem_src && (c < aCount) && (c > max_aligned_len))
			{
			// This is not the last fragment of a transfer from memory.
			// We must round down the fragment size so the next one is
			// correctly aligned.
			__KTRACE_OPT(KDMA, Kern::Printf("mem_src && (c < aCount) && (c > max_aligned_len)"));
			c = max_aligned_len;
			}
		// Set transfer count for the PSL
		aTransferArgs.iTransferCount = c;
		__KTRACE_OPT(KDMA, Kern::Printf("this fragm.: %d (0x%x) total remain.: %d (0x%x)",
										c, c, aCount, aCount));
		// Initialise fragment
		r = iChannel.iController->InitSrcHwDes(*iSrcLastHdr, aTransferArgs);
		if (r != KErrNone)
			{
			break;
			}
		// Update for next iteration
		aCount -= c;
		if (mem_src)
			src.iAddr += c;
		}
	while (aCount > 0);

	return r;
	}
sl@0
  1116
sl@0
  1117
sl@0
  1118
//
// Builds the destination-side descriptor list for an asymmetric-descriptor
// DMAC. Mirror image of FragAsymSrc() for the destination endpoint. On error
// the partially built lists are freed by the caller (FragAsym).
//
TInt DDmaRequest::FragAsymDst(TDmaTransferArgs& aTransferArgs, TUint aCount,
							  TUint aMaxTransferLen)
	{
	TDmaTransferConfig& dst = aTransferArgs.iDstConfig;

	const TBool mem_dst = (dst.iFlags & KDmaMemAddr);

	const TUint align_mask = iChannel.AddressAlignMask(dst.iFlags,
													   dst.iElementSize,
													   aTransferArgs.iPslRequestInfo);
	// Memory buffers must satisfy alignment constraint
	__DMA_ASSERTD(!mem_dst || ((dst.iAddr & align_mask) == 0));

	const TUint max_aligned_len = (aMaxTransferLen & ~align_mask);
	__DMA_ASSERTD(max_aligned_len > 0);				  // bug in PSL if not true

	FreeDstDesList();		// revert any previous fragmentation attempt
	TInt r;
	do
		{
		// Allocate fragment
		r = ExpandDstDesList(/*1*/);
		if (r != KErrNone)
			{
			break;
			}
		// Compute fragment size
		TUint c = Min(aMaxTransferLen, aCount);
		if (mem_dst && !(dst.iFlags & KDmaPhysAddr))
			{
			__KTRACE_OPT(KDMA, Kern::Printf("mem_dst && !(dst.iFlags & KDmaPhysAddr)"));
			// Clip to the physically contiguous extent at dst.iAddr.
			c = MaxPhysSize(dst.iAddr, c);
			}
		if (mem_dst && (c < aCount) && (c > max_aligned_len))
			{
			// This is not the last fragment of a transfer to memory.
			// We must round down the fragment size so the next one is
			// correctly aligned.
			__KTRACE_OPT(KDMA, Kern::Printf("mem_dst && (c < aCount) && (c > max_aligned_len)"));
			c = max_aligned_len;
			}
		// Set transfer count for the PSL
		aTransferArgs.iTransferCount = c;
		__KTRACE_OPT(KDMA, Kern::Printf("this fragm.: %d (0x%x) total remain.: %d (0x%x)",
										c, c, aCount, aCount));
		// Initialise fragment
		r = iChannel.iController->InitDstHwDes(*iDstLastHdr, aTransferArgs);
		if (r != KErrNone)
			{
			break;
			}
		// Update for next iteration
		aCount -= c;
		if (mem_dst)
			dst.iAddr += c;
		}
	while (aCount > 0);

	return r;
	}
sl@0
  1178
sl@0
  1179
sl@0
  1180
//
// Queues a previously fragmented request on its channel.
//
// Under the channel lock: rejects queueing while an ISR-callback request is
// outstanding, rejects an ISR-callback request on a non-empty queue, and
// rejects queueing while channel requests are being cancelled. Otherwise the
// request is appended to the channel's queue, its descriptor chain is linked
// onto the channel's chain, and the PSL is told via DoQueue().
//
// @return KErrNone on success, KErrGeneral if the request was rejected.
//
EXPORT_C TInt DDmaRequest::Queue()
	{
	__KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::Queue thread %O", &Kern::CurrentThread()));
	__DMA_ASSERTD(iDesCount > 0);	// Not configured? Call Fragment() first!
	__DMA_ASSERTD(!iQueued);

	// Append request to queue and link new descriptor list to existing one.
	iChannel.Wait();

	TInt r = KErrGeneral;
	const TBool ch_isr_cb = __e32_atomic_load_acq32(&iChannel.iIsrCbRequest);
	if (ch_isr_cb)
		{
		// Client mustn't try to queue any new request while one with an ISR
		// callback is already queued on this channel. This is to make sure
		// that the channel's Transfer() function is not called by both the ISR
		// and the client thread at the same time.
		__KTRACE_OPT(KPANIC, Kern::Printf("An ISR cb request exists - not queueing"));
		}
	else if (iIsrCb && !iChannel.IsQueueEmpty())
		{
		// Client mustn't try to queue an ISR callback request whilst any
		// others are still queued on this channel. This is to make sure that
		// the ISR callback doesn't get executed together with the DFC(s) of
		// any previous request(s).
		__KTRACE_OPT(KPANIC, Kern::Printf("Request queue not empty - not queueing"));
		}
	else if (iChannel.iIsrDfc & (TUint32)TDmaChannel::KCancelFlagMask)
		{
		__KTRACE_OPT(KPANIC, Kern::Printf("Channel requests cancelled - not queueing"));
		}
	else
		{
		iQueued = ETrue;
		iChannel.iReqQ.Add(&iLink);
		// iChannel.iNullPtr points to iChannel.iCurHdr for an empty queue
		*iChannel.iNullPtr = iFirstHdr;
		iChannel.iNullPtr = &(iLastHdr->iNext);
		if (iIsrCb)
			{
			// Since we've made sure that there is no other request in the
			// queue before this, the only thing of relevance is the channel
			// DFC which might yet have to complete for the previous request,
			// and this function might indeed have been called from there via
			// the client callback. This should be all right though as once
			// we've set the following flag no further Queue()'s will be
			// possible.
			__e32_atomic_store_rel32(&iChannel.iIsrCbRequest, ETrue);
			}
		iChannel.DoQueue(const_cast<const DDmaRequest&>(*this));
		r = KErrNone;
		}
	iChannel.Signal();

	__DMA_INVARIANT();
	return r;
	}
sl@0
  1237
sl@0
  1238
sl@0
  1239
// Grows the symmetric (combined src+dst) descriptor list by aCount entries.
EXPORT_C TInt DDmaRequest::ExpandDesList(TInt aCount)
	{
	return ExpandDesList(aCount, iDesCount, iFirstHdr, iLastHdr);
	}
sl@0
  1243
sl@0
  1244
sl@0
  1245
// Grows the source-side descriptor list (asymmetric DMACs) by aCount entries.
EXPORT_C TInt DDmaRequest::ExpandSrcDesList(TInt aCount)
	{
	return ExpandDesList(aCount, iSrcDesCount, iSrcFirstHdr, iSrcLastHdr);
	}
sl@0
  1249
sl@0
  1250
sl@0
  1251
// Grows the destination-side descriptor list (asymmetric DMACs) by aCount entries.
EXPORT_C TInt DDmaRequest::ExpandDstDesList(TInt aCount)
	{
	return ExpandDesList(aCount, iDstDesCount, iDstFirstHdr, iDstLastHdr);
	}
sl@0
  1255
sl@0
  1256
sl@0
  1257
//
// Common worker for the three Expand*DesList() wrappers: moves aCount
// descriptor headers from the controller's free pool onto the tail of the
// given list, chaining hardware descriptors together where supported.
//
// The channel's per-channel budget (iAvailDesCount) is checked first; the
// actual free-pool manipulation happens under the controller lock.
//
// @param aCount    Number of headers to append (> 0).
// @param aDesCount The list's element counter (updated).
// @param aFirstHdr/aLastHdr  The list's head/tail pointers (updated).
// @return KErrNone, or KErrTooBig if the channel's budget is exceeded.
//
TInt DDmaRequest::ExpandDesList(TInt aCount, TInt& aDesCount,
								SDmaDesHdr*& aFirstHdr,
								SDmaDesHdr*& aLastHdr)
	{
	__DMA_ASSERTD(!iQueued);
	__DMA_ASSERTD(aCount > 0);

	if (aCount > iChannel.iAvailDesCount)
		{
		return KErrTooBig;
		}

	iChannel.iAvailDesCount -= aCount;
	aDesCount += aCount;

	TDmac& c = *(iChannel.iController);
	c.Wait();

	if (aFirstHdr == NULL)
		{
		// Handle an empty list specially to simplify the following loop
		aFirstHdr = aLastHdr = c.iFreeHdr;
		c.iFreeHdr = c.iFreeHdr->iNext;
		--aCount;
		}
	else
		{
		// Non-empty list: temporarily splice the free list onto the tail.
		aLastHdr->iNext = c.iFreeHdr;
		}

	// Remove as many descriptors and headers from the free pool as necessary
	// and ensure hardware descriptors are chained together.
	while (aCount-- > 0)
		{
		__DMA_ASSERTD(c.iFreeHdr != NULL);
		if (c.iCapsHwDes)
			{
			c.ChainHwDes(*aLastHdr, *(c.iFreeHdr));
			}
		aLastHdr = c.iFreeHdr;
		c.iFreeHdr = c.iFreeHdr->iNext;
		}

	c.Signal();

	// Terminate the list (it was spliced into the free list above).
	aLastHdr->iNext = NULL;

	__DMA_INVARIANT();
	return KErrNone;
	}
sl@0
  1307
sl@0
  1308
sl@0
  1309
// Returns the symmetric (combined src+dst) descriptor list to the free pool.
EXPORT_C void DDmaRequest::FreeDesList()
	{
	FreeDesList(iDesCount, iFirstHdr, iLastHdr);
	}
sl@0
  1313
sl@0
  1314
sl@0
  1315
// Returns the source-side descriptor list to the free pool.
EXPORT_C void DDmaRequest::FreeSrcDesList()
	{
	FreeDesList(iSrcDesCount, iSrcFirstHdr, iSrcLastHdr);
	}
sl@0
  1319
sl@0
  1320
sl@0
  1321
// Returns the destination-side descriptor list to the free pool.
EXPORT_C void DDmaRequest::FreeDstDesList()
	{
	FreeDesList(iDstDesCount, iDstFirstHdr, iDstLastHdr);
	}
sl@0
  1325
sl@0
  1326
sl@0
  1327
//
// Common worker for the three Free*DesList() wrappers: clears each hardware
// descriptor on the list, then returns the whole chain to the controller's
// free pool and resets the list's head/tail/count. No-op for an empty list.
//
// The ClearHwDes() walk happens outside the controller lock: the headers are
// not yet on the free list, so no other thread can reach them.
//
// (Also removed a stray ';' after the while-loop's closing brace - a vacuous
// empty statement.)
//
void DDmaRequest::FreeDesList(TInt& aDesCount, SDmaDesHdr*& aFirstHdr, SDmaDesHdr*& aLastHdr)
	{
	__DMA_ASSERTD(!iQueued);

	if (aDesCount > 0)
		{
		iChannel.iAvailDesCount += aDesCount;
		TDmac& c = *(iChannel.iController);
		const SDmaDesHdr* hdr = aFirstHdr;
		while (hdr)
			{
			c.ClearHwDes(*hdr);
			hdr = hdr->iNext;
			}
		// Splice the whole chain onto the head of the free list.
		c.Wait();
		aLastHdr->iNext = c.iFreeHdr;
		c.iFreeHdr = aFirstHdr;
		c.Signal();
		aFirstHdr = aLastHdr = NULL;
		aDesCount = 0;
		}
	}
sl@0
  1349
sl@0
  1350
sl@0
  1351
// Placeholder for source-side element counting - deliberate no-op until
// the feature is implemented.
EXPORT_C void DDmaRequest::EnableSrcElementCounting(TBool /*aResetElementCount*/)
	{
	// Not yet implemented.
	}
sl@0
  1356
sl@0
  1357
sl@0
  1358
// Placeholder for destination-side element counting - deliberate no-op until
// the feature is implemented.
EXPORT_C void DDmaRequest::EnableDstElementCounting(TBool /*aResetElementCount*/)
	{
	// Not yet implemented.
	}
sl@0
  1363
sl@0
  1364
sl@0
  1365
// Placeholder - deliberate no-op until element counting is implemented.
EXPORT_C void DDmaRequest::DisableSrcElementCounting()
	{
	// Not yet implemented.
	}
sl@0
  1370
sl@0
  1371
sl@0
  1372
// Placeholder - deliberate no-op until element counting is implemented.
EXPORT_C void DDmaRequest::DisableDstElementCounting()
	{
	// Not yet implemented.
	}
sl@0
  1377
sl@0
  1378
sl@0
  1379
//
// Returns the total number of *source-side* elements transferred so far.
//
// Not yet fully implemented - the hardware-descriptor branch is still
// largely placeholder code (it originally only existed "to touch some
// symbols"), and the pseudo-descriptor branch is empty.
//
EXPORT_C TUint32 DDmaRequest::TotalNumSrcElementsTransferred()
	{
	// Not yet implemented.

	// So far largely bogus code (just to touch some symbols)...
	iTotalNumSrcElementsTransferred = 0;
	TDmac& c = *(iChannel.iController);
	if (c.iCapsHwDes)
		{
		for (const SDmaDesHdr* pH = iFirstHdr; pH != NULL; pH = pH->iNext)
			{
			// Bug fix: this accessor reports the SOURCE element count, so sum
			// the source-side count of each hardware descriptor (previously
			// the destination-side count was summed by mistake).
			iTotalNumSrcElementsTransferred += c.HwDesNumSrcElementsTransferred(*pH);
			}
		}
	else
		{
		// Do something different for pseudo descriptors...
		}
	return iTotalNumSrcElementsTransferred;
	}
sl@0
  1399
sl@0
  1400
sl@0
  1401
//
// Returns the total number of destination-side elements transferred so far.
// Not yet implemented - currently just returns the (never updated) counter.
//
EXPORT_C TUint32 DDmaRequest::TotalNumDstElementsTransferred()
	{
	// Not yet implemented.
	return iTotalNumDstElementsTransferred;
	}
sl@0
  1406
sl@0
  1407
sl@0
  1408
// Number of fragments in the symmetric (combined src+dst) descriptor list.
EXPORT_C TInt DDmaRequest::FragmentCount()
	{
	return FragmentCount(iFirstHdr);
	}
sl@0
  1412
sl@0
  1413
sl@0
  1414
// Number of fragments in the source-side descriptor list (asymmetric DMACs).
EXPORT_C TInt DDmaRequest::SrcFragmentCount()
	{
	return FragmentCount(iSrcFirstHdr);
	}
sl@0
  1418
sl@0
  1419
sl@0
  1420
// Number of fragments in the destination-side descriptor list (asymmetric DMACs).
EXPORT_C TInt DDmaRequest::DstFragmentCount()
	{
	return FragmentCount(iDstFirstHdr);
	}
sl@0
  1424
sl@0
  1425
sl@0
  1426
//
// Common worker for the FragmentCount() wrappers: walks the singly-linked
// descriptor-header chain starting at aHdr and returns its length
// (0 for an empty / NULL chain).
//
TInt DDmaRequest::FragmentCount(const SDmaDesHdr* aHdr)
	{
	TInt n = 0;
	const SDmaDesHdr* hdr = aHdr;
	while (hdr != NULL)
		{
		++n;
		hdr = hdr->iNext;
		}
	return n;
	}
sl@0
  1435
sl@0
  1436
sl@0
  1437
//
// Called when request is removed from request queue in channel.
// Clears the queued flag, re-terminates this request's descriptor chain
// (it was linked into the channel's chain while queued), and lets the PSL
// unlink the last hardware descriptor.
//
inline void DDmaRequest::OnDeque()
	{
	iQueued = EFalse;
	iLastHdr->iNext = NULL;
	iChannel.DoUnlink(*iLastHdr);
	}
sl@0
  1446
sl@0
  1447
sl@0
  1448
#ifdef _DEBUG
sl@0
  1449
void DDmaRequest::Invariant()
sl@0
  1450
	{
sl@0
  1451
	iChannel.Wait();
sl@0
  1452
	__DMA_ASSERTD(LOGICAL_XOR(iCb, iDmaCb));
sl@0
  1453
	if (iChannel.iDmacCaps->iAsymHwDescriptors)
sl@0
  1454
		{
sl@0
  1455
		__DMA_ASSERTD((0 <= iSrcDesCount) && (iSrcDesCount <= iChannel.iMaxDesCount) &&
sl@0
  1456
					  (0 <= iDstDesCount) && (iDstDesCount <= iChannel.iMaxDesCount));
sl@0
  1457
		if (iSrcDesCount == 0)
sl@0
  1458
			{
sl@0
  1459
			__DMA_ASSERTD(iDstDesCount == 0);
sl@0
  1460
			__DMA_ASSERTD(!iQueued);
sl@0
  1461
			__DMA_ASSERTD(!iSrcFirstHdr && !iSrcLastHdr &&
sl@0
  1462
						  !iDstFirstHdr && !iDstLastHdr);
sl@0
  1463
			}
sl@0
  1464
		else
sl@0
  1465
			{
sl@0
  1466
			__DMA_ASSERTD(iChannel.iController->IsValidHdr(iSrcFirstHdr));
sl@0
  1467
			__DMA_ASSERTD(iChannel.iController->IsValidHdr(iSrcLastHdr));
sl@0
  1468
			__DMA_ASSERTD(iChannel.iController->IsValidHdr(iDstFirstHdr));
sl@0
  1469
			__DMA_ASSERTD(iChannel.iController->IsValidHdr(iDstLastHdr));
sl@0
  1470
			}
sl@0
  1471
		}
sl@0
  1472
	else
sl@0
  1473
		{
sl@0
  1474
		__DMA_ASSERTD((0 <= iDesCount) && (iDesCount <= iChannel.iMaxDesCount));
sl@0
  1475
		if (iDesCount == 0)
sl@0
  1476
			{
sl@0
  1477
			__DMA_ASSERTD(!iQueued);
sl@0
  1478
			__DMA_ASSERTD(!iFirstHdr && !iLastHdr);
sl@0
  1479
			}
sl@0
  1480
		else
sl@0
  1481
			{
sl@0
  1482
			__DMA_ASSERTD(iChannel.iController->IsValidHdr(iFirstHdr));
sl@0
  1483
			__DMA_ASSERTD(iChannel.iController->IsValidHdr(iLastHdr));
sl@0
  1484
			}
sl@0
  1485
		}
sl@0
  1486
	iChannel.Signal();
sl@0
  1487
	}
sl@0
  1488
#endif
sl@0
  1489
sl@0
  1490
sl@0
  1491
//////////////////////////////////////////////////////////////////////////////
sl@0
  1492
// TDmaChannel
sl@0
  1493
sl@0
  1494
_LIT(KDmaChannelMutex, "DMA-Channel");
sl@0
  1495
sl@0
  1496
//
// Channel constructor: zero/NULL-initialises all state and creates the
// per-channel mutex. The DFC is only default-constructed here; it is
// properly (re)constructed with the client's DFC queue in Open().
//
TDmaChannel::TDmaChannel()
	: iController(NULL),
	  iDmacCaps(NULL),
	  iPslId(0),
	  iDynChannel(EFalse),
	  iPriority(KDmaPriorityNone),
	  iCurHdr(NULL),
	  iNullPtr(&iCurHdr),		// empty queue: iNullPtr points at iCurHdr
	  iDfc(Dfc, NULL, 0),		// placeholder - replaced via placement new in Open()
	  iMaxDesCount(0),
	  iAvailDesCount(0),
	  iIsrDfc(0),
	  iReqQ(),
	  iReqCount(0),
	  iCancelInfo(NULL),
	  iRedoRequest(EFalse),
	  iIsrCbRequest(EFalse)
	{
	const TInt r = Kern::MutexCreate(iMutex, KDmaChannelMutex, KMutexOrdDmaChannel);
	__DMA_ASSERTA(r == KErrNone);

#ifndef __WINS__
	// On the emulator this code is called from within the codeseg mutex.
	// The invariant tries to hold the dma channel mutex, but this is not allowed
	__DMA_INVARIANT();
#endif
	}
sl@0
  1523
sl@0
  1524
sl@0
  1525
// Destructor: releases the per-channel mutex created in the constructor.
TDmaChannel::~TDmaChannel()
	{
	Kern::SafeClose((DObject*&)iMutex, NULL);
	}
sl@0
  1529
sl@0
  1530
sl@0
  1531
//
sl@0
  1532
// static member function
sl@0
  1533
//
sl@0
  1534
//
// static member function
//
// Opens a DMA channel: asks the PSL's channel manager for the channel
// identified by aInfo.iCookie, sanity-checks what the PSL returned, reserves
// the requested number of descriptors, and binds the channel DFC to the
// client's DFC queue. On success aChannel points at the open channel.
//
// @return KErrNone, KErrInUse if the channel is unavailable, or a
//         descriptor-reservation error.
//
EXPORT_C TInt TDmaChannel::Open(const SCreateInfo& aInfo, TDmaChannel*& aChannel)
	{
	__KTRACE_OPT(KDMA, Kern::Printf("TDmaChannel::Open thread %O", &Kern::CurrentThread()));

	__DMA_ASSERTD(aInfo.iDesCount >= 1);
	__DMA_ASSERTD(aInfo.iPriority <= KDmaPriority8);
	__DMA_ASSERTD(aInfo.iDfcQ != NULL);
	__DMA_ASSERTD(aInfo.iDfcPriority < KNumDfcPriorities);

	aChannel = NULL;

	DmaChannelMgr::Wait();
	TDmaChannel* pC = DmaChannelMgr::Open(aInfo.iCookie, aInfo.iDynChannel, aInfo.iPriority);
	DmaChannelMgr::Signal();
	if (!pC)
		{
		return KErrInUse;
		}
	__DMA_ASSERTD(pC->iController != NULL);
	__DMA_ASSERTD(pC->iDmacCaps != NULL);
	__DMA_ASSERTD(pC->iController->iCapsHwDes == pC->DmacCaps().iHwDescriptors);
	// PSL needs to set iDynChannel if and only if dynamic channel was requested
	__DMA_ASSERTD(!LOGICAL_XOR(aInfo.iDynChannel, pC->iDynChannel));

	const TInt r = pC->iController->ReserveSetOfDes(aInfo.iDesCount);
	if (r != KErrNone)
		{
		pC->Close();
		return r;
		}
	pC->iAvailDesCount = pC->iMaxDesCount = aInfo.iDesCount;

	// Re-construct the DFC in place with the client's queue and priority
	// (the constructor only default-initialised it).
	new (&pC->iDfc) TDfc(&Dfc, pC, aInfo.iDfcQ, aInfo.iDfcPriority);

	aChannel = pC;

#ifdef _DEBUG
	pC->Invariant();
#endif
	__KTRACE_OPT(KDMA, Kern::Printf("opened channel %d", pC->iPslId));
	return KErrNone;
	}
sl@0
  1576
sl@0
  1577
sl@0
  1578
//
// Closes a previously opened channel. The request queue must be empty and
// no DDmaRequest objects may still be bound to the channel (iReqCount == 0).
//
EXPORT_C void TDmaChannel::Close()
	{
	__KTRACE_OPT(KDMA, Kern::Printf("TDmaChannel::Close %d iReqCount=%d", iPslId, iReqCount));
	__DMA_ASSERTD(IsQueueEmpty());
	__DMA_ASSERTD(iReqCount == 0);

	// Descriptor leak? -> bug in request code
	__DMA_ASSERTD(iAvailDesCount == iMaxDesCount);

	// Neither an ISR-initiated redo nor an ISR callback may be outstanding.
	__DMA_ASSERTD(!iRedoRequest);
	__DMA_ASSERTD(!iIsrCbRequest);

	// Return the channel's descriptor allocation to the controller pool.
	iController->ReleaseSetOfDes(iMaxDesCount);
	iAvailDesCount = iMaxDesCount = 0;

	// Hand the channel back to the PSL under the channel manager lock.
	DmaChannelMgr::Wait();
	DmaChannelMgr::Close(this);
	// The following assignment will be removed once IsOpened() has been
	// removed. That's because 'this' shouldn't be touched any more once
	// Close() has returned from the PSL.
	iController = NULL;
	DmaChannelMgr::Signal();
	}
sl@0
  1601
sl@0
  1602
sl@0
  1603
//
// Links this channel to aChannel, or removes an existing link when
// aChannel is NULL. The actual (un)linking is performed by the controller.
//
EXPORT_C TInt TDmaChannel::LinkToChannel(TDmaChannel* aChannel)
	{
	__KTRACE_OPT(KDMA, Kern::Printf("TDmaChannel::LinkToChannel thread %O",
									&Kern::CurrentThread()));
	// A NULL argument means "unlink this channel from its successor".
	return aChannel ? iController->LinkChannels(*this, *aChannel)
					: iController->UnlinkChannel(*this);
	}
sl@0
  1616
sl@0
  1617
sl@0
  1618
//
// Asks the controller to pause any transfer in progress on this channel.
//
EXPORT_C TInt TDmaChannel::Pause()
	{
	__KTRACE_OPT(KDMA, Kern::Printf("TDmaChannel::Pause thread %O",
									&Kern::CurrentThread()));
	// Pausing is implemented entirely in the PSL controller.
	const TInt r = iController->PauseTransfer(*this);
	return r;
	}
sl@0
  1624
sl@0
  1625
sl@0
  1626
//
// Asks the controller to resume a previously paused transfer on this channel.
//
EXPORT_C TInt TDmaChannel::Resume()
	{
	__KTRACE_OPT(KDMA, Kern::Printf("TDmaChannel::Resume thread %O",
									&Kern::CurrentThread()));
	// Resuming is implemented entirely in the PSL controller.
	const TInt r = iController->ResumeTransfer(*this);
	return r;
	}
sl@0
  1632
sl@0
  1633
sl@0
  1634
//
// Stops the current transfer (if any) and empties the channel's request
// queue. If the channel DFC runs in a different thread than the caller,
// this function queues the DFC and blocks on a fast semaphore until the
// DFC has acknowledged the cancellation; the statement ordering below is
// therefore critical and must not be changed.
//
EXPORT_C void TDmaChannel::CancelAll()
	{
	__KTRACE_OPT(KDMA, Kern::Printf("TDmaChannel::CancelAll thread %O channel - %d",
									&Kern::CurrentThread(), iPslId));
	NThread* const nt = NKern::CurrentThread();
	TBool wait = EFalse;
	// cancelinfo lives on this stack frame; it is only linked into the
	// channel's waiter list while we are blocked below, so this is safe.
	TDmaCancelInfo cancelinfo;
	TDmaCancelInfo* waiters = NULL;

	NKern::ThreadEnterCS();
	Wait();

	NThreadBase* const dfc_nt = iDfc.Thread();
	// Shouldn't be NULL (i.e. an IDFC)
	__DMA_ASSERTD(dfc_nt);

	// Raise the cancel flag and clear the DFC/error counts in one atomic op.
	__e32_atomic_store_ord32(&iIsrDfc, (TUint32)KCancelFlagMask);
	// ISRs after this point will not post a DFC, however a DFC may already be
	// queued or running or both.
	if (!IsQueueEmpty())
		{
		// There is a transfer in progress. It may complete before the DMAC
		// has stopped, but the resulting ISR will not post a DFC.
		// ISR should not happen after this function returns.
		iController->StopTransfer(*this);

		ResetStateMachine();

		// Clean-up the request queue.
		SDblQueLink* pL;
		while ((pL = iReqQ.GetFirst()) != NULL)
			{
			DDmaRequest* pR = _LOFF(pL, DDmaRequest, iLink);
			pR->OnDeque();
			}
		}
	if (dfc_nt == nt)
		{
		// DFC runs in this thread, so just cancel it and we're finished
		iDfc.Cancel();

		// If other calls to CancelAll() are waiting for the DFC, release them here
		waiters = iCancelInfo;
		iCancelInfo = NULL;

		// Reset the ISR count
		__e32_atomic_store_rel32(&iIsrDfc, 0);
		}
	else
		{
		// DFC runs in another thread. Make sure it's queued and then wait for it to run.
		if (iCancelInfo)
			{
			// Insert cancelinfo into the list so that it precedes iCancelInfo
			cancelinfo.InsertBefore(iCancelInfo);
			}
		else
			{
			iCancelInfo = &cancelinfo;
			}
		wait = ETrue;
		iDfc.Enque();
		}

	Signal();

	if (waiters)
		{
		// We cancelled the DFC ourselves above - release any other threads
		// that were already waiting on it.
		waiters->Signal();
		}
	else if (wait)
		{
		// Block until the DFC signals that the cancellation is complete.
		NKern::FSWait(&cancelinfo.iSem);
		}

 	NKern::ThreadLeaveCS();
	__DMA_INVARIANT();
	}
sl@0
  1712
sl@0
  1713
sl@0
  1714
/** Updates the transfer parameters of the just-completed request from ISR
	context and immediately reschedules it on the channel.

	Must only be called from within the ISR callback of a request that was
	queued with an ISR callback (iIsrCbRequest). Only single-descriptor
	requests may be updated this way.

	@param aSrcAddr        New source address, or KPhysAddrInvalid to keep the old one
	@param aDstAddr        New destination address, or KPhysAddrInvalid to keep the old one
	@param aTransferCount  New transfer count, or 0 to keep the old one
	@param aPslRequestInfo New PSL request info word, or 0 to keep the old one
	@param aIsrCb          If EFalse, the rescheduled request completes via the
						   normal DFC callback path instead of another ISR callback

	@return KErrNone if successful; KErrArgument if src and dst were updated
			to the same address (debug builds); otherwise the error code
			returned by the PSL descriptor update.
 */
EXPORT_C TInt TDmaChannel::IsrRedoRequest(TUint32 aSrcAddr, TUint32 aDstAddr,
										  TUint aTransferCount,
										  TUint32 aPslRequestInfo,
										  TBool aIsrCb)
	{
	__KTRACE_OPT(KDMA,
				 Kern::Printf("TDmaChannel::IsrRedoRequest src=0x%08x, "
							  "dst=0x%08x, count=%d, pslInfo=0x%08x, isrCb=%d",
							  aSrcAddr, aDstAddr, aTransferCount, aPslRequestInfo,
							  aIsrCb));
	// Function needs to be called in ISR context.
	__DMA_ASSERTD(NKern::CurrentContext() == NKern::EInterrupt);

	__DMA_ASSERTD(!iReqQ.IsEmpty());
	__DMA_ASSERTD(iIsrCbRequest);

#ifdef _DEBUG
	if ((aSrcAddr != KPhysAddrInvalid) && (aSrcAddr == aDstAddr))
		{
		__KTRACE_OPT(KPANIC,
					 Kern::Printf("Error: Updating src & dst to same address: 0x%08x",
								  aSrcAddr));
		return KErrArgument;
		}
#endif

	// We assume here that the just completed request is the first one in the
	// queue, i.e. that even if there is more than one request in the queue,
	// their respective last and first (hw) descriptors are *not* linked.
	// (Although that's what apparently happens in TDmaSgChannel::DoQueue() /
	// TDmac::AppendHwDes() @@@).
	DDmaRequest* const pCurReq = _LOFF(iReqQ.First(), DDmaRequest, iLink);
	TInt r;

	if (iDmacCaps->iAsymHwDescriptors)
		{
		// We don't allow multiple-descriptor chains to be updated here
		__DMA_ASSERTD((pCurReq->iSrcDesCount == 1) && (pCurReq->iDstDesCount == 1));
		// Adjust parameters if necessary (asymmetrical s/g variety)
		const SDmaDesHdr* const pSrcFirstHdr = pCurReq->iSrcFirstHdr;
		if ((aSrcAddr != KPhysAddrInvalid) || aTransferCount || aPslRequestInfo)
			{
			r = iController->UpdateSrcHwDes(*pSrcFirstHdr, aSrcAddr,
											aTransferCount, aPslRequestInfo);
			if (r != KErrNone)
				{
				__KTRACE_OPT(KPANIC, Kern::Printf("Src descriptor updating failed in PSL"));
				return r;
				}
			}
		const SDmaDesHdr* const pDstFirstHdr = pCurReq->iDstFirstHdr;
		if ((aDstAddr != KPhysAddrInvalid) || aTransferCount || aPslRequestInfo)
			{
			// Bug fix: the destination descriptor must be updated with
			// aDstAddr - previously aSrcAddr was passed here by mistake,
			// which corrupted the destination address of the redone request.
			r = iController->UpdateDstHwDes(*pDstFirstHdr, aDstAddr,
											aTransferCount, aPslRequestInfo);
			if (r != KErrNone)
				{
				__KTRACE_OPT(KPANIC, Kern::Printf("Dst descriptor updating failed in PSL"));
				return r;
				}
			}
		// Reschedule the request
		iController->Transfer(*this, *pSrcFirstHdr, *pDstFirstHdr);
		}
	else
		{
		// We don't allow multiple-descriptor chains to be updated here
		__DMA_ASSERTD(pCurReq->iDesCount == 1);
		// Adjust parameters if necessary (symmetrical s/g and non-s/g variety)
		const SDmaDesHdr* const pFirstHdr = pCurReq->iFirstHdr;
		if ((aSrcAddr != KPhysAddrInvalid) || (aDstAddr != KPhysAddrInvalid) ||
			aTransferCount || aPslRequestInfo)
			{
			r = iController->UpdateDes(*pFirstHdr, aSrcAddr, aDstAddr,
									   aTransferCount, aPslRequestInfo);
			if (r != KErrNone)
				{
				__KTRACE_OPT(KPANIC, Kern::Printf("Descriptor updating failed"));
				return r;
				}
			}
		// Reschedule the request
		iController->Transfer(*this, *pFirstHdr);
		}

	if (!aIsrCb)
		{
		// Not another ISR callback please
		pCurReq->iIsrCb = aIsrCb;
		}
	// Tell the framework (ISR / DFC) that this completion re-queued itself.
	iRedoRequest = ETrue;

	return KErrNone;
	}
sl@0
  1808
sl@0
  1809
sl@0
  1810
//
// Test support: asks the PSL to simulate a failure of the next transfer.
// The fragment count argument is currently unused.
//
EXPORT_C TInt TDmaChannel::FailNext(TInt /*aFragmentCount*/)
	{
	const TInt r = iController->FailNext(*this);
	return r;
	}
sl@0
  1814
sl@0
  1815
sl@0
  1816
//
// Test support: asks the PSL to swallow the next aInterruptCount
// transfer-complete interrupts on this channel.
//
EXPORT_C TInt TDmaChannel::MissNextInterrupts(TInt aInterruptCount)
	{
	const TInt r = iController->MissNextInterrupts(*this, aInterruptCount);
	return r;
	}
sl@0
  1820
sl@0
  1821
sl@0
  1822
//
// Channel-specific extension hook, forwarded straight to the controller.
//
EXPORT_C TInt TDmaChannel::Extension(TInt aCmd, TAny* aArg)
	{
	const TInt r = iController->Extension(*this, aCmd, aArg);
	return r;
	}
sl@0
  1826
sl@0
  1827
sl@0
  1828
//
sl@0
  1829
// static member function
sl@0
  1830
//
sl@0
  1831
EXPORT_C TInt TDmaChannel::StaticExtension(TInt aCmd, TAny* aArg)
sl@0
  1832
	{
sl@0
  1833
	return DmaChannelMgr::StaticExtension(aCmd, aArg);
sl@0
  1834
	}
sl@0
  1835
sl@0
  1836
sl@0
  1837
//
// Returns the maximum transfer length for this channel given the transfer
// flags and PSL info word; the limit is determined by the controller.
//
EXPORT_C TUint TDmaChannel::MaxTransferLength(TUint aSrcFlags, TUint aDstFlags,
											  TUint32 aPslInfo)
	{
	const TUint maxLen =
		iController->MaxTransferLength(*this, aSrcFlags, aDstFlags, aPslInfo);
	return maxLen;
	}
sl@0
  1842
sl@0
  1843
sl@0
  1844
//
// Returns the address alignment mask for this channel given the target
// flags, element size and PSL info word, as reported by the controller.
//
EXPORT_C TUint TDmaChannel::AddressAlignMask(TUint aTargetFlags, TUint aElementSize,
											 TUint32 aPslInfo)
	{
	const TUint mask =
		iController->AddressAlignMask(*this, aTargetFlags, aElementSize, aPslInfo);
	return mask;
	}
sl@0
  1849
sl@0
  1850
sl@0
  1851
//
// Returns the capabilities of the DMA controller this channel belongs to.
// iDmacCaps is set up by the PSL (asserted non-NULL in Open()).
//
EXPORT_C const SDmacCaps& TDmaChannel::DmacCaps()
	{
	return *iDmacCaps;
	}
sl@0
  1855
sl@0
  1856
sl@0
  1857
//
sl@0
  1858
// DFC callback function (static member).
sl@0
  1859
//
sl@0
  1860
void TDmaChannel::Dfc(TAny* aArg)
sl@0
  1861
	{
sl@0
  1862
	static_cast<TDmaChannel*>(aArg)->DoDfc();
sl@0
  1863
	}
sl@0
  1864
sl@0
  1865
sl@0
  1866
//
// This is quite a long function, but what can you do...
//
// DFC body: consumes the completion/error counts posted by the ISR,
// advances the channel state machine once per completed fragment, runs
// client callbacks (with the channel mutex released around each call),
// and finally handles cancellation and missed-interrupt recovery. The
// Wait()/Signal()/Flash() placement is load-bearing - do not reorder.
//
void TDmaChannel::DoDfc()
	{
	__KTRACE_OPT(KDMA, Kern::Printf("TDmaChannel::DoDfc thread %O channel - %d",
									&Kern::CurrentThread(), iPslId));
	Wait();

	// Atomically fetch and reset the number of DFCs queued by the ISR and the
	// error flag. Leave the cancel flag alone for now.
	const TUint32 w = __e32_atomic_and_ord32(&iIsrDfc, (TUint32)KCancelFlagMask);
	TUint32 count = w & KDfcCountMask;
	const TBool error = w & (TUint32)KErrorFlagMask;
	TBool stop = w & (TUint32)KCancelFlagMask;
	__DMA_ASSERTD((count > 0) || stop);

	__DMA_ASSERTD(!iRedoRequest); // We shouldn't be here if this is true

	// One loop iteration per ISR-reported completion, unless cancelled.
	while (count && !stop)
		{
		--count;

		__DMA_ASSERTD(!iReqQ.IsEmpty());

		// If an error occurred it must have been reported on the last
		// interrupt since transfers are suspended after an error.
		DDmaRequest::TResult const res = (count == 0 && error) ?
			DDmaRequest::EError : DDmaRequest::EOk;
		DDmaRequest* pCompletedReq = NULL;
		DDmaRequest* const pCurReq = _LOFF(iReqQ.First(), DDmaRequest, iLink);

		if (res == DDmaRequest::EOk)
			{
			// Update state machine, current fragment, completed fragment and
			// tell the DMAC to transfer the next fragment if necessary.
			SDmaDesHdr* pCompletedHdr = NULL;
			DoDfc(const_cast<const DDmaRequest&>(*pCurReq), pCompletedHdr);

			// If just completed last fragment from current request, switch to
			// next request (if any).
			if (pCompletedHdr == pCurReq->iLastHdr)
				{
				pCompletedReq = pCurReq;
				pCurReq->iLink.Deque();
				if (iReqQ.IsEmpty())
					iNullPtr = &iCurHdr;
				pCompletedReq->OnDeque();
				}
			}
		else
			{
			pCompletedReq = pCurReq;
			}

		if (pCompletedReq && !pCompletedReq->iIsrCb)
			{
			// Don't execute ISR callbacks here (they have already been called)
			DDmaRequest::TCallback const cb = pCompletedReq->iCb;
			if (cb)
				{
				// Old style callback
				TAny* const arg = pCompletedReq->iCbArg;
				// Release the mutex while calling out to client code.
				Signal();
				__KTRACE_OPT(KDMA, Kern::Printf("Client CB res=%d", res));
				(*cb)(res, arg);
				Wait();
				}
			else
				{
				// New style callback
				TDmaCallback const ncb = pCompletedReq->iDmaCb;
				if (ncb)
					{
					TAny* const arg = pCompletedReq->iDmaCbArg;
					TDmaResult const result = (res == DDmaRequest::EOk) ?
						EDmaResultOK : EDmaResultError;
					Signal();
					__KTRACE_OPT(KDMA, Kern::Printf("Client CB result=%d", result));
					(*ncb)(EDmaCallbackRequestCompletion, result, arg, NULL);
					Wait();
					}
				}
			}
		else
			{
			// Allow another thread in, in case they are trying to cancel
			Flash();
			}
		// Re-check the cancel flag - it may have been raised while the
		// mutex was released above.
		stop = __e32_atomic_load_acq32(&iIsrDfc) & (TUint32)KCancelFlagMask;
		}

	// Some interrupts may be missed (double-buffer and scatter-gather
	// controllers only) if two or more transfers complete while interrupts are
	// disabled in the CPU. If this happens, the framework will go out of sync
	// and leave some orphaned requests in the queue.
	//
	// To ensure correctness we handle this case here by checking that the request
	// queue is empty when all transfers have completed and, if not, cleaning up
	// and notifying the client of the completion of the orphaned requests.
	//
	// Note that if some interrupts are missed and the controller raises an
	// error while transferring a subsequent fragment, the error will be reported
	// on a fragment which was successfully completed.  There is no easy solution
	// to this problem, but this is okay as the only possible action following a
	// failure is to flush the whole queue.
	if (stop)
		{
		// If another thread set the cancel flag, it should have
		// cleaned up the request queue
		__DMA_ASSERTD(IsQueueEmpty());

		TDmaCancelInfo* const waiters = iCancelInfo;
		iCancelInfo = NULL;

		// make sure DFC doesn't run again until a new request completes
		iDfc.Cancel();

		// reset the ISR count - new requests can now be processed
		__e32_atomic_store_rel32(&iIsrDfc, 0);

		Signal();

		// release threads doing CancelAll()
		waiters->Signal();
		}
	else if (!error && !iReqQ.IsEmpty() && iController->IsIdle(*this))
		{
#ifdef __SMP__
		// On an SMP system we must call stop transfer, it will block until
		// any ISRs have completed so that the system does not spuriously
		// attempt to recover from a missed interrupt.
		//
		// On an SMP system it is possible for the code here to execute
		// concurrently with the DMA ISR. It is therefore possible that at this
		// point the previous transfer has already completed (so that IsIdle
		// reports true), but that the ISR has not yet queued a DFC. Therefore
		// we must wait for the ISR to complete.
		//
		// StopTransfer should have no other side effect, given that the
		// channel is already idle.
		iController->StopTransfer(*this); // should block till ISR completion
#endif

		// If the DFC is queued again, the "missing" completion is on its
		// way and no recovery is needed.
		const TBool cleanup = !iDfc.Queued();
		if(cleanup)
			{
			__KTRACE_OPT(KDMA, Kern::Printf("Missed interrupt(s) - draining request queue"));
			ResetStateMachine();

			// Move orphaned requests to temporary queue so channel queue can
			// accept new requests.
			SDblQue q;
			q.MoveFrom(&iReqQ);

			SDblQueLink* pL;
			while ((pL = q.GetFirst()) != NULL)
				{
				DDmaRequest* const pR = _LOFF(pL, DDmaRequest, iLink);
				__KTRACE_OPT(KDMA, Kern::Printf("Removing request from queue and notifying client"));
				pR->OnDeque();
				// Old style callback
				DDmaRequest::TCallback const cb = pR->iCb;
				if (cb)
					{
					TAny* const arg = pR->iCbArg;
					Signal();
					(*cb)(DDmaRequest::EOk, arg);
					Wait();
					}
				else
					{
					// New style callback
					TDmaCallback const ncb = pR->iDmaCb;
					if (ncb)
						{
						TAny* const arg = pR->iDmaCbArg;
						Signal();
						(*ncb)(EDmaCallbackRequestCompletion, EDmaResultOK, arg, NULL);
						Wait();
						}
					}
				}
			}
		Signal();
		}
	else
		Signal();

	__DMA_INVARIANT();
	}
sl@0
  2057
sl@0
  2058
sl@0
  2059
//
sl@0
  2060
// Reset state machine only, request queue is unchanged */
sl@0
  2061
//
sl@0
  2062
void TDmaChannel::ResetStateMachine()
sl@0
  2063
	{
sl@0
  2064
	DoCancelAll();
sl@0
  2065
	iCurHdr = NULL;
sl@0
  2066
	iNullPtr = &iCurHdr;
sl@0
  2067
	}
sl@0
  2068
sl@0
  2069
sl@0
  2070
// Base-class placeholder: each concrete channel type supplies its own
// queuing policy, so reaching this implementation is a programming error.
void TDmaChannel::DoQueue(const DDmaRequest& /*aReq*/)
	{
	// Must be overridden
	__DMA_CANT_HAPPEN();
	}
sl@0
  2075
sl@0
  2076
sl@0
  2077
//
// Unlink the last item of a LLI chain from the next chain.
// Default implementation does nothing. This is overridden by scatter-gather
// channels.
//
void TDmaChannel::DoUnlink(SDmaDesHdr& /*aHdr*/)
	{
	}
sl@0
  2085
sl@0
  2086
sl@0
  2087
// Per-fragment DFC handler for channels with a single (symmetric)
// descriptor chain. Base-class version must never be reached.
void TDmaChannel::DoDfc(const DDmaRequest& /*aCurReq*/, SDmaDesHdr*& /*aCompletedHdr*/)
	{
	// To make sure this version of the function isn't called for channels for
	// which it isn't appropriate (and which therefore don't override it) we
	// put this check in here.
	__DMA_CANT_HAPPEN();
	}
sl@0
  2094
sl@0
  2095
sl@0
  2096
// Per-fragment DFC handler for channels with separate source and
// destination descriptor chains. Base-class version must never be reached.
void TDmaChannel::DoDfc(const DDmaRequest& /*aCurReq*/, SDmaDesHdr*& /*aSrcCompletedHdr*/,
						SDmaDesHdr*& /*aDstCompletedHdr*/)
	{
	// To make sure this version of the function isn't called for channels for
	// which it isn't appropriate (and which therefore don't override it) we
	// put this check in here.
	__DMA_CANT_HAPPEN();
	}
sl@0
  2104
sl@0
  2105
sl@0
  2106
#ifdef _DEBUG
sl@0
  2107
void TDmaChannel::Invariant()
sl@0
  2108
	{
sl@0
  2109
	Wait();
sl@0
  2110
sl@0
  2111
	__DMA_ASSERTD(iReqCount >= 0);
sl@0
  2112
sl@0
  2113
	__DMA_ASSERTD(iCurHdr == NULL || iController->IsValidHdr(iCurHdr));
sl@0
  2114
sl@0
  2115
	// should always point to NULL pointer ending fragment queue
sl@0
  2116
	__DMA_ASSERTD(*iNullPtr == NULL);
sl@0
  2117
sl@0
  2118
	__DMA_ASSERTD((0 <= iAvailDesCount) && (iAvailDesCount <= iMaxDesCount));
sl@0
  2119
sl@0
  2120
	__DMA_ASSERTD(LOGICAL_XOR(iCurHdr, IsQueueEmpty()));
sl@0
  2121
	if (iCurHdr == NULL)
sl@0
  2122
		{
sl@0
  2123
		__DMA_ASSERTD(iNullPtr == &iCurHdr);
sl@0
  2124
		}
sl@0
  2125
sl@0
  2126
	Signal();
sl@0
  2127
	}
sl@0
  2128
#endif
sl@0
  2129
sl@0
  2130
sl@0
  2131
//////////////////////////////////////////////////////////////////////////////
sl@0
  2132
// TDmaSbChannel
sl@0
  2133
sl@0
  2134
void TDmaSbChannel::DoQueue(const DDmaRequest& /*aReq*/)
sl@0
  2135
	{
sl@0
  2136
	if (iState != ETransferring)
sl@0
  2137
		{
sl@0
  2138
		iController->Transfer(*this, *iCurHdr);
sl@0
  2139
		iState = ETransferring;
sl@0
  2140
		}
sl@0
  2141
	}
sl@0
  2142
sl@0
  2143
sl@0
  2144
// Cancellation hook: a single-buffer channel only has to fall back to the
// idle state (it must currently be transferring to be cancelled).
void TDmaSbChannel::DoCancelAll()
	{
	__DMA_ASSERTD(iState == ETransferring);
	iState = EIdle;
	}
sl@0
  2149
sl@0
  2150
sl@0
  2151
void TDmaSbChannel::DoDfc(const DDmaRequest& /*aCurReq*/, SDmaDesHdr*& aCompletedHdr)
sl@0
  2152
	{
sl@0
  2153
	__DMA_ASSERTD(iState == ETransferring);
sl@0
  2154
	aCompletedHdr = iCurHdr;
sl@0
  2155
	iCurHdr = iCurHdr->iNext;
sl@0
  2156
	if (iCurHdr != NULL)
sl@0
  2157
		{
sl@0
  2158
		iController->Transfer(*this, *iCurHdr);
sl@0
  2159
		}
sl@0
  2160
	else
sl@0
  2161
		{
sl@0
  2162
		iState = EIdle;
sl@0
  2163
		}
sl@0
  2164
	}
sl@0
  2165
sl@0
  2166
sl@0
  2167
//////////////////////////////////////////////////////////////////////////////
sl@0
  2168
// TDmaDbChannel
sl@0
  2169
sl@0
  2170
// Double-buffer channel: keep up to two fragments programmed into the
// controller at any time. iState tracks whether zero, one
// (ETransferringLast) or two (ETransferring) buffer slots are in use.
void TDmaDbChannel::DoQueue(const DDmaRequest& aReq)
	{
	switch (iState)
		{
	case EIdle:
		// Nothing in flight: program the first fragment, and the second
		// one too if this request has more than one.
		iController->Transfer(*this, *iCurHdr);
		if (iCurHdr->iNext)
			{
			iController->Transfer(*this, *(iCurHdr->iNext));
			iState = ETransferring;
			}
		else
			iState = ETransferringLast;
		break;
	case ETransferring:
		// nothing to do
		break;
	case ETransferringLast:
		// One buffer slot free: use it for the new request's first fragment.
		iController->Transfer(*this, *(aReq.iFirstHdr));
		iState = ETransferring;
		break;
	default:
		__DMA_CANT_HAPPEN();
		}
	}
sl@0
  2195
sl@0
  2196
sl@0
  2197
// Cancellation hook: unconditionally return the double-buffer state
// machine to idle (unlike the Sb/Sg variants, no state assertion is made).
void TDmaDbChannel::DoCancelAll()
	{
	iState = EIdle;
	}
sl@0
  2201
sl@0
  2202
sl@0
  2203
// Double-buffer per-fragment completion: report the finished header,
// advance, and refill the buffer slot that just became free.
void TDmaDbChannel::DoDfc(const DDmaRequest& /*aCurReq*/, SDmaDesHdr*& aCompletedHdr)
	{
	aCompletedHdr = iCurHdr;
	iCurHdr = iCurHdr->iNext;
	switch (iState)
		{
	case ETransferringLast:
		// The last programmed fragment has completed - channel is idle.
		iState = EIdle;
		break;
	case ETransferring:
		if (iCurHdr->iNext == NULL)
			// The fragment after the current one doesn't exist - only one
			// slot remains in use.
			iState = ETransferringLast;
		else
			// Queue the fragment after the one currently in flight.
			iController->Transfer(*this, *(iCurHdr->iNext));
		break;
	default:
		__DMA_CANT_HAPPEN();
		}
	}
sl@0
  2222
sl@0
  2223
sl@0
  2224
//////////////////////////////////////////////////////////////////////////////
sl@0
  2225
// TDmaSgChannel
sl@0
  2226
sl@0
  2227
// Scatter-gather channel: if a transfer is already in progress, append the
// new request's descriptor chain to the end of the previous request's
// chain; otherwise start it directly.
void TDmaSgChannel::DoQueue(const DDmaRequest& aReq)
	{
	if (iState == ETransferring)
		{
		// There must be a preceding request on the queue to append to.
		__DMA_ASSERTD(!aReq.iLink.Alone());
		DDmaRequest* pReqPrev = _LOFF(aReq.iLink.iPrev, DDmaRequest, iLink);
		iController->AppendHwDes(*this, *(pReqPrev->iLastHdr), *(aReq.iFirstHdr));
		}
	else
		{
		iController->Transfer(*this, *(aReq.iFirstHdr));
		iState = ETransferring;
		}
	}
sl@0
  2241
sl@0
  2242
sl@0
  2243
// Cancellation hook: a scatter-gather channel only has to fall back to the
// idle state (it must currently be transferring to be cancelled).
void TDmaSgChannel::DoCancelAll()
	{
	__DMA_ASSERTD(iState == ETransferring);
	iState = EIdle;
	}
sl@0
  2248
sl@0
  2249
sl@0
  2250
// Break the hardware link from aHdr's descriptor to the next chain, so the
// DMAC stops at the end of this request.
void TDmaSgChannel::DoUnlink(SDmaDesHdr& aHdr)
	{
	iController->UnlinkHwDes(*this, aHdr);
	}
sl@0
  2254
sl@0
  2255
sl@0
  2256
void TDmaSgChannel::DoDfc(const DDmaRequest& aCurReq, SDmaDesHdr*& aCompletedHdr)
sl@0
  2257
	{
sl@0
  2258
	__DMA_ASSERTD(iState == ETransferring);
sl@0
  2259
	aCompletedHdr = aCurReq.iLastHdr;
sl@0
  2260
	iCurHdr = aCompletedHdr->iNext;
sl@0
  2261
	iState = (iCurHdr != NULL) ? ETransferring : EIdle;
sl@0
  2262
	}
sl@0
  2263
sl@0
  2264
sl@0
  2265
//////////////////////////////////////////////////////////////////////////////
sl@0
  2266
// TDmaAsymSgChannel
sl@0
  2267
sl@0
  2268
// Asymmetric scatter-gather channel: like TDmaSgChannel::DoQueue(), but
// with separate source and destination descriptor chains to append/start.
void TDmaAsymSgChannel::DoQueue(const DDmaRequest& aReq)
	{
	if (iState == ETransferring)
		{
		// There must be a preceding request on the queue to append to.
		__DMA_ASSERTD(!aReq.iLink.Alone());
		DDmaRequest* pReqPrev = _LOFF(aReq.iLink.iPrev, DDmaRequest, iLink);
		iController->AppendHwDes(*this,
								 *(pReqPrev->iSrcLastHdr), *(aReq.iSrcFirstHdr),
								 *(pReqPrev->iDstLastHdr), *(aReq.iDstFirstHdr));
		}
	else
		{
		iController->Transfer(*this, *(aReq.iSrcFirstHdr), *(aReq.iDstFirstHdr));
		iState = ETransferring;
		}
	}
sl@0
  2284
sl@0
  2285
sl@0
  2286
// Cancellation hook: fall back to the idle state (the channel must
// currently be transferring to be cancelled).
void TDmaAsymSgChannel::DoCancelAll()
	{
	__DMA_ASSERTD(iState == ETransferring);
	iState = EIdle;
	}
sl@0
  2291
sl@0
  2292
sl@0
  2293
// Break the hardware link from aHdr's descriptor to the next chain, so the
// DMAC stops at the end of this request.
void TDmaAsymSgChannel::DoUnlink(SDmaDesHdr& aHdr)
	{
	iController->UnlinkHwDes(*this, aHdr);
	}
sl@0
  2297
sl@0
  2298
sl@0
  2299
// Asymmetric scatter-gather completion: a whole request completes at a
// time; advance both the source and destination descriptor chains.
void TDmaAsymSgChannel::DoDfc(const DDmaRequest& aCurReq, SDmaDesHdr*& aSrcCompletedHdr,
							  SDmaDesHdr*& aDstCompletedHdr)
	{
	__DMA_ASSERTD(iState == ETransferring);
	aSrcCompletedHdr = aCurReq.iSrcLastHdr;
	iSrcCurHdr = aSrcCompletedHdr->iNext;
	aDstCompletedHdr = aCurReq.iDstLastHdr;
	iDstCurHdr = aDstCompletedHdr->iNext;
	// Must be either both NULL or none of them.
	__DMA_ASSERTD(!LOGICAL_XOR(iSrcCurHdr, iDstCurHdr));
	iState = (iSrcCurHdr != NULL) ? ETransferring : EIdle;
	}
sl@0
  2311