os/kernelhwsrv/kernel/eka/drivers/dma/dmapil.cpp
author sl
Tue, 10 Jun 2014 14:32:02 +0200
changeset 1 260cb5ec6c19
permissions -rw-r--r--
Update contrib.
sl@0
     1
// Copyright (c) 2002-2009 Nokia Corporation and/or its subsidiary(-ies).
sl@0
     2
// All rights reserved.
sl@0
     3
// This component and the accompanying materials are made available
sl@0
     4
// under the terms of the License "Eclipse Public License v1.0"
sl@0
     5
// which accompanies this distribution, and is available
sl@0
     6
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
sl@0
     7
//
sl@0
     8
// Initial Contributors:
sl@0
     9
// Nokia Corporation - initial contribution.
sl@0
    10
//
sl@0
    11
// Contributors:
sl@0
    12
//
sl@0
    13
// Description:
sl@0
    14
// e32\drivers\dmapil.cpp
sl@0
    15
// DMA Platform Independent Layer (PIL)
sl@0
    16
// 
sl@0
    17
//
sl@0
    18
sl@0
    19
#include <drivers/dma.h>
sl@0
    20
#include <kernel/kern_priv.h>
sl@0
    21
sl@0
    22
sl@0
    23
// Panic category used by the DMA framework's assertion/panic macros.
static const char KDmaPanicCat[] = "DMA";

// Fast mutex used via DmaChannelMgr::Wait()/Signal() to serialise channel
// manager open/close operations (see TDmaChannel::Open()/Close()).
NFastMutex DmaChannelMgr::Lock;
sl@0
    26
sl@0
    27
// Book-keeping object for a thread blocked in TDmaChannel::CancelAll().
// Instances live on the cancelling thread's stack and are chained together
// (via the inherited SDblQueLink) when several threads cancel concurrently;
// the DFC releases the whole chain with Signal().
class TDmaCancelInfo : public SDblQueLink
	{
public:
	TDmaCancelInfo();
	// Wake every waiter on the chain containing this object.
	void Signal();
public:
	// Fast semaphore the cancelling thread waits on (see NKern::FSWait in
	// TDmaChannel::CancelAll()).
	NFastSemaphore iSem;
	};
sl@0
    35
sl@0
    36
TDmaCancelInfo::TDmaCancelInfo()
	:	iSem(0)
	{
	// Initially a circular list containing only this object, so Signal()
	// terminates correctly even when there is a single waiter.
	iNext = this;
	iPrev = this;
	}
sl@0
    42
sl@0
    43
// Walks the circular chain of waiters, dequeuing and signalling each one.
// A waiter's stack-allocated TDmaCancelInfo may be destroyed as soon as its
// semaphore is signalled, so the next pointer is captured *before* FSSignal()
// and the object is never touched afterwards.
void TDmaCancelInfo::Signal()
	{
	TDmaCancelInfo* p = this;
	FOREVER
		{
		TDmaCancelInfo* next = (TDmaCancelInfo*)p->iNext;
		if (p!=next)
			p->Deque();
		NKern::FSSignal(&p->iSem);	// Don't dereference p after this
		if (p==next)
			break;		// p was the last element of the chain
		p = next;
		}
	}
sl@0
    57
sl@0
    58
//////////////////////////////////////////////////////////////////////////////
sl@0
    59
sl@0
    60
#ifdef __DMASIM__
#ifdef __WINS__
// The emulator has no MMU; model physical addresses as linear addresses.
typedef TLinAddr TPhysAddr;
#endif
// Simulated DMA: linear and "physical" addresses are identical.
static inline TPhysAddr LinToPhys(TLinAddr aLin) {return aLin;}
#else
// Real hardware: translate a linear address via the kernel's page tables.
static inline TPhysAddr LinToPhys(TLinAddr aLin) {return Epoc::LinearToPhysical(aLin);}
#endif
sl@0
    68
sl@0
    69
//
sl@0
    70
// Return minimum of aMaxSize and size of largest physically contiguous block
sl@0
    71
// starting at aLinAddr.
sl@0
    72
//
sl@0
    73
static TInt MaxPhysSize(TLinAddr aLinAddr, const TInt aMaxSize)
sl@0
    74
	{
sl@0
    75
	const TPhysAddr physBase = LinToPhys(aLinAddr);
sl@0
    76
	TLinAddr lin = aLinAddr;
sl@0
    77
	TInt size = 0;
sl@0
    78
	for (;;)
sl@0
    79
		{
sl@0
    80
		// Round up the linear address to the next MMU page boundary
sl@0
    81
		const TLinAddr linBoundary = Kern::RoundToPageSize(lin + 1);
sl@0
    82
		size += linBoundary - lin;
sl@0
    83
		if (size >= aMaxSize)
sl@0
    84
			return aMaxSize;
sl@0
    85
		if ((physBase + size) != LinToPhys(linBoundary))
sl@0
    86
			return size;
sl@0
    87
		lin = linBoundary;
sl@0
    88
		}
sl@0
    89
	}
sl@0
    90
sl@0
    91
sl@0
    92
//////////////////////////////////////////////////////////////////////////////
sl@0
    93
// TDmac
sl@0
    94
sl@0
    95
// First-phase constructor: copies the descriptor pool parameters supplied by
// the PSL.  No allocation happens here - see the second-phase Create().
TDmac::TDmac(const SCreateInfo& aInfo)
	: iMaxDesCount(aInfo.iDesCount),
	  iAvailDesCount(aInfo.iDesCount),
	  iDesSize(aInfo.iDesSize),
	  iCaps(aInfo.iCaps)
	{
	__DMA_ASSERTD(iMaxDesCount > 0);
	__DMA_ASSERTD((iCaps & ~KCapsBitHwDes) == 0); // undefined bits set?
	__DMA_ASSERTD(iDesSize > 0);
	}
sl@0
   105
sl@0
   106
//
sl@0
   107
// Second-phase c'tor
sl@0
   108
//
sl@0
   109
sl@0
   110
TInt TDmac::Create(const SCreateInfo& aInfo)
sl@0
   111
	{
sl@0
   112
	iHdrPool = new SDmaDesHdr[iMaxDesCount];
sl@0
   113
	if (iHdrPool == NULL)
sl@0
   114
		return KErrNoMemory;
sl@0
   115
sl@0
   116
	TInt r = AllocDesPool(aInfo.iDesChunkAttribs);
sl@0
   117
	if (r != KErrNone)
sl@0
   118
		return KErrNoMemory;
sl@0
   119
sl@0
   120
	// Link all descriptor headers together on the free list
sl@0
   121
	iFreeHdr = iHdrPool;
sl@0
   122
	TInt i;
sl@0
   123
	for (i = 0; i < iMaxDesCount - 1; i++)
sl@0
   124
		iHdrPool[i].iNext = iHdrPool + i + 1;
sl@0
   125
	iHdrPool[iMaxDesCount-1].iNext = NULL;
sl@0
   126
sl@0
   127
	__DMA_INVARIANT();
sl@0
   128
	return KErrNone;
sl@0
   129
	}
sl@0
   130
sl@0
   131
sl@0
   132
// Destructor: releases the descriptor pool and the header pool allocated by
// Create().  Safe to run after a partially failed Create() since iHdrPool
// deletion handles NULL and FreeDesPool() checks its own state.
TDmac::~TDmac()
	{
	__DMA_INVARIANT();

	FreeDesPool();
	delete[] iHdrPool;
	}
sl@0
   139
sl@0
   140
sl@0
   141
// Allocates the transfer descriptor pool.
// For hardware-descriptor controllers (KCapsBitHwDes) the pool must be
// physically contiguous RAM mapped through a hardware chunk; otherwise a
// plain array of pseudo descriptors on the kernel heap suffices.
// Calling thread must be in CS
TInt TDmac::AllocDesPool(TUint aAttribs)
	{
	TInt r;
	if (iCaps & KCapsBitHwDes)
		{
		TInt size = iMaxDesCount*iDesSize;
#ifdef __WINS__
		// Emulator: no physical RAM; an ordinary heap buffer stands in.
		(void)aAttribs;
		iDesPool = new TUint8[size];
		r = iDesPool ? KErrNone : KErrNoMemory;
#else
		// Chunk not mapped as supervisor r/w user none? incorrect mask passed by PSL
		__DMA_ASSERTD((aAttribs & EMapAttrAccessMask) == EMapAttrSupRw);
		TPhysAddr phys;
		r = Epoc::AllocPhysicalRam(size, phys);
		if (r == KErrNone)
			{
			r = DPlatChunkHw::New(iHwDesChunk, phys, size, aAttribs);
			if (r == KErrNone)
				{
				iDesPool = (TAny*)iHwDesChunk->LinearAddress();
				__KTRACE_OPT(KDMA, Kern::Printf("descriptor hw chunk created lin=0x%08X phys=0x%08X, size=0x%X",
												iHwDesChunk->iLinAddr, iHwDesChunk->iPhysAddr, size));
				}
			else
				// Chunk creation failed: give the physical RAM back.
				Epoc::FreePhysicalRam(phys, size);
			}
#endif
		}
	else
		{
		// Pseudo descriptors: software-only bookkeeping, ordinary heap array.
		iDesPool = new SDmaPseudoDes[iMaxDesCount];
		r = iDesPool ? KErrNone : KErrNoMemory;
		}
	return r;
	}
sl@0
   178
sl@0
   179
sl@0
   180
// Releases whatever AllocDesPool() allocated, mirroring each of its branches.
// Calling thread must be in CS
void TDmac::FreeDesPool()
	{
	if (iCaps & KCapsBitHwDes)
		{
#ifdef __WINS__
		// NOTE(review): AllocDesPool allocates this as new TUint8[]; confirm
		// iDesPool's declared type makes this delete[] well-formed.
		delete[] iDesPool;
#else
		if (iHwDesChunk)
			{
			// Capture address/size before closing the chunk, then free the
			// physical RAM that AllocDesPool reserved.
			TPhysAddr phys = iHwDesChunk->PhysicalAddress();
			TInt size = iHwDesChunk->iSize;
			iHwDesChunk->Close(NULL);
			Epoc::FreePhysicalRam(phys, size);
			}
#endif
		}
	else
		// NOTE(review): pool was allocated with new SDmaPseudoDes[]; Kern::Free
		// will not run destructors - presumably SDmaPseudoDes is plain data.
		// TODO confirm against the SDmaPseudoDes declaration in dma.h.
		Kern::Free(iDesPool);
	}
sl@0
   200
sl@0
   201
sl@0
   202
/**
sl@0
   203
 Prealloc the given number of descriptors.
sl@0
   204
 */
sl@0
   205
sl@0
   206
TInt TDmac::ReserveSetOfDes(TInt aCount)
sl@0
   207
	{
sl@0
   208
	__KTRACE_OPT(KDMA, Kern::Printf(">TDmac::ReserveSetOfDes count=%d", aCount));
sl@0
   209
	__DMA_ASSERTD(aCount > 0);
sl@0
   210
	TInt r = KErrTooBig;
sl@0
   211
	Wait();
sl@0
   212
	if (iAvailDesCount - aCount >= 0)
sl@0
   213
		{
sl@0
   214
		iAvailDesCount -= aCount;
sl@0
   215
		r = KErrNone;
sl@0
   216
		}
sl@0
   217
	Signal();
sl@0
   218
	__DMA_INVARIANT();
sl@0
   219
	__KTRACE_OPT(KDMA, Kern::Printf("<TDmac::ReserveSetOfDes r=%d", r));
sl@0
   220
	return r;
sl@0
   221
	}
sl@0
   222
sl@0
   223
sl@0
   224
/**
 Return the given number of preallocated descriptors to the free pool.

 Counterpart of ReserveSetOfDes(); only adjusts the available count - the
 headers themselves are recycled separately via the free-header list.
 */

void TDmac::ReleaseSetOfDes(TInt aCount)
	{
	__DMA_ASSERTD(aCount >= 0);
	Wait();
	iAvailDesCount += aCount;
	Signal();
	__DMA_INVARIANT();
	}
sl@0
   236
sl@0
   237
sl@0
   238
/**
 Queue DFC and update word used to communicate with DFC.

 Called in interrupt context by PSL.

 @param aChannel    The channel on which the interrupt occurred.
 @param aIsComplete ETrue for a successful transfer completion, EFalse to
                    additionally record the error flag in iIsrDfc.
 */

void TDmac::HandleIsr(TDmaChannel& aChannel, TBool aIsComplete)
	{
	//__KTRACE_OPT(KDMA, Kern::Printf("TDmac::HandleIsr channel=%d complete=%d", aChannelIdx, aIsComplete));

	// Queue DFC if necessary.  The possible scenarios are:
	// * no DFC queued --> need to queue DFC
	// * DFC queued (not running yet) --> just need to update iIsrDfc
	// * DFC running / iIsrDfc already reset --> need to requeue DFC
	// * DFC running /  iIsrDfc not reset yet --> just need to update iIsrDfc
	// Set error flag if necessary.
	TUint32 inc = aIsComplete ? 1u : TUint32(TDmaChannel::KErrorFlagMask)|1u;
	// Atomic add-unless-threshold: presumably adds `inc` to iIsrDfc unless the
	// cancel flag is already set, returning the previous value - TODO confirm
	// against the e32 atomic API (__e32_atomic_tau_ord32).
	TUint32 orig = __e32_atomic_tau_ord32(&aChannel.iIsrDfc, TUint32(TDmaChannel::KCancelFlagMask), 0, inc);

	// As transfer should be suspended when an error occurs, we
	// should never get there with the error flag already set.
	__DMA_ASSERTD((orig & inc & (TUint32)TDmaChannel::KErrorFlagMask) == 0);

	// Previous value 0 means no DFC was pending or running: queue one.
	if (orig == 0)
		aChannel.iDfc.Add();
	}
sl@0
   264
sl@0
   265
sl@0
   266
void TDmac::InitDes(const SDmaDesHdr& aHdr, TUint32 aSrc, TUint32 aDest, TInt aCount,
sl@0
   267
					TUint aFlags, TUint32 aPslInfo, TUint32 aCookie)
sl@0
   268
	{
sl@0
   269
 	if (iCaps & KCapsBitHwDes)
sl@0
   270
		InitHwDes(aHdr, aSrc, aDest, aCount, aFlags, aPslInfo, aCookie);
sl@0
   271
	else
sl@0
   272
		{
sl@0
   273
		SDmaPseudoDes& des = HdrToDes(aHdr);
sl@0
   274
		des.iSrc = aSrc;
sl@0
   275
		des.iDest = aDest;
sl@0
   276
		des.iCount = aCount;
sl@0
   277
		des.iFlags = aFlags;
sl@0
   278
		des.iPslInfo = aPslInfo;
sl@0
   279
		des.iCookie = aCookie;
sl@0
   280
		}
sl@0
   281
	}
sl@0
   282
sl@0
   283
sl@0
   284
// Default implementation - must be overridden by a PSL that declares
// KCapsBitHwDes (the PIL never calls it otherwise).
void TDmac::InitHwDes(const SDmaDesHdr& /*aHdr*/, TUint32 /*aSrc*/, TUint32 /*aDest*/, TInt /*aCount*/,
					  TUint /*aFlags*/, TUint32 /*aPslInfo*/, TUint32 /*aCookie*/)
	{
	// concrete controller must override if KCapsBitHwDes set
	__DMA_CANT_HAPPEN();
	}


// Default implementation - must be overridden by a PSL that declares
// KCapsBitHwDes.
void TDmac::ChainHwDes(const SDmaDesHdr& /*aHdr*/, const SDmaDesHdr& /*aNextHdr*/)
	{
	// concrete controller must override if KCapsBitHwDes set
	__DMA_CANT_HAPPEN();
	}


// Default implementation - must be overridden by a PSL that declares
// KCapsBitHwDes.
void TDmac::AppendHwDes(const TDmaChannel& /*aChannel*/, const SDmaDesHdr& /*aLastHdr*/,
						const SDmaDesHdr& /*aNewHdr*/)
	{
 	// concrete controller must override if KCapsBitHwDes set
	__DMA_CANT_HAPPEN();
	}


// Default implementation - must be overridden by a PSL that declares
// KCapsBitHwDes.
void TDmac::UnlinkHwDes(const TDmaChannel& /*aChannel*/, SDmaDesHdr& /*aHdr*/)
	{
 	// concrete controller must override if KCapsBitHwDes set
	__DMA_CANT_HAPPEN();
	}


// Test/debug hook: simulate a failure on the next transfer.  Default: not
// supported; PSL may override.
TInt TDmac::FailNext(const TDmaChannel& /*aChannel*/)
	{
	return KErrNotSupported;
	}


// Test/debug hook: swallow the next aInterruptCount interrupts.  Default:
// not supported; PSL may override.
TInt TDmac::MissNextInterrupts(const TDmaChannel& /*aChannel*/, TInt /*aInterruptCount*/)
	{
	return KErrNotSupported;
	}


// PSL-specific extension commands routed from TDmaChannel::Extension().
TInt TDmac::Extension(TDmaChannel& /*aChannel*/, TInt /*aCmd*/, TAny* /*aArg*/)
	{
	// default implementation - NOP
	return KErrNotSupported;
	}
sl@0
   331
sl@0
   332
sl@0
   333
#ifdef _DEBUG
sl@0
   334
sl@0
   335
// Debug-only consistency check of the controller's descriptor pool:
// the available count is within bounds and every header link points back
// into the pool (or is NULL).
void TDmac::Invariant()
	{
	Wait();
	__DMA_ASSERTD(0 <= iAvailDesCount && iAvailDesCount <= iMaxDesCount);
	__DMA_ASSERTD(! iFreeHdr || IsValidHdr(iFreeHdr));
	for (TInt i = 0; i < iMaxDesCount; i++)
		__DMA_ASSERTD(iHdrPool[i].iNext == NULL || IsValidHdr(iHdrPool[i].iNext));
	Signal();
	}


// Returns ETrue if aHdr points inside the header pool array.
TBool TDmac::IsValidHdr(const SDmaDesHdr* aHdr)
	{
	return (iHdrPool <= aHdr) && (aHdr < iHdrPool + iMaxDesCount);
	}
sl@0
   350
sl@0
   351
#endif
sl@0
   352
sl@0
   353
//////////////////////////////////////////////////////////////////////////////
sl@0
   354
// DDmaRequest
sl@0
   355
sl@0
   356
sl@0
   357
// Constructs a request bound to aChannel.  aCb/aCbArg are invoked from the
// channel's DFC on completion or error; aMaxTransferSize (0 = no cap) limits
// fragment size below whatever the PSL allows.
EXPORT_C DDmaRequest::DDmaRequest(TDmaChannel& aChannel, TCallback aCb, TAny* aCbArg, TInt aMaxTransferSize)
	: iChannel(aChannel),
	  iCb(aCb),
	  iCbArg(aCbArg),
	  iMaxTransferSize(aMaxTransferSize)
	{
	// Remaining members rely on zero-initialisation:
	// iDesCount = 0;
	// iFirstHdr = iLastHdr = NULL;
	// iQueued = EFalse;
	iChannel.iReqCount++;	// channel tracks how many requests are bound to it
	__DMA_INVARIANT();
	}
sl@0
   369
sl@0
   370
sl@0
   371
sl@0
   372
// Destructor.  The request must not be on the channel queue (caller must
// have waited for completion or called CancelAll() first).
EXPORT_C DDmaRequest::~DDmaRequest()
	{
	__DMA_ASSERTD(!iQueued);
	__DMA_INVARIANT();
	FreeDesList();			// return descriptors to the controller pool
	iChannel.iReqCount--;
	}
sl@0
   379
sl@0
   380
sl@0
   381
sl@0
   382
// Splits a logical transfer into one descriptor ("fragment") per
// hardware-transferable chunk.  Each fragment is limited by (a) the PSL /
// user maximum transfer size and (b) for memory addresses that are not
// already physical, the length of the physically contiguous run starting at
// the current address.  On allocation failure the partial list is freed and
// the error returned.
EXPORT_C TInt DDmaRequest::Fragment(TUint32 aSrc, TUint32 aDest, TInt aCount, TUint aFlags, TUint32 aPslInfo)
	{
	__KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::Fragment thread %O "
									"src=0x%08X dest=0x%08X count=%d flags=0x%X psl=0x%08X",
									&Kern::CurrentThread(), aSrc, aDest, aCount, aFlags, aPslInfo));
	__DMA_ASSERTD(aCount > 0);
	__DMA_ASSERTD(!iQueued);

	const TUint alignMask = iChannel.MemAlignMask(aFlags, aPslInfo);
	const TBool memSrc  = aFlags & KDmaMemSrc;
	const TBool memDest = aFlags & KDmaMemDest;

	// Memory buffers must satisfy alignment constraint
	__DMA_ASSERTD(!memSrc || ((aSrc & alignMask) == 0));
	__DMA_ASSERTD(!memDest || ((aDest & alignMask) == 0));

	// Ask the PSL what the maximum size possible for this transfer is
	TInt maxTransferSize = iChannel.MaxTransferSize(aFlags, aPslInfo);
	if (!maxTransferSize)
		{
		__KTRACE_OPT(KPANIC, Kern::Printf("Error: maxTransferSize == 0"));
		return KErrArgument;
		}

	if (iMaxTransferSize)
		{
		// User has set a size cap
		__DMA_ASSERTA((iMaxTransferSize <= maxTransferSize) || (maxTransferSize == -1));
		maxTransferSize = iMaxTransferSize;
		}
	else
		{
		// User doesn't care about max size
		if (maxTransferSize == -1)
			{
			// No maximum imposed by controller
			maxTransferSize = aCount;
			}
		}

	// Largest fragment size that keeps the *next* fragment's start aligned.
	const TInt maxAlignedSize = (maxTransferSize & ~alignMask);
	__DMA_ASSERTD(maxAlignedSize > 0);						// bug in PSL if not true

	// Re-fragmenting: discard any descriptor list from a previous Fragment().
	FreeDesList();

	TInt r = KErrNone;
	do
		{
		// Allocate fragment
		r = ExpandDesList();
		if (r != KErrNone)
			{
			FreeDesList();
			break;
			}

		// Compute fragment size
		TInt c = Min(maxTransferSize, aCount);
		// Linear memory endpoints: clip to the physically contiguous run.
		if (memSrc && ((aFlags & KDmaPhysAddrSrc) == 0))
			c = MaxPhysSize(aSrc, c);
		if (memDest && ((aFlags & KDmaPhysAddrDest) == 0))
			c = MaxPhysSize(aDest, c);
		if ((memSrc || memDest) && (c < aCount) && (c > maxAlignedSize))
			{
			// This is not last fragment of transfer to/from memory. We must
			// round down fragment size so next one is correctly aligned.
			c = maxAlignedSize;
			}

		// Initialise fragment
		__KTRACE_OPT(KDMA, Kern::Printf("fragment: src=0x%08X dest=0x%08X count=%d", aSrc, aDest, c));
		iChannel.iController->InitDes(*iLastHdr, aSrc, aDest, c, aFlags, aPslInfo, iChannel.PslId());

		// Update for next iteration
		aCount -= c;
		if (memSrc)
			aSrc += c;
		if (memDest)
			aDest += c;
		}
	while (aCount > 0);

	__DMA_INVARIANT();
	return r;
	}
sl@0
   467
sl@0
   468
sl@0
   469
sl@0
   470
// Queues a fragmented request on its channel for transfer.  If a concurrent
// CancelAll() is in progress (cancel flag set in iIsrDfc) the request is not
// queued.  QueuedRequestCountChanged() must be called without depending on
// the channel lock, hence the unlock/notify/relock dance around it.
EXPORT_C void DDmaRequest::Queue()
	{
	__KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::Queue thread %O", &Kern::CurrentThread()));
	__DMA_ASSERTD(iDesCount > 0);	// Not configured? call Fragment() first !
	__DMA_ASSERTD(!iQueued);

	// append request to queue and link new descriptor list to existing one.
	iChannel.Wait();

	TUint32 req_count = iChannel.iQueuedRequests++;
	if (req_count == 0)
		{
		// 0 -> 1 transition: notify the PSL outside the channel lock.
		iChannel.Signal();
		iChannel.QueuedRequestCountChanged();
		iChannel.Wait();
		}

	if (!(iChannel.iIsrDfc & (TUint32)TDmaChannel::KCancelFlagMask))
		{
		iQueued = ETrue;
		iChannel.iReqQ.Add(&iLink);
		// Splice this request's descriptor chain onto the channel's chain:
		// iNullPtr always addresses the terminating NULL link.
		*iChannel.iNullPtr = iFirstHdr;
		iChannel.iNullPtr = &(iLastHdr->iNext);
		iChannel.DoQueue(*this);
		iChannel.Signal();
		}
	else
		{
		// Someone is cancelling all requests...
		req_count = --iChannel.iQueuedRequests;
		iChannel.Signal();
		if (req_count == 0)
			{
			iChannel.QueuedRequestCountChanged();
			}
		}

	__DMA_INVARIANT();
	}
sl@0
   509
sl@0
   510
// Grows this request's descriptor list by aCount headers taken from the
// controller's free list, chaining hardware descriptors together where the
// controller requires it.  Fails with KErrTooBig if the channel's reserved
// quota (iAvailDesCount) is insufficient.
EXPORT_C TInt DDmaRequest::ExpandDesList(TInt aCount)
	{
	__DMA_ASSERTD(!iQueued);
	__DMA_ASSERTD(aCount > 0);

	if (aCount > iChannel.iAvailDesCount)
		return KErrTooBig;

	iChannel.iAvailDesCount -= aCount;
	iDesCount += aCount;

	TDmac& c = *(iChannel.iController);
	c.Wait();

	if (iFirstHdr == NULL)
		{
		// handle empty list specially to simplify following loop
		iFirstHdr = iLastHdr = c.iFreeHdr;
		c.iFreeHdr = c.iFreeHdr->iNext;
		--aCount;
		}
	else
		iLastHdr->iNext = c.iFreeHdr;

	// Remove as many descriptors and headers from free pool as necessary and
	// ensure hardware descriptors are chained together.
	while (aCount-- > 0)
		{
		__DMA_ASSERTD(c.iFreeHdr != NULL);
		if (c.iCaps & TDmac::KCapsBitHwDes)
			c.ChainHwDes(*iLastHdr, *(c.iFreeHdr));
		iLastHdr = c.iFreeHdr;
		c.iFreeHdr = c.iFreeHdr->iNext;
		}

	c.Signal();

	// Terminate the request's chain (it was still linked into the free list).
	iLastHdr->iNext = NULL;

	__DMA_INVARIANT();
	return KErrNone;
	}
sl@0
   552
sl@0
   553
sl@0
   554
sl@0
   555
sl@0
   556
// Returns this request's whole descriptor chain to the controller's free
// list and restores the channel's available-descriptor quota.  No-op if the
// request has no descriptors.
EXPORT_C void DDmaRequest::FreeDesList()
	{
	__DMA_ASSERTD(!iQueued);
	if (iDesCount > 0)
		{
		iChannel.iAvailDesCount += iDesCount;
		TDmac& c = *(iChannel.iController);
		c.Wait();
		// Push the entire chain onto the front of the free list in one splice.
		iLastHdr->iNext = c.iFreeHdr;
		c.iFreeHdr = iFirstHdr;
		c.Signal();
		iFirstHdr = iLastHdr = NULL;
		iDesCount = 0;
		}
	}
sl@0
   571
sl@0
   572
sl@0
   573
#ifdef _DEBUG
sl@0
   574
sl@0
   575
// Debug-only consistency check: an empty request has no headers and is not
// queued; a non-empty request's first/last headers must lie inside the
// controller's header pool.
void DDmaRequest::Invariant()
	{
	iChannel.Wait();
	__DMA_ASSERTD(iChannel.IsOpened());
	__DMA_ASSERTD(0 <= iMaxTransferSize);
	__DMA_ASSERTD(0 <= iDesCount && iDesCount <= iChannel.iMaxDesCount);
	if (iDesCount == 0)
		{
		__DMA_ASSERTD(!iQueued);
		__DMA_ASSERTD(!iFirstHdr && !iLastHdr);
		}
	else
		{
		__DMA_ASSERTD(iChannel.iController->IsValidHdr(iFirstHdr));
		__DMA_ASSERTD(iChannel.iController->IsValidHdr(iLastHdr));
		}
	iChannel.Signal();
	}
sl@0
   593
sl@0
   594
#endif
sl@0
   595
sl@0
   596
sl@0
   597
//////////////////////////////////////////////////////////////////////////////
sl@0
   598
// TDmaChannel
sl@0
   599
sl@0
   600
sl@0
   601
// Forwards PSL-specific control commands (not tied to a particular channel)
// to the channel manager.
EXPORT_C TInt TDmaChannel::StaticExtension(TInt aCmd, TAny* aArg)
	{
	return DmaChannelMgr::StaticExtension(aCmd, aArg);
	}
sl@0
   605
sl@0
   606
sl@0
   607
// Constructor: puts the channel into its closed state.  The DFC is
// re-constructed with the real queue/priority by placement-new in Open();
// iNullPtr starts by addressing iCurHdr, the empty fragment chain's
// terminating NULL.
TDmaChannel::TDmaChannel()
	: iController(NULL),
	  iPslId(0),
	  iCurHdr(NULL),
	  iNullPtr(&iCurHdr),
	  iDfc(Dfc, NULL, 0),
	  iMaxDesCount(0),
	  iAvailDesCount(0),
	  iIsrDfc(0),
	  iReqQ(),
	  iReqCount(0),
	  iQueuedRequests(0),
	  iCancelInfo(NULL)
	{
	__DMA_INVARIANT();
	}
sl@0
   623
sl@0
   624
sl@0
   625
// Opens the channel identified by aInfo.iCookie: claims it from the channel
// manager, reserves its descriptor quota with the controller, and binds the
// completion DFC to the client-supplied DFC queue/priority.  On success
// aChannel receives the channel pointer; on failure it is left NULL.
EXPORT_C TInt TDmaChannel::Open(const SCreateInfo& aInfo, TDmaChannel*& aChannel)
	{
	__KTRACE_OPT(KDMA, Kern::Printf("TDmaChannel::Open thread %O", &Kern::CurrentThread()));
	__DMA_ASSERTD(aInfo.iDfcQ != NULL);
	__DMA_ASSERTD(aInfo.iDfcPriority < KNumDfcPriorities);
	__DMA_ASSERTD(aInfo.iDesCount >= 1);

	aChannel = NULL;

	DmaChannelMgr::Wait();
	TDmaChannel* pC = DmaChannelMgr::Open(aInfo.iCookie);
	DmaChannelMgr::Signal();
	if (!pC)
		return KErrInUse;		// channel unknown or already opened

	TInt r = pC->iController->ReserveSetOfDes(aInfo.iDesCount);
	if (r != KErrNone)
		{
		// Quota unavailable: give the channel back before failing.
		pC->Close();
		return r;
		}
	pC->iAvailDesCount = pC->iMaxDesCount = aInfo.iDesCount;

	// Re-construct the DFC in place with the client's queue and priority.
	new (&pC->iDfc) TDfc(&Dfc, pC, aInfo.iDfcQ, aInfo.iDfcPriority);

	aChannel = pC;

#ifdef _DEBUG
	pC->Invariant();
#endif
	__KTRACE_OPT(KDMA, Kern::Printf("opened channel %d", pC->iPslId));
	return KErrNone;
	}
sl@0
   658
sl@0
   659
sl@0
   660
// Closes the channel and returns it to the channel manager.  The caller must
// have destroyed/cancelled all requests first (asserted below); the
// descriptor quota reserved in Open() is released to the controller.
EXPORT_C void TDmaChannel::Close()
	{
	__KTRACE_OPT(KDMA, Kern::Printf("TDmaChannel::Close %d", iPslId));
	__DMA_ASSERTD(IsOpened());
	__DMA_ASSERTD(IsQueueEmpty());
	__DMA_ASSERTD(iReqCount == 0);

	__DMA_ASSERTD(iQueuedRequests == 0);

	// descriptor leak? bug in request code
	__DMA_ASSERTD(iAvailDesCount == iMaxDesCount);

	iController->ReleaseSetOfDes(iMaxDesCount);
	iAvailDesCount = iMaxDesCount = 0;

	DmaChannelMgr::Wait();
	DmaChannelMgr::Close(this);
	iController = NULL;		// marks the channel as closed (see IsOpened())
	DmaChannelMgr::Signal();

	__DMA_INVARIANT();
	}
sl@0
   682
sl@0
   683
sl@0
   684
// Cancels every queued request: stops the transfer in progress, flags the
// ISR/DFC word so no further DFCs are posted, and empties the request queue.
// If the completion DFC runs in another thread, this thread queues a
// stack-allocated TDmaCancelInfo and blocks until the DFC has drained.
EXPORT_C void TDmaChannel::CancelAll()
	{
	__KTRACE_OPT(KDMA, Kern::Printf("TDmaChannel::CancelAll thread %O channel - %d",
									&Kern::CurrentThread(), iPslId));
	__DMA_ASSERTD(IsOpened());

	NThread* nt = NKern::CurrentThread();
	TBool wait = FALSE;
	TDmaCancelInfo c;			// stack object; only queued in the cross-thread case
	TDmaCancelInfo* waiters = 0;

	NKern::ThreadEnterCS();
	Wait();
	const TUint32 req_count_before = iQueuedRequests;
	NThreadBase* dfcnt = iDfc.Thread();
	__e32_atomic_store_ord32(&iIsrDfc, (TUint32)KCancelFlagMask);
	// ISRs after this point will not post a DFC, however a DFC may already be queued or running or both
	if (!IsQueueEmpty())
		{
		// There is a transfer in progress.  It may complete before the DMAC
		// has stopped, but the resulting ISR will not post a DFC.
		// ISR should not happen after this function returns.
		iController->StopTransfer(*this);

		ResetStateMachine();

		// Clean-up the request queue.
		SDblQueLink* pL;
		while ((pL = iReqQ.GetFirst()) != NULL)
			{
			iQueuedRequests--;
			DDmaRequest* pR = _LOFF(pL, DDmaRequest, iLink);
			pR->OnDeque();
			}
		}
	if (!dfcnt || dfcnt==nt)
		{
		// no DFC queue or DFC runs in this thread, so just cancel it and we're finished
		iDfc.Cancel();

		// if other calls to CancelAll() are waiting for the DFC, release them here
		waiters = iCancelInfo;
		iCancelInfo = 0;

		// reset the ISR count
		__e32_atomic_store_rel32(&iIsrDfc, 0);
		}
	else
		{
		// DFC runs in another thread. Make sure it's queued and then wait for it to run.
		if (iCancelInfo)
			c.InsertBefore(iCancelInfo);	// join the existing chain of cancellers
		else
			iCancelInfo = &c;
		wait = TRUE;
		iDfc.Enque();
		}
	const TUint32 req_count_after = iQueuedRequests;
	Signal();
	if (waiters)
		waiters->Signal();
	if (wait)
		NKern::FSWait(&c.iSem);		// block until the DFC signals us
 	NKern::ThreadLeaveCS();

	// Only call PSL if there were requests queued when we entered AND there
	// are now no requests left on the queue.
	if ((req_count_before != 0) && (req_count_after == 0))
		{
		QueuedRequestCountChanged();
		}

	__DMA_INVARIANT();
	}
sl@0
   758
sl@0
   759
sl@0
   760
/**
 DFC callback function (static member).

 @param aArg The TDmaChannel pointer registered with the TDfc in Open().
 */

void TDmaChannel::Dfc(TAny* aArg)
	{
	((TDmaChannel*)aArg)->DoDfc();
	}
sl@0
   768
sl@0
   769
sl@0
   770
// Main completion DFC body.  Consumes the ISR count accumulated in iIsrDfc,
// advancing the request queue one fragment-completion at a time, invoking
// client callbacks with the channel lock released, and honouring a pending
// CancelAll() (the cancel flag stops processing).  Also recovers from missed
// interrupts by draining orphaned requests when the controller is idle.
void TDmaChannel::DoDfc()
	{
	Wait();

	// Atomically fetch and reset the number of DFC queued by ISR and the error
	// flag. Leave the cancel flag alone for now.
	const TUint32 w = __e32_atomic_and_ord32(&iIsrDfc, (TUint32)KCancelFlagMask);
	TUint32 count = w & KDfcCountMask;
	const TBool error = w & (TUint32)KErrorFlagMask;
	TBool stop = w & (TUint32)KCancelFlagMask;
	__DMA_ASSERTD(count>0 || stop);
	const TUint32 req_count_before = iQueuedRequests;
	TUint32 req_count_after = 0;

	while(count && !stop)
		{
		--count;

		// If an error occurred it must have been reported on the last interrupt since transfers are
		// suspended after an error.
		DDmaRequest::TResult res = (count==0 && error) ? DDmaRequest::EError : DDmaRequest::EOk;
		__DMA_ASSERTD(!iReqQ.IsEmpty());
		DDmaRequest* pCompletedReq = NULL;
		DDmaRequest* pCurReq = _LOFF(iReqQ.First(), DDmaRequest, iLink);
		DDmaRequest::TCallback cb = 0;
		TAny* arg = 0;

		if (res == DDmaRequest::EOk)
			{
			// Update state machine, current fragment, completed fragment and
			// tell DMAC to transfer next fragment if necessary.
			SDmaDesHdr* pCompletedHdr = NULL;
			DoDfc(*pCurReq, pCompletedHdr);

			// If just completed last fragment from current request, switch to next
			// request (if any).
			if (pCompletedHdr == pCurReq->iLastHdr)
				{
				pCompletedReq = pCurReq;
				pCurReq->iLink.Deque();
				iQueuedRequests--;
				if (iReqQ.IsEmpty())
					iNullPtr = &iCurHdr;
				pCompletedReq->OnDeque();
				}
			}
		else if (res == DDmaRequest::EError)
			pCompletedReq = pCurReq;
		else
			__DMA_CANT_HAPPEN();
		if (pCompletedReq)
			{
			// Capture the callback, then drop the lock while calling out to
			// the client (it may queue new requests or cancel).
			cb = pCompletedReq->iCb;
			arg = pCompletedReq->iCbArg;
			Signal();
			__KTRACE_OPT(KDMA, Kern::Printf("notifying DMA client result=%d", res));
			(*cb)(res,arg);
			Wait();
			}
		// Re-sample the cancel flag whenever the lock may have been released
		// (after a callback, or after Flash() flashed the lock).
		if (pCompletedReq || Flash())
			stop = __e32_atomic_load_acq32(&iIsrDfc) & (TUint32)KCancelFlagMask;
		}

	// Some interrupts may be missed (double-buffer and scatter-gather
	// controllers only) if two or more transfers complete while interrupts are
	// disabled in the CPU. If this happens, the framework will go out of sync
	// and leave some orphaned requests in the queue.
	//
	// To ensure correctness we handle this case here by checking that the request
	// queue is empty when all transfers have completed and, if not, cleaning up
	// and notifying the client of the completion of the orphaned requests.
	//
	// Note that if some interrupts are missed and the controller raises an
	// error while transferring a subsequent fragment, the error will be reported
	// on a fragment which was successfully completed.  There is no easy solution
	// to this problem, but this is okay as the only possible action following a
	// failure is to flush the whole queue.
	if (stop)
		{
		// A CancelAll() is in progress; it set iCancelInfo under the channel
		// mutex before this DFC could observe the cancel flag.
		TDmaCancelInfo* waiters = iCancelInfo;
		iCancelInfo = 0;

		// make sure DFC doesn't run again until a new request completes
		iDfc.Cancel();

		// reset the ISR count - new requests can now be processed
		__e32_atomic_store_rel32(&iIsrDfc, 0);

		req_count_after = iQueuedRequests;
		Signal();

		// release threads doing CancelAll()
		waiters->Signal();
		}
	else if (!error && !iDfc.Queued() && !iReqQ.IsEmpty() && iController->IsIdle(*this))
		{
		__KTRACE_OPT(KDMA, Kern::Printf("Missed interrupt(s) - draining request queue"));
		ResetStateMachine();

		// Move orphaned requests to temporary queue so channel queue can
		// accept new requests.
		SDblQue q;
		q.MoveFrom(&iReqQ);

		SDblQueLink* pL;
		while ((pL = q.GetFirst()) != NULL)
			{
			iQueuedRequests--;
			DDmaRequest* pR = _LOFF(pL, DDmaRequest, iLink);
			__KTRACE_OPT(KDMA, Kern::Printf("Removing request from queue and notifying client"));
			pR->OnDeque();
			DDmaRequest::TCallback cb = pR->iCb;
			TAny* arg = pR->iCbArg;
			if (cb)
				{
				Signal();
				(*cb)(DDmaRequest::EOk, arg);
				Wait();
				}
			}
		req_count_after = iQueuedRequests;
		Signal();
		}
	else
		{
		req_count_after = iQueuedRequests;
		Signal();
		}

	// Only call PSL if there were requests queued when we entered AND there
	// are now no requests left on the queue (after also having executed all
	// client callbacks).
	if ((req_count_before != 0) && (req_count_after == 0))
		{
		QueuedRequestCountChanged();
		}

	__DMA_INVARIANT();
	}
sl@0
   909
sl@0
   910
sl@0
   911
/** Reset state machine only, request queue is unchanged */

void TDmaChannel::ResetStateMachine()
	{
	DoCancelAll();		// PSL-specific cancellation
	// Back to the empty-chain state: no current header, and iNullPtr again
	// addresses iCurHdr (the chain's terminating NULL).
	iCurHdr = NULL;
	iNullPtr = &iCurHdr;
	}
sl@0
   919
sl@0
   920
sl@0
   921
/** Unlink the last item of a LLI chain from the next chain.
	Default implementation does nothing. This is overridden by scatter-gather channels. */

void TDmaChannel::DoUnlink(SDmaDesHdr& /*aHdr*/)
	{
	// Intentionally empty: channels without hardware descriptor linking
	// have nothing to unlink.
	}
sl@0
   927
sl@0
   928
sl@0
   929
/** PSL may override */
void TDmaChannel::QueuedRequestCountChanged()
	{
	// Notification hook for changes in the number of queued requests.
	// NOTE(review): the caller visible in this file invokes it only after
	// the request queue drains from non-zero to zero - confirm other call
	// sites before relying on that.
	// The default implementation merely sanity-checks the count in debug
	// builds, holding the channel lock while reading it.
#ifdef _DEBUG
	Wait();
	__KTRACE_OPT(KDMA,
				 Kern::Printf("TDmaChannel::QueuedRequestCountChanged() %d",
							  iQueuedRequests));
	// The queued-request count must never go negative.
	__DMA_ASSERTA(iQueuedRequests >= 0);
	Signal();
#endif
	}
sl@0
   941
sl@0
   942
sl@0
   943
#ifdef _DEBUG
sl@0
   944
sl@0
   945
void TDmaChannel::Invariant()
	{
	// Debug-only consistency check of the channel state; holds the
	// channel lock for the duration so the fields are read atomically.
	Wait();

	__DMA_ASSERTD(iReqCount >= 0);
	// should always point to NULL pointer ending fragment queue
	__DMA_ASSERTD(*iNullPtr == NULL);

	// Available descriptor count stays within [0, iMaxDesCount].
	__DMA_ASSERTD(0 <= iAvailDesCount && iAvailDesCount <= iMaxDesCount);

	// If there is a current header it must be one the controller knows.
	__DMA_ASSERTD(iCurHdr == NULL || iController->IsValidHdr(iCurHdr));

	if (IsOpened())
		{
		// An open channel has a current header exactly when the request
		// queue is non-empty.
		__DMA_ASSERTD((iCurHdr && !IsQueueEmpty()) || (!iCurHdr && IsQueueEmpty()));
		if (iCurHdr == NULL)
			__DMA_ASSERTD(iNullPtr == &iCurHdr);
		}
	else
		{
		// A closed channel must be completely quiescent.
		__DMA_ASSERTD(iCurHdr == NULL);
		__DMA_ASSERTD(iNullPtr == &iCurHdr);
		__DMA_ASSERTD(IsQueueEmpty());
		}

	Signal();
	}
sl@0
   972
sl@0
   973
#endif
sl@0
   974
sl@0
   975
//////////////////////////////////////////////////////////////////////////////
sl@0
   976
// TDmaSbChannel
sl@0
   977
sl@0
   978
void TDmaSbChannel::DoQueue(DDmaRequest& /*aReq*/)
	{
	// A single-buffer channel can have only one transfer programmed into
	// the hardware at a time. If one is already in flight, the new
	// request just waits on the queue until the DFC advances the chain.
	if (iTransferring)
		return;

	// Channel idle: start the hardware on the current descriptor.
	iController->Transfer(*this, *iCurHdr);
	iTransferring = ETrue;
	}
sl@0
   986
sl@0
   987
sl@0
   988
void TDmaSbChannel::DoCancelAll()
	{
	// Cancellation is only expected while a transfer is outstanding.
	__DMA_ASSERTD(iTransferring);
	iTransferring = EFalse;
	}
sl@0
   993
sl@0
   994
sl@0
   995
void TDmaSgChannel::DoUnlink(SDmaDesHdr& aHdr)
	{
	// Scatter-gather override: ask the controller to break the hardware
	// descriptor link from aHdr to the following chain.
	iController->UnlinkHwDes(*this, aHdr);
	}
sl@0
   999
sl@0
  1000
sl@0
  1001
void TDmaSbChannel::DoDfc(DDmaRequest& /*aCurReq*/, SDmaDesHdr*& aCompletedHdr)
	{
	__DMA_ASSERTD(iTransferring);
	// The descriptor that just finished is the current one; report it and
	// advance to its successor.
	aCompletedHdr = iCurHdr;
	SDmaDesHdr* const nextHdr = iCurHdr->iNext;
	iCurHdr = nextHdr;
	if (nextHdr == NULL)
		{
		// Chain exhausted - the channel goes idle.
		iTransferring = EFalse;
		}
	else
		{
		// More fragments pending: start the next one immediately.
		iController->Transfer(*this, *nextHdr);
		}
	}
sl@0
  1011
sl@0
  1012
sl@0
  1013
//////////////////////////////////////////////////////////////////////////////
sl@0
  1014
// TDmaDbChannel
sl@0
  1015
sl@0
  1016
void TDmaDbChannel::DoQueue(DDmaRequest& aReq)
	{
	// Double-buffer channel: the hardware can hold two programmed
	// transfers at once, tracked by the three-state machine below.
	if (iState == EIdle)
		{
		// Nothing in flight: program the first fragment, and the second
		// one too if this request has more than one.
		iController->Transfer(*this, *iCurHdr);
		if (iCurHdr->iNext == NULL)
			{
			iState = ETransferringLast;
			}
		else
			{
			iController->Transfer(*this, *(iCurHdr->iNext));
			iState = ETransferring;
			}
		}
	else if (iState == ETransferring)
		{
		// Both hardware buffers busy - nothing to do until a DFC frees one.
		}
	else if (iState == ETransferringLast)
		{
		// One hardware slot free: fill it with the new request's first
		// fragment.
		iController->Transfer(*this, *(aReq.iFirstHdr));
		iState = ETransferring;
		}
	else
		{
		__DMA_CANT_HAPPEN();
		}
	}
sl@0
  1041
sl@0
  1042
sl@0
  1043
void TDmaDbChannel::DoCancelAll()
	{
	// Return the double-buffer state machine to its idle state.
	iState = EIdle;
	}
sl@0
  1047
sl@0
  1048
sl@0
  1049
void TDmaDbChannel::DoDfc(DDmaRequest& /*aCurReq*/, SDmaDesHdr*& aCompletedHdr)
	{
	// The current descriptor has completed: hand it back and advance.
	aCompletedHdr = iCurHdr;
	iCurHdr = iCurHdr->iNext;
	if (iState == ETransferringLast)
		{
		// That was the final programmed fragment - channel is idle again.
		iState = EIdle;
		}
	else if (iState == ETransferring)
		{
		// One hardware buffer just freed. Either program the fragment
		// after the one still in the hardware, or note that the last
		// fragment is now the one in flight.
		if (iCurHdr->iNext == NULL)
			iState = ETransferringLast;
		else
			iController->Transfer(*this, *(iCurHdr->iNext));
		}
	else
		{
		__DMA_CANT_HAPPEN();
		}
	}
sl@0
  1068
sl@0
  1069
sl@0
  1070
//////////////////////////////////////////////////////////////////////////////
sl@0
  1071
// TDmaSgChannel
sl@0
  1072
sl@0
  1073
void TDmaSgChannel::DoQueue(DDmaRequest& aReq)
	{
	if (!iTransferring)
		{
		// Channel idle: start the hardware on this request's first
		// descriptor.
		iController->Transfer(*this, *(aReq.iFirstHdr));
		iTransferring = ETrue;
		return;
		}

	// Channel busy: hardware-link this request's descriptor chain onto
	// the end of the previously queued request's chain.
	__DMA_ASSERTD(!aReq.iLink.Alone());
	DDmaRequest* const prevReq = _LOFF(aReq.iLink.iPrev, DDmaRequest, iLink);
	iController->AppendHwDes(*this, *(prevReq->iLastHdr), *(aReq.iFirstHdr));
	}
sl@0
  1087
sl@0
  1088
sl@0
  1089
void TDmaSgChannel::DoCancelAll()
	{
	// Cancellation is only expected while a transfer is outstanding.
	__DMA_ASSERTD(iTransferring);
	iTransferring = EFalse;
	}
sl@0
  1094
sl@0
  1095
sl@0
  1096
void TDmaSgChannel::DoDfc(DDmaRequest& aCurReq, SDmaDesHdr*& aCompletedHdr)
	{
	__DMA_ASSERTD(iTransferring);
	// A scatter-gather request completes as a whole chain, so the
	// completed header is the request's last one.
	SDmaDesHdr* const lastHdr = aCurReq.iLastHdr;
	aCompletedHdr = lastHdr;
	// Anything linked after it belongs to the next queued request; the
	// channel stays busy only if such a chain exists.
	iCurHdr = lastHdr->iNext;
	iTransferring = (iCurHdr != NULL);
	}