os/kernelhwsrv/kernel/eka/drivers/dma/dma2_pil.cpp
changeset 0 bde4ae8d615e
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/os/kernelhwsrv/kernel/eka/drivers/dma/dma2_pil.cpp	Fri Jun 15 03:10:57 2012 +0200
     1.3 @@ -0,0 +1,2311 @@
     1.4 +// Copyright (c) 2002-2009 Nokia Corporation and/or its subsidiary(-ies).
     1.5 +// All rights reserved.
     1.6 +// This component and the accompanying materials are made available
     1.7 +// under the terms of "Eclipse Public License v1.0"
     1.8 +// which accompanies this distribution, and is available
     1.9 +// at the URL "http://www.eclipse.org/legal/epl-v10.html".
    1.10 +//
    1.11 +// Initial Contributors:
    1.12 +// Nokia Corporation - initial contribution.
    1.13 +//
    1.14 +// Contributors:
    1.15 +//
    1.16 +// Description:
    1.17 +// e32/drivers/dma2_pil.cpp
    1.18 +// DMA Platform Independent Layer (PIL)
    1.19 +//
    1.20 +//
    1.21 +
    1.22 +#include <drivers/dma.h>
    1.23 +#include <drivers/dma_hai.h>
    1.24 +
    1.25 +#include <kernel/kern_priv.h>
    1.26 +
    1.27 +
     1.28 +// The stock Symbian Min() & Max() are broken, so we define our own unsigned versions here
    1.29 +inline TUint Min(TUint aLeft, TUint aRight)
    1.30 +	{return(aLeft < aRight ? aLeft : aRight);}
    1.31 +inline TUint Max(TUint aLeft, TUint aRight)
    1.32 +	{return(aLeft > aRight ? aLeft : aRight);}
    1.33 +
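          +// A minimal illustration of the pitfall the local versions avoid (the exact
          +// failure mode of the stock templates is assumed here, not verified): mixing
          +// signed and unsigned operands silently promotes the signed value, e.g.
          +//
          +//   TInt  a = -1;
          +//   TUint b = 1u;
          +//   // (a < b) is EFalse: a is promoted to 0xFFFFFFFFu before the compare.
          +//
          +// The TUint-only overloads above make the unsigned comparison explicit at
          +// the call site.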
    1.34 +
    1.35 +// Uncomment the following #define only when freezing the DMA2 export library.
    1.36 +//#define __FREEZE_DMA2_LIB
    1.37 +#ifdef __FREEZE_DMA2_LIB
    1.38 +TInt DmaChannelMgr::StaticExtension(TInt, TAny*) {return 0;}
    1.39 +TDmaChannel* DmaChannelMgr::Open(TUint32, TBool, TUint) {return 0;}
    1.40 +void DmaChannelMgr::Close(TDmaChannel*) {}
    1.41 +EXPORT_C const TDmaTestInfo& DmaTestInfo() {static TDmaTestInfo a; return a;}
    1.42 +EXPORT_C const TDmaV2TestInfo& DmaTestInfoV2() {static TDmaV2TestInfo a; return a;}
    1.43 +#endif	// #ifdef __FREEZE_DMA2_LIB
    1.44 +
    1.45 +
    1.46 +static const char KDmaPanicCat[] = "DMA " __FILE__;
    1.47 +
    1.48 +//////////////////////////////////////////////////////////////////////
    1.49 +// DmaChannelMgr
    1.50 +//
    1.51 +// Wait, Signal, and Initialise are defined here in the PIL.
     1.52 +// Open, Close, and Extension must be defined in the PSL.
    1.53 +
    1.54 +NFastMutex DmaChannelMgr::Lock;
    1.55 +
    1.56 +
    1.57 +void DmaChannelMgr::Wait()
    1.58 +	{
    1.59 +	NKern::FMWait(&Lock);
    1.60 +	}
    1.61 +
    1.62 +
    1.63 +void DmaChannelMgr::Signal()
    1.64 +	{
    1.65 +	NKern::FMSignal(&Lock);
    1.66 +	}
    1.67 +
    1.68 +
    1.69 +TInt DmaChannelMgr::Initialise()
    1.70 +	{
    1.71 +	return KErrNone;
    1.72 +	}
    1.73 +
    1.74 +
    1.75 +class TDmaCancelInfo : public SDblQueLink
    1.76 +	{
    1.77 +public:
    1.78 +	TDmaCancelInfo();
    1.79 +	void Signal();
    1.80 +public:
    1.81 +	NFastSemaphore iSem;
    1.82 +	};
    1.83 +
    1.84 +
    1.85 +TDmaCancelInfo::TDmaCancelInfo()
    1.86 +	: iSem(0)
    1.87 +	{
    1.88 +	iNext = this;
    1.89 +	iPrev = this;
    1.90 +	}
    1.91 +
    1.92 +
    1.93 +void TDmaCancelInfo::Signal()
    1.94 +	{
    1.95 +	TDmaCancelInfo* p = this;
    1.96 +	FOREVER
    1.97 +		{
    1.98 +		TDmaCancelInfo* next = (TDmaCancelInfo*)p->iNext;
    1.99 +		if (p!=next)
   1.100 +			p->Deque();
   1.101 +		NKern::FSSignal(&p->iSem);	// Don't dereference p after this
   1.102 +		if (p==next)
   1.103 +			break;
   1.104 +		p = next;
   1.105 +		}
   1.106 +	}
   1.107 +
   1.108 +
   1.109 +//////////////////////////////////////////////////////////////////////////////
   1.110 +
   1.111 +#ifdef __DMASIM__
   1.112 +#ifdef __WINS__
   1.113 +typedef TLinAddr TPhysAddr;
   1.114 +#endif
   1.115 +static inline TPhysAddr LinToPhys(TLinAddr aLin) {return aLin;}
   1.116 +#else
   1.117 +static inline TPhysAddr LinToPhys(TLinAddr aLin) {return Epoc::LinearToPhysical(aLin);}
   1.118 +#endif
   1.119 +
   1.120 +//
   1.121 +// Return minimum of aMaxSize and size of largest physically contiguous block
   1.122 +// starting at aLinAddr.
   1.123 +//
   1.124 +static TInt MaxPhysSize(TLinAddr aLinAddr, const TInt aMaxSize)
   1.125 +	{
   1.126 +	const TPhysAddr physBase = LinToPhys(aLinAddr);
   1.127 +	TLinAddr lin = aLinAddr;
   1.128 +	TInt size = 0;
   1.129 +	for (;;)
   1.130 +		{
   1.131 +		// Round up the linear address to the next MMU page boundary
   1.132 +		const TLinAddr linBoundary = Kern::RoundToPageSize(lin + 1);
   1.133 +		size += linBoundary - lin;
   1.134 +		if (size >= aMaxSize)
   1.135 +			return aMaxSize;
   1.136 +		if ((physBase + size) != LinToPhys(linBoundary))
   1.137 +			return size;
   1.138 +		lin = linBoundary;
   1.139 +		}
   1.140 +	}
   1.141 +
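          +// Worked example (hypothetical addresses, 4 KB pages): for a buffer at
          +// linear 0x1000 whose first two pages are physically adjacent but whose
          +// third page is not:
          +//
          +//   MaxPhysSize(0x1000, 0x3000);  // returns 0x2000 (stops at the break)
          +//   MaxPhysSize(0x1000, 0x1800);  // returns 0x1800 (aMaxSize reached first)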
   1.142 +
   1.143 +//////////////////////////////////////////////////////////////////////////////
   1.144 +// TDmac
   1.145 +
   1.146 +TDmac::TDmac(const SCreateInfo& aInfo)
   1.147 +	: iMaxDesCount(aInfo.iDesCount),
   1.148 +	  iAvailDesCount(aInfo.iDesCount),
   1.149 +	  iHdrPool(NULL),
   1.150 +#ifndef __WINS__
   1.151 +	  iHwDesChunk(NULL),
   1.152 +#endif
   1.153 +	  iDesPool(NULL),
   1.154 +	  iDesSize(aInfo.iDesSize),
   1.155 +	  iCapsHwDes(aInfo.iCapsHwDes),
   1.156 +	  iFreeHdr(NULL)
   1.157 +	{
   1.158 +	__DMA_ASSERTD(iMaxDesCount > 0);
   1.159 +	__DMA_ASSERTD(iDesSize > 0);
   1.160 +	}
   1.161 +
   1.162 +
   1.163 +//
   1.164 +// Second-phase c'tor
   1.165 +//
   1.166 +TInt TDmac::Create(const SCreateInfo& aInfo)
   1.167 +	{
   1.168 +	iHdrPool = new SDmaDesHdr[iMaxDesCount];
   1.169 +	if (iHdrPool == NULL)
   1.170 +		{
   1.171 +		return KErrNoMemory;
   1.172 +		}
   1.173 +
   1.174 +	TInt r = AllocDesPool(aInfo.iDesChunkAttribs);
   1.175 +	if (r != KErrNone)
   1.176 +		{
    1.177 +		return r; // propagate AllocDesPool()'s actual error, not just KErrNoMemory
   1.178 +		}
   1.179 +
   1.180 +	// Link all descriptor headers together on the free list
   1.181 +	iFreeHdr = iHdrPool;
   1.182 +	for (TInt i = 0; i < iMaxDesCount - 1; i++)
   1.183 +		iHdrPool[i].iNext = iHdrPool + i + 1;
   1.184 +	iHdrPool[iMaxDesCount-1].iNext = NULL;
   1.185 +
   1.186 +	__DMA_INVARIANT();
   1.187 +	return KErrNone;
   1.188 +	}
   1.189 +
   1.190 +
   1.191 +TDmac::~TDmac()
   1.192 +	{
   1.193 +	__DMA_INVARIANT();
   1.194 +
   1.195 +	FreeDesPool();
   1.196 +	delete[] iHdrPool;
   1.197 +	}
   1.198 +
   1.199 +
   1.200 +void TDmac::Transfer(const TDmaChannel& /*aChannel*/, const SDmaDesHdr& /*aHdr*/)
   1.201 +	{
    1.202 +	// A concrete TDmac (PSL) must override this function if it has reported
    1.203 +	// support for a channel type for which the PIL will call it.
   1.204 +	__DMA_CANT_HAPPEN();
   1.205 +	}
   1.206 +
   1.207 +
   1.208 +void TDmac::Transfer(const TDmaChannel& /*aChannel*/, const SDmaDesHdr& /*aSrcHdr*/,
   1.209 +					 const SDmaDesHdr& /*aDstHdr*/)
   1.210 +	{
    1.211 +	// A concrete TDmac (PSL) must override this function if it has reported
    1.212 +	// support for a channel type for which the PIL will call it.
   1.213 +	__DMA_CANT_HAPPEN();
   1.214 +	}
   1.215 +
   1.216 +
   1.217 +TInt TDmac::PauseTransfer(const TDmaChannel& /*aChannel*/)
   1.218 +	{
   1.219 +	// TDmac needs to override this function if it has reported support for
   1.220 +	// channel pausing/resuming.
   1.221 +	return KErrNotSupported;
   1.222 +	}
   1.223 +
   1.224 +
   1.225 +TInt TDmac::ResumeTransfer(const TDmaChannel& /*aChannel*/)
   1.226 +	{
   1.227 +	// TDmac needs to override this function if it has reported support for
   1.228 +	// channel pausing/resuming.
   1.229 +	return KErrNotSupported;
   1.230 +	}
   1.231 +
   1.232 +
   1.233 +TInt TDmac::AllocDesPool(TUint aAttribs)
   1.234 +	{
   1.235 +	// Calling thread must be in CS
   1.236 +	__ASSERT_CRITICAL;
   1.237 +	TInt r;
   1.238 +	if (iCapsHwDes)
   1.239 +		{
   1.240 +		const TInt size = iMaxDesCount * iDesSize;
   1.241 +#ifdef __WINS__
   1.242 +		(void)aAttribs;
   1.243 +		iDesPool = new TUint8[size];
   1.244 +		r = iDesPool ? KErrNone : KErrNoMemory;
   1.245 +#else
    1.246 +		// The chunk must be mapped supervisor r/w, user none - anything else means the PSL passed an incorrect attribute mask
   1.247 +		__DMA_ASSERTD((aAttribs & EMapAttrAccessMask) == EMapAttrSupRw);
   1.248 +		TPhysAddr phys;
   1.249 +		r = Epoc::AllocPhysicalRam(size, phys);
   1.250 +		if (r == KErrNone)
   1.251 +			{
   1.252 +			r = DPlatChunkHw::New(iHwDesChunk, phys, size, aAttribs);
   1.253 +			if (r == KErrNone)
   1.254 +				{
   1.255 +				iDesPool = (TAny*)iHwDesChunk->LinearAddress();
   1.256 +				__KTRACE_OPT(KDMA, Kern::Printf("descriptor hw chunk created lin=0x%08X phys=0x%08X, size=0x%X",
   1.257 +												iHwDesChunk->iLinAddr, iHwDesChunk->iPhysAddr, size));
   1.258 +				}
   1.259 +			else
   1.260 +				Epoc::FreePhysicalRam(phys, size);
   1.261 +			}
   1.262 +#endif
   1.263 +		}
   1.264 +	else
   1.265 +		{
   1.266 +		iDesPool = new TDmaTransferArgs[iMaxDesCount];
   1.267 +		r = iDesPool ? KErrNone : KErrNoMemory;
   1.268 +		}
   1.269 +	return r;
   1.270 +	}
   1.271 +
   1.272 +
   1.273 +void TDmac::FreeDesPool()
   1.274 +	{
   1.275 +	// Calling thread must be in CS
   1.276 +	__ASSERT_CRITICAL;
   1.277 +	if (iCapsHwDes)
   1.278 +		{
   1.279 +#ifdef __WINS__
   1.280 +		delete[] iDesPool;
   1.281 +#else
   1.282 +		if (iHwDesChunk)
   1.283 +			{
   1.284 +			const TPhysAddr phys = iHwDesChunk->PhysicalAddress();
   1.285 +			const TInt size = iHwDesChunk->iSize;
   1.286 +			iHwDesChunk->Close(NULL);
   1.287 +			Epoc::FreePhysicalRam(phys, size);
   1.288 +			}
   1.289 +#endif
   1.290 +		}
   1.291 +	else
   1.292 +		{
   1.293 +		Kern::Free(iDesPool);
   1.294 +		}
   1.295 +	}
   1.296 +
   1.297 +
   1.298 +//
   1.299 +// Prealloc the given number of descriptors.
   1.300 +//
   1.301 +TInt TDmac::ReserveSetOfDes(TInt aCount)
   1.302 +	{
   1.303 +	__KTRACE_OPT(KDMA, Kern::Printf("TDmac::ReserveSetOfDes count=%d", aCount));
   1.304 +	__DMA_ASSERTD(aCount > 0);
   1.305 +	TInt r = KErrTooBig;
   1.306 +	Wait();
    1.307 +	if (aCount <= iAvailDesCount)
   1.308 +		{
   1.309 +		iAvailDesCount -= aCount;
   1.310 +		r = KErrNone;
   1.311 +		}
   1.312 +	Signal();
   1.313 +	__DMA_INVARIANT();
   1.314 +	return r;
   1.315 +	}
   1.316 +
   1.317 +
   1.318 +//
   1.319 +// Return the given number of preallocated descriptors to the free pool.
   1.320 +//
   1.321 +void TDmac::ReleaseSetOfDes(TInt aCount)
   1.322 +	{
   1.323 +	__DMA_ASSERTD(aCount >= 0);
   1.324 +	Wait();
   1.325 +	iAvailDesCount += aCount;
   1.326 +	Signal();
   1.327 +	__DMA_INVARIANT();
   1.328 +	}
   1.329 +
   1.330 +
   1.331 +//
   1.332 +// Queue DFC and update word used to communicate with channel DFC.
   1.333 +//
   1.334 +// Called in interrupt context by PSL.
   1.335 +//
   1.336 +void TDmac::HandleIsr(TDmaChannel& aChannel, TUint aEventMask, TBool aIsComplete)
   1.337 +	{
   1.338 +	__KTRACE_OPT(KDMA, Kern::Printf("TDmac::HandleIsr"));
   1.339 +
   1.340 +	// Function needs to be called by PSL in ISR context
   1.341 +	__DMA_ASSERTD(NKern::CurrentContext() == NKern::EInterrupt);
   1.342 +
   1.343 +	// First the ISR callback stuff
   1.344 +
   1.345 +	// Is this a transfer completion notification?
   1.346 +	if (aEventMask & EDmaCallbackRequestCompletion)
   1.347 +		{
   1.348 +		// If so, has the client requested an ISR callback?
   1.349 +		if (__e32_atomic_load_acq32(&aChannel.iIsrCbRequest))
   1.350 +			{
   1.351 +			__KTRACE_OPT(KDMA, Kern::Printf("ISR callback"));
   1.352 +
    1.353 +			// Since iIsrCbRequest was set, no other thread will be
    1.354 +			// modifying the request queue.
   1.355 +			const DDmaRequest* const req = _LOFF(aChannel.iReqQ.First(), DDmaRequest, iLink);
   1.356 +
    1.357 +			// We expect the request to have requested
    1.358 +			// an ISR callback
   1.359 +			__NK_ASSERT_DEBUG(req->iIsrCb);
   1.360 +
   1.361 +			TDmaCallback const cb = req->iDmaCb;
   1.362 +			TAny* const arg = req->iDmaCbArg;
   1.363 +			// Execute the client callback
   1.364 +			(*cb)(EDmaCallbackRequestCompletion,
   1.365 +				  (aIsComplete ? EDmaResultOK : EDmaResultError),
   1.366 +				  arg,
   1.367 +				  NULL);
   1.368 +			// Now let's see if the callback rescheduled the transfer request
   1.369 +			// (see TDmaChannel::IsrRedoRequest()).
   1.370 +			const TBool redo = aChannel.iRedoRequest;
   1.371 +			aChannel.iRedoRequest = EFalse;
   1.372 +			const TBool stop = __e32_atomic_load_acq32(&aChannel.iIsrDfc) &
   1.373 +				(TUint32)TDmaChannel::KCancelFlagMask;
   1.374 +			// There won't be another ISR callback if this callback didn't
   1.375 +			// reschedule the request, or the client cancelled all requests, or
   1.376 +			// this callback rescheduled the request with a DFC callback.
   1.377 +			if (!redo || stop || !req->iIsrCb)
   1.378 +				{
   1.379 +				__e32_atomic_store_rel32(&aChannel.iIsrCbRequest, EFalse);
   1.380 +				}
   1.381 +			if (redo && !stop)
   1.382 +				{
   1.383 +				// We won't queue the channel DFC in this case and just return.
   1.384 +				__KTRACE_OPT(KDMA, Kern::Printf("CB rescheduled xfer -> no DFC"));
   1.385 +				return;
   1.386 +				}
   1.387 +			// Not redoing or being cancelled means we've been calling the
   1.388 +			// request's ISR callback for the last time. We're going to
   1.389 +			// complete the request via the DFC in the usual way.
   1.390 +			}
   1.391 +		}
   1.392 +
   1.393 +	// Now queue a DFC if necessary. The possible scenarios are:
   1.394 +	// a) DFC not queued (orig == 0)              -> update iIsrDfc + queue DFC
   1.395 +	// b) DFC queued, not running yet (orig != 0) -> just update iIsrDfc
   1.396 +	// c) DFC running / iIsrDfc not reset yet (orig != 0) -> just update iIsrDfc
   1.397 +	// d) DFC running / iIsrDfc already reset (orig == 0) -> update iIsrDfc + requeue DFC
   1.398 +
   1.399 +	// Set error flag if necessary.
   1.400 +	const TUint32 inc = aIsComplete ? 1u : TUint32(TDmaChannel::KErrorFlagMask) | 1u;
   1.401 +
   1.402 +	// Add 'inc' (interrupt count increment + poss. error flag) to 'iIsrDfc' if
   1.403 +	// cancel flag is not set, do nothing otherwise. Assign original value of
   1.404 +	// 'iIsrDfc' to 'orig' in any case.
   1.405 +	const TUint32 orig = __e32_atomic_tau_ord32(&aChannel.iIsrDfc,
   1.406 +												TUint32(TDmaChannel::KCancelFlagMask),
   1.407 +												0,
   1.408 +												inc);
   1.409 +
    1.410 +	// As the transfer should be suspended when an error occurs, we should
    1.411 +	// never get here with the error flag already set.
   1.412 +	__DMA_ASSERTD((orig & inc & (TUint32)TDmaChannel::KErrorFlagMask) == 0);
   1.413 +
   1.414 +	if (orig == 0)
   1.415 +		{
   1.416 +		aChannel.iDfc.Add();
   1.417 +		}
   1.418 +	}
   1.419 +
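          +// A hedged sketch of the PSL side (all names below are hypothetical, not
          +// part of this API): a DMAC interrupt handler typically reads and clears
          +// the hardware status, then forwards the result to the PIL via HandleIsr():
          +//
          +//   void DMyDmac::Isr(TAny* aArg)
          +//       {
          +//       DMyDmac& dmac = *static_cast<DMyDmac*>(aArg);
          +//       const TUint32 status = dmac.ReadAndClearIrqStatus(); // hypothetical
          +//       TDmaChannel& channel = dmac.ChannelFromIrq(status);  // hypothetical
          +//       dmac.HandleIsr(channel, EDmaCallbackRequestCompletion,
          +//                      !(status & KMyErrorBit));             // hypothetical
          +//       }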
   1.420 +
   1.421 +TInt TDmac::InitDes(const SDmaDesHdr& aHdr, const TDmaTransferArgs& aTransferArgs)
   1.422 +	{
   1.423 +	__KTRACE_OPT(KDMA, Kern::Printf("TDmac::InitDes"));
   1.424 +	TInt r;
   1.425 +	if (iCapsHwDes)
   1.426 +		{
   1.427 +		__KTRACE_OPT(KDMA, Kern::Printf("iCaps.iHwDescriptors"));
   1.428 +		r = InitHwDes(aHdr, aTransferArgs);
   1.429 +		}
   1.430 +	else
   1.431 +		{
   1.432 +		TDmaTransferArgs& args = HdrToDes(aHdr);
   1.433 +		args = aTransferArgs;
   1.434 +		r = KErrNone;
   1.435 +		}
   1.436 +	return r;
   1.437 +	}
   1.438 +
   1.439 +
   1.440 +TInt TDmac::InitHwDes(const SDmaDesHdr& /*aHdr*/, const TDmaTransferArgs& /*aTransferArgs*/)
   1.441 +	{
   1.442 +	// concrete controller must override if SDmacCaps::iHwDescriptors set
   1.443 +	__DMA_CANT_HAPPEN();
   1.444 +	return KErrGeneral;
   1.445 +	}
   1.446 +
   1.447 +
   1.448 +TInt TDmac::InitSrcHwDes(const SDmaDesHdr& /*aHdr*/, const TDmaTransferArgs& /*aTransferArgs*/)
   1.449 +	{
   1.450 +	// concrete controller must override if SDmacCaps::iAsymHwDescriptors set
   1.451 +	__DMA_CANT_HAPPEN();
   1.452 +	return KErrGeneral;
   1.453 +	}
   1.454 +
   1.455 +
   1.456 +TInt TDmac::InitDstHwDes(const SDmaDesHdr& /*aHdr*/, const TDmaTransferArgs& /*aTransferArgs*/)
   1.457 +	{
   1.458 +	// concrete controller must override if SDmacCaps::iAsymHwDescriptors set
   1.459 +	__DMA_CANT_HAPPEN();
   1.460 +	return KErrGeneral;
   1.461 +	}
   1.462 +
   1.463 +
   1.464 +TInt TDmac::UpdateDes(const SDmaDesHdr& aHdr, TUint32 aSrcAddr, TUint32 aDstAddr,
   1.465 +					  TUint aTransferCount, TUint32 aPslRequestInfo)
   1.466 +	{
   1.467 +	__KTRACE_OPT(KDMA, Kern::Printf("TDmac::UpdateDes"));
   1.468 +	TInt r;
   1.469 +	if (iCapsHwDes)
   1.470 +		{
   1.471 +		__KTRACE_OPT(KDMA, Kern::Printf("iCaps.iHwDescriptors"));
   1.472 +		r = UpdateHwDes(aHdr, aSrcAddr, aDstAddr, aTransferCount, aPslRequestInfo);
   1.473 +		}
   1.474 +	else
   1.475 +		{
   1.476 +		TDmaTransferArgs& args = HdrToDes(aHdr);
   1.477 +		if (aSrcAddr != KPhysAddrInvalid)
   1.478 +			args.iSrcConfig.iAddr = aSrcAddr;
   1.479 +		if (aDstAddr != KPhysAddrInvalid)
   1.480 +			args.iDstConfig.iAddr = aDstAddr;
   1.481 +		if (aTransferCount)
   1.482 +			args.iTransferCount = aTransferCount;
   1.483 +		if (aPslRequestInfo)
   1.484 +			args.iPslRequestInfo = aPslRequestInfo;
   1.485 +		r = KErrNone;
   1.486 +		}
   1.487 +	return r;
   1.488 +	}
   1.489 +
   1.490 +
   1.491 +TInt TDmac::UpdateHwDes(const SDmaDesHdr& /*aHdr*/, TUint32 /*aSrcAddr*/, TUint32 /*aDstAddr*/,
   1.492 +						TUint /*aTransferCount*/, TUint32 /*aPslRequestInfo*/)
   1.493 +	{
   1.494 +	// concrete controller must override if SDmacCaps::iHwDescriptors set
   1.495 +	__DMA_CANT_HAPPEN();
   1.496 +	return KErrGeneral;
   1.497 +	}
   1.498 +
   1.499 +
   1.500 +TInt TDmac::UpdateSrcHwDes(const SDmaDesHdr& /*aHdr*/, TUint32 /*aSrcAddr*/,
   1.501 +						   TUint /*aTransferCount*/, TUint32 /*aPslRequestInfo*/)
   1.502 +	{
   1.503 +	// concrete controller must override if SDmacCaps::iAsymHwDescriptors set
   1.504 +	__DMA_CANT_HAPPEN();
   1.505 +	return KErrGeneral;
   1.506 +	}
   1.507 +
   1.508 +
   1.509 +TInt TDmac::UpdateDstHwDes(const SDmaDesHdr& /*aHdr*/, TUint32 /*aDstAddr*/,
   1.510 +						   TUint /*aTransferCount*/, TUint32 /*aPslRequestInfo*/)
   1.511 +	{
   1.512 +	// concrete controller must override if SDmacCaps::iAsymHwDescriptors set
   1.513 +	__DMA_CANT_HAPPEN();
   1.514 +	return KErrGeneral;
   1.515 +	}
   1.516 +
   1.517 +
   1.518 +void TDmac::ChainHwDes(const SDmaDesHdr& /*aHdr*/, const SDmaDesHdr& /*aNextHdr*/)
   1.519 +	{
   1.520 +	// concrete controller must override if SDmacCaps::iHwDescriptors set
   1.521 +	__DMA_CANT_HAPPEN();
   1.522 +	}
   1.523 +
   1.524 +
   1.525 +void TDmac::AppendHwDes(const TDmaChannel& /*aChannel*/, const SDmaDesHdr& /*aLastHdr*/,
   1.526 +						const SDmaDesHdr& /*aNewHdr*/)
   1.527 +	{
   1.528 + 	// concrete controller must override if SDmacCaps::iHwDescriptors set
   1.529 +	__DMA_CANT_HAPPEN();
   1.530 +	}
   1.531 +
   1.532 +
   1.533 +void TDmac::AppendHwDes(const TDmaChannel& /*aChannel*/,
   1.534 +						const SDmaDesHdr& /*aSrcLastHdr*/, const SDmaDesHdr& /*aSrcNewHdr*/,
   1.535 +						const SDmaDesHdr& /*aDstLastHdr*/, const SDmaDesHdr& /*aDstNewHdr*/)
   1.536 +	{
   1.537 +	// concrete controller must override if SDmacCaps::iAsymHwDescriptors set
   1.538 +	__DMA_CANT_HAPPEN();
   1.539 +	}
   1.540 +
   1.541 +
   1.542 +void TDmac::UnlinkHwDes(const TDmaChannel& /*aChannel*/, SDmaDesHdr& /*aHdr*/)
   1.543 +	{
   1.544 + 	// concrete controller must override if SDmacCaps::iHwDescriptors set
   1.545 +	__DMA_CANT_HAPPEN();
   1.546 +	}
   1.547 +
   1.548 +
   1.549 +void TDmac::ClearHwDes(const SDmaDesHdr& /*aHdr*/)
   1.550 +	{
   1.551 +	// default implementation - NOP; concrete controller may override
   1.552 +	return;
   1.553 +	}
   1.554 +
   1.555 +
   1.556 +TInt TDmac::LinkChannels(TDmaChannel& /*a1stChannel*/, TDmaChannel& /*a2ndChannel*/)
   1.557 +	{
   1.558 +	// default implementation - NOP; concrete controller may override
   1.559 +	return KErrNotSupported;
   1.560 +	}
   1.561 +
   1.562 +
   1.563 +TInt TDmac::UnlinkChannel(TDmaChannel& /*aChannel*/)
   1.564 +	{
   1.565 +	// default implementation - NOP; concrete controller may override
   1.566 +	return KErrNotSupported;
   1.567 +	}
   1.568 +
   1.569 +
   1.570 +TInt TDmac::FailNext(const TDmaChannel& /*aChannel*/)
   1.571 +	{
   1.572 +	// default implementation - NOP; concrete controller may override
   1.573 +	return KErrNotSupported;
   1.574 +	}
   1.575 +
   1.576 +
   1.577 +TInt TDmac::MissNextInterrupts(const TDmaChannel& /*aChannel*/, TInt /*aInterruptCount*/)
   1.578 +	{
   1.579 +	// default implementation - NOP; concrete controller may override
   1.580 +	return KErrNotSupported;
   1.581 +	}
   1.582 +
   1.583 +
   1.584 +TInt TDmac::Extension(TDmaChannel& /*aChannel*/, TInt /*aCmd*/, TAny* /*aArg*/)
   1.585 +	{
   1.586 +	// default implementation - NOP; concrete controller may override
   1.587 +	return KErrNotSupported;
   1.588 +	}
   1.589 +
   1.590 +
   1.591 +TUint32 TDmac::HwDesNumDstElementsTransferred(const SDmaDesHdr& /*aHdr*/)
   1.592 +	{
   1.593 + 	// Concrete controller must override if SDmacCaps::iHwDescriptors set.
   1.594 +	__DMA_CANT_HAPPEN();
   1.595 +	return 0;
   1.596 +	}
   1.597 +
   1.598 +
   1.599 +TUint32 TDmac::HwDesNumSrcElementsTransferred(const SDmaDesHdr& /*aHdr*/)
   1.600 +	{
   1.601 + 	// Concrete controller must override if SDmacCaps::iHwDescriptors set.
   1.602 +	__DMA_CANT_HAPPEN();
   1.603 +	return 0;
   1.604 +	}
   1.605 +
   1.606 +
   1.607 +#ifdef _DEBUG
   1.608 +
   1.609 +void TDmac::Invariant()
   1.610 +	{
   1.611 +	Wait();
   1.612 +	__DMA_ASSERTD(0 <= iAvailDesCount && iAvailDesCount <= iMaxDesCount);
   1.613 +	__DMA_ASSERTD(! iFreeHdr || IsValidHdr(iFreeHdr));
   1.614 +	for (TInt i = 0; i < iMaxDesCount; i++)
   1.615 +		__DMA_ASSERTD(iHdrPool[i].iNext == NULL || IsValidHdr(iHdrPool[i].iNext));
   1.616 +	Signal();
   1.617 +	}
   1.618 +
   1.619 +
   1.620 +TBool TDmac::IsValidHdr(const SDmaDesHdr* aHdr)
   1.621 +	{
   1.622 +	return (iHdrPool <= aHdr) && (aHdr < iHdrPool + iMaxDesCount);
   1.623 +	}
   1.624 +
   1.625 +#endif
   1.626 +
   1.627 +
   1.628 +
   1.629 +
   1.630 +//
   1.631 +// Internal compat version, used by legacy Fragment()
   1.632 +//
   1.633 +TDmaTransferConfig::TDmaTransferConfig(TUint32 aAddr, TUint aFlags, TBool aAddrInc)
   1.634 +	: iAddr(aAddr),
   1.635 +	  iAddrMode(aAddrInc ? KDmaAddrModePostIncrement : KDmaAddrModeConstant),
   1.636 +	  iElementSize(0),
   1.637 +	  iElementsPerFrame(0),
   1.638 +	  iElementsPerPacket(0),
   1.639 +	  iFramesPerTransfer(0),
   1.640 +	  iElementSkip(0),
   1.641 +	  iFrameSkip(0),
   1.642 +	  iBurstSize(KDmaBurstSizeAny),
   1.643 +	  iFlags(aFlags),
   1.644 +	  iSyncFlags(KDmaSyncAuto),
   1.645 +	  iPslTargetInfo(0),
   1.646 +	  iRepeatCount(0),
   1.647 +	  iDelta(~0u),
   1.648 +	  iReserved(0)
   1.649 +	{
   1.650 +	}
   1.651 +
   1.652 +
   1.653 +
   1.654 +//
   1.655 +// Internal compat version, used by legacy Fragment()
   1.656 +//
   1.657 +TDmaTransferArgs::TDmaTransferArgs(TUint32 aSrc, TUint32 aDest, TInt aCount,
   1.658 +								   TUint aFlags, TUint32 aPslInfo)
   1.659 +	: iSrcConfig(aSrc, RequestFlags2SrcConfigFlags(aFlags), (aFlags & KDmaIncSrc)),
   1.660 +	  iDstConfig(aDest, RequestFlags2DstConfigFlags(aFlags), (aFlags & KDmaIncDest)),
   1.661 +	  iTransferCount(aCount),
   1.662 +	  iGraphicsOps(KDmaGraphicsOpNone),
   1.663 +	  iColour(0),
   1.664 +	  iFlags(0),
   1.665 +	  iChannelPriority(KDmaPriorityNone),
   1.666 +	  iPslRequestInfo(aPslInfo),
   1.667 +	  iDelta(~0u),
   1.668 +	  iReserved1(0),
   1.669 +	  iChannelCookie(0),
   1.670 +	  iReserved2(0)
   1.671 +	{
   1.672 +	}
   1.673 +
   1.674 +
   1.675 +//
    1.676 +// As DDmaRequest is derived from DBase, the zero initializations aren't
    1.677 +// strictly necessary here, but they make the intent explicit.
   1.678 +//
   1.679 +EXPORT_C DDmaRequest::DDmaRequest(TDmaChannel& aChannel, TCallback aCb,
   1.680 +								  TAny* aCbArg, TInt aMaxTransferSize)
   1.681 +	: iChannel(aChannel),
   1.682 +	  iCb(aCb),
   1.683 +	  iCbArg(aCbArg),
   1.684 +	  iDmaCb(NULL),
   1.685 +	  iDmaCbArg(NULL),
   1.686 +	  iIsrCb(EFalse),
   1.687 +	  iDesCount(0),
   1.688 +	  iFirstHdr(NULL),
   1.689 +	  iLastHdr(NULL),
   1.690 +	  iSrcDesCount(0),
   1.691 +	  iSrcFirstHdr(NULL),
   1.692 +	  iSrcLastHdr(NULL),
   1.693 +	  iDstDesCount(0),
   1.694 +	  iDstFirstHdr(NULL),
   1.695 +	  iDstLastHdr(NULL),
   1.696 +	  iQueued(EFalse),
   1.697 +	  iMaxTransferSize(aMaxTransferSize),
   1.698 +	  iTotalNumSrcElementsTransferred(0),
   1.699 +	  iTotalNumDstElementsTransferred(0)
   1.700 +	{
    1.701 +	__e32_atomic_add_ord32(&iChannel.iReqCount, 1);	// atomic, matching the other ctor and the dtor
   1.702 +	__DMA_ASSERTD(0 <= aMaxTransferSize);
   1.703 +	__DMA_INVARIANT();
   1.704 +	}
   1.705 +
   1.706 +
   1.707 +//
    1.708 +// As DDmaRequest is derived from DBase, the zero initializations aren't
    1.709 +// strictly necessary here, but they make the intent explicit.
   1.710 +//
   1.711 +EXPORT_C DDmaRequest::DDmaRequest(TDmaChannel& aChannel, TDmaCallback aDmaCb,
   1.712 +								  TAny* aCbArg, TUint aMaxTransferSize)
   1.713 +	: iChannel(aChannel),
   1.714 +	  iCb(NULL),
   1.715 +	  iCbArg(NULL),
   1.716 +	  iDmaCb(aDmaCb),
   1.717 +	  iDmaCbArg(aCbArg),
   1.718 +	  iIsrCb(EFalse),
   1.719 +	  iDesCount(0),
   1.720 +	  iFirstHdr(NULL),
   1.721 +	  iLastHdr(NULL),
   1.722 +	  iSrcDesCount(0),
   1.723 +	  iSrcFirstHdr(NULL),
   1.724 +	  iSrcLastHdr(NULL),
   1.725 +	  iDstDesCount(0),
   1.726 +	  iDstFirstHdr(NULL),
   1.727 +	  iDstLastHdr(NULL),
   1.728 +	  iQueued(EFalse),
   1.729 +	  iMaxTransferSize(aMaxTransferSize),
   1.730 +	  iTotalNumSrcElementsTransferred(0),
   1.731 +	  iTotalNumDstElementsTransferred(0)
   1.732 +	{
   1.733 +	__e32_atomic_add_ord32(&iChannel.iReqCount, 1);
   1.734 +	__DMA_INVARIANT();
   1.735 +	}
   1.736 +
   1.737 +
   1.738 +EXPORT_C DDmaRequest::~DDmaRequest()
   1.739 +	{
   1.740 +	__DMA_ASSERTD(!iQueued);
   1.741 +	__DMA_INVARIANT();
   1.742 +	FreeDesList();
   1.743 +	__e32_atomic_add_ord32(&iChannel.iReqCount, TUint32(-1));
   1.744 +	}
   1.745 +
   1.746 +
   1.747 +EXPORT_C TInt DDmaRequest::Fragment(TUint32 aSrc, TUint32 aDest, TInt aCount,
   1.748 +									TUint aFlags, TUint32 aPslInfo)
   1.749 +	{
   1.750 +	__KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::Fragment thread %O "
   1.751 +									"src=0x%08X dest=0x%08X count=%d flags=0x%X psl=0x%08X",
   1.752 +									&Kern::CurrentThread(), aSrc, aDest, aCount, aFlags, aPslInfo));
   1.753 +	__DMA_ASSERTD(aCount > 0);
   1.754 +
   1.755 +	TDmaTransferArgs args(aSrc, aDest, aCount, aFlags, aPslInfo);
   1.756 +
   1.757 +	return Frag(args);
   1.758 +	}
   1.759 +
   1.760 +
   1.761 +EXPORT_C TInt DDmaRequest::Fragment(const TDmaTransferArgs& aTransferArgs)
   1.762 +	{
   1.763 +	__KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::Fragment thread %O", &Kern::CurrentThread()));
   1.764 +
   1.765 +	// Writable temporary working copy of the transfer arguments.
   1.766 +	// We need this because we may have to modify some fields before passing it
   1.767 +	// to the PSL (for example iChannelCookie, iTransferCount,
   1.768 +	// iDstConfig::iAddr, and iSrcConfig::iAddr).
   1.769 +	TDmaTransferArgs args(aTransferArgs);
   1.770 +
   1.771 +	return Frag(args);
   1.772 +	}
   1.773 +
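          +// A hedged client-side sketch (addresses, size, and callback are assumptions
          +// for illustration, not values from this file):
          +//
          +//   void MyDmaDone(DDmaRequest::TResult aResult, TAny* aArg); // DFC callback
          +//
          +//   DDmaRequest* req = new DDmaRequest(*channel, MyDmaDone, this);
          +//   TInt r = req->Fragment(srcAddr, dstAddr, KMyTransferBytes,
          +//                          KDmaMemSrc | KDmaIncSrc | KDmaMemDest | KDmaIncDest,
          +//                          0 /* no PSL request info */);
          +//   if (r == KErrNone)
          +//       r = req->Queue();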
   1.774 +
   1.775 +TUint DDmaRequest::GetTransferCount(const TDmaTransferArgs& aTransferArgs)
   1.776 +	{
   1.777 +	const TDmaTransferConfig& src = aTransferArgs.iSrcConfig;
   1.778 +	const TDmaTransferConfig& dst = aTransferArgs.iDstConfig;
   1.779 +
   1.780 +	TUint count = aTransferArgs.iTransferCount;
   1.781 +	if (count == 0)
   1.782 +		{
   1.783 +		__KTRACE_OPT(KDMA, Kern::Printf("iTransferCount == 0"));
   1.784 +		count = src.iElementSize * src.iElementsPerFrame *
   1.785 +			src.iFramesPerTransfer;
   1.786 +		const TUint dst_cnt = dst.iElementSize * dst.iElementsPerFrame *
   1.787 +			dst.iFramesPerTransfer;
   1.788 +		if (count != dst_cnt)
   1.789 +			{
   1.790 +			__KTRACE_OPT(KPANIC, Kern::Printf("Error: (count != dst_cnt)"));
   1.791 +			return 0;
   1.792 +			}
   1.793 +		}
   1.794 +	else
   1.795 +		{
   1.796 +		__KTRACE_OPT(KDMA, Kern::Printf("iTransferCount == %d", count));
   1.797 +		// Client shouldn't specify contradictory or incomplete things
   1.798 +		if (src.iElementSize != 0)
   1.799 +			{
   1.800 +			if ((count % src.iElementSize) != 0)
   1.801 +				{
   1.802 +				__KTRACE_OPT(KPANIC,
   1.803 +							 Kern::Printf("Error: ((count %% src.iElementSize) != 0)"));
   1.804 +				return 0;
   1.805 +				}
   1.806 +			if (src.iElementsPerFrame != 0)
   1.807 +				{
   1.808 +				if ((src.iElementSize * src.iElementsPerFrame * src.iFramesPerTransfer) != count)
   1.809 +					{
   1.810 +					__KTRACE_OPT(KPANIC,
   1.811 +								 Kern::Printf("Error: ((src.iElementSize * "
   1.812 +											  "src.iElementsPerFrame * "
   1.813 +											  "src.iFramesPerTransfer) != count)"));
   1.814 +					return 0;
   1.815 +					}
   1.816 +				}
   1.817 +			}
   1.818 +		else
   1.819 +			{
   1.820 +			if (src.iElementsPerFrame != 0)
   1.821 +				{
   1.822 +				__KTRACE_OPT(KPANIC,
   1.823 +							 Kern::Printf("Error: (src.iElementsPerFrame != 0)"));
   1.824 +				return 0;
   1.825 +				}
   1.826 +			if (src.iFramesPerTransfer != 0)
   1.827 +				{
   1.828 +				__KTRACE_OPT(KPANIC,
   1.829 +							 Kern::Printf("Error: (src.iFramesPerTransfer != 0)"));
   1.830 +				return 0;
   1.831 +				}
   1.832 +			if (src.iElementsPerPacket != 0)
   1.833 +				{
   1.834 +				__KTRACE_OPT(KPANIC,
   1.835 +							 Kern::Printf("Error: (src.iElementsPerPacket != 0)"));
   1.836 +				return 0;
   1.837 +				}
   1.838 +			}
   1.839 +		if (dst.iElementSize != 0)
   1.840 +			{
   1.841 +			if ((count % dst.iElementSize) != 0)
   1.842 +				{
   1.843 +				__KTRACE_OPT(KPANIC,
   1.844 +							 Kern::Printf("Error: ((count %% dst.iElementSize) != 0)"));
   1.845 +				return 0;
   1.846 +				}
   1.847 +			if (dst.iElementsPerFrame != 0)
   1.848 +				{
   1.849 +				if ((dst.iElementSize * dst.iElementsPerFrame * dst.iFramesPerTransfer) != count)
   1.850 +					{
   1.851 +					__KTRACE_OPT(KPANIC,
   1.852 +								 Kern::Printf("Error: ((dst.iElementSize * "
   1.853 +											  "dst.iElementsPerFrame * "
   1.854 +											  "dst.iFramesPerTransfer) != count)"));
   1.855 +					return 0;
   1.856 +					}
   1.857 +				}
   1.858 +			}
   1.859 +		else
   1.860 +			{
   1.861 +			if (dst.iElementsPerFrame != 0)
   1.862 +				{
   1.863 +				__KTRACE_OPT(KPANIC,
   1.864 +							 Kern::Printf("Error: (dst.iElementsPerFrame != 0)"));
   1.865 +				return 0;
   1.866 +				}
   1.867 +			if (dst.iFramesPerTransfer != 0)
   1.868 +				{
   1.869 +				__KTRACE_OPT(KPANIC,
   1.870 +							 Kern::Printf("Error: (dst.iFramesPerTransfer != 0)"));
   1.871 +				return 0;
   1.872 +				}
   1.873 +			if (dst.iElementsPerPacket != 0)
   1.874 +				{
   1.875 +				__KTRACE_OPT(KPANIC,
   1.876 +							 Kern::Printf("Error: (dst.iElementsPerPacket != 0)"));
   1.877 +				return 0;
   1.878 +				}
   1.879 +			}
   1.880 +		}
   1.881 +	return count;
   1.882 +	}
   1.883 +
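          +// Worked example: with iTransferCount == 0 and a source side specified as
          +// iElementSize == 4, iElementsPerFrame == 16, iFramesPerTransfer == 8, the
          +// count is 4 * 16 * 8 == 512 bytes; the destination product must then also
          +// equal 512, otherwise the function fails and returns 0.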
   1.884 +
   1.885 +TInt DDmaRequest::Frag(TDmaTransferArgs& aTransferArgs)
   1.886 +	{
   1.887 +	__DMA_ASSERTD(!iQueued);
   1.888 +
   1.889 +	// Transfer count checks
   1.890 +	const TUint count = GetTransferCount(aTransferArgs);
   1.891 +	if (count == 0)
   1.892 +		{
   1.893 +		return KErrArgument;
   1.894 +		}
   1.895 +
   1.896 +	const TDmaTransferConfig& src = aTransferArgs.iSrcConfig;
   1.897 +	const TDmaTransferConfig& dst = aTransferArgs.iDstConfig;
   1.898 +
   1.899 +	// Ask the PSL what the maximum length possible for this transfer is
   1.900 +	TUint max_xfer_len = iChannel.MaxTransferLength(src.iFlags, dst.iFlags,
   1.901 +													aTransferArgs.iPslRequestInfo);
   1.902 +	if (iMaxTransferSize)
   1.903 +		{
   1.904 +		// User has set a size cap
   1.905 +		__KTRACE_OPT(KDMA, Kern::Printf("iMaxTransferSize != 0"));
   1.906 +		__DMA_ASSERTA((iMaxTransferSize <= max_xfer_len) || (max_xfer_len == 0));
   1.907 +		max_xfer_len = iMaxTransferSize;
   1.908 +		}
   1.909 +	else
   1.910 +		{
   1.911 +		// User doesn't care about max size
   1.912 +		if (max_xfer_len == 0)
   1.913 +			{
   1.914 +			// No maximum imposed by controller
   1.915 +			max_xfer_len = count;
   1.916 +			}
   1.917 +		}
   1.918 +
   1.919 +	// ISR callback requested?
   1.920 +	const TBool isr_cb = (aTransferArgs.iFlags & KDmaRequestCallbackFromIsr);
   1.921 +	if (isr_cb)
   1.922 +		{
   1.923 +		// Requesting an ISR callback w/o supplying one?
   1.924 +		if (!iDmaCb)
   1.925 +			{
   1.926 +			return KErrArgument;
   1.927 +			}
   1.928 +		}
   1.929 +
   1.930 +	// Set the channel cookie for the PSL
   1.931 +	aTransferArgs.iChannelCookie = iChannel.PslId();
   1.932 +
   1.933 +	// Now the actual fragmentation
   1.934 +	TInt r;
   1.935 +	if (iChannel.iDmacCaps->iAsymHwDescriptors)
   1.936 +		{
   1.937 +		r = FragAsym(aTransferArgs, count, max_xfer_len);
   1.938 +		}
   1.939 +	else
   1.940 +		{
   1.941 +		r = FragSym(aTransferArgs, count, max_xfer_len);
   1.942 +		}
   1.943 +
   1.944 +	if (r == KErrNone)
   1.945 +		{
   1.946 +		iIsrCb = isr_cb;
   1.947 +		}
   1.948 +
   1.949 +	__DMA_INVARIANT();
   1.950 +	return r;
    1.951 +	}
   1.952 +
   1.953 +
   1.954 +TInt DDmaRequest::FragSym(TDmaTransferArgs& aTransferArgs, TUint aCount,
   1.955 +						  TUint aMaxTransferLen)
   1.956 +	{
   1.957 +	TDmaTransferConfig& src = aTransferArgs.iSrcConfig;
   1.958 +	TDmaTransferConfig& dst = aTransferArgs.iDstConfig;
   1.959 +
   1.960 +	const TBool mem_src = (src.iFlags & KDmaMemAddr);
   1.961 +	const TBool mem_dst = (dst.iFlags & KDmaMemAddr);
   1.962 +
   1.963 +	const TUint align_mask_src = iChannel.AddressAlignMask(src.iFlags,
   1.964 +														   src.iElementSize,
   1.965 +														   aTransferArgs.iPslRequestInfo);
   1.966 +	const TUint align_mask_dst = iChannel.AddressAlignMask(dst.iFlags,
   1.967 +														   dst.iElementSize,
   1.968 +														   aTransferArgs.iPslRequestInfo);
   1.969 +	// Memory buffers must satisfy alignment constraint
   1.970 +	__DMA_ASSERTD(!mem_src || ((src.iAddr & align_mask_src) == 0));
   1.971 +	__DMA_ASSERTD(!mem_dst || ((dst.iAddr & align_mask_dst) == 0));
   1.972 +
   1.973 +	const TUint max_aligned_len = (aMaxTransferLen &
   1.974 +								   ~(Max(align_mask_src, align_mask_dst)));
   1.975 +	// Client and PSL sane?
   1.976 +	__DMA_ASSERTD(max_aligned_len > 0);
   1.977 +
   1.978 +	FreeDesList();			   // revert any previous fragmentation attempt
   1.979 +	TInt r;
   1.980 +	do
   1.981 +		{
   1.982 +		// Allocate fragment
   1.983 +		r = ExpandDesList(/*1*/);
   1.984 +		if (r != KErrNone)
   1.985 +			{
   1.986 +			FreeDesList();
   1.987 +			break;
   1.988 +			}
   1.989 +		// Compute fragment size
   1.990 +		TUint c = Min(aMaxTransferLen, aCount);
   1.991 +		if (mem_src && !(src.iFlags & KDmaPhysAddr))
   1.992 +			{
   1.993 +			__KTRACE_OPT(KDMA, Kern::Printf("mem_src && !(src.iFlags & KDmaPhysAddr)"));
   1.994 +			// @@@ Should also take into account (src.iFlags & KDmaMemIsContiguous)!
   1.995 +			c = MaxPhysSize(src.iAddr, c);
   1.996 +			}
   1.997 +		if (mem_dst && !(dst.iFlags & KDmaPhysAddr))
   1.998 +			{
   1.999 +			__KTRACE_OPT(KDMA, Kern::Printf("mem_dst && !(dst.iFlags & KDmaPhysAddr)"));
  1.1000 +			// @@@ Should also take into account (dst.iFlags & KDmaMemIsContiguous)!
  1.1001 +			c = MaxPhysSize(dst.iAddr, c);
  1.1002 +			}
  1.1003 +		if ((mem_src || mem_dst) && (c < aCount) && (c > max_aligned_len))
  1.1004 +			{
  1.1005 +			// This is not the last fragment of a transfer to/from memory.
  1.1006 +			// We must round down the fragment size so the next one is
  1.1007 +			// correctly aligned.
  1.1008 +			__KTRACE_OPT(KDMA, Kern::Printf("(mem_src || mem_dst) && (c < aCount) && (c > max_aligned_len)"));
  1.1009 +			c = max_aligned_len;
  1.1010 +			}
  1.1011 +
  1.1012 +		// TODO: Make sure an element or frame on neither src or dst side
  1.1013 +		// (which can be of different sizes) never straddles a DMA subtransfer.
  1.1014 +		// (This would be a fragmentation error by the PIL.)
  1.1015 +
  1.1016 +		// Set transfer count for the PSL
  1.1017 +		aTransferArgs.iTransferCount = c;
  1.1018 +		__KTRACE_OPT(KDMA, Kern::Printf("this fragm.: %d (0x%x) total remain.: %d (0x%x)",
  1.1019 +										c, c, aCount, aCount));
  1.1020 +		// Initialise fragment
  1.1021 +		r = iChannel.iController->InitDes(*iLastHdr, aTransferArgs);
  1.1022 +		if (r != KErrNone)
  1.1023 +			{
  1.1024 +			FreeDesList();
  1.1025 +			break;
  1.1026 +			}
  1.1027 +		// Update for next iteration
  1.1028 +		aCount -= c;
  1.1029 +		if (mem_src)
  1.1030 +			src.iAddr += c;
  1.1031 +		if (mem_dst)
  1.1032 +			dst.iAddr += c;
  1.1033 +		}
  1.1034 +	while (aCount > 0);
  1.1035 +
  1.1036 +	return r;
  1.1037 +	}
  1.1038 +
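          +// Worked example of the round-down above (hypothetical numbers): with
          +// aMaxTransferLen == 0x1002 and a combined alignment mask of 0x7,
          +// max_aligned_len == 0x1000. A middle fragment that could carry 0x1002
          +// bytes is trimmed to 0x1000 so the next fragment's addresses stay 8-byte
          +// aligned; the last fragment (c == remaining aCount) is never trimmed.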
  1.1039 +
  1.1040 +TInt DDmaRequest::FragAsym(TDmaTransferArgs& aTransferArgs, TUint aCount,
  1.1041 +						   TUint aMaxTransferLen)
  1.1042 +	{
  1.1043 +	TInt r = FragAsymSrc(aTransferArgs, aCount, aMaxTransferLen);
  1.1044 +	if (r != KErrNone)
  1.1045 +		{
  1.1046 +		FreeSrcDesList();
  1.1047 +		return r;
  1.1048 +		}
  1.1049 +	r = FragAsymDst(aTransferArgs, aCount, aMaxTransferLen);
  1.1050 +	if (r != KErrNone)
  1.1051 +		{
  1.1052 +		FreeSrcDesList();
  1.1053 +		FreeDstDesList();
  1.1054 +		}
  1.1055 +	return r;
  1.1056 +	}
  1.1057 +
  1.1058 +
  1.1059 +TInt DDmaRequest::FragAsymSrc(TDmaTransferArgs& aTransferArgs, TUint aCount,
  1.1060 +							  TUint aMaxTransferLen)
  1.1061 +	{
  1.1062 +	TDmaTransferConfig& src = aTransferArgs.iSrcConfig;
  1.1063 +
  1.1064 +	const TBool mem_src = (src.iFlags & KDmaMemAddr);
  1.1065 +
  1.1066 +	const TUint align_mask = iChannel.AddressAlignMask(src.iFlags,
  1.1067 +													   src.iElementSize,
  1.1068 +													   aTransferArgs.iPslRequestInfo);
  1.1069 +	// Memory buffers must satisfy alignment constraint
  1.1070 +	__DMA_ASSERTD(!mem_src || ((src.iAddr & align_mask) == 0));
  1.1071 +
  1.1072 +	const TUint max_aligned_len = (aMaxTransferLen & ~align_mask);
  1.1073 +	__DMA_ASSERTD(max_aligned_len > 0);				  // bug in PSL if not true
  1.1074 +
  1.1075 +	FreeSrcDesList();
  1.1076 +	TInt r;
  1.1077 +	do
  1.1078 +		{
  1.1079 +		// Allocate fragment
  1.1080 +		r = ExpandSrcDesList(/*1*/);
  1.1081 +		if (r != KErrNone)
  1.1082 +			{
  1.1083 +			break;
  1.1084 +			}
  1.1085 +		// Compute fragment size
  1.1086 +		TUint c = Min(aMaxTransferLen, aCount);
  1.1087 +		if (mem_src && !(src.iFlags & KDmaPhysAddr))
  1.1088 +			{
  1.1089 +			__KTRACE_OPT(KDMA, Kern::Printf("mem_src && !(src.iFlags & KDmaPhysAddr)"));
  1.1090 +			c = MaxPhysSize(src.iAddr, c);
  1.1091 +			}
  1.1092 +		if (mem_src && (c < aCount) && (c > max_aligned_len))
  1.1093 +			{
  1.1094 +			// This is not the last fragment of a transfer from memory.
  1.1095 +			// We must round down the fragment size so the next one is
  1.1096 +			// correctly aligned.
  1.1097 +			__KTRACE_OPT(KDMA, Kern::Printf("mem_src && (c < aCount) && (c > max_aligned_len)"));
  1.1098 +			c = max_aligned_len;
  1.1099 +			}
  1.1100 +		// Set transfer count for the PSL
  1.1101 +		aTransferArgs.iTransferCount = c;
  1.1102 +		__KTRACE_OPT(KDMA, Kern::Printf("this fragm.: %d (0x%x) total remain.: %d (0x%x)",
  1.1103 +										c, c, aCount, aCount));
  1.1104 +		// Initialise fragment
  1.1105 +		r = iChannel.iController->InitSrcHwDes(*iSrcLastHdr, aTransferArgs);
  1.1106 +		if (r != KErrNone)
  1.1107 +			{
  1.1108 +			break;
  1.1109 +			}
  1.1110 +		// Update for next iteration
  1.1111 +		aCount -= c;
  1.1112 +		if (mem_src)
  1.1113 +			src.iAddr += c;
  1.1114 +		}
  1.1115 +	while (aCount > 0);
  1.1116 +
  1.1117 +	return r;
  1.1118 +	}
  1.1119 +
  1.1120 +
  1.1121 +TInt DDmaRequest::FragAsymDst(TDmaTransferArgs& aTransferArgs, TUint aCount,
  1.1122 +							  TUint aMaxTransferLen)
  1.1123 +	{
  1.1124 +	TDmaTransferConfig& dst = aTransferArgs.iDstConfig;
  1.1125 +
  1.1126 +	const TBool mem_dst = (dst.iFlags & KDmaMemAddr);
  1.1127 +
  1.1128 +	const TUint align_mask = iChannel.AddressAlignMask(dst.iFlags,
  1.1129 +													   dst.iElementSize,
  1.1130 +													   aTransferArgs.iPslRequestInfo);
  1.1131 +	// Memory buffers must satisfy alignment constraint
  1.1132 +	__DMA_ASSERTD(!mem_dst || ((dst.iAddr & align_mask) == 0));
  1.1133 +
  1.1134 +	const TUint max_aligned_len = (aMaxTransferLen & ~align_mask);
  1.1135 +	__DMA_ASSERTD(max_aligned_len > 0);				  // bug in PSL if not true
  1.1136 +
  1.1137 +	FreeDstDesList();
  1.1138 +	TInt r;
  1.1139 +	do
  1.1140 +		{
  1.1141 +		// Allocate fragment
  1.1142 +		r = ExpandDstDesList(/*1*/);
  1.1143 +		if (r != KErrNone)
  1.1144 +			{
  1.1145 +			break;
  1.1146 +			}
  1.1147 +		// Compute fragment size
  1.1148 +		TUint c = Min(aMaxTransferLen, aCount);
  1.1149 +		if (mem_dst && !(dst.iFlags & KDmaPhysAddr))
  1.1150 +			{
  1.1151 +			__KTRACE_OPT(KDMA, Kern::Printf("mem_dst && !(dst.iFlags & KDmaPhysAddr)"));
  1.1152 +			c = MaxPhysSize(dst.iAddr, c);
  1.1153 +			}
  1.1154 +		if (mem_dst && (c < aCount) && (c > max_aligned_len))
  1.1155 +			{
  1.1156 +			// This is not the last fragment of a transfer to memory.
  1.1157 +			// We must round down the fragment size so the next one is
  1.1158 +			// correctly aligned.
  1.1159 +			__KTRACE_OPT(KDMA, Kern::Printf("mem_dst && (c < aCount) && (c > max_aligned_len)"));
  1.1160 +			c = max_aligned_len;
  1.1161 +			}
  1.1162 +		// Set transfer count for the PSL
  1.1163 +		aTransferArgs.iTransferCount = c;
  1.1164 +		__KTRACE_OPT(KDMA, Kern::Printf("this fragm.: %d (0x%x) total remain.: %d (0x%x)",
  1.1165 +										c, c, aCount, aCount));
  1.1166 +		// Initialise fragment
  1.1167 +		r = iChannel.iController->InitDstHwDes(*iDstLastHdr, aTransferArgs);
  1.1168 +		if (r != KErrNone)
  1.1169 +			{
  1.1170 +			break;
  1.1171 +			}
  1.1172 +		// Update for next iteration
  1.1173 +		aCount -= c;
  1.1174 +		if (mem_dst)
  1.1175 +			dst.iAddr += c;
  1.1176 +		}
  1.1177 +	while (aCount > 0);
  1.1178 +
  1.1179 +	return r;
  1.1180 +	}
  1.1181 +
  1.1182 +
  1.1183 +EXPORT_C TInt DDmaRequest::Queue()
  1.1184 +	{
  1.1185 +	__KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::Queue thread %O", &Kern::CurrentThread()));
  1.1186 +	__DMA_ASSERTD(iDesCount > 0);	// Not configured? Call Fragment() first!
  1.1187 +	__DMA_ASSERTD(!iQueued);
  1.1188 +
  1.1189 +	// Append request to queue and link new descriptor list to existing one.
  1.1190 +	iChannel.Wait();
  1.1191 +
  1.1192 +	TInt r = KErrGeneral;
  1.1193 +	const TBool ch_isr_cb = __e32_atomic_load_acq32(&iChannel.iIsrCbRequest);
  1.1194 +	if (ch_isr_cb)
  1.1195 +		{
  1.1196 +		// Client mustn't try to queue any new request while one with an ISR
  1.1197 +		// callback is already queued on this channel. This is to make sure
  1.1198 +		// that the channel's Transfer() function is not called by both the ISR
  1.1199 +		// and the client thread at the same time.
  1.1200 +		__KTRACE_OPT(KPANIC, Kern::Printf("An ISR cb request exists - not queueing"));
  1.1201 +		}
  1.1202 +	else if (iIsrCb && !iChannel.IsQueueEmpty())
  1.1203 +		{
  1.1204 +		// Client mustn't try to queue an ISR callback request whilst any
  1.1205 +		// others are still queued on this channel. This is to make sure that
  1.1206 +		// the ISR callback doesn't get executed together with the DFC(s) of
  1.1207 +		// any previous request(s).
  1.1208 +		__KTRACE_OPT(KPANIC, Kern::Printf("Request queue not empty - not queueing"));
  1.1209 +		}
  1.1210 +	else if (iChannel.iIsrDfc & (TUint32)TDmaChannel::KCancelFlagMask)
  1.1211 +		{
  1.1212 +		__KTRACE_OPT(KPANIC, Kern::Printf("Channel requests cancelled - not queueing"));
  1.1213 +		}
  1.1214 +	else
  1.1215 +		{
  1.1216 +		iQueued = ETrue;
  1.1217 +		iChannel.iReqQ.Add(&iLink);
  1.1218 +		// iChannel.iNullPtr points to iChannel.iCurHdr for an empty queue
  1.1219 +		*iChannel.iNullPtr = iFirstHdr;
  1.1220 +		iChannel.iNullPtr = &(iLastHdr->iNext);
  1.1221 +		if (iIsrCb)
  1.1222 +			{
  1.1223 +			// Since we've made sure that there is no other request in the
  1.1224 +			// queue before this, the only thing of relevance is the channel
  1.1225 +			// DFC which might yet have to complete for the previous request,
  1.1226 +			// and this function might indeed have been called from there via
  1.1227 +			// the client callback. This should be all right though as once
  1.1228 +			// we've set the following flag no further Queue()'s will be
  1.1229 +			// possible.
  1.1230 +			__e32_atomic_store_rel32(&iChannel.iIsrCbRequest, ETrue);
  1.1231 +			}
  1.1232 +		iChannel.DoQueue(const_cast<const DDmaRequest&>(*this));
  1.1233 +		r = KErrNone;
  1.1234 +		}
  1.1235 +	iChannel.Signal();
  1.1236 +
  1.1237 +	__DMA_INVARIANT();
  1.1238 +	return r;
  1.1239 +	}
  1.1240 +
  1.1241 +
  1.1242 +EXPORT_C TInt DDmaRequest::ExpandDesList(TInt aCount)
  1.1243 +	{
  1.1244 +	return ExpandDesList(aCount, iDesCount, iFirstHdr, iLastHdr);
  1.1245 +	}
  1.1246 +
  1.1247 +
  1.1248 +EXPORT_C TInt DDmaRequest::ExpandSrcDesList(TInt aCount)
  1.1249 +	{
  1.1250 +	return ExpandDesList(aCount, iSrcDesCount, iSrcFirstHdr, iSrcLastHdr);
  1.1251 +	}
  1.1252 +
  1.1253 +
  1.1254 +EXPORT_C TInt DDmaRequest::ExpandDstDesList(TInt aCount)
  1.1255 +	{
  1.1256 +	return ExpandDesList(aCount, iDstDesCount, iDstFirstHdr, iDstLastHdr);
  1.1257 +	}
  1.1258 +
  1.1259 +
  1.1260 +TInt DDmaRequest::ExpandDesList(TInt aCount, TInt& aDesCount,
  1.1261 +								SDmaDesHdr*& aFirstHdr,
  1.1262 +								SDmaDesHdr*& aLastHdr)
  1.1263 +	{
  1.1264 +	__DMA_ASSERTD(!iQueued);
  1.1265 +	__DMA_ASSERTD(aCount > 0);
  1.1266 +
  1.1267 +	if (aCount > iChannel.iAvailDesCount)
  1.1268 +		{
  1.1269 +		return KErrTooBig;
  1.1270 +		}
  1.1271 +
  1.1272 +	iChannel.iAvailDesCount -= aCount;
  1.1273 +	aDesCount += aCount;
  1.1274 +
  1.1275 +	TDmac& c = *(iChannel.iController);
  1.1276 +	c.Wait();
  1.1277 +
  1.1278 +	if (aFirstHdr == NULL)
  1.1279 +		{
  1.1280 +		// Handle an empty list specially to simplify the following loop
  1.1281 +		aFirstHdr = aLastHdr = c.iFreeHdr;
  1.1282 +		c.iFreeHdr = c.iFreeHdr->iNext;
  1.1283 +		--aCount;
  1.1284 +		}
  1.1285 +	else
  1.1286 +		{
  1.1287 +		aLastHdr->iNext = c.iFreeHdr;
  1.1288 +		}
  1.1289 +
  1.1290 +	// Remove as many descriptors and headers from the free pool as necessary
  1.1291 +	// and ensure hardware descriptors are chained together.
  1.1292 +	while (aCount-- > 0)
  1.1293 +		{
  1.1294 +		__DMA_ASSERTD(c.iFreeHdr != NULL);
  1.1295 +		if (c.iCapsHwDes)
  1.1296 +			{
  1.1297 +			c.ChainHwDes(*aLastHdr, *(c.iFreeHdr));
  1.1298 +			}
  1.1299 +		aLastHdr = c.iFreeHdr;
  1.1300 +		c.iFreeHdr = c.iFreeHdr->iNext;
  1.1301 +		}
  1.1302 +
  1.1303 +	c.Signal();
  1.1304 +
  1.1305 +	aLastHdr->iNext = NULL;
  1.1306 +
  1.1307 +	__DMA_INVARIANT();
  1.1308 +	return KErrNone;
  1.1309 +	}
  1.1310 +
  1.1311 +
  1.1312 +EXPORT_C void DDmaRequest::FreeDesList()
  1.1313 +	{
  1.1314 +	FreeDesList(iDesCount, iFirstHdr, iLastHdr);
  1.1315 +	}
  1.1316 +
  1.1317 +
  1.1318 +EXPORT_C void DDmaRequest::FreeSrcDesList()
  1.1319 +	{
  1.1320 +	FreeDesList(iSrcDesCount, iSrcFirstHdr, iSrcLastHdr);
  1.1321 +	}
  1.1322 +
  1.1323 +
  1.1324 +EXPORT_C void DDmaRequest::FreeDstDesList()
  1.1325 +	{
  1.1326 +	FreeDesList(iDstDesCount, iDstFirstHdr, iDstLastHdr);
  1.1327 +	}
  1.1328 +
  1.1329 +
  1.1330 +void DDmaRequest::FreeDesList(TInt& aDesCount, SDmaDesHdr*& aFirstHdr, SDmaDesHdr*& aLastHdr)
  1.1331 +	{
  1.1332 +	__DMA_ASSERTD(!iQueued);
  1.1333 +
  1.1334 +	if (aDesCount > 0)
  1.1335 +		{
  1.1336 +		iChannel.iAvailDesCount += aDesCount;
  1.1337 +		TDmac& c = *(iChannel.iController);
  1.1338 +		const SDmaDesHdr* hdr = aFirstHdr;
  1.1339 +		while (hdr)
  1.1340 +			{
  1.1341 +			c.ClearHwDes(*hdr);
  1.1342 +			hdr = hdr->iNext;
   1.1343 +			}
  1.1344 +		c.Wait();
  1.1345 +		aLastHdr->iNext = c.iFreeHdr;
  1.1346 +		c.iFreeHdr = aFirstHdr;
  1.1347 +		c.Signal();
  1.1348 +		aFirstHdr = aLastHdr = NULL;
  1.1349 +		aDesCount = 0;
  1.1350 +		}
  1.1351 +	}
  1.1352 +
  1.1353 +
  1.1354 +EXPORT_C void DDmaRequest::EnableSrcElementCounting(TBool /*aResetElementCount*/)
  1.1355 +	{
  1.1356 +	// Not yet implemented.
  1.1357 +	return;
  1.1358 +	}
  1.1359 +
  1.1360 +
  1.1361 +EXPORT_C void DDmaRequest::EnableDstElementCounting(TBool /*aResetElementCount*/)
  1.1362 +	{
  1.1363 +	// Not yet implemented.
  1.1364 +	return;
  1.1365 +	}
  1.1366 +
  1.1367 +
  1.1368 +EXPORT_C void DDmaRequest::DisableSrcElementCounting()
  1.1369 +	{
  1.1370 +	// Not yet implemented.
  1.1371 +	return;
  1.1372 +	}
  1.1373 +
  1.1374 +
  1.1375 +EXPORT_C void DDmaRequest::DisableDstElementCounting()
  1.1376 +	{
  1.1377 +	// Not yet implemented.
  1.1378 +	return;
  1.1379 +	}
  1.1380 +
  1.1381 +
  1.1382 +EXPORT_C TUint32 DDmaRequest::TotalNumSrcElementsTransferred()
  1.1383 +	{
  1.1384 +	// Not yet implemented.
  1.1385 +
  1.1386 +	// So far largely bogus code (just to touch some symbols)...
  1.1387 +	iTotalNumSrcElementsTransferred = 0;
  1.1388 +	TDmac& c = *(iChannel.iController);
  1.1389 +	if (c.iCapsHwDes)
  1.1390 +		{
  1.1391 +		for (const SDmaDesHdr* pH = iFirstHdr; pH != NULL; pH = pH->iNext)
  1.1392 +			{
   1.1393 +			iTotalNumSrcElementsTransferred += c.HwDesNumSrcElementsTransferred(*pH);
  1.1394 +			}
  1.1395 +		}
  1.1396 +	else
  1.1397 +		{
  1.1398 +		// Do something different for pseudo descriptors...
  1.1399 +		}
  1.1400 +	return iTotalNumSrcElementsTransferred;
  1.1401 +	}
  1.1402 +
  1.1403 +
  1.1404 +EXPORT_C TUint32 DDmaRequest::TotalNumDstElementsTransferred()
  1.1405 +	{
  1.1406 +	// Not yet implemented.
  1.1407 +	return iTotalNumDstElementsTransferred;
  1.1408 +	}
  1.1409 +
  1.1410 +
  1.1411 +EXPORT_C TInt DDmaRequest::FragmentCount()
  1.1412 +	{
  1.1413 +	return FragmentCount(iFirstHdr);
  1.1414 +	}
  1.1415 +
  1.1416 +
  1.1417 +EXPORT_C TInt DDmaRequest::SrcFragmentCount()
  1.1418 +	{
  1.1419 +	return FragmentCount(iSrcFirstHdr);
  1.1420 +	}
  1.1421 +
  1.1422 +
  1.1423 +EXPORT_C TInt DDmaRequest::DstFragmentCount()
  1.1424 +	{
  1.1425 +	return FragmentCount(iDstFirstHdr);
  1.1426 +	}
  1.1427 +
  1.1428 +
  1.1429 +TInt DDmaRequest::FragmentCount(const SDmaDesHdr* aHdr)
  1.1430 +	{
  1.1431 +	TInt count = 0;
  1.1432 +	for (const SDmaDesHdr* pH = aHdr; pH != NULL; pH = pH->iNext)
  1.1433 +		{
  1.1434 +		count++;
  1.1435 +		}
  1.1436 +	return count;
  1.1437 +	}
  1.1438 +
  1.1439 +
  1.1440 +//
  1.1441 +// Called when request is removed from request queue in channel
  1.1442 +//
  1.1443 +inline void DDmaRequest::OnDeque()
  1.1444 +	{
  1.1445 +	iQueued = EFalse;
  1.1446 +	iLastHdr->iNext = NULL;
  1.1447 +	iChannel.DoUnlink(*iLastHdr);
  1.1448 +	}
  1.1449 +
  1.1450 +
  1.1451 +#ifdef _DEBUG
  1.1452 +void DDmaRequest::Invariant()
  1.1453 +	{
  1.1454 +	iChannel.Wait();
  1.1455 +	__DMA_ASSERTD(LOGICAL_XOR(iCb, iDmaCb));
  1.1456 +	if (iChannel.iDmacCaps->iAsymHwDescriptors)
  1.1457 +		{
  1.1458 +		__DMA_ASSERTD((0 <= iSrcDesCount) && (iSrcDesCount <= iChannel.iMaxDesCount) &&
  1.1459 +					  (0 <= iDstDesCount) && (iDstDesCount <= iChannel.iMaxDesCount));
  1.1460 +		if (iSrcDesCount == 0)
  1.1461 +			{
  1.1462 +			__DMA_ASSERTD(iDstDesCount == 0);
  1.1463 +			__DMA_ASSERTD(!iQueued);
  1.1464 +			__DMA_ASSERTD(!iSrcFirstHdr && !iSrcLastHdr &&
  1.1465 +						  !iDstFirstHdr && !iDstLastHdr);
  1.1466 +			}
  1.1467 +		else
  1.1468 +			{
  1.1469 +			__DMA_ASSERTD(iChannel.iController->IsValidHdr(iSrcFirstHdr));
  1.1470 +			__DMA_ASSERTD(iChannel.iController->IsValidHdr(iSrcLastHdr));
  1.1471 +			__DMA_ASSERTD(iChannel.iController->IsValidHdr(iDstFirstHdr));
  1.1472 +			__DMA_ASSERTD(iChannel.iController->IsValidHdr(iDstLastHdr));
  1.1473 +			}
  1.1474 +		}
  1.1475 +	else
  1.1476 +		{
  1.1477 +		__DMA_ASSERTD((0 <= iDesCount) && (iDesCount <= iChannel.iMaxDesCount));
  1.1478 +		if (iDesCount == 0)
  1.1479 +			{
  1.1480 +			__DMA_ASSERTD(!iQueued);
  1.1481 +			__DMA_ASSERTD(!iFirstHdr && !iLastHdr);
  1.1482 +			}
  1.1483 +		else
  1.1484 +			{
  1.1485 +			__DMA_ASSERTD(iChannel.iController->IsValidHdr(iFirstHdr));
  1.1486 +			__DMA_ASSERTD(iChannel.iController->IsValidHdr(iLastHdr));
  1.1487 +			}
  1.1488 +		}
  1.1489 +	iChannel.Signal();
  1.1490 +	}
  1.1491 +#endif
  1.1492 +
  1.1493 +
  1.1494 +//////////////////////////////////////////////////////////////////////////////
  1.1495 +// TDmaChannel
  1.1496 +
  1.1497 +_LIT(KDmaChannelMutex, "DMA-Channel");
  1.1498 +
  1.1499 +TDmaChannel::TDmaChannel()
  1.1500 +	: iController(NULL),
  1.1501 +	  iDmacCaps(NULL),
  1.1502 +	  iPslId(0),
  1.1503 +	  iDynChannel(EFalse),
  1.1504 +	  iPriority(KDmaPriorityNone),
  1.1505 +	  iCurHdr(NULL),
  1.1506 +	  iNullPtr(&iCurHdr),
  1.1507 +	  iDfc(Dfc, NULL, 0),
  1.1508 +	  iMaxDesCount(0),
  1.1509 +	  iAvailDesCount(0),
  1.1510 +	  iIsrDfc(0),
  1.1511 +	  iReqQ(),
  1.1512 +	  iReqCount(0),
  1.1513 +	  iCancelInfo(NULL),
  1.1514 +	  iRedoRequest(EFalse),
  1.1515 +	  iIsrCbRequest(EFalse)
  1.1516 +	{
  1.1517 +	const TInt r = Kern::MutexCreate(iMutex, KDmaChannelMutex, KMutexOrdDmaChannel);
  1.1518 +	__DMA_ASSERTA(r == KErrNone);
  1.1519 +
  1.1520 +#ifndef __WINS__
   1.1521 +	// On the emulator this code is called while the codeseg mutex is held. The
   1.1522 +	// invariant would acquire the DMA channel mutex, which is not allowed there.
  1.1523 +	__DMA_INVARIANT();
  1.1524 +#endif
  1.1525 +	}
  1.1526 +
  1.1527 +
  1.1528 +TDmaChannel::~TDmaChannel()
  1.1529 +	{
  1.1530 +	Kern::SafeClose((DObject*&)iMutex, NULL);
  1.1531 +	}
  1.1532 +
  1.1533 +
  1.1534 +//
  1.1535 +// static member function
  1.1536 +//
  1.1537 +EXPORT_C TInt TDmaChannel::Open(const SCreateInfo& aInfo, TDmaChannel*& aChannel)
  1.1538 +	{
  1.1539 +	__KTRACE_OPT(KDMA, Kern::Printf("TDmaChannel::Open thread %O", &Kern::CurrentThread()));
  1.1540 +
  1.1541 +	__DMA_ASSERTD(aInfo.iDesCount >= 1);
  1.1542 +	__DMA_ASSERTD(aInfo.iPriority <= KDmaPriority8);
  1.1543 +	__DMA_ASSERTD(aInfo.iDfcQ != NULL);
  1.1544 +	__DMA_ASSERTD(aInfo.iDfcPriority < KNumDfcPriorities);
  1.1545 +
  1.1546 +	aChannel = NULL;
  1.1547 +
  1.1548 +	DmaChannelMgr::Wait();
  1.1549 +	TDmaChannel* pC = DmaChannelMgr::Open(aInfo.iCookie, aInfo.iDynChannel, aInfo.iPriority);
  1.1550 +	DmaChannelMgr::Signal();
  1.1551 +	if (!pC)
  1.1552 +		{
  1.1553 +		return KErrInUse;
  1.1554 +		}
  1.1555 +	__DMA_ASSERTD(pC->iController != NULL);
  1.1556 +	__DMA_ASSERTD(pC->iDmacCaps != NULL);
  1.1557 +	__DMA_ASSERTD(pC->iController->iCapsHwDes == pC->DmacCaps().iHwDescriptors);
  1.1558 +	// PSL needs to set iDynChannel if and only if dynamic channel was requested
  1.1559 +	__DMA_ASSERTD(!LOGICAL_XOR(aInfo.iDynChannel, pC->iDynChannel));
  1.1560 +
  1.1561 +	const TInt r = pC->iController->ReserveSetOfDes(aInfo.iDesCount);
  1.1562 +	if (r != KErrNone)
  1.1563 +		{
  1.1564 +		pC->Close();
  1.1565 +		return r;
  1.1566 +		}
  1.1567 +	pC->iAvailDesCount = pC->iMaxDesCount = aInfo.iDesCount;
  1.1568 +
  1.1569 +	new (&pC->iDfc) TDfc(&Dfc, pC, aInfo.iDfcQ, aInfo.iDfcPriority);
  1.1570 +
  1.1571 +	aChannel = pC;
  1.1572 +
  1.1573 +#ifdef _DEBUG
  1.1574 +	pC->Invariant();
  1.1575 +#endif
  1.1576 +	__KTRACE_OPT(KDMA, Kern::Printf("opened channel %d", pC->iPslId));
  1.1577 +	return KErrNone;
  1.1578 +	}
  1.1579 +
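          +// A hedged sketch of opening a channel (cookie, descriptor count, and DFC
          +// queue are platform/client assumptions, not values from this file):
          +//
          +//   TDmaChannel::SCreateInfo info;
          +//   info.iCookie = KMyPslChannelId;    // hypothetical PSL channel id
          +//   info.iDesCount = 8;                // descriptors reserved for the channel
          +//   info.iDfcQ = myDfcQ;               // client-supplied TDfcQue*
          +//   info.iDfcPriority = 3;
          +//   TDmaChannel* channel = NULL;
          +//   TInt r = TDmaChannel::Open(info, channel); // KErrInUse if already open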
  1.1580 +
  1.1581 +EXPORT_C void TDmaChannel::Close()
  1.1582 +	{
  1.1583 +	__KTRACE_OPT(KDMA, Kern::Printf("TDmaChannel::Close %d iReqCount=%d", iPslId, iReqCount));
  1.1584 +	__DMA_ASSERTD(IsQueueEmpty());
  1.1585 +	__DMA_ASSERTD(iReqCount == 0);
  1.1586 +
  1.1587 +	// Descriptor leak? -> bug in request code
  1.1588 +	__DMA_ASSERTD(iAvailDesCount == iMaxDesCount);
  1.1589 +
  1.1590 +	__DMA_ASSERTD(!iRedoRequest);
  1.1591 +	__DMA_ASSERTD(!iIsrCbRequest);
  1.1592 +
  1.1593 +	iController->ReleaseSetOfDes(iMaxDesCount);
  1.1594 +	iAvailDesCount = iMaxDesCount = 0;
  1.1595 +
  1.1596 +	DmaChannelMgr::Wait();
  1.1597 +	DmaChannelMgr::Close(this);
  1.1598 +	// The following assignment will be removed once IsOpened() has been
  1.1599 +	// removed. That's because 'this' shouldn't be touched any more once
  1.1600 +	// Close() has returned from the PSL.
  1.1601 +	iController = NULL;
  1.1602 +	DmaChannelMgr::Signal();
  1.1603 +	}
  1.1604 +
  1.1605 +
  1.1606 +EXPORT_C TInt TDmaChannel::LinkToChannel(TDmaChannel* aChannel)
  1.1607 +	{
  1.1608 +	__KTRACE_OPT(KDMA, Kern::Printf("TDmaChannel::LinkToChannel thread %O",
  1.1609 +									&Kern::CurrentThread()));
  1.1610 +	if (aChannel)
  1.1611 +		{
  1.1612 +		return iController->LinkChannels(*this, *aChannel);
  1.1613 +		}
  1.1614 +	else
  1.1615 +		{
  1.1616 +		return iController->UnlinkChannel(*this);
  1.1617 +		}
  1.1618 +	}
  1.1619 +
  1.1620 +
  1.1621 +EXPORT_C TInt TDmaChannel::Pause()
  1.1622 +	{
  1.1623 +	__KTRACE_OPT(KDMA, Kern::Printf("TDmaChannel::Pause thread %O",
  1.1624 +									&Kern::CurrentThread()));
  1.1625 +	return iController->PauseTransfer(*this);
  1.1626 +	}
  1.1627 +
  1.1628 +
  1.1629 +EXPORT_C TInt TDmaChannel::Resume()
  1.1630 +	{
  1.1631 +	__KTRACE_OPT(KDMA, Kern::Printf("TDmaChannel::Resume thread %O",
  1.1632 +									&Kern::CurrentThread()));
  1.1633 +	return iController->ResumeTransfer(*this);
  1.1634 +	}
  1.1635 +
  1.1636 +
  1.1637 +EXPORT_C void TDmaChannel::CancelAll()
  1.1638 +	{
  1.1639 +	__KTRACE_OPT(KDMA, Kern::Printf("TDmaChannel::CancelAll thread %O channel - %d",
  1.1640 +									&Kern::CurrentThread(), iPslId));
  1.1641 +	NThread* const nt = NKern::CurrentThread();
  1.1642 +	TBool wait = EFalse;
  1.1643 +	TDmaCancelInfo cancelinfo;
  1.1644 +	TDmaCancelInfo* waiters = NULL;
  1.1645 +
  1.1646 +	NKern::ThreadEnterCS();
  1.1647 +	Wait();
  1.1648 +
  1.1649 +	NThreadBase* const dfc_nt = iDfc.Thread();
  1.1650 +	// Shouldn't be NULL (a NULL thread here would mean the DFC is an IDFC)
  1.1651 +	__DMA_ASSERTD(dfc_nt);
  1.1652 +
  1.1653 +	__e32_atomic_store_ord32(&iIsrDfc, (TUint32)KCancelFlagMask);
  1.1654 +	// ISRs after this point will not post a DFC; however, a DFC may already
  1.1655 +	// be queued, running, or both.
  1.1656 +	if (!IsQueueEmpty())
  1.1657 +		{
  1.1658 +		// There is a transfer in progress. It may complete before the DMAC
  1.1659 +		// has stopped, but the resulting ISR will not post a DFC.
  1.1660 +		// No ISR should occur after this function returns.
  1.1661 +		iController->StopTransfer(*this);
  1.1662 +
  1.1663 +		ResetStateMachine();
  1.1664 +
  1.1665 +		// Clean-up the request queue.
  1.1666 +		SDblQueLink* pL;
  1.1667 +		while ((pL = iReqQ.GetFirst()) != NULL)
  1.1668 +			{
  1.1669 +			DDmaRequest* pR = _LOFF(pL, DDmaRequest, iLink);
  1.1670 +			pR->OnDeque();
  1.1671 +			}
  1.1672 +		}
  1.1673 +	if (dfc_nt == nt)
  1.1674 +		{
  1.1675 +		// DFC runs in this thread, so just cancel it and we're finished
  1.1676 +		iDfc.Cancel();
  1.1677 +
  1.1678 +		// If other calls to CancelAll() are waiting for the DFC, release them here
  1.1679 +		waiters = iCancelInfo;
  1.1680 +		iCancelInfo = NULL;
  1.1681 +
  1.1682 +		// Reset the ISR count
  1.1683 +		__e32_atomic_store_rel32(&iIsrDfc, 0);
  1.1684 +		}
  1.1685 +	else
  1.1686 +		{
  1.1687 +		// DFC runs in another thread. Make sure it's queued and then wait for it to run.
  1.1688 +		if (iCancelInfo)
  1.1689 +			{
  1.1690 +			// Insert cancelinfo into the list so that it precedes iCancelInfo
  1.1691 +			cancelinfo.InsertBefore(iCancelInfo);
  1.1692 +			}
  1.1693 +		else
  1.1694 +			{
  1.1695 +			iCancelInfo = &cancelinfo;
  1.1696 +			}
  1.1697 +		wait = ETrue;
  1.1698 +		iDfc.Enque();
  1.1699 +		}
  1.1700 +
  1.1701 +	Signal();
  1.1702 +
  1.1703 +	if (waiters)
  1.1704 +		{
  1.1705 +		waiters->Signal();
  1.1706 +		}
  1.1707 +	else if (wait)
  1.1708 +		{
  1.1709 +		NKern::FSWait(&cancelinfo.iSem);
  1.1710 +		}
  1.1711 +
  1.1712 +	NKern::ThreadLeaveCS();
  1.1713 +	__DMA_INVARIANT();
  1.1714 +	}
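         +
         +// Typical client-side teardown order (a sketch): CancelAll() is synchronous,
         +// and Close() asserts that the request queue is empty and that iReqCount is
         +// zero, so all DDmaRequest objects bound to the channel must go first.
         +//
         +//   channel->CancelAll();
         +//   delete request;        // for each request bound to this channel
         +//   channel->Close();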
  1.1715 +
  1.1716 +
  1.1717 +EXPORT_C TInt TDmaChannel::IsrRedoRequest(TUint32 aSrcAddr, TUint32 aDstAddr,
  1.1718 +										  TUint aTransferCount,
  1.1719 +										  TUint32 aPslRequestInfo,
  1.1720 +										  TBool aIsrCb)
  1.1721 +	{
  1.1722 +	__KTRACE_OPT(KDMA,
  1.1723 +				 Kern::Printf("TDmaChannel::IsrRedoRequest src=0x%08x, "
  1.1724 +							  "dst=0x%08x, count=%d, pslInfo=0x%08x, isrCb=%d",
  1.1725 +							  aSrcAddr, aDstAddr, aTransferCount, aPslRequestInfo,
  1.1726 +							  aIsrCb));
  1.1727 +	// This function must be called from ISR context.
  1.1728 +	__DMA_ASSERTD(NKern::CurrentContext() == NKern::EInterrupt);
  1.1729 +
  1.1730 +	__DMA_ASSERTD(!iReqQ.IsEmpty());
  1.1731 +	__DMA_ASSERTD(iIsrCbRequest);
  1.1732 +
  1.1733 +#ifdef _DEBUG
  1.1734 +	if ((aSrcAddr != KPhysAddrInvalid) && (aSrcAddr == aDstAddr))
  1.1735 +		{
  1.1736 +		__KTRACE_OPT(KPANIC,
  1.1737 +					 Kern::Printf("Error: Updating src & dst to same address: 0x%08x",
  1.1738 +								  aSrcAddr));
  1.1739 +		return KErrArgument;
  1.1740 +		}
  1.1741 +#endif
  1.1742 +
  1.1743 +	// We assume here that the just completed request is the first one in the
  1.1744 +	// queue, i.e. that even if there is more than one request in the queue,
  1.1745 +	// their respective last and first (hw) descriptors are *not* linked.
  1.1746 +	// (Although that's what apparently happens in TDmaSgChannel::DoQueue() /
  1.1747 +	// TDmac::AppendHwDes() @@@).
  1.1748 +	DDmaRequest* const pCurReq = _LOFF(iReqQ.First(), DDmaRequest, iLink);
  1.1749 +	TInt r;
  1.1750 +
  1.1751 +	if (iDmacCaps->iAsymHwDescriptors)
  1.1752 +		{
  1.1753 +		// We don't allow multiple-descriptor chains to be updated here
  1.1754 +		__DMA_ASSERTD((pCurReq->iSrcDesCount == 1) && (pCurReq->iDstDesCount == 1));
  1.1755 +		// Adjust parameters if necessary (asymmetrical s/g variety)
  1.1756 +		const SDmaDesHdr* const pSrcFirstHdr = pCurReq->iSrcFirstHdr;
  1.1757 +		if ((aSrcAddr != KPhysAddrInvalid) || aTransferCount || aPslRequestInfo)
  1.1758 +			{
  1.1759 +			r = iController->UpdateSrcHwDes(*pSrcFirstHdr, aSrcAddr,
  1.1760 +											aTransferCount, aPslRequestInfo);
  1.1761 +			if (r != KErrNone)
  1.1762 +				{
  1.1763 +				__KTRACE_OPT(KPANIC, Kern::Printf("Src descriptor updating failed in PSL"));
  1.1764 +				return r;
  1.1765 +				}
  1.1766 +			}
  1.1767 +		const SDmaDesHdr* const pDstFirstHdr = pCurReq->iDstFirstHdr;
  1.1768 +		if ((aDstAddr != KPhysAddrInvalid) || aTransferCount || aPslRequestInfo)
  1.1769 +			{
  1.1770 +			r = iController->UpdateDstHwDes(*pDstFirstHdr, aDstAddr,
  1.1771 +											aTransferCount, aPslRequestInfo);
  1.1772 +			if (r != KErrNone)
  1.1773 +				{
  1.1774 +				__KTRACE_OPT(KPANIC, Kern::Printf("Dst descriptor updating failed in PSL"));
  1.1775 +				return r;
  1.1776 +				}
  1.1777 +			}
  1.1778 +		// Reschedule the request
  1.1779 +		iController->Transfer(*this, *pSrcFirstHdr, *pDstFirstHdr);
  1.1780 +		}
  1.1781 +	else
  1.1782 +		{
  1.1783 +		// We don't allow multiple-descriptor chains to be updated here
  1.1784 +		__DMA_ASSERTD(pCurReq->iDesCount == 1);
  1.1785 +		// Adjust parameters if necessary (symmetrical s/g and non-s/g variety)
  1.1786 +		const SDmaDesHdr* const pFirstHdr = pCurReq->iFirstHdr;
  1.1787 +		if ((aSrcAddr != KPhysAddrInvalid) || (aDstAddr != KPhysAddrInvalid) ||
  1.1788 +			aTransferCount || aPslRequestInfo)
  1.1789 +			{
  1.1790 +			r = iController->UpdateDes(*pFirstHdr, aSrcAddr, aDstAddr,
  1.1791 +									   aTransferCount, aPslRequestInfo);
  1.1792 +			if (r != KErrNone)
  1.1793 +				{
  1.1794 +				__KTRACE_OPT(KPANIC, Kern::Printf("Descriptor updating failed"));
  1.1795 +				return r;
  1.1796 +				}
  1.1797 +			}
  1.1798 +		// Reschedule the request
  1.1799 +		iController->Transfer(*this, *pFirstHdr);
  1.1800 +		}
  1.1801 +
  1.1802 +	if (!aIsrCb)
  1.1803 +		{
  1.1804 +		// Not another ISR callback please
  1.1805 +		pCurReq->iIsrCb = aIsrCb;
  1.1806 +		}
  1.1807 +	iRedoRequest = ETrue;
  1.1808 +
  1.1809 +	return KErrNone;
  1.1810 +	}
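         +
         +// Sketch of a completion callback that re-arms the just-completed request
         +// from ISR context (illustrative: the exact TDmaCallback prototype is
         +// declared in dma.h, and NextDstBuffer() is a hypothetical helper returning
         +// the physical address of the next buffer):
         +//
         +//   void MyIsrCb(TUint aCbType, TDmaResult aResult, TAny* aArg, TAny* /*aHdr*/)
         +//       {
         +//       TDmaChannel* const chan = static_cast<TDmaChannel*>(aArg);
         +//       // Keep src address, count and PSL info; supply a new dst address,
         +//       // and request another ISR callback for the next completion:
         +//       chan->IsrRedoRequest(KPhysAddrInvalid, NextDstBuffer(), 0, 0, ETrue);
         +//       }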
  1.1811 +
  1.1812 +
  1.1813 +EXPORT_C TInt TDmaChannel::FailNext(TInt /*aFragmentCount*/)
  1.1814 +	{
  1.1815 +	return iController->FailNext(*this);
  1.1816 +	}
  1.1817 +
  1.1818 +
  1.1819 +EXPORT_C TInt TDmaChannel::MissNextInterrupts(TInt aInterruptCount)
  1.1820 +	{
  1.1821 +	return iController->MissNextInterrupts(*this, aInterruptCount);
  1.1822 +	}
  1.1823 +
  1.1824 +
  1.1825 +EXPORT_C TInt TDmaChannel::Extension(TInt aCmd, TAny* aArg)
  1.1826 +	{
  1.1827 +	return iController->Extension(*this, aCmd, aArg);
  1.1828 +	}
  1.1829 +
  1.1830 +
  1.1831 +//
  1.1832 +// static member function
  1.1833 +//
  1.1834 +EXPORT_C TInt TDmaChannel::StaticExtension(TInt aCmd, TAny* aArg)
  1.1835 +	{
  1.1836 +	return DmaChannelMgr::StaticExtension(aCmd, aArg);
  1.1837 +	}
  1.1838 +
  1.1839 +
  1.1840 +EXPORT_C TUint TDmaChannel::MaxTransferLength(TUint aSrcFlags, TUint aDstFlags,
  1.1841 +											  TUint32 aPslInfo)
  1.1842 +	{
  1.1843 +	return iController->MaxTransferLength(*this, aSrcFlags, aDstFlags, aPslInfo);
  1.1844 +	}
  1.1845 +
  1.1846 +
  1.1847 +EXPORT_C TUint TDmaChannel::AddressAlignMask(TUint aTargetFlags, TUint aElementSize,
  1.1848 +											 TUint32 aPslInfo)
  1.1849 +	{
  1.1850 +	return iController->AddressAlignMask(*this, aTargetFlags, aElementSize, aPslInfo);
  1.1851 +	}
  1.1852 +
  1.1853 +
  1.1854 +EXPORT_C const SDmacCaps& TDmaChannel::DmacCaps()
  1.1855 +	{
  1.1856 +	return *iDmacCaps;
  1.1857 +	}
  1.1858 +
  1.1859 +
  1.1860 +//
  1.1861 +// DFC callback function (static member).
  1.1862 +//
  1.1863 +void TDmaChannel::Dfc(TAny* aArg)
  1.1864 +	{
  1.1865 +	static_cast<TDmaChannel*>(aArg)->DoDfc();
  1.1866 +	}
  1.1867 +
  1.1868 +
  1.1869 +//
  1.1870 +// This is quite a long function, but what can you do...
  1.1871 +//
  1.1872 +void TDmaChannel::DoDfc()
  1.1873 +	{
  1.1874 +	__KTRACE_OPT(KDMA, Kern::Printf("TDmaChannel::DoDfc thread %O channel - %d",
  1.1875 +									&Kern::CurrentThread(), iPslId));
  1.1876 +	Wait();
  1.1877 +
  1.1878 +	// Atomically fetch and reset the number of DFCs queued by the ISR and the
  1.1879 +	// error flag. Leave the cancel flag alone for now.
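         +	// Layout of the iIsrDfc word, as implied by the masks used below (the
         +	// actual constants are defined in the TDmaChannel class declaration):
         +	//   KCancelFlagMask - set by CancelAll() to stop further DFC processing
         +	//   KErrorFlagMask  - set by the ISR when the PSL reports a transfer error
         +	//   KDfcCountMask   - number of DFCs queued by the ISR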
  1.1880 +	const TUint32 w = __e32_atomic_and_ord32(&iIsrDfc, (TUint32)KCancelFlagMask);
  1.1881 +	TUint32 count = w & KDfcCountMask;
  1.1882 +	const TBool error = w & (TUint32)KErrorFlagMask;
  1.1883 +	TBool stop = w & (TUint32)KCancelFlagMask;
  1.1884 +	__DMA_ASSERTD((count > 0) || stop);
  1.1885 +
  1.1886 +	__DMA_ASSERTD(!iRedoRequest); // We shouldn't be here if this is true
  1.1887 +
  1.1888 +	while (count && !stop)
  1.1889 +		{
  1.1890 +		--count;
  1.1891 +
  1.1892 +		__DMA_ASSERTD(!iReqQ.IsEmpty());
  1.1893 +
  1.1894 +		// If an error occurred it must have been reported on the last
  1.1895 +		// interrupt since transfers are suspended after an error.
  1.1896 +		DDmaRequest::TResult const res = (count == 0 && error) ?
  1.1897 +			DDmaRequest::EError : DDmaRequest::EOk;
  1.1898 +		DDmaRequest* pCompletedReq = NULL;
  1.1899 +		DDmaRequest* const pCurReq = _LOFF(iReqQ.First(), DDmaRequest, iLink);
  1.1900 +
  1.1901 +		if (res == DDmaRequest::EOk)
  1.1902 +			{
  1.1903 +			// Update state machine, current fragment, completed fragment and
  1.1904 +			// tell the DMAC to transfer the next fragment if necessary.
  1.1905 +			SDmaDesHdr* pCompletedHdr = NULL;
  1.1906 +			DoDfc(const_cast<const DDmaRequest&>(*pCurReq), pCompletedHdr);
  1.1907 +
  1.1908 +			// If just completed last fragment from current request, switch to
  1.1909 +			// next request (if any).
  1.1910 +			if (pCompletedHdr == pCurReq->iLastHdr)
  1.1911 +				{
  1.1912 +				pCompletedReq = pCurReq;
  1.1913 +				pCurReq->iLink.Deque();
  1.1914 +				if (iReqQ.IsEmpty())
  1.1915 +					iNullPtr = &iCurHdr;
  1.1916 +				pCompletedReq->OnDeque();
  1.1917 +				}
  1.1918 +			}
  1.1919 +		else
  1.1920 +			{
  1.1921 +			pCompletedReq = pCurReq;
  1.1922 +			}
  1.1923 +
  1.1924 +		if (pCompletedReq && !pCompletedReq->iIsrCb)
  1.1925 +			{
  1.1926 +			// Don't execute ISR callbacks here (they have already been called)
  1.1927 +			DDmaRequest::TCallback const cb = pCompletedReq->iCb;
  1.1928 +			if (cb)
  1.1929 +				{
  1.1930 +				// Old style callback
  1.1931 +				TAny* const arg = pCompletedReq->iCbArg;
  1.1932 +				Signal();
  1.1933 +				__KTRACE_OPT(KDMA, Kern::Printf("Client CB res=%d", res));
  1.1934 +				(*cb)(res, arg);
  1.1935 +				Wait();
  1.1936 +				}
  1.1937 +			else
  1.1938 +				{
  1.1939 +				// New style callback
  1.1940 +				TDmaCallback const ncb = pCompletedReq->iDmaCb;
  1.1941 +				if (ncb)
  1.1942 +					{
  1.1943 +					TAny* const arg = pCompletedReq->iDmaCbArg;
  1.1944 +					TDmaResult const result = (res == DDmaRequest::EOk) ?
  1.1945 +						EDmaResultOK : EDmaResultError;
  1.1946 +					Signal();
  1.1947 +					__KTRACE_OPT(KDMA, Kern::Printf("Client CB result=%d", result));
  1.1948 +					(*ncb)(EDmaCallbackRequestCompletion, result, arg, NULL);
  1.1949 +					Wait();
  1.1950 +					}
  1.1951 +				}
  1.1952 +			}
  1.1953 +		else
  1.1954 +			{
  1.1955 +			// Allow another thread in, in case they are trying to cancel
  1.1956 +			Flash();
  1.1957 +			}
  1.1958 +		stop = __e32_atomic_load_acq32(&iIsrDfc) & (TUint32)KCancelFlagMask;
  1.1959 +		}
  1.1960 +
  1.1961 +	// Some interrupts may be missed (double-buffer and scatter-gather
  1.1962 +	// controllers only) if two or more transfers complete while interrupts are
  1.1963 +	// disabled in the CPU. If this happens, the framework will go out of sync
  1.1964 +	// and leave some orphaned requests in the queue.
  1.1965 +	//
  1.1966 +	// To ensure correctness we handle this case here by checking that the request
  1.1967 +	// queue is empty when all transfers have completed and, if not, cleaning up
  1.1968 +	// and notifying the client of the completion of the orphaned requests.
  1.1969 +	//
  1.1970 +	// Note that if some interrupts are missed and the controller raises an
  1.1971 +	// error while transferring a subsequent fragment, the error will be reported
  1.1972 +	// on a fragment which was successfully completed. There is no easy solution
  1.1973 +	// to this problem, but this is okay as the only possible action following a
  1.1974 +	// failure is to flush the whole queue.
  1.1975 +	if (stop)
  1.1976 +		{
  1.1977 +		// If another thread set the cancel flag, it should have
  1.1978 +		// cleaned up the request queue
  1.1979 +		__DMA_ASSERTD(IsQueueEmpty());
  1.1980 +
  1.1981 +		TDmaCancelInfo* const waiters = iCancelInfo;
  1.1982 +		iCancelInfo = NULL;
  1.1983 +
  1.1984 +		// make sure DFC doesn't run again until a new request completes
  1.1985 +		iDfc.Cancel();
  1.1986 +
  1.1987 +		// reset the ISR count - new requests can now be processed
  1.1988 +		__e32_atomic_store_rel32(&iIsrDfc, 0);
  1.1989 +
  1.1990 +		Signal();
  1.1991 +
  1.1992 +		// release threads doing CancelAll()
  1.1993 +		waiters->Signal();
  1.1994 +		}
  1.1995 +	else if (!error && !iReqQ.IsEmpty() && iController->IsIdle(*this))
  1.1996 +		{
  1.1997 +#ifdef __SMP__
  1.1998 +		// On an SMP system we must call StopTransfer(): it will block until
  1.1999 +		// any ISRs have completed, so that the system does not spuriously
  1.2000 +		// attempt to recover from a missed interrupt.
  1.2001 +		//
  1.2002 +		// On an SMP system it is possible for the code here to execute
  1.2003 +		// concurrently with the DMA ISR. It is therefore possible that at this
  1.2004 +		// point the previous transfer has already completed (so that IsIdle
  1.2005 +		// reports true), but that the ISR has not yet queued a DFC. Therefore
  1.2006 +		// we must wait for the ISR to complete.
  1.2007 +		//
  1.2008 +		// StopTransfer should have no other side effect, given that the
  1.2009 +		// channel is already idle.
  1.2010 +		iController->StopTransfer(*this); // should block till ISR completion
  1.2011 +#endif
  1.2012 +
  1.2013 +		const TBool cleanup = !iDfc.Queued();
  1.2014 +		if (cleanup)
  1.2015 +			{
  1.2016 +			__KTRACE_OPT(KDMA, Kern::Printf("Missed interrupt(s) - draining request queue"));
  1.2017 +			ResetStateMachine();
  1.2018 +
  1.2019 +			// Move orphaned requests to a temporary queue so the channel
  1.2020 +			// queue can accept new requests.
  1.2021 +			SDblQue q;
  1.2022 +			q.MoveFrom(&iReqQ);
  1.2023 +
  1.2024 +			SDblQueLink* pL;
  1.2025 +			while ((pL = q.GetFirst()) != NULL)
  1.2026 +				{
  1.2027 +				DDmaRequest* const pR = _LOFF(pL, DDmaRequest, iLink);
  1.2028 +				__KTRACE_OPT(KDMA, Kern::Printf("Removing request from queue and notifying client"));
  1.2029 +				pR->OnDeque();
  1.2030 +				// Old style callback
  1.2031 +				DDmaRequest::TCallback const cb = pR->iCb;
  1.2032 +				if (cb)
  1.2033 +					{
  1.2034 +					TAny* const arg = pR->iCbArg;
  1.2035 +					Signal();
  1.2036 +					(*cb)(DDmaRequest::EOk, arg);
  1.2037 +					Wait();
  1.2038 +					}
  1.2039 +				else
  1.2040 +					{
  1.2041 +					// New style callback
  1.2042 +					TDmaCallback const ncb = pR->iDmaCb;
  1.2043 +					if (ncb)
  1.2044 +						{
  1.2045 +						TAny* const arg = pR->iDmaCbArg;
  1.2046 +						Signal();
  1.2047 +						(*ncb)(EDmaCallbackRequestCompletion, EDmaResultOK, arg, NULL);
  1.2048 +						Wait();
  1.2049 +						}
  1.2050 +					}
  1.2051 +				}
  1.2052 +			}
  1.2053 +		Signal();
  1.2054 +		}
  1.2055 +	else
  1.2056 +		Signal();
  1.2057 +
  1.2058 +	__DMA_INVARIANT();
  1.2059 +	}
  1.2060 +
  1.2061 +
  1.2062 +//
  1.2063 +// Reset the state machine only; the request queue is unchanged.
  1.2064 +//
  1.2065 +void TDmaChannel::ResetStateMachine()
  1.2066 +	{
  1.2067 +	DoCancelAll();
  1.2068 +	iCurHdr = NULL;
  1.2069 +	iNullPtr = &iCurHdr;
  1.2070 +	}
  1.2071 +
  1.2072 +
  1.2073 +void TDmaChannel::DoQueue(const DDmaRequest& /*aReq*/)
  1.2074 +	{
  1.2075 +	// Must be overridden
  1.2076 +	__DMA_CANT_HAPPEN();
  1.2077 +	}
  1.2078 +
  1.2079 +
  1.2080 +//
  1.2081 +// Unlink the last item of an LLI chain from the next chain.
  1.2082 +// Default implementation does nothing. This is overridden by scatter-gather
  1.2083 +// channels.
  1.2084 +//
  1.2085 +void TDmaChannel::DoUnlink(SDmaDesHdr& /*aHdr*/)
  1.2086 +	{
  1.2087 +	}
  1.2088 +
  1.2089 +
  1.2090 +void TDmaChannel::DoDfc(const DDmaRequest& /*aCurReq*/, SDmaDesHdr*& /*aCompletedHdr*/)
  1.2091 +	{
  1.2092 +	// This default implementation must never run: channel classes for which
  1.2093 +	// this DoDfc() variant is appropriate override it, so reaching this code
  1.2094 +	// means the variant was called on a channel type it doesn't apply to.
  1.2095 +	__DMA_CANT_HAPPEN();
  1.2096 +	}
  1.2097 +
  1.2098 +
  1.2099 +void TDmaChannel::DoDfc(const DDmaRequest& /*aCurReq*/, SDmaDesHdr*& /*aSrcCompletedHdr*/,
  1.2100 +						SDmaDesHdr*& /*aDstCompletedHdr*/)
  1.2101 +	{
  1.2102 +	// This default implementation must never run: channel classes for which
  1.2103 +	// this DoDfc() variant is appropriate override it, so reaching this code
  1.2104 +	// means the variant was called on a channel type it doesn't apply to.
  1.2105 +	__DMA_CANT_HAPPEN();
  1.2106 +	}
  1.2107 +
  1.2108 +
  1.2109 +#ifdef _DEBUG
  1.2110 +void TDmaChannel::Invariant()
  1.2111 +	{
  1.2112 +	Wait();
  1.2113 +
  1.2114 +	__DMA_ASSERTD(iReqCount >= 0);
  1.2115 +
  1.2116 +	__DMA_ASSERTD(iCurHdr == NULL || iController->IsValidHdr(iCurHdr));
  1.2117 +
  1.2118 +	// iNullPtr should always point to the NULL pointer that terminates the fragment queue
  1.2119 +	__DMA_ASSERTD(*iNullPtr == NULL);
  1.2120 +
  1.2121 +	__DMA_ASSERTD((0 <= iAvailDesCount) && (iAvailDesCount <= iMaxDesCount));
  1.2122 +
  1.2123 +	__DMA_ASSERTD(LOGICAL_XOR(iCurHdr, IsQueueEmpty()));
  1.2124 +	if (iCurHdr == NULL)
  1.2125 +		{
  1.2126 +		__DMA_ASSERTD(iNullPtr == &iCurHdr);
  1.2127 +		}
  1.2128 +
  1.2129 +	Signal();
  1.2130 +	}
  1.2131 +#endif
  1.2132 +
  1.2133 +
  1.2134 +//////////////////////////////////////////////////////////////////////////////
  1.2135 +// TDmaSbChannel
  1.2136 +
  1.2137 +void TDmaSbChannel::DoQueue(const DDmaRequest& /*aReq*/)
  1.2138 +	{
  1.2139 +	if (iState != ETransferring)
  1.2140 +		{
  1.2141 +		iController->Transfer(*this, *iCurHdr);
  1.2142 +		iState = ETransferring;
  1.2143 +		}
  1.2144 +	}
  1.2145 +
  1.2146 +
  1.2147 +void TDmaSbChannel::DoCancelAll()
  1.2148 +	{
  1.2149 +	__DMA_ASSERTD(iState == ETransferring);
  1.2150 +	iState = EIdle;
  1.2151 +	}
  1.2152 +
  1.2153 +
  1.2154 +void TDmaSbChannel::DoDfc(const DDmaRequest& /*aCurReq*/, SDmaDesHdr*& aCompletedHdr)
  1.2155 +	{
  1.2156 +	__DMA_ASSERTD(iState == ETransferring);
  1.2157 +	aCompletedHdr = iCurHdr;
  1.2158 +	iCurHdr = iCurHdr->iNext;
  1.2159 +	if (iCurHdr != NULL)
  1.2160 +		{
  1.2161 +		iController->Transfer(*this, *iCurHdr);
  1.2162 +		}
  1.2163 +	else
  1.2164 +		{
  1.2165 +		iState = EIdle;
  1.2166 +		}
  1.2167 +	}
  1.2168 +
  1.2169 +
  1.2170 +//////////////////////////////////////////////////////////////////////////////
  1.2171 +// TDmaDbChannel
  1.2172 +
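         +// State-machine summary, derived from DoQueue()/DoDfc() below (a sketch;
         +// the states themselves are declared in the class definition):
         +//
         +//   EIdle             --queue, 1 fragment-->    ETransferringLast
         +//   EIdle             --queue, >1 fragment-->   ETransferring
         +//   ETransferringLast --queue-->                ETransferring
         +//   ETransferring     --DFC, one frag left-->   ETransferringLast
         +//   ETransferringLast --DFC, last frag done-->  EIdle
         +//
         +// In ETransferring the DMAC always has two fragments programmed (double
         +// buffering); each DoDfc() call feeds it the next-but-one fragment.
         +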
  1.2173 +void TDmaDbChannel::DoQueue(const DDmaRequest& aReq)
  1.2174 +	{
  1.2175 +	switch (iState)
  1.2176 +		{
  1.2177 +	case EIdle:
  1.2178 +		iController->Transfer(*this, *iCurHdr);
  1.2179 +		if (iCurHdr->iNext)
  1.2180 +			{
  1.2181 +			iController->Transfer(*this, *(iCurHdr->iNext));
  1.2182 +			iState = ETransferring;
  1.2183 +			}
  1.2184 +		else
  1.2185 +			iState = ETransferringLast;
  1.2186 +		break;
  1.2187 +	case ETransferring:
  1.2188 +		// nothing to do
  1.2189 +		break;
  1.2190 +	case ETransferringLast:
  1.2191 +		iController->Transfer(*this, *(aReq.iFirstHdr));
  1.2192 +		iState = ETransferring;
  1.2193 +		break;
  1.2194 +	default:
  1.2195 +		__DMA_CANT_HAPPEN();
  1.2196 +		}
  1.2197 +	}
  1.2198 +
  1.2199 +
  1.2200 +void TDmaDbChannel::DoCancelAll()
  1.2201 +	{
  1.2202 +	iState = EIdle;
  1.2203 +	}
  1.2204 +
  1.2205 +
  1.2206 +void TDmaDbChannel::DoDfc(const DDmaRequest& /*aCurReq*/, SDmaDesHdr*& aCompletedHdr)
  1.2207 +	{
  1.2208 +	aCompletedHdr = iCurHdr;
  1.2209 +	iCurHdr = iCurHdr->iNext;
  1.2210 +	switch (iState)
  1.2211 +		{
  1.2212 +	case ETransferringLast:
  1.2213 +		iState = EIdle;
  1.2214 +		break;
  1.2215 +	case ETransferring:
  1.2216 +		if (iCurHdr->iNext == NULL)
  1.2217 +			iState = ETransferringLast;
  1.2218 +		else
  1.2219 +			iController->Transfer(*this, *(iCurHdr->iNext));
  1.2220 +		break;
  1.2221 +	default:
  1.2222 +		__DMA_CANT_HAPPEN();
  1.2223 +		}
  1.2224 +	}
  1.2225 +
  1.2226 +
  1.2227 +//////////////////////////////////////////////////////////////////////////////
  1.2228 +// TDmaSgChannel
  1.2229 +
  1.2230 +void TDmaSgChannel::DoQueue(const DDmaRequest& aReq)
  1.2231 +	{
  1.2232 +	if (iState == ETransferring)
  1.2233 +		{
  1.2234 +		__DMA_ASSERTD(!aReq.iLink.Alone());
  1.2235 +		DDmaRequest* pReqPrev = _LOFF(aReq.iLink.iPrev, DDmaRequest, iLink);
  1.2236 +		iController->AppendHwDes(*this, *(pReqPrev->iLastHdr), *(aReq.iFirstHdr));
  1.2237 +		}
  1.2238 +	else
  1.2239 +		{
  1.2240 +		iController->Transfer(*this, *(aReq.iFirstHdr));
  1.2241 +		iState = ETransferring;
  1.2242 +		}
  1.2243 +	}
  1.2244 +
  1.2245 +
  1.2246 +void TDmaSgChannel::DoCancelAll()
  1.2247 +	{
  1.2248 +	__DMA_ASSERTD(iState == ETransferring);
  1.2249 +	iState = EIdle;
  1.2250 +	}
  1.2251 +
  1.2252 +
  1.2253 +void TDmaSgChannel::DoUnlink(SDmaDesHdr& aHdr)
  1.2254 +	{
  1.2255 +	iController->UnlinkHwDes(*this, aHdr);
  1.2256 +	}
  1.2257 +
  1.2258 +
  1.2259 +void TDmaSgChannel::DoDfc(const DDmaRequest& aCurReq, SDmaDesHdr*& aCompletedHdr)
  1.2260 +	{
  1.2261 +	__DMA_ASSERTD(iState == ETransferring);
  1.2262 +	aCompletedHdr = aCurReq.iLastHdr;
  1.2263 +	iCurHdr = aCompletedHdr->iNext;
  1.2264 +	iState = (iCurHdr != NULL) ? ETransferring : EIdle;
  1.2265 +	}
  1.2266 +
  1.2267 +
  1.2268 +//////////////////////////////////////////////////////////////////////////////
  1.2269 +// TDmaAsymSgChannel
  1.2270 +
  1.2271 +void TDmaAsymSgChannel::DoQueue(const DDmaRequest& aReq)
  1.2272 +	{
  1.2273 +	if (iState == ETransferring)
  1.2274 +		{
  1.2275 +		__DMA_ASSERTD(!aReq.iLink.Alone());
  1.2276 +		DDmaRequest* pReqPrev = _LOFF(aReq.iLink.iPrev, DDmaRequest, iLink);
  1.2277 +		iController->AppendHwDes(*this,
  1.2278 +								 *(pReqPrev->iSrcLastHdr), *(aReq.iSrcFirstHdr),
  1.2279 +								 *(pReqPrev->iDstLastHdr), *(aReq.iDstFirstHdr));
  1.2280 +		}
  1.2281 +	else
  1.2282 +		{
  1.2283 +		iController->Transfer(*this, *(aReq.iSrcFirstHdr), *(aReq.iDstFirstHdr));
  1.2284 +		iState = ETransferring;
  1.2285 +		}
  1.2286 +	}
  1.2287 +
  1.2288 +
  1.2289 +void TDmaAsymSgChannel::DoCancelAll()
  1.2290 +	{
  1.2291 +	__DMA_ASSERTD(iState == ETransferring);
  1.2292 +	iState = EIdle;
  1.2293 +	}
  1.2294 +
  1.2295 +
  1.2296 +void TDmaAsymSgChannel::DoUnlink(SDmaDesHdr& aHdr)
  1.2297 +	{
  1.2298 +	iController->UnlinkHwDes(*this, aHdr);
  1.2299 +	}
  1.2300 +
  1.2301 +
  1.2302 +void TDmaAsymSgChannel::DoDfc(const DDmaRequest& aCurReq, SDmaDesHdr*& aSrcCompletedHdr,
  1.2303 +							  SDmaDesHdr*& aDstCompletedHdr)
  1.2304 +	{
  1.2305 +	__DMA_ASSERTD(iState == ETransferring);
  1.2306 +	aSrcCompletedHdr = aCurReq.iSrcLastHdr;
  1.2307 +	iSrcCurHdr = aSrcCompletedHdr->iNext;
  1.2308 +	aDstCompletedHdr = aCurReq.iDstLastHdr;
  1.2309 +	iDstCurHdr = aDstCompletedHdr->iNext;
  1.2310 +	// Either both must be NULL, or neither.
  1.2311 +	__DMA_ASSERTD(!LOGICAL_XOR(iSrcCurHdr, iDstCurHdr));
  1.2312 +	iState = (iSrcCurHdr != NULL) ? ETransferring : EIdle;
  1.2313 +	}
  1.2314 +