     1 // Copyright (c) 2002-2009 Nokia Corporation and/or its subsidiary(-ies).
     2 // All rights reserved.
     3 // This component and the accompanying materials are made available
     4 // under the terms of "Eclipse Public License v1.0"
     5 // which accompanies this distribution, and is available
     6 // at the URL "http://www.eclipse.org/legal/epl-v10.html".
     7 //
     8 // Initial Contributors:
     9 // Nokia Corporation - initial contribution.
    10 //
    11 // Contributors:
    12 //
    13 // Description:
    14 // e32/drivers/dma2_pil.cpp
    15 // DMA Platform Independent Layer (PIL)
    16 //
    17 //
    18 
    19 #include <drivers/dma.h>
    20 #include <drivers/dma_hai.h>
    21 
    22 #include <kernel/kern_priv.h>
    23 
    24 
    25 // Symbian Min() & Max() are broken, so we have to define them ourselves
    26 inline TUint Min(TUint aLeft, TUint aRight)
    27 	{return(aLeft < aRight ? aLeft : aRight);}
    28 inline TUint Max(TUint aLeft, TUint aRight)
    29 	{return(aLeft > aRight ? aLeft : aRight);}
    30 
    31 
    32 // Uncomment the following #define only when freezing the DMA2 export library.
    33 //#define __FREEZE_DMA2_LIB
    34 #ifdef __FREEZE_DMA2_LIB
    35 TInt DmaChannelMgr::StaticExtension(TInt, TAny*) {return 0;}
    36 TDmaChannel* DmaChannelMgr::Open(TUint32, TBool, TUint) {return 0;}
    37 void DmaChannelMgr::Close(TDmaChannel*) {}
    38 EXPORT_C const TDmaTestInfo& DmaTestInfo() {static TDmaTestInfo a; return a;}
    39 EXPORT_C const TDmaV2TestInfo& DmaTestInfoV2() {static TDmaV2TestInfo a; return a;}
    40 #endif	// #ifdef __FREEZE_DMA2_LIB
    41 
    42 
    43 static const char KDmaPanicCat[] = "DMA " __FILE__;
    44 
    45 //////////////////////////////////////////////////////////////////////
    46 // DmaChannelMgr
    47 //
    48 // Wait, Signal, and Initialise are defined here in the PIL.
    49 // Open, Close and StaticExtension must be defined in the PSL.
    50 
    51 NFastMutex DmaChannelMgr::Lock;
    52 
    53 
    54 void DmaChannelMgr::Wait()
    55 	{
    56 	NKern::FMWait(&Lock);
    57 	}
    58 
    59 
    60 void DmaChannelMgr::Signal()
    61 	{
    62 	NKern::FMSignal(&Lock);
    63 	}
    64 
    65 
    66 TInt DmaChannelMgr::Initialise()
    67 	{
    68 	return KErrNone;
    69 	}
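
// For reference, minimal PSL-side implementations of the factory functions
// might look like the following sketch ('TheController' and the cookie
// lookup are hypothetical; a real PSL validates aOpenId against its own
// channel table):
//
//	TDmaChannel* DmaChannelMgr::Open(TUint32 aOpenId, TBool /*aDynChannel*/, TUint /*aPriority*/)
//		{
//		// Called with the manager lock held (Wait()/Signal() above)
//		TDmaChannel* const pC = TheController.ChannelFromCookie(aOpenId);
//		return (pC && !pC->IsOpened()) ? pC : NULL;
//		}
//
//	void DmaChannelMgr::Close(TDmaChannel* /*aChannel*/)
//		{
//		// Nothing to release for this simple controller
//		}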
    70 
    71 
    72 class TDmaCancelInfo : public SDblQueLink
    73 	{
    74 public:
    75 	TDmaCancelInfo();
    76 	void Signal();
    77 public:
    78 	NFastSemaphore iSem;
    79 	};
    80 
    81 
    82 TDmaCancelInfo::TDmaCancelInfo()
    83 	: iSem(0)
    84 	{
    85 	iNext = this;
    86 	iPrev = this;
    87 	}
    88 
    89 
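//
// Dequeues and signals every TDmaCancelInfo on the (circular) list in one
// pass, releasing all threads blocked in CancelAll(). The last, self-linked
// element is signalled without being dequeued.
//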
    90 void TDmaCancelInfo::Signal()
    91 	{
    92 	TDmaCancelInfo* p = this;
    93 	FOREVER
    94 		{
    95 		TDmaCancelInfo* next = (TDmaCancelInfo*)p->iNext;
    96 		if (p!=next)
    97 			p->Deque();
    98 		NKern::FSSignal(&p->iSem);	// Don't dereference p after this
    99 		if (p==next)
   100 			break;
   101 		p = next;
   102 		}
   103 	}
   104 
   105 
   106 //////////////////////////////////////////////////////////////////////////////
   107 
   108 #ifdef __DMASIM__
   109 #ifdef __WINS__
   110 typedef TLinAddr TPhysAddr;
   111 #endif
   112 static inline TPhysAddr LinToPhys(TLinAddr aLin) {return aLin;}
   113 #else
   114 static inline TPhysAddr LinToPhys(TLinAddr aLin) {return Epoc::LinearToPhysical(aLin);}
   115 #endif
   116 
   117 //
   118 // Return minimum of aMaxSize and size of largest physically contiguous block
   119 // starting at aLinAddr.
   120 //
   121 static TInt MaxPhysSize(TLinAddr aLinAddr, const TInt aMaxSize)
   122 	{
   123 	const TPhysAddr physBase = LinToPhys(aLinAddr);
   124 	TLinAddr lin = aLinAddr;
   125 	TInt size = 0;
   126 	for (;;)
   127 		{
   128 		// Round up the linear address to the next MMU page boundary
   129 		const TLinAddr linBoundary = Kern::RoundToPageSize(lin + 1);
   130 		size += linBoundary - lin;
   131 		if (size >= aMaxSize)
   132 			return aMaxSize;
   133 		if ((physBase + size) != LinToPhys(linBoundary))
   134 			return size;
   135 		lin = linBoundary;
   136 		}
   137 	}
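
// Worked example (assuming 4KB MMU pages): for aLinAddr = 0x20000F00 and
// aMaxSize = 0x3000, the first iteration covers the 0x100 bytes up to the
// page boundary at 0x20001000. If the pages mapped at 0x20001000 and
// 0x20002000 are physically contiguous with the first, the function
// returns 0x3000; if the physical run is broken at 0x20002000, it
// returns 0x1100.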
   138 
   139 
   140 //////////////////////////////////////////////////////////////////////////////
   141 // TDmac
   142 
   143 TDmac::TDmac(const SCreateInfo& aInfo)
   144 	: iMaxDesCount(aInfo.iDesCount),
   145 	  iAvailDesCount(aInfo.iDesCount),
   146 	  iHdrPool(NULL),
   147 #ifndef __WINS__
   148 	  iHwDesChunk(NULL),
   149 #endif
   150 	  iDesPool(NULL),
   151 	  iDesSize(aInfo.iDesSize),
   152 	  iCapsHwDes(aInfo.iCapsHwDes),
   153 	  iFreeHdr(NULL)
   154 	{
   155 	__DMA_ASSERTD(iMaxDesCount > 0);
   156 	__DMA_ASSERTD(iDesSize > 0);
   157 	}
   158 
   159 
   160 //
   161 // Second-phase c'tor
   162 //
   163 TInt TDmac::Create(const SCreateInfo& aInfo)
   164 	{
   165 	iHdrPool = new SDmaDesHdr[iMaxDesCount];
   166 	if (iHdrPool == NULL)
   167 		{
   168 		return KErrNoMemory;
   169 		}
   170 
   171 	TInt r = AllocDesPool(aInfo.iDesChunkAttribs);
   172 	if (r != KErrNone)
   173 		{
   174 		return r;
   175 		}
   176 
   177 	// Link all descriptor headers together on the free list
   178 	iFreeHdr = iHdrPool;
   179 	for (TInt i = 0; i < iMaxDesCount - 1; i++)
   180 		iHdrPool[i].iNext = iHdrPool + i + 1;
   181 	iHdrPool[iMaxDesCount-1].iNext = NULL;
   182 
   183 	__DMA_INVARIANT();
   184 	return KErrNone;
   185 	}
   186 
   187 
   188 TDmac::~TDmac()
   189 	{
   190 	__DMA_INVARIANT();
   191 
   192 	FreeDesPool();
   193 	delete[] iHdrPool;
   194 	}
   195 
   196 
   197 void TDmac::Transfer(const TDmaChannel& /*aChannel*/, const SDmaDesHdr& /*aHdr*/)
   198 	{
   199 	// A concrete controller (PSL) must override this function if it has
   200 	// reported support for a channel type for which the PIL calls it.
   201 	__DMA_CANT_HAPPEN();
   202 	}
   203 
   204 
   205 void TDmac::Transfer(const TDmaChannel& /*aChannel*/, const SDmaDesHdr& /*aSrcHdr*/,
   206 					 const SDmaDesHdr& /*aDstHdr*/)
   207 	{
   208 	// A concrete controller (PSL) must override this function if it has
   209 	// reported support for a channel type for which the PIL calls it.
   210 	__DMA_CANT_HAPPEN();
   211 	}
   212 
   213 
   214 TInt TDmac::PauseTransfer(const TDmaChannel& /*aChannel*/)
   215 	{
   216 	// TDmac needs to override this function if it has reported support for
   217 	// channel pausing/resuming.
   218 	return KErrNotSupported;
   219 	}
   220 
   221 
   222 TInt TDmac::ResumeTransfer(const TDmaChannel& /*aChannel*/)
   223 	{
   224 	// TDmac needs to override this function if it has reported support for
   225 	// channel pausing/resuming.
   226 	return KErrNotSupported;
   227 	}
   228 
   229 
   230 TInt TDmac::AllocDesPool(TUint aAttribs)
   231 	{
   232 	// Calling thread must be in CS
   233 	__ASSERT_CRITICAL;
   234 	TInt r;
   235 	if (iCapsHwDes)
   236 		{
   237 		const TInt size = iMaxDesCount * iDesSize;
   238 #ifdef __WINS__
   239 		(void)aAttribs;
   240 		iDesPool = new TUint8[size];
   241 		r = iDesPool ? KErrNone : KErrNoMemory;
   242 #else
   243 		// If the chunk is not mapped supervisor r/w, user none, then the PSL passed an incorrect attribute mask
   244 		__DMA_ASSERTD((aAttribs & EMapAttrAccessMask) == EMapAttrSupRw);
   245 		TPhysAddr phys;
   246 		r = Epoc::AllocPhysicalRam(size, phys);
   247 		if (r == KErrNone)
   248 			{
   249 			r = DPlatChunkHw::New(iHwDesChunk, phys, size, aAttribs);
   250 			if (r == KErrNone)
   251 				{
   252 				iDesPool = (TAny*)iHwDesChunk->LinearAddress();
   253 				__KTRACE_OPT(KDMA, Kern::Printf("descriptor hw chunk created lin=0x%08X phys=0x%08X, size=0x%X",
   254 												iHwDesChunk->iLinAddr, iHwDesChunk->iPhysAddr, size));
   255 				}
   256 			else
   257 				Epoc::FreePhysicalRam(phys, size);
   258 			}
   259 #endif
   260 		}
   261 	else
   262 		{
   263 		iDesPool = new TDmaTransferArgs[iMaxDesCount];
   264 		r = iDesPool ? KErrNone : KErrNoMemory;
   265 		}
   266 	return r;
   267 	}
   268 
   269 
   270 void TDmac::FreeDesPool()
   271 	{
   272 	// Calling thread must be in CS
   273 	__ASSERT_CRITICAL;
   274 	if (iCapsHwDes)
   275 		{
   276 #ifdef __WINS__
   277 		delete[] iDesPool;
   278 #else
   279 		if (iHwDesChunk)
   280 			{
   281 			const TPhysAddr phys = iHwDesChunk->PhysicalAddress();
   282 			const TInt size = iHwDesChunk->iSize;
   283 			iHwDesChunk->Close(NULL);
   284 			Epoc::FreePhysicalRam(phys, size);
   285 			}
   286 #endif
   287 		}
   288 	else
   289 		{
   290 		Kern::Free(iDesPool);
   291 		}
   292 	}
   293 
   294 
   295 //
   296 // Prealloc the given number of descriptors.
   297 //
   298 TInt TDmac::ReserveSetOfDes(TInt aCount)
   299 	{
   300 	__KTRACE_OPT(KDMA, Kern::Printf("TDmac::ReserveSetOfDes count=%d", aCount));
   301 	__DMA_ASSERTD(aCount > 0);
   302 	TInt r = KErrTooBig;
   303 	Wait();
   304 	if (iAvailDesCount - aCount >= 0)
   305 		{
   306 		iAvailDesCount -= aCount;
   307 		r = KErrNone;
   308 		}
   309 	Signal();
   310 	__DMA_INVARIANT();
   311 	return r;
   312 	}
   313 
   314 
   315 //
   316 // Return the given number of preallocated descriptors to the free pool.
   317 //
   318 void TDmac::ReleaseSetOfDes(TInt aCount)
   319 	{
   320 	__DMA_ASSERTD(aCount >= 0);
   321 	Wait();
   322 	iAvailDesCount += aCount;
   323 	Signal();
   324 	__DMA_INVARIANT();
   325 	}
   326 
   327 
   328 //
   329 // Queue DFC and update word used to communicate with channel DFC.
   330 //
   331 // Called in interrupt context by PSL.
   332 //
   333 void TDmac::HandleIsr(TDmaChannel& aChannel, TUint aEventMask, TBool aIsComplete)
   334 	{
   335 	__KTRACE_OPT(KDMA, Kern::Printf("TDmac::HandleIsr"));
   336 
   337 	// Function needs to be called by PSL in ISR context
   338 	__DMA_ASSERTD(NKern::CurrentContext() == NKern::EInterrupt);
   339 
   340 	// First the ISR callback stuff
   341 
   342 	// Is this a transfer completion notification?
   343 	if (aEventMask & EDmaCallbackRequestCompletion)
   344 		{
   345 		// If so, has the client requested an ISR callback?
   346 		if (__e32_atomic_load_acq32(&aChannel.iIsrCbRequest))
   347 			{
   348 			__KTRACE_OPT(KDMA, Kern::Printf("ISR callback"));
   349 
   350 			// Since iIsrCbRequest was set, no threads will be
   351 			// modifying the request queue.
   352 			const DDmaRequest* const req = _LOFF(aChannel.iReqQ.First(), DDmaRequest, iLink);
   353 
   354 			// We expect the request to have requested
   355 			// ISR callback
   356 			__NK_ASSERT_DEBUG(req->iIsrCb);
   357 
   358 			TDmaCallback const cb = req->iDmaCb;
   359 			TAny* const arg = req->iDmaCbArg;
   360 			// Execute the client callback
   361 			(*cb)(EDmaCallbackRequestCompletion,
   362 				  (aIsComplete ? EDmaResultOK : EDmaResultError),
   363 				  arg,
   364 				  NULL);
   365 			// Now let's see if the callback rescheduled the transfer request
   366 			// (see TDmaChannel::IsrRedoRequest()).
   367 			const TBool redo = aChannel.iRedoRequest;
   368 			aChannel.iRedoRequest = EFalse;
   369 			const TBool stop = __e32_atomic_load_acq32(&aChannel.iIsrDfc) &
   370 				(TUint32)TDmaChannel::KCancelFlagMask;
   371 			// There won't be another ISR callback if this callback didn't
   372 			// reschedule the request, or the client cancelled all requests, or
   373 			// this callback rescheduled the request with a DFC callback.
   374 			if (!redo || stop || !req->iIsrCb)
   375 				{
   376 				__e32_atomic_store_rel32(&aChannel.iIsrCbRequest, EFalse);
   377 				}
   378 			if (redo && !stop)
   379 				{
   380 				// We won't queue the channel DFC in this case and just return.
   381 				__KTRACE_OPT(KDMA, Kern::Printf("CB rescheduled xfer -> no DFC"));
   382 				return;
   383 				}
   384 			// Not redoing or being cancelled means we've been calling the
   385 			// request's ISR callback for the last time. We're going to
   386 			// complete the request via the DFC in the usual way.
   387 			}
   388 		}
   389 
   390 	// Now queue a DFC if necessary. The possible scenarios are:
   391 	// a) DFC not queued (orig == 0)              -> update iIsrDfc + queue DFC
   392 	// b) DFC queued, not running yet (orig != 0) -> just update iIsrDfc
   393 	// c) DFC running / iIsrDfc not reset yet (orig != 0) -> just update iIsrDfc
   394 	// d) DFC running / iIsrDfc already reset (orig == 0) -> update iIsrDfc + requeue DFC
   395 
   396 	// Set error flag if necessary.
   397 	const TUint32 inc = aIsComplete ? 1u : TUint32(TDmaChannel::KErrorFlagMask) | 1u;
   398 
   399 	// Add 'inc' (interrupt count increment + poss. error flag) to 'iIsrDfc' if
   400 	// cancel flag is not set, do nothing otherwise. Assign original value of
   401 	// 'iIsrDfc' to 'orig' in any case.
   402 	const TUint32 orig = __e32_atomic_tau_ord32(&aChannel.iIsrDfc,
   403 												TUint32(TDmaChannel::KCancelFlagMask),
   404 												0,
   405 												inc);
   406 
   407 	// As the transfer should be suspended when an error occurs, we
   408 	// should never get here with the error flag already set.
   409 	__DMA_ASSERTD((orig & inc & (TUint32)TDmaChannel::KErrorFlagMask) == 0);
   410 
   411 	if (orig == 0)
   412 		{
   413 		aChannel.iDfc.Add();
   414 		}
   415 	}
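
// Typical PSL usage of HandleIsr() (a sketch; DMyDmac, the channel lookup
// and the hardware status check are hypothetical):
//
//	void DMyDmac::Isr(TAny* aThis)
//		{
//		DMyDmac& me = *static_cast<DMyDmac*>(aThis);
//		TDmaChannel& channel = me.ChannelFromPendingIrq();	// hypothetical
//		const TBool ok = me.TransferHadNoError(channel);	// hypothetical
//		// Notify the PIL: this runs the client's ISR callback (if one was
//		// requested) and/or queues the channel DFC.
//		me.HandleIsr(channel, EDmaCallbackRequestCompletion, ok);
//		}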
   416 
   417 
   418 TInt TDmac::InitDes(const SDmaDesHdr& aHdr, const TDmaTransferArgs& aTransferArgs)
   419 	{
   420 	__KTRACE_OPT(KDMA, Kern::Printf("TDmac::InitDes"));
   421 	TInt r;
   422 	if (iCapsHwDes)
   423 		{
   424 		__KTRACE_OPT(KDMA, Kern::Printf("iCaps.iHwDescriptors"));
   425 		r = InitHwDes(aHdr, aTransferArgs);
   426 		}
   427 	else
   428 		{
   429 		TDmaTransferArgs& args = HdrToDes(aHdr);
   430 		args = aTransferArgs;
   431 		r = KErrNone;
   432 		}
   433 	return r;
   434 	}
   435 
   436 
   437 TInt TDmac::InitHwDes(const SDmaDesHdr& /*aHdr*/, const TDmaTransferArgs& /*aTransferArgs*/)
   438 	{
   439 	// concrete controller must override if SDmacCaps::iHwDescriptors set
   440 	__DMA_CANT_HAPPEN();
   441 	return KErrGeneral;
   442 	}
   443 
   444 
   445 TInt TDmac::InitSrcHwDes(const SDmaDesHdr& /*aHdr*/, const TDmaTransferArgs& /*aTransferArgs*/)
   446 	{
   447 	// concrete controller must override if SDmacCaps::iAsymHwDescriptors set
   448 	__DMA_CANT_HAPPEN();
   449 	return KErrGeneral;
   450 	}
   451 
   452 
   453 TInt TDmac::InitDstHwDes(const SDmaDesHdr& /*aHdr*/, const TDmaTransferArgs& /*aTransferArgs*/)
   454 	{
   455 	// concrete controller must override if SDmacCaps::iAsymHwDescriptors set
   456 	__DMA_CANT_HAPPEN();
   457 	return KErrGeneral;
   458 	}
   459 
   460 
   461 TInt TDmac::UpdateDes(const SDmaDesHdr& aHdr, TUint32 aSrcAddr, TUint32 aDstAddr,
   462 					  TUint aTransferCount, TUint32 aPslRequestInfo)
   463 	{
   464 	__KTRACE_OPT(KDMA, Kern::Printf("TDmac::UpdateDes"));
   465 	TInt r;
   466 	if (iCapsHwDes)
   467 		{
   468 		__KTRACE_OPT(KDMA, Kern::Printf("iCaps.iHwDescriptors"));
   469 		r = UpdateHwDes(aHdr, aSrcAddr, aDstAddr, aTransferCount, aPslRequestInfo);
   470 		}
   471 	else
   472 		{
   473 		TDmaTransferArgs& args = HdrToDes(aHdr);
   474 		if (aSrcAddr != KPhysAddrInvalid)
   475 			args.iSrcConfig.iAddr = aSrcAddr;
   476 		if (aDstAddr != KPhysAddrInvalid)
   477 			args.iDstConfig.iAddr = aDstAddr;
   478 		if (aTransferCount)
   479 			args.iTransferCount = aTransferCount;
   480 		if (aPslRequestInfo)
   481 			args.iPslRequestInfo = aPslRequestInfo;
   482 		r = KErrNone;
   483 		}
   484 	return r;
   485 	}
   486 
   487 
   488 TInt TDmac::UpdateHwDes(const SDmaDesHdr& /*aHdr*/, TUint32 /*aSrcAddr*/, TUint32 /*aDstAddr*/,
   489 						TUint /*aTransferCount*/, TUint32 /*aPslRequestInfo*/)
   490 	{
   491 	// concrete controller must override if SDmacCaps::iHwDescriptors set
   492 	__DMA_CANT_HAPPEN();
   493 	return KErrGeneral;
   494 	}
   495 
   496 
   497 TInt TDmac::UpdateSrcHwDes(const SDmaDesHdr& /*aHdr*/, TUint32 /*aSrcAddr*/,
   498 						   TUint /*aTransferCount*/, TUint32 /*aPslRequestInfo*/)
   499 	{
   500 	// concrete controller must override if SDmacCaps::iAsymHwDescriptors set
   501 	__DMA_CANT_HAPPEN();
   502 	return KErrGeneral;
   503 	}
   504 
   505 
   506 TInt TDmac::UpdateDstHwDes(const SDmaDesHdr& /*aHdr*/, TUint32 /*aDstAddr*/,
   507 						   TUint /*aTransferCount*/, TUint32 /*aPslRequestInfo*/)
   508 	{
   509 	// concrete controller must override if SDmacCaps::iAsymHwDescriptors set
   510 	__DMA_CANT_HAPPEN();
   511 	return KErrGeneral;
   512 	}
   513 
   514 
   515 void TDmac::ChainHwDes(const SDmaDesHdr& /*aHdr*/, const SDmaDesHdr& /*aNextHdr*/)
   516 	{
   517 	// concrete controller must override if SDmacCaps::iHwDescriptors set
   518 	__DMA_CANT_HAPPEN();
   519 	}
   520 
   521 
   522 void TDmac::AppendHwDes(const TDmaChannel& /*aChannel*/, const SDmaDesHdr& /*aLastHdr*/,
   523 						const SDmaDesHdr& /*aNewHdr*/)
   524 	{
   525  	// concrete controller must override if SDmacCaps::iHwDescriptors set
   526 	__DMA_CANT_HAPPEN();
   527 	}
   528 
   529 
   530 void TDmac::AppendHwDes(const TDmaChannel& /*aChannel*/,
   531 						const SDmaDesHdr& /*aSrcLastHdr*/, const SDmaDesHdr& /*aSrcNewHdr*/,
   532 						const SDmaDesHdr& /*aDstLastHdr*/, const SDmaDesHdr& /*aDstNewHdr*/)
   533 	{
   534 	// concrete controller must override if SDmacCaps::iAsymHwDescriptors set
   535 	__DMA_CANT_HAPPEN();
   536 	}
   537 
   538 
   539 void TDmac::UnlinkHwDes(const TDmaChannel& /*aChannel*/, SDmaDesHdr& /*aHdr*/)
   540 	{
   541  	// concrete controller must override if SDmacCaps::iHwDescriptors set
   542 	__DMA_CANT_HAPPEN();
   543 	}
   544 
   545 
   546 void TDmac::ClearHwDes(const SDmaDesHdr& /*aHdr*/)
   547 	{
   548 	// default implementation - NOP; concrete controller may override
   549 	return;
   550 	}
   551 
   552 
   553 TInt TDmac::LinkChannels(TDmaChannel& /*a1stChannel*/, TDmaChannel& /*a2ndChannel*/)
   554 	{
   555 	// default implementation - NOP; concrete controller may override
   556 	return KErrNotSupported;
   557 	}
   558 
   559 
   560 TInt TDmac::UnlinkChannel(TDmaChannel& /*aChannel*/)
   561 	{
   562 	// default implementation - NOP; concrete controller may override
   563 	return KErrNotSupported;
   564 	}
   565 
   566 
   567 TInt TDmac::FailNext(const TDmaChannel& /*aChannel*/)
   568 	{
   569 	// default implementation - NOP; concrete controller may override
   570 	return KErrNotSupported;
   571 	}
   572 
   573 
   574 TInt TDmac::MissNextInterrupts(const TDmaChannel& /*aChannel*/, TInt /*aInterruptCount*/)
   575 	{
   576 	// default implementation - NOP; concrete controller may override
   577 	return KErrNotSupported;
   578 	}
   579 
   580 
   581 TInt TDmac::Extension(TDmaChannel& /*aChannel*/, TInt /*aCmd*/, TAny* /*aArg*/)
   582 	{
   583 	// default implementation - NOP; concrete controller may override
   584 	return KErrNotSupported;
   585 	}
   586 
   587 
   588 TUint32 TDmac::HwDesNumDstElementsTransferred(const SDmaDesHdr& /*aHdr*/)
   589 	{
   590  	// Concrete controller must override if SDmacCaps::iHwDescriptors set.
   591 	__DMA_CANT_HAPPEN();
   592 	return 0;
   593 	}
   594 
   595 
   596 TUint32 TDmac::HwDesNumSrcElementsTransferred(const SDmaDesHdr& /*aHdr*/)
   597 	{
   598  	// Concrete controller must override if SDmacCaps::iHwDescriptors set.
   599 	__DMA_CANT_HAPPEN();
   600 	return 0;
   601 	}
   602 
   603 
   604 #ifdef _DEBUG
   605 
   606 void TDmac::Invariant()
   607 	{
   608 	Wait();
   609 	__DMA_ASSERTD(0 <= iAvailDesCount && iAvailDesCount <= iMaxDesCount);
   610 	__DMA_ASSERTD(! iFreeHdr || IsValidHdr(iFreeHdr));
   611 	for (TInt i = 0; i < iMaxDesCount; i++)
   612 		__DMA_ASSERTD(iHdrPool[i].iNext == NULL || IsValidHdr(iHdrPool[i].iNext));
   613 	Signal();
   614 	}
   615 
   616 
   617 TBool TDmac::IsValidHdr(const SDmaDesHdr* aHdr)
   618 	{
   619 	return (iHdrPool <= aHdr) && (aHdr < iHdrPool + iMaxDesCount);
   620 	}
   621 
   622 #endif
   623 
   624 
   625 
   626 
   627 //
   628 // Internal compat version, used by legacy Fragment()
   629 //
   630 TDmaTransferConfig::TDmaTransferConfig(TUint32 aAddr, TUint aFlags, TBool aAddrInc)
   631 	: iAddr(aAddr),
   632 	  iAddrMode(aAddrInc ? KDmaAddrModePostIncrement : KDmaAddrModeConstant),
   633 	  iElementSize(0),
   634 	  iElementsPerFrame(0),
   635 	  iElementsPerPacket(0),
   636 	  iFramesPerTransfer(0),
   637 	  iElementSkip(0),
   638 	  iFrameSkip(0),
   639 	  iBurstSize(KDmaBurstSizeAny),
   640 	  iFlags(aFlags),
   641 	  iSyncFlags(KDmaSyncAuto),
   642 	  iPslTargetInfo(0),
   643 	  iRepeatCount(0),
   644 	  iDelta(~0u),
   645 	  iReserved(0)
   646 	{
   647 	}
   648 
   649 
   650 
   651 //
   652 // Internal compat version, used by legacy Fragment()
   653 //
   654 TDmaTransferArgs::TDmaTransferArgs(TUint32 aSrc, TUint32 aDest, TInt aCount,
   655 								   TUint aFlags, TUint32 aPslInfo)
   656 	: iSrcConfig(aSrc, RequestFlags2SrcConfigFlags(aFlags), (aFlags & KDmaIncSrc)),
   657 	  iDstConfig(aDest, RequestFlags2DstConfigFlags(aFlags), (aFlags & KDmaIncDest)),
   658 	  iTransferCount(aCount),
   659 	  iGraphicsOps(KDmaGraphicsOpNone),
   660 	  iColour(0),
   661 	  iFlags(0),
   662 	  iChannelPriority(KDmaPriorityNone),
   663 	  iPslRequestInfo(aPslInfo),
   664 	  iDelta(~0u),
   665 	  iReserved1(0),
   666 	  iChannelCookie(0),
   667 	  iReserved2(0)
   668 	{
   669 	}
   670 
   671 
   672 //
   673 // As DDmaRequest is derived from DBase, the initializations with zero aren't
   674 // strictly necessary here, but this way it's nicer.
   675 //
   676 EXPORT_C DDmaRequest::DDmaRequest(TDmaChannel& aChannel, TCallback aCb,
   677 								  TAny* aCbArg, TInt aMaxTransferSize)
   678 	: iChannel(aChannel),
   679 	  iCb(aCb),
   680 	  iCbArg(aCbArg),
   681 	  iDmaCb(NULL),
   682 	  iDmaCbArg(NULL),
   683 	  iIsrCb(EFalse),
   684 	  iDesCount(0),
   685 	  iFirstHdr(NULL),
   686 	  iLastHdr(NULL),
   687 	  iSrcDesCount(0),
   688 	  iSrcFirstHdr(NULL),
   689 	  iSrcLastHdr(NULL),
   690 	  iDstDesCount(0),
   691 	  iDstFirstHdr(NULL),
   692 	  iDstLastHdr(NULL),
   693 	  iQueued(EFalse),
   694 	  iMaxTransferSize(aMaxTransferSize),
   695 	  iTotalNumSrcElementsTransferred(0),
   696 	  iTotalNumDstElementsTransferred(0)
   697 	{
   698 	__e32_atomic_add_ord32(&iChannel.iReqCount, 1);
   699 	__DMA_ASSERTD(0 <= aMaxTransferSize);
   700 	__DMA_INVARIANT();
   701 	}
   702 
   703 
   704 //
   705 // As DDmaRequest is derived from DBase, the initializations with zero aren't
   706 // strictly necessary here, but this way it's nicer.
   707 //
   708 EXPORT_C DDmaRequest::DDmaRequest(TDmaChannel& aChannel, TDmaCallback aDmaCb,
   709 								  TAny* aCbArg, TUint aMaxTransferSize)
   710 	: iChannel(aChannel),
   711 	  iCb(NULL),
   712 	  iCbArg(NULL),
   713 	  iDmaCb(aDmaCb),
   714 	  iDmaCbArg(aCbArg),
   715 	  iIsrCb(EFalse),
   716 	  iDesCount(0),
   717 	  iFirstHdr(NULL),
   718 	  iLastHdr(NULL),
   719 	  iSrcDesCount(0),
   720 	  iSrcFirstHdr(NULL),
   721 	  iSrcLastHdr(NULL),
   722 	  iDstDesCount(0),
   723 	  iDstFirstHdr(NULL),
   724 	  iDstLastHdr(NULL),
   725 	  iQueued(EFalse),
   726 	  iMaxTransferSize(aMaxTransferSize),
   727 	  iTotalNumSrcElementsTransferred(0),
   728 	  iTotalNumDstElementsTransferred(0)
   729 	{
   730 	__e32_atomic_add_ord32(&iChannel.iReqCount, 1);
   731 	__DMA_INVARIANT();
   732 	}
   733 
   734 
   735 EXPORT_C DDmaRequest::~DDmaRequest()
   736 	{
   737 	__DMA_ASSERTD(!iQueued);
   738 	__DMA_INVARIANT();
   739 	FreeDesList();
   740 	__e32_atomic_add_ord32(&iChannel.iReqCount, TUint32(-1));
   741 	}
   742 
   743 
   744 EXPORT_C TInt DDmaRequest::Fragment(TUint32 aSrc, TUint32 aDest, TInt aCount,
   745 									TUint aFlags, TUint32 aPslInfo)
   746 	{
   747 	__KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::Fragment thread %O "
   748 									"src=0x%08X dest=0x%08X count=%d flags=0x%X psl=0x%08X",
   749 									&Kern::CurrentThread(), aSrc, aDest, aCount, aFlags, aPslInfo));
   750 	__DMA_ASSERTD(aCount > 0);
   751 
   752 	TDmaTransferArgs args(aSrc, aDest, aCount, aFlags, aPslInfo);
   753 
   754 	return Frag(args);
   755 	}
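
// Client-side usage sketch for the legacy interface (the flag combination
// and callback are illustrative assumptions; a real driver keeps the
// request object alive until the transfer has completed):
//
//	void XferDone(DDmaRequest::TResult aResult, TAny* aArg)
//		{
//		// Runs in the channel's DFC with the aArg given to the constructor
//		}
//
//	TInt StartCopy(TDmaChannel& aChannel, TLinAddr aSrc, TLinAddr aDst, TInt aSize)
//		{
//		DDmaRequest* const req = new DDmaRequest(aChannel, &XferDone);
//		if (!req)
//			return KErrNoMemory;
//		// Split the transfer into fragments the DMAC can handle...
//		const TInt r = req->Fragment(aSrc, aDst, aSize,
//									 KDmaMemSrc | KDmaMemDest | KDmaIncSrc | KDmaIncDest, 0);
//		// ...and hand the whole chain to the controller.
//		return (r == KErrNone) ? req->Queue() : r;
//		}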
   756 
   757 
   758 EXPORT_C TInt DDmaRequest::Fragment(const TDmaTransferArgs& aTransferArgs)
   759 	{
   760 	__KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::Fragment thread %O", &Kern::CurrentThread()));
   761 
   762 	// Writable temporary working copy of the transfer arguments.
   763 	// We need this because we may have to modify some fields before passing it
   764 	// to the PSL (for example iChannelCookie, iTransferCount,
   765 	// iDstConfig::iAddr, and iSrcConfig::iAddr).
   766 	TDmaTransferArgs args(aTransferArgs);
   767 
   768 	return Frag(args);
   769 	}
   770 
   771 
   772 TUint DDmaRequest::GetTransferCount(const TDmaTransferArgs& aTransferArgs)
   773 	{
   774 	const TDmaTransferConfig& src = aTransferArgs.iSrcConfig;
   775 	const TDmaTransferConfig& dst = aTransferArgs.iDstConfig;
   776 
   777 	TUint count = aTransferArgs.iTransferCount;
   778 	if (count == 0)
   779 		{
   780 		__KTRACE_OPT(KDMA, Kern::Printf("iTransferCount == 0"));
   781 		count = src.iElementSize * src.iElementsPerFrame *
   782 			src.iFramesPerTransfer;
   783 		const TUint dst_cnt = dst.iElementSize * dst.iElementsPerFrame *
   784 			dst.iFramesPerTransfer;
   785 		if (count != dst_cnt)
   786 			{
   787 			__KTRACE_OPT(KPANIC, Kern::Printf("Error: (count != dst_cnt)"));
   788 			return 0;
   789 			}
   790 		}
   791 	else
   792 		{
   793 		__KTRACE_OPT(KDMA, Kern::Printf("iTransferCount == %d", count));
   794 		// Client shouldn't specify contradictory or incomplete things
   795 		if (src.iElementSize != 0)
   796 			{
   797 			if ((count % src.iElementSize) != 0)
   798 				{
   799 				__KTRACE_OPT(KPANIC,
   800 							 Kern::Printf("Error: ((count %% src.iElementSize) != 0)"));
   801 				return 0;
   802 				}
   803 			if (src.iElementsPerFrame != 0)
   804 				{
   805 				if ((src.iElementSize * src.iElementsPerFrame * src.iFramesPerTransfer) != count)
   806 					{
   807 					__KTRACE_OPT(KPANIC,
   808 								 Kern::Printf("Error: ((src.iElementSize * "
   809 											  "src.iElementsPerFrame * "
   810 											  "src.iFramesPerTransfer) != count)"));
   811 					return 0;
   812 					}
   813 				}
   814 			}
   815 		else
   816 			{
   817 			if (src.iElementsPerFrame != 0)
   818 				{
   819 				__KTRACE_OPT(KPANIC,
   820 							 Kern::Printf("Error: (src.iElementsPerFrame != 0)"));
   821 				return 0;
   822 				}
   823 			if (src.iFramesPerTransfer != 0)
   824 				{
   825 				__KTRACE_OPT(KPANIC,
   826 							 Kern::Printf("Error: (src.iFramesPerTransfer != 0)"));
   827 				return 0;
   828 				}
   829 			if (src.iElementsPerPacket != 0)
   830 				{
   831 				__KTRACE_OPT(KPANIC,
   832 							 Kern::Printf("Error: (src.iElementsPerPacket != 0)"));
   833 				return 0;
   834 				}
   835 			}
   836 		if (dst.iElementSize != 0)
   837 			{
   838 			if ((count % dst.iElementSize) != 0)
   839 				{
   840 				__KTRACE_OPT(KPANIC,
   841 							 Kern::Printf("Error: ((count %% dst.iElementSize) != 0)"));
   842 				return 0;
   843 				}
   844 			if (dst.iElementsPerFrame != 0)
   845 				{
   846 				if ((dst.iElementSize * dst.iElementsPerFrame * dst.iFramesPerTransfer) != count)
   847 					{
   848 					__KTRACE_OPT(KPANIC,
   849 								 Kern::Printf("Error: ((dst.iElementSize * "
   850 											  "dst.iElementsPerFrame * "
   851 											  "dst.iFramesPerTransfer) != count)"));
   852 					return 0;
   853 					}
   854 				}
   855 			}
   856 		else
   857 			{
   858 			if (dst.iElementsPerFrame != 0)
   859 				{
   860 				__KTRACE_OPT(KPANIC,
   861 							 Kern::Printf("Error: (dst.iElementsPerFrame != 0)"));
   862 				return 0;
   863 				}
   864 			if (dst.iFramesPerTransfer != 0)
   865 				{
   866 				__KTRACE_OPT(KPANIC,
   867 							 Kern::Printf("Error: (dst.iFramesPerTransfer != 0)"));
   868 				return 0;
   869 				}
   870 			if (dst.iElementsPerPacket != 0)
   871 				{
   872 				__KTRACE_OPT(KPANIC,
   873 							 Kern::Printf("Error: (dst.iElementsPerPacket != 0)"));
   874 				return 0;
   875 				}
   876 			}
   877 		}
   878 	return count;
   879 	}
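
// Example of a consistent geometry: with iElementSize = 4,
// iElementsPerFrame = 16 and iFramesPerTransfer = 8 on both source and
// destination, iTransferCount may either be left at 0 (the count,
// 4 * 16 * 8 = 512 bytes, is then derived) or set to exactly 512; any
// other combination is rejected by returning 0.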
   880 
   881 
   882 TInt DDmaRequest::Frag(TDmaTransferArgs& aTransferArgs)
   883 	{
   884 	__DMA_ASSERTD(!iQueued);
   885 
   886 	// Transfer count checks
   887 	const TUint count = GetTransferCount(aTransferArgs);
   888 	if (count == 0)
   889 		{
   890 		return KErrArgument;
   891 		}
   892 
   893 	const TDmaTransferConfig& src = aTransferArgs.iSrcConfig;
   894 	const TDmaTransferConfig& dst = aTransferArgs.iDstConfig;
   895 
   896 	// Ask the PSL what the maximum length possible for this transfer is
   897 	TUint max_xfer_len = iChannel.MaxTransferLength(src.iFlags, dst.iFlags,
   898 													aTransferArgs.iPslRequestInfo);
   899 	if (iMaxTransferSize)
   900 		{
   901 		// User has set a size cap
   902 		__KTRACE_OPT(KDMA, Kern::Printf("iMaxTransferSize != 0"));
   903 		__DMA_ASSERTA((iMaxTransferSize <= max_xfer_len) || (max_xfer_len == 0));
   904 		max_xfer_len = iMaxTransferSize;
   905 		}
   906 	else
   907 		{
   908 		// User doesn't care about max size
   909 		if (max_xfer_len == 0)
   910 			{
   911 			// No maximum imposed by controller
   912 			max_xfer_len = count;
   913 			}
   914 		}
   915 
   916 	// ISR callback requested?
   917 	const TBool isr_cb = (aTransferArgs.iFlags & KDmaRequestCallbackFromIsr);
   918 	if (isr_cb)
   919 		{
   920 		// Requesting an ISR callback w/o supplying one?
   921 		if (!iDmaCb)
   922 			{
   923 			return KErrArgument;
   924 			}
   925 		}
   926 
   927 	// Set the channel cookie for the PSL
   928 	aTransferArgs.iChannelCookie = iChannel.PslId();
   929 
   930 	// Now the actual fragmentation
   931 	TInt r;
   932 	if (iChannel.iDmacCaps->iAsymHwDescriptors)
   933 		{
   934 		r = FragAsym(aTransferArgs, count, max_xfer_len);
   935 		}
   936 	else
   937 		{
   938 		r = FragSym(aTransferArgs, count, max_xfer_len);
   939 		}
   940 
   941 	if (r == KErrNone)
   942 		{
   943 		iIsrCb = isr_cb;
   944 		}
   945 
   946 	__DMA_INVARIANT();
   947 	return r;
   948 	}
   949 
   950 
   951 TInt DDmaRequest::FragSym(TDmaTransferArgs& aTransferArgs, TUint aCount,
   952 						  TUint aMaxTransferLen)
   953 	{
   954 	TDmaTransferConfig& src = aTransferArgs.iSrcConfig;
   955 	TDmaTransferConfig& dst = aTransferArgs.iDstConfig;
   956 
   957 	const TBool mem_src = (src.iFlags & KDmaMemAddr);
   958 	const TBool mem_dst = (dst.iFlags & KDmaMemAddr);
   959 
   960 	const TUint align_mask_src = iChannel.AddressAlignMask(src.iFlags,
   961 														   src.iElementSize,
   962 														   aTransferArgs.iPslRequestInfo);
   963 	const TUint align_mask_dst = iChannel.AddressAlignMask(dst.iFlags,
   964 														   dst.iElementSize,
   965 														   aTransferArgs.iPslRequestInfo);
   966 	// Memory buffers must satisfy alignment constraint
   967 	__DMA_ASSERTD(!mem_src || ((src.iAddr & align_mask_src) == 0));
   968 	__DMA_ASSERTD(!mem_dst || ((dst.iAddr & align_mask_dst) == 0));
   969 
   970 	const TUint max_aligned_len = (aMaxTransferLen &
   971 								   ~(Max(align_mask_src, align_mask_dst)));
   972 	// Client and PSL sane?
   973 	__DMA_ASSERTD(max_aligned_len > 0);
   974 
   975 	FreeDesList();			   // revert any previous fragmentation attempt
   976 	TInt r;
   977 	do
   978 		{
   979 		// Allocate fragment
   980 		r = ExpandDesList(/*1*/);
   981 		if (r != KErrNone)
   982 			{
   983 			FreeDesList();
   984 			break;
   985 			}
   986 		// Compute fragment size
   987 		TUint c = Min(aMaxTransferLen, aCount);
   988 		if (mem_src && !(src.iFlags & KDmaPhysAddr))
   989 			{
   990 			__KTRACE_OPT(KDMA, Kern::Printf("mem_src && !(src.iFlags & KDmaPhysAddr)"));
   991 			// @@@ Should also take into account (src.iFlags & KDmaMemIsContiguous)!
   992 			c = MaxPhysSize(src.iAddr, c);
   993 			}
   994 		if (mem_dst && !(dst.iFlags & KDmaPhysAddr))
   995 			{
   996 			__KTRACE_OPT(KDMA, Kern::Printf("mem_dst && !(dst.iFlags & KDmaPhysAddr)"));
   997 			// @@@ Should also take into account (dst.iFlags & KDmaMemIsContiguous)!
   998 			c = MaxPhysSize(dst.iAddr, c);
   999 			}
  1000 		if ((mem_src || mem_dst) && (c < aCount) && (c > max_aligned_len))
  1001 			{
  1002 			// This is not the last fragment of a transfer to/from memory.
  1003 			// We must round down the fragment size so the next one is
  1004 			// correctly aligned.
  1005 			__KTRACE_OPT(KDMA, Kern::Printf("(mem_src || mem_dst) && (c < aCount) && (c > max_aligned_len)"));
  1006 			c = max_aligned_len;
  1007 			}
  1008 
  1009 		// TODO: Make sure that no element or frame on either the src or dst
  1010 		// side (which can be of different sizes) straddles a DMA subtransfer
  1011 		// boundary. (That would be a fragmentation error by the PIL.)
  1012 
  1013 		// Set transfer count for the PSL
  1014 		aTransferArgs.iTransferCount = c;
  1015 		__KTRACE_OPT(KDMA, Kern::Printf("this fragm.: %d (0x%x) total remain.: %d (0x%x)",
  1016 										c, c, aCount, aCount));
  1017 		// Initialise fragment
  1018 		r = iChannel.iController->InitDes(*iLastHdr, aTransferArgs);
  1019 		if (r != KErrNone)
  1020 			{
  1021 			FreeDesList();
  1022 			break;
  1023 			}
  1024 		// Update for next iteration
  1025 		aCount -= c;
  1026 		if (mem_src)
  1027 			src.iAddr += c;
  1028 		if (mem_dst)
  1029 			dst.iAddr += c;
  1030 		}
  1031 	while (aCount > 0);
  1032 
  1033 	return r;
  1034 	}
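
// Fragmentation example (assuming 4KB pages, a PSL maximum transfer length
// of 0x2000 and both ends in memory): a 0x3000-byte transfer whose source
// buffer crosses a physical page discontinuity 0x800 bytes in is split
// into three fragments of 0x800, 0x2000 and 0x800 bytes, each physically
// contiguous at both ends.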
  1035 
  1036 
  1037 TInt DDmaRequest::FragAsym(TDmaTransferArgs& aTransferArgs, TUint aCount,
  1038 						   TUint aMaxTransferLen)
  1039 	{
  1040 	TInt r = FragAsymSrc(aTransferArgs, aCount, aMaxTransferLen);
  1041 	if (r != KErrNone)
  1042 		{
  1043 		FreeSrcDesList();
  1044 		return r;
  1045 		}
  1046 	r = FragAsymDst(aTransferArgs, aCount, aMaxTransferLen);
  1047 	if (r != KErrNone)
  1048 		{
  1049 		FreeSrcDesList();
  1050 		FreeDstDesList();
  1051 		}
  1052 	return r;
  1053 	}
  1054 
  1055 
  1056 TInt DDmaRequest::FragAsymSrc(TDmaTransferArgs& aTransferArgs, TUint aCount,
  1057 							  TUint aMaxTransferLen)
  1058 	{
  1059 	TDmaTransferConfig& src = aTransferArgs.iSrcConfig;
  1060 
  1061 	const TBool mem_src = (src.iFlags & KDmaMemAddr);
  1062 
  1063 	const TUint align_mask = iChannel.AddressAlignMask(src.iFlags,
  1064 													   src.iElementSize,
  1065 													   aTransferArgs.iPslRequestInfo);
  1066 	// Memory buffers must satisfy alignment constraint
  1067 	__DMA_ASSERTD(!mem_src || ((src.iAddr & align_mask) == 0));
  1068 
  1069 	const TUint max_aligned_len = (aMaxTransferLen & ~align_mask);
  1070 	__DMA_ASSERTD(max_aligned_len > 0);				  // bug in PSL if not true
  1071 
  1072 	FreeSrcDesList();
  1073 	TInt r;
  1074 	do
  1075 		{
  1076 		// Allocate fragment
  1077 		r = ExpandSrcDesList(/*1*/);
  1078 		if (r != KErrNone)
  1079 			{
  1080 			break;
  1081 			}
  1082 		// Compute fragment size
  1083 		TUint c = Min(aMaxTransferLen, aCount);
  1084 		if (mem_src && !(src.iFlags & KDmaPhysAddr))
  1085 			{
  1086 			__KTRACE_OPT(KDMA, Kern::Printf("mem_src && !(src.iFlags & KDmaPhysAddr)"));
  1087 			c = MaxPhysSize(src.iAddr, c);
  1088 			}
  1089 		if (mem_src && (c < aCount) && (c > max_aligned_len))
  1090 			{
  1091 			// This is not the last fragment of a transfer from memory.
  1092 			// We must round down the fragment size so the next one is
  1093 			// correctly aligned.
  1094 			__KTRACE_OPT(KDMA, Kern::Printf("mem_src && (c < aCount) && (c > max_aligned_len)"));
  1095 			c = max_aligned_len;
  1096 			}
  1097 		// Set transfer count for the PSL
  1098 		aTransferArgs.iTransferCount = c;
  1099 		__KTRACE_OPT(KDMA, Kern::Printf("this fragm.: %d (0x%x) total remain.: %d (0x%x)",
  1100 										c, c, aCount, aCount));
  1101 		// Initialise fragment
  1102 		r = iChannel.iController->InitSrcHwDes(*iSrcLastHdr, aTransferArgs);
  1103 		if (r != KErrNone)
  1104 			{
  1105 			break;
  1106 			}
  1107 		// Update for next iteration
  1108 		aCount -= c;
  1109 		if (mem_src)
  1110 			src.iAddr += c;
  1111 		}
  1112 	while (aCount > 0);
  1113 
  1114 	return r;
  1115 	}
  1116 
  1117 
  1118 TInt DDmaRequest::FragAsymDst(TDmaTransferArgs& aTransferArgs, TUint aCount,
  1119 							  TUint aMaxTransferLen)
  1120 	{
  1121 	TDmaTransferConfig& dst = aTransferArgs.iDstConfig;
  1122 
  1123 	const TBool mem_dst = (dst.iFlags & KDmaMemAddr);
  1124 
  1125 	const TUint align_mask = iChannel.AddressAlignMask(dst.iFlags,
  1126 													   dst.iElementSize,
  1127 													   aTransferArgs.iPslRequestInfo);
  1128 	// Memory buffers must satisfy alignment constraint
  1129 	__DMA_ASSERTD(!mem_dst || ((dst.iAddr & align_mask) == 0));
  1130 
  1131 	const TUint max_aligned_len = (aMaxTransferLen & ~align_mask);
  1132 	__DMA_ASSERTD(max_aligned_len > 0);				  // bug in PSL if not true
  1133 
  1134 	FreeDstDesList();
  1135 	TInt r;
  1136 	do
  1137 		{
  1138 		// Allocate fragment
  1139 		r = ExpandDstDesList(/*1*/);
  1140 		if (r != KErrNone)
  1141 			{
  1142 			break;
  1143 			}
  1144 		// Compute fragment size
  1145 		TUint c = Min(aMaxTransferLen, aCount);
  1146 		if (mem_dst && !(dst.iFlags & KDmaPhysAddr))
  1147 			{
  1148 			__KTRACE_OPT(KDMA, Kern::Printf("mem_dst && !(dst.iFlags & KDmaPhysAddr)"));
  1149 			c = MaxPhysSize(dst.iAddr, c);
  1150 			}
  1151 		if (mem_dst && (c < aCount) && (c > max_aligned_len))
  1152 			{
  1153 			// This is not the last fragment of a transfer to memory.
  1154 			// We must round down the fragment size so the next one is
  1155 			// correctly aligned.
  1156 			__KTRACE_OPT(KDMA, Kern::Printf("mem_dst && (c < aCount) && (c > max_aligned_len)"));
  1157 			c = max_aligned_len;
  1158 			}
  1159 		// Set transfer count for the PSL
  1160 		aTransferArgs.iTransferCount = c;
  1161 		__KTRACE_OPT(KDMA, Kern::Printf("this fragm.: %d (0x%x) total remain.: %d (0x%x)",
  1162 										c, c, aCount, aCount));
  1163 		// Initialise fragment
  1164 		r = iChannel.iController->InitDstHwDes(*iDstLastHdr, aTransferArgs);
  1165 		if (r != KErrNone)
  1166 			{
  1167 			break;
  1168 			}
  1169 		// Update for next iteration
  1170 		aCount -= c;
  1171 		if (mem_dst)
  1172 			dst.iAddr += c;
  1173 		}
  1174 	while (aCount > 0);
  1175 
  1176 	return r;
  1177 	}
  1178 
  1179 
  1180 EXPORT_C TInt DDmaRequest::Queue()
  1181 	{
  1182 	__KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::Queue thread %O", &Kern::CurrentThread()));
  1183 	__DMA_ASSERTD(iDesCount > 0);	// Not configured? Call Fragment() first!
  1184 	__DMA_ASSERTD(!iQueued);
  1185 
  1186 	// Append request to queue and link new descriptor list to existing one.
  1187 	iChannel.Wait();
  1188 
  1189 	TInt r = KErrGeneral;
  1190 	const TBool ch_isr_cb = __e32_atomic_load_acq32(&iChannel.iIsrCbRequest);
  1191 	if (ch_isr_cb)
  1192 		{
  1193 		// Client mustn't try to queue any new request while one with an ISR
  1194 		// callback is already queued on this channel. This is to make sure
  1195 		// that the channel's Transfer() function is not called by both the ISR
  1196 		// and the client thread at the same time.
  1197 		__KTRACE_OPT(KPANIC, Kern::Printf("An ISR cb request exists - not queueing"));
  1198 		}
  1199 	else if (iIsrCb && !iChannel.IsQueueEmpty())
  1200 		{
  1201 		// Client mustn't try to queue an ISR callback request whilst any
  1202 		// others are still queued on this channel. This is to make sure that
  1203 		// the ISR callback doesn't get executed together with the DFC(s) of
  1204 		// any previous request(s).
  1205 		__KTRACE_OPT(KPANIC, Kern::Printf("Request queue not empty - not queueing"));
  1206 		}
  1207 	else if (iChannel.iIsrDfc & (TUint32)TDmaChannel::KCancelFlagMask)
  1208 		{
  1209 		__KTRACE_OPT(KPANIC, Kern::Printf("Channel requests cancelled - not queueing"));
  1210 		}
  1211 	else
  1212 		{
  1213 		iQueued = ETrue;
  1214 		iChannel.iReqQ.Add(&iLink);
  1215 		// iChannel.iNullPtr points to iChannel.iCurHdr for an empty queue
  1216 		*iChannel.iNullPtr = iFirstHdr;
  1217 		iChannel.iNullPtr = &(iLastHdr->iNext);
  1218 		if (iIsrCb)
  1219 			{
  1220 			// Since we've made sure that there is no other request in the
  1221 			// queue before this, the only thing of relevance is the channel
  1222 			// DFC which might yet have to complete for the previous request,
  1223 			// and this function might indeed have been called from there via
  1224 			// the client callback. This should be all right though as once
  1225 			// we've set the following flag no further Queue()'s will be
  1226 			// possible.
  1227 			__e32_atomic_store_rel32(&iChannel.iIsrCbRequest, ETrue);
  1228 			}
  1229 		iChannel.DoQueue(const_cast<const DDmaRequest&>(*this));
  1230 		r = KErrNone;
  1231 		}
  1232 	iChannel.Signal();
  1233 
  1234 	__DMA_INVARIANT();
  1235 	return r;
  1236 	}
  1237 
  1238 
  1239 EXPORT_C TInt DDmaRequest::ExpandDesList(TInt aCount)
  1240 	{
  1241 	return ExpandDesList(aCount, iDesCount, iFirstHdr, iLastHdr);
  1242 	}
  1243 
  1244 
  1245 EXPORT_C TInt DDmaRequest::ExpandSrcDesList(TInt aCount)
  1246 	{
  1247 	return ExpandDesList(aCount, iSrcDesCount, iSrcFirstHdr, iSrcLastHdr);
  1248 	}
  1249 
  1250 
  1251 EXPORT_C TInt DDmaRequest::ExpandDstDesList(TInt aCount)
  1252 	{
  1253 	return ExpandDesList(aCount, iDstDesCount, iDstFirstHdr, iDstLastHdr);
  1254 	}
  1255 
  1256 
  1257 TInt DDmaRequest::ExpandDesList(TInt aCount, TInt& aDesCount,
  1258 								SDmaDesHdr*& aFirstHdr,
  1259 								SDmaDesHdr*& aLastHdr)
  1260 	{
  1261 	__DMA_ASSERTD(!iQueued);
  1262 	__DMA_ASSERTD(aCount > 0);
  1263 
  1264 	if (aCount > iChannel.iAvailDesCount)
  1265 		{
  1266 		return KErrTooBig;
  1267 		}
  1268 
  1269 	iChannel.iAvailDesCount -= aCount;
  1270 	aDesCount += aCount;
  1271 
  1272 	TDmac& c = *(iChannel.iController);
  1273 	c.Wait();
  1274 
  1275 	if (aFirstHdr == NULL)
  1276 		{
  1277 		// Handle an empty list specially to simplify the following loop
  1278 		aFirstHdr = aLastHdr = c.iFreeHdr;
  1279 		c.iFreeHdr = c.iFreeHdr->iNext;
  1280 		--aCount;
  1281 		}
  1282 	else
  1283 		{
  1284 		aLastHdr->iNext = c.iFreeHdr;
  1285 		}
  1286 
  1287 	// Remove as many descriptors and headers from the free pool as necessary
  1288 	// and ensure hardware descriptors are chained together.
  1289 	while (aCount-- > 0)
  1290 		{
  1291 		__DMA_ASSERTD(c.iFreeHdr != NULL);
  1292 		if (c.iCapsHwDes)
  1293 			{
  1294 			c.ChainHwDes(*aLastHdr, *(c.iFreeHdr));
  1295 			}
  1296 		aLastHdr = c.iFreeHdr;
  1297 		c.iFreeHdr = c.iFreeHdr->iNext;
  1298 		}
  1299 
  1300 	c.Signal();
  1301 
  1302 	aLastHdr->iNext = NULL;
  1303 
  1304 	__DMA_INVARIANT();
  1305 	return KErrNone;
  1306 	}
  1307 
  1308 
  1309 EXPORT_C void DDmaRequest::FreeDesList()
  1310 	{
  1311 	FreeDesList(iDesCount, iFirstHdr, iLastHdr);
  1312 	}
  1313 
  1314 
  1315 EXPORT_C void DDmaRequest::FreeSrcDesList()
  1316 	{
  1317 	FreeDesList(iSrcDesCount, iSrcFirstHdr, iSrcLastHdr);
  1318 	}
  1319 
  1320 
  1321 EXPORT_C void DDmaRequest::FreeDstDesList()
  1322 	{
  1323 	FreeDesList(iDstDesCount, iDstFirstHdr, iDstLastHdr);
  1324 	}
  1325 
  1326 
  1327 void DDmaRequest::FreeDesList(TInt& aDesCount, SDmaDesHdr*& aFirstHdr, SDmaDesHdr*& aLastHdr)
  1328 	{
  1329 	__DMA_ASSERTD(!iQueued);
  1330 
  1331 	if (aDesCount > 0)
  1332 		{
  1333 		iChannel.iAvailDesCount += aDesCount;
  1334 		TDmac& c = *(iChannel.iController);
  1335 		const SDmaDesHdr* hdr = aFirstHdr;
  1336 		while (hdr)
  1337 			{
  1338 			c.ClearHwDes(*hdr);
  1339 			hdr = hdr->iNext;
  1340 			}
  1341 		c.Wait();
  1342 		aLastHdr->iNext = c.iFreeHdr;
  1343 		c.iFreeHdr = aFirstHdr;
  1344 		c.Signal();
  1345 		aFirstHdr = aLastHdr = NULL;
  1346 		aDesCount = 0;
  1347 		}
  1348 	}
  1349 
  1350 
  1351 EXPORT_C void DDmaRequest::EnableSrcElementCounting(TBool /*aResetElementCount*/)
  1352 	{
  1353 	// Not yet implemented.
  1354 	return;
  1355 	}
  1356 
  1357 
  1358 EXPORT_C void DDmaRequest::EnableDstElementCounting(TBool /*aResetElementCount*/)
  1359 	{
  1360 	// Not yet implemented.
  1361 	return;
  1362 	}
  1363 
  1364 
  1365 EXPORT_C void DDmaRequest::DisableSrcElementCounting()
  1366 	{
  1367 	// Not yet implemented.
  1368 	return;
  1369 	}
  1370 
  1371 
  1372 EXPORT_C void DDmaRequest::DisableDstElementCounting()
  1373 	{
  1374 	// Not yet implemented.
  1375 	return;
  1376 	}
  1377 
  1378 
  1379 EXPORT_C TUint32 DDmaRequest::TotalNumSrcElementsTransferred()
  1380 	{
  1381 	// Not yet implemented.
  1382 
  1383 	// So far largely bogus code (just to touch some symbols)...
  1384 	iTotalNumSrcElementsTransferred = 0;
  1385 	TDmac& c = *(iChannel.iController);
  1386 	if (c.iCapsHwDes)
  1387 		{
  1388 		for (const SDmaDesHdr* pH = iFirstHdr; pH != NULL; pH = pH->iNext)
  1389 			{
  1390 			iTotalNumSrcElementsTransferred += c.HwDesNumSrcElementsTransferred(*pH);
  1391 			}
  1392 		}
  1393 	else
  1394 		{
  1395 		// Do something different for pseudo descriptors...
  1396 		}
  1397 	return iTotalNumSrcElementsTransferred;
  1398 	}
  1399 
  1400 
  1401 EXPORT_C TUint32 DDmaRequest::TotalNumDstElementsTransferred()
  1402 	{
  1403 	// Not yet implemented.
  1404 	return iTotalNumDstElementsTransferred;
  1405 	}
  1406 
  1407 
  1408 EXPORT_C TInt DDmaRequest::FragmentCount()
  1409 	{
  1410 	return FragmentCount(iFirstHdr);
  1411 	}
  1412 
  1413 
  1414 EXPORT_C TInt DDmaRequest::SrcFragmentCount()
  1415 	{
  1416 	return FragmentCount(iSrcFirstHdr);
  1417 	}
  1418 
  1419 
  1420 EXPORT_C TInt DDmaRequest::DstFragmentCount()
  1421 	{
  1422 	return FragmentCount(iDstFirstHdr);
  1423 	}
  1424 
  1425 
  1426 TInt DDmaRequest::FragmentCount(const SDmaDesHdr* aHdr)
  1427 	{
  1428 	TInt count = 0;
  1429 	for (const SDmaDesHdr* pH = aHdr; pH != NULL; pH = pH->iNext)
  1430 		{
  1431 		count++;
  1432 		}
  1433 	return count;
  1434 	}
  1435 
  1436 
  1437 //
  1438 // Called when request is removed from request queue in channel
  1439 //
  1440 inline void DDmaRequest::OnDeque()
  1441 	{
  1442 	iQueued = EFalse;
  1443 	iLastHdr->iNext = NULL;
  1444 	iChannel.DoUnlink(*iLastHdr);
  1445 	}
  1446 
  1447 
  1448 #ifdef _DEBUG
  1449 void DDmaRequest::Invariant()
  1450 	{
  1451 	iChannel.Wait();
  1452 	__DMA_ASSERTD(LOGICAL_XOR(iCb, iDmaCb));
  1453 	if (iChannel.iDmacCaps->iAsymHwDescriptors)
  1454 		{
  1455 		__DMA_ASSERTD((0 <= iSrcDesCount) && (iSrcDesCount <= iChannel.iMaxDesCount) &&
  1456 					  (0 <= iDstDesCount) && (iDstDesCount <= iChannel.iMaxDesCount));
  1457 		if (iSrcDesCount == 0)
  1458 			{
  1459 			__DMA_ASSERTD(iDstDesCount == 0);
  1460 			__DMA_ASSERTD(!iQueued);
  1461 			__DMA_ASSERTD(!iSrcFirstHdr && !iSrcLastHdr &&
  1462 						  !iDstFirstHdr && !iDstLastHdr);
  1463 			}
  1464 		else
  1465 			{
  1466 			__DMA_ASSERTD(iChannel.iController->IsValidHdr(iSrcFirstHdr));
  1467 			__DMA_ASSERTD(iChannel.iController->IsValidHdr(iSrcLastHdr));
  1468 			__DMA_ASSERTD(iChannel.iController->IsValidHdr(iDstFirstHdr));
  1469 			__DMA_ASSERTD(iChannel.iController->IsValidHdr(iDstLastHdr));
  1470 			}
  1471 		}
  1472 	else
  1473 		{
  1474 		__DMA_ASSERTD((0 <= iDesCount) && (iDesCount <= iChannel.iMaxDesCount));
  1475 		if (iDesCount == 0)
  1476 			{
  1477 			__DMA_ASSERTD(!iQueued);
  1478 			__DMA_ASSERTD(!iFirstHdr && !iLastHdr);
  1479 			}
  1480 		else
  1481 			{
  1482 			__DMA_ASSERTD(iChannel.iController->IsValidHdr(iFirstHdr));
  1483 			__DMA_ASSERTD(iChannel.iController->IsValidHdr(iLastHdr));
  1484 			}
  1485 		}
  1486 	iChannel.Signal();
  1487 	}
  1488 #endif
  1489 
  1490 
  1491 //////////////////////////////////////////////////////////////////////////////
  1492 // TDmaChannel
  1493 
  1494 _LIT(KDmaChannelMutex, "DMA-Channel");
  1495 
  1496 TDmaChannel::TDmaChannel()
  1497 	: iController(NULL),
  1498 	  iDmacCaps(NULL),
  1499 	  iPslId(0),
  1500 	  iDynChannel(EFalse),
  1501 	  iPriority(KDmaPriorityNone),
  1502 	  iCurHdr(NULL),
  1503 	  iNullPtr(&iCurHdr),
  1504 	  iDfc(Dfc, NULL, 0),
  1505 	  iMaxDesCount(0),
  1506 	  iAvailDesCount(0),
  1507 	  iIsrDfc(0),
  1508 	  iReqQ(),
  1509 	  iReqCount(0),
  1510 	  iCancelInfo(NULL),
  1511 	  iRedoRequest(EFalse),
  1512 	  iIsrCbRequest(EFalse)
  1513 	{
  1514 	const TInt r = Kern::MutexCreate(iMutex, KDmaChannelMutex, KMutexOrdDmaChannel);
  1515 	__DMA_ASSERTA(r == KErrNone);
  1516 
  1517 #ifndef __WINS__
  1518 	// On the emulator this code is called from within the codeseg mutex.
  1519 	// The invariant tries to hold the DMA channel mutex, which is not allowed here.
  1520 	__DMA_INVARIANT();
  1521 #endif
  1522 	}
  1523 
  1524 
  1525 TDmaChannel::~TDmaChannel()
  1526 	{
  1527 	Kern::SafeClose((DObject*&)iMutex, NULL);
  1528 	}
  1529 
  1530 
  1531 //
  1532 // static member function
  1533 //
  1534 EXPORT_C TInt TDmaChannel::Open(const SCreateInfo& aInfo, TDmaChannel*& aChannel)
  1535 	{
  1536 	__KTRACE_OPT(KDMA, Kern::Printf("TDmaChannel::Open thread %O", &Kern::CurrentThread()));
  1537 
  1538 	__DMA_ASSERTD(aInfo.iDesCount >= 1);
  1539 	__DMA_ASSERTD(aInfo.iPriority <= KDmaPriority8);
  1540 	__DMA_ASSERTD(aInfo.iDfcQ != NULL);
  1541 	__DMA_ASSERTD(aInfo.iDfcPriority < KNumDfcPriorities);
  1542 
  1543 	aChannel = NULL;
  1544 
  1545 	DmaChannelMgr::Wait();
  1546 	TDmaChannel* pC = DmaChannelMgr::Open(aInfo.iCookie, aInfo.iDynChannel, aInfo.iPriority);
  1547 	DmaChannelMgr::Signal();
  1548 	if (!pC)
  1549 		{
  1550 		return KErrInUse;
  1551 		}
  1552 	__DMA_ASSERTD(pC->iController != NULL);
  1553 	__DMA_ASSERTD(pC->iDmacCaps != NULL);
  1554 	__DMA_ASSERTD(pC->iController->iCapsHwDes == pC->DmacCaps().iHwDescriptors);
  1555 	// PSL needs to set iDynChannel if and only if dynamic channel was requested
  1556 	__DMA_ASSERTD(!LOGICAL_XOR(aInfo.iDynChannel, pC->iDynChannel));
  1557 
  1558 	const TInt r = pC->iController->ReserveSetOfDes(aInfo.iDesCount);
  1559 	if (r != KErrNone)
  1560 		{
  1561 		pC->Close();
  1562 		return r;
  1563 		}
  1564 	pC->iAvailDesCount = pC->iMaxDesCount = aInfo.iDesCount;
  1565 
  1566 	new (&pC->iDfc) TDfc(&Dfc, pC, aInfo.iDfcQ, aInfo.iDfcPriority);
  1567 
  1568 	aChannel = pC;
  1569 
  1570 #ifdef _DEBUG
  1571 	pC->Invariant();
  1572 #endif
  1573 	__KTRACE_OPT(KDMA, Kern::Printf("opened channel %d", pC->iPslId));
  1574 	return KErrNone;
  1575 	}
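
// Opening a channel from a driver might look like this (a sketch; the
// cookie KMyPslChannelId, the DFC queue myDfcQ and the chosen values are
// assumptions):
//
//	TDmaChannel::SCreateInfo info;
//	info.iCookie = KMyPslChannelId;	// PSL-specific channel identifier
//	info.iDesCount = 8;				// transfer descriptors to reserve
//	info.iDfcQ = myDfcQ;			// queue for the channel DFC
//	info.iDfcPriority = 3;
//	TDmaChannel* channel = NULL;
//	const TInt r = TDmaChannel::Open(info, channel);
//	// On success 'channel' is ready for use with DDmaRequest objects;
//	// call channel->Close() when finished with it.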
  1576 
  1577 
  1578 EXPORT_C void TDmaChannel::Close()
  1579 	{
  1580 	__KTRACE_OPT(KDMA, Kern::Printf("TDmaChannel::Close %d iReqCount=%d", iPslId, iReqCount));
  1581 	__DMA_ASSERTD(IsQueueEmpty());
  1582 	__DMA_ASSERTD(iReqCount == 0);
  1583 
  1584 	// Descriptor leak? -> bug in request code
  1585 	__DMA_ASSERTD(iAvailDesCount == iMaxDesCount);
  1586 
  1587 	__DMA_ASSERTD(!iRedoRequest);
  1588 	__DMA_ASSERTD(!iIsrCbRequest);
  1589 
  1590 	iController->ReleaseSetOfDes(iMaxDesCount);
  1591 	iAvailDesCount = iMaxDesCount = 0;
  1592 
  1593 	DmaChannelMgr::Wait();
  1594 	DmaChannelMgr::Close(this);
  1595 	// The following assignment will be removed once IsOpened() has been
  1596 	// removed. That's because 'this' shouldn't be touched any more once
  1597 	// Close() has returned from the PSL.
  1598 	iController = NULL;
  1599 	DmaChannelMgr::Signal();
  1600 	}
  1601 
  1602 
  1603 EXPORT_C TInt TDmaChannel::LinkToChannel(TDmaChannel* aChannel)
  1604 	{
  1605 	__KTRACE_OPT(KDMA, Kern::Printf("TDmaChannel::LinkToChannel thread %O",
  1606 									&Kern::CurrentThread()));
  1607 	if (aChannel)
  1608 		{
  1609 		return iController->LinkChannels(*this, *aChannel);
  1610 		}
  1611 	else
  1612 		{
  1613 		return iController->UnlinkChannel(*this);
  1614 		}
  1615 	}
  1616 
  1617 
  1618 EXPORT_C TInt TDmaChannel::Pause()
  1619 	{
  1620 	__KTRACE_OPT(KDMA, Kern::Printf("TDmaChannel::Pause thread %O",
  1621 									&Kern::CurrentThread()));
  1622 	return iController->PauseTransfer(*this);
  1623 	}
  1624 
  1625 
  1626 EXPORT_C TInt TDmaChannel::Resume()
  1627 	{
  1628 	__KTRACE_OPT(KDMA, Kern::Printf("TDmaChannel::Resume thread %O",
  1629 									&Kern::CurrentThread()));
  1630 	return iController->ResumeTransfer(*this);
  1631 	}
  1632 
  1633 
  1634 EXPORT_C void TDmaChannel::CancelAll()
  1635 	{
  1636 	__KTRACE_OPT(KDMA, Kern::Printf("TDmaChannel::CancelAll thread %O channel - %d",
  1637 									&Kern::CurrentThread(), iPslId));
  1638 	NThread* const nt = NKern::CurrentThread();
  1639 	TBool wait = EFalse;
  1640 	TDmaCancelInfo cancelinfo;
  1641 	TDmaCancelInfo* waiters = NULL;
  1642 
  1643 	NKern::ThreadEnterCS();
  1644 	Wait();
  1645 
  1646 	NThreadBase* const dfc_nt = iDfc.Thread();
  1647 	// Shouldn't be NULL (i.e. an IDFC)
  1648 	__DMA_ASSERTD(dfc_nt);
  1649 
  1650 	__e32_atomic_store_ord32(&iIsrDfc, (TUint32)KCancelFlagMask);
  1651 	// ISRs after this point will not post a DFC, however a DFC may already be
  1652 	// queued or running or both.
  1653 	if (!IsQueueEmpty())
  1654 		{
  1655 		// There is a transfer in progress. It may complete before the DMAC
  1656 		// has stopped, but the resulting ISR will not post a DFC.
  1657 		// ISR should not happen after this function returns.
  1658 		iController->StopTransfer(*this);
  1659 
  1660 		ResetStateMachine();
  1661 
  1662 		// Clean-up the request queue.
  1663 		SDblQueLink* pL;
  1664 		while ((pL = iReqQ.GetFirst()) != NULL)
  1665 			{
  1666 			DDmaRequest* pR = _LOFF(pL, DDmaRequest, iLink);
  1667 			pR->OnDeque();
  1668 			}
  1669 		}
  1670 	if (dfc_nt == nt)
  1671 		{
  1672 		// DFC runs in this thread, so just cancel it and we're finished
  1673 		iDfc.Cancel();
  1674 
  1675 		// If other calls to CancelAll() are waiting for the DFC, release them here
  1676 		waiters = iCancelInfo;
  1677 		iCancelInfo = NULL;
  1678 
  1679 		// Reset the ISR count
  1680 		__e32_atomic_store_rel32(&iIsrDfc, 0);
  1681 		}
  1682 	else
  1683 		{
  1684 		// DFC runs in another thread. Make sure it's queued and then wait for it to run.
  1685 		if (iCancelInfo)
  1686 			{
  1687 			// Insert cancelinfo into the list so that it precedes iCancelInfo
  1688 			cancelinfo.InsertBefore(iCancelInfo);
  1689 			}
  1690 		else
  1691 			{
  1692 			iCancelInfo = &cancelinfo;
  1693 			}
  1694 		wait = ETrue;
  1695 		iDfc.Enque();
  1696 		}
  1697 
  1698 	Signal();
  1699 
  1700 	if (waiters)
  1701 		{
  1702 		waiters->Signal();
  1703 		}
  1704 	else if (wait)
  1705 		{
  1706 		NKern::FSWait(&cancelinfo.iSem);
  1707 		}
  1708 
  1709  	NKern::ThreadLeaveCS();
  1710 	__DMA_INVARIANT();
  1711 	}
  1712 
  1713 
  1714 EXPORT_C TInt TDmaChannel::IsrRedoRequest(TUint32 aSrcAddr, TUint32 aDstAddr,
  1715 										  TUint aTransferCount,
  1716 										  TUint32 aPslRequestInfo,
  1717 										  TBool aIsrCb)
  1718 	{
  1719 	__KTRACE_OPT(KDMA,
  1720 				 Kern::Printf("TDmaChannel::IsrRedoRequest src=0x%08x, "
  1721 							  "dst=0x%08x, count=%d, pslInfo=0x%08x, isrCb=%d",
  1722 							  aSrcAddr, aDstAddr, aTransferCount, aPslRequestInfo,
  1723 							  aIsrCb));
  1724 	// Function needs to be called in ISR context.
  1725 	__DMA_ASSERTD(NKern::CurrentContext() == NKern::EInterrupt);
  1726 
  1727 	__DMA_ASSERTD(!iReqQ.IsEmpty());
  1728 	__DMA_ASSERTD(iIsrCbRequest);
  1729 
  1730 #ifdef _DEBUG
  1731 	if ((aSrcAddr != KPhysAddrInvalid) && (aSrcAddr == aDstAddr))
  1732 		{
  1733 		__KTRACE_OPT(KPANIC,
  1734 					 Kern::Printf("Error: Updating src & dst to same address: 0x%08x",
  1735 								  aSrcAddr));
  1736 		return KErrArgument;
  1737 		}
  1738 #endif
  1739 
  1740 	// We assume here that the just completed request is the first one in the
  1741 	// queue, i.e. that even if there is more than one request in the queue,
  1742 	// their respective last and first (hw) descriptors are *not* linked.
  1743 	// (Although that's what apparently happens in TDmaSgChannel::DoQueue() /
  1744 	// TDmac::AppendHwDes() @@@).
  1745 	DDmaRequest* const pCurReq = _LOFF(iReqQ.First(), DDmaRequest, iLink);
  1746 	TInt r;
  1747 
  1748 	if (iDmacCaps->iAsymHwDescriptors)
  1749 		{
  1750 		// We don't allow multiple-descriptor chains to be updated here
  1751 		__DMA_ASSERTD((pCurReq->iSrcDesCount == 1) && (pCurReq->iDstDesCount == 1));
  1752 		// Adjust parameters if necessary (asymmetrical s/g variety)
  1753 		const SDmaDesHdr* const pSrcFirstHdr = pCurReq->iSrcFirstHdr;
  1754 		if ((aSrcAddr != KPhysAddrInvalid) || aTransferCount || aPslRequestInfo)
  1755 			{
  1756 			r = iController->UpdateSrcHwDes(*pSrcFirstHdr, aSrcAddr,
  1757 											aTransferCount, aPslRequestInfo);
  1758 			if (r != KErrNone)
  1759 				{
  1760 				__KTRACE_OPT(KPANIC, Kern::Printf("Src descriptor updating failed in PSL"));
  1761 				return r;
  1762 				}
  1763 			}
  1764 		const SDmaDesHdr* const pDstFirstHdr = pCurReq->iDstFirstHdr;
  1765 		if ((aDstAddr != KPhysAddrInvalid) || aTransferCount || aPslRequestInfo)
  1766 			{
1767 			r = iController->UpdateDstHwDes(*pDstFirstHdr, aDstAddr,
  1768 											aTransferCount, aPslRequestInfo);
  1769 			if (r != KErrNone)
  1770 				{
  1771 				__KTRACE_OPT(KPANIC, Kern::Printf("Dst descriptor updating failed in PSL"));
  1772 				return r;
  1773 				}
  1774 			}
  1775 		// Reschedule the request
  1776 		iController->Transfer(*this, *pSrcFirstHdr, *pDstFirstHdr);
  1777 		}
  1778 	else
  1779 		{
  1780 		// We don't allow multiple-descriptor chains to be updated here
  1781 		__DMA_ASSERTD(pCurReq->iDesCount == 1);
  1782 		// Adjust parameters if necessary (symmetrical s/g and non-s/g variety)
  1783 		const SDmaDesHdr* const pFirstHdr = pCurReq->iFirstHdr;
  1784 		if ((aSrcAddr != KPhysAddrInvalid) || (aDstAddr != KPhysAddrInvalid) ||
  1785 			aTransferCount || aPslRequestInfo)
  1786 			{
  1787 			r = iController->UpdateDes(*pFirstHdr, aSrcAddr, aDstAddr,
  1788 									   aTransferCount, aPslRequestInfo);
  1789 			if (r != KErrNone)
  1790 				{
  1791 				__KTRACE_OPT(KPANIC, Kern::Printf("Descriptor updating failed"));
  1792 				return r;
  1793 				}
  1794 			}
  1795 		// Reschedule the request
  1796 		iController->Transfer(*this, *pFirstHdr);
  1797 		}
  1798 
  1799 	if (!aIsrCb)
  1800 		{
1801 		// The client doesn't want another ISR callback for this request
  1802 		pCurReq->iIsrCb = aIsrCb;
  1803 		}
  1804 	iRedoRequest = ETrue;
  1805 
  1806 	return KErrNone;
  1807 	}
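
// Usage sketch (hypothetical client code, not part of this file): an ISR
// callback which re-uses the just-completed request with a fresh destination
// buffer. The callback's parameter shape follows the invocation in DoDfc()
// below; 'TheChannel' and 'NextFreeBufferPhysAddr()' are illustrative
// placeholders, and the exact parameter types are assumptions. Passing
// KPhysAddrInvalid (or 0 for count/info) leaves that field unchanged.
//
//	static void XferIsrCb(TDmaCallbackType aType, TDmaResult aResult,
//						  TAny* aArg, TAny* /*aMisc*/)
//		{
//		if ((aType == EDmaCallbackRequestCompletion) && (aResult == EDmaResultOK))
//			{
//			const TUint32 dst = NextFreeBufferPhysAddr();
//			// Keep src address, count and PSL info; request another ISR callback
//			const TInt r = TheChannel->IsrRedoRequest(KPhysAddrInvalid, dst, 0, 0, ETrue);
//			// (handle r != KErrNone as appropriate)
//			}
//		}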
  1808 
  1809 
  1810 EXPORT_C TInt TDmaChannel::FailNext(TInt /*aFragmentCount*/)
  1811 	{
  1812 	return iController->FailNext(*this);
  1813 	}
  1814 
  1815 
  1816 EXPORT_C TInt TDmaChannel::MissNextInterrupts(TInt aInterruptCount)
  1817 	{
  1818 	return iController->MissNextInterrupts(*this, aInterruptCount);
  1819 	}
  1820 
  1821 
  1822 EXPORT_C TInt TDmaChannel::Extension(TInt aCmd, TAny* aArg)
  1823 	{
  1824 	return iController->Extension(*this, aCmd, aArg);
  1825 	}
  1826 
  1827 
  1828 //
  1829 // static member function
  1830 //
  1831 EXPORT_C TInt TDmaChannel::StaticExtension(TInt aCmd, TAny* aArg)
  1832 	{
  1833 	return DmaChannelMgr::StaticExtension(aCmd, aArg);
  1834 	}
  1835 
  1836 
  1837 EXPORT_C TUint TDmaChannel::MaxTransferLength(TUint aSrcFlags, TUint aDstFlags,
  1838 											  TUint32 aPslInfo)
  1839 	{
  1840 	return iController->MaxTransferLength(*this, aSrcFlags, aDstFlags, aPslInfo);
  1841 	}
  1842 
  1843 
  1844 EXPORT_C TUint TDmaChannel::AddressAlignMask(TUint aTargetFlags, TUint aElementSize,
  1845 											 TUint32 aPslInfo)
  1846 	{
  1847 	return iController->AddressAlignMask(*this, aTargetFlags, aElementSize, aPslInfo);
  1848 	}
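
// Usage sketch (hypothetical client code): querying the channel's limits
// before fragmenting a transfer. The flag value (KDmaMemAddr) and the zero
// element size / PSL info are illustrative assumptions only.
//
//	const TUint maxLen = chan->MaxTransferLength(KDmaMemAddr, KDmaMemAddr, 0);
//	const TUint mask = chan->AddressAlignMask(KDmaMemAddr, 0, 0);
//	// Each fragment should then be at most maxLen bytes, with addresses
//	// whose bits in 'mask' are clear.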
  1849 
  1850 
  1851 EXPORT_C const SDmacCaps& TDmaChannel::DmacCaps()
  1852 	{
  1853 	return *iDmacCaps;
  1854 	}
  1855 
  1856 
  1857 //
  1858 // DFC callback function (static member).
  1859 //
  1860 void TDmaChannel::Dfc(TAny* aArg)
  1861 	{
  1862 	static_cast<TDmaChannel*>(aArg)->DoDfc();
  1863 	}
  1864 
  1865 
  1866 //
  1867 // This is quite a long function, but what can you do...
  1868 //
  1869 void TDmaChannel::DoDfc()
  1870 	{
  1871 	__KTRACE_OPT(KDMA, Kern::Printf("TDmaChannel::DoDfc thread %O channel - %d",
  1872 									&Kern::CurrentThread(), iPslId));
  1873 	Wait();
  1874 
  1875 	// Atomically fetch and reset the number of DFCs queued by the ISR and the
  1876 	// error flag. Leave the cancel flag alone for now.
  1877 	const TUint32 w = __e32_atomic_and_ord32(&iIsrDfc, (TUint32)KCancelFlagMask);
  1878 	TUint32 count = w & KDfcCountMask;
  1879 	const TBool error = w & (TUint32)KErrorFlagMask;
  1880 	TBool stop = w & (TUint32)KCancelFlagMask;
  1881 	__DMA_ASSERTD((count > 0) || stop);
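	// (iIsrDfc packs the DFC count and the error and cancel flags into a
	// single word, partitioned by KDfcCountMask, KErrorFlagMask and
	// KCancelFlagMask, so that the ISR and this DFC can exchange all three
	// values in a single atomic operation.)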
  1882 
  1883 	__DMA_ASSERTD(!iRedoRequest); // We shouldn't be here if this is true
  1884 
  1885 	while (count && !stop)
  1886 		{
  1887 		--count;
  1888 
  1889 		__DMA_ASSERTD(!iReqQ.IsEmpty());
  1890 
  1891 		// If an error occurred it must have been reported on the last
  1892 		// interrupt since transfers are suspended after an error.
  1893 		DDmaRequest::TResult const res = (count == 0 && error) ?
  1894 			DDmaRequest::EError : DDmaRequest::EOk;
  1895 		DDmaRequest* pCompletedReq = NULL;
  1896 		DDmaRequest* const pCurReq = _LOFF(iReqQ.First(), DDmaRequest, iLink);
  1897 
  1898 		if (res == DDmaRequest::EOk)
  1899 			{
  1900 			// Update state machine, current fragment, completed fragment and
  1901 			// tell the DMAC to transfer the next fragment if necessary.
  1902 			SDmaDesHdr* pCompletedHdr = NULL;
  1903 			DoDfc(const_cast<const DDmaRequest&>(*pCurReq), pCompletedHdr);
  1904 
  1905 			// If just completed last fragment from current request, switch to
  1906 			// next request (if any).
  1907 			if (pCompletedHdr == pCurReq->iLastHdr)
  1908 				{
  1909 				pCompletedReq = pCurReq;
  1910 				pCurReq->iLink.Deque();
  1911 				if (iReqQ.IsEmpty())
  1912 					iNullPtr = &iCurHdr;
  1913 				pCompletedReq->OnDeque();
  1914 				}
  1915 			}
  1916 		else
  1917 			{
  1918 			pCompletedReq = pCurReq;
  1919 			}
  1920 
  1921 		if (pCompletedReq && !pCompletedReq->iIsrCb)
  1922 			{
  1923 			// Don't execute ISR callbacks here (they have already been called)
  1924 			DDmaRequest::TCallback const cb = pCompletedReq->iCb;
  1925 			if (cb)
  1926 				{
  1927 				// Old style callback
  1928 				TAny* const arg = pCompletedReq->iCbArg;
  1929 				Signal();
  1930 				__KTRACE_OPT(KDMA, Kern::Printf("Client CB res=%d", res));
  1931 				(*cb)(res, arg);
  1932 				Wait();
  1933 				}
  1934 			else
  1935 				{
  1936 				// New style callback
  1937 				TDmaCallback const ncb = pCompletedReq->iDmaCb;
  1938 				if (ncb)
  1939 					{
  1940 					TAny* const arg = pCompletedReq->iDmaCbArg;
  1941 					TDmaResult const result = (res == DDmaRequest::EOk) ?
  1942 						EDmaResultOK : EDmaResultError;
  1943 					Signal();
  1944 					__KTRACE_OPT(KDMA, Kern::Printf("Client CB result=%d", result));
  1945 					(*ncb)(EDmaCallbackRequestCompletion, result, arg, NULL);
  1946 					Wait();
  1947 					}
  1948 				}
  1949 			}
  1950 		else
  1951 			{
1952 			// Allow another thread in, in case it is trying to cancel
  1953 			Flash();
  1954 			}
  1955 		stop = __e32_atomic_load_acq32(&iIsrDfc) & (TUint32)KCancelFlagMask;
  1956 		}
  1957 
  1958 	// Some interrupts may be missed (double-buffer and scatter-gather
  1959 	// controllers only) if two or more transfers complete while interrupts are
  1960 	// disabled in the CPU. If this happens, the framework will go out of sync
  1961 	// and leave some orphaned requests in the queue.
  1962 	//
  1963 	// To ensure correctness we handle this case here by checking that the request
  1964 	// queue is empty when all transfers have completed and, if not, cleaning up
  1965 	// and notifying the client of the completion of the orphaned requests.
  1966 	//
  1967 	// Note that if some interrupts are missed and the controller raises an
  1968 	// error while transferring a subsequent fragment, the error will be reported
  1969 	// on a fragment which was successfully completed.  There is no easy solution
  1970 	// to this problem, but this is okay as the only possible action following a
  1971 	// failure is to flush the whole queue.
  1972 	if (stop)
  1973 		{
  1974 		// If another thread set the cancel flag, it should have
  1975 		// cleaned up the request queue
  1976 		__DMA_ASSERTD(IsQueueEmpty());
  1977 
  1978 		TDmaCancelInfo* const waiters = iCancelInfo;
  1979 		iCancelInfo = NULL;
  1980 
  1981 		// make sure DFC doesn't run again until a new request completes
  1982 		iDfc.Cancel();
  1983 
  1984 		// reset the ISR count - new requests can now be processed
  1985 		__e32_atomic_store_rel32(&iIsrDfc, 0);
  1986 
  1987 		Signal();
  1988 
  1989 		// release threads doing CancelAll()
  1990 		waiters->Signal();
  1991 		}
  1992 	else if (!error && !iReqQ.IsEmpty() && iController->IsIdle(*this))
  1993 		{
  1994 #ifdef __SMP__
1995 		// On an SMP system we must call StopTransfer(): it will block until
1996 		// any ISRs have completed, so that the system does not spuriously
1997 		// attempt to recover from a missed interrupt.
  1998 		//
  1999 		// On an SMP system it is possible for the code here to execute
  2000 		// concurrently with the DMA ISR. It is therefore possible that at this
  2001 		// point the previous transfer has already completed (so that IsIdle
  2002 		// reports true), but that the ISR has not yet queued a DFC. Therefore
  2003 		// we must wait for the ISR to complete.
  2004 		//
  2005 		// StopTransfer should have no other side effect, given that the
  2006 		// channel is already idle.
  2007 		iController->StopTransfer(*this); // should block till ISR completion
  2008 #endif
  2009 
  2010 		const TBool cleanup = !iDfc.Queued();
2011 		if (cleanup)
  2012 			{
  2013 			__KTRACE_OPT(KDMA, Kern::Printf("Missed interrupt(s) - draining request queue"));
  2014 			ResetStateMachine();
  2015 
2016 			// Move orphaned requests to a temporary queue so that the channel
2017 			// queue can accept new requests.
  2018 			SDblQue q;
  2019 			q.MoveFrom(&iReqQ);
  2020 
  2021 			SDblQueLink* pL;
  2022 			while ((pL = q.GetFirst()) != NULL)
  2023 				{
  2024 				DDmaRequest* const pR = _LOFF(pL, DDmaRequest, iLink);
  2025 				__KTRACE_OPT(KDMA, Kern::Printf("Removing request from queue and notifying client"));
  2026 				pR->OnDeque();
  2027 				// Old style callback
  2028 				DDmaRequest::TCallback const cb = pR->iCb;
  2029 				if (cb)
  2030 					{
  2031 					TAny* const arg = pR->iCbArg;
  2032 					Signal();
  2033 					(*cb)(DDmaRequest::EOk, arg);
  2034 					Wait();
  2035 					}
  2036 				else
  2037 					{
  2038 					// New style callback
  2039 					TDmaCallback const ncb = pR->iDmaCb;
  2040 					if (ncb)
  2041 						{
  2042 						TAny* const arg = pR->iDmaCbArg;
  2043 						Signal();
  2044 						(*ncb)(EDmaCallbackRequestCompletion, EDmaResultOK, arg, NULL);
  2045 						Wait();
  2046 						}
  2047 					}
  2048 				}
  2049 			}
  2050 		Signal();
  2051 		}
  2052 	else
  2053 		Signal();
  2054 
  2055 	__DMA_INVARIANT();
  2056 	}
  2057 
  2058 
  2059 //
2060 // Reset the state machine only; the request queue is unchanged.
  2061 //
  2062 void TDmaChannel::ResetStateMachine()
  2063 	{
  2064 	DoCancelAll();
  2065 	iCurHdr = NULL;
  2066 	iNullPtr = &iCurHdr;
  2067 	}
  2068 
  2069 
  2070 void TDmaChannel::DoQueue(const DDmaRequest& /*aReq*/)
  2071 	{
  2072 	// Must be overridden
  2073 	__DMA_CANT_HAPPEN();
  2074 	}
  2075 
  2076 
  2077 //
2078 // Unlink the last item of an LLI chain from the next chain.
  2079 // Default implementation does nothing. This is overridden by scatter-gather
  2080 // channels.
  2081 //
  2082 void TDmaChannel::DoUnlink(SDmaDesHdr& /*aHdr*/)
  2083 	{
  2084 	}
  2085 
  2086 
  2087 void TDmaChannel::DoDfc(const DDmaRequest& /*aCurReq*/, SDmaDesHdr*& /*aCompletedHdr*/)
  2088 	{
2089 	// This default implementation exists to trap calls on channel types for
2090 	// which this overload is not appropriate (and which therefore do not
2091 	// override it).
  2092 	__DMA_CANT_HAPPEN();
  2093 	}
  2094 
  2095 
  2096 void TDmaChannel::DoDfc(const DDmaRequest& /*aCurReq*/, SDmaDesHdr*& /*aSrcCompletedHdr*/,
  2097 						SDmaDesHdr*& /*aDstCompletedHdr*/)
  2098 	{
2099 	// This default implementation exists to trap calls on channel types for
2100 	// which this overload is not appropriate (and which therefore do not
2101 	// override it).
  2102 	__DMA_CANT_HAPPEN();
  2103 	}
  2104 
  2105 
  2106 #ifdef _DEBUG
  2107 void TDmaChannel::Invariant()
  2108 	{
  2109 	Wait();
  2110 
  2111 	__DMA_ASSERTD(iReqCount >= 0);
  2112 
  2113 	__DMA_ASSERTD(iCurHdr == NULL || iController->IsValidHdr(iCurHdr));
  2114 
2115 	// iNullPtr should always point to the NULL pointer terminating the fragment queue
  2116 	__DMA_ASSERTD(*iNullPtr == NULL);
  2117 
  2118 	__DMA_ASSERTD((0 <= iAvailDesCount) && (iAvailDesCount <= iMaxDesCount));
  2119 
  2120 	__DMA_ASSERTD(LOGICAL_XOR(iCurHdr, IsQueueEmpty()));
  2121 	if (iCurHdr == NULL)
  2122 		{
  2123 		__DMA_ASSERTD(iNullPtr == &iCurHdr);
  2124 		}
  2125 
  2126 	Signal();
  2127 	}
  2128 #endif
  2129 
  2130 
  2131 //////////////////////////////////////////////////////////////////////////////
  2132 // TDmaSbChannel
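//
// Single-buffer channel: only one descriptor is programmed into the DMAC at
// a time, so the state machine below simply alternates between EIdle and
// ETransferring.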
  2133 
  2134 void TDmaSbChannel::DoQueue(const DDmaRequest& /*aReq*/)
  2135 	{
  2136 	if (iState != ETransferring)
  2137 		{
  2138 		iController->Transfer(*this, *iCurHdr);
  2139 		iState = ETransferring;
  2140 		}
  2141 	}
  2142 
  2143 
  2144 void TDmaSbChannel::DoCancelAll()
  2145 	{
  2146 	__DMA_ASSERTD(iState == ETransferring);
  2147 	iState = EIdle;
  2148 	}
  2149 
  2150 
  2151 void TDmaSbChannel::DoDfc(const DDmaRequest& /*aCurReq*/, SDmaDesHdr*& aCompletedHdr)
  2152 	{
  2153 	__DMA_ASSERTD(iState == ETransferring);
  2154 	aCompletedHdr = iCurHdr;
  2155 	iCurHdr = iCurHdr->iNext;
  2156 	if (iCurHdr != NULL)
  2157 		{
  2158 		iController->Transfer(*this, *iCurHdr);
  2159 		}
  2160 	else
  2161 		{
  2162 		iState = EIdle;
  2163 		}
  2164 	}
  2165 
  2166 
  2167 //////////////////////////////////////////////////////////////////////////////
  2168 // TDmaDbChannel
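//
// Double-buffer channel: up to two descriptors can be programmed at once.
// States as implemented below:
//   EIdle             - nothing is programmed
//   ETransferringLast - only the final descriptor is programmed
//   ETransferring     - the current and the next descriptor are programmed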
  2169 
  2170 void TDmaDbChannel::DoQueue(const DDmaRequest& aReq)
  2171 	{
  2172 	switch (iState)
  2173 		{
  2174 	case EIdle:
  2175 		iController->Transfer(*this, *iCurHdr);
  2176 		if (iCurHdr->iNext)
  2177 			{
  2178 			iController->Transfer(*this, *(iCurHdr->iNext));
  2179 			iState = ETransferring;
  2180 			}
  2181 		else
  2182 			iState = ETransferringLast;
  2183 		break;
  2184 	case ETransferring:
  2185 		// nothing to do
  2186 		break;
  2187 	case ETransferringLast:
  2188 		iController->Transfer(*this, *(aReq.iFirstHdr));
  2189 		iState = ETransferring;
  2190 		break;
  2191 	default:
  2192 		__DMA_CANT_HAPPEN();
  2193 		}
  2194 	}
  2195 
  2196 
  2197 void TDmaDbChannel::DoCancelAll()
  2198 	{
  2199 	iState = EIdle;
  2200 	}
  2201 
  2202 
  2203 void TDmaDbChannel::DoDfc(const DDmaRequest& /*aCurReq*/, SDmaDesHdr*& aCompletedHdr)
  2204 	{
  2205 	aCompletedHdr = iCurHdr;
  2206 	iCurHdr = iCurHdr->iNext;
  2207 	switch (iState)
  2208 		{
  2209 	case ETransferringLast:
  2210 		iState = EIdle;
  2211 		break;
  2212 	case ETransferring:
  2213 		if (iCurHdr->iNext == NULL)
  2214 			iState = ETransferringLast;
  2215 		else
  2216 			iController->Transfer(*this, *(iCurHdr->iNext));
  2217 		break;
  2218 	default:
  2219 		__DMA_CANT_HAPPEN();
  2220 		}
  2221 	}
  2222 
  2223 
  2224 //////////////////////////////////////////////////////////////////////////////
  2225 // TDmaSgChannel
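//
// Scatter-gather channel: the DMAC follows a linked chain of hardware
// descriptors, so a request queued while a transfer is in progress is
// appended to the running chain (see DoQueue() below) rather than waiting
// to be started from an interrupt.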
  2226 
  2227 void TDmaSgChannel::DoQueue(const DDmaRequest& aReq)
  2228 	{
  2229 	if (iState == ETransferring)
  2230 		{
  2231 		__DMA_ASSERTD(!aReq.iLink.Alone());
  2232 		DDmaRequest* pReqPrev = _LOFF(aReq.iLink.iPrev, DDmaRequest, iLink);
  2233 		iController->AppendHwDes(*this, *(pReqPrev->iLastHdr), *(aReq.iFirstHdr));
  2234 		}
  2235 	else
  2236 		{
  2237 		iController->Transfer(*this, *(aReq.iFirstHdr));
  2238 		iState = ETransferring;
  2239 		}
  2240 	}
  2241 
  2242 
  2243 void TDmaSgChannel::DoCancelAll()
  2244 	{
  2245 	__DMA_ASSERTD(iState == ETransferring);
  2246 	iState = EIdle;
  2247 	}
  2248 
  2249 
  2250 void TDmaSgChannel::DoUnlink(SDmaDesHdr& aHdr)
  2251 	{
  2252 	iController->UnlinkHwDes(*this, aHdr);
  2253 	}
  2254 
  2255 
  2256 void TDmaSgChannel::DoDfc(const DDmaRequest& aCurReq, SDmaDesHdr*& aCompletedHdr)
  2257 	{
  2258 	__DMA_ASSERTD(iState == ETransferring);
  2259 	aCompletedHdr = aCurReq.iLastHdr;
  2260 	iCurHdr = aCompletedHdr->iNext;
  2261 	iState = (iCurHdr != NULL) ? ETransferring : EIdle;
  2262 	}
  2263 
  2264 
  2265 //////////////////////////////////////////////////////////////////////////////
  2266 // TDmaAsymSgChannel
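//
// Asymmetric scatter-gather channel: the source and destination sides have
// separate descriptor chains (iSrcCurHdr / iDstCurHdr) which advance in
// lockstep - hence the both-NULL-or-neither assertion in DoDfc() below.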
  2267 
  2268 void TDmaAsymSgChannel::DoQueue(const DDmaRequest& aReq)
  2269 	{
  2270 	if (iState == ETransferring)
  2271 		{
  2272 		__DMA_ASSERTD(!aReq.iLink.Alone());
  2273 		DDmaRequest* pReqPrev = _LOFF(aReq.iLink.iPrev, DDmaRequest, iLink);
  2274 		iController->AppendHwDes(*this,
  2275 								 *(pReqPrev->iSrcLastHdr), *(aReq.iSrcFirstHdr),
  2276 								 *(pReqPrev->iDstLastHdr), *(aReq.iDstFirstHdr));
  2277 		}
  2278 	else
  2279 		{
  2280 		iController->Transfer(*this, *(aReq.iSrcFirstHdr), *(aReq.iDstFirstHdr));
  2281 		iState = ETransferring;
  2282 		}
  2283 	}
  2284 
  2285 
  2286 void TDmaAsymSgChannel::DoCancelAll()
  2287 	{
  2288 	__DMA_ASSERTD(iState == ETransferring);
  2289 	iState = EIdle;
  2290 	}
  2291 
  2292 
  2293 void TDmaAsymSgChannel::DoUnlink(SDmaDesHdr& aHdr)
  2294 	{
  2295 	iController->UnlinkHwDes(*this, aHdr);
  2296 	}
  2297 
  2298 
  2299 void TDmaAsymSgChannel::DoDfc(const DDmaRequest& aCurReq, SDmaDesHdr*& aSrcCompletedHdr,
  2300 							  SDmaDesHdr*& aDstCompletedHdr)
  2301 	{
  2302 	__DMA_ASSERTD(iState == ETransferring);
  2303 	aSrcCompletedHdr = aCurReq.iSrcLastHdr;
  2304 	iSrcCurHdr = aSrcCompletedHdr->iNext;
  2305 	aDstCompletedHdr = aCurReq.iDstLastHdr;
  2306 	iDstCurHdr = aDstCompletedHdr->iNext;
2307 	// Either both must be NULL or neither.
  2308 	__DMA_ASSERTD(!LOGICAL_XOR(iSrcCurHdr, iDstCurHdr));
  2309 	iState = (iSrcCurHdr != NULL) ? ETransferring : EIdle;
  2310 	}
  2311