// Copyright (c) 2002-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// e32/drivers/dma2_pil.cpp
// DMA Platform Independent Layer (PIL)
//

#include <drivers/dma.h>
#include <drivers/dma_hai.h>

#include <kernel/kern_priv.h>
// Symbian Min() & Max() are broken, so we have to define them ourselves
inline TUint Min(TUint aLeft, TUint aRight)
	{return(aLeft < aRight ? aLeft : aRight);}
inline TUint Max(TUint aLeft, TUint aRight)
	{return(aLeft > aRight ? aLeft : aRight);}
// Uncomment the following #define only when freezing the DMA2 export library.
//#define __FREEZE_DMA2_LIB
#ifdef __FREEZE_DMA2_LIB
TInt DmaChannelMgr::StaticExtension(TInt, TAny*) {return 0;}
TDmaChannel* DmaChannelMgr::Open(TUint32, TBool, TUint) {return 0;}
void DmaChannelMgr::Close(TDmaChannel*) {}
EXPORT_C const TDmaTestInfo& DmaTestInfo() {static TDmaTestInfo a; return a;}
EXPORT_C const TDmaV2TestInfo& DmaTestInfoV2() {static TDmaV2TestInfo a; return a;}
#endif	// #ifdef __FREEZE_DMA2_LIB
static const char KDmaPanicCat[] = "DMA " __FILE__;

//////////////////////////////////////////////////////////////////////
// DmaChannelMgr
//
// Wait, Signal, and Initialise are defined here in the PIL.
// Open, Close and Extension must be defined in the PSL.

NFastMutex DmaChannelMgr::Lock;
void DmaChannelMgr::Wait()
	{
	NKern::FMWait(&Lock);
	}

void DmaChannelMgr::Signal()
	{
	NKern::FMSignal(&Lock);
	}

TInt DmaChannelMgr::Initialise()
	{
	return KErrNone;
	}
class TDmaCancelInfo : public SDblQueLink
	{
public:
	TDmaCancelInfo();
	void Signal();
public:
	NFastSemaphore iSem;
	};

TDmaCancelInfo::TDmaCancelInfo()
	: iSem(0)
	{
	// Self-linked on creation
	iNext = this;
	iPrev = this;
	}

void TDmaCancelInfo::Signal()
	{
	TDmaCancelInfo* p = this;
	FOREVER
		{
		TDmaCancelInfo* next = (TDmaCancelInfo*)p->iNext;
		if (p != next)
			p->Deque();
		NKern::FSSignal(&p->iSem);	// Don't dereference p after this
		if (p == next)
			break;
		p = next;
		}
	}
//////////////////////////////////////////////////////////////////////////////

#ifdef __DMASIM__
// Simulator build: 'physical' addresses are simply the linear ones
typedef TLinAddr TPhysAddr;
static inline TPhysAddr LinToPhys(TLinAddr aLin) {return aLin;}
#else
static inline TPhysAddr LinToPhys(TLinAddr aLin) {return Epoc::LinearToPhysical(aLin);}
#endif
// Return minimum of aMaxSize and size of largest physically contiguous block
// starting at aLinAddr.
//
static TInt MaxPhysSize(TLinAddr aLinAddr, const TInt aMaxSize)
	{
	const TPhysAddr physBase = LinToPhys(aLinAddr);
	TLinAddr lin = aLinAddr;
	TInt size = 0;
	for (;;)
		{
		// Round up the linear address to the next MMU page boundary
		const TLinAddr linBoundary = Kern::RoundToPageSize(lin + 1);
		size += linBoundary - lin;
		if (size >= aMaxSize)
			return aMaxSize;
		if ((physBase + size) != LinToPhys(linBoundary))
			return size;
		lin = linBoundary;
		}
	}
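// Illustrative example (an assumption-based sketch, not part of the driver):
// with 4KB MMU pages, a buffer whose linear address lies 16 bytes below a
// page boundary yields at most 16 contiguous bytes if the next page happens
// to be physically discontiguous:
//
//   const TInt contig = MaxPhysSize(0x40000FF0, 4096);
//   // contig == 16 if the page at 0x40001000 is not physically adjacent,
//   // otherwise the scan continues up to the 4096-byte cap.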
//////////////////////////////////////////////////////////////////////////////
// TDmac

TDmac::TDmac(const SCreateInfo& aInfo)
	: iMaxDesCount(aInfo.iDesCount),
	  iAvailDesCount(aInfo.iDesCount),

	  iDesSize(aInfo.iDesSize),
	  iCapsHwDes(aInfo.iCapsHwDes)
	{
	__DMA_ASSERTD(iMaxDesCount > 0);
	__DMA_ASSERTD(iDesSize > 0);
	}
// Second-phase c'tor
//
TInt TDmac::Create(const SCreateInfo& aInfo)
	{
	iHdrPool = new SDmaDesHdr[iMaxDesCount];
	if (iHdrPool == NULL)
		{
		return KErrNoMemory;
		}

	TInt r = AllocDesPool(aInfo.iDesChunkAttribs);
	if (r != KErrNone)
		{
		return r;
		}

	// Link all descriptor headers together on the free list
	iFreeHdr = iHdrPool;
	for (TInt i = 0; i < iMaxDesCount - 1; i++)
		iHdrPool[i].iNext = iHdrPool + i + 1;
	iHdrPool[iMaxDesCount-1].iNext = NULL;

	return KErrNone;
	}
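// Resulting free list of descriptor headers after Create() (illustration):
//
//   iFreeHdr -> iHdrPool[0] -> iHdrPool[1] -> ... -> iHdrPool[iMaxDesCount-1] -> NULL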
void TDmac::Transfer(const TDmaChannel& /*aChannel*/, const SDmaDesHdr& /*aHdr*/)
	{
	// TDmac needs to override this function if it has reported the channel
	// type for which the PIL calls it.
	__DMA_CANT_HAPPEN();
	}

void TDmac::Transfer(const TDmaChannel& /*aChannel*/, const SDmaDesHdr& /*aSrcHdr*/,
					 const SDmaDesHdr& /*aDstHdr*/)
	{
	// TDmac needs to override this function if it has reported the channel
	// type for which the PIL calls it.
	__DMA_CANT_HAPPEN();
	}

TInt TDmac::PauseTransfer(const TDmaChannel& /*aChannel*/)
	{
	// TDmac needs to override this function if it has reported support for
	// channel pausing/resuming.
	return KErrNotSupported;
	}

TInt TDmac::ResumeTransfer(const TDmaChannel& /*aChannel*/)
	{
	// TDmac needs to override this function if it has reported support for
	// channel pausing/resuming.
	return KErrNotSupported;
	}
TInt TDmac::AllocDesPool(TUint aAttribs)
	{
	// Calling thread must be in CS
	TInt r;
	if (iCapsHwDes)
		{
		const TInt size = iMaxDesCount * iDesSize;
#ifdef __WINS__
		(void)aAttribs;
		iDesPool = new TUint8[size];
		r = iDesPool ? KErrNone : KErrNoMemory;
#else
		// Chunk must be mapped as supervisor r/w, user none; anything else
		// means the PSL has passed an incorrect attribute mask.
		__DMA_ASSERTD((aAttribs & EMapAttrAccessMask) == EMapAttrSupRw);
		TPhysAddr phys;
		r = Epoc::AllocPhysicalRam(size, phys);
		if (r == KErrNone)
			{
			r = DPlatChunkHw::New(iHwDesChunk, phys, size, aAttribs);
			if (r == KErrNone)
				{
				iDesPool = (TAny*)iHwDesChunk->LinearAddress();
				__KTRACE_OPT(KDMA, Kern::Printf("descriptor hw chunk created lin=0x%08X phys=0x%08X, size=0x%X",
												iHwDesChunk->iLinAddr, iHwDesChunk->iPhysAddr, size));
				}
			else
				{
				Epoc::FreePhysicalRam(phys, size);
				}
			}
#endif
		}
	else
		{
		iDesPool = new TDmaTransferArgs[iMaxDesCount];
		r = iDesPool ? KErrNone : KErrNoMemory;
		}
	return r;
	}
void TDmac::FreeDesPool()
	{
	// Calling thread must be in CS
	if (iCapsHwDes)
		{
#ifdef __WINS__
		delete[] iDesPool;
#else
		if (iHwDesChunk)
			{
			const TPhysAddr phys = iHwDesChunk->PhysicalAddress();
			const TInt size = iHwDesChunk->iSize;
			iHwDesChunk->Close(NULL);
			Epoc::FreePhysicalRam(phys, size);
			}
#endif
		}
	else
		{
		Kern::Free(iDesPool);
		}
	}
// Prealloc the given number of descriptors.
//
TInt TDmac::ReserveSetOfDes(TInt aCount)
	{
	__KTRACE_OPT(KDMA, Kern::Printf("TDmac::ReserveSetOfDes count=%d", aCount));
	__DMA_ASSERTD(aCount > 0);
	TInt r = KErrTooBig;
	Wait();
	if (iAvailDesCount - aCount >= 0)
		{
		iAvailDesCount -= aCount;
		r = KErrNone;
		}
	Signal();
	return r;
	}

// Return the given number of preallocated descriptors to the free pool.
//
void TDmac::ReleaseSetOfDes(TInt aCount)
	{
	__DMA_ASSERTD(aCount >= 0);
	Wait();
	iAvailDesCount += aCount;
	Signal();
	}
// Queue DFC and update word used to communicate with channel DFC.
//
// Called in interrupt context by PSL.
//
void TDmac::HandleIsr(TDmaChannel& aChannel, TUint aEventMask, TBool aIsComplete)
	{
	__KTRACE_OPT(KDMA, Kern::Printf("TDmac::HandleIsr"));

	// Function needs to be called by PSL in ISR context
	__DMA_ASSERTD(NKern::CurrentContext() == NKern::EInterrupt);

	// First the ISR callback stuff

	// Is this a transfer completion notification?
	if (aEventMask & EDmaCallbackRequestCompletion)
		{
		// If so, has the client requested an ISR callback?
		if (__e32_atomic_load_acq32(&aChannel.iIsrCbRequest))
			{
			__KTRACE_OPT(KDMA, Kern::Printf("ISR callback"));

			// Since iIsrCbRequest was set no threads will be
			// modifying the request queue.
			const DDmaRequest* const req = _LOFF(aChannel.iReqQ.First(), DDmaRequest, iLink);

			// We expect the request to have requested an ISR callback
			__NK_ASSERT_DEBUG(req->iIsrCb);

			TDmaCallback const cb = req->iDmaCb;
			TAny* const arg = req->iDmaCbArg;
			// Execute the client callback
			(*cb)(EDmaCallbackRequestCompletion,
				  (aIsComplete ? EDmaResultOK : EDmaResultError),
				  arg, NULL);

			// Now let's see if the callback rescheduled the transfer request
			// (see TDmaChannel::IsrRedoRequest()).
			const TBool redo = aChannel.iRedoRequest;
			aChannel.iRedoRequest = EFalse;
			const TBool stop = __e32_atomic_load_acq32(&aChannel.iIsrDfc) &
				(TUint32)TDmaChannel::KCancelFlagMask;
			// There won't be another ISR callback if this callback didn't
			// reschedule the request, or the client cancelled all requests, or
			// this callback rescheduled the request with a DFC callback.
			if (!redo || stop || !req->iIsrCb)
				{
				__e32_atomic_store_rel32(&aChannel.iIsrCbRequest, EFalse);
				}
			if (redo && !stop)
				{
				// We won't queue the channel DFC in this case and just return.
				__KTRACE_OPT(KDMA, Kern::Printf("CB rescheduled xfer -> no DFC"));
				return;
				}
			// Not redoing or being cancelled means we've been calling the
			// request's ISR callback for the last time. We're going to
			// complete the request via the DFC in the usual way.
			}
		}

	// Now queue a DFC if necessary. The possible scenarios are:
	// a) DFC not queued (orig == 0)                      -> update iIsrDfc + queue DFC
	// b) DFC queued, not running yet (orig != 0)         -> just update iIsrDfc
	// c) DFC running / iIsrDfc not reset yet (orig != 0) -> just update iIsrDfc
	// d) DFC running / iIsrDfc already reset (orig == 0) -> update iIsrDfc + requeue DFC

	// Set error flag if necessary.
	const TUint32 inc = aIsComplete ? 1u : TUint32(TDmaChannel::KErrorFlagMask) | 1u;

	// Add 'inc' (interrupt count increment + poss. error flag) to 'iIsrDfc' if
	// the cancel flag is not set, do nothing otherwise. Assign the original
	// value of 'iIsrDfc' to 'orig' in any case.
	const TUint32 orig = __e32_atomic_tau_ord32(&aChannel.iIsrDfc,
												TUint32(TDmaChannel::KCancelFlagMask),
												0,
												inc);

	// As transfers should be suspended when an error occurs, we
	// should never get here with the error flag already set.
	__DMA_ASSERTD((orig & inc & (TUint32)TDmaChannel::KErrorFlagMask) == 0);

	// Queue the DFC if necessary (scenarios a and d above)
	if (orig == 0)
		{
		aChannel.iDfc.Add();
		}
	}
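// Layout of the iIsrDfc word manipulated in HandleIsr() above (a sketch;
// the actual mask values are defined by TDmaChannel and are assumptions
// here):
//
//   bit 31         bit 30        bits 29..0
//   +-------------+-------------+-----------------------+
//   | cancel flag | error flag  | DFC/interrupt count   |
//   +-------------+-------------+-----------------------+
//
// 'inc' adds 1 to the count (plus possibly the error flag); the atomic
// test-and-add leaves the word untouched once the cancel flag is set, so
// no further DFC is queued after CancelAll() has started.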
TInt TDmac::InitDes(const SDmaDesHdr& aHdr, const TDmaTransferArgs& aTransferArgs)
	{
	__KTRACE_OPT(KDMA, Kern::Printf("TDmac::InitDes"));
	TInt r;
	if (iCapsHwDes)
		{
		__KTRACE_OPT(KDMA, Kern::Printf("iCaps.iHwDescriptors"));
		r = InitHwDes(aHdr, aTransferArgs);
		}
	else
		{
		TDmaTransferArgs& args = HdrToDes(aHdr);
		args = aTransferArgs;
		r = KErrNone;
		}
	return r;
	}

TInt TDmac::InitHwDes(const SDmaDesHdr& /*aHdr*/, const TDmaTransferArgs& /*aTransferArgs*/)
	{
	// concrete controller must override if SDmacCaps::iHwDescriptors set
	__DMA_CANT_HAPPEN();
	return KErrGeneral;
	}

TInt TDmac::InitSrcHwDes(const SDmaDesHdr& /*aHdr*/, const TDmaTransferArgs& /*aTransferArgs*/)
	{
	// concrete controller must override if SDmacCaps::iAsymHwDescriptors set
	__DMA_CANT_HAPPEN();
	return KErrGeneral;
	}

TInt TDmac::InitDstHwDes(const SDmaDesHdr& /*aHdr*/, const TDmaTransferArgs& /*aTransferArgs*/)
	{
	// concrete controller must override if SDmacCaps::iAsymHwDescriptors set
	__DMA_CANT_HAPPEN();
	return KErrGeneral;
	}

TInt TDmac::UpdateDes(const SDmaDesHdr& aHdr, TUint32 aSrcAddr, TUint32 aDstAddr,
					  TUint aTransferCount, TUint32 aPslRequestInfo)
	{
	__KTRACE_OPT(KDMA, Kern::Printf("TDmac::UpdateDes"));
	TInt r;
	if (iCapsHwDes)
		{
		__KTRACE_OPT(KDMA, Kern::Printf("iCaps.iHwDescriptors"));
		r = UpdateHwDes(aHdr, aSrcAddr, aDstAddr, aTransferCount, aPslRequestInfo);
		}
	else
		{
		TDmaTransferArgs& args = HdrToDes(aHdr);
		if (aSrcAddr != KPhysAddrInvalid)
			args.iSrcConfig.iAddr = aSrcAddr;
		if (aDstAddr != KPhysAddrInvalid)
			args.iDstConfig.iAddr = aDstAddr;
		if (aTransferCount)
			args.iTransferCount = aTransferCount;
		if (aPslRequestInfo)
			args.iPslRequestInfo = aPslRequestInfo;
		r = KErrNone;
		}
	return r;
	}

TInt TDmac::UpdateHwDes(const SDmaDesHdr& /*aHdr*/, TUint32 /*aSrcAddr*/, TUint32 /*aDstAddr*/,
						TUint /*aTransferCount*/, TUint32 /*aPslRequestInfo*/)
	{
	// concrete controller must override if SDmacCaps::iHwDescriptors set
	__DMA_CANT_HAPPEN();
	return KErrGeneral;
	}

TInt TDmac::UpdateSrcHwDes(const SDmaDesHdr& /*aHdr*/, TUint32 /*aSrcAddr*/,
						   TUint /*aTransferCount*/, TUint32 /*aPslRequestInfo*/)
	{
	// concrete controller must override if SDmacCaps::iAsymHwDescriptors set
	__DMA_CANT_HAPPEN();
	return KErrGeneral;
	}

TInt TDmac::UpdateDstHwDes(const SDmaDesHdr& /*aHdr*/, TUint32 /*aDstAddr*/,
						   TUint /*aTransferCount*/, TUint32 /*aPslRequestInfo*/)
	{
	// concrete controller must override if SDmacCaps::iAsymHwDescriptors set
	__DMA_CANT_HAPPEN();
	return KErrGeneral;
	}

void TDmac::ChainHwDes(const SDmaDesHdr& /*aHdr*/, const SDmaDesHdr& /*aNextHdr*/)
	{
	// concrete controller must override if SDmacCaps::iHwDescriptors set
	__DMA_CANT_HAPPEN();
	}

void TDmac::AppendHwDes(const TDmaChannel& /*aChannel*/, const SDmaDesHdr& /*aLastHdr*/,
						const SDmaDesHdr& /*aNewHdr*/)
	{
	// concrete controller must override if SDmacCaps::iHwDescriptors set
	__DMA_CANT_HAPPEN();
	}

void TDmac::AppendHwDes(const TDmaChannel& /*aChannel*/,
						const SDmaDesHdr& /*aSrcLastHdr*/, const SDmaDesHdr& /*aSrcNewHdr*/,
						const SDmaDesHdr& /*aDstLastHdr*/, const SDmaDesHdr& /*aDstNewHdr*/)
	{
	// concrete controller must override if SDmacCaps::iAsymHwDescriptors set
	__DMA_CANT_HAPPEN();
	}

void TDmac::UnlinkHwDes(const TDmaChannel& /*aChannel*/, SDmaDesHdr& /*aHdr*/)
	{
	// concrete controller must override if SDmacCaps::iHwDescriptors set
	__DMA_CANT_HAPPEN();
	}

void TDmac::ClearHwDes(const SDmaDesHdr& /*aHdr*/)
	{
	// default implementation - NOP; concrete controller may override
	}

TInt TDmac::LinkChannels(TDmaChannel& /*a1stChannel*/, TDmaChannel& /*a2ndChannel*/)
	{
	// default implementation - NOP; concrete controller may override
	return KErrNotSupported;
	}

TInt TDmac::UnlinkChannel(TDmaChannel& /*aChannel*/)
	{
	// default implementation - NOP; concrete controller may override
	return KErrNotSupported;
	}

TInt TDmac::FailNext(const TDmaChannel& /*aChannel*/)
	{
	// default implementation - NOP; concrete controller may override
	return KErrNotSupported;
	}

TInt TDmac::MissNextInterrupts(const TDmaChannel& /*aChannel*/, TInt /*aInterruptCount*/)
	{
	// default implementation - NOP; concrete controller may override
	return KErrNotSupported;
	}

TInt TDmac::Extension(TDmaChannel& /*aChannel*/, TInt /*aCmd*/, TAny* /*aArg*/)
	{
	// default implementation - NOP; concrete controller may override
	return KErrNotSupported;
	}

TUint32 TDmac::HwDesNumDstElementsTransferred(const SDmaDesHdr& /*aHdr*/)
	{
	// Concrete controller must override if SDmacCaps::iHwDescriptors set.
	__DMA_CANT_HAPPEN();
	return 0;
	}

TUint32 TDmac::HwDesNumSrcElementsTransferred(const SDmaDesHdr& /*aHdr*/)
	{
	// Concrete controller must override if SDmacCaps::iHwDescriptors set.
	__DMA_CANT_HAPPEN();
	return 0;
	}

void TDmac::Invariant()
	{
	__DMA_ASSERTD(0 <= iAvailDesCount && iAvailDesCount <= iMaxDesCount);
	__DMA_ASSERTD(!iFreeHdr || IsValidHdr(iFreeHdr));
	for (TInt i = 0; i < iMaxDesCount; i++)
		__DMA_ASSERTD(iHdrPool[i].iNext == NULL || IsValidHdr(iHdrPool[i].iNext));
	}

TBool TDmac::IsValidHdr(const SDmaDesHdr* aHdr)
	{
	return (iHdrPool <= aHdr) && (aHdr < iHdrPool + iMaxDesCount);
	}
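// Note: HdrToDes(), used by InitDes() and UpdateDes() above, is not shown in
// this listing; the assumption is that headers in iHdrPool and (pseudo)
// descriptors in iDesPool correspond 1:1 by index, roughly:
//
//   TDmaTransferArgs& TDmac::HdrToDes(const SDmaDesHdr& aHdr) const
//       {return static_cast<TDmaTransferArgs*>(iDesPool)[&aHdr - iHdrPool];}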
// Internal compat version, used by legacy Fragment()
//
TDmaTransferConfig::TDmaTransferConfig(TUint32 aAddr, TUint aFlags, TBool aAddrInc)
	: iAddr(aAddr),
	  iAddrMode(aAddrInc ? KDmaAddrModePostIncrement : KDmaAddrModeConstant),
	  iElementSize(0),
	  iElementsPerFrame(0),
	  iElementsPerPacket(0),
	  iFramesPerTransfer(0),

	  iBurstSize(KDmaBurstSizeAny),

	  iSyncFlags(KDmaSyncAuto)
	{
	}

// Internal compat version, used by legacy Fragment()
//
TDmaTransferArgs::TDmaTransferArgs(TUint32 aSrc, TUint32 aDest, TInt aCount,
								   TUint aFlags, TUint32 aPslInfo)
	: iSrcConfig(aSrc, RequestFlags2SrcConfigFlags(aFlags), (aFlags & KDmaIncSrc)),
	  iDstConfig(aDest, RequestFlags2DstConfigFlags(aFlags), (aFlags & KDmaIncDest)),
	  iTransferCount(aCount),
	  iGraphicsOps(KDmaGraphicsOpNone),

	  iChannelPriority(KDmaPriorityNone),
	  iPslRequestInfo(aPslInfo)
	{
	}
//////////////////////////////////////////////////////////////////////////////
// DDmaRequest

// As DDmaRequest is derived from DBase, the initializations with zero aren't
// strictly necessary here, but this way it's nicer.
//
EXPORT_C DDmaRequest::DDmaRequest(TDmaChannel& aChannel, TCallback aCb,
								  TAny* aCbArg, TInt aMaxTransferSize)
	: iChannel(aChannel),

	  iMaxTransferSize(aMaxTransferSize),
	  iTotalNumSrcElementsTransferred(0),
	  iTotalNumDstElementsTransferred(0)
	{
	iChannel.iReqCount++;
	__DMA_ASSERTD(0 <= aMaxTransferSize);
	}

// As DDmaRequest is derived from DBase, the initializations with zero aren't
// strictly necessary here, but this way it's nicer.
//
EXPORT_C DDmaRequest::DDmaRequest(TDmaChannel& aChannel, TDmaCallback aDmaCb,
								  TAny* aCbArg, TUint aMaxTransferSize)
	: iChannel(aChannel),

	  iMaxTransferSize(aMaxTransferSize),
	  iTotalNumSrcElementsTransferred(0),
	  iTotalNumDstElementsTransferred(0)
	{
	__e32_atomic_add_ord32(&iChannel.iReqCount, 1);
	}

EXPORT_C DDmaRequest::~DDmaRequest()
	{
	__DMA_ASSERTD(!iQueued);

	__e32_atomic_add_ord32(&iChannel.iReqCount, TUint32(-1));
	}

EXPORT_C TInt DDmaRequest::Fragment(TUint32 aSrc, TUint32 aDest, TInt aCount,
									TUint aFlags, TUint32 aPslInfo)
	{
	__KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::Fragment thread %O "
									"src=0x%08X dest=0x%08X count=%d flags=0x%X psl=0x%08X",
									&Kern::CurrentThread(), aSrc, aDest, aCount, aFlags, aPslInfo));
	__DMA_ASSERTD(aCount > 0);

	TDmaTransferArgs args(aSrc, aDest, aCount, aFlags, aPslInfo);

	return Frag(args);
	}
EXPORT_C TInt DDmaRequest::Fragment(const TDmaTransferArgs& aTransferArgs)
	{
	__KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::Fragment thread %O", &Kern::CurrentThread()));

	// Writable temporary working copy of the transfer arguments.
	// We need this because we may have to modify some fields before passing it
	// to the PSL (for example iChannelCookie, iTransferCount,
	// iDstConfig::iAddr, and iSrcConfig::iAddr).
	TDmaTransferArgs args(aTransferArgs);

	return Frag(args);
	}
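// A minimal client-side usage sketch (illustration only, not compiled in;
// the DFC queue, channel cookie, priorities and buffer addresses are all
// assumptions, and error/cleanup handling is abbreviated):
#if 0
static void XferDone(DDmaRequest::TResult aResult, TAny* /*aArg*/)
	{
	// Signal the waiting client thread here (omitted)
	(void)aResult;
	}

static TInt ExampleOneShotTransfer(TDfcQue* aDfcQ, TUint32 aSrc, TUint32 aDst)
	{
	TDmaChannel::SCreateInfo info;
	info.iCookie = 0;							// hypothetical PSL channel id
	info.iDynChannel = EFalse;
	info.iPriority = KDmaPriorityNone;			// assumed acceptable default
	info.iDesCount = 2;							// descriptors to reserve
	info.iDfcQ = aDfcQ;
	info.iDfcPriority = 3;
	TDmaChannel* channel = NULL;
	TInt r = TDmaChannel::Open(info, channel);
	if (r != KErrNone)
		return r;
	// Old-style request with a DFC-thread callback
	DDmaRequest* const req = new DDmaRequest(*channel, &XferDone, NULL, 0);
	if (req)
		{
		r = req->Fragment(aSrc, aDst, 4096,
						  KDmaMemAddr | KDmaIncSrc | KDmaIncDest, 0);
		if (r == KErrNone)
			r = req->Queue();
		// ... wait for XferDone() to run, then:
		delete req;
		}
	else
		r = KErrNoMemory;
	channel->Close();
	return r;
	}
#endif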
TUint DDmaRequest::GetTransferCount(const TDmaTransferArgs& aTransferArgs)
	{
	const TDmaTransferConfig& src = aTransferArgs.iSrcConfig;
	const TDmaTransferConfig& dst = aTransferArgs.iDstConfig;

	TUint count = aTransferArgs.iTransferCount;
	if (count == 0)
		{
		__KTRACE_OPT(KDMA, Kern::Printf("iTransferCount == 0"));
		count = src.iElementSize * src.iElementsPerFrame *
			src.iFramesPerTransfer;
		const TUint dst_cnt = dst.iElementSize * dst.iElementsPerFrame *
			dst.iFramesPerTransfer;
		if (count != dst_cnt)
			{
			__KTRACE_OPT(KPANIC, Kern::Printf("Error: (count != dst_cnt)"));
			return 0;
			}
		}
	else
		{
		__KTRACE_OPT(KDMA, Kern::Printf("iTransferCount == %d", count));
		// Client shouldn't specify contradictory or incomplete things
		if (src.iElementSize != 0)
			{
			if ((count % src.iElementSize) != 0)
				{
				__KTRACE_OPT(KPANIC,
							 Kern::Printf("Error: ((count %% src.iElementSize) != 0)"));
				return 0;
				}
			if (src.iElementsPerFrame != 0)
				{
				if ((src.iElementSize * src.iElementsPerFrame * src.iFramesPerTransfer) != count)
					{
					__KTRACE_OPT(KPANIC,
								 Kern::Printf("Error: ((src.iElementSize * "
											  "src.iElementsPerFrame * "
											  "src.iFramesPerTransfer) != count)"));
					return 0;
					}
				}
			}
		else
			{
			if (src.iElementsPerFrame != 0)
				{
				__KTRACE_OPT(KPANIC,
							 Kern::Printf("Error: (src.iElementsPerFrame != 0)"));
				return 0;
				}
			if (src.iFramesPerTransfer != 0)
				{
				__KTRACE_OPT(KPANIC,
							 Kern::Printf("Error: (src.iFramesPerTransfer != 0)"));
				return 0;
				}
			if (src.iElementsPerPacket != 0)
				{
				__KTRACE_OPT(KPANIC,
							 Kern::Printf("Error: (src.iElementsPerPacket != 0)"));
				return 0;
				}
			}
		if (dst.iElementSize != 0)
			{
			if ((count % dst.iElementSize) != 0)
				{
				__KTRACE_OPT(KPANIC,
							 Kern::Printf("Error: ((count %% dst.iElementSize) != 0)"));
				return 0;
				}
			if (dst.iElementsPerFrame != 0)
				{
				if ((dst.iElementSize * dst.iElementsPerFrame * dst.iFramesPerTransfer) != count)
					{
					__KTRACE_OPT(KPANIC,
								 Kern::Printf("Error: ((dst.iElementSize * "
											  "dst.iElementsPerFrame * "
											  "dst.iFramesPerTransfer) != count)"));
					return 0;
					}
				}
			}
		else
			{
			if (dst.iElementsPerFrame != 0)
				{
				__KTRACE_OPT(KPANIC,
							 Kern::Printf("Error: (dst.iElementsPerFrame != 0)"));
				return 0;
				}
			if (dst.iFramesPerTransfer != 0)
				{
				__KTRACE_OPT(KPANIC,
							 Kern::Printf("Error: (dst.iFramesPerTransfer != 0)"));
				return 0;
				}
			if (dst.iElementsPerPacket != 0)
				{
				__KTRACE_OPT(KPANIC,
							 Kern::Printf("Error: (dst.iElementsPerPacket != 0)"));
				return 0;
				}
			}
		}
	return count;
	}
TInt DDmaRequest::Frag(TDmaTransferArgs& aTransferArgs)
	{
	__DMA_ASSERTD(!iQueued);

	// Transfer count checks
	const TUint count = GetTransferCount(aTransferArgs);
	if (count == 0)
		{
		return KErrArgument;
		}

	const TDmaTransferConfig& src = aTransferArgs.iSrcConfig;
	const TDmaTransferConfig& dst = aTransferArgs.iDstConfig;

	// Ask the PSL what the maximum length possible for this transfer is
	TUint max_xfer_len = iChannel.MaxTransferLength(src.iFlags, dst.iFlags,
													aTransferArgs.iPslRequestInfo);
	if (iMaxTransferSize)
		{
		// User has set a size cap
		__KTRACE_OPT(KDMA, Kern::Printf("iMaxTransferSize != 0"));
		__DMA_ASSERTA((iMaxTransferSize <= max_xfer_len) || (max_xfer_len == 0));
		max_xfer_len = iMaxTransferSize;
		}
	else
		{
		// User doesn't care about max size
		if (max_xfer_len == 0)
			{
			// No maximum imposed by controller
			max_xfer_len = count;
			}
		}

	// ISR callback requested?
	const TBool isr_cb = (aTransferArgs.iFlags & KDmaRequestCallbackFromIsr);
	if (isr_cb)
		{
		// Requesting an ISR callback w/o supplying one?
		if (!iDmaCb)
			{
			return KErrArgument;
			}
		}

	// Set the channel cookie for the PSL
	aTransferArgs.iChannelCookie = iChannel.PslId();

	// Now the actual fragmentation
	TInt r;
	if (iChannel.iDmacCaps->iAsymHwDescriptors)
		{
		r = FragAsym(aTransferArgs, count, max_xfer_len);
		}
	else
		{
		r = FragSym(aTransferArgs, count, max_xfer_len);
		}

	if (r == KErrNone)
		{
		iIsrCb = isr_cb;
		}
	return r;
	}
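// Worked example (illustrative figures only): a 10000-byte memory-to-memory
// transfer on a controller whose MaxTransferLength() reports 4096 is split
// by FragSym() below into fragments of 4096 + 4096 + 1808 bytes, with each
// boundary additionally rounded down if the buffers turn out to be
// physically discontiguous at that point.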
TInt DDmaRequest::FragSym(TDmaTransferArgs& aTransferArgs, TUint aCount,
						  TUint aMaxTransferLen)
	{
	TDmaTransferConfig& src = aTransferArgs.iSrcConfig;
	TDmaTransferConfig& dst = aTransferArgs.iDstConfig;

	const TBool mem_src = (src.iFlags & KDmaMemAddr);
	const TBool mem_dst = (dst.iFlags & KDmaMemAddr);

	const TUint align_mask_src = iChannel.AddressAlignMask(src.iFlags,
														   src.iElementSize,
														   aTransferArgs.iPslRequestInfo);
	const TUint align_mask_dst = iChannel.AddressAlignMask(dst.iFlags,
														   dst.iElementSize,
														   aTransferArgs.iPslRequestInfo);
	// Memory buffers must satisfy alignment constraint
	__DMA_ASSERTD(!mem_src || ((src.iAddr & align_mask_src) == 0));
	__DMA_ASSERTD(!mem_dst || ((dst.iAddr & align_mask_dst) == 0));

	const TUint max_aligned_len = (aMaxTransferLen &
								   ~(Max(align_mask_src, align_mask_dst)));
	// Client and PSL sane?
	__DMA_ASSERTD(max_aligned_len > 0);

	FreeDesList();			// revert any previous fragmentation attempt

	TInt r;
	do
		{
		// Allocate fragment
		r = ExpandDesList(/*1*/);
		if (r != KErrNone)
			{
			break;
			}
		// Compute fragment size
		TUint c = Min(aMaxTransferLen, aCount);
		if (mem_src && !(src.iFlags & KDmaPhysAddr))
			{
			__KTRACE_OPT(KDMA, Kern::Printf("mem_src && !(src.iFlags & KDmaPhysAddr)"));
			// @@@ Should also take into account (src.iFlags & KDmaMemIsContiguous)!
			c = MaxPhysSize(src.iAddr, c);
			}
		if (mem_dst && !(dst.iFlags & KDmaPhysAddr))
			{
			__KTRACE_OPT(KDMA, Kern::Printf("mem_dst && !(dst.iFlags & KDmaPhysAddr)"));
			// @@@ Should also take into account (dst.iFlags & KDmaMemIsContiguous)!
			c = MaxPhysSize(dst.iAddr, c);
			}
		if ((mem_src || mem_dst) && (c < aCount) && (c > max_aligned_len))
			{
			// This is not the last fragment of a transfer to/from memory.
			// We must round down the fragment size so the next one is
			// correctly aligned.
			__KTRACE_OPT(KDMA, Kern::Printf("(mem_src || mem_dst) && (c < aCount) && (c > max_aligned_len)"));
			c = max_aligned_len;
			}

		// TODO: Make sure that an element or frame on either the src or dst
		// side (which can be of different sizes) never straddles a DMA
		// subtransfer. (That would be a fragmentation error by the PIL.)

		// Set transfer count for the PSL
		aTransferArgs.iTransferCount = c;
		__KTRACE_OPT(KDMA, Kern::Printf("this fragm.: %d (0x%x) total remain.: %d (0x%x)",
										c, c, aCount, aCount));
		// Initialise fragment
		r = iChannel.iController->InitDes(*iLastHdr, aTransferArgs);
		if (r != KErrNone)
			{
			break;
			}
		// Update for next iteration
		aCount -= c;
		if (mem_src)
			src.iAddr += c;
		if (mem_dst)
			dst.iAddr += c;
		}
	while (aCount > 0);

	if (r != KErrNone)
		FreeDesList();
	return r;
	}
TInt DDmaRequest::FragAsym(TDmaTransferArgs& aTransferArgs, TUint aCount,
						   TUint aMaxTransferLen)
	{
	TInt r = FragAsymSrc(aTransferArgs, aCount, aMaxTransferLen);
	if (r != KErrNone)
		{
		FreeSrcDesList();
		return r;
		}
	r = FragAsymDst(aTransferArgs, aCount, aMaxTransferLen);
	if (r != KErrNone)
		{
		FreeSrcDesList();
		FreeDstDesList();
		}
	return r;
	}
TInt DDmaRequest::FragAsymSrc(TDmaTransferArgs& aTransferArgs, TUint aCount,
							  TUint aMaxTransferLen)
	{
	TDmaTransferConfig& src = aTransferArgs.iSrcConfig;

	const TBool mem_src = (src.iFlags & KDmaMemAddr);

	const TUint align_mask = iChannel.AddressAlignMask(src.iFlags,
													   src.iElementSize,
													   aTransferArgs.iPslRequestInfo);
	// Memory buffers must satisfy alignment constraint
	__DMA_ASSERTD(!mem_src || ((src.iAddr & align_mask) == 0));

	const TUint max_aligned_len = (aMaxTransferLen & ~align_mask);
	__DMA_ASSERTD(max_aligned_len > 0);				// bug in PSL if not true

	TInt r;
	do
		{
		// Allocate fragment
		r = ExpandSrcDesList(/*1*/);
		if (r != KErrNone)
			{
			break;
			}
		// Compute fragment size
		TUint c = Min(aMaxTransferLen, aCount);
		if (mem_src && !(src.iFlags & KDmaPhysAddr))
			{
			__KTRACE_OPT(KDMA, Kern::Printf("mem_src && !(src.iFlags & KDmaPhysAddr)"));
			c = MaxPhysSize(src.iAddr, c);
			}
		if (mem_src && (c < aCount) && (c > max_aligned_len))
			{
			// This is not the last fragment of a transfer from memory.
			// We must round down the fragment size so the next one is
			// correctly aligned.
			__KTRACE_OPT(KDMA, Kern::Printf("mem_src && (c < aCount) && (c > max_aligned_len)"));
			c = max_aligned_len;
			}
		// Set transfer count for the PSL
		aTransferArgs.iTransferCount = c;
		__KTRACE_OPT(KDMA, Kern::Printf("this fragm.: %d (0x%x) total remain.: %d (0x%x)",
										c, c, aCount, aCount));
		// Initialise fragment
		r = iChannel.iController->InitSrcHwDes(*iSrcLastHdr, aTransferArgs);
		if (r != KErrNone)
			{
			break;
			}
		// Update for next iteration
		aCount -= c;
		if (mem_src)
			src.iAddr += c;
		}
	while (aCount > 0);

	return r;
	}
TInt DDmaRequest::FragAsymDst(TDmaTransferArgs& aTransferArgs, TUint aCount,
							  TUint aMaxTransferLen)
	{
	TDmaTransferConfig& dst = aTransferArgs.iDstConfig;

	const TBool mem_dst = (dst.iFlags & KDmaMemAddr);

	const TUint align_mask = iChannel.AddressAlignMask(dst.iFlags,
													   dst.iElementSize,
													   aTransferArgs.iPslRequestInfo);
	// Memory buffers must satisfy alignment constraint
	__DMA_ASSERTD(!mem_dst || ((dst.iAddr & align_mask) == 0));

	const TUint max_aligned_len = (aMaxTransferLen & ~align_mask);
	__DMA_ASSERTD(max_aligned_len > 0);				// bug in PSL if not true

	TInt r;
	do
		{
		// Allocate fragment
		r = ExpandDstDesList(/*1*/);
		if (r != KErrNone)
			{
			break;
			}
		// Compute fragment size
		TUint c = Min(aMaxTransferLen, aCount);
		if (mem_dst && !(dst.iFlags & KDmaPhysAddr))
			{
			__KTRACE_OPT(KDMA, Kern::Printf("mem_dst && !(dst.iFlags & KDmaPhysAddr)"));
			c = MaxPhysSize(dst.iAddr, c);
			}
		if (mem_dst && (c < aCount) && (c > max_aligned_len))
			{
			// This is not the last fragment of a transfer to memory.
			// We must round down the fragment size so the next one is
			// correctly aligned.
			__KTRACE_OPT(KDMA, Kern::Printf("mem_dst && (c < aCount) && (c > max_aligned_len)"));
			c = max_aligned_len;
			}
		// Set transfer count for the PSL
		aTransferArgs.iTransferCount = c;
		__KTRACE_OPT(KDMA, Kern::Printf("this fragm.: %d (0x%x) total remain.: %d (0x%x)",
										c, c, aCount, aCount));
		// Initialise fragment
		r = iChannel.iController->InitDstHwDes(*iDstLastHdr, aTransferArgs);
		if (r != KErrNone)
			{
			break;
			}
		// Update for next iteration
		aCount -= c;
		if (mem_dst)
			dst.iAddr += c;
		}
	while (aCount > 0);

	return r;
	}
EXPORT_C TInt DDmaRequest::Queue()
	{
	__KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::Queue thread %O", &Kern::CurrentThread()));
	__DMA_ASSERTD(iDesCount > 0);	// Not configured? Call Fragment() first!
	__DMA_ASSERTD(!iQueued);

	// Append request to queue and link new descriptor list to existing one.
	iChannel.Wait();

	TInt r = KErrGeneral;
	const TBool ch_isr_cb = __e32_atomic_load_acq32(&iChannel.iIsrCbRequest);
	if (ch_isr_cb)
		{
		// Client mustn't try to queue any new request while one with an ISR
		// callback is already queued on this channel. This is to make sure
		// that the channel's Transfer() function is not called by both the ISR
		// and the client thread at the same time.
		__KTRACE_OPT(KPANIC, Kern::Printf("An ISR cb request exists - not queueing"));
		}
	else if (iIsrCb && !iChannel.IsQueueEmpty())
		{
		// Client mustn't try to queue an ISR callback request whilst any
		// others are still queued on this channel. This is to make sure that
		// the ISR callback doesn't get executed together with the DFC(s) of
		// any previous request(s).
		__KTRACE_OPT(KPANIC, Kern::Printf("Request queue not empty - not queueing"));
		}
	else if (iChannel.iIsrDfc & (TUint32)TDmaChannel::KCancelFlagMask)
		{
		__KTRACE_OPT(KPANIC, Kern::Printf("Channel requests cancelled - not queueing"));
		}
	else
		{
		iQueued = ETrue;
		iChannel.iReqQ.Add(&iLink);
		// iChannel.iNullPtr points to iChannel.iCurHdr for an empty queue
		*iChannel.iNullPtr = iFirstHdr;
		iChannel.iNullPtr = &(iLastHdr->iNext);
		if (iIsrCb)
			{
			// Since we've made sure that there is no other request in the
			// queue before this, the only thing of relevance is the channel
			// DFC which might yet have to complete for the previous request,
			// and this function might indeed have been called from there via
			// the client callback. This should be all right though as once
			// we've set the following flag no further Queue()'s will be
			// possible.
			__e32_atomic_store_rel32(&iChannel.iIsrCbRequest, ETrue);
			}
		iChannel.DoQueue(const_cast<const DDmaRequest&>(*this));
		r = KErrNone;
		}

	iChannel.Signal();

	return r;
	}
EXPORT_C TInt DDmaRequest::ExpandDesList(TInt aCount)
	{
	return ExpandDesList(aCount, iDesCount, iFirstHdr, iLastHdr);
	}

EXPORT_C TInt DDmaRequest::ExpandSrcDesList(TInt aCount)
	{
	return ExpandDesList(aCount, iSrcDesCount, iSrcFirstHdr, iSrcLastHdr);
	}

EXPORT_C TInt DDmaRequest::ExpandDstDesList(TInt aCount)
	{
	return ExpandDesList(aCount, iDstDesCount, iDstFirstHdr, iDstLastHdr);
	}

TInt DDmaRequest::ExpandDesList(TInt aCount, TInt& aDesCount,
								SDmaDesHdr*& aFirstHdr,
								SDmaDesHdr*& aLastHdr)
	{
	__DMA_ASSERTD(!iQueued);
	__DMA_ASSERTD(aCount > 0);

	if (aCount > iChannel.iAvailDesCount)
		{
		return KErrTooBig;
		}

	iChannel.iAvailDesCount -= aCount;
	aDesCount += aCount;

	TDmac& c = *(iChannel.iController);
	c.Wait();

	if (aFirstHdr == NULL)
		{
		// Handle an empty list specially to simplify the following loop
		aFirstHdr = aLastHdr = c.iFreeHdr;
		c.iFreeHdr = c.iFreeHdr->iNext;
		--aCount;
		}
	else
		{
		aLastHdr->iNext = c.iFreeHdr;
		}

	// Remove as many descriptors and headers from the free pool as necessary
	// and ensure hardware descriptors are chained together.
	while (aCount-- > 0)
		{
		__DMA_ASSERTD(c.iFreeHdr != NULL);
		if (c.iCapsHwDes)
			{
			c.ChainHwDes(*aLastHdr, *(c.iFreeHdr));
			}
		aLastHdr = c.iFreeHdr;
		c.iFreeHdr = c.iFreeHdr->iNext;
		}

	c.Signal();

	aLastHdr->iNext = NULL;

	return KErrNone;
	}
EXPORT_C void DDmaRequest::FreeDesList()
	{
	FreeDesList(iDesCount, iFirstHdr, iLastHdr);
	}

EXPORT_C void DDmaRequest::FreeSrcDesList()
	{
	FreeDesList(iSrcDesCount, iSrcFirstHdr, iSrcLastHdr);
	}

EXPORT_C void DDmaRequest::FreeDstDesList()
	{
	FreeDesList(iDstDesCount, iDstFirstHdr, iDstLastHdr);
	}

void DDmaRequest::FreeDesList(TInt& aDesCount, SDmaDesHdr*& aFirstHdr, SDmaDesHdr*& aLastHdr)
	{
	__DMA_ASSERTD(!iQueued);

	if (aDesCount > 0)
		{
		iChannel.iAvailDesCount += aDesCount;
		TDmac& c = *(iChannel.iController);
		const SDmaDesHdr* hdr = aFirstHdr;
		while (hdr)
			{
			c.ClearHwDes(*hdr);
			hdr = hdr->iNext;
			}
		aLastHdr->iNext = c.iFreeHdr;
		c.iFreeHdr = aFirstHdr;
		aDesCount = 0;
		aFirstHdr = aLastHdr = NULL;
		}
	}
EXPORT_C void DDmaRequest::EnableSrcElementCounting(TBool /*aResetElementCount*/)
	{
	// Not yet implemented.
	}

EXPORT_C void DDmaRequest::EnableDstElementCounting(TBool /*aResetElementCount*/)
	{
	// Not yet implemented.
	}

EXPORT_C void DDmaRequest::DisableSrcElementCounting()
	{
	// Not yet implemented.
	}

EXPORT_C void DDmaRequest::DisableDstElementCounting()
	{
	// Not yet implemented.
	}

EXPORT_C TUint32 DDmaRequest::TotalNumSrcElementsTransferred()
	{
	// Not yet implemented.

	// So far largely bogus code (just to touch some symbols)...
	iTotalNumSrcElementsTransferred = 0;
	TDmac& c = *(iChannel.iController);
	if (c.iCapsHwDes)
		{
		for (const SDmaDesHdr* pH = iFirstHdr; pH != NULL; pH = pH->iNext)
			{
			iTotalNumSrcElementsTransferred += c.HwDesNumDstElementsTransferred(*pH);
			}
		}
	else
		{
		// Do something different for pseudo descriptors...
		}
	return iTotalNumSrcElementsTransferred;
	}
EXPORT_C TUint32 DDmaRequest::TotalNumDstElementsTransferred()
	{
	// Not yet implemented.
	return iTotalNumDstElementsTransferred;
	}

EXPORT_C TInt DDmaRequest::FragmentCount()
	{
	return FragmentCount(iFirstHdr);
	}

EXPORT_C TInt DDmaRequest::SrcFragmentCount()
	{
	return FragmentCount(iSrcFirstHdr);
	}

EXPORT_C TInt DDmaRequest::DstFragmentCount()
	{
	return FragmentCount(iDstFirstHdr);
	}

TInt DDmaRequest::FragmentCount(const SDmaDesHdr* aHdr)
	{
	TInt count = 0;
	for (const SDmaDesHdr* pH = aHdr; pH != NULL; pH = pH->iNext)
		{
		count++;
		}
	return count;
	}

// Called when request is removed from request queue in channel
//
inline void DDmaRequest::OnDeque()
	{
	iQueued = EFalse;
	iLastHdr->iNext = NULL;
	iChannel.DoUnlink(*iLastHdr);
	}
void DDmaRequest::Invariant()
	{
	iChannel.Wait();
	__DMA_ASSERTD(LOGICAL_XOR(iCb, iDmaCb));
	if (iChannel.iDmacCaps->iAsymHwDescriptors)
		{
		__DMA_ASSERTD((0 <= iSrcDesCount) && (iSrcDesCount <= iChannel.iMaxDesCount) &&
					  (0 <= iDstDesCount) && (iDstDesCount <= iChannel.iMaxDesCount));
		if (iSrcDesCount == 0)
			{
			__DMA_ASSERTD(iDstDesCount == 0);
			__DMA_ASSERTD(!iQueued);
			__DMA_ASSERTD(!iSrcFirstHdr && !iSrcLastHdr &&
						  !iDstFirstHdr && !iDstLastHdr);
			}
		else
			{
			__DMA_ASSERTD(iChannel.iController->IsValidHdr(iSrcFirstHdr));
			__DMA_ASSERTD(iChannel.iController->IsValidHdr(iSrcLastHdr));
			__DMA_ASSERTD(iChannel.iController->IsValidHdr(iDstFirstHdr));
			__DMA_ASSERTD(iChannel.iController->IsValidHdr(iDstLastHdr));
			}
		}
	else
		{
		__DMA_ASSERTD((0 <= iDesCount) && (iDesCount <= iChannel.iMaxDesCount));
		if (iDesCount == 0)
			{
			__DMA_ASSERTD(!iQueued);
			__DMA_ASSERTD(!iFirstHdr && !iLastHdr);
			}
		else
			{
			__DMA_ASSERTD(iChannel.iController->IsValidHdr(iFirstHdr));
			__DMA_ASSERTD(iChannel.iController->IsValidHdr(iLastHdr));
			}
		}
	iChannel.Signal();
	}
//////////////////////////////////////////////////////////////////////////////
// TDmaChannel

_LIT(KDmaChannelMutex, "DMA-Channel");

TDmaChannel::TDmaChannel()
	: iController(NULL),

	  iDynChannel(EFalse),
	  iPriority(KDmaPriorityNone),

	  iRedoRequest(EFalse),
	  iIsrCbRequest(EFalse)
	{
	const TInt r = Kern::MutexCreate(iMutex, KDmaChannelMutex, KMutexOrdDmaChannel);
	__DMA_ASSERTA(r == KErrNone);

#ifndef __WINS__
	// On the emulator this code is called from within the codeseg mutex.
	// The invariant tries to hold the dma channel mutex, but this is not
	// allowed there.
	__DMA_INVARIANT();
#endif
	}
TDmaChannel::~TDmaChannel()
	{
	Kern::SafeClose((DObject*&)iMutex, NULL);
	}

// static member function
//
EXPORT_C TInt TDmaChannel::Open(const SCreateInfo& aInfo, TDmaChannel*& aChannel)
	{
	__KTRACE_OPT(KDMA, Kern::Printf("TDmaChannel::Open thread %O", &Kern::CurrentThread()));

	__DMA_ASSERTD(aInfo.iDesCount >= 1);
	__DMA_ASSERTD(aInfo.iPriority <= KDmaPriority8);
	__DMA_ASSERTD(aInfo.iDfcQ != NULL);
	__DMA_ASSERTD(aInfo.iDfcPriority < KNumDfcPriorities);

	aChannel = NULL;

	DmaChannelMgr::Wait();
	TDmaChannel* pC = DmaChannelMgr::Open(aInfo.iCookie, aInfo.iDynChannel, aInfo.iPriority);
	DmaChannelMgr::Signal();
	if (!pC)
		{
		return KErrInUse;
		}
	__DMA_ASSERTD(pC->iController != NULL);
	__DMA_ASSERTD(pC->iDmacCaps != NULL);
	__DMA_ASSERTD(pC->iController->iCapsHwDes == pC->DmacCaps().iHwDescriptors);
	// PSL needs to set iDynChannel if and only if dynamic channel was requested
	__DMA_ASSERTD(!LOGICAL_XOR(aInfo.iDynChannel, pC->iDynChannel));

	const TInt r = pC->iController->ReserveSetOfDes(aInfo.iDesCount);
	if (r != KErrNone)
		{
		pC->Close();
		return r;
		}
	pC->iAvailDesCount = pC->iMaxDesCount = aInfo.iDesCount;
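	// The channel's DFC is constructed in place below (rather than in the
	// TDmaChannel constructor) because the client's DFC queue and priority
	// only become known here at Open() time.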
	new (&pC->iDfc) TDfc(&Dfc, pC, aInfo.iDfcQ, aInfo.iDfcPriority);

	aChannel = pC;

	__KTRACE_OPT(KDMA, Kern::Printf("opened channel %d", pC->iPslId));
	return KErrNone;
	}

EXPORT_C void TDmaChannel::Close()
	{
	__KTRACE_OPT(KDMA, Kern::Printf("TDmaChannel::Close %d iReqCount=%d", iPslId, iReqCount));
	__DMA_ASSERTD(IsQueueEmpty());
	__DMA_ASSERTD(iReqCount == 0);

	// Descriptor leak? -> bug in request code
	__DMA_ASSERTD(iAvailDesCount == iMaxDesCount);

	__DMA_ASSERTD(!iRedoRequest);
	__DMA_ASSERTD(!iIsrCbRequest);

	iController->ReleaseSetOfDes(iMaxDesCount);
	iAvailDesCount = iMaxDesCount = 0;

	DmaChannelMgr::Wait();
	DmaChannelMgr::Close(this);
	// The following assignment will be removed once IsOpened() has been
	// removed. That's because 'this' shouldn't be touched any more once
	// Close() has returned from the PSL.
	iController = NULL;
	DmaChannelMgr::Signal();
	}
EXPORT_C TInt TDmaChannel::LinkToChannel(TDmaChannel* aChannel)
	{
	__KTRACE_OPT(KDMA, Kern::Printf("TDmaChannel::LinkToChannel thread %O",
									&Kern::CurrentThread()));
	if (aChannel)
		{
		return iController->LinkChannels(*this, *aChannel);
		}
	else
		{
		return iController->UnlinkChannel(*this);
		}
	}

EXPORT_C TInt TDmaChannel::Pause()
	{
	__KTRACE_OPT(KDMA, Kern::Printf("TDmaChannel::Pause thread %O",
									&Kern::CurrentThread()));
	return iController->PauseTransfer(*this);
	}

EXPORT_C TInt TDmaChannel::Resume()
	{
	__KTRACE_OPT(KDMA, Kern::Printf("TDmaChannel::Resume thread %O",
									&Kern::CurrentThread()));
	return iController->ResumeTransfer(*this);
	}
EXPORT_C void TDmaChannel::CancelAll()
	{
	__KTRACE_OPT(KDMA, Kern::Printf("TDmaChannel::CancelAll thread %O channel - %d",
									&Kern::CurrentThread(), iPslId));
	NThread* const nt = NKern::CurrentThread();
	TBool wait = EFalse;
	TDmaCancelInfo cancelinfo;
	TDmaCancelInfo* waiters = NULL;

	NKern::ThreadEnterCS();
	Wait();

	NThreadBase* const dfc_nt = iDfc.Thread();
	// Shouldn't be NULL (i.e. the DFC must not be an IDFC)
	__DMA_ASSERTD(dfc_nt);

	__e32_atomic_store_ord32(&iIsrDfc, (TUint32)KCancelFlagMask);
	// ISRs after this point will not post a DFC, however a DFC may already be
	// queued or running or both.
	if (!IsQueueEmpty())
		{
		// There is a transfer in progress. It may complete before the DMAC
		// has stopped, but the resulting ISR will not post a DFC.
		// ISR should not happen after this function returns.
		iController->StopTransfer(*this);

		ResetStateMachine();

		// Clean-up the request queue.
		SDblQueLink* pL;
		while ((pL = iReqQ.GetFirst()) != NULL)
			{
			DDmaRequest* pR = _LOFF(pL, DDmaRequest, iLink);
			pR->OnDeque();
			}
		}
	if (dfc_nt == nt)
		{
		// DFC runs in this thread, so just cancel it and we're finished
		iDfc.Cancel();

		// If other calls to CancelAll() are waiting for the DFC, release them here
		waiters = iCancelInfo;
		iCancelInfo = NULL;

		// Reset the ISR count
		__e32_atomic_store_rel32(&iIsrDfc, 0);
		}
	else
		{
		// DFC runs in another thread. Make sure it's queued and then wait for it to run.
		if (iCancelInfo)
			{
			// Insert cancelinfo into the list so that it precedes iCancelInfo
			cancelinfo.InsertBefore(iCancelInfo);
			}
		else
			{
			iCancelInfo = &cancelinfo;
			}
		wait = ETrue;
		iDfc.Enque();
		}

	Signal();

	if (waiters)
		waiters->Signal();
	else if (wait)
		NKern::FSWait(&cancelinfo.iSem);

	NKern::ThreadLeaveCS();
	}
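// Illustration of the CancelAll() rendezvous (a sketch): each concurrent
// caller whose DFC runs in another thread links a TDmaCancelInfo onto the
// channel's iCancelInfo list and blocks on the fast semaphore inside it.
// Whoever drains the queue (the DFC, or a cancelling thread that owns the
// DFC thread) then releases every waiter via TDmaCancelInfo::Signal():
//
//   iCancelInfo -> [caller A] -> [caller B] -> ...   (FSSignal() each in turn)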
EXPORT_C TInt TDmaChannel::IsrRedoRequest(TUint32 aSrcAddr, TUint32 aDstAddr,
										  TUint aTransferCount,
										  TUint32 aPslRequestInfo,
										  TBool aIsrCb)
	{
	__KTRACE_OPT(KDMA,
				 Kern::Printf("TDmaChannel::IsrRedoRequest src=0x%08x, "
							  "dst=0x%08x, count=%d, pslInfo=0x%08x, isrCb=%d",
							  aSrcAddr, aDstAddr, aTransferCount, aPslRequestInfo,
							  aIsrCb));
	// Function needs to be called in ISR context.
	__DMA_ASSERTD(NKern::CurrentContext() == NKern::EInterrupt);

	__DMA_ASSERTD(!iReqQ.IsEmpty());
	__DMA_ASSERTD(iIsrCbRequest);

	TInt r = KErrNone;

	if ((aSrcAddr != KPhysAddrInvalid) && (aSrcAddr == aDstAddr))
		{
		__KTRACE_OPT(KPANIC,
					 Kern::Printf("Error: Updating src & dst to same address: 0x%08x",
								  aSrcAddr));
		return KErrArgument;
		}

	// We assume here that the just completed request is the first one in the
	// queue, i.e. that even if there is more than one request in the queue,
	// their respective last and first (hw) descriptors are *not* linked.
	// (Although that's what apparently happens in TDmaSgChannel::DoQueue() /
	// TDmac::AppendHwDes() @@@).
	DDmaRequest* const pCurReq = _LOFF(iReqQ.First(), DDmaRequest, iLink);

	if (iDmacCaps->iAsymHwDescriptors)
		{
		// We don't allow multiple-descriptor chains to be updated here
		__DMA_ASSERTD((pCurReq->iSrcDesCount == 1) && (pCurReq->iDstDesCount == 1));
		// Adjust parameters if necessary (asymmetrical s/g variety)
		const SDmaDesHdr* const pSrcFirstHdr = pCurReq->iSrcFirstHdr;
		if ((aSrcAddr != KPhysAddrInvalid) || aTransferCount || aPslRequestInfo)
			{
			r = iController->UpdateSrcHwDes(*pSrcFirstHdr, aSrcAddr,
											aTransferCount, aPslRequestInfo);
			if (r != KErrNone)
				{
				__KTRACE_OPT(KPANIC, Kern::Printf("Src descriptor updating failed in PSL"));
				return r;
				}
			}
		const SDmaDesHdr* const pDstFirstHdr = pCurReq->iDstFirstHdr;
		if ((aDstAddr != KPhysAddrInvalid) || aTransferCount || aPslRequestInfo)
			{
			r = iController->UpdateDstHwDes(*pDstFirstHdr, aDstAddr,
											aTransferCount, aPslRequestInfo);
			if (r != KErrNone)
				{
				__KTRACE_OPT(KPANIC, Kern::Printf("Dst descriptor updating failed in PSL"));
				return r;
				}
			}
		// Reschedule the request
		iController->Transfer(*this, *pSrcFirstHdr, *pDstFirstHdr);
		}
	else
		{
		// We don't allow multiple-descriptor chains to be updated here
		__DMA_ASSERTD(pCurReq->iDesCount == 1);
		// Adjust parameters if necessary (symmetrical s/g and non-s/g variety)
		const SDmaDesHdr* const pFirstHdr = pCurReq->iFirstHdr;
		if ((aSrcAddr != KPhysAddrInvalid) || (aDstAddr != KPhysAddrInvalid) ||
			aTransferCount || aPslRequestInfo)
			{
			r = iController->UpdateDes(*pFirstHdr, aSrcAddr, aDstAddr,
									   aTransferCount, aPslRequestInfo);
			if (r != KErrNone)
				{
				__KTRACE_OPT(KPANIC, Kern::Printf("Descriptor updating failed"));
				return r;
				}
			}
		// Reschedule the request
		iController->Transfer(*this, *pFirstHdr);
		}

	// Not another ISR callback please
	pCurReq->iIsrCb = aIsrCb;

	iRedoRequest = ETrue;

	return r;
	}
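// Sketch of a client ISR callback that re-arms the just-completed request
// via IsrRedoRequest() (illustration only, not compiled in; NextBufferPhys()
// is a hypothetical helper returning the next destination buffer address):
#if 0
static void IsrCb(TDmaCallbackType aType, TDmaResult aResult, TAny* aArg,
				  TAny* /*aHwDes*/)
	{
	TDmaChannel* const channel = static_cast<TDmaChannel*>(aArg);
	if ((aType == EDmaCallbackRequestCompletion) && (aResult == EDmaResultOK))
		{
		// Swap in a new destination buffer; KPhysAddrInvalid leaves the
		// source address unchanged, 0 leaves count and PSL info unchanged.
		// ETrue requests another ISR callback for the rescheduled transfer.
		(void)channel->IsrRedoRequest(KPhysAddrInvalid, NextBufferPhys(),
									  0, 0, ETrue);
		}
	}
#endif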
EXPORT_C TInt TDmaChannel::FailNext(TInt /*aFragmentCount*/)
	{
	return iController->FailNext(*this);
	}

EXPORT_C TInt TDmaChannel::MissNextInterrupts(TInt aInterruptCount)
	{
	return iController->MissNextInterrupts(*this, aInterruptCount);
	}

EXPORT_C TInt TDmaChannel::Extension(TInt aCmd, TAny* aArg)
	{
	return iController->Extension(*this, aCmd, aArg);
	}

// static member function
//
EXPORT_C TInt TDmaChannel::StaticExtension(TInt aCmd, TAny* aArg)
	{
	return DmaChannelMgr::StaticExtension(aCmd, aArg);
	}

EXPORT_C TUint TDmaChannel::MaxTransferLength(TUint aSrcFlags, TUint aDstFlags,
											  TUint32 aPslInfo)
	{
	return iController->MaxTransferLength(*this, aSrcFlags, aDstFlags, aPslInfo);
	}

EXPORT_C TUint TDmaChannel::AddressAlignMask(TUint aTargetFlags, TUint aElementSize,
											 TUint32 aPslInfo)
	{
	return iController->AddressAlignMask(*this, aTargetFlags, aElementSize, aPslInfo);
	}

EXPORT_C const SDmacCaps& TDmaChannel::DmacCaps()
	{
	return *iDmacCaps;
	}

// DFC callback function (static member).
//
void TDmaChannel::Dfc(TAny* aArg)
	{
	static_cast<TDmaChannel*>(aArg)->DoDfc();
	}
// This is quite a long function, but what can you do...
//
void TDmaChannel::DoDfc()
	{
	__KTRACE_OPT(KDMA, Kern::Printf("TDmaChannel::DoDfc thread %O channel - %d",
									&Kern::CurrentThread(), iPslId));
	Wait();

	// Atomically fetch and reset the number of DFCs queued by the ISR and the
	// error flag. Leave the cancel flag alone for now.
	const TUint32 w = __e32_atomic_and_ord32(&iIsrDfc, (TUint32)KCancelFlagMask);
	TUint32 count = w & KDfcCountMask;
	const TBool error = w & (TUint32)KErrorFlagMask;
	TBool stop = w & (TUint32)KCancelFlagMask;
	__DMA_ASSERTD((count > 0) || stop);

	__DMA_ASSERTD(!iRedoRequest); // We shouldn't be here if this is true

	while (count && !stop)
		{
		--count;

		__DMA_ASSERTD(!iReqQ.IsEmpty());

		// If an error occurred it must have been reported on the last
		// interrupt since transfers are suspended after an error.
		DDmaRequest::TResult const res = (count == 0 && error) ?
			DDmaRequest::EError : DDmaRequest::EOk;
		DDmaRequest* pCompletedReq = NULL;
		DDmaRequest* const pCurReq = _LOFF(iReqQ.First(), DDmaRequest, iLink);

		if (res == DDmaRequest::EOk)
			{
			// Update state machine, current fragment, completed fragment and
			// tell the DMAC to transfer the next fragment if necessary.
			SDmaDesHdr* pCompletedHdr = NULL;
			DoDfc(const_cast<const DDmaRequest&>(*pCurReq), pCompletedHdr);

			// If just completed last fragment from current request, switch to
			// next request (if any).
			if (pCompletedHdr == pCurReq->iLastHdr)
				{
				pCompletedReq = pCurReq;
				pCurReq->iLink.Deque();
				if (iReqQ.IsEmpty())
					iNullPtr = &iCurHdr;
				pCompletedReq->OnDeque();
				}
			}
		else
			{
			pCompletedReq = pCurReq;
			}

		if (pCompletedReq && !pCompletedReq->iIsrCb)
			{
			// Don't execute ISR callbacks here (they have already been called)
			DDmaRequest::TCallback const cb = pCompletedReq->iCb;
			if (cb)
				{
				// Old style callback
				TAny* const arg = pCompletedReq->iCbArg;
				Signal();
				__KTRACE_OPT(KDMA, Kern::Printf("Client CB res=%d", res));
				(*cb)(res, arg);
				Wait();
				}
			else
				{
				// New style callback
				TDmaCallback const ncb = pCompletedReq->iDmaCb;
				if (ncb)
					{
					TAny* const arg = pCompletedReq->iDmaCbArg;
					TDmaResult const result = (res == DDmaRequest::EOk) ?
						EDmaResultOK : EDmaResultError;
					Signal();
					__KTRACE_OPT(KDMA, Kern::Printf("Client CB result=%d", result));
					(*ncb)(EDmaCallbackRequestCompletion, result, arg, NULL);
					Wait();
					}
				}
			}
		else
			{
			// Allow another thread in, in case they are trying to cancel
			Signal();
			Wait();
			}
		stop = __e32_atomic_load_acq32(&iIsrDfc) & (TUint32)KCancelFlagMask;
		}
	// Some interrupts may be missed (double-buffer and scatter-gather
	// controllers only) if two or more transfers complete while interrupts are
	// disabled in the CPU. If this happens, the framework will go out of sync
	// and leave some orphaned requests in the queue.
	//
	// To ensure correctness we handle this case here by checking that the request
	// queue is empty when all transfers have completed and, if not, cleaning up
	// and notifying the client of the completion of the orphaned requests.
	//
	// Note that if some interrupts are missed and the controller raises an
	// error while transferring a subsequent fragment, the error will be reported
	// on a fragment which was successfully completed. There is no easy solution
	// to this problem, but this is okay as the only possible action following a
	// failure is to flush the whole queue.
	if (stop)
		{
		// If another thread set the cancel flag, it should have
		// cleaned up the request queue
		__DMA_ASSERTD(IsQueueEmpty());

		TDmaCancelInfo* const waiters = iCancelInfo;
		iCancelInfo = NULL;

		// make sure DFC doesn't run again until a new request completes
		iDfc.Cancel();

		// reset the ISR count - new requests can now be processed
		__e32_atomic_store_rel32(&iIsrDfc, 0);

		Signal();

		// release threads doing CancelAll()
		waiters->Signal();
		}
	else if (!error && !iReqQ.IsEmpty() && iController->IsIdle(*this))
		{
		// On an SMP system we must call stop transfer, it will block until
		// any ISRs have completed so that the system does not spuriously
		// attempt to recover from a missed interrupt.
		//
		// On an SMP system it is possible for the code here to execute
		// concurrently with the DMA ISR. It is therefore possible that at this
		// point the previous transfer has already completed (so that IsIdle
		// reports true), but that the ISR has not yet queued a DFC. Therefore
		// we must wait for the ISR to complete.
		//
		// StopTransfer should have no other side effect, given that the
		// channel is already idle.
		iController->StopTransfer(*this); // should block till ISR completion

		const TBool cleanup = !iDfc.Queued();
		if (cleanup)
			{
			__KTRACE_OPT(KDMA, Kern::Printf("Missed interrupt(s) - draining request queue"));
			ResetStateMachine();

			// Move orphaned requests to temporary queue so channel queue can
			// accept new requests.
			SDblQue q;
			q.MoveFrom(&iReqQ);
			iNullPtr = &iCurHdr;
			SDblQueLink* pL;
			while ((pL = q.GetFirst()) != NULL)
				{
				DDmaRequest* const pR = _LOFF(pL, DDmaRequest, iLink);
				__KTRACE_OPT(KDMA, Kern::Printf("Removing request from queue and notifying client"));
				pR->OnDeque();
				// Old style callback
				DDmaRequest::TCallback const cb = pR->iCb;
				if (cb)
					{
					TAny* const arg = pR->iCbArg;
					Signal();
					(*cb)(DDmaRequest::EOk, arg);
					Wait();
					}
				else
					{
					// New style callback
					TDmaCallback const ncb = pR->iDmaCb;
					if (ncb)
						{
						TAny* const arg = pR->iDmaCbArg;
						Signal();
						(*ncb)(EDmaCallbackRequestCompletion, EDmaResultOK, arg, NULL);
						Wait();
						}
					}
				}
			}
		Signal();
		}
	else
		{
		Signal();
		}
	}
// Reset state machine only, request queue is unchanged
//
void TDmaChannel::ResetStateMachine()
	{
	DoCancelAll();
	iCurHdr = NULL;
	iNullPtr = &iCurHdr;
	}

void TDmaChannel::DoQueue(const DDmaRequest& /*aReq*/)
	{
	// Must be overridden
	__DMA_CANT_HAPPEN();
	}

// Unlink the last item of a LLI chain from the next chain.
// Default implementation does nothing. This is overridden by scatter-gather
// channels.
//
void TDmaChannel::DoUnlink(SDmaDesHdr& /*aHdr*/)
	{
	}

void TDmaChannel::DoDfc(const DDmaRequest& /*aCurReq*/, SDmaDesHdr*& /*aCompletedHdr*/)
	{
	// To make sure this version of the function isn't called for channels for
	// which it isn't appropriate (and which therefore don't override it) we
	// put this check in here.
	__DMA_CANT_HAPPEN();
	}

void TDmaChannel::DoDfc(const DDmaRequest& /*aCurReq*/, SDmaDesHdr*& /*aSrcCompletedHdr*/,
						SDmaDesHdr*& /*aDstCompletedHdr*/)
	{
	// To make sure this version of the function isn't called for channels for
	// which it isn't appropriate (and which therefore don't override it) we
	// put this check in here.
	__DMA_CANT_HAPPEN();
	}

void TDmaChannel::Invariant()
	{
	Wait();

	__DMA_ASSERTD(iReqCount >= 0);

	__DMA_ASSERTD(iCurHdr == NULL || iController->IsValidHdr(iCurHdr));

	// should always point to NULL pointer ending fragment queue
	__DMA_ASSERTD(*iNullPtr == NULL);

	__DMA_ASSERTD((0 <= iAvailDesCount) && (iAvailDesCount <= iMaxDesCount));

	__DMA_ASSERTD(LOGICAL_XOR(iCurHdr, IsQueueEmpty()));
	if (iCurHdr == NULL)
		{
		__DMA_ASSERTD(iNullPtr == &iCurHdr);
		}

	Signal();
	}
//////////////////////////////////////////////////////////////////////////////
// TDmaSbChannel

void TDmaSbChannel::DoQueue(const DDmaRequest& /*aReq*/)
	{
	if (iState != ETransferring)
		{
		iController->Transfer(*this, *iCurHdr);
		iState = ETransferring;
		}
	}

void TDmaSbChannel::DoCancelAll()
	{
	__DMA_ASSERTD(iState == ETransferring);
	iState = EIdle;
	}

void TDmaSbChannel::DoDfc(const DDmaRequest& /*aCurReq*/, SDmaDesHdr*& aCompletedHdr)
	{
	__DMA_ASSERTD(iState == ETransferring);
	aCompletedHdr = iCurHdr;
	iCurHdr = iCurHdr->iNext;
	if (iCurHdr != NULL)
		{
		iController->Transfer(*this, *iCurHdr);
		}
	else
		{
		iState = EIdle;
		}
	}
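// State machine of the single-buffer channel (illustration): DoQueue() moves
// EIdle -> ETransferring and starts the first fragment; each DFC pass then
// advances iCurHdr, starting the next fragment while one remains and
// returning the channel to EIdle once iCurHdr reaches NULL.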
//////////////////////////////////////////////////////////////////////////////
// TDmaDbChannel

void TDmaDbChannel::DoQueue(const DDmaRequest& aReq)
	{
	switch (iState)
		{
	case EIdle:
		iController->Transfer(*this, *iCurHdr);
		if (iCurHdr->iNext)
			{
			iController->Transfer(*this, *(iCurHdr->iNext));
			iState = ETransferring;
			}
		else
			iState = ETransferringLast;
		break;
	case ETransferring:
		// nothing to do
		break;
	case ETransferringLast:
		iController->Transfer(*this, *(aReq.iFirstHdr));
		iState = ETransferring;
		break;
	default:
		__DMA_CANT_HAPPEN();
		}
	}

void TDmaDbChannel::DoCancelAll()
	{
	iState = EIdle;
	}

void TDmaDbChannel::DoDfc(const DDmaRequest& /*aCurReq*/, SDmaDesHdr*& aCompletedHdr)
	{
	aCompletedHdr = iCurHdr;
	iCurHdr = iCurHdr->iNext;
	switch (iState)
		{
	case ETransferringLast:
		iState = EIdle;
		break;
	case ETransferring:
		if (iCurHdr->iNext == NULL)
			iState = ETransferringLast;
		else
			iController->Transfer(*this, *(iCurHdr->iNext));
		break;
	default:
		__DMA_CANT_HAPPEN();
		}
	}
//////////////////////////////////////////////////////////////////////////////
// TDmaSgChannel

void TDmaSgChannel::DoQueue(const DDmaRequest& aReq)
	{
	if (iState == ETransferring)
		{
		__DMA_ASSERTD(!aReq.iLink.Alone());
		DDmaRequest* pReqPrev = _LOFF(aReq.iLink.iPrev, DDmaRequest, iLink);
		iController->AppendHwDes(*this, *(pReqPrev->iLastHdr), *(aReq.iFirstHdr));
		}
	else
		{
		iController->Transfer(*this, *(aReq.iFirstHdr));
		iState = ETransferring;
		}
	}

void TDmaSgChannel::DoCancelAll()
	{
	__DMA_ASSERTD(iState == ETransferring);
	iState = EIdle;
	}

void TDmaSgChannel::DoUnlink(SDmaDesHdr& aHdr)
	{
	iController->UnlinkHwDes(*this, aHdr);
	}

void TDmaSgChannel::DoDfc(const DDmaRequest& aCurReq, SDmaDesHdr*& aCompletedHdr)
	{
	__DMA_ASSERTD(iState == ETransferring);
	aCompletedHdr = aCurReq.iLastHdr;
	iCurHdr = aCompletedHdr->iNext;
	iState = (iCurHdr != NULL) ? ETransferring : EIdle;
	}
//////////////////////////////////////////////////////////////////////////////
// TDmaAsymSgChannel

void TDmaAsymSgChannel::DoQueue(const DDmaRequest& aReq)
	{
	if (iState == ETransferring)
		{
		__DMA_ASSERTD(!aReq.iLink.Alone());
		DDmaRequest* pReqPrev = _LOFF(aReq.iLink.iPrev, DDmaRequest, iLink);
		iController->AppendHwDes(*this,
								 *(pReqPrev->iSrcLastHdr), *(aReq.iSrcFirstHdr),
								 *(pReqPrev->iDstLastHdr), *(aReq.iDstFirstHdr));
		}
	else
		{
		iController->Transfer(*this, *(aReq.iSrcFirstHdr), *(aReq.iDstFirstHdr));
		iState = ETransferring;
		}
	}

void TDmaAsymSgChannel::DoCancelAll()
	{
	__DMA_ASSERTD(iState == ETransferring);
	iState = EIdle;
	}

void TDmaAsymSgChannel::DoUnlink(SDmaDesHdr& aHdr)
	{
	iController->UnlinkHwDes(*this, aHdr);
	}

void TDmaAsymSgChannel::DoDfc(const DDmaRequest& aCurReq, SDmaDesHdr*& aSrcCompletedHdr,
							  SDmaDesHdr*& aDstCompletedHdr)
	{
	__DMA_ASSERTD(iState == ETransferring);
	aSrcCompletedHdr = aCurReq.iSrcLastHdr;
	iSrcCurHdr = aSrcCompletedHdr->iNext;
	aDstCompletedHdr = aCurReq.iDstLastHdr;
	iDstCurHdr = aDstCompletedHdr->iNext;
	// Must be either both NULL or neither of them.
	__DMA_ASSERTD(!LOGICAL_XOR(iSrcCurHdr, iDstCurHdr));
	iState = (iSrcCurHdr != NULL) ? ETransferring : EIdle;
	}