// Copyright (c) 2002-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\drivers\dmapil.cpp
// DMA Platform Independent Layer (PIL)
//

#include <drivers/dma.h>
#include <kernel/kern_priv.h>
static const char KDmaPanicCat[] = "DMA";

NFastMutex DmaChannelMgr::Lock;
class TDmaCancelInfo : public SDblQueLink
    {
public:
    TDmaCancelInfo();
    void Signal();
public:
    NFastSemaphore iSem;
    };

TDmaCancelInfo::TDmaCancelInfo()
    : iSem(0)
    {
    iNext = this;
    iPrev = this;
    }

void TDmaCancelInfo::Signal()
    {
    TDmaCancelInfo* p = this;
    FOREVER
        {
        TDmaCancelInfo* next = (TDmaCancelInfo*)p->iNext;
        if (p != next)
            p->Deque();
        NKern::FSSignal(&p->iSem);  // Don't dereference p after this
        if (p == next)
            break;
        p = next;
        }
    }
//////////////////////////////////////////////////////////////////////////////

#ifdef __WINS__
typedef TLinAddr TPhysAddr;
static inline TPhysAddr LinToPhys(TLinAddr aLin) {return aLin;}
#else
static inline TPhysAddr LinToPhys(TLinAddr aLin) {return Epoc::LinearToPhysical(aLin);}
#endif
// Return minimum of aMaxSize and size of largest physically contiguous block
// starting at aLinAddr.

static TInt MaxPhysSize(TLinAddr aLinAddr, const TInt aMaxSize)
    {
    const TPhysAddr physBase = LinToPhys(aLinAddr);
    TLinAddr lin = aLinAddr;
    TInt size = 0;
    for (;;)
        {
        // Round up the linear address to the next MMU page boundary
        const TLinAddr linBoundary = Kern::RoundToPageSize(lin + 1);
        size += linBoundary - lin;
        if (size >= aMaxSize)
            return aMaxSize;
        if ((physBase + size) != LinToPhys(linBoundary))
            return size;
        lin = linBoundary;
        }
    }
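// Worked example of the page walk above: a minimal, self-contained sketch
// (not part of the original source) with a stubbed address translation and
// an assumed 4KB page size. The first two linear pages map to contiguous
// physical pages, the third does not, so the walk stops either at the
// discontinuity or at the size cap, whichever comes first. All names here
// are hypothetical stand-ins for the kernel calls used above.
#if 0   // illustrative sketch only
#include <cstdint>
#include <cstdio>

static const uint32_t KPageSize = 0x1000;

// Fake translation table: pages 0 and 1 are physically contiguous,
// page 2 lives elsewhere (values are arbitrary for the example).
static uint32_t StubLinToPhys(uint32_t aLin)
    {
    static const uint32_t pageMap[] = { 0x80000000, 0x80001000, 0x90000000 };
    return pageMap[aLin / KPageSize] + (aLin & (KPageSize - 1));
    }

static uint32_t StubRoundToPageSize(uint32_t a)
    {
    return (a + KPageSize - 1) & ~(KPageSize - 1);
    }

static int32_t StubMaxPhysSize(uint32_t aLinAddr, const int32_t aMaxSize)
    {
    const uint32_t physBase = StubLinToPhys(aLinAddr);
    uint32_t lin = aLinAddr;
    int32_t size = 0;
    for (;;)
        {
        const uint32_t linBoundary = StubRoundToPageSize(lin + 1);
        size += linBoundary - lin;
        if (size >= aMaxSize)
            return aMaxSize;                        // size cap reached first
        if ((physBase + size) != StubLinToPhys(linBoundary))
            return size;                            // physical discontinuity
        lin = linBoundary;
        }
    }

int main()
    {
    printf("0x%X\n", (unsigned)StubMaxPhysSize(0, 0x3000));  // 0x2000: discontinuity
    printf("0x%X\n", (unsigned)StubMaxPhysSize(0, 0x1800));  // 0x1800: cap reached
    return 0;
    }
#endif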
//////////////////////////////////////////////////////////////////////////////
// TDmac
TDmac::TDmac(const SCreateInfo& aInfo)
    : iMaxDesCount(aInfo.iDesCount),
      iAvailDesCount(aInfo.iDesCount),
      iDesSize(aInfo.iDesSize),
      iCaps(aInfo.iCaps)
    {
    __DMA_ASSERTD(iMaxDesCount > 0);
    __DMA_ASSERTD((iCaps & ~KCapsBitHwDes) == 0);  // undefined bits set?
    __DMA_ASSERTD(iDesSize > 0);
    }
// Second-phase c'tor

TInt TDmac::Create(const SCreateInfo& aInfo)
    {
    iHdrPool = new SDmaDesHdr[iMaxDesCount];
    if (iHdrPool == NULL)
        return KErrNoMemory;

    TInt r = AllocDesPool(aInfo.iDesChunkAttribs);
    if (r != KErrNone)
        return KErrNoMemory;

    // Link all descriptor headers together on the free list
    iFreeHdr = iHdrPool;
    for (TInt i = 0; i < iMaxDesCount - 1; i++)
        iHdrPool[i].iNext = iHdrPool + i + 1;
    iHdrPool[iMaxDesCount-1].iNext = NULL;

    return KErrNone;
    }
// Calling thread must be in CS
TInt TDmac::AllocDesPool(TUint aAttribs)
    {
    TInt r;
    if (iCaps & KCapsBitHwDes)
        {
        TInt size = iMaxDesCount * iDesSize;
#ifdef __WINS__
        iDesPool = new TUint8[size];
        r = iDesPool ? KErrNone : KErrNoMemory;
#else
        // Chunk not mapped as supervisor r/w user none? Incorrect mask passed by PSL.
        __DMA_ASSERTD((aAttribs & EMapAttrAccessMask) == EMapAttrSupRw);
        TPhysAddr phys;
        r = Epoc::AllocPhysicalRam(size, phys);
        if (r == KErrNone)
            {
            r = DPlatChunkHw::New(iHwDesChunk, phys, size, aAttribs);
            if (r == KErrNone)
                {
                iDesPool = (TAny*)iHwDesChunk->LinearAddress();
                __KTRACE_OPT(KDMA, Kern::Printf("descriptor hw chunk created lin=0x%08X phys=0x%08X, size=0x%X",
                                                iHwDesChunk->iLinAddr, iHwDesChunk->iPhysAddr, size));
                }
            else
                Epoc::FreePhysicalRam(phys, size);
            }
#endif
        }
    else
        {
        iDesPool = new SDmaPseudoDes[iMaxDesCount];
        r = iDesPool ? KErrNone : KErrNoMemory;
        }
    return r;
    }
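// For context, a sketch of how a PSL might fill TDmac::SCreateInfo so that
// AllocDesPool() above creates a physically contiguous, supervisor-r/w
// hardware descriptor pool. The controller, descriptor layout and attribute
// choices are hypothetical; only the SCreateInfo fields used in this file
// are assumed to exist.
#if 0   // illustrative PSL-side sketch only
struct SMyHwDes   // hypothetical controller descriptor format
    {
    TUint32 iSrc;
    TUint32 iDest;
    TUint32 iCount;
    TUint32 iLink;
    };

static void FillCreateInfo(TDmac::SCreateInfo& aInfo)
    {
    aInfo.iDesCount = 256;                      // shared by all channels
    aInfo.iDesSize = sizeof(SMyHwDes);          // one hw descriptor per header
    aInfo.iCaps = TDmac::KCapsBitHwDes;         // controller fetches descriptors from RAM
    // Uncached supervisor r/w mapping, so descriptor writes reach the DMAC.
    aInfo.iDesChunkAttribs = EMapAttrSupRw | EMapAttrFullyBlocking;
    }
#endif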
// Calling thread must be in CS
void TDmac::FreeDesPool()
    {
    if (iCaps & KCapsBitHwDes)
        {
#ifdef __WINS__
        delete[] (TUint8*)iDesPool;
#else
        if (iHwDesChunk)
            {
            TPhysAddr phys = iHwDesChunk->PhysicalAddress();
            TInt size = iHwDesChunk->iSize;
            iHwDesChunk->Close(NULL);
            Epoc::FreePhysicalRam(phys, size);
            }
#endif
        }
    else
        Kern::Free(iDesPool);
    }
/**
 Prealloc the given number of descriptors.
 */

TInt TDmac::ReserveSetOfDes(TInt aCount)
    {
    __KTRACE_OPT(KDMA, Kern::Printf(">TDmac::ReserveSetOfDes count=%d", aCount));
    __DMA_ASSERTD(aCount > 0);
    TInt r = KErrTooBig;
    Wait();
    if (iAvailDesCount - aCount >= 0)
        {
        iAvailDesCount -= aCount;
        r = KErrNone;
        }
    Signal();
    __KTRACE_OPT(KDMA, Kern::Printf("<TDmac::ReserveSetOfDes r=%d", r));
    return r;
    }
/**
 Return the given number of preallocated descriptors to the free pool.
 */

void TDmac::ReleaseSetOfDes(TInt aCount)
    {
    __DMA_ASSERTD(aCount >= 0);
    Wait();
    iAvailDesCount += aCount;
    Signal();
    }
/**
 Queue DFC and update word used to communicate with DFC.

 Called in interrupt context by PSL.
 */

void TDmac::HandleIsr(TDmaChannel& aChannel, TBool aIsComplete)
    {
    //__KTRACE_OPT(KDMA, Kern::Printf("TDmac::HandleIsr channel=%d complete=%d", aChannelIdx, aIsComplete));

    // Queue DFC if necessary. The possible scenarios are:
    // * no DFC queued --> need to queue DFC
    // * DFC queued (not running yet) --> just need to update iIsrDfc
    // * DFC running / iIsrDfc already reset --> need to requeue DFC
    // * DFC running / iIsrDfc not reset yet --> just need to update iIsrDfc
    // Set error flag if necessary.
    TUint32 inc = aIsComplete ? 1u : TUint32(TDmaChannel::KErrorFlagMask)|1u;
    TUint32 orig = __e32_atomic_tau_ord32(&aChannel.iIsrDfc, TUint32(TDmaChannel::KCancelFlagMask), 0, inc);

    // As the transfer should be suspended when an error occurs, we
    // should never get here with the error flag already set.
    __DMA_ASSERTD((orig & inc & (TUint32)TDmaChannel::KErrorFlagMask) == 0);

    if ((orig & TUint32(TDmaChannel::KCancelFlagMask)) == 0)
        aChannel.iDfc.Add();
    }
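// The ISR<->DFC handshake above in miniature. __e32_atomic_tau_ord32(&w, t, u, v)
// atomically adds u to w if the old value is >= t, else adds v, returning the
// old value; with t == KCancelFlagMask (the top bit), u == 0 and v == inc, an
// ISR bumps the DFC count (and possibly the error bit) unless the channel is
// being cancelled. The sketch below (not part of the driver) reproduces this
// with std::atomic; the flag values are assumptions for the example, not the
// real TDmaChannel masks.
#if 0   // illustrative sketch only
#include <atomic>
#include <cstdint>

static const uint32_t KCancel = 0x80000000u;            // assumed cancel flag (top bit)
static const uint32_t KError  = 0x40000000u;            // assumed error flag
static const uint32_t KCount  = ~(KCancel | KError);    // DFC count field

static std::atomic<uint32_t> isrDfc(0);

// ISR side: returns true if the caller may post the DFC (never after cancel).
static bool HandleIsrSketch(bool aIsComplete)
    {
    const uint32_t inc = aIsComplete ? 1u : (KError | 1u);
    uint32_t orig = isrDfc.load(std::memory_order_relaxed);
    // Emulate the 'threshold add': leave the word alone once cancelling.
    while (orig < KCancel &&
           !isrDfc.compare_exchange_weak(orig, orig + inc))
        {}
    return orig < KCancel;
    }

// DFC side: fetch-and-reset count and error, keep the cancel flag.
static void DfcSketch()
    {
    const uint32_t w = isrDfc.fetch_and(KCancel);
    uint32_t count = w & KCount;
    const bool error = w & KError;
    // ... service 'count' completed fragments, report 'error' on the last ...
    (void)count; (void)error;
    }
#endif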
void TDmac::InitDes(const SDmaDesHdr& aHdr, TUint32 aSrc, TUint32 aDest, TInt aCount,
                    TUint aFlags, TUint32 aPslInfo, TUint32 aCookie)
    {
    if (iCaps & KCapsBitHwDes)
        InitHwDes(aHdr, aSrc, aDest, aCount, aFlags, aPslInfo, aCookie);
    else
        {
        SDmaPseudoDes& des = HdrToDes(aHdr);
        des.iSrc = aSrc;
        des.iDest = aDest;
        des.iCount = aCount;
        des.iFlags = aFlags;
        des.iPslInfo = aPslInfo;
        des.iCookie = aCookie;
        }
    }
void TDmac::InitHwDes(const SDmaDesHdr& /*aHdr*/, TUint32 /*aSrc*/, TUint32 /*aDest*/, TInt /*aCount*/,
                      TUint /*aFlags*/, TUint32 /*aPslInfo*/, TUint32 /*aCookie*/)
    {
    // concrete controller must override if KCapsBitHwDes set
    __DMA_CANT_HAPPEN();
    }

void TDmac::ChainHwDes(const SDmaDesHdr& /*aHdr*/, const SDmaDesHdr& /*aNextHdr*/)
    {
    // concrete controller must override if KCapsBitHwDes set
    __DMA_CANT_HAPPEN();
    }

void TDmac::AppendHwDes(const TDmaChannel& /*aChannel*/, const SDmaDesHdr& /*aLastHdr*/,
                        const SDmaDesHdr& /*aNewHdr*/)
    {
    // concrete controller must override if KCapsBitHwDes set
    __DMA_CANT_HAPPEN();
    }

void TDmac::UnlinkHwDes(const TDmaChannel& /*aChannel*/, SDmaDesHdr& /*aHdr*/)
    {
    // concrete controller must override if KCapsBitHwDes set
    __DMA_CANT_HAPPEN();
    }
TInt TDmac::FailNext(const TDmaChannel& /*aChannel*/)
    {
    return KErrNotSupported;
    }

TInt TDmac::MissNextInterrupts(const TDmaChannel& /*aChannel*/, TInt /*aInterruptCount*/)
    {
    return KErrNotSupported;
    }

TInt TDmac::Extension(TDmaChannel& /*aChannel*/, TInt /*aCmd*/, TAny* /*aArg*/)
    {
    // default implementation - NOP
    return KErrNotSupported;
    }
void TDmac::Invariant()
    {
    Wait();
    __DMA_ASSERTD(0 <= iAvailDesCount && iAvailDesCount <= iMaxDesCount);
    __DMA_ASSERTD(! iFreeHdr || IsValidHdr(iFreeHdr));
    for (TInt i = 0; i < iMaxDesCount; i++)
        __DMA_ASSERTD(iHdrPool[i].iNext == NULL || IsValidHdr(iHdrPool[i].iNext));
    Signal();
    }

TBool TDmac::IsValidHdr(const SDmaDesHdr* aHdr)
    {
    return (iHdrPool <= aHdr) && (aHdr < iHdrPool + iMaxDesCount);
    }
//////////////////////////////////////////////////////////////////////////////
// DDmaRequest
EXPORT_C DDmaRequest::DDmaRequest(TDmaChannel& aChannel, TCallback aCb, TAny* aCbArg, TInt aMaxTransferSize)
    : iChannel(aChannel),
      iCb(aCb),
      iCbArg(aCbArg),
      iMaxTransferSize(aMaxTransferSize)
    {
    // iFirstHdr = iLastHdr = NULL;
    iChannel.iReqCount++;
    __DMA_INVARIANT();
    }
EXPORT_C DDmaRequest::~DDmaRequest()
    {
    __DMA_ASSERTD(!iQueued);
    __DMA_INVARIANT();
    FreeDesList();
    iChannel.iReqCount--;
    }
EXPORT_C TInt DDmaRequest::Fragment(TUint32 aSrc, TUint32 aDest, TInt aCount, TUint aFlags, TUint32 aPslInfo)
    {
    __KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::Fragment thread %O "
                                    "src=0x%08X dest=0x%08X count=%d flags=0x%X psl=0x%08X",
                                    &Kern::CurrentThread(), aSrc, aDest, aCount, aFlags, aPslInfo));
    __DMA_ASSERTD(aCount > 0);
    __DMA_ASSERTD(!iQueued);

    const TUint alignMask = iChannel.MemAlignMask(aFlags, aPslInfo);
    const TBool memSrc = aFlags & KDmaMemSrc;
    const TBool memDest = aFlags & KDmaMemDest;

    // Memory buffers must satisfy alignment constraint
    __DMA_ASSERTD(!memSrc || ((aSrc & alignMask) == 0));
    __DMA_ASSERTD(!memDest || ((aDest & alignMask) == 0));

    // Ask the PSL what the maximum size possible for this transfer is
    TInt maxTransferSize = iChannel.MaxTransferSize(aFlags, aPslInfo);
    if (!maxTransferSize)
        {
        __KTRACE_OPT(KPANIC, Kern::Printf("Error: maxTransferSize == 0"));
        return KErrArgument;
        }

    if (iMaxTransferSize)
        {
        // User has set a size cap
        __DMA_ASSERTA((iMaxTransferSize <= maxTransferSize) || (maxTransferSize == -1));
        maxTransferSize = iMaxTransferSize;
        }
    else
        {
        // User doesn't care about max size
        if (maxTransferSize == -1)
            {
            // No maximum imposed by controller
            maxTransferSize = aCount;
            }
        }

    const TInt maxAlignedSize = (maxTransferSize & ~alignMask);
    __DMA_ASSERTD(maxAlignedSize > 0);  // bug in PSL if not true

    FreeDesList();

    TInt r = KErrNone;
    do
        {
        // Allocate fragment
        r = ExpandDesList(1);
        if (r != KErrNone)
            break;

        // Compute fragment size
        TInt c = Min(maxTransferSize, aCount);
        if (memSrc && ((aFlags & KDmaPhysAddrSrc) == 0))
            c = MaxPhysSize(aSrc, c);
        if (memDest && ((aFlags & KDmaPhysAddrDest) == 0))
            c = MaxPhysSize(aDest, c);
        if ((memSrc || memDest) && (c < aCount) && (c > maxAlignedSize))
            {
            // This is not the last fragment of a transfer to/from memory.
            // We must round down the fragment size so the next one is
            // correctly aligned.
            c = maxAlignedSize;
            }

        // Initialise fragment
        __KTRACE_OPT(KDMA, Kern::Printf("fragment: src=0x%08X dest=0x%08X count=%d", aSrc, aDest, c));
        iChannel.iController->InitDes(*iLastHdr, aSrc, aDest, c, aFlags, aPslInfo, iChannel.PslId());

        // Update for next iteration
        aCount -= c;
        if (memSrc)
            aSrc += c;
        if (memDest)
            aDest += c;
        }
    while (aCount > 0);

    if (r != KErrNone)
        FreeDesList();

    __DMA_INVARIANT();
    return r;
    }
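// For orientation, a client-side usage sketch of the Fragment()/Queue()
// sequence above, as it might appear in an LDD. The cookie, DFC queue,
// callback and flag set are hypothetical; only the APIs defined in this
// framework (TDmaChannel::Open, DDmaRequest, Fragment, Queue) are assumed.
#if 0   // illustrative sketch only
static void XferDone(DDmaRequest::TResult aResult, TAny* aArg)
    {
    // signal the waiting client thread here...
    (void)aResult; (void)aArg;
    }

TInt DoDmaCopy(TUint32 aCookie, TDfcQue* aDfcQ, TUint32 aSrc, TUint32 aDest, TInt aCount)
    {
    TDmaChannel::SCreateInfo info;
    info.iCookie = aCookie;         // PSL-specific channel id
    info.iDesCount = 16;            // enough headers for the worst-case fragment count
    info.iDfcQ = aDfcQ;
    info.iDfcPriority = 3;

    TDmaChannel* channel = NULL;
    TInt r = TDmaChannel::Open(info, channel);
    if (r != KErrNone)
        return r;

    DDmaRequest* req = new DDmaRequest(*channel, XferDone, NULL);
    if (!req)
        {
        channel->Close();
        return KErrNoMemory;
        }

    // Split the transfer into PSL- and page-size-compliant fragments, then queue.
    r = req->Fragment(aSrc, aDest, aCount, KDmaMemSrc | KDmaMemDest, 0);
    if (r == KErrNone)
        req->Queue();
    // ... wait for XferDone, then: delete req; channel->Close();
    return r;
    }
#endif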
EXPORT_C void DDmaRequest::Queue()
    {
    __KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::Queue thread %O", &Kern::CurrentThread()));
    __DMA_ASSERTD(iDesCount > 0);   // Not configured? Call Fragment() first!
    __DMA_ASSERTD(!iQueued);

    // Append request to queue and link new descriptor list to existing one.
    iChannel.Wait();

    TUint32 req_count = iChannel.iQueuedRequests++;
    if (req_count == 0)
        {
        iChannel.Signal();
        iChannel.QueuedRequestCountChanged();
        iChannel.Wait();
        }

    if (!(iChannel.iIsrDfc & (TUint32)TDmaChannel::KCancelFlagMask))
        {
        iQueued = ETrue;
        iChannel.iReqQ.Add(&iLink);
        *iChannel.iNullPtr = iFirstHdr;
        iChannel.iNullPtr = &(iLastHdr->iNext);
        iChannel.DoQueue(*this);
        iChannel.Signal();
        }
    else
        {
        // Someone is cancelling all requests...
        req_count = --iChannel.iQueuedRequests;
        iChannel.Signal();
        if (req_count == 0)
            iChannel.QueuedRequestCountChanged();
        }

    __DMA_INVARIANT();
    }
EXPORT_C TInt DDmaRequest::ExpandDesList(TInt aCount)
    {
    __DMA_ASSERTD(!iQueued);
    __DMA_ASSERTD(aCount > 0);

    if (aCount > iChannel.iAvailDesCount)
        return KErrTooBig;

    iChannel.iAvailDesCount -= aCount;
    iDesCount += aCount;

    TDmac& c = *(iChannel.iController);
    c.Wait();

    if (iFirstHdr == NULL)
        {
        // handle empty list specially to simplify following loop
        iFirstHdr = iLastHdr = c.iFreeHdr;
        c.iFreeHdr = c.iFreeHdr->iNext;
        --aCount;
        }
    else
        iLastHdr->iNext = c.iFreeHdr;

    // Remove as many descriptors and headers from free pool as necessary and
    // ensure hardware descriptors are chained together.
    while (aCount-- > 0)
        {
        __DMA_ASSERTD(c.iFreeHdr != NULL);
        if (c.iCaps & TDmac::KCapsBitHwDes)
            c.ChainHwDes(*iLastHdr, *(c.iFreeHdr));
        iLastHdr = c.iFreeHdr;
        c.iFreeHdr = c.iFreeHdr->iNext;
        }

    c.Signal();

    iLastHdr->iNext = NULL;

    __DMA_INVARIANT();
    return KErrNone;
    }
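// The splice above, reduced to a plain singly-linked list (sketch only, not
// part of the driver; locking and hardware-descriptor chaining omitted).
// Carving N nodes off the shared free list, grafting them onto the request
// chain and re-terminating the chain with NULL is the whole operation.
#if 0   // illustrative sketch only
#include <cassert>
#include <cstddef>

struct Hdr { Hdr* iNext; };
struct Pool { Hdr* iFreeHdr; };
struct Chain { Hdr* iFirst; Hdr* iLast; };

static void Expand(Pool& p, Chain& c, int aCount)
    {
    if (c.iFirst == NULL)
        {
        // empty chain: seed it with the first free node
        c.iFirst = c.iLast = p.iFreeHdr;
        p.iFreeHdr = p.iFreeHdr->iNext;
        --aCount;
        }
    else
        c.iLast->iNext = p.iFreeHdr;    // graft free list onto chain tail
    while (aCount-- > 0)
        {
        assert(p.iFreeHdr != NULL);
        c.iLast = p.iFreeHdr;
        p.iFreeHdr = p.iFreeHdr->iNext;
        }
    c.iLast->iNext = NULL;              // chain no longer aliases the free list
    }
#endif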
EXPORT_C void DDmaRequest::FreeDesList()
    {
    __DMA_ASSERTD(!iQueued);
    if (iDesCount > 0)
        {
        iChannel.iAvailDesCount += iDesCount;
        TDmac& c = *(iChannel.iController);
        c.Wait();
        iLastHdr->iNext = c.iFreeHdr;
        c.iFreeHdr = iFirstHdr;
        c.Signal();
        iFirstHdr = iLastHdr = NULL;
        iDesCount = 0;
        }
    }
void DDmaRequest::Invariant()
    {
    iChannel.Wait();
    __DMA_ASSERTD(iChannel.IsOpened());
    __DMA_ASSERTD(0 <= iMaxTransferSize);
    __DMA_ASSERTD(0 <= iDesCount && iDesCount <= iChannel.iMaxDesCount);
    if (iDesCount == 0)
        {
        __DMA_ASSERTD(!iQueued);
        __DMA_ASSERTD(!iFirstHdr && !iLastHdr);
        }
    else
        {
        __DMA_ASSERTD(iChannel.iController->IsValidHdr(iFirstHdr));
        __DMA_ASSERTD(iChannel.iController->IsValidHdr(iLastHdr));
        }
    iChannel.Signal();
    }
//////////////////////////////////////////////////////////////////////////////
// TDmaChannel
EXPORT_C TInt TDmaChannel::StaticExtension(TInt aCmd, TAny* aArg)
    {
    return DmaChannelMgr::StaticExtension(aCmd, aArg);
    }
TDmaChannel::TDmaChannel()
    : iController(NULL),
      iPslId(0),
      iDfc(Dfc, NULL, 0),
      iMaxDesCount(0),
      iAvailDesCount(0),
      iIsrDfc(0),
      iReqCount(0),
      iCurHdr(NULL),
      iNullPtr(&iCurHdr),
      iCancelInfo(NULL)
    {
    }
EXPORT_C TInt TDmaChannel::Open(const SCreateInfo& aInfo, TDmaChannel*& aChannel)
    {
    __KTRACE_OPT(KDMA, Kern::Printf("TDmaChannel::Open thread %O", &Kern::CurrentThread()));
    __DMA_ASSERTD(aInfo.iDfcQ != NULL);
    __DMA_ASSERTD(aInfo.iDfcPriority < KNumDfcPriorities);
    __DMA_ASSERTD(aInfo.iDesCount >= 1);

    aChannel = NULL;

    DmaChannelMgr::Wait();
    TDmaChannel* pC = DmaChannelMgr::Open(aInfo.iCookie);
    DmaChannelMgr::Signal();
    if (!pC)
        return KErrInUse;

    TInt r = pC->iController->ReserveSetOfDes(aInfo.iDesCount);
    if (r != KErrNone)
        {
        pC->Close();
        return r;
        }
    pC->iAvailDesCount = pC->iMaxDesCount = aInfo.iDesCount;

    new (&pC->iDfc) TDfc(&Dfc, pC, aInfo.iDfcQ, aInfo.iDfcPriority);

    aChannel = pC;

    __KTRACE_OPT(KDMA, Kern::Printf("opened channel %d", pC->iPslId));
    return KErrNone;
    }
EXPORT_C void TDmaChannel::Close()
    {
    __KTRACE_OPT(KDMA, Kern::Printf("TDmaChannel::Close %d", iPslId));
    __DMA_ASSERTD(IsOpened());
    __DMA_ASSERTD(IsQueueEmpty());
    __DMA_ASSERTD(iReqCount == 0);

    __DMA_ASSERTD(iQueuedRequests == 0);

    // descriptor leak? bug in request code
    __DMA_ASSERTD(iAvailDesCount == iMaxDesCount);

    iController->ReleaseSetOfDes(iMaxDesCount);
    iAvailDesCount = iMaxDesCount = 0;

    DmaChannelMgr::Wait();
    DmaChannelMgr::Close(this);
    iController = NULL;
    DmaChannelMgr::Signal();
    }
EXPORT_C void TDmaChannel::CancelAll()
    {
    __KTRACE_OPT(KDMA, Kern::Printf("TDmaChannel::CancelAll thread %O channel - %d",
                                    &Kern::CurrentThread(), iPslId));
    __DMA_ASSERTD(IsOpened());

    NThread* nt = NKern::CurrentThread();
    TBool wait = EFalse;
    TDmaCancelInfo c;
    TDmaCancelInfo* waiters = 0;

    NKern::ThreadEnterCS();
    Wait();

    const TUint32 req_count_before = iQueuedRequests;

    NThreadBase* dfcnt = iDfc.Thread();
    __e32_atomic_store_ord32(&iIsrDfc, (TUint32)KCancelFlagMask);
    // ISRs after this point will not post a DFC, however a DFC may already be
    // queued or running or both.
    if (iCurHdr)
        {
        // There is a transfer in progress. It may complete before the DMAC
        // has stopped, but the resulting ISR will not post a DFC.
        // ISR should not happen after this function returns.
        iController->StopTransfer(*this);
        ResetStateMachine();
        }

    // Clean up the request queue.
    SDblQueLink* pL;
    while ((pL = iReqQ.GetFirst()) != NULL)
        {
        iQueuedRequests--;
        DDmaRequest* pR = _LOFF(pL, DDmaRequest, iLink);
        pR->OnDeque();
        }

    if (!dfcnt || dfcnt == nt)
        {
        // no DFC queue or DFC runs in this thread, so just cancel it and we're finished
        iDfc.Cancel();

        // if other calls to CancelAll() are waiting for the DFC, release them here
        waiters = iCancelInfo;
        iCancelInfo = NULL;

        // reset the ISR count
        __e32_atomic_store_rel32(&iIsrDfc, 0);
        }
    else
        {
        // DFC runs in another thread. Make sure it's queued and then wait for it to run.
        if (iCancelInfo)
            c.InsertBefore(iCancelInfo);
        else
            iCancelInfo = &c;
        wait = ETrue;
        iDfc.Enque();
        }

    const TUint32 req_count_after = iQueuedRequests;

    Signal();

    if (waiters)
        waiters->Signal();
    if (wait)
        NKern::FSWait(&c.iSem);

    NKern::ThreadLeaveCS();

    // Only call PSL if there were requests queued when we entered AND there
    // are now no requests left on the queue.
    if ((req_count_before != 0) && (req_count_after == 0))
        QueuedRequestCountChanged();

    __DMA_INVARIANT();
    }
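// The multi-waiter pattern above in miniature (sketch only, not part of the
// driver): each canceller parks a stack-allocated node holding its own
// semaphore on a shared ring; whoever tears the ring down signals every node,
// waking all waiting cancellers at once. std::binary_semaphore (C++20) stands
// in for NFastSemaphore; external locking around the ring is assumed.
#if 0   // illustrative sketch only
#include <semaphore>

struct CancelInfo
    {
    CancelInfo* iNext = this;           // circular, like SDblQueLink
    CancelInfo* iPrev = this;
    std::binary_semaphore iSem{0};

    void InsertBefore(CancelInfo* aNode)
        {
        iNext = aNode;
        iPrev = aNode->iPrev;
        aNode->iPrev->iNext = this;
        aNode->iPrev = this;
        }

    // Walk the ring, waking every parked canceller exactly once.
    void Signal()
        {
        CancelInfo* p = this;
        for (;;)
            {
            CancelInfo* next = p->iNext;
            if (p != next)
                {
                // unlink p from the ring before waking it
                p->iPrev->iNext = p->iNext;
                p->iNext->iPrev = p->iPrev;
                }
            p->iSem.release();          // don't touch p after this
            if (p == next)
                break;
            p = next;
            }
        }
    };
#endif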
/**
 DFC callback function (static member).
 */

void TDmaChannel::Dfc(TAny* aArg)
    {
    ((TDmaChannel*)aArg)->DoDfc();
    }
void TDmaChannel::DoDfc()
    {
    Wait();

    // Atomically fetch and reset the number of DFCs queued by the ISR and
    // the error flag. Leave the cancel flag alone for now.
    const TUint32 w = __e32_atomic_and_ord32(&iIsrDfc, (TUint32)KCancelFlagMask);
    TUint32 count = w & KDfcCountMask;
    const TBool error = w & (TUint32)KErrorFlagMask;
    TBool stop = w & (TUint32)KCancelFlagMask;
    __DMA_ASSERTD(count > 0 || stop);

    const TUint32 req_count_before = iQueuedRequests;
    TUint32 req_count_after = 0;

    while (count && !stop)
        {
        --count;

        // If an error occurred it must have been reported on the last
        // interrupt since transfers are suspended after an error.
        DDmaRequest::TResult res = (count == 0 && error) ? DDmaRequest::EError : DDmaRequest::EOk;
        __DMA_ASSERTD(!iReqQ.IsEmpty());
        DDmaRequest* pCompletedReq = NULL;
        DDmaRequest* pCurReq = _LOFF(iReqQ.First(), DDmaRequest, iLink);
        DDmaRequest::TCallback cb = 0;
        TAny* arg = 0;

        if (res == DDmaRequest::EOk)
            {
            // Update state machine, current fragment, completed fragment and
            // tell DMAC to transfer next fragment if necessary.
            SDmaDesHdr* pCompletedHdr = NULL;
            DoDfc(*pCurReq, pCompletedHdr);

            // If we have just completed the last fragment from the current
            // request, switch to the next one (if any).
            if (pCompletedHdr == pCurReq->iLastHdr)
                {
                pCompletedReq = pCurReq;
                pCurReq->iLink.Deque();
                iQueuedRequests--;
                if (iReqQ.IsEmpty())
                    iNullPtr = &iCurHdr;
                pCompletedReq->OnDeque();
                }
            }
        else if (res == DDmaRequest::EError)
            pCompletedReq = pCurReq;

        if (pCompletedReq)
            {
            cb = pCompletedReq->iCb;
            arg = pCompletedReq->iCbArg;
            Signal();
            __KTRACE_OPT(KDMA, Kern::Printf("notifying DMA client result=%d", res));
            if (cb)
                (*cb)(res, arg);
            Wait();
            }
        if (pCompletedReq || Flash())
            stop = __e32_atomic_load_acq32(&iIsrDfc) & (TUint32)KCancelFlagMask;
        }

    // Some interrupts may be missed (double-buffer and scatter-gather
    // controllers only) if two or more transfers complete while interrupts are
    // disabled in the CPU. If this happens, the framework will go out of sync
    // and leave some orphaned requests in the queue.
    //
    // To ensure correctness we handle this case here by checking that the
    // request queue is empty when all transfers have completed and, if not,
    // cleaning up and notifying the client of the completion of the orphaned
    // requests.
    //
    // Note that if some interrupts are missed and the controller raises an
    // error while transferring a subsequent fragment, the error will be
    // reported on a fragment which was successfully completed. There is no
    // easy solution to this problem, but this is okay as the only possible
    // action following a failure is to flush the whole queue.
    if (stop)
        {
        TDmaCancelInfo* waiters = iCancelInfo;
        iCancelInfo = NULL;

        // make sure DFC doesn't run again until a new request completes
        iDfc.Cancel();

        // reset the ISR count - new requests can now be processed
        __e32_atomic_store_rel32(&iIsrDfc, 0);

        req_count_after = iQueuedRequests;
        Signal();

        // release threads doing CancelAll()
        waiters->Signal();
        }
    else if (!error && !iDfc.Queued() && !iReqQ.IsEmpty() && iController->IsIdle(*this))
        {
        __KTRACE_OPT(KDMA, Kern::Printf("Missed interrupt(s) - draining request queue"));
        ResetStateMachine();

        // Move orphaned requests to a temporary queue so the channel queue
        // can accept new requests.
        SDblQue q;
        q.MoveFrom(&iReqQ);
        iNullPtr = &iCurHdr;

        SDblQueLink* pL;
        while ((pL = q.GetFirst()) != NULL)
            {
            iQueuedRequests--;
            DDmaRequest* pR = _LOFF(pL, DDmaRequest, iLink);
            __KTRACE_OPT(KDMA, Kern::Printf("Removing request from queue and notifying client"));
            pR->OnDeque();
            DDmaRequest::TCallback cb = pR->iCb;
            TAny* arg = pR->iCbArg;
            if (cb)
                {
                Signal();
                (*cb)(DDmaRequest::EOk, arg);
                Wait();
                }
            }
        req_count_after = iQueuedRequests;
        Signal();
        }
    else
        {
        req_count_after = iQueuedRequests;
        Signal();
        }

    // Only call PSL if there were requests queued when we entered AND there
    // are now no requests left on the queue (after also having executed all
    // client callbacks).
    if ((req_count_before != 0) && (req_count_after == 0))
        QueuedRequestCountChanged();

    __DMA_INVARIANT();
    }
/** Reset state machine only, request queue is unchanged */

void TDmaChannel::ResetStateMachine()
    {
    DoCancelAll();
    iCurHdr = NULL;
    iNullPtr = &iCurHdr;
    }

/** Unlink the last item of a LLI chain from the next chain.
    Default implementation does nothing. This is overridden by scatter-gather channels. */

void TDmaChannel::DoUnlink(SDmaDesHdr& /*aHdr*/)
    {
    }

/** PSL may override */
void TDmaChannel::QueuedRequestCountChanged()
    {
    __KTRACE_OPT(KDMA,
                 Kern::Printf("TDmaChannel::QueuedRequestCountChanged() %d",
                              iQueuedRequests));
    __DMA_ASSERTA(iQueuedRequests >= 0);
    }
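// A sketch of why a PSL might override the hook above (hypothetical class
// and power-management calls; not part of this file): the queued-request
// count transitions to and from zero are a natural place to gate the
// controller's clock or power state. The hook is called without the channel
// lock held, so blocking resource-manager calls are permissible here.
#if 0   // illustrative PSL-side sketch only
void TMyPslChannel::QueuedRequestCountChanged()
    {
    if (iQueuedRequests > 0)
        MyPowerResource::RequestClock();    // hypothetical PRM call
    else
        MyPowerResource::ReleaseClock();    // hypothetical PRM call
    }
#endif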
void TDmaChannel::Invariant()
    {
    Wait();

    __DMA_ASSERTD(iReqCount >= 0);
    // should always point to NULL pointer ending fragment queue
    __DMA_ASSERTD(*iNullPtr == NULL);

    __DMA_ASSERTD(0 <= iAvailDesCount && iAvailDesCount <= iMaxDesCount);

    __DMA_ASSERTD(iCurHdr == NULL || iController->IsValidHdr(iCurHdr));

    if (IsOpened())
        {
        __DMA_ASSERTD((iCurHdr && !IsQueueEmpty()) || (!iCurHdr && IsQueueEmpty()));
        if (iCurHdr == NULL)
            __DMA_ASSERTD(iNullPtr == &iCurHdr);
        }
    else
        {
        __DMA_ASSERTD(iCurHdr == NULL);
        __DMA_ASSERTD(iNullPtr == &iCurHdr);
        __DMA_ASSERTD(IsQueueEmpty());
        }

    Signal();
    }
//////////////////////////////////////////////////////////////////////////////
// TDmaSbChannel

void TDmaSbChannel::DoQueue(DDmaRequest& /*aReq*/)
    {
    if (!iTransferring)
        {
        iController->Transfer(*this, *iCurHdr);
        iTransferring = ETrue;
        }
    }

void TDmaSbChannel::DoCancelAll()
    {
    __DMA_ASSERTD(iTransferring);
    iTransferring = EFalse;
    }

void TDmaSgChannel::DoUnlink(SDmaDesHdr& aHdr)
    {
    iController->UnlinkHwDes(*this, aHdr);
    }

void TDmaSbChannel::DoDfc(DDmaRequest& /*aCurReq*/, SDmaDesHdr*& aCompletedHdr)
    {
    __DMA_ASSERTD(iTransferring);
    aCompletedHdr = iCurHdr;
    iCurHdr = iCurHdr->iNext;
    if (iCurHdr != NULL)
        iController->Transfer(*this, *iCurHdr);
    else
        iTransferring = EFalse;
    }
//////////////////////////////////////////////////////////////////////////////
// TDmaDbChannel

void TDmaDbChannel::DoQueue(DDmaRequest& aReq)
    {
    switch (iState)
        {
    case EIdle:
        iController->Transfer(*this, *iCurHdr);
        if (iCurHdr->iNext)
            {
            iController->Transfer(*this, *(iCurHdr->iNext));
            iState = ETransferring;
            }
        else
            iState = ETransferringLast;
        break;
    case ETransferring:
        // nothing to do
        break;
    case ETransferringLast:
        iController->Transfer(*this, *(aReq.iFirstHdr));
        iState = ETransferring;
        break;
    default:
        __DMA_CANT_HAPPEN();
        }
    }

void TDmaDbChannel::DoCancelAll()
    {
    iState = EIdle;
    }

void TDmaDbChannel::DoDfc(DDmaRequest& /*aCurReq*/, SDmaDesHdr*& aCompletedHdr)
    {
    aCompletedHdr = iCurHdr;
    iCurHdr = iCurHdr->iNext;
    switch (iState)
        {
    case ETransferringLast:
        iState = EIdle;
        break;
    case ETransferring:
        if (iCurHdr->iNext == NULL)
            iState = ETransferringLast;
        else
            iController->Transfer(*this, *(iCurHdr->iNext));
        break;
    default:
        __DMA_CANT_HAPPEN();
        }
    }
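// Summary of the double-buffer state machine above (derived from the code):
//
//   State              DoQueue()                         DoDfc()
//   -----------------  --------------------------------  --------------------------------
//   EIdle              prime 1st (and 2nd) fragment;     -
//                      -> ETransferring(Last)
//   ETransferring      nothing (hardware already         prime fragment after next, or
//                      primed with the next fragment)    -> ETransferringLast if none left
//   ETransferringLast  prime new request's 1st           -> EIdle
//                      fragment; -> ETransferring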
//////////////////////////////////////////////////////////////////////////////
// TDmaSgChannel

void TDmaSgChannel::DoQueue(DDmaRequest& aReq)
    {
    if (iTransferring)
        {
        __DMA_ASSERTD(!aReq.iLink.Alone());
        DDmaRequest* pReqPrev = _LOFF(aReq.iLink.iPrev, DDmaRequest, iLink);
        iController->AppendHwDes(*this, *(pReqPrev->iLastHdr), *(aReq.iFirstHdr));
        }
    else
        {
        iController->Transfer(*this, *(aReq.iFirstHdr));
        iTransferring = ETrue;
        }
    }

void TDmaSgChannel::DoCancelAll()
    {
    __DMA_ASSERTD(iTransferring);
    iTransferring = EFalse;
    }

void TDmaSgChannel::DoDfc(DDmaRequest& aCurReq, SDmaDesHdr*& aCompletedHdr)
    {
    __DMA_ASSERTD(iTransferring);
    aCompletedHdr = aCurReq.iLastHdr;
    iCurHdr = aCompletedHdr->iNext;
    iTransferring = (iCurHdr != NULL);
    }
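// The three PIL channel flavours above differ only in how they keep the
// controller fed (summary derived from the code): TDmaSbChannel programs one
// fragment at a time and re-programs from the DFC; TDmaDbChannel keeps two
// fragments in flight via its three-state machine; TDmaSgChannel hands the
// whole hardware descriptor chain to the controller and appends to it while
// it is running.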