// Copyright (c) 2002-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\drivers\dmapil.cpp
// DMA Platform Independent Layer (PIL)
//
//

#include <drivers/dma.h>
#include <kernel/kern_priv.h>


static const char KDmaPanicCat[] = "DMA";

NFastMutex DmaChannelMgr::Lock;

class TDmaCancelInfo : public SDblQueLink
    {
public:
    TDmaCancelInfo();
    void Signal();
public:
    NFastSemaphore iSem;
    };

TDmaCancelInfo::TDmaCancelInfo()
    : iSem(0)
    {
    iNext = this;
    iPrev = this;
    }

void TDmaCancelInfo::Signal()
    {
    TDmaCancelInfo* p = this;
    FOREVER
        {
        TDmaCancelInfo* next = (TDmaCancelInfo*)p->iNext;
        if (p != next)
            p->Deque();
        NKern::FSSignal(&p->iSem);  // Don't dereference p after this
        if (p == next)
            break;
        p = next;
        }
    }

//////////////////////////////////////////////////////////////////////////////

#ifdef __DMASIM__
#ifdef __WINS__
typedef TLinAddr TPhysAddr;
#endif
static inline TPhysAddr LinToPhys(TLinAddr aLin) {return aLin;}
#else
static inline TPhysAddr LinToPhys(TLinAddr aLin) {return Epoc::LinearToPhysical(aLin);}
#endif

//
// Return minimum of aMaxSize and size of largest physically contiguous block
// starting at aLinAddr.
//
static TInt MaxPhysSize(TLinAddr aLinAddr, const TInt aMaxSize)
    {
    const TPhysAddr physBase = LinToPhys(aLinAddr);
    TLinAddr lin = aLinAddr;
    TInt size = 0;
    for (;;)
        {
        // Round up the linear address to the next MMU page boundary
        const TLinAddr linBoundary = Kern::RoundToPageSize(lin + 1);
        size += linBoundary - lin;
        if (size >= aMaxSize)
            return aMaxSize;
        if ((physBase + size) != LinToPhys(linBoundary))
            return size;
        lin = linBoundary;
        }
    }
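//
// Worked example (illustrative only): with 4KB MMU pages, a buffer at
// linear address 0x80000F00 reaches its first page boundary after 0x100
// bytes. If the page mapped at 0x80001000 is physically contiguous with
// the one before it, the scan continues a page at a time; if not, the
// function returns 0x100 and the caller must split the transfer there.
//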
//////////////////////////////////////////////////////////////////////////////
// TDmac

TDmac::TDmac(const SCreateInfo& aInfo)
    : iMaxDesCount(aInfo.iDesCount),
      iAvailDesCount(aInfo.iDesCount),
      iDesSize(aInfo.iDesSize),
      iCaps(aInfo.iCaps)
    {
    __DMA_ASSERTD(iMaxDesCount > 0);
    __DMA_ASSERTD((iCaps & ~KCapsBitHwDes) == 0);   // undefined bits set?
    __DMA_ASSERTD(iDesSize > 0);
    }

//
// Second-phase c'tor
//

TInt TDmac::Create(const SCreateInfo& aInfo)
    {
    iHdrPool = new SDmaDesHdr[iMaxDesCount];
    if (iHdrPool == NULL)
        return KErrNoMemory;

    TInt r = AllocDesPool(aInfo.iDesChunkAttribs);
    if (r != KErrNone)
        return KErrNoMemory;

    // Link all descriptor headers together on the free list
    iFreeHdr = iHdrPool;
    TInt i;
    for (i = 0; i < iMaxDesCount - 1; i++)
        iHdrPool[i].iNext = iHdrPool + i + 1;
    iHdrPool[iMaxDesCount-1].iNext = NULL;

    __DMA_INVARIANT();
    return KErrNone;
    }


TDmac::~TDmac()
    {
    __DMA_INVARIANT();

    FreeDesPool();
    delete[] iHdrPool;
    }


// Calling thread must be in CS
TInt TDmac::AllocDesPool(TUint aAttribs)
    {
    TInt r;
    if (iCaps & KCapsBitHwDes)
        {
        TInt size = iMaxDesCount*iDesSize;
#ifdef __WINS__
        (void)aAttribs;
        iDesPool = new TUint8[size];
        r = iDesPool ? KErrNone : KErrNoMemory;
#else
        // Chunk not mapped as supervisor r/w user none? incorrect mask passed by PSL
        __DMA_ASSERTD((aAttribs & EMapAttrAccessMask) == EMapAttrSupRw);
        TPhysAddr phys;
        r = Epoc::AllocPhysicalRam(size, phys);
        if (r == KErrNone)
            {
            r = DPlatChunkHw::New(iHwDesChunk, phys, size, aAttribs);
            if (r == KErrNone)
                {
                iDesPool = (TAny*)iHwDesChunk->LinearAddress();
                __KTRACE_OPT(KDMA, Kern::Printf("descriptor hw chunk created lin=0x%08X phys=0x%08X, size=0x%X",
                                                iHwDesChunk->iLinAddr, iHwDesChunk->iPhysAddr, size));
                }
            else
                Epoc::FreePhysicalRam(phys, size);
            }
#endif
        }
    else
        {
        iDesPool = new SDmaPseudoDes[iMaxDesCount];
        r = iDesPool ? KErrNone : KErrNoMemory;
        }
    return r;
    }


// Calling thread must be in CS
void TDmac::FreeDesPool()
    {
    if (iCaps & KCapsBitHwDes)
        {
#ifdef __WINS__
        delete[] iDesPool;
#else
        if (iHwDesChunk)
            {
            TPhysAddr phys = iHwDesChunk->PhysicalAddress();
            TInt size = iHwDesChunk->iSize;
            iHwDesChunk->Close(NULL);
            Epoc::FreePhysicalRam(phys, size);
            }
#endif
        }
    else
        Kern::Free(iDesPool);
    }


/**
Prealloc the given number of descriptors.
*/

TInt TDmac::ReserveSetOfDes(TInt aCount)
    {
    __KTRACE_OPT(KDMA, Kern::Printf(">TDmac::ReserveSetOfDes count=%d", aCount));
    __DMA_ASSERTD(aCount > 0);
    TInt r = KErrTooBig;
    Wait();
    if (iAvailDesCount - aCount >= 0)
        {
        iAvailDesCount -= aCount;
        r = KErrNone;
        }
    Signal();
    __DMA_INVARIANT();
    __KTRACE_OPT(KDMA, Kern::Printf("<TDmac::ReserveSetOfDes r=%d", r));
    return r;
    }


/**
Return the given number of preallocated descriptors to the pool.
*/

void TDmac::ReleaseSetOfDes(TInt aCount)
    {
    __DMA_ASSERTD(aCount >= 0);
    Wait();
    iAvailDesCount += aCount;
    Signal();
    __DMA_INVARIANT();
    }


/**
Queue DFC and update word used to communicate with DFC.

Called in interrupt context by PSL.
*/

void TDmac::HandleIsr(TDmaChannel& aChannel, TBool aIsComplete)
    {
    //__KTRACE_OPT(KDMA, Kern::Printf("TDmac::HandleIsr channel=%d complete=%d", aChannelIdx, aIsComplete));

    // Queue DFC if necessary. The possible scenarios are:
    // * no DFC queued --> need to queue DFC
    // * DFC queued (not running yet) --> just need to update iIsrDfc
    // * DFC running / iIsrDfc already reset --> need to requeue DFC
    // * DFC running / iIsrDfc not reset yet --> just need to update iIsrDfc
    // Set error flag if necessary.
    TUint32 inc = aIsComplete ? 1u : TUint32(TDmaChannel::KErrorFlagMask)|1u;
    TUint32 orig = __e32_atomic_tau_ord32(&aChannel.iIsrDfc, TUint32(TDmaChannel::KCancelFlagMask), 0, inc);

    // As transfer should be suspended when an error occurs, we
    // should never get there with the error flag already set.
    __DMA_ASSERTD((orig & inc & (TUint32)TDmaChannel::KErrorFlagMask) == 0);

    if (orig == 0)
        aChannel.iDfc.Add();
    }
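//
// A sketch of how the iIsrDfc word is used (the actual mask values are
// defined alongside TDmaChannel in the framework header): the low bits
// count ISRs not yet seen by the DFC, KErrorFlagMask records a transfer
// error and KCancelFlagMask marks a CancelAll() in progress.
// __e32_atomic_tau_ord32(&w, t, u, v) atomically adds u if w >= t and v
// otherwise, returning the old value; with t == KCancelFlagMask and u == 0
// the count is therefore frozen while a cancel is pending, e.g.:
//
//   iIsrDfc == 0            --> becomes 1 (or 1|KErrorFlagMask); DFC queued
//   iIsrDfc == 2            --> becomes 3; DFC is already queued or running
//   iIsrDfc cancel flag set --> unchanged; the ISR is ignored by the DFC
//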
void TDmac::InitDes(const SDmaDesHdr& aHdr, TUint32 aSrc, TUint32 aDest, TInt aCount,
                    TUint aFlags, TUint32 aPslInfo, TUint32 aCookie)
    {
    if (iCaps & KCapsBitHwDes)
        InitHwDes(aHdr, aSrc, aDest, aCount, aFlags, aPslInfo, aCookie);
    else
        {
        SDmaPseudoDes& des = HdrToDes(aHdr);
        des.iSrc = aSrc;
        des.iDest = aDest;
        des.iCount = aCount;
        des.iFlags = aFlags;
        des.iPslInfo = aPslInfo;
        des.iCookie = aCookie;
        }
    }


void TDmac::InitHwDes(const SDmaDesHdr& /*aHdr*/, TUint32 /*aSrc*/, TUint32 /*aDest*/, TInt /*aCount*/,
                      TUint /*aFlags*/, TUint32 /*aPslInfo*/, TUint32 /*aCookie*/)
    {
    // concrete controller must override if KCapsBitHwDes set
    __DMA_CANT_HAPPEN();
    }


void TDmac::ChainHwDes(const SDmaDesHdr& /*aHdr*/, const SDmaDesHdr& /*aNextHdr*/)
    {
    // concrete controller must override if KCapsBitHwDes set
    __DMA_CANT_HAPPEN();
    }


void TDmac::AppendHwDes(const TDmaChannel& /*aChannel*/, const SDmaDesHdr& /*aLastHdr*/,
                        const SDmaDesHdr& /*aNewHdr*/)
    {
    // concrete controller must override if KCapsBitHwDes set
    __DMA_CANT_HAPPEN();
    }


void TDmac::UnlinkHwDes(const TDmaChannel& /*aChannel*/, SDmaDesHdr& /*aHdr*/)
    {
    // concrete controller must override if KCapsBitHwDes set
    __DMA_CANT_HAPPEN();
    }
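//
// A controller with hardware descriptor support (KCapsBitHwDes) must
// override the four stubs above. A minimal sketch of the first one, using
// a hypothetical PSL class and descriptor layout for illustration only:
//
//   void TFooDmac::InitHwDes(const SDmaDesHdr& aHdr, TUint32 aSrc, TUint32 aDest,
//                            TInt aCount, TUint aFlags, TUint32 aPslInfo, TUint32 aCookie)
//       {
//       SFooHwDes& des = *HdrToHwDes(aHdr);  // hypothetical header-to-descriptor mapping
//       des.iSrcAddr = aSrc;
//       des.iDestAddr = aDest;
//       des.iCount = aCount;
//       des.iLink = 0;                       // no successor yet - ChainHwDes() patches this
//       }
//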
TInt TDmac::FailNext(const TDmaChannel& /*aChannel*/)
    {
    return KErrNotSupported;
    }


TInt TDmac::MissNextInterrupts(const TDmaChannel& /*aChannel*/, TInt /*aInterruptCount*/)
    {
    return KErrNotSupported;
    }


TInt TDmac::Extension(TDmaChannel& /*aChannel*/, TInt /*aCmd*/, TAny* /*aArg*/)
    {
    // default implementation - NOP
    return KErrNotSupported;
    }


#ifdef _DEBUG

void TDmac::Invariant()
    {
    Wait();
    __DMA_ASSERTD(0 <= iAvailDesCount && iAvailDesCount <= iMaxDesCount);
    __DMA_ASSERTD(!iFreeHdr || IsValidHdr(iFreeHdr));
    for (TInt i = 0; i < iMaxDesCount; i++)
        __DMA_ASSERTD(iHdrPool[i].iNext == NULL || IsValidHdr(iHdrPool[i].iNext));
    Signal();
    }


TBool TDmac::IsValidHdr(const SDmaDesHdr* aHdr)
    {
    return (iHdrPool <= aHdr) && (aHdr < iHdrPool + iMaxDesCount);
    }

#endif

//////////////////////////////////////////////////////////////////////////////
// DDmaRequest


EXPORT_C DDmaRequest::DDmaRequest(TDmaChannel& aChannel, TCallback aCb, TAny* aCbArg, TInt aMaxTransferSize)
    : iChannel(aChannel),
      iCb(aCb),
      iCbArg(aCbArg),
      iMaxTransferSize(aMaxTransferSize)
    {
    // iDesCount = 0;
    // iFirstHdr = iLastHdr = NULL;
    // iQueued = EFalse;
    iChannel.iReqCount++;
    __DMA_INVARIANT();
    }


EXPORT_C DDmaRequest::~DDmaRequest()
    {
    __DMA_ASSERTD(!iQueued);
    __DMA_INVARIANT();
    FreeDesList();
    iChannel.iReqCount--;
    }


EXPORT_C TInt DDmaRequest::Fragment(TUint32 aSrc, TUint32 aDest, TInt aCount, TUint aFlags, TUint32 aPslInfo)
    {
    __KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::Fragment thread %O "
                                    "src=0x%08X dest=0x%08X count=%d flags=0x%X psl=0x%08X",
                                    &Kern::CurrentThread(), aSrc, aDest, aCount, aFlags, aPslInfo));
    __DMA_ASSERTD(aCount > 0);
    __DMA_ASSERTD(!iQueued);

    const TUint alignMask = iChannel.MemAlignMask(aFlags, aPslInfo);
    const TBool memSrc = aFlags & KDmaMemSrc;
    const TBool memDest = aFlags & KDmaMemDest;

    // Memory buffers must satisfy alignment constraint
    __DMA_ASSERTD(!memSrc || ((aSrc & alignMask) == 0));
    __DMA_ASSERTD(!memDest || ((aDest & alignMask) == 0));

    // Ask the PSL what the maximum size possible for this transfer is
    TInt maxTransferSize = iChannel.MaxTransferSize(aFlags, aPslInfo);
    if (!maxTransferSize)
        {
        __KTRACE_OPT(KPANIC, Kern::Printf("Error: maxTransferSize == 0"));
        return KErrArgument;
        }

    if (iMaxTransferSize)
        {
        // User has set a size cap
        __DMA_ASSERTA((iMaxTransferSize <= maxTransferSize) || (maxTransferSize == -1));
        maxTransferSize = iMaxTransferSize;
        }
    else
        {
        // User doesn't care about max size
        if (maxTransferSize == -1)
            {
            // No maximum imposed by controller
            maxTransferSize = aCount;
            }
        }

    const TInt maxAlignedSize = (maxTransferSize & ~alignMask);
    __DMA_ASSERTD(maxAlignedSize > 0);  // bug in PSL if not true

    FreeDesList();

    TInt r = KErrNone;
    do
        {
        // Allocate fragment
        r = ExpandDesList();
        if (r != KErrNone)
            {
            FreeDesList();
            break;
            }

        // Compute fragment size
        TInt c = Min(maxTransferSize, aCount);
        if (memSrc && ((aFlags & KDmaPhysAddrSrc) == 0))
            c = MaxPhysSize(aSrc, c);
        if (memDest && ((aFlags & KDmaPhysAddrDest) == 0))
            c = MaxPhysSize(aDest, c);
        if ((memSrc || memDest) && (c < aCount) && (c > maxAlignedSize))
            {
            // This is not the last fragment of a transfer to/from memory.
            // We must round down the fragment size so the next one is
            // correctly aligned.
            c = maxAlignedSize;
            }

        // Initialise fragment
        __KTRACE_OPT(KDMA, Kern::Printf("fragment: src=0x%08X dest=0x%08X count=%d", aSrc, aDest, c));
        iChannel.iController->InitDes(*iLastHdr, aSrc, aDest, c, aFlags, aPslInfo, iChannel.PslId());

        // Update for next iteration
        aCount -= c;
        if (memSrc)
            aSrc += c;
        if (memDest)
            aDest += c;
        }
    while (aCount > 0);

    __DMA_INVARIANT();
    return r;
    }
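//
// Worked example (illustrative, assuming 4KB pages and a controller limit
// of 0x4000 bytes): a 0x3100-byte memory-to-peripheral request whose
// source starts 0x100 bytes before a page boundary, with no two source
// pages physically adjacent, fragments as 0x100 + 0x1000 + 0x1000 + 0x1000
// - one descriptor per physically contiguous run, each capped at
// maxAlignedSize.
//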
EXPORT_C void DDmaRequest::Queue()
    {
    __KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::Queue thread %O", &Kern::CurrentThread()));
    __DMA_ASSERTD(iDesCount > 0);   // Not configured? call Fragment() first !
    __DMA_ASSERTD(!iQueued);

    // append request to queue and link new descriptor list to existing one.
    iChannel.Wait();

    TUint32 req_count = iChannel.iQueuedRequests++;
    if (req_count == 0)
        {
        iChannel.Signal();
        iChannel.QueuedRequestCountChanged();
        iChannel.Wait();
        }

    if (!(iChannel.iIsrDfc & (TUint32)TDmaChannel::KCancelFlagMask))
        {
        iQueued = ETrue;
        iChannel.iReqQ.Add(&iLink);
        *iChannel.iNullPtr = iFirstHdr;
        iChannel.iNullPtr = &(iLastHdr->iNext);
        iChannel.DoQueue(*this);
        iChannel.Signal();
        }
    else
        {
        // Someone is cancelling all requests...
        req_count = --iChannel.iQueuedRequests;
        iChannel.Signal();
        if (req_count == 0)
            {
            iChannel.QueuedRequestCountChanged();
            }
        }

    __DMA_INVARIANT();
    }
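//
// Typical client usage, as a sketch (the channel pointer, buffer addresses
// and flag combination are hypothetical):
//
//   void Cb(DDmaRequest::TResult aResult, TAny* aArg);  // completion callback
//
//   DDmaRequest* req = new DDmaRequest(*chan, Cb, this);
//   TInt r = req->Fragment(srcAddr, destAddr, size,
//                          KDmaMemSrc | KDmaIncSrc | KDmaMemDest | KDmaIncDest, 0);
//   if (r == KErrNone)
//       req->Queue();  // Cb will run in the channel's DFC thread
//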
EXPORT_C TInt DDmaRequest::ExpandDesList(TInt aCount)
    {
    __DMA_ASSERTD(!iQueued);
    __DMA_ASSERTD(aCount > 0);

    if (aCount > iChannel.iAvailDesCount)
        return KErrTooBig;

    iChannel.iAvailDesCount -= aCount;
    iDesCount += aCount;

    TDmac& c = *(iChannel.iController);
    c.Wait();

    if (iFirstHdr == NULL)
        {
        // handle empty list specially to simplify following loop
        iFirstHdr = iLastHdr = c.iFreeHdr;
        c.iFreeHdr = c.iFreeHdr->iNext;
        --aCount;
        }
    else
        iLastHdr->iNext = c.iFreeHdr;

    // Remove as many descriptors and headers from the free pool as necessary
    // and ensure hardware descriptors are chained together.
    while (aCount-- > 0)
        {
        __DMA_ASSERTD(c.iFreeHdr != NULL);
        if (c.iCaps & TDmac::KCapsBitHwDes)
            c.ChainHwDes(*iLastHdr, *(c.iFreeHdr));
        iLastHdr = c.iFreeHdr;
        c.iFreeHdr = c.iFreeHdr->iNext;
        }

    c.Signal();

    iLastHdr->iNext = NULL;

    __DMA_INVARIANT();
    return KErrNone;
    }


EXPORT_C void DDmaRequest::FreeDesList()
    {
    __DMA_ASSERTD(!iQueued);
    if (iDesCount > 0)
        {
        iChannel.iAvailDesCount += iDesCount;
        TDmac& c = *(iChannel.iController);
        c.Wait();
        iLastHdr->iNext = c.iFreeHdr;
        c.iFreeHdr = iFirstHdr;
        c.Signal();
        iFirstHdr = iLastHdr = NULL;
        iDesCount = 0;
        }
    }


#ifdef _DEBUG

void DDmaRequest::Invariant()
    {
    iChannel.Wait();
    __DMA_ASSERTD(iChannel.IsOpened());
    __DMA_ASSERTD(0 <= iMaxTransferSize);
    __DMA_ASSERTD(0 <= iDesCount && iDesCount <= iChannel.iMaxDesCount);
    if (iDesCount == 0)
        {
        __DMA_ASSERTD(!iQueued);
        __DMA_ASSERTD(!iFirstHdr && !iLastHdr);
        }
    else
        {
        __DMA_ASSERTD(iChannel.iController->IsValidHdr(iFirstHdr));
        __DMA_ASSERTD(iChannel.iController->IsValidHdr(iLastHdr));
        }
    iChannel.Signal();
    }

#endif


//////////////////////////////////////////////////////////////////////////////
// TDmaChannel


EXPORT_C TInt TDmaChannel::StaticExtension(TInt aCmd, TAny* aArg)
    {
    return DmaChannelMgr::StaticExtension(aCmd, aArg);
    }


TDmaChannel::TDmaChannel()
    : iController(NULL),
      iPslId(0),
      iCurHdr(NULL),
      iNullPtr(&iCurHdr),
      iDfc(Dfc, NULL, 0),
      iMaxDesCount(0),
      iAvailDesCount(0),
      iIsrDfc(0),
      iReqQ(),
      iReqCount(0),
      iQueuedRequests(0),
      iCancelInfo(NULL)
    {
    __DMA_INVARIANT();
    }


EXPORT_C TInt TDmaChannel::Open(const SCreateInfo& aInfo, TDmaChannel*& aChannel)
    {
    __KTRACE_OPT(KDMA, Kern::Printf("TDmaChannel::Open thread %O", &Kern::CurrentThread()));
    __DMA_ASSERTD(aInfo.iDfcQ != NULL);
    __DMA_ASSERTD(aInfo.iDfcPriority < KNumDfcPriorities);
    __DMA_ASSERTD(aInfo.iDesCount >= 1);

    aChannel = NULL;

    DmaChannelMgr::Wait();
    TDmaChannel* pC = DmaChannelMgr::Open(aInfo.iCookie);
    DmaChannelMgr::Signal();
    if (!pC)
        return KErrInUse;

    TInt r = pC->iController->ReserveSetOfDes(aInfo.iDesCount);
    if (r != KErrNone)
        {
        pC->Close();
        return r;
        }
    pC->iAvailDesCount = pC->iMaxDesCount = aInfo.iDesCount;

    new (&pC->iDfc) TDfc(&Dfc, pC, aInfo.iDfcQ, aInfo.iDfcPriority);

    aChannel = pC;

#ifdef _DEBUG
    pC->Invariant();
#endif
    __KTRACE_OPT(KDMA, Kern::Printf("opened channel %d", pC->iPslId));
    return KErrNone;
    }
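//
// Opening a channel, as a sketch (the cookie identifying the PSL channel
// and the DFC queue are platform-specific; the values are hypothetical):
//
//   TDmaChannel::SCreateInfo info;
//   info.iCookie = KHypotheticalChannelId;  // PSL-defined channel identifier
//   info.iDesCount = 8;                     // descriptors reserved for this channel
//   info.iDfcPriority = 3;
//   info.iDfcQ = dfcQ;                      // queue on which client callbacks run
//   TDmaChannel* chan = NULL;
//   TInt r = TDmaChannel::Open(info, chan);
//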
EXPORT_C void TDmaChannel::Close()
    {
    __KTRACE_OPT(KDMA, Kern::Printf("TDmaChannel::Close %d", iPslId));
    __DMA_ASSERTD(IsOpened());
    __DMA_ASSERTD(IsQueueEmpty());
    __DMA_ASSERTD(iReqCount == 0);

    __DMA_ASSERTD(iQueuedRequests == 0);

    // descriptor leak? bug in request code
    __DMA_ASSERTD(iAvailDesCount == iMaxDesCount);

    iController->ReleaseSetOfDes(iMaxDesCount);
    iAvailDesCount = iMaxDesCount = 0;

    DmaChannelMgr::Wait();
    DmaChannelMgr::Close(this);
    iController = NULL;
    DmaChannelMgr::Signal();

    __DMA_INVARIANT();
    }


EXPORT_C void TDmaChannel::CancelAll()
    {
    __KTRACE_OPT(KDMA, Kern::Printf("TDmaChannel::CancelAll thread %O channel - %d",
                                    &Kern::CurrentThread(), iPslId));
    __DMA_ASSERTD(IsOpened());

    NThread* nt = NKern::CurrentThread();
    TBool wait = FALSE;
    TDmaCancelInfo c;
    TDmaCancelInfo* waiters = 0;

    NKern::ThreadEnterCS();
    Wait();
    const TUint32 req_count_before = iQueuedRequests;
    NThreadBase* dfcnt = iDfc.Thread();
    __e32_atomic_store_ord32(&iIsrDfc, (TUint32)KCancelFlagMask);
    // ISRs after this point will not post a DFC, however a DFC may already
    // be queued or running or both.
    if (!IsQueueEmpty())
        {
        // There is a transfer in progress. It may complete before the DMAC
        // has stopped, but the resulting ISR will not post a DFC.
        // ISR should not happen after this function returns.
        iController->StopTransfer(*this);

        ResetStateMachine();

        // Clean-up the request queue.
        SDblQueLink* pL;
        while ((pL = iReqQ.GetFirst()) != NULL)
            {
            iQueuedRequests--;
            DDmaRequest* pR = _LOFF(pL, DDmaRequest, iLink);
            pR->OnDeque();
            }
        }
    if (!dfcnt || dfcnt == nt)
        {
        // no DFC queue or DFC runs in this thread, so just cancel it and we're finished
        iDfc.Cancel();

        // if other calls to CancelAll() are waiting for the DFC, release them here
        waiters = iCancelInfo;
        iCancelInfo = 0;

        // reset the ISR count
        __e32_atomic_store_rel32(&iIsrDfc, 0);
        }
    else
        {
        // DFC runs in another thread. Make sure it's queued and then wait for it to run.
        if (iCancelInfo)
            c.InsertBefore(iCancelInfo);
        else
            iCancelInfo = &c;
        wait = TRUE;
        iDfc.Enque();
        }
    const TUint32 req_count_after = iQueuedRequests;
    Signal();
    if (waiters)
        waiters->Signal();
    if (wait)
        NKern::FSWait(&c.iSem);
    NKern::ThreadLeaveCS();

    // Only call PSL if there were requests queued when we entered AND there
    // are now no requests left on the queue.
    if ((req_count_before != 0) && (req_count_after == 0))
        {
        QueuedRequestCountChanged();
        }

    __DMA_INVARIANT();
    }
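//
// Note: Close() asserts that the queue is empty and that no DDmaRequest
// still references the channel, so a client shutting down would typically
// do (sketch):
//
//   chan->CancelAll();  // safe from any thread; waits for the DFC if needed
//   delete req;         // requests must be destroyed before the channel is closed
//   chan->Close();
//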
/**
DFC callback function (static member).
*/

void TDmaChannel::Dfc(TAny* aArg)
    {
    ((TDmaChannel*)aArg)->DoDfc();
    }


void TDmaChannel::DoDfc()
    {
    Wait();

    // Atomically fetch and reset the number of DFCs queued by the ISR and
    // the error flag. Leave the cancel flag alone for now.
    const TUint32 w = __e32_atomic_and_ord32(&iIsrDfc, (TUint32)KCancelFlagMask);
    TUint32 count = w & KDfcCountMask;
    const TBool error = w & (TUint32)KErrorFlagMask;
    TBool stop = w & (TUint32)KCancelFlagMask;
    __DMA_ASSERTD(count > 0 || stop);
    const TUint32 req_count_before = iQueuedRequests;
    TUint32 req_count_after = 0;

    while (count && !stop)
        {
        --count;

        // If an error occurred it must have been reported on the last
        // interrupt, since transfers are suspended after an error.
        DDmaRequest::TResult res = (count == 0 && error) ? DDmaRequest::EError : DDmaRequest::EOk;
        __DMA_ASSERTD(!iReqQ.IsEmpty());
        DDmaRequest* pCompletedReq = NULL;
        DDmaRequest* pCurReq = _LOFF(iReqQ.First(), DDmaRequest, iLink);
        DDmaRequest::TCallback cb = 0;
        TAny* arg = 0;

        if (res == DDmaRequest::EOk)
            {
            // Update state machine, current fragment, completed fragment and
            // tell DMAC to transfer next fragment if necessary.
            SDmaDesHdr* pCompletedHdr = NULL;
            DoDfc(*pCurReq, pCompletedHdr);

            // If we have just completed the last fragment of the current
            // request, switch to the next request (if any).
            if (pCompletedHdr == pCurReq->iLastHdr)
                {
                pCompletedReq = pCurReq;
                pCurReq->iLink.Deque();
                iQueuedRequests--;
                if (iReqQ.IsEmpty())
                    iNullPtr = &iCurHdr;
                pCompletedReq->OnDeque();
                }
            }
        else if (res == DDmaRequest::EError)
            pCompletedReq = pCurReq;
        else
            __DMA_CANT_HAPPEN();
        if (pCompletedReq)
            {
            cb = pCompletedReq->iCb;
            arg = pCompletedReq->iCbArg;
            Signal();
            __KTRACE_OPT(KDMA, Kern::Printf("notifying DMA client result=%d", res));
            (*cb)(res, arg);
            Wait();
            }
        if (pCompletedReq || Flash())
            stop = __e32_atomic_load_acq32(&iIsrDfc) & (TUint32)KCancelFlagMask;
        }
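    // The cancel flag is re-sampled above whenever the channel lock may
    // have been dropped (around a client callback, or when Flash() yields
    // the lock under contention), since a concurrent CancelAll() could
    // have emptied the queue in that window.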
    // Some interrupts may be missed (double-buffer and scatter-gather
    // controllers only) if two or more transfers complete while interrupts
    // are disabled in the CPU. If this happens, the framework will go out
    // of sync and leave some orphaned requests in the queue.
    //
    // To ensure correctness we handle this case here by checking that the
    // request queue is empty when all transfers have completed and, if not,
    // cleaning up and notifying the client of the completion of the
    // orphaned requests.
    //
    // Note that if some interrupts are missed and the controller raises an
    // error while transferring a subsequent fragment, the error will be
    // reported on a fragment which was successfully completed. There is no
    // easy solution to this problem, but this is okay as the only possible
    // action following a failure is to flush the whole queue.
    if (stop)
        {
        TDmaCancelInfo* waiters = iCancelInfo;
        iCancelInfo = 0;

        // make sure DFC doesn't run again until a new request completes
        iDfc.Cancel();

        // reset the ISR count - new requests can now be processed
        __e32_atomic_store_rel32(&iIsrDfc, 0);

        req_count_after = iQueuedRequests;
        Signal();

        // release threads doing CancelAll()
        waiters->Signal();
        }
    else if (!error && !iDfc.Queued() && !iReqQ.IsEmpty() && iController->IsIdle(*this))
        {
        __KTRACE_OPT(KDMA, Kern::Printf("Missed interrupt(s) - draining request queue"));
        ResetStateMachine();

        // Move orphaned requests to a temporary queue so the channel queue
        // can accept new requests.
        SDblQue q;
        q.MoveFrom(&iReqQ);

        SDblQueLink* pL;
        while ((pL = q.GetFirst()) != NULL)
            {
            iQueuedRequests--;
            DDmaRequest* pR = _LOFF(pL, DDmaRequest, iLink);
            __KTRACE_OPT(KDMA, Kern::Printf("Removing request from queue and notifying client"));
            pR->OnDeque();
            DDmaRequest::TCallback cb = pR->iCb;
            TAny* arg = pR->iCbArg;
            if (cb)
                {
                Signal();
                (*cb)(DDmaRequest::EOk, arg);
                Wait();
                }
            }
        req_count_after = iQueuedRequests;
        Signal();
        }
    else
        {
        req_count_after = iQueuedRequests;
        Signal();
        }

    // Only call PSL if there were requests queued when we entered AND there
    // are now no requests left on the queue (after also having executed all
    // client callbacks).
    if ((req_count_before != 0) && (req_count_after == 0))
        {
        QueuedRequestCountChanged();
        }

    __DMA_INVARIANT();
    }


/** Reset state machine only, request queue is unchanged */

void TDmaChannel::ResetStateMachine()
    {
    DoCancelAll();
    iCurHdr = NULL;
    iNullPtr = &iCurHdr;
    }


/** Unlink the last item of a LLI chain from the next chain.
    Default implementation does nothing. This is overridden by
    scatter-gather channels.
*/

void TDmaChannel::DoUnlink(SDmaDesHdr& /*aHdr*/)
    {
    }


/** PSL may override */
void TDmaChannel::QueuedRequestCountChanged()
    {
#ifdef _DEBUG
    Wait();
    __KTRACE_OPT(KDMA,
                 Kern::Printf("TDmaChannel::QueuedRequestCountChanged() %d",
                              iQueuedRequests));
    __DMA_ASSERTA(iQueuedRequests >= 0);
    Signal();
#endif
    }
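//
// The PIL invokes this hook only when the request queue goes from empty to
// non-empty or back again, so a PSL can use it for e.g. clock or power
// gating. A sketch, with a hypothetical PSL class and power API:
//
//   void TFooDmaChannel::QueuedRequestCountChanged()
//       {
//       Wait();
//       const TInt count = iQueuedRequests;
//       Signal();
//       if (count == 0)
//           iPowerController->ReleaseDmaClock();  // hypothetical
//       else
//           iPowerController->RequestDmaClock();  // hypothetical
//       }
//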
#ifdef _DEBUG

void TDmaChannel::Invariant()
    {
    Wait();

    __DMA_ASSERTD(iReqCount >= 0);
    // should always point to NULL pointer ending fragment queue
    __DMA_ASSERTD(*iNullPtr == NULL);

    __DMA_ASSERTD(0 <= iAvailDesCount && iAvailDesCount <= iMaxDesCount);

    __DMA_ASSERTD(iCurHdr == NULL || iController->IsValidHdr(iCurHdr));

    if (IsOpened())
        {
        __DMA_ASSERTD((iCurHdr && !IsQueueEmpty()) || (!iCurHdr && IsQueueEmpty()));
        if (iCurHdr == NULL)
            __DMA_ASSERTD(iNullPtr == &iCurHdr);
        }
    else
        {
        __DMA_ASSERTD(iCurHdr == NULL);
        __DMA_ASSERTD(iNullPtr == &iCurHdr);
        __DMA_ASSERTD(IsQueueEmpty());
        }

    Signal();
    }

#endif

//////////////////////////////////////////////////////////////////////////////
// TDmaSbChannel

void TDmaSbChannel::DoQueue(DDmaRequest& /*aReq*/)
    {
    if (!iTransferring)
        {
        iController->Transfer(*this, *iCurHdr);
        iTransferring = ETrue;
        }
    }


void TDmaSbChannel::DoCancelAll()
    {
    __DMA_ASSERTD(iTransferring);
    iTransferring = EFalse;
    }


void TDmaSbChannel::DoDfc(DDmaRequest& /*aCurReq*/, SDmaDesHdr*& aCompletedHdr)
    {
    __DMA_ASSERTD(iTransferring);
    aCompletedHdr = iCurHdr;
    iCurHdr = iCurHdr->iNext;
    if (iCurHdr != NULL)
        iController->Transfer(*this, *iCurHdr);
    else
        iTransferring = EFalse;
    }


//////////////////////////////////////////////////////////////////////////////
// TDmaDbChannel

void TDmaDbChannel::DoQueue(DDmaRequest& aReq)
    {
    switch (iState)
        {
    case EIdle:
        iController->Transfer(*this, *iCurHdr);
        if (iCurHdr->iNext)
            {
            iController->Transfer(*this, *(iCurHdr->iNext));
            iState = ETransferring;
            }
        else
            iState = ETransferringLast;
        break;
    case ETransferring:
        // nothing to do
        break;
    case ETransferringLast:
        iController->Transfer(*this, *(aReq.iFirstHdr));
        iState = ETransferring;
        break;
    default:
        __DMA_CANT_HAPPEN();
        }
    }


void TDmaDbChannel::DoCancelAll()
    {
    iState = EIdle;
    }


void TDmaDbChannel::DoDfc(DDmaRequest& /*aCurReq*/, SDmaDesHdr*& aCompletedHdr)
    {
    aCompletedHdr = iCurHdr;
    iCurHdr = iCurHdr->iNext;
    switch (iState)
        {
    case ETransferringLast:
        iState = EIdle;
        break;
    case ETransferring:
        if (iCurHdr->iNext == NULL)
            iState = ETransferringLast;
        else
            iController->Transfer(*this, *(iCurHdr->iNext));
        break;
    default:
        __DMA_CANT_HAPPEN();
        }
    }
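//
// Double-buffer channel state machine (a summary of the code above):
//
//   EIdle             --Queue--> ETransferring (two fragments programmed)
//   EIdle             --Queue--> ETransferringLast (single fragment)
//   ETransferring     --Dfc----> ETransferringLast (no fragment after next)
//   ETransferringLast --Dfc----> EIdle
//
// The controller is kept one fragment ahead of the one in flight, so up to
// two transfer-complete interrupts can be outstanding at once - the case
// handled by the missed-interrupt recovery in TDmaChannel::DoDfc().
//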
//////////////////////////////////////////////////////////////////////////////
// TDmaSgChannel

void TDmaSgChannel::DoQueue(DDmaRequest& aReq)
    {
    if (iTransferring)
        {
        __DMA_ASSERTD(!aReq.iLink.Alone());
        DDmaRequest* pReqPrev = _LOFF(aReq.iLink.iPrev, DDmaRequest, iLink);
        iController->AppendHwDes(*this, *(pReqPrev->iLastHdr), *(aReq.iFirstHdr));
        }
    else
        {
        iController->Transfer(*this, *(aReq.iFirstHdr));
        iTransferring = ETrue;
        }
    }


void TDmaSgChannel::DoCancelAll()
    {
    __DMA_ASSERTD(iTransferring);
    iTransferring = EFalse;
    }


void TDmaSgChannel::DoUnlink(SDmaDesHdr& aHdr)
    {
    iController->UnlinkHwDes(*this, aHdr);
    }


void TDmaSgChannel::DoDfc(DDmaRequest& aCurReq, SDmaDesHdr*& aCompletedHdr)
    {
    __DMA_ASSERTD(iTransferring);
    aCompletedHdr = aCurReq.iLastHdr;
    iCurHdr = aCompletedHdr->iNext;
    iTransferring = (iCurHdr != NULL);
    }
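//
// Note: the scatter-gather model above retires one whole request per
// completion - DoDfc() reports aCurReq.iLastHdr - so the PSL is expected
// to raise the completion interrupt at the end of each request's
// descriptor chain rather than once per fragment.
//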