--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/os/kernelhwsrv/kernel/eka/drivers/dma/dmapil.cpp Fri Jun 15 03:10:57 2012 +0200
@@ -0,0 +1,1102 @@
+// Copyright (c) 2002-2009 Nokia Corporation and/or its subsidiary(-ies).
+// All rights reserved.
+// This component and the accompanying materials are made available
+// under the terms of the License "Eclipse Public License v1.0"
+// which accompanies this distribution, and is available
+// at the URL "http://www.eclipse.org/legal/epl-v10.html".
+//
+// Initial Contributors:
+// Nokia Corporation - initial contribution.
+//
+// Contributors:
+//
+// Description:
+// e32\drivers\dmapil.cpp
+// DMA Platform Independent Layer (PIL)
+//
+//
+
+#include <drivers/dma.h>
+#include <kernel/kern_priv.h>
+
+
+static const char KDmaPanicCat[] = "DMA";
+
+NFastMutex DmaChannelMgr::Lock;
+
+class TDmaCancelInfo : public SDblQueLink
+	{
+public:
+	TDmaCancelInfo();
+	void Signal();
+public:
+	NFastSemaphore iSem;
+	};
+
+TDmaCancelInfo::TDmaCancelInfo()
+	: iSem(0)
+	{
+	iNext = this;
+	iPrev = this;
+	}
+
+void TDmaCancelInfo::Signal()
+	{
+	TDmaCancelInfo* p = this;
+	FOREVER
+		{
+		TDmaCancelInfo* next = (TDmaCancelInfo*)p->iNext;
+		if (p!=next)
+			p->Deque();
+		NKern::FSSignal(&p->iSem);	// Don't dereference p after this
+		if (p==next)
+			break;
+		p = next;
+		}
+	}
+
+//////////////////////////////////////////////////////////////////////////////
+
+#ifdef __DMASIM__
+#ifdef __WINS__
+typedef TLinAddr TPhysAddr;
+#endif
+static inline TPhysAddr LinToPhys(TLinAddr aLin) {return aLin;}
+#else
+static inline TPhysAddr LinToPhys(TLinAddr aLin) {return Epoc::LinearToPhysical(aLin);}
+#endif
+
+//
+// Return minimum of aMaxSize and size of largest physically contiguous block
+// starting at aLinAddr.
+//
+static TInt MaxPhysSize(TLinAddr aLinAddr, const TInt aMaxSize)
+	{
+	const TPhysAddr physBase = LinToPhys(aLinAddr);
+	TLinAddr lin = aLinAddr;
+	TInt size = 0;
+	for (;;)
+		{
+		// Round up the linear address to the next MMU page boundary
+		const TLinAddr linBoundary = Kern::RoundToPageSize(lin + 1);
+		size += linBoundary - lin;
+		if (size >= aMaxSize)
+			return aMaxSize;
+		if ((physBase + size) != LinToPhys(linBoundary))
+			return size;
+		lin = linBoundary;
+		}
+	}
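+
+// Worked example (illustrative only, assuming 4KB MMU pages): for
+// aLinAddr == 0x61001100 and aMaxSize == 0x3000 the loop probes the
+// physical address at each following page boundary (0x61002000,
+// 0x61003000, ...). It returns 0x3000 if those pages are physically
+// contiguous with the first, or else the byte count up to the first
+// boundary where contiguity breaks.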
+
+
+//////////////////////////////////////////////////////////////////////////////
+// TDmac
+
+TDmac::TDmac(const SCreateInfo& aInfo)
+	: iMaxDesCount(aInfo.iDesCount),
+	  iAvailDesCount(aInfo.iDesCount),
+	  iDesSize(aInfo.iDesSize),
+	  iCaps(aInfo.iCaps)
+	{
+	__DMA_ASSERTD(iMaxDesCount > 0);
+	__DMA_ASSERTD((iCaps & ~KCapsBitHwDes) == 0);	// undefined bits set?
+	__DMA_ASSERTD(iDesSize > 0);
+	}
+
+//
+// Second-phase c'tor
+//
+
+TInt TDmac::Create(const SCreateInfo& aInfo)
+	{
+	iHdrPool = new SDmaDesHdr[iMaxDesCount];
+	if (iHdrPool == NULL)
+		return KErrNoMemory;
+
+	TInt r = AllocDesPool(aInfo.iDesChunkAttribs);
+	if (r != KErrNone)
+		return r;	// propagate the actual error code
+
+	// Link all descriptor headers together on the free list
+	iFreeHdr = iHdrPool;
+	TInt i;
+	for (i = 0; i < iMaxDesCount - 1; i++)
+		iHdrPool[i].iNext = iHdrPool + i + 1;
+	iHdrPool[iMaxDesCount-1].iNext = NULL;
+
+	__DMA_INVARIANT();
+	return KErrNone;
+	}
+
+
+TDmac::~TDmac()
+	{
+	__DMA_INVARIANT();
+
+	FreeDesPool();
+	delete[] iHdrPool;
+	}
+
+
+// Calling thread must be in CS
+TInt TDmac::AllocDesPool(TUint aAttribs)
+	{
+	TInt r;
+	if (iCaps & KCapsBitHwDes)
+		{
+		TInt size = iMaxDesCount*iDesSize;
+#ifdef __WINS__
+		(void)aAttribs;
+		iDesPool = new TUint8[size];
+		r = iDesPool ? KErrNone : KErrNoMemory;
+#else
+		// The chunk must be mapped supervisor r/w, user none; anything else
+		// means the PSL passed an incorrect attribute mask.
+		__DMA_ASSERTD((aAttribs & EMapAttrAccessMask) == EMapAttrSupRw);
+		TPhysAddr phys;
+		r = Epoc::AllocPhysicalRam(size, phys);
+		if (r == KErrNone)
+			{
+			r = DPlatChunkHw::New(iHwDesChunk, phys, size, aAttribs);
+			if (r == KErrNone)
+				{
+				iDesPool = (TAny*)iHwDesChunk->LinearAddress();
+				__KTRACE_OPT(KDMA, Kern::Printf("descriptor hw chunk created lin=0x%08X phys=0x%08X, size=0x%X",
+												iHwDesChunk->iLinAddr, iHwDesChunk->iPhysAddr, size));
+				}
+			else
+				Epoc::FreePhysicalRam(phys, size);
+			}
+#endif
+		}
+	else
+		{
+		iDesPool = new SDmaPseudoDes[iMaxDesCount];
+		r = iDesPool ? KErrNone : KErrNoMemory;
+		}
+	return r;
+	}
+
+
+// Calling thread must be in CS
+void TDmac::FreeDesPool()
+	{
+	if (iCaps & KCapsBitHwDes)
+		{
+#ifdef __WINS__
+		delete[] iDesPool;
+#else
+		if (iHwDesChunk)
+			{
+			TPhysAddr phys = iHwDesChunk->PhysicalAddress();
+			TInt size = iHwDesChunk->iSize;
+			iHwDesChunk->Close(NULL);
+			Epoc::FreePhysicalRam(phys, size);
+			}
+#endif
+		}
+	else
+		Kern::Free(iDesPool);
+	}
+
+
+/**
+ Preallocate the given number of descriptors.
+ */
+
+TInt TDmac::ReserveSetOfDes(TInt aCount)
+	{
+	__KTRACE_OPT(KDMA, Kern::Printf(">TDmac::ReserveSetOfDes count=%d", aCount));
+	__DMA_ASSERTD(aCount > 0);
+	TInt r = KErrTooBig;
+	Wait();
+	if (iAvailDesCount - aCount >= 0)
+		{
+		iAvailDesCount -= aCount;
+		r = KErrNone;
+		}
+	Signal();
+	__DMA_INVARIANT();
+	__KTRACE_OPT(KDMA, Kern::Printf("<TDmac::ReserveSetOfDes r=%d", r));
+	return r;
+	}
+
+
+/**
+ Return the given number of preallocated descriptors to the free pool.
+ */
+
+void TDmac::ReleaseSetOfDes(TInt aCount)
+	{
+	__DMA_ASSERTD(aCount >= 0);
+	Wait();
+	iAvailDesCount += aCount;
+	Signal();
+	__DMA_INVARIANT();
+	}
+
+
+/**
+ Queue DFC and update word used to communicate with DFC.
+
+ Called in interrupt context by PSL.
+ */
+
+void TDmac::HandleIsr(TDmaChannel& aChannel, TBool aIsComplete)
+	{
+	//__KTRACE_OPT(KDMA, Kern::Printf("TDmac::HandleIsr channel=%d complete=%d", aChannel.PslId(), aIsComplete));
+
+	// Queue the DFC if necessary. The possible scenarios are:
+	// * no DFC queued --> need to queue DFC
+	// * DFC queued (not running yet) --> just need to update iIsrDfc
+	// * DFC running / iIsrDfc already reset --> need to requeue DFC
+	// * DFC running / iIsrDfc not reset yet --> just need to update iIsrDfc
+	// Set error flag if necessary.
+	TUint32 inc = aIsComplete ? 1u : TUint32(TDmaChannel::KErrorFlagMask)|1u;
+	// Atomically increment the DFC count (and possibly set the error flag),
+	// unless the cancel flag is set, in which case the word is left alone.
+	TUint32 orig = __e32_atomic_tau_ord32(&aChannel.iIsrDfc, TUint32(TDmaChannel::KCancelFlagMask), 0, inc);
+
+	// As the transfer should be suspended when an error occurs, we
+	// should never get here with the error flag already set.
+	__DMA_ASSERTD((orig & inc & (TUint32)TDmaChannel::KErrorFlagMask) == 0);
+
+	if (orig == 0)
+		aChannel.iDfc.Add();
+	}
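+
+// Illustrative PSL usage (a sketch, not part of this file): a typical
+// controller ISR decodes which channel interrupted and whether the
+// transfer completed cleanly, then hands off to HandleIsr(). MyDmac,
+// MyIsr(), ChannelFromStatus() and KMyErrBit are hypothetical names.
+//
+//	void MyDmac::MyIsr(TAny* aArg)
+//		{
+//		MyDmac& dmac = *static_cast<MyDmac*>(aArg);
+//		const TUint32 status = dmac.ReadAndClearStatus();		// hw register access (hypothetical)
+//		TDmaChannel& channel = dmac.ChannelFromStatus(status);	// hypothetical decode
+//		const TBool isComplete = !(status & KMyErrBit);			// error bit is made up
+//		dmac.HandleIsr(channel, isComplete);					// queues the channel's DFC
+//		}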
+
+
+void TDmac::InitDes(const SDmaDesHdr& aHdr, TUint32 aSrc, TUint32 aDest, TInt aCount,
+					TUint aFlags, TUint32 aPslInfo, TUint32 aCookie)
+	{
+	if (iCaps & KCapsBitHwDes)
+		InitHwDes(aHdr, aSrc, aDest, aCount, aFlags, aPslInfo, aCookie);
+	else
+		{
+		SDmaPseudoDes& des = HdrToDes(aHdr);
+		des.iSrc = aSrc;
+		des.iDest = aDest;
+		des.iCount = aCount;
+		des.iFlags = aFlags;
+		des.iPslInfo = aPslInfo;
+		des.iCookie = aCookie;
+		}
+	}
+
+
+void TDmac::InitHwDes(const SDmaDesHdr& /*aHdr*/, TUint32 /*aSrc*/, TUint32 /*aDest*/, TInt /*aCount*/,
+					  TUint /*aFlags*/, TUint32 /*aPslInfo*/, TUint32 /*aCookie*/)
+	{
+	// concrete controller must override if KCapsBitHwDes set
+	__DMA_CANT_HAPPEN();
+	}
+
+
+void TDmac::ChainHwDes(const SDmaDesHdr& /*aHdr*/, const SDmaDesHdr& /*aNextHdr*/)
+	{
+	// concrete controller must override if KCapsBitHwDes set
+	__DMA_CANT_HAPPEN();
+	}
+
+
+void TDmac::AppendHwDes(const TDmaChannel& /*aChannel*/, const SDmaDesHdr& /*aLastHdr*/,
+						const SDmaDesHdr& /*aNewHdr*/)
+	{
+	// concrete controller must override if KCapsBitHwDes set
+	__DMA_CANT_HAPPEN();
+	}
+
+
+void TDmac::UnlinkHwDes(const TDmaChannel& /*aChannel*/, SDmaDesHdr& /*aHdr*/)
+	{
+	// concrete controller must override if KCapsBitHwDes set
+	__DMA_CANT_HAPPEN();
+	}
+
+
+TInt TDmac::FailNext(const TDmaChannel& /*aChannel*/)
+	{
+	return KErrNotSupported;
+	}
+
+
+TInt TDmac::MissNextInterrupts(const TDmaChannel& /*aChannel*/, TInt /*aInterruptCount*/)
+	{
+	return KErrNotSupported;
+	}
+
+
+TInt TDmac::Extension(TDmaChannel& /*aChannel*/, TInt /*aCmd*/, TAny* /*aArg*/)
+	{
+	// default implementation - NOP
+	return KErrNotSupported;
+	}
+
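+// Illustrative PSL controller skeleton (a sketch, not part of this file;
+// MyDmac and its members are hypothetical, and the virtual signatures are
+// abbreviated - dma.h holds the authoritative declarations). A controller
+// using hardware descriptors sets KCapsBitHwDes in SCreateInfo::iCaps and
+// must override the hw-descriptor hooks above:
+//
+//	class MyDmac : public TDmac
+//		{
+//	public:
+//		MyDmac(const SCreateInfo& aInfo) : TDmac(aInfo) {}
+//		// Mandatory: program, stop and query the hardware for one channel
+//		virtual void Transfer(const TDmaChannel& aChannel, const SDmaDesHdr& aHdr);
+//		virtual void StopTransfer(const TDmaChannel& aChannel);
+//		virtual TBool IsIdle(const TDmaChannel& aChannel);
+//		// Only needed when KCapsBitHwDes is set:
+//		virtual void InitHwDes(const SDmaDesHdr& aHdr, TUint32 aSrc, TUint32 aDest,
+//							   TInt aCount, TUint aFlags, TUint32 aPslInfo, TUint32 aCookie);
+//		virtual void ChainHwDes(const SDmaDesHdr& aHdr, const SDmaDesHdr& aNextHdr);
+//		};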
+
+#ifdef _DEBUG
+
+void TDmac::Invariant()
+	{
+	Wait();
+	__DMA_ASSERTD(0 <= iAvailDesCount && iAvailDesCount <= iMaxDesCount);
+	__DMA_ASSERTD(! iFreeHdr || IsValidHdr(iFreeHdr));
+	for (TInt i = 0; i < iMaxDesCount; i++)
+		__DMA_ASSERTD(iHdrPool[i].iNext == NULL || IsValidHdr(iHdrPool[i].iNext));
+	Signal();
+	}
+
+
+TBool TDmac::IsValidHdr(const SDmaDesHdr* aHdr)
+	{
+	return (iHdrPool <= aHdr) && (aHdr < iHdrPool + iMaxDesCount);
+	}
+
+#endif
+
+//////////////////////////////////////////////////////////////////////////////
+// DDmaRequest
+
+
+EXPORT_C DDmaRequest::DDmaRequest(TDmaChannel& aChannel, TCallback aCb, TAny* aCbArg, TInt aMaxTransferSize)
+	: iChannel(aChannel),
+	  iCb(aCb),
+	  iCbArg(aCbArg),
+	  iMaxTransferSize(aMaxTransferSize)
+	{
+	// The remaining members are zero-initialised by the kernel allocator
+	// (DBase), so the following assignments are not needed:
+	// iDesCount = 0;
+	// iFirstHdr = iLastHdr = NULL;
+	// iQueued = EFalse;
+	iChannel.iReqCount++;
+	__DMA_INVARIANT();
+	}
+
+
+
+EXPORT_C DDmaRequest::~DDmaRequest()
+	{
+	__DMA_ASSERTD(!iQueued);
+	__DMA_INVARIANT();
+	FreeDesList();
+	iChannel.iReqCount--;
+	}
+
+
+
+EXPORT_C TInt DDmaRequest::Fragment(TUint32 aSrc, TUint32 aDest, TInt aCount, TUint aFlags, TUint32 aPslInfo)
+	{
+	__KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::Fragment thread %O "
+									"src=0x%08X dest=0x%08X count=%d flags=0x%X psl=0x%08X",
+									&Kern::CurrentThread(), aSrc, aDest, aCount, aFlags, aPslInfo));
+	__DMA_ASSERTD(aCount > 0);
+	__DMA_ASSERTD(!iQueued);
+
+	const TUint alignMask = iChannel.MemAlignMask(aFlags, aPslInfo);
+	const TBool memSrc = aFlags & KDmaMemSrc;
+	const TBool memDest = aFlags & KDmaMemDest;
+
+	// Memory buffers must satisfy the alignment constraint
+	__DMA_ASSERTD(!memSrc || ((aSrc & alignMask) == 0));
+	__DMA_ASSERTD(!memDest || ((aDest & alignMask) == 0));
+
+	// Ask the PSL what the maximum size possible for this transfer is
+	TInt maxTransferSize = iChannel.MaxTransferSize(aFlags, aPslInfo);
+	if (!maxTransferSize)
+		{
+		__KTRACE_OPT(KPANIC, Kern::Printf("Error: maxTransferSize == 0"));
+		return KErrArgument;
+		}
+
+	if (iMaxTransferSize)
+		{
+		// The user has set a size cap
+		__DMA_ASSERTA((iMaxTransferSize <= maxTransferSize) || (maxTransferSize == -1));
+		maxTransferSize = iMaxTransferSize;
+		}
+	else
+		{
+		// The user doesn't care about the maximum size
+		if (maxTransferSize == -1)
+			{
+			// No maximum imposed by the controller
+			maxTransferSize = aCount;
+			}
+		}
+
+	const TInt maxAlignedSize = (maxTransferSize & ~alignMask);
+	__DMA_ASSERTD(maxAlignedSize > 0);	// bug in PSL if not true
+
+	FreeDesList();
+
+	TInt r = KErrNone;
+	do
+		{
+		// Allocate fragment
+		r = ExpandDesList();
+		if (r != KErrNone)
+			{
+			FreeDesList();
+			break;
+			}
+
+		// Compute fragment size
+		TInt c = Min(maxTransferSize, aCount);
+		if (memSrc && ((aFlags & KDmaPhysAddrSrc) == 0))
+			c = MaxPhysSize(aSrc, c);
+		if (memDest && ((aFlags & KDmaPhysAddrDest) == 0))
+			c = MaxPhysSize(aDest, c);
+		if ((memSrc || memDest) && (c < aCount) && (c > maxAlignedSize))
+			{
+			// This is not the last fragment of a transfer to/from memory.
+			// We must round down the fragment size so that the next one is
+			// correctly aligned.
+			c = maxAlignedSize;
+			}
+
+		// Initialise fragment
+		__KTRACE_OPT(KDMA, Kern::Printf("fragment: src=0x%08X dest=0x%08X count=%d", aSrc, aDest, c));
+		iChannel.iController->InitDes(*iLastHdr, aSrc, aDest, c, aFlags, aPslInfo, iChannel.PslId());
+
+		// Update for the next iteration
+		aCount -= c;
+		if (memSrc)
+			aSrc += c;
+		if (memDest)
+			aDest += c;
+		}
+	while (aCount > 0);
+
+	__DMA_INVARIANT();
+	return r;
+	}
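+
+// Worked example (illustrative only): fragmenting a 40000-byte
+// memory-to-memory copy on a controller reporting a 16KB (16384-byte)
+// maximum transfer size gives fragments of 16384, 16384 and 7232 bytes.
+// A fragment may be shortened further so that it does not straddle a
+// physically discontiguous page boundary in either buffer, and a non-final
+// fragment larger than the aligned maximum is rounded down so that the
+// next fragment starts correctly aligned.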
+
+
+
+EXPORT_C void DDmaRequest::Queue()
+	{
+	__KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::Queue thread %O", &Kern::CurrentThread()));
+	__DMA_ASSERTD(iDesCount > 0);	// Not configured? Call Fragment() first!
+	__DMA_ASSERTD(!iQueued);
+
+	// Append the request to the queue and link the new descriptor list to
+	// the existing one.
+	iChannel.Wait();
+
+	TUint32 req_count = iChannel.iQueuedRequests++;
+	if (req_count == 0)
+		{
+		iChannel.Signal();
+		iChannel.QueuedRequestCountChanged();
+		iChannel.Wait();
+		}
+
+	if (!(iChannel.iIsrDfc & (TUint32)TDmaChannel::KCancelFlagMask))
+		{
+		iQueued = ETrue;
+		iChannel.iReqQ.Add(&iLink);
+		*iChannel.iNullPtr = iFirstHdr;
+		iChannel.iNullPtr = &(iLastHdr->iNext);
+		iChannel.DoQueue(*this);
+		iChannel.Signal();
+		}
+	else
+		{
+		// Someone is cancelling all requests...
+		req_count = --iChannel.iQueuedRequests;
+		iChannel.Signal();
+		if (req_count == 0)
+			{
+			iChannel.QueuedRequestCountChanged();
+			}
+		}
+
+	__DMA_INVARIANT();
+	}
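+
+// Illustrative client usage (a sketch; the channel pointer, callback and
+// addresses are hypothetical, and most real clients will also pass extra
+// flags and PSL-specific info - see dma.h):
+//
+//	DDmaRequest* req = new DDmaRequest(*channel, MyCallback, this);
+//	if (req && req->Fragment(src, dest, count,
+//							 KDmaMemSrc | KDmaMemDest, 0) == KErrNone)
+//		{
+//		req->Queue();	// MyCallback later runs from the channel's DFC
+//		}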
+
+EXPORT_C TInt DDmaRequest::ExpandDesList(TInt aCount)
+	{
+	__DMA_ASSERTD(!iQueued);
+	__DMA_ASSERTD(aCount > 0);
+
+	if (aCount > iChannel.iAvailDesCount)
+		return KErrTooBig;
+
+	iChannel.iAvailDesCount -= aCount;
+	iDesCount += aCount;
+
+	TDmac& c = *(iChannel.iController);
+	c.Wait();
+
+	if (iFirstHdr == NULL)
+		{
+		// handle empty list specially to simplify following loop
+		iFirstHdr = iLastHdr = c.iFreeHdr;
+		c.iFreeHdr = c.iFreeHdr->iNext;
+		--aCount;
+		}
+	else
+		iLastHdr->iNext = c.iFreeHdr;
+
+	// Remove as many descriptors and headers from free pool as necessary and
+	// ensure hardware descriptors are chained together.
+	while (aCount-- > 0)
+		{
+		__DMA_ASSERTD(c.iFreeHdr != NULL);
+		if (c.iCaps & TDmac::KCapsBitHwDes)
+			c.ChainHwDes(*iLastHdr, *(c.iFreeHdr));
+		iLastHdr = c.iFreeHdr;
+		c.iFreeHdr = c.iFreeHdr->iNext;
+		}
+
+	c.Signal();
+
+	iLastHdr->iNext = NULL;
+
+	__DMA_INVARIANT();
+	return KErrNone;
+	}
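+
+// Note: Fragment() above grows the list one descriptor per fragment via
+// ExpandDesList()'s default count. Clients building their own hardware
+// descriptor chains can instead reserve several headers in one call and
+// fail early with KErrTooBig if the channel's descriptor pool is exhausted.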
+
+
+
+
+EXPORT_C void DDmaRequest::FreeDesList()
+	{
+	__DMA_ASSERTD(!iQueued);
+	if (iDesCount > 0)
+		{
+		iChannel.iAvailDesCount += iDesCount;
+		TDmac& c = *(iChannel.iController);
+		c.Wait();
+		iLastHdr->iNext = c.iFreeHdr;
+		c.iFreeHdr = iFirstHdr;
+		c.Signal();
+		iFirstHdr = iLastHdr = NULL;
+		iDesCount = 0;
+		}
+	}
+
+
+#ifdef _DEBUG
+
+void DDmaRequest::Invariant()
+	{
+	iChannel.Wait();
+	__DMA_ASSERTD(iChannel.IsOpened());
+	__DMA_ASSERTD(0 <= iMaxTransferSize);
+	__DMA_ASSERTD(0 <= iDesCount && iDesCount <= iChannel.iMaxDesCount);
+	if (iDesCount == 0)
+		{
+		__DMA_ASSERTD(!iQueued);
+		__DMA_ASSERTD(!iFirstHdr && !iLastHdr);
+		}
+	else
+		{
+		__DMA_ASSERTD(iChannel.iController->IsValidHdr(iFirstHdr));
+		__DMA_ASSERTD(iChannel.iController->IsValidHdr(iLastHdr));
+		}
+	iChannel.Signal();
+	}
+
+#endif
+
+
+//////////////////////////////////////////////////////////////////////////////
+// TDmaChannel
+
+
+EXPORT_C TInt TDmaChannel::StaticExtension(TInt aCmd, TAny* aArg)
+	{
+	return DmaChannelMgr::StaticExtension(aCmd, aArg);
+	}
+
+
+TDmaChannel::TDmaChannel()
+	: iController(NULL),
+	  iPslId(0),
+	  iCurHdr(NULL),
+	  iNullPtr(&iCurHdr),
+	  iDfc(Dfc, NULL, 0),
+	  iMaxDesCount(0),
+	  iAvailDesCount(0),
+	  iIsrDfc(0),
+	  iReqQ(),
+	  iReqCount(0),
+	  iQueuedRequests(0),
+	  iCancelInfo(NULL)
+	{
+	__DMA_INVARIANT();
+	}
+
+
+EXPORT_C TInt TDmaChannel::Open(const SCreateInfo& aInfo, TDmaChannel*& aChannel)
+	{
+	__KTRACE_OPT(KDMA, Kern::Printf("TDmaChannel::Open thread %O", &Kern::CurrentThread()));
+	__DMA_ASSERTD(aInfo.iDfcQ != NULL);
+	__DMA_ASSERTD(aInfo.iDfcPriority < KNumDfcPriorities);
+	__DMA_ASSERTD(aInfo.iDesCount >= 1);
+
+	aChannel = NULL;
+
+	DmaChannelMgr::Wait();
+	TDmaChannel* pC = DmaChannelMgr::Open(aInfo.iCookie);
+	DmaChannelMgr::Signal();
+	if (!pC)
+		return KErrInUse;
+
+	TInt r = pC->iController->ReserveSetOfDes(aInfo.iDesCount);
+	if (r != KErrNone)
+		{
+		pC->Close();
+		return r;
+		}
+	pC->iAvailDesCount = pC->iMaxDesCount = aInfo.iDesCount;
+
+	new (&pC->iDfc) TDfc(&Dfc, pC, aInfo.iDfcQ, aInfo.iDfcPriority);
+
+	aChannel = pC;
+
+#ifdef _DEBUG
+	pC->Invariant();
+#endif
+	__KTRACE_OPT(KDMA, Kern::Printf("opened channel %d", pC->iPslId));
+	return KErrNone;
+	}
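+
+// Illustrative open sequence (a sketch; KMyDmaChannelId and gMyDfcQ are
+// hypothetical - the cookie values a PSL accepts are defined by that PSL):
+//
+//	TDmaChannel::SCreateInfo info;
+//	info.iCookie = KMyDmaChannelId;	// PSL-specific channel identifier
+//	info.iDesCount = 8;				// descriptors to reserve for this client
+//	info.iDfcQ = gMyDfcQ;			// DFC queue that runs the callbacks
+//	info.iDfcPriority = 3;
+//	TDmaChannel* channel = NULL;
+//	TInt r = TDmaChannel::Open(info, channel);	// KErrInUse if already open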
+
+
+EXPORT_C void TDmaChannel::Close()
+	{
+	__KTRACE_OPT(KDMA, Kern::Printf("TDmaChannel::Close %d", iPslId));
+	__DMA_ASSERTD(IsOpened());
+	__DMA_ASSERTD(IsQueueEmpty());
+	__DMA_ASSERTD(iReqCount == 0);
+
+	__DMA_ASSERTD(iQueuedRequests == 0);
+
+	// A descriptor leak here would indicate a bug in the request code
+	__DMA_ASSERTD(iAvailDesCount == iMaxDesCount);
+
+	iController->ReleaseSetOfDes(iMaxDesCount);
+	iAvailDesCount = iMaxDesCount = 0;
+
+	DmaChannelMgr::Wait();
+	DmaChannelMgr::Close(this);
+	iController = NULL;
+	DmaChannelMgr::Signal();
+
+	__DMA_INVARIANT();
+	}
+
+
+EXPORT_C void TDmaChannel::CancelAll()
+	{
+	__KTRACE_OPT(KDMA, Kern::Printf("TDmaChannel::CancelAll thread %O channel - %d",
+									&Kern::CurrentThread(), iPslId));
+	__DMA_ASSERTD(IsOpened());
+
+	NThread* nt = NKern::CurrentThread();
+	TBool wait = EFalse;
+	TDmaCancelInfo c;
+	TDmaCancelInfo* waiters = 0;
+
+	NKern::ThreadEnterCS();
+	Wait();
+	const TUint32 req_count_before = iQueuedRequests;
+	NThreadBase* dfcnt = iDfc.Thread();
+	__e32_atomic_store_ord32(&iIsrDfc, (TUint32)KCancelFlagMask);
+	// ISRs after this point will not post a DFC; however, a DFC may already
+	// be queued or running or both
+	if (!IsQueueEmpty())
+		{
+		// There is a transfer in progress. It may complete before the DMAC
+		// has stopped, but the resulting ISR will not post a DFC.
+		// An ISR should not happen after this function returns.
+		iController->StopTransfer(*this);
+
+		ResetStateMachine();
+
+		// Clean up the request queue.
+		SDblQueLink* pL;
+		while ((pL = iReqQ.GetFirst()) != NULL)
+			{
+			iQueuedRequests--;
+			DDmaRequest* pR = _LOFF(pL, DDmaRequest, iLink);
+			pR->OnDeque();
+			}
+		}
+	if (!dfcnt || dfcnt==nt)
+		{
+		// No DFC queue or the DFC runs in this thread, so just cancel it and
+		// we're finished
+		iDfc.Cancel();
+
+		// If other calls to CancelAll() are waiting for the DFC, release them here
+		waiters = iCancelInfo;
+		iCancelInfo = 0;
+
+		// Reset the ISR count
+		__e32_atomic_store_rel32(&iIsrDfc, 0);
+		}
+	else
+		{
+		// The DFC runs in another thread. Make sure it's queued and then
+		// wait for it to run.
+		if (iCancelInfo)
+			c.InsertBefore(iCancelInfo);
+		else
+			iCancelInfo = &c;
+		wait = ETrue;
+		iDfc.Enque();
+		}
+	const TUint32 req_count_after = iQueuedRequests;
+	Signal();
+	if (waiters)
+		waiters->Signal();
+	if (wait)
+		NKern::FSWait(&c.iSem);
+	NKern::ThreadLeaveCS();
+
+	// Only call the PSL if there were requests queued when we entered AND
+	// there are now no requests left on the queue.
+	if ((req_count_before != 0) && (req_count_after == 0))
+		{
+		QueuedRequestCountChanged();
+		}
+
+	__DMA_INVARIANT();
+	}
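+
+// Typical client shutdown order (illustrative): cancel outstanding work,
+// delete the requests (the DDmaRequest destructor asserts the request is
+// no longer queued), then close the channel (Close() asserts that no
+// requests remain):
+//
+//	channel->CancelAll();
+//	delete req;
+//	channel->Close();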
+
+
+/**
+ DFC callback function (static member).
+ */
+
+void TDmaChannel::Dfc(TAny* aArg)
+	{
+	((TDmaChannel*)aArg)->DoDfc();
+	}
+
+
+void TDmaChannel::DoDfc()
+	{
+	Wait();
+
+	// Atomically fetch and reset the number of DFCs queued by the ISR and
+	// the error flag. Leave the cancel flag alone for now.
+	const TUint32 w = __e32_atomic_and_ord32(&iIsrDfc, (TUint32)KCancelFlagMask);
+	TUint32 count = w & KDfcCountMask;
+	const TBool error = w & (TUint32)KErrorFlagMask;
+	TBool stop = w & (TUint32)KCancelFlagMask;
+	__DMA_ASSERTD(count>0 || stop);
+	const TUint32 req_count_before = iQueuedRequests;
+	TUint32 req_count_after = 0;
+
+	while (count && !stop)
+		{
+		--count;
+
+		// If an error occurred it must have been reported on the last
+		// interrupt since transfers are suspended after an error.
+		DDmaRequest::TResult res = (count==0 && error) ? DDmaRequest::EError : DDmaRequest::EOk;
+		__DMA_ASSERTD(!iReqQ.IsEmpty());
+		DDmaRequest* pCompletedReq = NULL;
+		DDmaRequest* pCurReq = _LOFF(iReqQ.First(), DDmaRequest, iLink);
+		DDmaRequest::TCallback cb = 0;
+		TAny* arg = 0;
+
+		if (res == DDmaRequest::EOk)
+			{
+			// Update the state machine, current fragment, completed fragment
+			// and tell the DMAC to transfer the next fragment if necessary.
+			SDmaDesHdr* pCompletedHdr = NULL;
+			DoDfc(*pCurReq, pCompletedHdr);
+
+			// If we have just completed the last fragment from the current
+			// request, switch to the next request (if any).
+			if (pCompletedHdr == pCurReq->iLastHdr)
+				{
+				pCompletedReq = pCurReq;
+				pCurReq->iLink.Deque();
+				iQueuedRequests--;
+				if (iReqQ.IsEmpty())
+					iNullPtr = &iCurHdr;
+				pCompletedReq->OnDeque();
+				}
+			}
+		else if (res == DDmaRequest::EError)
+			pCompletedReq = pCurReq;
+		else
+			__DMA_CANT_HAPPEN();
+		if (pCompletedReq)
+			{
+			cb = pCompletedReq->iCb;
+			arg = pCompletedReq->iCbArg;
+			Signal();
+			__KTRACE_OPT(KDMA, Kern::Printf("notifying DMA client result=%d", res));
+			(*cb)(res,arg);
+			Wait();
+			}
+		if (pCompletedReq || Flash())
+			stop = __e32_atomic_load_acq32(&iIsrDfc) & (TUint32)KCancelFlagMask;
+		}
+
+	// Some interrupts may be missed (double-buffer and scatter-gather
+	// controllers only) if two or more transfers complete while interrupts
+	// are disabled in the CPU. If this happens, the framework will go out of
+	// sync and leave some orphaned requests in the queue.
+	//
+	// To ensure correctness we handle this case here by checking that the
+	// request queue is empty when all transfers have completed and, if not,
+	// cleaning up and notifying the client of the completion of the orphaned
+	// requests.
+	//
+	// Note that if some interrupts are missed and the controller raises an
+	// error while transferring a subsequent fragment, the error will be
+	// reported on a fragment which was successfully completed. There is no
+	// easy solution to this problem, but this is okay as the only possible
+	// action following a failure is to flush the whole queue.
+	if (stop)
+		{
+		TDmaCancelInfo* waiters = iCancelInfo;
+		iCancelInfo = 0;
+
+		// Make sure the DFC doesn't run again until a new request completes
+		iDfc.Cancel();
+
+		// Reset the ISR count - new requests can now be processed
+		__e32_atomic_store_rel32(&iIsrDfc, 0);
+
+		req_count_after = iQueuedRequests;
+		Signal();
+
+		// Release threads doing CancelAll()
+		waiters->Signal();
+		}
+	else if (!error && !iDfc.Queued() && !iReqQ.IsEmpty() && iController->IsIdle(*this))
+		{
+		__KTRACE_OPT(KDMA, Kern::Printf("Missed interrupt(s) - draining request queue"));
+		ResetStateMachine();
+
+		// Move orphaned requests to a temporary queue so that the channel
+		// queue can accept new requests.
+		SDblQue q;
+		q.MoveFrom(&iReqQ);
+
+		SDblQueLink* pL;
+		while ((pL = q.GetFirst()) != NULL)
+			{
+			iQueuedRequests--;
+			DDmaRequest* pR = _LOFF(pL, DDmaRequest, iLink);
+			__KTRACE_OPT(KDMA, Kern::Printf("Removing request from queue and notifying client"));
+			pR->OnDeque();
+			DDmaRequest::TCallback cb = pR->iCb;
+			TAny* arg = pR->iCbArg;
+			if (cb)
+				{
+				Signal();
+				(*cb)(DDmaRequest::EOk, arg);
+				Wait();
+				}
+			}
+		req_count_after = iQueuedRequests;
+		Signal();
+		}
+	else
+		{
+		req_count_after = iQueuedRequests;
+		Signal();
+		}
+
+	// Only call the PSL if there were requests queued when we entered AND
+	// there are now no requests left on the queue (after also having
+	// executed all client callbacks).
+	if ((req_count_before != 0) && (req_count_after == 0))
+		{
+		QueuedRequestCountChanged();
+		}
+
+	__DMA_INVARIANT();
+	}
+
+
+/** Resets the state machine only; the request queue is unchanged. */
+
+void TDmaChannel::ResetStateMachine()
+	{
+	DoCancelAll();
+	iCurHdr = NULL;
+	iNullPtr = &iCurHdr;
+	}
+
+
+/** Unlink the last item of an LLI chain from the next chain.
+	The default implementation does nothing; this is overridden by
+	scatter-gather channels. */
+
+void TDmaChannel::DoUnlink(SDmaDesHdr& /*aHdr*/)
+	{
+	}
+
+
+/** PSL may override */
+void TDmaChannel::QueuedRequestCountChanged()
+	{
+#ifdef _DEBUG
+	Wait();
+	__KTRACE_OPT(KDMA,
+				 Kern::Printf("TDmaChannel::QueuedRequestCountChanged() %d",
+							  iQueuedRequests));
+	__DMA_ASSERTA(iQueuedRequests >= 0);
+	Signal();
+#endif
+	}
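+
+// Illustrative PSL override (a sketch; MyChannel and the clock calls are
+// hypothetical). A PSL can use this hook to gate clocks or power according
+// to whether the channel still has outstanding work:
+//
+//	void MyChannel::QueuedRequestCountChanged()
+//		{
+//		Wait();
+//		const TInt count = iQueuedRequests;
+//		Signal();
+//		if (count == 0)
+//			MyClockController::Release();	// hypothetical
+//		else
+//			MyClockController::Request();	// hypothetical
+//		}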
+
+
+#ifdef _DEBUG
+
+void TDmaChannel::Invariant()
+	{
+	Wait();
+
+	__DMA_ASSERTD(iReqCount >= 0);
+	// iNullPtr should always point to the NULL pointer ending the fragment queue
+	__DMA_ASSERTD(*iNullPtr == NULL);
+
+	__DMA_ASSERTD(0 <= iAvailDesCount && iAvailDesCount <= iMaxDesCount);
+
+	__DMA_ASSERTD(iCurHdr == NULL || iController->IsValidHdr(iCurHdr));
+
+	if (IsOpened())
+		{
+		__DMA_ASSERTD((iCurHdr && !IsQueueEmpty()) || (!iCurHdr && IsQueueEmpty()));
+		if (iCurHdr == NULL)
+			__DMA_ASSERTD(iNullPtr == &iCurHdr);
+		}
+	else
+		{
+		__DMA_ASSERTD(iCurHdr == NULL);
+		__DMA_ASSERTD(iNullPtr == &iCurHdr);
+		__DMA_ASSERTD(IsQueueEmpty());
+		}
+
+	Signal();
+	}
+
+#endif
+
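+// The remainder of this file implements the three concrete channel types
+// provided by the PIL: TDmaSbChannel drives single-buffer controllers (one
+// fragment outstanding at a time), TDmaDbChannel drives double-buffer
+// controllers (up to two fragments primed), and TDmaSgChannel drives
+// scatter-gather controllers (whole descriptor chains handed to the
+// hardware).
+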
+//////////////////////////////////////////////////////////////////////////////
+// TDmaSbChannel
+
+void TDmaSbChannel::DoQueue(DDmaRequest& /*aReq*/)
+	{
+	if (!iTransferring)
+		{
+		iController->Transfer(*this, *iCurHdr);
+		iTransferring = ETrue;
+		}
+	}
+
+
+void TDmaSbChannel::DoCancelAll()
+	{
+	__DMA_ASSERTD(iTransferring);
+	iTransferring = EFalse;
+	}
+
+
+void TDmaSbChannel::DoDfc(DDmaRequest& /*aCurReq*/, SDmaDesHdr*& aCompletedHdr)
+	{
+	__DMA_ASSERTD(iTransferring);
+	aCompletedHdr = iCurHdr;
+	iCurHdr = iCurHdr->iNext;
+	if (iCurHdr != NULL)
+		iController->Transfer(*this, *iCurHdr);
+	else
+		iTransferring = EFalse;
+	}
+
+
+//////////////////////////////////////////////////////////////////////////////
+// TDmaDbChannel
+
+void TDmaDbChannel::DoQueue(DDmaRequest& aReq)
+	{
+	switch (iState)
+		{
+	case EIdle:
+		iController->Transfer(*this, *iCurHdr);
+		if (iCurHdr->iNext)
+			{
+			iController->Transfer(*this, *(iCurHdr->iNext));
+			iState = ETransferring;
+			}
+		else
+			iState = ETransferringLast;
+		break;
+	case ETransferring:
+		// nothing to do
+		break;
+	case ETransferringLast:
+		iController->Transfer(*this, *(aReq.iFirstHdr));
+		iState = ETransferring;
+		break;
+	default:
+		__DMA_CANT_HAPPEN();
+		}
+	}
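+
+// State summary (illustrative): EIdle -> ETransferringLast when a single
+// fragment is queued on an idle channel; EIdle -> ETransferring when two or
+// more fragments are queued (both hardware buffers primed);
+// ETransferringLast -> ETransferring when a new request arrives while the
+// final fragment is still in flight. DoDfc() below steps back down as
+// fragments complete.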
+
+
+void TDmaDbChannel::DoCancelAll()
+	{
+	iState = EIdle;
+	}
+
+
+void TDmaDbChannel::DoDfc(DDmaRequest& /*aCurReq*/, SDmaDesHdr*& aCompletedHdr)
+	{
+	aCompletedHdr = iCurHdr;
+	iCurHdr = iCurHdr->iNext;
+	switch (iState)
+		{
+	case ETransferringLast:
+		iState = EIdle;
+		break;
+	case ETransferring:
+		if (iCurHdr->iNext == NULL)
+			iState = ETransferringLast;
+		else
+			iController->Transfer(*this, *(iCurHdr->iNext));
+		break;
+	default:
+		__DMA_CANT_HAPPEN();
+		}
+	}
+
+
+//////////////////////////////////////////////////////////////////////////////
+// TDmaSgChannel
+
+void TDmaSgChannel::DoQueue(DDmaRequest& aReq)
+	{
+	if (iTransferring)
+		{
+		__DMA_ASSERTD(!aReq.iLink.Alone());
+		DDmaRequest* pReqPrev = _LOFF(aReq.iLink.iPrev, DDmaRequest, iLink);
+		iController->AppendHwDes(*this, *(pReqPrev->iLastHdr), *(aReq.iFirstHdr));
+		}
+	else
+		{
+		iController->Transfer(*this, *(aReq.iFirstHdr));
+		iTransferring = ETrue;
+		}
+	}
+
+
+void TDmaSgChannel::DoCancelAll()
+	{
+	__DMA_ASSERTD(iTransferring);
+	iTransferring = EFalse;
+	}
+
+
+void TDmaSgChannel::DoUnlink(SDmaDesHdr& aHdr)
+	{
+	iController->UnlinkHwDes(*this, aHdr);
+	}
+
+
+void TDmaSgChannel::DoDfc(DDmaRequest& aCurReq, SDmaDesHdr*& aCompletedHdr)
+	{
+	__DMA_ASSERTD(iTransferring);
+	aCompletedHdr = aCurReq.iLastHdr;
+	iCurHdr = aCompletedHdr->iNext;
+	iTransferring = (iCurHdr != NULL);
+	}