// Copyright (c) 2002-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\drivers\dmapil.cpp
// DMA Platform Independent Layer (PIL)
//
//

#include <drivers/dma.h>
#include <kernel/kern_priv.h>


static const char KDmaPanicCat[] = "DMA";

NFastMutex DmaChannelMgr::Lock;

class TDmaCancelInfo : public SDblQueLink
	{
public:
	TDmaCancelInfo();
	void Signal();
public:
	NFastSemaphore iSem;
	};

TDmaCancelInfo::TDmaCancelInfo()
	:	iSem(0)
	{
	iNext = this;
	iPrev = this;
	}

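// Wake up everybody waiting on this cancel list. The TDmaCancelInfo objects
// form a circular, doubly-linked list, so each node is dequeued and signalled
// in turn; a node lives on its waiter's stack, hence the care not to touch it
// after its semaphore has been signalled.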
void TDmaCancelInfo::Signal()
	{
	TDmaCancelInfo* p = this;
	FOREVER
		{
		TDmaCancelInfo* next = (TDmaCancelInfo*)p->iNext;
		if (p!=next)
			p->Deque();
		NKern::FSSignal(&p->iSem);	// Don't dereference p after this
		if (p==next)
			break;
		p = next;
		}
	}

//////////////////////////////////////////////////////////////////////////////

#ifdef __DMASIM__
#ifdef __WINS__
typedef TLinAddr TPhysAddr;
#endif
static inline TPhysAddr LinToPhys(TLinAddr aLin) {return aLin;}
#else
static inline TPhysAddr LinToPhys(TLinAddr aLin) {return Epoc::LinearToPhysical(aLin);}
#endif

//
// Return minimum of aMaxSize and size of largest physically contiguous block
// starting at aLinAddr.
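//
// For example (assuming a 4 KB page size): if aLinAddr is 0x1800 and the page
// mapped at 0x2000 is not physically adjacent to the one containing aLinAddr,
// the largest contiguous run is 0x800 bytes and the function returns
// Min(aMaxSize, 0x800).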
//
static TInt MaxPhysSize(TLinAddr aLinAddr, const TInt aMaxSize)
	{
	const TPhysAddr physBase = LinToPhys(aLinAddr);
	TLinAddr lin = aLinAddr;
	TInt size = 0;
	for (;;)
		{
		// Round up the linear address to the next MMU page boundary
		const TLinAddr linBoundary = Kern::RoundToPageSize(lin + 1);
		size += linBoundary - lin;
		if (size >= aMaxSize)
			return aMaxSize;
		if ((physBase + size) != LinToPhys(linBoundary))
			return size;
		lin = linBoundary;
		}
	}


//////////////////////////////////////////////////////////////////////////////
// TDmac

TDmac::TDmac(const SCreateInfo& aInfo)
	: iMaxDesCount(aInfo.iDesCount),
	  iAvailDesCount(aInfo.iDesCount),
	  iDesSize(aInfo.iDesSize),
	  iCaps(aInfo.iCaps)
	{
	__DMA_ASSERTD(iMaxDesCount > 0);
	__DMA_ASSERTD((iCaps & ~KCapsBitHwDes) == 0); // undefined bits set?
	__DMA_ASSERTD(iDesSize > 0);
	}

//
// Second-phase c'tor
//

TInt TDmac::Create(const SCreateInfo& aInfo)
	{
	iHdrPool = new SDmaDesHdr[iMaxDesCount];
	if (iHdrPool == NULL)
		return KErrNoMemory;

	TInt r = AllocDesPool(aInfo.iDesChunkAttribs);
	if (r != KErrNone)
		return r;

	// Link all descriptor headers together on the free list
	iFreeHdr = iHdrPool;
	TInt i;
	for (i = 0; i < iMaxDesCount - 1; i++)
		iHdrPool[i].iNext = iHdrPool + i + 1;
	iHdrPool[iMaxDesCount-1].iNext = NULL;

	__DMA_INVARIANT();
	return KErrNone;
	}


TDmac::~TDmac()
	{
	__DMA_INVARIANT();

	FreeDesPool();
	delete[] iHdrPool;
	}


// Calling thread must be in CS
TInt TDmac::AllocDesPool(TUint aAttribs)
	{
	TInt r;
	if (iCaps & KCapsBitHwDes)
		{
		TInt size = iMaxDesCount*iDesSize;
#ifdef __WINS__
		(void)aAttribs;
		iDesPool = new TUint8[size];
		r = iDesPool ? KErrNone : KErrNoMemory;
#else
		// Chunk not mapped as supervisor r/w, user none? Incorrect mask passed by PSL.
		__DMA_ASSERTD((aAttribs & EMapAttrAccessMask) == EMapAttrSupRw);
		TPhysAddr phys;
		r = Epoc::AllocPhysicalRam(size, phys);
		if (r == KErrNone)
			{
			r = DPlatChunkHw::New(iHwDesChunk, phys, size, aAttribs);
			if (r == KErrNone)
				{
				iDesPool = (TAny*)iHwDesChunk->LinearAddress();
				__KTRACE_OPT(KDMA, Kern::Printf("descriptor hw chunk created lin=0x%08X phys=0x%08X, size=0x%X",
												iHwDesChunk->iLinAddr, iHwDesChunk->iPhysAddr, size));
				}
			else
				Epoc::FreePhysicalRam(phys, size);
			}
#endif
		}
	else
		{
		iDesPool = new SDmaPseudoDes[iMaxDesCount];
		r = iDesPool ? KErrNone : KErrNoMemory;
		}
	return r;
	}


// Calling thread must be in CS
void TDmac::FreeDesPool()
	{
	if (iCaps & KCapsBitHwDes)
		{
#ifdef __WINS__
		delete[] iDesPool;
#else
		if (iHwDesChunk)
			{
			TPhysAddr phys = iHwDesChunk->PhysicalAddress();
			TInt size = iHwDesChunk->iSize;
			iHwDesChunk->Close(NULL);
			Epoc::FreePhysicalRam(phys, size);
			}
#endif
		}
	else
		Kern::Free(iDesPool);
	}


/**
 Preallocate the given number of descriptors.
 */

TInt TDmac::ReserveSetOfDes(TInt aCount)
	{
	__KTRACE_OPT(KDMA, Kern::Printf(">TDmac::ReserveSetOfDes count=%d", aCount));
	__DMA_ASSERTD(aCount > 0);
	TInt r = KErrTooBig;
	Wait();
	if (iAvailDesCount - aCount >= 0)
		{
		iAvailDesCount -= aCount;
		r = KErrNone;
		}
	Signal();
	__DMA_INVARIANT();
	__KTRACE_OPT(KDMA, Kern::Printf("<TDmac::ReserveSetOfDes r=%d", r));
	return r;
	}


/**
 Return the given number of preallocated descriptors to the free pool.
 */

void TDmac::ReleaseSetOfDes(TInt aCount)
	{
	__DMA_ASSERTD(aCount >= 0);
	Wait();
	iAvailDesCount += aCount;
	Signal();
	__DMA_INVARIANT();
	}


/**
 Queue DFC and update word used to communicate with DFC.

 Called in interrupt context by PSL.
 */

void TDmac::HandleIsr(TDmaChannel& aChannel, TBool aIsComplete)
	{
	//__KTRACE_OPT(KDMA, Kern::Printf("TDmac::HandleIsr channel=%d complete=%d", aChannelIdx, aIsComplete));

	// Queue DFC if necessary.  The possible scenarios are:
	// * no DFC queued --> need to queue DFC
	// * DFC queued (not running yet) --> just need to update iIsrDfc
	// * DFC running / iIsrDfc already reset --> need to requeue DFC
	// * DFC running / iIsrDfc not reset yet --> just need to update iIsrDfc
	// Set error flag if necessary.
	TUint32 inc = aIsComplete ? 1u : TUint32(TDmaChannel::KErrorFlagMask)|1u;
	TUint32 orig = __e32_atomic_tau_ord32(&aChannel.iIsrDfc, TUint32(TDmaChannel::KCancelFlagMask), 0, inc);
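	// (__e32_atomic_tau_ord32 is a "threshold add": if the old value of
	// iIsrDfc was >= KCancelFlagMask, i.e. a cancel is in progress, it adds 0,
	// otherwise it adds 'inc'. So ISRs neither count completions nor flag
	// errors while CancelAll() owns the channel.)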

	// As the transfer should be suspended when an error occurs, we
	// should never get here with the error flag already set.
	__DMA_ASSERTD((orig & inc & (TUint32)TDmaChannel::KErrorFlagMask) == 0);

	if (orig == 0)
		aChannel.iDfc.Add();
	}


void TDmac::InitDes(const SDmaDesHdr& aHdr, TUint32 aSrc, TUint32 aDest, TInt aCount,
					TUint aFlags, TUint32 aPslInfo, TUint32 aCookie)
	{
	if (iCaps & KCapsBitHwDes)
		InitHwDes(aHdr, aSrc, aDest, aCount, aFlags, aPslInfo, aCookie);
	else
		{
		SDmaPseudoDes& des = HdrToDes(aHdr);
		des.iSrc = aSrc;
		des.iDest = aDest;
		des.iCount = aCount;
		des.iFlags = aFlags;
		des.iPslInfo = aPslInfo;
		des.iCookie = aCookie;
		}
	}


void TDmac::InitHwDes(const SDmaDesHdr& /*aHdr*/, TUint32 /*aSrc*/, TUint32 /*aDest*/, TInt /*aCount*/,
					  TUint /*aFlags*/, TUint32 /*aPslInfo*/, TUint32 /*aCookie*/)
	{
	// concrete controller must override if KCapsBitHwDes set
	__DMA_CANT_HAPPEN();
	}


void TDmac::ChainHwDes(const SDmaDesHdr& /*aHdr*/, const SDmaDesHdr& /*aNextHdr*/)
	{
	// concrete controller must override if KCapsBitHwDes set
	__DMA_CANT_HAPPEN();
	}


void TDmac::AppendHwDes(const TDmaChannel& /*aChannel*/, const SDmaDesHdr& /*aLastHdr*/,
						const SDmaDesHdr& /*aNewHdr*/)
	{
	// concrete controller must override if KCapsBitHwDes set
	__DMA_CANT_HAPPEN();
	}


void TDmac::UnlinkHwDes(const TDmaChannel& /*aChannel*/, SDmaDesHdr& /*aHdr*/)
	{
	// concrete controller must override if KCapsBitHwDes set
	__DMA_CANT_HAPPEN();
	}


TInt TDmac::FailNext(const TDmaChannel& /*aChannel*/)
	{
	return KErrNotSupported;
	}


TInt TDmac::MissNextInterrupts(const TDmaChannel& /*aChannel*/, TInt /*aInterruptCount*/)
	{
	return KErrNotSupported;
	}


TInt TDmac::Extension(TDmaChannel& /*aChannel*/, TInt /*aCmd*/, TAny* /*aArg*/)
	{
	// default implementation - NOP
	return KErrNotSupported;
	}


#ifdef _DEBUG

void TDmac::Invariant()
	{
	Wait();
	__DMA_ASSERTD(0 <= iAvailDesCount && iAvailDesCount <= iMaxDesCount);
	__DMA_ASSERTD(! iFreeHdr || IsValidHdr(iFreeHdr));
	for (TInt i = 0; i < iMaxDesCount; i++)
		__DMA_ASSERTD(iHdrPool[i].iNext == NULL || IsValidHdr(iHdrPool[i].iNext));
	Signal();
	}


TBool TDmac::IsValidHdr(const SDmaDesHdr* aHdr)
	{
	return (iHdrPool <= aHdr) && (aHdr < iHdrPool + iMaxDesCount);
	}

#endif

//////////////////////////////////////////////////////////////////////////////
// DDmaRequest


EXPORT_C DDmaRequest::DDmaRequest(TDmaChannel& aChannel, TCallback aCb, TAny* aCbArg, TInt aMaxTransferSize)
	: iChannel(aChannel),
	  iCb(aCb),
	  iCbArg(aCbArg),
	  iMaxTransferSize(aMaxTransferSize)
	{
	// iDesCount = 0;
	// iFirstHdr = iLastHdr = NULL;
	// iQueued = EFalse;
	iChannel.iReqCount++;
	__DMA_INVARIANT();
	}



EXPORT_C DDmaRequest::~DDmaRequest()
	{
	__DMA_ASSERTD(!iQueued);
	__DMA_INVARIANT();
	FreeDesList();
	iChannel.iReqCount--;
	}



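// Typical client usage (illustrative sketch only; the channel cookie, DFC
// queue, addresses and counts below are placeholders, not values defined by
// this file):
//
//   TDmaChannel::SCreateInfo info;
//   info.iCookie = KPlatformDmaChannel0;   // platform-specific channel id
//   info.iDesCount = 8;
//   info.iDfcQ = clientDfcQ;
//   info.iDfcPriority = 3;
//   TDmaChannel* channel = NULL;
//   TInt r = TDmaChannel::Open(info, channel);
//
//   DDmaRequest* req = new DDmaRequest(*channel, ClientCb, clientPtr);
//   r = req->Fragment(srcAddr, destAddr, count, KDmaMemSrc | KDmaMemDest, 0);
//   if (r == KErrNone)
//       req->Queue();   // ClientCb(EOk/EError, clientPtr) runs from the DFC
//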
EXPORT_C TInt DDmaRequest::Fragment(TUint32 aSrc, TUint32 aDest, TInt aCount, TUint aFlags, TUint32 aPslInfo)
	{
	__KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::Fragment thread %O "
									"src=0x%08X dest=0x%08X count=%d flags=0x%X psl=0x%08X",
									&Kern::CurrentThread(), aSrc, aDest, aCount, aFlags, aPslInfo));
	__DMA_ASSERTD(aCount > 0);
	__DMA_ASSERTD(!iQueued);

	const TUint alignMask = iChannel.MemAlignMask(aFlags, aPslInfo);
	const TBool memSrc  = aFlags & KDmaMemSrc;
	const TBool memDest = aFlags & KDmaMemDest;

	// Memory buffers must satisfy alignment constraint
	__DMA_ASSERTD(!memSrc || ((aSrc & alignMask) == 0));
	__DMA_ASSERTD(!memDest || ((aDest & alignMask) == 0));

	// Ask the PSL what the maximum size possible for this transfer is
	TInt maxTransferSize = iChannel.MaxTransferSize(aFlags, aPslInfo);
	if (!maxTransferSize)
		{
		__KTRACE_OPT(KPANIC, Kern::Printf("Error: maxTransferSize == 0"));
		return KErrArgument;
		}

	if (iMaxTransferSize)
		{
		// User has set a size cap
		__DMA_ASSERTA((iMaxTransferSize <= maxTransferSize) || (maxTransferSize == -1));
		maxTransferSize = iMaxTransferSize;
		}
	else
		{
		// User doesn't care about max size
		if (maxTransferSize == -1)
			{
			// No maximum imposed by controller
			maxTransferSize = aCount;
			}
		}

	const TInt maxAlignedSize = (maxTransferSize & ~alignMask);
	__DMA_ASSERTD(maxAlignedSize > 0);						// bug in PSL if not true

	FreeDesList();

	TInt r = KErrNone;
	do
		{
		// Allocate fragment
		r = ExpandDesList();
		if (r != KErrNone)
			{
			FreeDesList();
			break;
			}

		// Compute fragment size
		TInt c = Min(maxTransferSize, aCount);
		if (memSrc && ((aFlags & KDmaPhysAddrSrc) == 0))
			c = MaxPhysSize(aSrc, c);
		if (memDest && ((aFlags & KDmaPhysAddrDest) == 0))
			c = MaxPhysSize(aDest, c);
		if ((memSrc || memDest) && (c < aCount) && (c > maxAlignedSize))
			{
			// This is not the last fragment of a transfer to/from memory. We
			// must round down the fragment size so the next one is correctly
			// aligned.
			c = maxAlignedSize;
			}
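		// (Illustrative: with maxTransferSize 4100 and alignMask 7,
		// maxAlignedSize would be 4096; a middle fragment of 4100 bytes would
		// leave the next address misaligned, so it is trimmed to 4096.)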

		// Initialise fragment
		__KTRACE_OPT(KDMA, Kern::Printf("fragment: src=0x%08X dest=0x%08X count=%d", aSrc, aDest, c));
		iChannel.iController->InitDes(*iLastHdr, aSrc, aDest, c, aFlags, aPslInfo, iChannel.PslId());

		// Update for next iteration
		aCount -= c;
		if (memSrc)
			aSrc += c;
		if (memDest)
			aDest += c;
		}
	while (aCount > 0);

	__DMA_INVARIANT();
	return r;
	}



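// Note on iNullPtr: it always points to the pointer that terminates the
// channel's fragment chain (initially &iCurHdr). Appending a request is
// therefore O(1): the new first header is written through iNullPtr and
// iNullPtr is then retargeted at the new last header's iNext.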
EXPORT_C void DDmaRequest::Queue()
	{
	__KTRACE_OPT(KDMA, Kern::Printf("DDmaRequest::Queue thread %O", &Kern::CurrentThread()));
	__DMA_ASSERTD(iDesCount > 0);	// Not configured? Call Fragment() first!
	__DMA_ASSERTD(!iQueued);

	// append request to queue and link new descriptor list to existing one.
	iChannel.Wait();

	TUint32 req_count = iChannel.iQueuedRequests++;
	if (req_count == 0)
		{
		iChannel.Signal();
		iChannel.QueuedRequestCountChanged();
		iChannel.Wait();
		}

	if (!(iChannel.iIsrDfc & (TUint32)TDmaChannel::KCancelFlagMask))
		{
		iQueued = ETrue;
		iChannel.iReqQ.Add(&iLink);
		*iChannel.iNullPtr = iFirstHdr;
		iChannel.iNullPtr = &(iLastHdr->iNext);
		iChannel.DoQueue(*this);
		iChannel.Signal();
		}
	else
		{
		// Someone is cancelling all requests...
		req_count = --iChannel.iQueuedRequests;
		iChannel.Signal();
		if (req_count == 0)
			{
			iChannel.QueuedRequestCountChanged();
			}
		}

	__DMA_INVARIANT();
	}

EXPORT_C TInt DDmaRequest::ExpandDesList(TInt aCount)
	{
	__DMA_ASSERTD(!iQueued);
	__DMA_ASSERTD(aCount > 0);

	if (aCount > iChannel.iAvailDesCount)
		return KErrTooBig;

	iChannel.iAvailDesCount -= aCount;
	iDesCount += aCount;

	TDmac& c = *(iChannel.iController);
	c.Wait();

	if (iFirstHdr == NULL)
		{
		// handle empty list specially to simplify following loop
		iFirstHdr = iLastHdr = c.iFreeHdr;
		c.iFreeHdr = c.iFreeHdr->iNext;
		--aCount;
		}
	else
		iLastHdr->iNext = c.iFreeHdr;

	// Remove as many descriptors and headers from free pool as necessary and
	// ensure hardware descriptors are chained together.
	while (aCount-- > 0)
		{
		__DMA_ASSERTD(c.iFreeHdr != NULL);
		if (c.iCaps & TDmac::KCapsBitHwDes)
			c.ChainHwDes(*iLastHdr, *(c.iFreeHdr));
		iLastHdr = c.iFreeHdr;
		c.iFreeHdr = c.iFreeHdr->iNext;
		}

	c.Signal();

	iLastHdr->iNext = NULL;

	__DMA_INVARIANT();
	return KErrNone;
	}




EXPORT_C void DDmaRequest::FreeDesList()
	{
	__DMA_ASSERTD(!iQueued);
	if (iDesCount > 0)
		{
		iChannel.iAvailDesCount += iDesCount;
		TDmac& c = *(iChannel.iController);
		c.Wait();
		iLastHdr->iNext = c.iFreeHdr;
		c.iFreeHdr = iFirstHdr;
		c.Signal();
		iFirstHdr = iLastHdr = NULL;
		iDesCount = 0;
		}
	}


#ifdef _DEBUG

void DDmaRequest::Invariant()
	{
	iChannel.Wait();
	__DMA_ASSERTD(iChannel.IsOpened());
	__DMA_ASSERTD(0 <= iMaxTransferSize);
	__DMA_ASSERTD(0 <= iDesCount && iDesCount <= iChannel.iMaxDesCount);
	if (iDesCount == 0)
		{
		__DMA_ASSERTD(!iQueued);
		__DMA_ASSERTD(!iFirstHdr && !iLastHdr);
		}
	else
		{
		__DMA_ASSERTD(iChannel.iController->IsValidHdr(iFirstHdr));
		__DMA_ASSERTD(iChannel.iController->IsValidHdr(iLastHdr));
		}
	iChannel.Signal();
	}

#endif


//////////////////////////////////////////////////////////////////////////////
// TDmaChannel


EXPORT_C TInt TDmaChannel::StaticExtension(TInt aCmd, TAny* aArg)
	{
	return DmaChannelMgr::StaticExtension(aCmd, aArg);
	}


TDmaChannel::TDmaChannel()
	: iController(NULL),
	  iPslId(0),
	  iCurHdr(NULL),
	  iNullPtr(&iCurHdr),
	  iDfc(Dfc, NULL, 0),
	  iMaxDesCount(0),
	  iAvailDesCount(0),
	  iIsrDfc(0),
	  iReqQ(),
	  iReqCount(0),
	  iQueuedRequests(0),
	  iCancelInfo(NULL)
	{
	__DMA_INVARIANT();
	}


EXPORT_C TInt TDmaChannel::Open(const SCreateInfo& aInfo, TDmaChannel*& aChannel)
	{
	__KTRACE_OPT(KDMA, Kern::Printf("TDmaChannel::Open thread %O", &Kern::CurrentThread()));
	__DMA_ASSERTD(aInfo.iDfcQ != NULL);
	__DMA_ASSERTD(aInfo.iDfcPriority < KNumDfcPriorities);
	__DMA_ASSERTD(aInfo.iDesCount >= 1);

	aChannel = NULL;

	DmaChannelMgr::Wait();
	TDmaChannel* pC = DmaChannelMgr::Open(aInfo.iCookie);
	DmaChannelMgr::Signal();
	if (!pC)
		return KErrInUse;

	TInt r = pC->iController->ReserveSetOfDes(aInfo.iDesCount);
	if (r != KErrNone)
		{
		pC->Close();
		return r;
		}
	pC->iAvailDesCount = pC->iMaxDesCount = aInfo.iDesCount;

	new (&pC->iDfc) TDfc(&Dfc, pC, aInfo.iDfcQ, aInfo.iDfcPriority);

	aChannel = pC;

#ifdef _DEBUG
	pC->Invariant();
#endif
	__KTRACE_OPT(KDMA, Kern::Printf("opened channel %d", pC->iPslId));
	return KErrNone;
	}


EXPORT_C void TDmaChannel::Close()
	{
	__KTRACE_OPT(KDMA, Kern::Printf("TDmaChannel::Close %d", iPslId));
	__DMA_ASSERTD(IsOpened());
	__DMA_ASSERTD(IsQueueEmpty());
	__DMA_ASSERTD(iReqCount == 0);

	__DMA_ASSERTD(iQueuedRequests == 0);

	// descriptor leak? bug in request code
	__DMA_ASSERTD(iAvailDesCount == iMaxDesCount);

	iController->ReleaseSetOfDes(iMaxDesCount);
	iAvailDesCount = iMaxDesCount = 0;

	DmaChannelMgr::Wait();
	DmaChannelMgr::Close(this);
	iController = NULL;
	DmaChannelMgr::Signal();

	__DMA_INVARIANT();
	}


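// Cancellation protocol (summary): setting KCancelFlagMask in iIsrDfc stops
// ISRs from posting the DFC; the DMAC is then stopped and the request queue
// flushed. If the DFC runs in another thread, a TDmaCancelInfo is linked in
// and this thread blocks on its fast semaphore until DoDfc() signals it.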
EXPORT_C void TDmaChannel::CancelAll()
	{
	__KTRACE_OPT(KDMA, Kern::Printf("TDmaChannel::CancelAll thread %O channel - %d",
									&Kern::CurrentThread(), iPslId));
	__DMA_ASSERTD(IsOpened());

	NThread* nt = NKern::CurrentThread();
	TBool wait = FALSE;
	TDmaCancelInfo c;
	TDmaCancelInfo* waiters = 0;

	NKern::ThreadEnterCS();
	Wait();
	const TUint32 req_count_before = iQueuedRequests;
	NThreadBase* dfcnt = iDfc.Thread();
	__e32_atomic_store_ord32(&iIsrDfc, (TUint32)KCancelFlagMask);
	// ISRs after this point will not post a DFC; however, a DFC may already be queued or running, or both.
	if (!IsQueueEmpty())
		{
		// There is a transfer in progress.  It may complete before the DMAC
		// has stopped, but the resulting ISR will not post a DFC.
		// ISR should not happen after this function returns.
		iController->StopTransfer(*this);

		ResetStateMachine();

		// Clean-up the request queue.
		SDblQueLink* pL;
		while ((pL = iReqQ.GetFirst()) != NULL)
			{
			iQueuedRequests--;
			DDmaRequest* pR = _LOFF(pL, DDmaRequest, iLink);
			pR->OnDeque();
			}
		}
	if (!dfcnt || dfcnt==nt)
		{
		// no DFC queue or DFC runs in this thread, so just cancel it and we're finished
		iDfc.Cancel();

		// if other calls to CancelAll() are waiting for the DFC, release them here
		waiters = iCancelInfo;
		iCancelInfo = 0;

		// reset the ISR count
		__e32_atomic_store_rel32(&iIsrDfc, 0);
		}
	else
		{
		// DFC runs in another thread. Make sure it's queued and then wait for it to run.
		if (iCancelInfo)
			c.InsertBefore(iCancelInfo);
		else
			iCancelInfo = &c;
		wait = TRUE;
		iDfc.Enque();
		}
	const TUint32 req_count_after = iQueuedRequests;
	Signal();
	if (waiters)
		waiters->Signal();
	if (wait)
		NKern::FSWait(&c.iSem);
	NKern::ThreadLeaveCS();

	// Only call PSL if there were requests queued when we entered AND there
	// are now no requests left on the queue.
	if ((req_count_before != 0) && (req_count_after == 0))
		{
		QueuedRequestCountChanged();
		}

	__DMA_INVARIANT();
	}


/**
 DFC callback function (static member).
 */

void TDmaChannel::Dfc(TAny* aArg)
	{
	((TDmaChannel*)aArg)->DoDfc();
	}


void TDmaChannel::DoDfc()
	{
	Wait();

	// Atomically fetch and reset the number of DFC queued by ISR and the error
	// flag. Leave the cancel flag alone for now.
	const TUint32 w = __e32_atomic_and_ord32(&iIsrDfc, (TUint32)KCancelFlagMask);
	TUint32 count = w & KDfcCountMask;
	const TBool error = w & (TUint32)KErrorFlagMask;
	TBool stop = w & (TUint32)KCancelFlagMask;
	__DMA_ASSERTD(count>0 || stop);
	const TUint32 req_count_before = iQueuedRequests;
	TUint32 req_count_after = 0;

	while(count && !stop)
		{
		--count;

		// If an error occurred it must have been reported on the last interrupt since transfers are
		// suspended after an error.
		DDmaRequest::TResult res = (count==0 && error) ? DDmaRequest::EError : DDmaRequest::EOk;
		__DMA_ASSERTD(!iReqQ.IsEmpty());
		DDmaRequest* pCompletedReq = NULL;
		DDmaRequest* pCurReq = _LOFF(iReqQ.First(), DDmaRequest, iLink);
		DDmaRequest::TCallback cb = 0;
		TAny* arg = 0;

		if (res == DDmaRequest::EOk)
			{
			// Update state machine, current fragment, completed fragment and
			// tell DMAC to transfer next fragment if necessary.
			SDmaDesHdr* pCompletedHdr = NULL;
			DoDfc(*pCurReq, pCompletedHdr);

			// If just completed last fragment from current request, switch to next
			// request (if any).
			if (pCompletedHdr == pCurReq->iLastHdr)
				{
				pCompletedReq = pCurReq;
				pCurReq->iLink.Deque();
				iQueuedRequests--;
				if (iReqQ.IsEmpty())
					iNullPtr = &iCurHdr;
				pCompletedReq->OnDeque();
				}
			}
		else if (res == DDmaRequest::EError)
			pCompletedReq = pCurReq;
		else
			__DMA_CANT_HAPPEN();
		if (pCompletedReq)
			{
			cb = pCompletedReq->iCb;
			arg = pCompletedReq->iCbArg;
			Signal();
			__KTRACE_OPT(KDMA, Kern::Printf("notifying DMA client result=%d", res));
			(*cb)(res,arg);
			Wait();
			}
		if (pCompletedReq || Flash())
			stop = __e32_atomic_load_acq32(&iIsrDfc) & (TUint32)KCancelFlagMask;
		}

	// Some interrupts may be missed (double-buffer and scatter-gather
	// controllers only) if two or more transfers complete while interrupts are
	// disabled in the CPU. If this happens, the framework will go out of sync
	// and leave some orphaned requests in the queue.
	//
	// To ensure correctness we handle this case here by checking that the request
	// queue is empty when all transfers have completed and, if not, cleaning up
	// and notifying the client of the completion of the orphaned requests.
	//
	// Note that if some interrupts are missed and the controller raises an
	// error while transferring a subsequent fragment, the error will be reported
	// on a fragment which was successfully completed.  There is no easy solution
	// to this problem, but this is okay as the only possible action following a
	// failure is to flush the whole queue.
	if (stop)
		{
		TDmaCancelInfo* waiters = iCancelInfo;
		iCancelInfo = 0;

		// make sure DFC doesn't run again until a new request completes
		iDfc.Cancel();

		// reset the ISR count - new requests can now be processed
		__e32_atomic_store_rel32(&iIsrDfc, 0);

		req_count_after = iQueuedRequests;
		Signal();

		// release threads doing CancelAll()
		waiters->Signal();
		}
	else if (!error && !iDfc.Queued() && !iReqQ.IsEmpty() && iController->IsIdle(*this))
		{
		__KTRACE_OPT(KDMA, Kern::Printf("Missed interrupt(s) - draining request queue"));
		ResetStateMachine();

		// Move orphaned requests to temporary queue so channel queue can
		// accept new requests.
		SDblQue q;
		q.MoveFrom(&iReqQ);

		SDblQueLink* pL;
		while ((pL = q.GetFirst()) != NULL)
			{
			iQueuedRequests--;
			DDmaRequest* pR = _LOFF(pL, DDmaRequest, iLink);
			__KTRACE_OPT(KDMA, Kern::Printf("Removing request from queue and notifying client"));
			pR->OnDeque();
			DDmaRequest::TCallback cb = pR->iCb;
			TAny* arg = pR->iCbArg;
			if (cb)
				{
				Signal();
				(*cb)(DDmaRequest::EOk, arg);
				Wait();
				}
			}
		req_count_after = iQueuedRequests;
		Signal();
		}
	else
		{
		req_count_after = iQueuedRequests;
		Signal();
		}

	// Only call PSL if there were requests queued when we entered AND there
	// are now no requests left on the queue (after also having executed all
	// client callbacks).
	if ((req_count_before != 0) && (req_count_after == 0))
		{
		QueuedRequestCountChanged();
		}

	__DMA_INVARIANT();
	}


/** Reset state machine only, request queue is unchanged */

void TDmaChannel::ResetStateMachine()
	{
	DoCancelAll();
	iCurHdr = NULL;
	iNullPtr = &iCurHdr;
	}


/** Unlink the last item of a LLI chain from the next chain.
	Default implementation does nothing. This is overridden by scatter-gather channels. */

void TDmaChannel::DoUnlink(SDmaDesHdr& /*aHdr*/)
	{
	}


/** PSL may override */
void TDmaChannel::QueuedRequestCountChanged()
	{
#ifdef _DEBUG
	Wait();
	__KTRACE_OPT(KDMA,
				 Kern::Printf("TDmaChannel::QueuedRequestCountChanged() %d",
							  iQueuedRequests));
	__DMA_ASSERTA(iQueuedRequests >= 0);
	Signal();
#endif
	}


#ifdef _DEBUG

void TDmaChannel::Invariant()
	{
	Wait();

	__DMA_ASSERTD(iReqCount >= 0);
	// iNullPtr should always point to the NULL pointer ending the fragment queue
	__DMA_ASSERTD(*iNullPtr == NULL);

	__DMA_ASSERTD(0 <= iAvailDesCount && iAvailDesCount <= iMaxDesCount);

	__DMA_ASSERTD(iCurHdr == NULL || iController->IsValidHdr(iCurHdr));

	if (IsOpened())
		{
		__DMA_ASSERTD((iCurHdr && !IsQueueEmpty()) || (!iCurHdr && IsQueueEmpty()));
		if (iCurHdr == NULL)
			__DMA_ASSERTD(iNullPtr == &iCurHdr);
		}
	else
		{
		__DMA_ASSERTD(iCurHdr == NULL);
		__DMA_ASSERTD(iNullPtr == &iCurHdr);
		__DMA_ASSERTD(IsQueueEmpty());
		}

	Signal();
	}

#endif

//////////////////////////////////////////////////////////////////////////////
// TDmaSbChannel

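// Single-buffer channel: the controller accepts one descriptor at a time, so
// each fragment completion (DoDfc) explicitly programs the next fragment.
// iTransferring tracks whether the hardware currently holds a descriptor.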
void TDmaSbChannel::DoQueue(DDmaRequest& /*aReq*/)
	{
	if (!iTransferring)
		{
		iController->Transfer(*this, *iCurHdr);
		iTransferring = ETrue;
		}
	}


void TDmaSbChannel::DoCancelAll()
	{
	__DMA_ASSERTD(iTransferring);
	iTransferring = EFalse;
	}


void TDmaSgChannel::DoUnlink(SDmaDesHdr& aHdr)
	{
	iController->UnlinkHwDes(*this, aHdr);
	}


void TDmaSbChannel::DoDfc(DDmaRequest& /*aCurReq*/, SDmaDesHdr*& aCompletedHdr)
	{
	__DMA_ASSERTD(iTransferring);
	aCompletedHdr = iCurHdr;
	iCurHdr = iCurHdr->iNext;
	if (iCurHdr != NULL)
		iController->Transfer(*this, *iCurHdr);
	else
		iTransferring = EFalse;
	}


//////////////////////////////////////////////////////////////////////////////
// TDmaDbChannel

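// Double-buffer channel: the controller holds up to two descriptors, so the
// state machine keeps one fragment in flight and one staged. ETransferringLast
// means the fragment in flight is the final one and nothing is staged.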
void TDmaDbChannel::DoQueue(DDmaRequest& aReq)
	{
	switch (iState)
		{
	case EIdle:
		iController->Transfer(*this, *iCurHdr);
		if (iCurHdr->iNext)
			{
			iController->Transfer(*this, *(iCurHdr->iNext));
			iState = ETransferring;
			}
		else
			iState = ETransferringLast;
		break;
	case ETransferring:
		// nothing to do
		break;
	case ETransferringLast:
		iController->Transfer(*this, *(aReq.iFirstHdr));
		iState = ETransferring;
		break;
	default:
		__DMA_CANT_HAPPEN();
		}
	}


void TDmaDbChannel::DoCancelAll()
	{
	iState = EIdle;
	}


void TDmaDbChannel::DoDfc(DDmaRequest& /*aCurReq*/, SDmaDesHdr*& aCompletedHdr)
	{
	aCompletedHdr = iCurHdr;
	iCurHdr = iCurHdr->iNext;
	switch (iState)
		{
	case ETransferringLast:
		iState = EIdle;
		break;
	case ETransferring:
		if (iCurHdr->iNext == NULL)
			iState = ETransferringLast;
		else
			iController->Transfer(*this, *(iCurHdr->iNext));
		break;
	default:
		__DMA_CANT_HAPPEN();
		}
	}


//////////////////////////////////////////////////////////////////////////////
// TDmaSgChannel

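// Scatter-gather channel: hardware descriptors are already chained together,
// so a new request is either appended to the running chain (AppendHwDes) or,
// if the channel is idle, started directly. Completion is reported per
// request: DoDfc() treats the request's last header as the completed one.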
void TDmaSgChannel::DoQueue(DDmaRequest& aReq)
	{
	if (iTransferring)
		{
		__DMA_ASSERTD(!aReq.iLink.Alone());
		DDmaRequest* pReqPrev = _LOFF(aReq.iLink.iPrev, DDmaRequest, iLink);
		iController->AppendHwDes(*this, *(pReqPrev->iLastHdr), *(aReq.iFirstHdr));
		}
	else
		{
		iController->Transfer(*this, *(aReq.iFirstHdr));
		iTransferring = ETrue;
		}
	}


void TDmaSgChannel::DoCancelAll()
	{
	__DMA_ASSERTD(iTransferring);
	iTransferring = EFalse;
	}


void TDmaSgChannel::DoDfc(DDmaRequest& aCurReq, SDmaDesHdr*& aCompletedHdr)
	{
	__DMA_ASSERTD(iTransferring);
	aCompletedHdr = aCurReq.iLastHdr;
	iCurHdr = aCompletedHdr->iNext;
	iTransferring = (iCurHdr != NULL);
	}