1.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
1.2 +++ b/os/kernelhwsrv/kernel/eka/drivers/usbc/usbdma.cpp Fri Jun 15 03:10:57 2012 +0200
1.3 @@ -0,0 +1,983 @@
1.4 +// Copyright (c) 2000-2009 Nokia Corporation and/or its subsidiary(-ies).
1.5 +// All rights reserved.
1.6 +// This component and the accompanying materials are made available
1.7 +// under the terms of the License "Eclipse Public License v1.0"
1.8 +// which accompanies this distribution, and is available
1.9 +// at the URL "http://www.eclipse.org/legal/epl-v10.html".
1.10 +//
1.11 +// Initial Contributors:
1.12 +// Nokia Corporation - initial contribution.
1.13 +//
1.14 +// Contributors:
1.15 +//
1.16 +// Description:
1.17 +// e32\drivers\usbc\usbdma.cpp
1.18 +// LDD for USB Device driver stack:
1.19 +// Management of DMA-capable data buffers.
1.20 +//
1.21 +//
1.22 +
1.23 +/**
1.24 + @file usbdma.cpp
1.25 + @internalTechnology
1.26 +*/
1.27 +
1.28 +#include <drivers/usbc.h>
1.29 +
1.30 +
#if defined(_DEBUG)
// Fault category passed to Kern::Fault() by the debug-build assertions below.
static const char KUsbPanicLdd[] = "USB LDD";
#endif
1.34 +
1.35 +
// Constructor. Selects the DMA buffer geometry (per-buffer size and buffer
// count) for the endpoint described by aEndpointInfo and zeroes the per-buffer
// bookkeeping arrays. No memory is allocated here; buffer addresses are
// supplied later via SetBufferAddr().
TDmaBuf::TDmaBuf(TUsbcEndpointInfo* aEndpointInfo, TInt aBandwidthPriority)
	: iBufBasePtr(NULL),
	  iCurrentDrainingBuffer(NULL),
	  iCurrentPacket(0),
	  iCurrentPacketIndexArray(NULL),
	  iCurrentPacketSizeArray(NULL)
	{
	iMaxPacketSize = aEndpointInfo->iSize;
	iEndpointType = aEndpointInfo->iType;

	// Buffer size & count depend on the endpoint's transfer type.
	switch (aEndpointInfo->iType)
		{
	case KUsbEpTypeControl:
		iBufSz = KUsbcDmaBufSzControl;
		iNumberofBuffers = KUsbcDmaBufNumControl;
		break;
	case KUsbEpTypeIsochronous:
		iBufSz = KUsbcDmaBufSzIsochronous;
		iNumberofBuffers = KUsbcDmaBufNumIsochronous;
		break;
	case KUsbEpTypeBulk:
		{
		// For Bulk endpoints the buffer size is chosen from the client's
		// bandwidth priority: low nibble selects the OUT size, high nibble
		// selects the IN size.
		if (aEndpointInfo->iDir == KUsbEpDirOut)
			{
			const TInt priorityOUT = aBandwidthPriority & 0x0f;
			iBufSz = KUsbcDmaBufSizesBulkOUT[priorityOUT];
			}
		else
			{
			const TInt priorityIN = (aBandwidthPriority >> 4) & 0x0f;
			iBufSz = KUsbcDmaBufSizesBulkIN[priorityIN];
			}
		iNumberofBuffers = KUsbcDmaBufNumBulk;
		}
		break;
	case KUsbEpTypeInterrupt:
		iBufSz = KUsbcDmaBufSzInterrupt;
		iNumberofBuffers = KUsbcDmaBufNumInterrupt;
		break;
	default:
		// Unknown endpoint type: leave this object with no buffers at all.
		iBufSz = 0;
		iNumberofBuffers = 0;
		}

	if (aEndpointInfo->iDir == KUsbEpDirIn)
		{
		iNumberofBuffers = 1;						// IN endpoints only have 1 buffer
		}

	// Clear every slot of the fixed-size bookkeeping arrays, not just the
	// iNumberofBuffers in use.
	for (TInt i = 0; i < KUsbcDmaBufNumMax; i++)
		{
		// Buffer logical addresses (pointers)
		iBuffers[i] = NULL;
		// Buffer physical addresses
		iBufferPhys[i] = 0;
		// Packet indexes base array
		iPacketIndex[i] = NULL;
		// Packet sizes base array
		iPacketSize[i] = NULL;
		}
	}
1.97 +
1.98 +
// Second-phase construction: carves the shared packet-info storage into
// per-buffer packet index / packet size arrays (OUT endpoints only) and
// resets all state via Flush(). Always returns KErrNone.
TInt TDmaBuf::Construct(TUsbcEndpointInfo* aEndpointInfo)
	{
	if (aEndpointInfo->iDir != KUsbEpDirIn)
		{
		// OUT endpoints record, per buffer, where each received packet starts
		// and how long it is. (IN endpoints don't need a packet array.)

		// At most 2 packets (clump of max packet size packets) + possible zlp
		TUsbcPacketArray* bufPtr = iPacketInfoStorage;
		// this divides up the packet indexing & packet size array over the number of buffers
		__KTRACE_OPT(KUSB, Kern::Printf("TDmaBuf::Construct() array base=0x%08x", bufPtr));
		for (TInt i = 0; i < iNumberofBuffers; i++)
			{
			// Each buffer gets KUsbcDmaBufMaxPkts index slots followed by
			// KUsbcDmaBufMaxPkts size slots out of the common storage block.
			iPacketIndex[i] = bufPtr;
			bufPtr += KUsbcDmaBufMaxPkts;
			iPacketSize[i] = bufPtr;
			bufPtr += KUsbcDmaBufMaxPkts;
			__KTRACE_OPT(KUSB, Kern::Printf("TDmaBuf::Construct() packetIndex[%d]=0x%08x packetSize[%d]=0x%08x",
											i, iPacketIndex[i], i, iPacketSize[i]));
			}
		}
	else
		{
		__KTRACE_OPT(KUSB, Kern::Printf("TDmaBuf::Construct() IN endpoint"));
		}
	// Start from a known-empty state.
	Flush();
	return KErrNone;
	}
1.126 +
1.127 +
1.128 +TDmaBuf::~TDmaBuf()
1.129 + {
1.130 + __KTRACE_OPT(KUSB, Kern::Printf("TDmaBuf::~TDmaBuf()"));
1.131 + }
1.132 +
1.133 +TInt TDmaBuf::BufferTotalSize() const
1.134 + {
1.135 + return iBufSz * iNumberofBuffers;
1.136 + }
1.137 +
1.138 +TInt TDmaBuf::BufferSize() const
1.139 + {
1.140 + return iBufSz;
1.141 + }
1.142 +
1.143 +TInt TDmaBuf::SetBufferAddr(TInt aBufInd, TUint8* aBufAddr)
1.144 + {
1.145 + __ASSERT_DEBUG((aBufInd < iNumberofBuffers),
1.146 + Kern::Fault(KUsbPanicLdd, __LINE__));
1.147 + iDrainable[aBufInd] = iCanBeFreed[aBufInd] = EFalse;
1.148 + iBuffers[aBufInd] = aBufAddr;
1.149 + iBufferPhys[aBufInd] = Epoc::LinearToPhysical((TLinAddr)aBufAddr);
1.150 + __KTRACE_OPT(KUSB, Kern::Printf("TDmaBuf::SetBufferAddr() iBuffers[%d]=0x%08x", aBufInd, iBuffers[aBufInd]));
1.151 + return KErrNone;
1.152 + }
1.153 +
1.154 +TInt TDmaBuf::BufferNumber() const
1.155 + {
1.156 + return iNumberofBuffers;
1.157 + }
1.158 +
1.159 +void TDmaBuf::SetMaxPacketSize(TInt aSize)
1.160 + {
1.161 + iMaxPacketSize = aSize;
1.162 + }
1.163 +
1.164 +
1.165 +void TDmaBuf::Flush()
1.166 + {
1.167 + __KTRACE_OPT(KUSB, Kern::Printf("TDmaBuf::Flush %x", this));
1.168 + iRxActive = EFalse;
1.169 + iTxActive = EFalse;
1.170 + iExtractOffset = 0;
1.171 + iTotalRxBytesAvail = 0;
1.172 + iTotalRxPacketsAvail = 0;
1.173 + iCurrentDrainingBufferIndex = KUsbcInvalidBufferIndex;
1.174 + iCurrentFillingBufferIndex = 0;
1.175 + iDrainQueueIndex = KUsbcInvalidDrainQueueIndex;
1.176 + for (TInt i = 0; i < KUsbcDmaBufNumMax; i++)
1.177 + {
1.178 + iDrainable[i] = EFalse;
1.179 + iCanBeFreed[i] = EFalse;
1.180 + iNumberofBytesRx[i] = 0;
1.181 + iNumberofPacketsRx[i] = 0;
1.182 + iError[i] = KErrGeneral;
1.183 + iDrainQueue[i] = KUsbcInvalidBufferIndex;
1.184 +#if defined(USBC_LDD_BUFFER_TRACE)
1.185 + iFillingOrderArray[i] = 0;
1.186 + iNumberofBytesRxRemain[i] = 0;
1.187 + iNumberofPacketsRxRemain[i] = 0;
1.188 +#endif
1.189 + }
1.190 + // Drain queue is 1 oversized
1.191 + iDrainQueue[KUsbcDmaBufNumMax] = KUsbcInvalidBufferIndex;
1.192 +
1.193 +#if defined(USBC_LDD_BUFFER_TRACE)
1.194 + iFillingOrder = 0;
1.195 + iDrainingOrder = 0;
1.196 +#endif
1.197 + }
1.198 +
1.199 +
1.200 +void TDmaBuf::RxSetActive()
1.201 + {
1.202 + __KTRACE_OPT(KUSB, Kern::Printf("TDmaBuf::RxSetActive %x", this));
1.203 + iRxActive = ETrue;
1.204 + }
1.205 +
1.206 +
1.207 +void TDmaBuf::RxSetInActive()
1.208 + {
1.209 + __KTRACE_OPT(KUSB, Kern::Printf("TDmaBuf::RxSetInActive %x", this));
1.210 + iRxActive = EFalse;
1.211 + }
1.212 +
1.213 +
1.214 +TBool TDmaBuf::RxIsActive()
1.215 + {
1.216 + return iRxActive;
1.217 + }
1.218 +
1.219 +
1.220 +void TDmaBuf::TxSetActive()
1.221 + {
1.222 + iTxActive = ETrue;
1.223 + }
1.224 +
1.225 +
1.226 +void TDmaBuf::TxSetInActive()
1.227 + {
1.228 + iTxActive = EFalse;
1.229 + }
1.230 +
1.231 +
1.232 +TBool TDmaBuf::TxIsActive()
1.233 + {
1.234 + return iTxActive;
1.235 + }
1.236 +
1.237 +
1.238 +/**************************** Rx DMA Buffer Access *************************/
1.239 +
1.240 +void TDmaBuf::ModifyTotalRxBytesAvail(TInt aVal)
1.241 + {
1.242 + iTotalRxBytesAvail += aVal;
1.243 + }
1.244 +
1.245 +
1.246 +void TDmaBuf::ModifyTotalRxPacketsAvail(TInt aVal)
1.247 + {
1.248 + iTotalRxPacketsAvail += aVal;
1.249 + }
1.250 +
1.251 +
// Consume the current Rx packet and step to the next one, moving on to the
// next drainable buffer when the current buffer is exhausted. Returns EFalse
// when no further buffer is available to drain.
TBool TDmaBuf::AdvancePacket()
	{
	ModifyTotalRxPacketsAvail(-1);
	TBool r = ETrue;
	__ASSERT_DEBUG((iCurrentDrainingBufferIndex >= 0),
				   Kern::Fault(KUsbPanicLdd, __LINE__));
	if (++iCurrentPacket >= iNumberofPacketsRx[iCurrentDrainingBufferIndex])
		{
		r = NextDrainableBuffer();
		}
	// A fresh packet always starts with nothing yet extracted from it.
	iExtractOffset = 0;
	__ASSERT_DEBUG((iCurrentDrainingBufferIndex == KUsbcInvalidBufferIndex) ||
				   (iCurrentPacket < KUsbcDmaBufMaxPkts),
				   Kern::Fault(KUsbPanicLdd, __LINE__));
	return r;
	}
1.268 +
1.269 +
1.270 +TInt TDmaBuf::PeekNextPacketSize()
1.271 + {
1.272 + TUint pkt = iCurrentPacket;
1.273 + TInt index = iCurrentDrainingBufferIndex;
1.274 + TInt size = -1;
1.275 + if (pkt >= iNumberofPacketsRx[index])
1.276 + {
1.277 + index = PeekNextDrainableBuffer();
1.278 + pkt = 0;
1.279 + }
1.280 +
1.281 + if ((index != KUsbcInvalidBufferIndex) && iNumberofPacketsRx[index])
1.282 + {
1.283 + const TUsbcPacketArray* sizeArray = iPacketSize[index];
1.284 + size = (TInt)sizeArray[pkt];
1.285 + }
1.286 +
1.287 + __ASSERT_DEBUG((iCurrentDrainingBufferIndex == KUsbcInvalidBufferIndex) ||
1.288 + (iCurrentPacket < KUsbcDmaBufMaxPkts),
1.289 + Kern::Fault(KUsbPanicLdd, __LINE__));
1.290 + return size;
1.291 + }
1.292 +
1.293 +
// Error code to report for the Rx packet currently being drained.
inline TInt TDmaBuf::GetCurrentError()
	{
	// USB bus errors are v.rare. To avoid having an error code attached to every packet since
	// almost every errorcode will be KErrNone, we have a single error code per buffer.
	// If the error code is != KErrNone then it refers to the LAST packet in the buffer,
	// so the stored error is only surfaced when draining that last packet.
	TInt errorCode = KErrNone;
	// Check the index: it must not equal the negative (-1) value defined in
	// KUsbcInvalidBufferIndex, i.e. a buffer must currently be draining.
	__ASSERT_DEBUG((iCurrentDrainingBufferIndex >= 0),
				   Kern::Fault(KUsbPanicLdd, __LINE__));

	if (iError[iCurrentDrainingBufferIndex] != KErrNone)
		{
		// See if we are at the last packet
		if ((iCurrentPacket + 1) == iNumberofPacketsRx[iCurrentDrainingBufferIndex])
			{
			errorCode = iError[iCurrentDrainingBufferIndex];
			}
		}
	return errorCode;
	}
1.315 +
1.316 +
1.317 +// used to decide whether a client read can complete straight away
1.318 +TBool TDmaBuf::IsReaderEmpty()
1.319 + {
1.320 + __KTRACE_OPT(KUSB, Kern::Printf("TDmaBuf::IsReaderEmpty iTotalRxPacketsAvail=%d",
1.321 + iTotalRxPacketsAvail));
1.322 + return (iTotalRxPacketsAvail == 0);
1.323 + }
1.324 +
1.325 +
// Called when the controller completes an Rx transfer into the current
// filling buffer: records how many bytes/packets arrived (and the buffer's
// error code) and queues the buffer for draining by the client.
void TDmaBuf::ReadXferComplete(TInt aNoBytesRecv, TInt aNoPacketsRecv, TInt aErrorCode)
	{
	// Adjust pending packet
	if ((aNoBytesRecv == 0) && (aErrorCode != KErrNone))
		{
		// Failed transfer with no data: nothing to drain.
		// Make the buffer available for reuse
		iDrainable[iCurrentFillingBufferIndex] = EFalse;
		return;
		}

	ModifyTotalRxBytesAvail(aNoBytesRecv);
	ModifyTotalRxPacketsAvail(aNoPacketsRecv);
	iNumberofBytesRx[iCurrentFillingBufferIndex] = aNoBytesRecv;
	iNumberofPacketsRx[iCurrentFillingBufferIndex] = aNoPacketsRecv;

#if defined(USBC_LDD_BUFFER_TRACE)
	iNumberofBytesRxRemain[iCurrentFillingBufferIndex] = aNoBytesRecv;
	iNumberofPacketsRxRemain[iCurrentFillingBufferIndex] = aNoPacketsRecv;
#endif

	__KTRACE_OPT(KUSB, Kern::Printf("TDmaBuf::ReadXferComplete 2 # of bytes=%d # of packets=%d",
									iTotalRxBytesAvail, iTotalRxPacketsAvail));
	// The buffer now holds data the client may drain.
	iDrainable[iCurrentFillingBufferIndex] = ETrue;
	iError[iCurrentFillingBufferIndex] = aErrorCode;
	AddToDrainQueue(iCurrentFillingBufferIndex);
	// If nothing was being drained, promote this buffer immediately.
	if (iCurrentDrainingBufferIndex == KUsbcInvalidBufferIndex)
		{
		NextDrainableBuffer();
		}
	}
1.356 +
1.357 +
// Hand the controller the next buffer to fill with Rx data. On success the
// out-parameters receive the buffer's linear & physical addresses, its packet
// index/size arrays to be filled in by the controller, and its length.
// Returns KErrInUse if a read is already outstanding, or KErrNoMemory if
// every buffer still holds undrained data.
TInt TDmaBuf::RxGetNextXfer(TUint8*& aBufferAddr, TUsbcPacketArray*& aIndexArray,
							TUsbcPacketArray*& aSizeArray, TInt& aLength, TPhysAddr& aBufferPhys)
	{
	__KTRACE_OPT(KUSB, Kern::Printf("TDmaBuf::RxGetNextXfer 1"));
	if (RxIsActive())
		{
		__KTRACE_OPT(KUSB, Kern::Printf(" ---> RxIsActive, returning"));
		return KErrInUse;
		}

	__KTRACE_OPT(KUSB, Kern::Printf("TDmaBuf::RxGetNextXfer Current buffer=%d",
									iCurrentFillingBufferIndex));
	if (iDrainable[iCurrentFillingBufferIndex])
		{
		// If the controller refused the last read request, then the current buffer will still be marked
		// as !Drainable, because the controller never completed the read to the ldd. and therefore the buffer
		// can be reused.
		if (!NextFillableBuffer())
			{
			return KErrNoMemory;
			}
		}

	__KTRACE_OPT(KUSB, Kern::Printf("TDmaBuf::RxGetNextXfer New buffer=%d",
									iCurrentFillingBufferIndex));
	aBufferAddr = iBuffers[iCurrentFillingBufferIndex];
	aBufferPhys = iBufferPhys[iCurrentFillingBufferIndex];
	aIndexArray = iPacketIndex[iCurrentFillingBufferIndex];
	aSizeArray = iPacketSize[iCurrentFillingBufferIndex];
	aLength = iBufSz;

#if defined(USBC_LDD_BUFFER_TRACE)
	// Debug builds track the order in which buffers are handed out for fill,
	// so draining order can be cross-checked later.
	iFillingOrderArray[iCurrentFillingBufferIndex] = ++iFillingOrder;
#endif

	return KErrNone;
	}
1.395 +
1.396 +
// Copy (the remainder of) the current Rx packet into the client's buffer —
// at most one max-packet-size chunk of a larger DMA 'super-packet' — and
// return the buffer's error code when this was its last packet. Returns
// KErrNotFound when no packet is available.
// NOTE(review): aLength is not referenced in this function; delivery is
// bounded by the packet size only. The comment below requires the client
// buffer to be at least max-packet-size bytes — confirm callers enforce that.
TInt TDmaBuf::RxCopyPacketToClient(DThread* aThread, TClientBuffer *aTcb, TInt aLength)
	{
	__KTRACE_OPT(KUSB, Kern::Printf("TDmaBuf::RxCopyPacketToClient 1"));

#if defined(USBC_LDD_BUFFER_TRACE)
	// Debug cross-check: the global byte/packet counters must agree with the
	// per-buffer remainders.
	const TInt numPkts = NoRxPackets();
	const TInt numPktsAlt = NoRxPacketsAlt();
	const TInt numBytes = RxBytesAvailable();
	const TInt numBytesAlt = NoRxBytesAlt();

	if (numPkts != numPktsAlt)
		{
		Kern::Printf(
			"TDmaBuf::RxCopyPacketToClient: Error: #pkts mismatch global=%d actual=%d",
			numPkts, numPktsAlt);
		}
	if (numBytes != numBytesAlt)
		{
		Kern::Printf(
			"TDmaBuf::RxCopyPacketToClient: Error: #bytes mismatch global=%d actual=%d",
			numBytes, numBytesAlt);
		}
	if ((numPkts == 0) && (numBytes !=0))
		{
		Kern::Printf(
			"TDmaBuf::RxCopyPacketToClient: Error: global bytes & pkts mismatch pkts=%d bytes=%d",
			numPkts, numBytes);
		}
	if ((numPktsAlt == 0) && (numBytesAlt !=0))
		{
		Kern::Printf(
			"TDmaBuf::RxCopyPacketToClient: Error: actual bytes & pkts mismatch pkts=%d bytes=%d",
			numPktsAlt, numBytesAlt);
		}
#endif

	if (!NoRxPackets())
		return KErrNotFound;

	__KTRACE_OPT(KUSB, Kern::Printf("TDmaBuf::RxCopyPacketToClient 2"));
	// the next condition should be true because we have some packets available
	// coverity[var_tested_neg]
	if (iCurrentDrainingBufferIndex == KUsbcInvalidBufferIndex)
		{
		// Marked as Coverity "Intentional" as the member variable
		// iCurrentDrainingBufferIndex is intentionally negative, from previous
		// initialization to KUsbcInvalidBufferIndex (which equals -1).
		if (!NextDrainableBuffer())
			return KErrNotFound;
		}

	__ASSERT_DEBUG((iCurrentDrainingBufferIndex >= 0 ),
				   Kern::Fault(KUsbPanicLdd, __LINE__));

	if (!iDrainable[iCurrentDrainingBufferIndex])
		return KErrNotFound;

	// Calculate copy-from address & adjust for the fact that
	// some data may have already been read from the packet
	TUint8* logicalSrc = iCurrentDrainingBuffer + iCurrentPacketIndexArray[iCurrentPacket] + iExtractOffset;
	TInt packetSz = iCurrentPacketSizeArray[iCurrentPacket];
	TInt thisPacketSz = packetSz - iExtractOffset;
	TInt errorCode;
	// try and sort out what a "packet" might mean.
	// in a multi-packet dma environment, we might see super-packets
	// i.e. we might just see one packet, maybe 4K or so long, made of lots of small packets
	// Since we don't know where the packet boundaries will be, we have to assume that
	// any 'packet' larger than the max packet size of the ep is, in fact, a conglomeration
	// of smaller packets. However, for the purposes of the packet count, this is still regarded
	// as a single packet and the packet count only decremented when it is consumed.
	// As before, if the user fails to read an entire packet out then the next packet is moved onto anyway
	// To be safe the user must always supply a buffer of at least max packet size bytes.
	if (thisPacketSz > iMaxPacketSize)
		{
		// Multiple packets left in buffer
		// calculate number of bytes to end of packet
		if (iEndpointType == KUsbEpTypeBulk)
			{
			// Bulk max packet size is a power of two, so masking is cheaper
			// than the modulo used for the other endpoint types.
			thisPacketSz = iMaxPacketSize - (iExtractOffset & (iMaxPacketSize - 1));
			}
		else
			{
			thisPacketSz = iMaxPacketSize - (iExtractOffset % iMaxPacketSize);
			}
		errorCode = KErrNone;
		}
	else
		{
		errorCode = GetCurrentError();	// single packet left
		}

	iExtractOffset += thisPacketSz;	// iExtractOffset is now at the end of the real or notional packet

	ModifyTotalRxBytesAvail(-thisPacketSz);
#if defined(USBC_LDD_BUFFER_TRACE)
	iNumberofBytesRxRemain[iCurrentDrainingBufferIndex] -= thisPacketSz;
#endif
	// this can only be untrue if the "packet" is a conglomeration of smaller packets:
	if (iExtractOffset == packetSz)
		{
		// packet consumed, advance to next packet in buffer
#if defined(USBC_LDD_BUFFER_TRACE)
		iNumberofPacketsRxRemain[iCurrentDrainingBufferIndex] -= 1;
#endif
		AdvancePacket();
		}

	// Write the chunk into the client buffer at offset 0.
	TPtrC8 des(logicalSrc, thisPacketSz);
	TInt r=Kern::ThreadBufWrite(aThread, aTcb, des, 0, 0, aThread);
	if (r == KErrNone)
		{
		r = errorCode;
		}
	__KTRACE_OPT(KUSB, Kern::Printf("TDmaBuf::RxCopyPacketToClient 3"));

	FreeDrainedBuffers();

	// Use this error code to complete client read request:
	return r;
	}
1.517 +
1.518 +
// Stream up to (aLength - aDestOffset) bytes of buffered Rx data into the
// client's buffer starting at aDestOffset, crossing packet and buffer
// boundaries, and coalescing contiguous runs into as few kernel-to-user
// copies as possible. If aRUS ("read until short") is set, delivery stops
// after a short packet or zlp. On return aCompleteNow tells the caller
// whether the client request can be completed; the return value is the
// error code to complete it with.
TInt TDmaBuf::RxCopyDataToClient(DThread* aThread, TClientBuffer *aTcb, TInt aLength, TUint32& aDestOffset,
								 TBool aRUS, TBool& aCompleteNow)
	{
	__KTRACE_OPT(KUSB, Kern::Printf("TDmaBuf::RxCopyDataToClient 1"));
	aCompleteNow = ETrue;

#if defined(USBC_LDD_BUFFER_TRACE)
	// Debug cross-check: the global byte/packet counters must agree with the
	// per-buffer remainders.
	const TInt numPkts = NoRxPackets();
	const TInt numPktsAlt = NoRxPacketsAlt();
	const TInt numBytes = RxBytesAvailable();
	const TInt numBytesAlt = NoRxBytesAlt();

	if (numPkts != numPktsAlt)
		{
		Kern::Printf(
			"TDmaBuf::RxCopyDataToClient: Error: #pkts mismatch global=%d actual=%d",
			numPkts, numPktsAlt);
		}
	if (numBytes != numBytesAlt)
		{
		Kern::Printf(
			"TDmaBuf::RxCopyDataToClient: Error: #bytes mismatch global=%d actual=%d",
			numBytes, numBytesAlt);
		}
	if ((numPkts == 0) && (numBytes != 0))
		{
		Kern::Printf(
			"TDmaBuf::RxCopyDataToClient: Error: global bytes & pkts mismatch pkts=%d bytes=%d",
			numPkts, numBytes);
		}
	if ((numPktsAlt == 0) && (numBytesAlt != 0))
		{
		Kern::Printf(
			"TDmaBuf::RxCopyDataToClient: Error: actual bytes & pkts mismatch pkts=%d bytes=%d",
			numPktsAlt, numBytesAlt);
		}
#endif

	if (!NoRxPackets())
		{
		return KErrNotFound;
		}

	// coverity[var_tested_neg]
	if (iCurrentDrainingBufferIndex == KUsbcInvalidBufferIndex)
		{
		// Marked as Coverity "Intentional" as the member variable
		// iCurrentDrainingBufferIndex is intentionally negative, from previous
		// initialization to KUsbcInvalidBufferIndex (which equals -1).

		if (!NextDrainableBuffer())
			{
#if defined(USBC_LDD_BUFFER_TRACE)
			Kern::Printf("TDmaBuf::RxCopyDataToClient: Error: No buffer draining=%d, packets=%d",
						 iCurrentDrainingBufferIndex, iTotalRxPacketsAvail);
#endif
			return KErrNotFound;
			}
		}
#if defined(USBC_LDD_BUFFER_TRACE)

	__ASSERT_DEBUG((iCurrentDrainingBufferIndex >= 0 ),
				   Kern::Fault(KUsbPanicLdd, __LINE__));

	if (iDrainingOrder != iFillingOrderArray[iCurrentDrainingBufferIndex])
		{
		Kern::Printf("!!! Out of Order Draining TDmaBuf::RxCopyDataToClient 10 draining=%d",
					 iCurrentDrainingBufferIndex);
		}
#endif
	__KTRACE_OPT(KUSB, Kern::Printf("TDmaBuf::RxCopyDataToClient 2"));

	// blockStartAddr..lastEndAddr delimits a run of source bytes that is
	// contiguous in memory; it is flushed to the client in one copy whenever
	// the next packet breaks contiguity (and once more after the loop).
	TUint8* blockStartAddr = iCurrentDrainingBuffer + iCurrentPacketIndexArray[iCurrentPacket] + iExtractOffset;
	TUint8* lastEndAddr = blockStartAddr;	// going to track the contiguity of the memory
	TUint8* thisStartAddr = blockStartAddr;
	TInt toDo = Min(aLength - (TInt)aDestOffset, iTotalRxBytesAvail);
#if defined(USBC_LDD_BUFFER_TRACE)
	TInt bufnum = iCurrentDrainingBufferIndex;
#endif
	TInt errorCode = KErrNone;
	TBool isShortPacket = EFalse;
	const TInt maxPacketSizeMask = iMaxPacketSize - 1;
	do
		{
#if defined(USBC_LDD_BUFFER_TRACE)
		if (bufnum != iCurrentDrainingBufferIndex)
			{
			bufnum = iCurrentDrainingBufferIndex;
			if (iDrainingOrder != iFillingOrderArray[iCurrentDrainingBufferIndex])
				{
				Kern::Printf("!!! Out of Order Draining TDmaBuf::RxCopyDataToClient 20 draining=%d",
							 iCurrentDrainingBufferIndex);
				}
			}
#endif
		// Only the first error encountered is kept for completion.
		if (errorCode == KErrNone)
			{
			errorCode = GetCurrentError();
			}
		thisStartAddr = iCurrentDrainingBuffer + iCurrentPacketIndexArray[iCurrentPacket] + iExtractOffset;
		const TInt thisPacketSize = iCurrentPacketSizeArray[iCurrentPacket];
		const TInt size = thisPacketSize - iExtractOffset;
		if (aRUS)
			{
			if (iEndpointType == KUsbEpTypeBulk)
				{
				// Bulk max packet size is a power of two: mask instead of modulo.
				isShortPacket = (size < iMaxPacketSize) || (size & maxPacketSizeMask);
				}
			else
				{
				// this 'if' block is arranged to avoid a division on packet sizes <= iMaxPacketSize
				isShortPacket = (size < iMaxPacketSize) ||
					((size > iMaxPacketSize) && (size % iMaxPacketSize));
				}
			}
		TInt copySize = Min(size, toDo);
		iExtractOffset += copySize;
		toDo -= copySize;
		if (thisStartAddr != lastEndAddr)
			{
			// Contiguity broken: flush the accumulated run to the client.
			TInt bytesToCopy = lastEndAddr - blockStartAddr;
			TInt r=CopyToUser(aThread, blockStartAddr, bytesToCopy, aTcb, aDestOffset);
			if(r != KErrNone)
				Kern::ThreadKill(aThread, EExitPanic, r, KUsbLDDKillCat);
			blockStartAddr = thisStartAddr;
			}

		ModifyTotalRxBytesAvail(-copySize);
#if defined(USBC_LDD_BUFFER_TRACE)
		iNumberofBytesRxRemain[iCurrentDrainingBufferIndex] -= copySize;
#endif
		lastEndAddr = thisStartAddr + copySize;
		if (iExtractOffset == thisPacketSize)
			{
			// More data to copy, so need to access new packet
#if defined(USBC_LDD_BUFFER_TRACE)
			iNumberofPacketsRxRemain[iCurrentDrainingBufferIndex] -= 1;
#endif
			if (!AdvancePacket())
				{
				break;		// no more packets left
				}
			}
		} while (toDo > 0 && !isShortPacket);

	// Flush whatever contiguous run is still pending.
	if (thisStartAddr != lastEndAddr)
		{
		TInt bytesToCopy = lastEndAddr - blockStartAddr;
		TInt r=CopyToUser(aThread, blockStartAddr, bytesToCopy, aTcb, aDestOffset);
		if(r != KErrNone)
			Kern::ThreadKill(aThread, EExitPanic, r, KUsbLDDKillCat);
		}

	// If we have transferred the requested amount of data it is still possible that
	// the next packet is a zlp which needs to be bumped over

	if (aRUS && (toDo == 0) && (iExtractOffset == 0) && (!isShortPacket) && (!IsReaderEmpty()) &&
		(PeekNextPacketSize() == 0))
		{
		// swallow a zlp
		isShortPacket = ETrue;
#if defined(USBC_LDD_BUFFER_TRACE)
		iNumberofPacketsRxRemain[iCurrentDrainingBufferIndex] -= 1;
#endif
		AdvancePacket();
		}
	// Complete on: short packet (RUS), client buffer full, or an error.
	aCompleteNow = isShortPacket || (((TInt)aDestOffset) == aLength) || (errorCode != KErrNone);

	FreeDrainedBuffers();

	// Use this error code to complete client read request
	return errorCode;
	}
1.692 +
1.693 +
1.694 +inline TInt TDmaBuf::CopyToUser(DThread* aThread, const TUint8* aSourceAddr,
1.695 + TInt aLength, TClientBuffer *aTcb, TUint32& aDestOffset)
1.696 + {
1.697 + TPtrC8 des(aSourceAddr, aLength);
1.698 + TInt errorCode = Kern::ThreadBufWrite(aThread, aTcb, des, aDestOffset, KChunkShiftBy0, aThread);
1.699 + if (errorCode == KErrNone)
1.700 + {
1.701 + aDestOffset += aLength;
1.702 + }
1.703 + return errorCode;
1.704 + }
1.705 +
1.706 +
1.707 +inline TInt TDmaBuf::NoRxPackets() const
1.708 + {
1.709 + return iTotalRxPacketsAvail;
1.710 + }
1.711 +
1.712 +
1.713 +inline void TDmaBuf::IncrementBufferIndex(TInt& aIndex)
1.714 + {
1.715 + if (++aIndex == iNumberofBuffers)
1.716 + aIndex = 0;
1.717 + }
1.718 +
1.719 +
// Retire the buffer currently being drained (marking it freeable) and promote
// the head of the drain queue to be the new draining buffer. Returns EFalse
// if the drain queue is empty, leaving no buffer selected for draining.
TBool TDmaBuf::NextDrainableBuffer()
	{
	TBool r = EFalse;
	if (iCurrentDrainingBufferIndex != KUsbcInvalidBufferIndex)
		{
		// The old draining buffer is exhausted: flag it for FreeDrainedBuffers()
		// and zero its counters.
		iCanBeFreed[iCurrentDrainingBufferIndex] = ETrue;
		iNumberofPacketsRx[iCurrentDrainingBufferIndex] = 0;	// Current buffer is empty
		iNumberofBytesRx[iCurrentDrainingBufferIndex] = 0;		// Current buffer is empty

#if defined(USBC_LDD_BUFFER_TRACE)
		// Debug check: a retired buffer should have no unaccounted data left.
		TUint& bytesRemain = iNumberofBytesRxRemain[iCurrentDrainingBufferIndex];
		TUint& pktsRemain = iNumberofPacketsRxRemain[iCurrentDrainingBufferIndex];
		if ((bytesRemain != 0) || (pktsRemain != 0))
			{
			Kern::Printf(
				"TDmaBuf::NextDrainableBuffer: Error: data discarded buffer=%d pkts=%d bytes=%d",
				iCurrentDrainingBufferIndex, pktsRemain, bytesRemain);
			bytesRemain = 0;
			pktsRemain = 0;
			}
#endif

		iCurrentDrainingBufferIndex = KUsbcInvalidBufferIndex;
		iCurrentPacket = KUsbcInvalidPacketIndex;
		}

	if (iDrainQueueIndex != KUsbcInvalidDrainQueueIndex)
		{
		r = ETrue;
		// Dequeue the head and shift the FIFO up by one slot
		// (the queue array is one entry oversized, so i+1 stays in bounds).
		const TInt index = iDrainQueue[0];
		iDrainQueueIndex--;
		for (TInt i = 0; i < iNumberofBuffers; i++)
			{
			iDrainQueue[i] = iDrainQueue[i+1];
			}

#if defined(USBC_LDD_BUFFER_TRACE)
		if (index != KUsbcInvalidBufferIndex)
			iDrainingOrder++;
#endif

		// Cache the new buffer's address and packet arrays for the drain path.
		iCurrentDrainingBufferIndex = index;
		iCurrentDrainingBuffer = iBuffers[index];
		iCurrentPacketIndexArray = iPacketIndex[index];
		iCurrentPacketSizeArray = iPacketSize[index];
		iCurrentPacket = 0;
		}
	return r;
	}
1.769 +
1.770 +
1.771 +TInt TDmaBuf::PeekNextDrainableBuffer()
1.772 + {
1.773 + TInt r = KUsbcInvalidBufferIndex;
1.774 + if (iDrainQueueIndex != KUsbcInvalidDrainQueueIndex)
1.775 + {
1.776 + r = iDrainQueue[0];
1.777 + }
1.778 + return r;
1.779 + }
1.780 +
1.781 +
// Find a buffer the controller can fill next, searching circularly starting
// with the buffer after the current one. Returns EFalse (and resets the
// filling index to 0) if every buffer still holds undrained data.
TBool TDmaBuf::NextFillableBuffer()
	{
	TBool r = EFalse;
	TInt index = iCurrentFillingBufferIndex;
	IncrementBufferIndex(index);
	// the sequence will restart at 0 if a buffer can't be found this time
	iCurrentFillingBufferIndex = 0;
	for (TInt i = 0; i < iNumberofBuffers; i++)
		{
		// A buffer not marked drainable holds no pending data and can be reused.
		if (!iDrainable[index])
			{
			iCurrentFillingBufferIndex = index;
			r = ETrue;
			break;
			}
		IncrementBufferIndex(index);
		}
	return r;
	}
1.801 +
1.802 +
1.803 +void TDmaBuf::FreeDrainedBuffers()
1.804 + {
1.805 + for (TInt i = 0; i < iNumberofBuffers; i++)
1.806 + {
1.807 + if (iDrainable[i] && iCanBeFreed[i])
1.808 + {
1.809 + iDrainable[i] = iCanBeFreed[i] = EFalse;
1.810 + }
1.811 + }
1.812 + }
1.813 +
1.814 +
// Return ETrue if the buffered Rx data is known to contain a short packet
// (or a zlp, or residue data), i.e. whether a "read until short" request
// could be satisfied entirely from what is already buffered.
TBool TDmaBuf::ShortPacketExists()
	{
	// Actually, a short packet or residue data
	__KTRACE_OPT(KUSB, Kern::Printf("TDmaBuf::ShortPacketExists 1"));
	TInt index = iCurrentDrainingBufferIndex;
	TUsbcPacketArray* pktSizeArray = iCurrentPacketSizeArray;

	if (iMaxPacketSize > 0)
		{
		// No buffers available for draining
		if ((iCurrentDrainingBufferIndex == KUsbcInvalidBufferIndex) ||
			(iCurrentPacket == KUsbcInvalidPacketIndex))
			return EFalse;

		// Zlp waiting at tail
		if ((iTotalRxBytesAvail == 0) && (NoRxPackets() == 1))
			return ETrue;

		// The two branches below are identical except that bulk endpoints
		// (power-of-two max packet size) use masking where the other types
		// need modulo arithmetic.
		if (iEndpointType == KUsbEpTypeBulk)
			{
			const TInt mask = iMaxPacketSize - 1;
			// Total residue not a multiple of the max packet size => a short
			// packet must be present somewhere.
			if (iTotalRxBytesAvail & mask)
				return ETrue;

			// residue==0; this can be because
			// zlps exist, or short packets combine to n * max_packet_size
			// This means spadework
			const TInt s = iCurrentPacketSizeArray[iCurrentPacket] - iExtractOffset;
			if ((s == 0) || (s & mask))
				{
				return ETrue;
				}

			// Walk the current buffer and then the drain queue, checking each
			// buffer's LAST packet for shortness.
			for (TInt i = 0; i < iNumberofBuffers; i++)
				{
				if (index == KUsbcInvalidBufferIndex)
					break;
				if (iDrainable[index])
					{
					const TInt packetCount = iNumberofPacketsRx[index];
					const TInt lastPacketSize=pktSizeArray[packetCount - 1];
					if ((lastPacketSize < iMaxPacketSize) || (lastPacketSize & mask))
						{
						return ETrue;
						}
					}
				index = iDrainQueue[i];
				pktSizeArray = iPacketSize[index];
				}
			}
		else
			{
			// Total residue not a multiple of the max packet size => short packet.
			if (iTotalRxBytesAvail % iMaxPacketSize)
				return ETrue;

			// residue==0; this can be because
			// zlps exist, or short packets combine to n * max_packet_size
			// This means spadework
			const TInt s = iCurrentPacketSizeArray[iCurrentPacket] - iExtractOffset;
			if ((s == 0) || (s % iMaxPacketSize))
				{
				return ETrue;
				}

			// Walk the current buffer and then the drain queue, checking each
			// buffer's LAST packet for shortness.
			for (TInt i = 0; i < iNumberofBuffers; i++)
				{
				if (index == KUsbcInvalidBufferIndex)
					break;
				if (iDrainable[index])
					{
					const TInt packetCount = iNumberofPacketsRx[index];
					const TInt lastPacketSize = pktSizeArray[packetCount - 1];
					if ((lastPacketSize < iMaxPacketSize) || (lastPacketSize % iMaxPacketSize))
						{
						return ETrue;
						}
					}
				index = iDrainQueue[i];
				pktSizeArray = iPacketSize[index];
				}
			}
		}

	return EFalse;
	}
1.900 +
1.901 +
// Append a freshly filled buffer to the FIFO of buffers awaiting draining.
// The slot about to be used should always be free; if it is not, the queue
// state is corrupt (traced in debug builds, but overwritten regardless).
void TDmaBuf::AddToDrainQueue(TInt aBufferIndex)
	{
	if (iDrainQueue[iDrainQueueIndex + 1] != KUsbcInvalidBufferIndex)
		{
#if defined(USBC_LDD_BUFFER_TRACE)
		Kern::Printf("TDmaBuf::AddToDrainQueue: Error: invalid iDrainQueue[x]");
#endif
		}
	iDrainQueue[++iDrainQueueIndex] = aBufferIndex;
	}
1.912 +
1.913 +
1.914 +#if defined(USBC_LDD_BUFFER_TRACE)
1.915 +TInt TDmaBuf::NoRxPacketsAlt() const
1.916 + {
1.917 + TInt pktCount = 0;
1.918 + for(TInt i = 0; i < iNumberofBuffers; i++)
1.919 + {
1.920 + if (iDrainable[i])
1.921 + {
1.922 + pktCount += iNumberofPacketsRxRemain[i];
1.923 + }
1.924 + }
1.925 + return pktCount;
1.926 + }
1.927 +
1.928 +
1.929 +TInt TDmaBuf::NoRxBytesAlt() const
1.930 + {
1.931 + TInt byteCount = 0;
1.932 + for(TInt i = 0; i < iNumberofBuffers; i++)
1.933 + {
1.934 + if (iDrainable[i])
1.935 + {
1.936 + byteCount += iNumberofBytesRxRemain[i];
1.937 + }
1.938 + }
1.939 + return byteCount;
1.940 + }
1.941 +#endif
1.942 +
1.943 +
1.944 +// We only store 1 transaction, no other buffering is done
// We only store 1 transaction, no other buffering is done.
// Copies aTxLength bytes of the client's Tx data (starting at aBufferOffset
// within the client buffer) from user space into the DMA buffer(s).
// Returns KErrInUse while Rx data is still pending; on a failed user-space
// read the client thread is panicked and the error returned.
TInt TDmaBuf::TxStoreData(DThread* aThread, TClientBuffer *aTcb, TInt aTxLength, TUint32 aBufferOffset)
	{
	__KTRACE_OPT(KUSB, Kern::Printf("TDmaBuf::TxStoreData 1"));
	if (!IsReaderEmpty())
		return KErrInUse;

	__KTRACE_OPT(KUSB, Kern::Printf("TDmaBuf::TxStoreData 2"));

	TInt remainTxLength = aTxLength;
	TUint32 bufferOffset = aBufferOffset;
	// Store each buffer separately, iBufSz bytes at a time.
	for( TInt i=0;(i<iNumberofBuffers)&&(remainTxLength>0);i++)
		{
		TUint8* logicalDest = iBuffers[i];
		// Last chunk may be shorter than a full buffer.
		TInt xferSz = Min(remainTxLength, iBufSz);
		TPtr8 des(logicalDest, xferSz, xferSz);
		TInt r = Kern::ThreadBufRead(aThread, aTcb, des, bufferOffset, KChunkShiftBy0);
		if(r != KErrNone)
			{
			Kern::ThreadKill(aThread, EExitPanic, r, KUsbLDDKillCat);
			return r;
			}
		remainTxLength -= iBufSz;
		bufferOffset += iBufSz;
		}

	return KErrNone;
	}
1.973 +
1.974 +
1.975 +TInt TDmaBuf::TxGetNextXfer(TUint8*& aBufferAddr, TInt& aTxLength, TPhysAddr& aBufferPhys)
1.976 + {
1.977 + if (iTxActive)
1.978 + return KErrInUse;
1.979 +
1.980 + aBufferAddr = iBuffers[0]; // only 1 tx buffer
1.981 + aBufferPhys = iBufferPhys[0];
1.982 + aTxLength = BufferTotalSize();
1.983 +
1.984 + return KErrNone;
1.985 + }
1.986 +