// Copyright (c) 2000-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\drivers\usbc\usbdma.cpp
// LDD for USB Device driver stack:
// Management of DMA-capable data buffers.
//
//

/**
 @file usbdma.cpp
 @internalTechnology
*/

#include <drivers/usbc.h>


#if defined(_DEBUG)
static const char KUsbPanicLdd[] = "USB LDD";
#endif

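// Sizes the DMA buffering for one endpoint. The buffer size and buffer count are
// chosen from the endpoint type; for Bulk endpoints the size is additionally taken
// from the bandwidth priority, whose low nibble indexes the OUT size table and whose
// high nibble indexes the IN size table. IN endpoints always get a single buffer.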
TDmaBuf::TDmaBuf(TUsbcEndpointInfo* aEndpointInfo, TInt aBandwidthPriority)
	: iBufBasePtr(NULL),
	  iCurrentDrainingBuffer(NULL),
	  iCurrentPacket(0),
	  iCurrentPacketIndexArray(NULL),
	  iCurrentPacketSizeArray(NULL)
	{
	iMaxPacketSize = aEndpointInfo->iSize;
	iEndpointType = aEndpointInfo->iType;

	switch (aEndpointInfo->iType)
		{
	case KUsbEpTypeControl:
		iBufSz = KUsbcDmaBufSzControl;
		iNumberofBuffers = KUsbcDmaBufNumControl;
		break;
	case KUsbEpTypeIsochronous:
		iBufSz = KUsbcDmaBufSzIsochronous;
		iNumberofBuffers = KUsbcDmaBufNumIsochronous;
		break;
	case KUsbEpTypeBulk:
		{
		if (aEndpointInfo->iDir == KUsbEpDirOut)
			{
			const TInt priorityOUT = aBandwidthPriority & 0x0f;
			iBufSz = KUsbcDmaBufSizesBulkOUT[priorityOUT];
			}
		else
			{
			const TInt priorityIN = (aBandwidthPriority >> 4) & 0x0f;
			iBufSz = KUsbcDmaBufSizesBulkIN[priorityIN];
			}
		iNumberofBuffers = KUsbcDmaBufNumBulk;
		}
		break;
	case KUsbEpTypeInterrupt:
		iBufSz = KUsbcDmaBufSzInterrupt;
		iNumberofBuffers = KUsbcDmaBufNumInterrupt;
		break;
	default:
		iBufSz = 0;
		iNumberofBuffers = 0;
		}

	if (aEndpointInfo->iDir == KUsbEpDirIn)
		{
		iNumberofBuffers = 1;								// IN endpoints only have 1 buffer
		}

	for (TInt i = 0; i < KUsbcDmaBufNumMax; i++)
		{
		// Buffer logical addresses (pointers)
		iBuffers[i] = NULL;
		// Buffer physical addresses
		iBufferPhys[i] = 0;
		// Packet indexes base array
		iPacketIndex[i] = NULL;
		// Packet sizes base array
		iPacketSize[i] = NULL;
		}
	}

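// Second-phase construction. For OUT endpoints the shared iPacketInfoStorage area is
// carved into per-buffer packet-index and packet-size arrays, KUsbcDmaBufMaxPkts
// entries each; IN endpoints need no packet bookkeeping. Ends with a Flush() so the
// object starts from a known-empty state.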
TInt TDmaBuf::Construct(TUsbcEndpointInfo* aEndpointInfo)
	{
	if (aEndpointInfo->iDir != KUsbEpDirIn)
		{
		// IN endpoints don't need a packet array

		// At most 2 packets (clump of max packet size packets) + possible zlp
		TUsbcPacketArray* bufPtr = iPacketInfoStorage;
		// this divides up the packet indexing & packet size array over the number of buffers
		__KTRACE_OPT(KUSB, Kern::Printf("TDmaBuf::Construct() array base=0x%08x", bufPtr));
		for (TInt i = 0; i < iNumberofBuffers; i++)
			{
			iPacketIndex[i] = bufPtr;
			bufPtr += KUsbcDmaBufMaxPkts;
			iPacketSize[i] = bufPtr;
			bufPtr += KUsbcDmaBufMaxPkts;
			__KTRACE_OPT(KUSB, Kern::Printf("TDmaBuf::Construct() packetIndex[%d]=0x%08x packetSize[%d]=0x%08x",
											i, iPacketIndex[i], i, iPacketSize[i]));
			}
		}
	else
		{
		__KTRACE_OPT(KUSB, Kern::Printf("TDmaBuf::Construct() IN endpoint"));
		}
	Flush();
	return KErrNone;
	}

TDmaBuf::~TDmaBuf()
	{
	__KTRACE_OPT(KUSB, Kern::Printf("TDmaBuf::~TDmaBuf()"));
	}

TInt TDmaBuf::BufferTotalSize() const
	{
	return iBufSz * iNumberofBuffers;
	}

TInt TDmaBuf::BufferSize() const
    {
    return iBufSz;
    }

TInt TDmaBuf::SetBufferAddr(TInt aBufInd, TUint8* aBufAddr)
    {
    __ASSERT_DEBUG((aBufInd < iNumberofBuffers),
                       Kern::Fault(KUsbPanicLdd, __LINE__));
    iDrainable[aBufInd] = iCanBeFreed[aBufInd] = EFalse;
    iBuffers[aBufInd] = aBufAddr;
    iBufferPhys[aBufInd] = Epoc::LinearToPhysical((TLinAddr)aBufAddr);
    __KTRACE_OPT(KUSB, Kern::Printf("TDmaBuf::SetBufferAddr() iBuffers[%d]=0x%08x", aBufInd, iBuffers[aBufInd]));
    return KErrNone;
    }

TInt TDmaBuf::BufferNumber() const
    {
    return iNumberofBuffers;
    }

void TDmaBuf::SetMaxPacketSize(TInt aSize)
	{
	iMaxPacketSize = aSize;
	}


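// Resets all fill/drain bookkeeping: both directions are marked inactive, the byte and
// packet counters are zeroed, and every buffer is marked neither drainable nor freeable.
// The drain queue (which is one entry oversized) is emptied as well.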
void TDmaBuf::Flush()
	{
	__KTRACE_OPT(KUSB, Kern::Printf("TDmaBuf::Flush %x", this));
	iRxActive = EFalse;
	iTxActive = EFalse;
	iExtractOffset = 0;
	iTotalRxBytesAvail = 0;
	iTotalRxPacketsAvail = 0;
	iCurrentDrainingBufferIndex = KUsbcInvalidBufferIndex;
	iCurrentFillingBufferIndex = 0;
	iDrainQueueIndex = KUsbcInvalidDrainQueueIndex;
	for (TInt i = 0; i < KUsbcDmaBufNumMax; i++)
		{
		iDrainable[i] = EFalse;
		iCanBeFreed[i] = EFalse;
		iNumberofBytesRx[i] = 0;
		iNumberofPacketsRx[i] = 0;
		iError[i] = KErrGeneral;
		iDrainQueue[i] = KUsbcInvalidBufferIndex;
#if defined(USBC_LDD_BUFFER_TRACE)
		iFillingOrderArray[i] = 0;
		iNumberofBytesRxRemain[i] = 0;
		iNumberofPacketsRxRemain[i] = 0;
#endif
		}
	// Drain queue is 1 oversized
	iDrainQueue[KUsbcDmaBufNumMax] = KUsbcInvalidBufferIndex;

#if defined(USBC_LDD_BUFFER_TRACE)
	iFillingOrder = 0;
	iDrainingOrder = 0;
#endif
	}


void TDmaBuf::RxSetActive()
	{
	__KTRACE_OPT(KUSB, Kern::Printf("TDmaBuf::RxSetActive %x", this));
	iRxActive = ETrue;
	}


void TDmaBuf::RxSetInActive()
	{
	__KTRACE_OPT(KUSB, Kern::Printf("TDmaBuf::RxSetInActive %x", this));
	iRxActive = EFalse;
	}


TBool TDmaBuf::RxIsActive()
	{
	return iRxActive;
	}


void TDmaBuf::TxSetActive()
	{
	iTxActive = ETrue;
	}


void TDmaBuf::TxSetInActive()
	{
	iTxActive = EFalse;
	}


TBool TDmaBuf::TxIsActive()
	{
	return iTxActive;
	}


/**************************** Rx DMA Buffer Access *************************/

void TDmaBuf::ModifyTotalRxBytesAvail(TInt aVal)
	{
	iTotalRxBytesAvail += aVal;
	}


void TDmaBuf::ModifyTotalRxPacketsAvail(TInt aVal)
	{
	iTotalRxPacketsAvail += aVal;
	}


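// Consumes the current Rx packet: decrements the global packet count, steps to the next
// packet in the current draining buffer, and moves on to the next drainable buffer once
// this one is exhausted. Returns EFalse when no further buffer is available.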
TBool TDmaBuf::AdvancePacket()
	{
	ModifyTotalRxPacketsAvail(-1);
	TBool r = ETrue;
	__ASSERT_DEBUG((iCurrentDrainingBufferIndex >= 0),
					   Kern::Fault(KUsbPanicLdd, __LINE__));
	if (++iCurrentPacket >= iNumberofPacketsRx[iCurrentDrainingBufferIndex])
		{
		r = NextDrainableBuffer();
		}
	iExtractOffset = 0;
	__ASSERT_DEBUG((iCurrentDrainingBufferIndex == KUsbcInvalidBufferIndex) ||
				   (iCurrentPacket < KUsbcDmaBufMaxPkts),
				   Kern::Fault(KUsbPanicLdd, __LINE__));
	return r;
	}


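// Returns the size of the next packet that a read would see, without consuming it.
// If the current buffer has been fully drained this peeks into the next buffer on the
// drain queue. Returns -1 when no packet is available.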
TInt TDmaBuf::PeekNextPacketSize()
	{
	TUint pkt = iCurrentPacket;
	TInt index = iCurrentDrainingBufferIndex;
	TInt size = -1;
	if (pkt >= iNumberofPacketsRx[index])
		{
		index = PeekNextDrainableBuffer();
		pkt = 0;
		}

	if ((index != KUsbcInvalidBufferIndex) && iNumberofPacketsRx[index])
		{
		const TUsbcPacketArray* sizeArray = iPacketSize[index];
		size = (TInt)sizeArray[pkt];
		}

	__ASSERT_DEBUG((iCurrentDrainingBufferIndex == KUsbcInvalidBufferIndex) ||
				   (iCurrentPacket < KUsbcDmaBufMaxPkts),
				   Kern::Fault(KUsbPanicLdd, __LINE__));
	return size;
	}


inline TInt TDmaBuf::GetCurrentError()
	{
	// USB bus errors are very rare. To avoid attaching an error code to every packet
	// (almost every error code would be KErrNone), we keep a single error code per buffer.
	// If the error code is != KErrNone then it refers to the LAST packet in the buffer.
	TInt errorCode = KErrNone;
	// Check that the index is not the negative (-1) value defined by
	// KUsbcInvalidBufferIndex.
	__ASSERT_DEBUG((iCurrentDrainingBufferIndex >= 0),
					   Kern::Fault(KUsbPanicLdd, __LINE__));

	if (iError[iCurrentDrainingBufferIndex] != KErrNone)
		{
		// See if we are at the last packet
		if ((iCurrentPacket + 1) == iNumberofPacketsRx[iCurrentDrainingBufferIndex])
			{
			errorCode = iError[iCurrentDrainingBufferIndex];
			}
		}
	return errorCode;
	}


// used to decide whether a client read can complete straight away
TBool TDmaBuf::IsReaderEmpty()
	{
	__KTRACE_OPT(KUSB, Kern::Printf("TDmaBuf::IsReaderEmpty iTotalRxPacketsAvail=%d",
									iTotalRxPacketsAvail));
	return (iTotalRxPacketsAvail == 0);
	}


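// Completion callback for an Rx transfer into the current filling buffer: records how
// many bytes and packets arrived, marks the buffer drainable, appends it to the drain
// queue and, if nothing is currently being drained, promotes it immediately. A failed
// zero-byte transfer just releases the buffer for reuse.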
void TDmaBuf::ReadXferComplete(TInt aNoBytesRecv, TInt aNoPacketsRecv, TInt aErrorCode)
	{
	// Adjust pending packet
	if ((aNoBytesRecv == 0) && (aErrorCode != KErrNone))
		{
		// Make the buffer available for reuse
		iDrainable[iCurrentFillingBufferIndex] = EFalse;
		return;
		}

	ModifyTotalRxBytesAvail(aNoBytesRecv);
	ModifyTotalRxPacketsAvail(aNoPacketsRecv);
	iNumberofBytesRx[iCurrentFillingBufferIndex] = aNoBytesRecv;
	iNumberofPacketsRx[iCurrentFillingBufferIndex] = aNoPacketsRecv;

#if defined(USBC_LDD_BUFFER_TRACE)
	iNumberofBytesRxRemain[iCurrentFillingBufferIndex] = aNoBytesRecv;
	iNumberofPacketsRxRemain[iCurrentFillingBufferIndex] = aNoPacketsRecv;
#endif

	__KTRACE_OPT(KUSB, Kern::Printf("TDmaBuf::ReadXferComplete 2 # of bytes=%d # of packets=%d",
									iTotalRxBytesAvail, iTotalRxPacketsAvail));
	iDrainable[iCurrentFillingBufferIndex] = ETrue;
	iError[iCurrentFillingBufferIndex] = aErrorCode;
	AddToDrainQueue(iCurrentFillingBufferIndex);
	if (iCurrentDrainingBufferIndex == KUsbcInvalidBufferIndex)
		{
		NextDrainableBuffer();
		}
	}


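// Hands out the next buffer to be filled by the controller, together with its packet
// index/size arrays and physical address. Fails with KErrInUse while a read is already
// outstanding and with KErrNoMemory when every buffer is still waiting to be drained.
// A rough sketch of the intended fill/drain cycle (illustrative only, not verbatim LDD
// code; the actual callers live in the channel/controller layer):
//
//   TUint8* buf; TUsbcPacketArray *idx, *sz; TInt len; TPhysAddr phys;
//   if (dmaBuf.RxGetNextXfer(buf, idx, sz, len, phys) == KErrNone)
//       {
//       // ... controller DMAs packets into 'buf' and fills in 'idx'/'sz' ...
//       dmaBuf.ReadXferComplete(bytesReceived, packetsReceived, KErrNone);
//       // data is now visible to RxCopyDataToClient()/RxCopyPacketToClient()
//       }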
TInt TDmaBuf::RxGetNextXfer(TUint8*& aBufferAddr, TUsbcPacketArray*& aIndexArray,
							TUsbcPacketArray*& aSizeArray, TInt& aLength, TPhysAddr& aBufferPhys)
	{
	__KTRACE_OPT(KUSB, Kern::Printf("TDmaBuf::RxGetNextXfer 1"));
	if (RxIsActive())
		{
		__KTRACE_OPT(KUSB, Kern::Printf(" ---> RxIsActive, returning"));
		return KErrInUse;
		}

	__KTRACE_OPT(KUSB, Kern::Printf("TDmaBuf::RxGetNextXfer Current buffer=%d",
									iCurrentFillingBufferIndex));
	if (iDrainable[iCurrentFillingBufferIndex])
		{
		// If the controller refused the last read request, the current buffer will still be
		// marked as !Drainable, because the controller never completed the read to the LDD,
		// and therefore the buffer can be reused.
		if (!NextFillableBuffer())
			{
			return KErrNoMemory;
			}
		}

	__KTRACE_OPT(KUSB, Kern::Printf("TDmaBuf::RxGetNextXfer New buffer=%d",
									iCurrentFillingBufferIndex));
	aBufferAddr = iBuffers[iCurrentFillingBufferIndex];
	aBufferPhys = iBufferPhys[iCurrentFillingBufferIndex];
	aIndexArray = iPacketIndex[iCurrentFillingBufferIndex];
	aSizeArray = iPacketSize[iCurrentFillingBufferIndex];
	aLength = iBufSz;

#if defined(USBC_LDD_BUFFER_TRACE)
	iFillingOrderArray[iCurrentFillingBufferIndex] = ++iFillingOrder;
#endif

	return KErrNone;
	}


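// Copies (at most) one packet, or the remainder of a partially read packet, from the
// current draining buffer into the client's buffer via an inter-thread write. A 'packet'
// larger than the endpoint's max packet size is treated as a clump of wire packets and
// handed out one max-packet-size piece at a time. Returns KErrNotFound if nothing is
// available, otherwise the result of the inter-thread write (or the buffer's error code,
// which applies to its last packet).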
TInt TDmaBuf::RxCopyPacketToClient(DThread* aThread, TClientBuffer *aTcb, TInt aLength)
	{
	__KTRACE_OPT(KUSB, Kern::Printf("TDmaBuf::RxCopyPacketToClient 1"));

#if defined(USBC_LDD_BUFFER_TRACE)
	const TInt numPkts = NoRxPackets();
	const TInt numPktsAlt = NoRxPacketsAlt();
	const TInt numBytes = RxBytesAvailable();
	const TInt numBytesAlt = NoRxBytesAlt();

	if (numPkts != numPktsAlt)
		{
		Kern::Printf(
			"TDmaBuf::RxCopyPacketToClient: Error: #pkts mismatch global=%d actual=%d",
			numPkts, numPktsAlt);
		}
	if (numBytes != numBytesAlt)
		{
		Kern::Printf(
			"TDmaBuf::RxCopyPacketToClient: Error: #bytes mismatch global=%d actual=%d",
			numBytes, numBytesAlt);
		}
	if ((numPkts == 0) && (numBytes != 0))
		{
		Kern::Printf(
			"TDmaBuf::RxCopyPacketToClient: Error: global bytes & pkts mismatch pkts=%d bytes=%d",
			numPkts, numBytes);
		}
	if ((numPktsAlt == 0) && (numBytesAlt != 0))
		{
		Kern::Printf(
			"TDmaBuf::RxCopyPacketToClient: Error: actual bytes & pkts mismatch pkts=%d bytes=%d",
			numPktsAlt, numBytesAlt);
		}
#endif

	if (!NoRxPackets())
		return KErrNotFound;

	__KTRACE_OPT(KUSB, Kern::Printf("TDmaBuf::RxCopyPacketToClient 2"));
	// the next condition should be true because we have some packets available
	// coverity[var_tested_neg]
	if (iCurrentDrainingBufferIndex == KUsbcInvalidBufferIndex)
		{
		// Marked as Coverity "Intentional" because the member variable
		// iCurrentDrainingBufferIndex is intentionally negative here, from its previous
		// initialization to KUsbcInvalidBufferIndex (which equals -1).
		if (!NextDrainableBuffer())
			return KErrNotFound;
		}

	__ASSERT_DEBUG((iCurrentDrainingBufferIndex >= 0 ),
						   Kern::Fault(KUsbPanicLdd, __LINE__));

	if (!iDrainable[iCurrentDrainingBufferIndex])
		return KErrNotFound;

	// Calculate copy-from address & adjust for the fact that
	// some data may have already been read from the packet
	TUint8* logicalSrc = iCurrentDrainingBuffer + iCurrentPacketIndexArray[iCurrentPacket] + iExtractOffset;
	TInt packetSz = iCurrentPacketSizeArray[iCurrentPacket];
	TInt thisPacketSz = packetSz - iExtractOffset;
	TInt errorCode;
	// Try to sort out what a "packet" might mean.
	// In a multi-packet DMA environment we might see super-packets, i.e. just one
	// 'packet', maybe 4K or so long, made up of lots of small packets.
	// Since we don't know where the packet boundaries will be, we have to assume that
	// any 'packet' larger than the max packet size of the ep is, in fact, a conglomeration
	// of smaller packets. However, for the purposes of the packet count, this is still regarded
	// as a single packet and the packet count is only decremented when it is consumed.
	// As before, if the user fails to read an entire packet out then we advance to the
	// next packet anyway.
	// To be safe the user must always supply a buffer of at least max packet size bytes.
	if (thisPacketSz > iMaxPacketSize)
		{
		// Multiple packets left in buffer
		// calculate number of bytes to end of packet
		if (iEndpointType == KUsbEpTypeBulk)
			{
			thisPacketSz = iMaxPacketSize - (iExtractOffset & (iMaxPacketSize - 1));
			}
		else
			{
			thisPacketSz = iMaxPacketSize - (iExtractOffset % iMaxPacketSize);
			}
		errorCode = KErrNone;
		}
	else
		{
		errorCode = GetCurrentError();						// single packet left
		}

	iExtractOffset += thisPacketSz;			// iExtractOffset is now at the end of the real or notional packet

	ModifyTotalRxBytesAvail(-thisPacketSz);
#if defined(USBC_LDD_BUFFER_TRACE)
	iNumberofBytesRxRemain[iCurrentDrainingBufferIndex] -= thisPacketSz;
#endif
	// this can only be untrue if the "packet" is a conglomeration of smaller packets:
	if (iExtractOffset == packetSz)
		{
		// packet consumed, advance to next packet in buffer
#if defined(USBC_LDD_BUFFER_TRACE)
		iNumberofPacketsRxRemain[iCurrentDrainingBufferIndex] -= 1;
#endif
		AdvancePacket();
		}

	TPtrC8 des(logicalSrc, thisPacketSz);
	TInt r = Kern::ThreadBufWrite(aThread, aTcb, des, 0, 0, aThread);
	if (r == KErrNone)
		{
		r = errorCode;
		}
	__KTRACE_OPT(KUSB, Kern::Printf("TDmaBuf::RxCopyPacketToClient 3"));

	FreeDrainedBuffers();

	// Use this error code to complete client read request:
	return r;
	}


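// Streams up to aLength bytes into the client buffer, coalescing physically contiguous
// packets into single inter-thread writes and crossing buffer boundaries as needed.
// With aRUS set (apparently "read until short"), copying stops at a short packet and a
// trailing zlp is swallowed; aCompleteNow tells the caller whether the client request
// can be completed immediately.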
TInt TDmaBuf::RxCopyDataToClient(DThread* aThread, TClientBuffer *aTcb, TInt aLength, TUint32& aDestOffset,
								 TBool aRUS, TBool& aCompleteNow)
	{
	__KTRACE_OPT(KUSB, Kern::Printf("TDmaBuf::RxCopyDataToClient 1"));
	aCompleteNow = ETrue;

#if defined(USBC_LDD_BUFFER_TRACE)
	const TInt numPkts = NoRxPackets();
	const TInt numPktsAlt = NoRxPacketsAlt();
	const TInt numBytes = RxBytesAvailable();
	const TInt numBytesAlt = NoRxBytesAlt();

	if (numPkts != numPktsAlt)
		{
		Kern::Printf(
			"TDmaBuf::RxCopyDataToClient: Error: #pkts mismatch global=%d actual=%d",
			numPkts, numPktsAlt);
		}
	if (numBytes != numBytesAlt)
		{
		Kern::Printf(
			"TDmaBuf::RxCopyDataToClient: Error: #bytes mismatch global=%d actual=%d",
			numBytes, numBytesAlt);
		}
	if ((numPkts == 0) && (numBytes != 0))
		{
		Kern::Printf(
			"TDmaBuf::RxCopyDataToClient: Error: global bytes & pkts mismatch pkts=%d bytes=%d",
			numPkts, numBytes);
		}
	if ((numPktsAlt == 0) && (numBytesAlt != 0))
		{
		Kern::Printf(
			"TDmaBuf::RxCopyDataToClient: Error: actual bytes & pkts mismatch pkts=%d bytes=%d",
			numPktsAlt, numBytesAlt);
		}
#endif

	if (!NoRxPackets())
		{
		return KErrNotFound;
		}

	// coverity[var_tested_neg]
	if (iCurrentDrainingBufferIndex == KUsbcInvalidBufferIndex)
		{
		// Marked as Coverity "Intentional" because the member variable
		// iCurrentDrainingBufferIndex is intentionally negative here, from its previous
		// initialization to KUsbcInvalidBufferIndex (which equals -1).

		if (!NextDrainableBuffer())
			{
#if defined(USBC_LDD_BUFFER_TRACE)
			Kern::Printf("TDmaBuf::RxCopyDataToClient: Error:  No buffer draining=%d, packets=%d",
						 iCurrentDrainingBufferIndex, iTotalRxPacketsAvail);
#endif
			return KErrNotFound;
			}
		}
#if defined(USBC_LDD_BUFFER_TRACE)

	__ASSERT_DEBUG((iCurrentDrainingBufferIndex >= 0 ),
							   Kern::Fault(KUsbPanicLdd, __LINE__));

	if (iDrainingOrder != iFillingOrderArray[iCurrentDrainingBufferIndex])
		{
		Kern::Printf("!!! Out of Order Draining TDmaBuf::RxCopyDataToClient 10 draining=%d",
					 iCurrentDrainingBufferIndex);
		}
#endif
	__KTRACE_OPT(KUSB, Kern::Printf("TDmaBuf::RxCopyDataToClient 2"));

	TUint8* blockStartAddr = iCurrentDrainingBuffer + iCurrentPacketIndexArray[iCurrentPacket] + iExtractOffset;
	TUint8* lastEndAddr = blockStartAddr;					// going to track the contiguity of the memory
	TUint8* thisStartAddr = blockStartAddr;
	TInt toDo = Min(aLength - (TInt)aDestOffset, iTotalRxBytesAvail);
#if defined(USBC_LDD_BUFFER_TRACE)
	TInt bufnum = iCurrentDrainingBufferIndex;
#endif
	TInt errorCode = KErrNone;
	TBool isShortPacket = EFalse;
	const TInt maxPacketSizeMask = iMaxPacketSize - 1;
	do
		{
#if defined(USBC_LDD_BUFFER_TRACE)
		if (bufnum != iCurrentDrainingBufferIndex)
			{
			bufnum = iCurrentDrainingBufferIndex;
			if (iDrainingOrder != iFillingOrderArray[iCurrentDrainingBufferIndex])
				{
				Kern::Printf("!!! Out of Order Draining TDmaBuf::RxCopyDataToClient 20 draining=%d",
							 iCurrentDrainingBufferIndex);
				}
			}
#endif
		if (errorCode == KErrNone)
			{
			errorCode = GetCurrentError();
			}
		thisStartAddr = iCurrentDrainingBuffer + iCurrentPacketIndexArray[iCurrentPacket] + iExtractOffset;
		const TInt thisPacketSize = iCurrentPacketSizeArray[iCurrentPacket];
		const TInt size = thisPacketSize - iExtractOffset;
		if (aRUS)
			{
			if (iEndpointType == KUsbEpTypeBulk)
				{
				isShortPacket = (size < iMaxPacketSize) || (size & maxPacketSizeMask);
				}
			else
				{
				// this 'if' block is arranged to avoid a division on packet sizes <= iMaxPacketSize
				isShortPacket = (size < iMaxPacketSize) ||
					((size > iMaxPacketSize) && (size % iMaxPacketSize));
				}
			}
		TInt copySize = Min(size, toDo);
		iExtractOffset += copySize;
		toDo -= copySize;
		if (thisStartAddr != lastEndAddr)
			{
			TInt bytesToCopy = lastEndAddr - blockStartAddr;
			TInt r = CopyToUser(aThread, blockStartAddr, bytesToCopy, aTcb, aDestOffset);
			if (r != KErrNone)
				Kern::ThreadKill(aThread, EExitPanic, r, KUsbLDDKillCat);
			blockStartAddr = thisStartAddr;
			}

		ModifyTotalRxBytesAvail(-copySize);
#if defined(USBC_LDD_BUFFER_TRACE)
		iNumberofBytesRxRemain[iCurrentDrainingBufferIndex] -= copySize;
#endif
		lastEndAddr = thisStartAddr + copySize;
		if (iExtractOffset == thisPacketSize)
			{
			// Whole packet consumed; need to move on to the next packet
#if defined(USBC_LDD_BUFFER_TRACE)
			iNumberofPacketsRxRemain[iCurrentDrainingBufferIndex] -= 1;
#endif
			if (!AdvancePacket())
				{
				break;										// no more packets left
				}
			}
		} while (toDo > 0 && !isShortPacket);

	if (thisStartAddr != lastEndAddr)
		{
		TInt bytesToCopy = lastEndAddr - blockStartAddr;
		TInt r = CopyToUser(aThread, blockStartAddr, bytesToCopy, aTcb, aDestOffset);
		if (r != KErrNone)
			Kern::ThreadKill(aThread, EExitPanic, r, KUsbLDDKillCat);
		}

	// If we have transferred the requested amount of data it is still possible that
	// the next packet is a zlp which needs to be bumped over

	if (aRUS && (toDo == 0) && (iExtractOffset == 0) && (!isShortPacket) && (!IsReaderEmpty()) &&
		(PeekNextPacketSize() == 0))
		{
		// swallow a zlp
		isShortPacket = ETrue;
#if defined(USBC_LDD_BUFFER_TRACE)
		iNumberofPacketsRxRemain[iCurrentDrainingBufferIndex] -= 1;
#endif
		AdvancePacket();
		}
	aCompleteNow = isShortPacket || (((TInt)aDestOffset) == aLength) || (errorCode != KErrNone);

	FreeDrainedBuffers();

	// Use this error code to complete client read request
	return errorCode;
	}


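// Thin wrapper around Kern::ThreadBufWrite() that writes aLength bytes starting at
// aSourceAddr into the client buffer at aDestOffset, advancing aDestOffset on success.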
inline TInt TDmaBuf::CopyToUser(DThread* aThread, const TUint8* aSourceAddr,
								TInt aLength, TClientBuffer *aTcb, TUint32& aDestOffset)
	{
	TPtrC8 des(aSourceAddr, aLength);
	TInt errorCode = Kern::ThreadBufWrite(aThread, aTcb, des, aDestOffset, KChunkShiftBy0, aThread);
	if (errorCode == KErrNone)
		{
		aDestOffset += aLength;
		}
	return errorCode;
	}


inline TInt TDmaBuf::NoRxPackets() const
	{
	return iTotalRxPacketsAvail;
	}


inline void TDmaBuf::IncrementBufferIndex(TInt& aIndex)
	{
	if (++aIndex == iNumberofBuffers)
		aIndex = 0;
	}


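// Retires the buffer that has just been drained (marking it freeable and clearing its
// counters) and pops the next buffer index off the drain queue, making it the current
// draining buffer. Returns EFalse if the drain queue is empty.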
TBool TDmaBuf::NextDrainableBuffer()
	{
	TBool r = EFalse;
	if (iCurrentDrainingBufferIndex != KUsbcInvalidBufferIndex)
		{
		iCanBeFreed[iCurrentDrainingBufferIndex] = ETrue;
		iNumberofPacketsRx[iCurrentDrainingBufferIndex] = 0; // Current buffer is empty
		iNumberofBytesRx[iCurrentDrainingBufferIndex] = 0;	// Current buffer is empty

#if defined(USBC_LDD_BUFFER_TRACE)
		TUint& bytesRemain = iNumberofBytesRxRemain[iCurrentDrainingBufferIndex];
		TUint& pktsRemain = iNumberofPacketsRxRemain[iCurrentDrainingBufferIndex];
		if ((bytesRemain != 0) || (pktsRemain != 0))
			{
			Kern::Printf(
				"TDmaBuf::NextDrainableBuffer: Error: data discarded buffer=%d pkts=%d bytes=%d",
				iCurrentDrainingBufferIndex, pktsRemain, bytesRemain);
			bytesRemain = 0;
			pktsRemain = 0;
			}
#endif

		iCurrentDrainingBufferIndex = KUsbcInvalidBufferIndex;
		iCurrentPacket = KUsbcInvalidPacketIndex;
		}

	if (iDrainQueueIndex != KUsbcInvalidDrainQueueIndex)
		{
		r = ETrue;
		const TInt index = iDrainQueue[0];
		iDrainQueueIndex--;
		for (TInt i = 0; i < iNumberofBuffers; i++)
			{
			iDrainQueue[i] = iDrainQueue[i+1];
			}

#if defined(USBC_LDD_BUFFER_TRACE)
		if (index != KUsbcInvalidBufferIndex)
			iDrainingOrder++;
#endif

		iCurrentDrainingBufferIndex = index;
		iCurrentDrainingBuffer = iBuffers[index];
		iCurrentPacketIndexArray = iPacketIndex[index];
		iCurrentPacketSizeArray = iPacketSize[index];
		iCurrentPacket = 0;
		}
	return r;
	}


TInt TDmaBuf::PeekNextDrainableBuffer()
	{
	TInt r = KUsbcInvalidBufferIndex;
	if (iDrainQueueIndex != KUsbcInvalidDrainQueueIndex)
		{
		r = iDrainQueue[0];
		}
	return r;
	}


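// Looks for a buffer that is not currently marked drainable, searching round-robin from
// the one after the current filling buffer. On failure the filling index is reset to 0
// so the sequence restarts from the first buffer next time.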
TBool TDmaBuf::NextFillableBuffer()
	{
	TBool r = EFalse;
	TInt index = iCurrentFillingBufferIndex;
	IncrementBufferIndex(index);
	// the sequence will restart at 0 if a buffer can't be found this time
	iCurrentFillingBufferIndex = 0;
	for (TInt i = 0; i < iNumberofBuffers; i++)
		{
		if (!iDrainable[index])
			{
			iCurrentFillingBufferIndex = index;
			r = ETrue;
			break;
			}
		IncrementBufferIndex(index);
		}
	return r;
	}


void TDmaBuf::FreeDrainedBuffers()
	{
	for (TInt i = 0; i < iNumberofBuffers; i++)
		{
		if (iDrainable[i] && iCanBeFreed[i])
			{
			iDrainable[i] = iCanBeFreed[i] = EFalse;
			}
		}
	}


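// Reports whether the buffered Rx data contains a short packet or residue data
// (including a zlp waiting at the tail). Bulk endpoints test the residue with a
// power-of-two mask, other endpoint types with a modulo, against the max packet size.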
TBool TDmaBuf::ShortPacketExists()
	{
	// Actually, a short packet or residue data
	__KTRACE_OPT(KUSB, Kern::Printf("TDmaBuf::ShortPacketExists 1"));
	TInt index = iCurrentDrainingBufferIndex;
	TUsbcPacketArray* pktSizeArray = iCurrentPacketSizeArray;

	if (iMaxPacketSize > 0)
		{
		// No buffers available for draining
		if ((iCurrentDrainingBufferIndex == KUsbcInvalidBufferIndex) ||
			(iCurrentPacket == KUsbcInvalidPacketIndex))
			return EFalse;

		// Zlp waiting at tail
		if ((iTotalRxBytesAvail == 0) && (NoRxPackets() == 1))
			return ETrue;

		if (iEndpointType == KUsbEpTypeBulk)
			{
			const TInt mask = iMaxPacketSize - 1;
			if (iTotalRxBytesAvail & mask)
				return ETrue;

			// residue==0; this can be because
			// zlps exist, or short packets combine to n * max_packet_size
			// This means spadework
			const TInt s = iCurrentPacketSizeArray[iCurrentPacket] - iExtractOffset;
			if ((s == 0) || (s & mask))
				{
				return ETrue;
				}

			for (TInt i = 0; i < iNumberofBuffers; i++)
				{
				if (index == KUsbcInvalidBufferIndex)
					break;
				if (iDrainable[index])
					{
					const TInt packetCount = iNumberofPacketsRx[index];
					const TInt lastPacketSize = pktSizeArray[packetCount - 1];
					if ((lastPacketSize < iMaxPacketSize) || (lastPacketSize & mask))
						{
						return ETrue;
						}
					}
				index = iDrainQueue[i];
				pktSizeArray = iPacketSize[index];
				}
			}
		else
			{
			if (iTotalRxBytesAvail % iMaxPacketSize)
				return ETrue;

			// residue==0; this can be because
			// zlps exist, or short packets combine to n * max_packet_size
			// This means spadework
			const TInt s = iCurrentPacketSizeArray[iCurrentPacket] - iExtractOffset;
			if ((s == 0) || (s % iMaxPacketSize))
				{
				return ETrue;
				}

			for (TInt i = 0; i < iNumberofBuffers; i++)
				{
				if (index == KUsbcInvalidBufferIndex)
					break;
				if (iDrainable[index])
					{
					const TInt packetCount = iNumberofPacketsRx[index];
					const TInt lastPacketSize = pktSizeArray[packetCount - 1];
					if ((lastPacketSize < iMaxPacketSize) || (lastPacketSize % iMaxPacketSize))
						{
						return ETrue;
						}
					}
				index = iDrainQueue[i];
				pktSizeArray = iPacketSize[index];
				}
			}
		}

	return EFalse;
	}


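// Appends a freshly filled buffer index to the tail of the drain queue; the queue is
// drained in FIFO order by NextDrainableBuffer(). The trace build flags the (unexpected)
// case where the slot is already occupied.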
void TDmaBuf::AddToDrainQueue(TInt aBufferIndex)
	{
	if (iDrainQueue[iDrainQueueIndex + 1] != KUsbcInvalidBufferIndex)
		{
#if defined(USBC_LDD_BUFFER_TRACE)
		Kern::Printf("TDmaBuf::AddToDrainQueue: Error: invalid iDrainQueue[x]");
#endif
		}
	iDrainQueue[++iDrainQueueIndex] = aBufferIndex;
	}


#if defined(USBC_LDD_BUFFER_TRACE)
TInt TDmaBuf::NoRxPacketsAlt() const
	{
	TInt pktCount = 0;
	for (TInt i = 0; i < iNumberofBuffers; i++)
		{
		if (iDrainable[i])
			{
			pktCount += iNumberofPacketsRxRemain[i];
			}
		}
	return pktCount;
	}


TInt TDmaBuf::NoRxBytesAlt() const
	{
	TInt byteCount = 0;
	for (TInt i = 0; i < iNumberofBuffers; i++)
		{
		if (iDrainable[i])
			{
			byteCount += iNumberofBytesRxRemain[i];
			}
		}
	return byteCount;
	}
#endif


// We only store 1 transaction, no other buffering is done
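// Copies the client's Tx data from its descriptor into the DMA buffer(s) with an
// inter-thread read, spreading a transfer larger than one buffer across consecutive
// buffers. Fails with KErrInUse while buffered received data is still waiting to be read.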
TInt TDmaBuf::TxStoreData(DThread* aThread, TClientBuffer *aTcb, TInt aTxLength, TUint32 aBufferOffset)
	{
	__KTRACE_OPT(KUSB, Kern::Printf("TDmaBuf::TxStoreData 1"));
	if (!IsReaderEmpty())
		return KErrInUse;

	__KTRACE_OPT(KUSB, Kern::Printf("TDmaBuf::TxStoreData 2"));

	TInt remainTxLength = aTxLength;
	TUint32 bufferOffset = aBufferOffset;
	// Store each buffer separately
	for (TInt i = 0; (i < iNumberofBuffers) && (remainTxLength > 0); i++)
	    {
	    TUint8* logicalDest = iBuffers[i];
	    TInt xferSz = Min(remainTxLength, iBufSz);
	    TPtr8 des(logicalDest, xferSz, xferSz);
	    TInt r = Kern::ThreadBufRead(aThread, aTcb, des, bufferOffset, KChunkShiftBy0);
	    if (r != KErrNone)
	        {
	        Kern::ThreadKill(aThread, EExitPanic, r, KUsbLDDKillCat);
	        return r;
	        }
	    remainTxLength -= iBufSz;
	    bufferOffset += iBufSz;
	    }

	return KErrNone;
	}


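// Hands the single Tx buffer (address, physical address and total length) to the caller
// for transmission, refusing with KErrInUse while a Tx transfer is already active.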
TInt TDmaBuf::TxGetNextXfer(TUint8*& aBufferAddr, TInt& aTxLength, TPhysAddr& aBufferPhys)
	{
	if (iTxActive)
		return KErrInUse;

	aBufferAddr = iBuffers[0];								// only 1 tx buffer
	aBufferPhys = iBufferPhys[0];
	aTxLength = BufferTotalSize();

	return KErrNone;
	}