sl@0
|
1 |
// Copyright (c) 2002-2009 Nokia Corporation and/or its subsidiary(-ies).
|
sl@0
|
2 |
// All rights reserved.
|
sl@0
|
3 |
// This component and the accompanying materials are made available
|
sl@0
|
4 |
// under the terms of "Eclipse Public License v1.0"
|
sl@0
|
5 |
// which accompanies this distribution, and is available
|
sl@0
|
6 |
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
|
sl@0
|
7 |
//
|
sl@0
|
8 |
// Initial Contributors:
|
sl@0
|
9 |
// Nokia Corporation - initial contribution.
|
sl@0
|
10 |
//
|
sl@0
|
11 |
// Contributors:
|
sl@0
|
12 |
//
|
sl@0
|
13 |
// Description:
|
sl@0
|
14 |
// Test driver for DMA V2 framework
|
sl@0
|
15 |
//
|
sl@0
|
16 |
//
|
sl@0
|
17 |
|
sl@0
|
18 |
#include <kernel/kern_priv.h>
|
sl@0
|
19 |
#include <drivers/dma.h>
|
sl@0
|
20 |
#include "d_dma2.h"
|
sl@0
|
21 |
|
sl@0
|
22 |
// Panic category used when faulting a misbehaving client (presumably
// passed to Kern::PanicCurrentThread by code elsewhere in this file)
_LIT(KClientPanicCat, "D_DMA2");
// Name of the DFC-queue thread used for normal request completion
_LIT(KDFCThreadName,"D_DMA_DFC_THREAD");
// Name of the separate DFC-queue thread used when a request completes
// with an ISR callback
_LIT(KIsrCbDfcThreadName,"D_DMA_IsrCb_thread");
// Priority for both DFC-queue threads
const TInt KDFCThreadPriority=26;
|
sl@0
|
26 |
|
sl@0
|
27 |
class TStopwatch
|
sl@0
|
28 |
{
|
sl@0
|
29 |
public:
|
sl@0
|
30 |
TStopwatch()
|
sl@0
|
31 |
:iStart(0), iStop(0)
|
sl@0
|
32 |
{}
|
sl@0
|
33 |
|
sl@0
|
34 |
void Start()
|
sl@0
|
35 |
{iStart = NKern::FastCounter();}
|
sl@0
|
36 |
|
sl@0
|
37 |
void Stop()
|
sl@0
|
38 |
{
|
sl@0
|
39 |
iStop = NKern::FastCounter();
|
sl@0
|
40 |
|
sl@0
|
41 |
__KTRACE_OPT(KDMA, Kern::Printf(">TStopwatch::Stop FastCounter ticks: iStart=0x%lx iStop=0x%lx", iStart, iStop));
|
sl@0
|
42 |
}
|
sl@0
|
43 |
|
sl@0
|
44 |
TUint64 ReadMicroSecs() const
|
sl@0
|
45 |
{
|
sl@0
|
46 |
#ifndef __SMP__
|
sl@0
|
47 |
TUint64 diff = 0;
|
sl@0
|
48 |
if(iStart > iStop)
|
sl@0
|
49 |
{
|
sl@0
|
50 |
diff = (KMaxTUint64 - iStart) + iStop;
|
sl@0
|
51 |
}
|
sl@0
|
52 |
else
|
sl@0
|
53 |
{
|
sl@0
|
54 |
diff = iStop - iStart;
|
sl@0
|
55 |
}
|
sl@0
|
56 |
return FastCountToMicroSecs(diff);
|
sl@0
|
57 |
#else
|
sl@0
|
58 |
//TODO On SMP it is possible for the value returned from
|
sl@0
|
59 |
//NKern::FastCounter to depend on the current CPU (ie.
|
sl@0
|
60 |
//NaviEngine)
|
sl@0
|
61 |
//
|
sl@0
|
62 |
//One solution would be to tie DFC's and ISR's to the same
|
sl@0
|
63 |
//core as the client, but this would reduce the usefulness of
|
sl@0
|
64 |
//SMP testing.
|
sl@0
|
65 |
return 0;
|
sl@0
|
66 |
#endif
|
sl@0
|
67 |
}
|
sl@0
|
68 |
private:
|
sl@0
|
69 |
|
sl@0
|
70 |
TUint64 FastCountToMicroSecs(TUint64 aCount) const
|
sl@0
|
71 |
{
|
sl@0
|
72 |
const TUint64 countsPerS = NKern::FastCounterFrequency();
|
sl@0
|
73 |
|
sl@0
|
74 |
TUint64 timeuS = (aCount*1000000)/countsPerS;
|
sl@0
|
75 |
__KTRACE_OPT(KDMA, Kern::Printf(">TStopwatch::FastCountToMicroSecs FastCounter ticks: aCount=0x%lx countsPerS=0x%lx time=0x%lx", aCount, countsPerS, timeuS));
|
sl@0
|
76 |
return timeuS;
|
sl@0
|
77 |
}
|
sl@0
|
78 |
|
sl@0
|
79 |
TUint64 iStart;
|
sl@0
|
80 |
TUint64 iStop;
|
sl@0
|
81 |
};
|
sl@0
|
82 |
|
sl@0
|
83 |
//////////////////////////////////////////////////////////////////////////////
|
sl@0
|
84 |
|
sl@0
|
85 |
class DClientDmaRequest;
/**
Driver channel. Only accessible by a single client thread.

Owns the DMA channels and kernel-side request objects created on behalf
of the user-side test client, plus the shared chunk used as the source/
destination of test transfers. Channels and requests are referred to by
opaque cookies handed back to user side.
*/
class DDmaTestSession : public DLogicalChannelBase
	{
public:
	DDmaTestSession();
	virtual ~DDmaTestSession();
protected:
	// from DLogicalChannelBase
	virtual TInt DoCreate(TInt aUnit, const TDesC8* anInfo, const TVersion& aVer);
	virtual TInt Request(TInt aFunction, TAny* a1, TAny* a2);
	virtual TInt RequestUserHandle(DThread* aThread, TOwnerType aType);
private:
	TInt DoGetInfo(TAny* aInfo);

	// Channel management, keyed by driver cookie
	TInt OpenDmaChannel(TUint aPslCookie, TUint& aDriverCookie);
	TInt CloseDmaChannelByCookie(TUint aDriverCookie);
	TInt PauseDmaChannelByCookie(TUint aDriverCookie);
	TInt ResumeDmaChannelByCookie(TUint aDriverCookie);
	TInt GetChannelCapsByCookie(TUint aDriverCookie, SDmacCaps& aChannelCaps);
	TInt GetChannelCapsByCookie(TUint aDriverCookie, TDmacTestCaps& aChannelCaps);
	TInt CancelAllByCookie(TUint aDriverCookie);
	TInt IsrRedoRequestByCookie(TUint aDriverCookie,TUint32 aSrcAddr,TUint32 aDstAddr,TInt aTransferCount,TUint32 aPslRequestInfo,TBool aIsrCb);
	TInt IsQueueEmptyByCookie(TUint aDriverCookie, TBool& aQueueEmpty);
	TInt ChannelIsOpenedByCookie(TUint aDriverCookie, TBool& aChannelOpen);
	// Index-based variants operate directly on positions in iChannels
	void CloseDmaChannelByIndex(TInt aIndex);
	void CancelAllByIndex(TInt aIndex);
	TInt PauseDmaChannelByIndex(TInt aIndex);
	TInt ResumeDmaChannelByIndex(TInt aIndex);
	TInt IsrRedoRequestByIndex(TInt aIndex,TUint32 aSrcAddr,TUint32 aDstAddr,TInt aTransferCount,TUint32 aPslRequestInfo,TBool aIsrCb);
	TInt CreateSharedChunk();
	TUint OpenSharedChunkHandle();

	/**
	Creates a new kernel-side DMA request object, associated with a previously
	opened channel

	@param aChannelCookie - A channel cookie as returned by OpenDmaChannel
	@param aRequestCookie - On success will be a cookie by which the dma request can be referred to
	@param aNewCallback - If true, then a new style DMA callback will be used
	*/
	TInt CreateDmaRequest(TUint aChannelCookie, TUint& aRequestCookie, TBool aNewCallback = EFalse, TInt aMaxFragmentSizeBytes=0);

	//TODO what happens if a client closes a channel that
	//it still has dma requests associated with?

	/**
	Destroys a previously created dma request object
	*/
	TInt DestroyDmaRequestByCookie(TUint aRequestCookie);

	void DestroyDmaRequestByIndex(TInt aIndex);

	// Translate cookies back into array indices (KErrNotFound semantics
	// presumed - confirm against the definitions)
	TInt CookieToChannelIndex(TUint aDriverCookie) const;
	TInt CookieToRequestIndex(TUint aRequestCookie) const;

	// Convert chunk-relative offsets in the args into absolute addresses
	void FixupTransferArgs(TDmaTransferArgs& aTransferArgs) const;
	TInt FragmentRequest(TUint aRequestCookie, const TDmaTransferArgs& aTransferArgs, TBool aLegacy=ETrue);

	TInt QueueRequest(TUint aRequestCookie, TRequestStatus* aStatus, TCallbackRecord* aRecord, TUint64* aDurationMicroSecs);
	DClientDmaRequest* RequestFromCookie(TUint aRequestCookie) const;
	TInt RequestFragmentCount(TUint aRequestCookie);

	// Adapt v1 test info to the v2 structure for a uniform user-side API
	TDmaV2TestInfo ConvertTestInfo(const TDmaTestInfo& aOldInfo) const;
private:
	DThread* iClient;					// the single client thread allowed to use this channel
	TDynamicDfcQue* iDfcQ;				// queue for normal completion DFCs
	TDynamicDfcQue* iIsrCallbackDfcQ; // Will be used by requests which complete with an ISR callback
	static const TInt KMaxChunkSize = 8 * KMega;
	TLinAddr iChunkBase;				// kernel linear base address of the shared chunk
	DChunk* iChunk;						// shared chunk used for transfer buffers

	RPointerArray<TDmaChannel> iChannels;
	RPointerArray<DClientDmaRequest> iClientDmaReqs;
	};
|
sl@0
|
163 |
|
sl@0
|
164 |
|
sl@0
|
165 |
/**
Allows a TClientRequest to be associated with a DDmaRequest, so that a
user-side TRequestStatus can be completed - and the callback record and
transfer duration copied back - when the DMA transfer finishes.
*/
class DClientDmaRequest : public DDmaRequest
	{
public:
	/** Factory function; returns NULL on failure.
	@param aNewStyle If true use the v2 (TDmaResult) callback API */
	static DClientDmaRequest* Construct(DThread* aClient, TDfcQue* const aDfcQ, TDmaChannel& aChannel, TBool aNewStyle=EFalse, TInt aMaxTransferSize=0);
	~DClientDmaRequest();

	TInt Queue(TRequestStatus* aRequestStatus, TCallbackRecord* aRecord, TUint64* aDurationMicroSecs);
	void AddRequeArgs(const TIsrRequeArgsSet& aRequeArgSet);

	/** @return Duration of the last Start()/Stop() interval in microseconds */
	TUint64 GetDuration()
		{return iStopwatch.ReadMicroSecs();}

protected:
	TInt Create();
	/** Construct with old style callback */
	DClientDmaRequest(DThread* aClient, TDfcQue* const aDfcQ, TDmaChannel& aChannel, TInt aMaxTransferSize);

	/** Construct with new style callback */
	DClientDmaRequest(DThread* aClient, TDfcQue* const aDfcQ, TDmaChannel& aChannel, TBool aNewStyle, TInt aMaxTransferSize);

private:
	// Static trampolines registered with the DMA framework
	static void CallbackOldStyle(TResult aResult, TAny* aRequest);
	static void Callback(TUint, TDmaResult, TAny*, SDmaDesHdr*);
	static void CompleteCallback(TAny* aRequest);

	void DoCallback(TUint, TDmaResult);
	TBool RedoRequest();

	//!< Used to return a TCallbackRecord and transfer time
	TClientDataRequest2<TCallbackRecord, TUint64>* iClientDataRequest;

	DThread* const iClient;
	TDfcQue* const iDfcQ; //!< Use the DDmaTestSession's dfc queue
	TDfc iDfc;	// used to defer completion to thread context after an ISR callback

	TStopwatch iStopwatch;
	TIsrRequeArgsSet iIsrRequeArgSet;	// pending ISR re-queue parameters, may be empty
	};
|
sl@0
|
206 |
|
sl@0
|
207 |
/**
Two-phase factory for DClientDmaRequest.

@param aClient          Client thread to be signalled on completion
@param aDfcQ            DFC queue used for completion processing
@param aChannel         DMA channel the request will be queued on
@param aNewStyle        ETrue selects the v2 (TDmaResult) callback variant
@param aMaxTransferSize Maximum fragment size (0 = framework default)
@return The fully constructed request, or NULL on failure
*/
DClientDmaRequest* DClientDmaRequest::Construct(DThread* aClient, TDfcQue* const aDfcQ, TDmaChannel& aChannel, TBool aNewStyle, TInt aMaxTransferSize)
	{
	DClientDmaRequest* req = NULL;

	if(!aNewStyle)
		{
		req = new DClientDmaRequest(aClient, aDfcQ, aChannel, aMaxTransferSize);
		}
	else
		{
#ifdef DMA_APIV2
		req = new DClientDmaRequest(aClient, aDfcQ, aChannel, aNewStyle, aMaxTransferSize);
#else
		TEST_FAULT; // if a new style dma request was requested it should have been caught earlier
#endif
		}

	// Second-phase construction; discard the object if it fails
	if(req && (req->Create() != KErrNone))
		{
		delete req;
		req = NULL;
		}
	return req;
	}
|
sl@0
|
236 |
|
sl@0
|
237 |
/**
Construct with old style (TResult) callback: registers CallbackOldStyle
with the base class. The completion DFC is created unqueued; its
argument pointer is filled in later, just before it is added.
*/
DClientDmaRequest::DClientDmaRequest(DThread* aClient, TDfcQue* const aDfcQ, TDmaChannel& aChannel, TInt aMaxFragmentSize)
	:DDmaRequest(aChannel, &CallbackOldStyle, this, aMaxFragmentSize),
	iClientDataRequest(NULL),
	iClient(aClient),
	iDfcQ(aDfcQ),
	iDfc(CompleteCallback,NULL, iDfcQ, KMaxDfcPriority)
	{
	}
|
sl@0
|
245 |
#ifdef DMA_APIV2
|
sl@0
|
246 |
DClientDmaRequest::DClientDmaRequest(DThread* aClient, TDfcQue* const aDfcQ, TDmaChannel& aChannel, TBool /*aNewStyle*/, TInt aMaxFragmentSize)
|
sl@0
|
247 |
:DDmaRequest(aChannel, &Callback, this, aMaxFragmentSize),
|
sl@0
|
248 |
iClientDataRequest(NULL),
|
sl@0
|
249 |
iClient(aClient),
|
sl@0
|
250 |
iDfcQ(aDfcQ),
|
sl@0
|
251 |
iDfc(CompleteCallback,NULL, iDfcQ, KMaxDfcPriority)
|
sl@0
|
252 |
{
|
sl@0
|
253 |
}
|
sl@0
|
254 |
#endif
|
sl@0
|
255 |
|
sl@0
|
256 |
/**
Second-phase construction: allocate the client data request used to
signal the user side with the callback record and duration.
@return KErrNone on success, otherwise a system-wide error code
*/
TInt DClientDmaRequest::Create()
	{
	const TInt r = Kern::CreateClientDataRequest2(iClientDataRequest);
	return r;
	}
|
sl@0
|
260 |
|
sl@0
|
261 |
/** Release the client data request, if second-phase construction succeeded */
DClientDmaRequest::~DClientDmaRequest()
	{
	__KTRACE_OPT(KDMA, Kern::Printf(">DClientDmaRequest::~DClientDmaRequest"));

	if(iClientDataRequest != NULL)
		{
		Kern::DestroyClientRequest(iClientDataRequest);
		}
	}
|
sl@0
|
269 |
|
sl@0
|
270 |
/**
|
sl@0
|
271 |
Queue the DClientDmaRequest.
|
sl@0
|
272 |
|
sl@0
|
273 |
@param aRequestStatus Pointer to the client's request status
|
sl@0
|
274 |
@param aRecord Pointer to the user's TCallbackRecord, may be null
|
sl@0
|
275 |
@return
|
sl@0
|
276 |
-KErrInUse The client request is in use
|
sl@0
|
277 |
-KErrNone success
|
sl@0
|
278 |
*/
|
sl@0
|
279 |
TInt DClientDmaRequest::Queue(TRequestStatus* aRequestStatus, TCallbackRecord* aRecord, TUint64* aDurationMicroSecs)
|
sl@0
|
280 |
{
|
sl@0
|
281 |
__NK_ASSERT_ALWAYS(aRecord);
|
sl@0
|
282 |
__NK_ASSERT_ALWAYS(aDurationMicroSecs);
|
sl@0
|
283 |
|
sl@0
|
284 |
//erase results from last transfer
|
sl@0
|
285 |
iClientDataRequest->Data1().Reset();
|
sl@0
|
286 |
iClientDataRequest->SetDestPtr1(aRecord);
|
sl@0
|
287 |
|
sl@0
|
288 |
iClientDataRequest->SetDestPtr2(aDurationMicroSecs);
|
sl@0
|
289 |
|
sl@0
|
290 |
|
sl@0
|
291 |
TInt r = iClientDataRequest->SetStatus(aRequestStatus);
|
sl@0
|
292 |
if(r != KErrNone)
|
sl@0
|
293 |
{
|
sl@0
|
294 |
return r;
|
sl@0
|
295 |
}
|
sl@0
|
296 |
|
sl@0
|
297 |
iStopwatch.Start();
|
sl@0
|
298 |
#ifdef DMA_APIV2
|
sl@0
|
299 |
r = DDmaRequest::Queue();
|
sl@0
|
300 |
#else
|
sl@0
|
301 |
// old version of queue did not return an error code
|
sl@0
|
302 |
DDmaRequest::Queue();
|
sl@0
|
303 |
r = KErrNone;
|
sl@0
|
304 |
#endif
|
sl@0
|
305 |
|
sl@0
|
306 |
return r;
|
sl@0
|
307 |
}
|
sl@0
|
308 |
|
sl@0
|
309 |
/**
Store the set of ISR re-queue arguments to be consumed by RedoRequest()
when this request completes with an ISR callback. Overwrites any
previously stored set.
*/
void DClientDmaRequest::AddRequeArgs(const TIsrRequeArgsSet& aRequeArgSet)
	{
	iIsrRequeArgSet = aRequeArgSet;
	}
|
sl@0
|
313 |
|
sl@0
|
314 |
/**
|
sl@0
|
315 |
If a transfer complete callback in ISR context s received this will be
|
sl@0
|
316 |
called to redo the request with the first entry in the array
|
sl@0
|
317 |
|
sl@0
|
318 |
@return ETrue If the redo was successful - indicates that another callback is comming
|
sl@0
|
319 |
*/
|
sl@0
|
320 |
TBool DClientDmaRequest::RedoRequest()
|
sl@0
|
321 |
{
|
sl@0
|
322 |
TIsrRequeArgs args = iIsrRequeArgSet.GetArgs();
|
sl@0
|
323 |
const TInt r = args.Call(iChannel);
|
sl@0
|
324 |
TCallbackRecord& record = iClientDataRequest->Data1();
|
sl@0
|
325 |
record.IsrRedoResult(r);
|
sl@0
|
326 |
return (r == KErrNone);
|
sl@0
|
327 |
}
|
sl@0
|
328 |
|
sl@0
|
329 |
|
sl@0
|
330 |
/**
Calls TDmaChannel::IsrRedoRequest on aChannel
with this object's parameters.

@return The result of IsrRedoRequest, or KErrNotSupported on a v1 build
        (which should never be reached - see TEST_FAULT)
*/
TInt TIsrRequeArgs::Call(TDmaChannel& aChannel)
	{
#ifdef DMA_APIV2
	return aChannel.IsrRedoRequest(iSrcAddr, iDstAddr, iTransferCount, iPslRequestInfo, iIsrCb);
#else
	// IsrRedoRequest only exists in the v2 DMA API; reaching here on a
	// v1 build is a test harness error.
	TEST_FAULT;
	return KErrNotSupported;
#endif
	}
|
sl@0
|
343 |
|
sl@0
|
344 |
/**
|
sl@0
|
345 |
Check that both source and destination of ISR reque args will
|
sl@0
|
346 |
lie within the range specified by aStart and aSize.
|
sl@0
|
347 |
|
sl@0
|
348 |
@param aStart The linear base address of the region
|
sl@0
|
349 |
@param aSize The size of the region
|
sl@0
|
350 |
*/
|
sl@0
|
351 |
TBool TIsrRequeArgs::CheckRange(TLinAddr aStart, TUint aSize) const
|
sl@0
|
352 |
{
|
sl@0
|
353 |
TUint physStart = Epoc::LinearToPhysical(aStart);
|
sl@0
|
354 |
TEST_ASSERT(physStart != KPhysAddrInvalid);
|
sl@0
|
355 |
|
sl@0
|
356 |
TAddrRange chunk(physStart, aSize);
|
sl@0
|
357 |
TBool sourceOk = (iSrcAddr == KPhysAddrInvalid) ? ETrue : chunk.Contains(SourceRange());
|
sl@0
|
358 |
|
sl@0
|
359 |
TBool destOk = (iDstAddr == KPhysAddrInvalid) ? ETrue : chunk.Contains(DestRange());
|
sl@0
|
360 |
|
sl@0
|
361 |
return sourceOk && destOk;
|
sl@0
|
362 |
}
|
sl@0
|
363 |
|
sl@0
|
364 |
/**
Check every entry in the set against the region [aAddr, aAddr+aSize).
@return ETrue only if all entries pass TIsrRequeArgs::CheckRange
*/
TBool TIsrRequeArgsSet::CheckRange(TLinAddr aAddr, TUint aSize) const
	{
	TBool allOk = ETrue;
	for(TInt ix = 0; allOk && (ix < iCount); ++ix)
		{
		allOk = iRequeArgs[ix].CheckRange(aAddr, aSize);
		}
	return allOk;
	}
|
sl@0
|
373 |
|
sl@0
|
374 |
/**
|
sl@0
|
375 |
Translate an old style dma callback to a new-style one
|
sl@0
|
376 |
*/
|
sl@0
|
377 |
void DClientDmaRequest::CallbackOldStyle(TResult aResult, TAny* aArg)
|
sl@0
|
378 |
{
|
sl@0
|
379 |
__KTRACE_OPT(KDMA, Kern::Printf(">DClientDmaRequest::CallBackOldStyle: TResult result=%d", aResult));
|
sl@0
|
380 |
TEST_ASSERT(aResult != EBadResult);
|
sl@0
|
381 |
//translate result code
|
sl@0
|
382 |
const TDmaResult result = (aResult == EOk) ? EDmaResultOK : EDmaResultError;
|
sl@0
|
383 |
|
sl@0
|
384 |
//call the new-style callback
|
sl@0
|
385 |
Callback(EDmaCallbackRequestCompletion, result, aArg, NULL);
|
sl@0
|
386 |
}
|
sl@0
|
387 |
|
sl@0
|
388 |
|
sl@0
|
389 |
/**
The new style callback called by the DMA framework;
may be called in either thread or ISR context.

@param aCallbackType Bitmask of callback event types being reported
@param aResult       Result reported by the framework
@param aArg          The DClientDmaRequest ('this' was registered as the callback arg)
@param aHdr          Unused here
*/
void DClientDmaRequest::Callback(TUint aCallbackType, TDmaResult aResult, TAny* aArg, SDmaDesHdr* aHdr)
	{
	const TInt context = NKern::CurrentContext();
	__KTRACE_OPT(KDMA, Kern::Printf(">DClientDmaRequest::CallBack: TDmaResult result = %d, NKern::TContext context = %d", aResult, context));

	DClientDmaRequest& self = *reinterpret_cast<DClientDmaRequest*>(aArg);
	// Log the event and stop the stopwatch, whatever the context
	self.DoCallback(aCallbackType, aResult);

	// decide if callback is complete
	const TBool transferComplete = aCallbackType & EDmaCallbackRequestCompletion;
	if(!transferComplete)
		{
		return;
		}

	// If there are reque args then redo this request
	// another callback would then be expected.
	// Requests can only be re-queued in ISR context, but we
	// do not check that here as it is up to the client to get
	// it right - also, we want to test that the PIL catches this
	// error
	if(!self.iIsrRequeArgSet.IsEmpty())
		{
		// If redo call was succesful, return and wait for next call back
		if(self.RedoRequest())
			return;
		}

	// Complete the client: directly if already in thread context,
	// otherwise deferred via the request's DFC.
	switch(context)
		{
		case NKern::EThread:
			{
			CompleteCallback(aArg);
			break;
			}
		case NKern::EInterrupt:
			{
			self.iDfc.iPtr = aArg;
			self.iDfc.Add();
			break;
			}
		case NKern::EIDFC: //fall-through
		case NKern::EEscaped:
		default:
			// completion from IDFC or escaped context is not supported
			TEST_FAULT;
		}
	}
|
sl@0
|
440 |
|
sl@0
|
441 |
/**
|
sl@0
|
442 |
Log results of callback. May be called in either thread or ISR context
|
sl@0
|
443 |
*/
|
sl@0
|
444 |
void DClientDmaRequest::DoCallback(TUint aCallbackType, TDmaResult aResult)
|
sl@0
|
445 |
{
|
sl@0
|
446 |
iStopwatch.Stop(); //sucessive calls will simply over write the stop time
|
sl@0
|
447 |
|
sl@0
|
448 |
// This will always be done whether the client requested a
|
sl@0
|
449 |
// callback record or not
|
sl@0
|
450 |
TCallbackRecord& record = iClientDataRequest->Data1();
|
sl@0
|
451 |
record.ProcessCallback(aCallbackType, aResult);
|
sl@0
|
452 |
}
|
sl@0
|
453 |
|
sl@0
|
454 |
/**
Copy back the transfer duration and complete the client's request.
This function may either be called directly (thread-context callback)
or queued as a DFC (ISR-context callback).
*/
void DClientDmaRequest::CompleteCallback(TAny* aArg)
	{
	__KTRACE_OPT(KDMA, Kern::Printf(">DClientDmaRequest::CompleteCallBack thread %O", &Kern::CurrentThread()));
	__ASSERT_NOT_ISR;

	DClientDmaRequest& self = *reinterpret_cast<DClientDmaRequest*>(aArg);

	// Duration goes out via the second data slot of the client request
	self.iClientDataRequest->Data2() = self.iStopwatch.ReadMicroSecs();

	//Assert that we called SetRequestStatus on this object before
	//queueing
	__NK_ASSERT_DEBUG(self.iClientDataRequest->IsReady());

	// This is an inelegant, temporary, solution to the following problem:
	//
	// If a dma request completes with an ISR callback the test
	// framework will queue this function as a DFC which
	// will then signal the user-side client. As a consequence of
	// this the user side client may then decide to destroy this
	// request. However, untill the DMA framework's DFC has run
	// and called OnDeque() on this request, it is still considered as
	// queued. Since it is possible that this DFC could run
	// before the DMA fw's DFC, this request could get destroyed while
	// it is stil queued, triggering a PIL assertion.
	//
	// The real fix is likely be for the PIL to call the callback
	// twice, but with different arguments, once to annonunce the
	// ISR and again to announce the dequeue.
	//
	// Here we poll and wait for this request to be dequeued. Note,
	// this DFC is currently run on a separate DFC queue (iDfcQ),
	// otherwise it could get deadlocked. An alternative to polling
	// would be to use DCondVar, but that would require PIL modification

	if(NKern::CurrentThread() == self.iDfcQ->iThread)
		{
		// We are running on the session's own DFC queue (the ISR
		// completion path), so the framework's DFC may not have
		// dequeued us yet - poll until it has.
		for(;;)
			{
			// once the request has been unqueued it
			// can only be queued again by the client
			const TBool queued = __e32_atomic_load_acq32(&self.iQueued);
			if(!queued)
				break;
			__KTRACE_OPT(KDMA, Kern::Printf("Waiting for requeuest to be dequeued"));
			NKern::Sleep(10);
			}
		}
	else
		{
		// If we are on the channel's DFCQ we should be dequeued
		// already
		__NK_ASSERT_DEBUG(!__e32_atomic_load_acq32(&self.iQueued));
		}

	// We can always complete with KErrNone, the actual DMA result is
	// logged in the TCallbackRecord
	Kern::QueueRequestComplete(self.iClient, self.iClientDataRequest, KErrNone);
	}
|
sl@0
|
516 |
|
sl@0
|
517 |
// Out-of-class definition for the static const member (value is given
// in the class declaration); required when the member is odr-used.
const TInt DDmaTestSession::KMaxChunkSize;
|
sl@0
|
518 |
|
sl@0
|
519 |
/**
Policy for duplicating handles to this channel: only the owning client
thread may hold a handle, and only with thread ownership.
@return KErrNone if allowed, KErrAccessDenied otherwise
*/
TInt DDmaTestSession::RequestUserHandle(DThread* aThread, TOwnerType aType)
	{
	const TBool allowed = (aType == EOwnerThread) && (aThread == iClient);
	return allowed ? KErrNone : KErrAccessDenied;
	}
|
sl@0
|
525 |
|
sl@0
|
526 |
/**
Null out all owned resources; real initialisation is deferred to
DoCreate() (DFC queues, client pointer, shared chunk).
*/
DDmaTestSession::DDmaTestSession()
	: iClient(NULL), iDfcQ(NULL), iIsrCallbackDfcQ(NULL), iChunkBase(0), iChunk(NULL)
	{}
|
sl@0
|
529 |
|
sl@0
|
530 |
// called in thread critical section
|
sl@0
|
531 |
TInt DDmaTestSession::DoCreate(TInt /*aUnit*/, const TDesC8* aInfo, const TVersion& /*aVer*/)
|
sl@0
|
532 |
{
|
sl@0
|
533 |
__NK_ASSERT_ALWAYS(iDfcQ == NULL);
|
sl@0
|
534 |
__NK_ASSERT_ALWAYS(iIsrCallbackDfcQ == NULL);
|
sl@0
|
535 |
|
sl@0
|
536 |
TInt r = Kern::DynamicDfcQCreate(iDfcQ, KDFCThreadPriority, KDFCThreadName);
|
sl@0
|
537 |
if (r != KErrNone)
|
sl@0
|
538 |
return r;
|
sl@0
|
539 |
NKern::ThreadSetCpuAffinity((NThread*)(iDfcQ->iThread), KCpuAffinityAny);
|
sl@0
|
540 |
|
sl@0
|
541 |
r = Kern::DynamicDfcQCreate(iIsrCallbackDfcQ, KDFCThreadPriority, KIsrCbDfcThreadName);
|
sl@0
|
542 |
if (r != KErrNone)
|
sl@0
|
543 |
return r;
|
sl@0
|
544 |
NKern::ThreadSetCpuAffinity((NThread*)(iIsrCallbackDfcQ->iThread), KCpuAffinityAny);
|
sl@0
|
545 |
|
sl@0
|
546 |
iClient = &Kern::CurrentThread();
|
sl@0
|
547 |
|
sl@0
|
548 |
r = CreateSharedChunk();
|
sl@0
|
549 |
return r;
|
sl@0
|
550 |
}
|
sl@0
|
551 |
|
sl@0
|
552 |
/**
Tear down all session resources. Requests must be destroyed before the
channels they are associated with, or an assertion is triggered.
*/
DDmaTestSession::~DDmaTestSession()
	{
	// Requests first (see note above)
	while(iClientDmaReqs.Count() > 0)
		{
		DestroyDmaRequestByIndex(0);
		}
	iClientDmaReqs.Close();

	// Then the channels themselves
	while(iChannels.Count() > 0)
		{
		CloseDmaChannelByIndex(0);
		}
	iChannels.Close();

	// DFC queues may be NULL if DoCreate failed part-way through
	if (iDfcQ != NULL)
		{
		iDfcQ->Destroy();
		}
	if (iIsrCallbackDfcQ != NULL)
		{
		iIsrCallbackDfcQ->Destroy();
		}

	if(iChunk != NULL)
		{
		Kern::ChunkClose(iChunk);
		iChunk = NULL;
		}
	}
|
sl@0
|
585 |
|
sl@0
|
586 |
TInt DDmaTestSession::Request(TInt aFunction, TAny* a1, TAny* a2)
|
sl@0
|
587 |
{
|
sl@0
|
588 |
__NK_ASSERT_DEBUG(&Kern::CurrentThread() == iClient);
|
sl@0
|
589 |
|
sl@0
|
590 |
switch (aFunction)
|
sl@0
|
591 |
{
|
sl@0
|
592 |
case RDmaSession::EOpenChannel:
|
sl@0
|
593 |
{
|
sl@0
|
594 |
TUint pslCookie = (TUint)a1;
|
sl@0
|
595 |
TUint driverCookie = 0;
|
sl@0
|
596 |
TInt r = OpenDmaChannel(pslCookie, driverCookie);
|
sl@0
|
597 |
umemput32(a2, &driverCookie, sizeof(TAny*));
|
sl@0
|
598 |
return r;
|
sl@0
|
599 |
}
|
sl@0
|
600 |
case RDmaSession::ECloseChannel:
|
sl@0
|
601 |
{
|
sl@0
|
602 |
TUint driverCookie = reinterpret_cast<TUint>(a1);
|
sl@0
|
603 |
TInt r = CloseDmaChannelByCookie(driverCookie);
|
sl@0
|
604 |
return r;
|
sl@0
|
605 |
}
|
sl@0
|
606 |
case RDmaSession::EChannelCaps:
|
sl@0
|
607 |
{
|
sl@0
|
608 |
TUint driverCookie = reinterpret_cast<TUint>(a1);
|
sl@0
|
609 |
TPckgBuf<TDmacTestCaps> capsBuf;
|
sl@0
|
610 |
TInt r = GetChannelCapsByCookie(driverCookie, capsBuf());
|
sl@0
|
611 |
Kern::KUDesPut(*reinterpret_cast<TDes8*>(a2), capsBuf);
|
sl@0
|
612 |
return r;
|
sl@0
|
613 |
}
|
sl@0
|
614 |
case RDmaSession::EPauseChannel:
|
sl@0
|
615 |
{
|
sl@0
|
616 |
TUint driverCookie = reinterpret_cast<TUint>(a1);
|
sl@0
|
617 |
TInt r = PauseDmaChannelByCookie(driverCookie);
|
sl@0
|
618 |
return r;
|
sl@0
|
619 |
}
|
sl@0
|
620 |
case RDmaSession::EResumeChannel:
|
sl@0
|
621 |
{
|
sl@0
|
622 |
TUint driverCookie = reinterpret_cast<TUint>(a1);
|
sl@0
|
623 |
TInt r = ResumeDmaChannelByCookie(driverCookie);
|
sl@0
|
624 |
return r;
|
sl@0
|
625 |
}
|
sl@0
|
626 |
case RDmaSession::EFragmentCount:
|
sl@0
|
627 |
{
|
sl@0
|
628 |
TUint requestCookie = reinterpret_cast<TUint>(a1);
|
sl@0
|
629 |
TInt r = RequestFragmentCount(requestCookie);
|
sl@0
|
630 |
return r;
|
sl@0
|
631 |
}
|
sl@0
|
632 |
case RDmaSession::ERequestOpen:
|
sl@0
|
633 |
{
|
sl@0
|
634 |
RDmaSession::TRequestCreateArgs createArgs(0, EFalse, 0);
|
sl@0
|
635 |
TPckg<RDmaSession::TRequestCreateArgs> package(createArgs);
|
sl@0
|
636 |
Kern::KUDesGet(package, *reinterpret_cast<TDes8*>(a1));
|
sl@0
|
637 |
|
sl@0
|
638 |
const TUint channelCookie = createArgs.iChannelCookie;
|
sl@0
|
639 |
TUint requestCookie = 0;
|
sl@0
|
640 |
|
sl@0
|
641 |
TInt r = CreateDmaRequest(channelCookie, requestCookie, createArgs.iNewStyle, createArgs.iMaxFragmentSize);
|
sl@0
|
642 |
|
sl@0
|
643 |
umemput32(a2, &requestCookie, sizeof(TAny*));
|
sl@0
|
644 |
return r;
|
sl@0
|
645 |
}
|
sl@0
|
646 |
case RDmaSession::ERequestClose:
|
sl@0
|
647 |
{
|
sl@0
|
648 |
const TUint requestCookie = reinterpret_cast<TUint>(a1);
|
sl@0
|
649 |
return DestroyDmaRequestByCookie(requestCookie);
|
sl@0
|
650 |
}
|
sl@0
|
651 |
case RDmaSession::EFragmentLegacy:
|
sl@0
|
652 |
case RDmaSession::EFragment:
|
sl@0
|
653 |
{
|
sl@0
|
654 |
TPckgBuf<RDmaSession::TFragmentArgs> argsBuff;
|
sl@0
|
655 |
Kern::KUDesGet(argsBuff, *reinterpret_cast<TDes8*>(a1));
|
sl@0
|
656 |
const TUint requestCookie = argsBuff().iRequestCookie;
|
sl@0
|
657 |
|
sl@0
|
658 |
//must remove constness as we actually need to
|
sl@0
|
659 |
//convert the src and dst offsets to addresses
|
sl@0
|
660 |
TDmaTransferArgs& transferArgs = const_cast<TDmaTransferArgs&>(argsBuff().iTransferArgs);
|
sl@0
|
661 |
|
sl@0
|
662 |
//convert address offsets in to kernel virtual addresses
|
sl@0
|
663 |
FixupTransferArgs(transferArgs);
|
sl@0
|
664 |
|
sl@0
|
665 |
TEST_ASSERT((TAddressParms(transferArgs).CheckRange(iChunkBase, iChunk->Size())));
|
sl@0
|
666 |
|
sl@0
|
667 |
TInt r = KErrGeneral;
|
sl@0
|
668 |
|
sl@0
|
669 |
TStopwatch clock;
|
sl@0
|
670 |
clock.Start();
|
sl@0
|
671 |
switch (aFunction)
|
sl@0
|
672 |
{
|
sl@0
|
673 |
case RDmaSession::EFragmentLegacy:
|
sl@0
|
674 |
r = FragmentRequest(requestCookie, transferArgs, ETrue); break;
|
sl@0
|
675 |
case RDmaSession::EFragment:
|
sl@0
|
676 |
r = FragmentRequest(requestCookie, transferArgs, EFalse); break;
|
sl@0
|
677 |
default:
|
sl@0
|
678 |
TEST_FAULT;
|
sl@0
|
679 |
}
|
sl@0
|
680 |
clock.Stop();
|
sl@0
|
681 |
|
sl@0
|
682 |
const TUint64 time = clock.ReadMicroSecs();
|
sl@0
|
683 |
|
sl@0
|
684 |
TUint64* const timePtr = argsBuff().iDurationMicroSecs;
|
sl@0
|
685 |
if(timePtr)
|
sl@0
|
686 |
{
|
sl@0
|
687 |
umemput(timePtr, &time, sizeof(time));
|
sl@0
|
688 |
}
|
sl@0
|
689 |
return r;
|
sl@0
|
690 |
}
|
sl@0
|
691 |
case RDmaSession::EQueueRequest:
|
sl@0
|
692 |
{
|
sl@0
|
693 |
TPckgBuf<RDmaSession::TQueueArgs> argsBuff;
|
sl@0
|
694 |
Kern::KUDesGet(argsBuff, *reinterpret_cast<TDes8*>(a1));
|
sl@0
|
695 |
|
sl@0
|
696 |
//this is an Asynchronous request
|
sl@0
|
697 |
const TUint requestCookie = argsBuff().iRequestCookie;
|
sl@0
|
698 |
TRequestStatus* requestStatus = argsBuff().iStatus;
|
sl@0
|
699 |
TCallbackRecord* callbackRec = argsBuff().iCallbackRecord;
|
sl@0
|
700 |
TUint64* duration = argsBuff().iDurationMicroSecs;
|
sl@0
|
701 |
|
sl@0
|
702 |
TInt r = QueueRequest(requestCookie, requestStatus, callbackRec, duration);
|
sl@0
|
703 |
if(r != KErrNone)
|
sl@0
|
704 |
{
|
sl@0
|
705 |
Kern::RequestComplete(requestStatus, r);
|
sl@0
|
706 |
}
|
sl@0
|
707 |
return r;
|
sl@0
|
708 |
}
|
sl@0
|
709 |
case RDmaSession::EQueueRequestWithReque:
|
sl@0
|
710 |
{
|
sl@0
|
711 |
//TODO can common code with EQueueRequest be extracted?
|
sl@0
|
712 |
TPckgBuf<RDmaSession::TQueueArgsWithReque> argsBuff;
|
sl@0
|
713 |
Kern::KUDesGet(argsBuff, *reinterpret_cast<TDes8*>(a1));
|
sl@0
|
714 |
|
sl@0
|
715 |
//this is an Asynchronous request
|
sl@0
|
716 |
const TUint requestCookie = argsBuff().iRequestCookie;
|
sl@0
|
717 |
TRequestStatus* requestStatus = argsBuff().iStatus;
|
sl@0
|
718 |
TCallbackRecord* callbackRec = argsBuff().iCallbackRecord;
|
sl@0
|
719 |
TUint64* duration = argsBuff().iDurationMicroSecs;
|
sl@0
|
720 |
|
sl@0
|
721 |
TInt r = KErrNotFound;
|
sl@0
|
722 |
|
sl@0
|
723 |
DClientDmaRequest* const request = RequestFromCookie(requestCookie);
|
sl@0
|
724 |
if(request != NULL)
|
sl@0
|
725 |
{
|
sl@0
|
726 |
argsBuff().iRequeSet.Fixup(iChunkBase);
|
sl@0
|
727 |
//TODO reque args must be substituted in order to
|
sl@0
|
728 |
//check the range. The original transfer args are not
|
sl@0
|
729 |
//available when queue is called, they could
|
sl@0
|
730 |
//however be stored within DClientDmaRequest
|
sl@0
|
731 |
//TEST_ASSERT((argsBuff().iRequeSet.CheckRange(iChunkBase, iChunk->Size())));
|
sl@0
|
732 |
request->AddRequeArgs(argsBuff().iRequeSet);
|
sl@0
|
733 |
|
sl@0
|
734 |
r = QueueRequest(requestCookie, requestStatus, callbackRec, duration);
|
sl@0
|
735 |
}
|
sl@0
|
736 |
|
sl@0
|
737 |
if(r != KErrNone)
|
sl@0
|
738 |
{
|
sl@0
|
739 |
Kern::RequestComplete(requestStatus, r);
|
sl@0
|
740 |
}
|
sl@0
|
741 |
return r;
|
sl@0
|
742 |
}
|
sl@0
|
743 |
case RDmaSession::EIsrRedoRequest:
|
sl@0
|
744 |
{
|
sl@0
|
745 |
TPckgBuf<RDmaSession::TIsrRedoReqArgs> argsBuff;
|
sl@0
|
746 |
Kern::KUDesGet(argsBuff, *reinterpret_cast<TDes8*>(a1));
|
sl@0
|
747 |
|
sl@0
|
748 |
const TUint driverCookie = argsBuff().iDriverCookie;
|
sl@0
|
749 |
const TUint32 srcAddr = argsBuff().iSrcAddr;
|
sl@0
|
750 |
const TUint32 dstAddr = argsBuff().iDstAddr;
|
sl@0
|
751 |
const TInt transferCount = argsBuff().iTransferCount;
|
sl@0
|
752 |
const TUint32 pslRequestInfo = argsBuff().iPslRequestInfo;
|
sl@0
|
753 |
const TBool isrCb = argsBuff().iIsrCb;
|
sl@0
|
754 |
|
sl@0
|
755 |
TInt r = IsrRedoRequestByCookie(driverCookie,srcAddr,dstAddr,transferCount,pslRequestInfo,isrCb);
|
sl@0
|
756 |
return r;
|
sl@0
|
757 |
}
|
sl@0
|
758 |
case RDmaSession::EIsOpened:
|
sl@0
|
759 |
{
|
sl@0
|
760 |
TUint driverCookie = (TUint)a1;
|
sl@0
|
761 |
TBool channelOpen = EFalse;;
|
sl@0
|
762 |
TInt r = ChannelIsOpenedByCookie(driverCookie,channelOpen);
|
sl@0
|
763 |
umemput32(a2, &channelOpen, sizeof(TAny*));
|
sl@0
|
764 |
return r;
|
sl@0
|
765 |
}
|
sl@0
|
766 |
case RDmaSession::EIsQueueEmpty:
|
sl@0
|
767 |
{
|
sl@0
|
768 |
TUint driverCookie = (TUint)a1;
|
sl@0
|
769 |
TBool queueEmpty = EFalse;;
|
sl@0
|
770 |
TInt r = IsQueueEmptyByCookie(driverCookie,queueEmpty);
|
sl@0
|
771 |
umemput32(a2, &queueEmpty, sizeof(TAny*));
|
sl@0
|
772 |
return r;
|
sl@0
|
773 |
}
|
sl@0
|
774 |
case RDmaSession::ECancelAllChannel:
|
sl@0
|
775 |
{
|
sl@0
|
776 |
TUint driverCookie = reinterpret_cast<TUint>(a1);
|
sl@0
|
777 |
TInt r = CancelAllByCookie(driverCookie);
|
sl@0
|
778 |
return r;
|
sl@0
|
779 |
}
|
sl@0
|
780 |
case RDmaSession::EOpenSharedChunk:
|
sl@0
|
781 |
{
|
sl@0
|
782 |
return OpenSharedChunkHandle();
|
sl@0
|
783 |
}
|
sl@0
|
784 |
case RDmaSession::EGetTestInfo:
|
sl@0
|
785 |
{
|
sl@0
|
786 |
#ifdef DMA_APIV2
|
sl@0
|
787 |
TPckgC<TDmaV2TestInfo> package(DmaTestInfoV2());
|
sl@0
|
788 |
#else
|
sl@0
|
789 |
TPckgC<TDmaV2TestInfo> package(ConvertTestInfo(DmaTestInfo()));
|
sl@0
|
790 |
#endif
|
sl@0
|
791 |
Kern::KUDesPut(*reinterpret_cast<TDes8*>(a1), package);
|
sl@0
|
792 |
return KErrNone;
|
sl@0
|
793 |
}
|
sl@0
|
794 |
default:
|
sl@0
|
795 |
Kern::PanicCurrentThread(KClientPanicCat, __LINE__);
|
sl@0
|
796 |
return KErrGeneral;
|
sl@0
|
797 |
}
|
sl@0
|
798 |
}
|
sl@0
|
799 |
|
sl@0
|
800 |
// Open the DMA channel identified by the PSL cookie aPslCookie and, on
// success, record it in iChannels and return its address to the client as
// an opaque driver-side cookie via aDriverCookie.
// Returns KErrNone, KErrNoMemory if the channel could not be recorded, or
// the error from TDmaChannel::Open.
TInt DDmaTestSession::OpenDmaChannel(TUint aPslCookie, TUint& aDriverCookie )
	{
	TDmaChannel::SCreateInfo info;
	info.iCookie = aPslCookie;
	info.iDfcQ = iDfcQ;
	info.iDfcPriority = 3;
	info.iDesCount = 128;

	TDmaChannel* channel = NULL;

	//cs so thread can't be killed between
	//opening channel and adding to array
	NKern::ThreadEnterCS();
	TInt r = TDmaChannel::Open(info, channel);
	if(KErrNone == r)
		{
		__NK_ASSERT_ALWAYS(channel);

		__KTRACE_OPT(KDMA, Kern::Printf("OpenDmaChannel: channel@ 0x%08x", channel));


		TInt err = iChannels.Append(channel);
		if(KErrNone == err)
			{
			// The channel pointer itself doubles as the client-visible cookie.
			aDriverCookie = reinterpret_cast<TUint>(channel);
			}
		else
			{
			// Could not record the channel: close it again so it is not leaked.
			channel->Close();
			r = KErrNoMemory;
			}
		}
	NKern::ThreadLeaveCS();

	return r;
	}
|
sl@0
|
836 |
|
sl@0
|
837 |
// Translate an opaque client cookie (really a TDmaChannel*) into its index
// in iChannels. Returns a negative error code if the cookie is unknown.
TInt DDmaTestSession::CookieToChannelIndex(TUint aDriverCookie) const
	{
	TDmaChannel* const channelPtr = reinterpret_cast<TDmaChannel*>(aDriverCookie);
	const TInt index = iChannels.Find(channelPtr);
	if(index >= 0)
		{
		return index;
		}
	__KTRACE_OPT(KDMA, Kern::Printf("CookieToChannelIndex: cookie 0x%08x not found!", aDriverCookie));
	return index;
	}
|
sl@0
|
847 |
|
sl@0
|
848 |
// Translate an opaque client cookie (really a DClientDmaRequest*) into its
// index in iClientDmaReqs. Returns a negative error code if unknown.
TInt DDmaTestSession::CookieToRequestIndex(TUint aRequestCookie) const
	{
	DClientDmaRequest* const reqPtr = reinterpret_cast<DClientDmaRequest*>(aRequestCookie);
	const TInt index = iClientDmaReqs.Find(reqPtr);
	if(index >= 0)
		{
		return index;
		}
	__KTRACE_OPT(KDMA, Kern::Printf("CookieToRequestIndex: cookie 0x%08x not found!", aRequestCookie));
	return index;
	}
|
sl@0
|
858 |
|
sl@0
|
859 |
// Remove the channel at aIndex from iChannels and close it. The remove
// happens before the close so the cookie becomes invalid atomically with
// the channel's destruction (both under a critical section).
void DDmaTestSession::CloseDmaChannelByIndex(TInt aIndex)
	{
	__KTRACE_OPT(KDMA, Kern::Printf("CloseDmaChannelByIndex: %d", aIndex));
	__NK_ASSERT_DEBUG(aIndex < iChannels.Count());
	// cs so client thread can't be killed between removing channel from
	// array and closing it.
	NKern::ThreadEnterCS();
	TDmaChannel* channel = iChannels[aIndex];
	iChannels.Remove(aIndex);
	channel->Close();
	NKern::ThreadLeaveCS();
	}
|
sl@0
|
871 |
|
sl@0
|
872 |
// Resolve the client cookie and close the corresponding DMA channel.
// Returns KErrNotFound if the cookie does not name an open channel.
TInt DDmaTestSession::CloseDmaChannelByCookie(TUint aDriverCookie)
	{
	__KTRACE_OPT(KDMA, Kern::Printf("CloseDmaChannelByCookie: 0x%08x", aDriverCookie));
	const TInt index = CookieToChannelIndex(aDriverCookie);
	if(index < 0)
		{
		return KErrNotFound;
		}
	CloseDmaChannelByIndex(index);
	return KErrNone;
	}
|
sl@0
|
887 |
|
sl@0
|
888 |
// Resolve the client cookie and cancel all queued requests on the
// corresponding channel. Returns KErrNotFound for a stale cookie.
TInt DDmaTestSession::CancelAllByCookie(TUint aDriverCookie)
	{
	__KTRACE_OPT(KDMA, Kern::Printf("CancelAllByCookie: 0x%08x", aDriverCookie));
	const TInt index = CookieToChannelIndex(aDriverCookie);
	if(index < 0)
		{
		return KErrNotFound;
		}
	CancelAllByIndex(index);
	return KErrNone;
	}
|
sl@0
|
903 |
|
sl@0
|
904 |
// Cancel every queued request on the channel at aIndex.
//
// Bug fix: the previous version also called iChannels.Remove(aIndex) —
// apparently copy-pasted from CloseDmaChannelByIndex. Removing the entry
// here invalidates the client's cookie without closing the channel, so
// every later per-channel call (IsOpened, Close, ...) would fail with
// KErrNotFound and the still-open channel would leak. Cancelling must
// leave the channel registered and usable.
void DDmaTestSession::CancelAllByIndex(TInt aIndex)
	{
	__KTRACE_OPT(KDMA, Kern::Printf("CancelAllByIndex: %d", aIndex));
	__NK_ASSERT_DEBUG(aIndex < iChannels.Count());

	TDmaChannel* channel = iChannels[aIndex];
	channel->CancelAll();
	}
|
sl@0
|
913 |
|
sl@0
|
914 |
// Pause the channel at aIndex. Pause() exists only in the v2 DMA API, so
// a v1 build reports KErrNotSupported.
TInt DDmaTestSession::PauseDmaChannelByIndex(TInt aIndex)
	{
	__KTRACE_OPT(KDMA, Kern::Printf("PauseDmaChannelByIndex: %d", aIndex));
	__NK_ASSERT_DEBUG(aIndex < iChannels.Count());

#ifdef DMA_APIV2
	TDmaChannel* channel = iChannels[aIndex];
	return channel->Pause();
#else
	return KErrNotSupported;
#endif
	}
|
sl@0
|
926 |
|
sl@0
|
927 |
// Resolve the client cookie and pause the corresponding channel.
// Returns KErrNotFound for a stale cookie, otherwise the pause result.
TInt DDmaTestSession::PauseDmaChannelByCookie(TUint aDriverCookie)
	{
	__KTRACE_OPT(KDMA, Kern::Printf("PauseDmaChannelByCookie: 0x%08x", aDriverCookie));
	const TInt index = CookieToChannelIndex(aDriverCookie);
	if(index < 0)
		{
		return KErrNotFound;
		}
	return PauseDmaChannelByIndex(index);
	}
|
sl@0
|
942 |
|
sl@0
|
943 |
// Resume the (paused) channel at aIndex. Resume() exists only in the v2
// DMA API, so a v1 build reports KErrNotSupported.
TInt DDmaTestSession::ResumeDmaChannelByIndex(TInt aIndex)
	{
	__KTRACE_OPT(KDMA, Kern::Printf("ResumeDmaChannelByIndex: %d", aIndex));
	__NK_ASSERT_DEBUG(aIndex < iChannels.Count());

#ifdef DMA_APIV2
	TDmaChannel* channel = iChannels[aIndex];
	return channel->Resume();
#else
	return KErrNotSupported;
#endif
	}
|
sl@0
|
955 |
|
sl@0
|
956 |
// Resolve the client cookie and resume the corresponding channel.
// Returns KErrNotFound for a stale cookie, otherwise the resume result.
TInt DDmaTestSession::ResumeDmaChannelByCookie(TUint aDriverCookie)
	{
	__KTRACE_OPT(KDMA, Kern::Printf("ResumeDmaChannelByCookie: 0x%08x", aDriverCookie));
	const TInt index = CookieToChannelIndex(aDriverCookie);
	if(index < 0)
		{
		return KErrNotFound;
		}
	return ResumeDmaChannelByIndex(index);
	}
|
sl@0
|
971 |
|
sl@0
|
972 |
// Resolve the client cookie and ask the channel to repeat its last
// ISR-initiated transfer with the given (possibly modified) parameters.
// Returns KErrNotFound for a stale cookie.
TInt DDmaTestSession::IsrRedoRequestByCookie(TUint aDriverCookie,TUint32 aSrcAddr,TUint32 aDstAddr,TInt aTransferCount,TUint32 aPslRequestInfo,TBool aIsrCb)
	{
	__KTRACE_OPT(KDMA, Kern::Printf("IsrRedoRequestByCookie: 0x%08x", aDriverCookie));
	const TInt index = CookieToChannelIndex(aDriverCookie);
	if(index < 0)
		{
		return KErrNotFound;
		}
	return IsrRedoRequestByIndex(index,aSrcAddr,aDstAddr,aTransferCount,aPslRequestInfo,aIsrCb);
	}
|
sl@0
|
987 |
|
sl@0
|
988 |
// Forward an ISR-redo request to the channel at aIndex.
// IsrRedoRequest() exists only in the v2 DMA API, so a v1 build reports
// KErrNotSupported.
TInt DDmaTestSession::IsrRedoRequestByIndex(TInt aIndex,TUint32 aSrcAddr,TUint32 aDstAddr,TInt aTransferCount,TUint32 aPslRequestInfo,TBool aIsrCb)
	{
	__KTRACE_OPT(KDMA, Kern::Printf("IsrRedoRequestByIndex: %d", aIndex));
	__NK_ASSERT_DEBUG(aIndex < iChannels.Count());

#ifdef DMA_APIV2
	TDmaChannel* channel = iChannels[aIndex];
	return channel->IsrRedoRequest(aSrcAddr,aDstAddr,aTransferCount,aPslRequestInfo,aIsrCb);
#else
	return KErrNotSupported;
#endif
	}
|
sl@0
|
1000 |
|
sl@0
|
1001 |
/**
|
sl@0
|
1002 |
aChannelCaps will be set to "NULL" values
|
sl@0
|
1003 |
*/
|
sl@0
|
1004 |
TInt DDmaTestSession::GetChannelCapsByCookie(TUint aDriverCookie, TDmacTestCaps& aChannelCaps)
|
sl@0
|
1005 |
{
|
sl@0
|
1006 |
SDmacCaps caps = {0,}; //initialise with NULL values
|
sl@0
|
1007 |
TInt r = GetChannelCapsByCookie(aDriverCookie, caps);
|
sl@0
|
1008 |
|
sl@0
|
1009 |
if(r == KErrNotSupported)
|
sl@0
|
1010 |
{
|
sl@0
|
1011 |
//If we can not query caps it means
|
sl@0
|
1012 |
//that we are using the v1 driver
|
sl@0
|
1013 |
//we construct a empty TDmacTestCaps
|
sl@0
|
1014 |
//but with an iPILVersion of 1
|
sl@0
|
1015 |
const TDmacTestCaps nullCapsV1(caps, 1);
|
sl@0
|
1016 |
aChannelCaps = nullCapsV1;
|
sl@0
|
1017 |
r = KErrNone;
|
sl@0
|
1018 |
}
|
sl@0
|
1019 |
else if(r == KErrNone)
|
sl@0
|
1020 |
{
|
sl@0
|
1021 |
const TDmacTestCaps capsV2(caps, 2);
|
sl@0
|
1022 |
aChannelCaps = capsV2;
|
sl@0
|
1023 |
}
|
sl@0
|
1024 |
|
sl@0
|
1025 |
return r;
|
sl@0
|
1026 |
}
|
sl@0
|
1027 |
|
sl@0
|
1028 |
/**
Will return the capabilities of the DMA channel.
Querying SDmacCaps is not possible on V1 of the DMA framework.
In that case an error of KErrNotSupported will be returned
*/
TInt DDmaTestSession::GetChannelCapsByCookie(TUint aDriverCookie, SDmacCaps& aChannelCaps)
	{
	__KTRACE_OPT(KDMA, Kern::Printf("GetChannelCapsByCookie: 0x%08x", aDriverCookie));
	const TInt index = CookieToChannelIndex(aDriverCookie);
	if(index >= 0)
		{
#ifdef DMA_APIV2
		// v2 API: channel can report its controller capabilities directly.
		aChannelCaps = iChannels[index]->DmacCaps();
		return KErrNone;
#else
		// v1 API has no DmacCaps(); caller treats this as "v1 driver".
		return KErrNotSupported;
#endif
		}
	else
		{
		return KErrNotFound;
		}
	}
|
sl@0
|
1051 |
|
sl@0
|
1052 |
// Resolve the client cookie and report via aQueueEmpty whether the
// channel's request queue is empty. Returns KErrNotFound for a stale cookie.
TInt DDmaTestSession::IsQueueEmptyByCookie(TUint aDriverCookie, TBool& aQueueEmpty)
	{
	__KTRACE_OPT(KDMA, Kern::Printf("IsQueueEmptyByCookie: 0x%08x", aDriverCookie));
	const TInt index = CookieToChannelIndex(aDriverCookie);
	if(index < 0)
		{
		return KErrNotFound;
		}
	aQueueEmpty = iChannels[index]->IsQueueEmpty();
	return KErrNone;
	}
|
sl@0
|
1067 |
|
sl@0
|
1068 |
// Resolve the client cookie and report via aChannelOpen whether the
// channel is open. Returns KErrNotFound for a stale cookie.
TInt DDmaTestSession::ChannelIsOpenedByCookie(TUint aDriverCookie, TBool& aChannelOpen)
	{
	__KTRACE_OPT(KDMA, Kern::Printf("ChannelIsOpenedByCookie: 0x%08x", aDriverCookie));
	const TInt index = CookieToChannelIndex(aDriverCookie);
	if(index < 0)
		{
		return KErrNotFound;
		}
	aChannelOpen = iChannels[index]->IsOpened();
	return KErrNone;
	}
|
sl@0
|
1083 |
|
sl@0
|
1084 |
// Create a DClientDmaRequest bound to the channel named by aChannelCookie,
// record it in iClientDmaReqs, and hand its address back to the client as
// an opaque cookie via aRequestCookie.
// aNewCallback selects the v2-style callback signature (v2 builds only);
// aMaxFragmentSizeBytes bounds the size of each transfer fragment.
// Returns KErrNone, KErrNoMemory, KErrNotSupported (v1 build with
// aNewCallback set), or a negative lookup error for a bad channel cookie.
TInt DDmaTestSession::CreateDmaRequest(TUint aChannelCookie, TUint& aRequestCookie, TBool aNewCallback, TInt aMaxFragmentSizeBytes)
	{
#ifndef DMA_APIV2
	// v1 framework has no "new style" callback support.
	if(aNewCallback)
		return KErrNotSupported;
#endif

	TInt channelIndex = CookieToChannelIndex(aChannelCookie);
	if(channelIndex < 0)
		return channelIndex;

	// Critical section: the client thread must not die between constructing
	// the request and recording it, or the request would leak.
	NKern::ThreadEnterCS();
	DClientDmaRequest* request = DClientDmaRequest::Construct(iClient, iIsrCallbackDfcQ, *iChannels[channelIndex], aNewCallback, aMaxFragmentSizeBytes);
	if(request == NULL)
		{
		NKern::ThreadLeaveCS();
		return KErrNoMemory;
		}

	TInt r = iClientDmaReqs.Append(request);
	if(r == KErrNone)
		{
		// The request pointer itself doubles as the client-visible cookie.
		aRequestCookie = reinterpret_cast<TUint>(request);
		}
	else
		{
		// Could not record the request: destroy it so it is not leaked.
		delete request;
		}
	NKern::ThreadLeaveCS();

	return r;
	}
|
sl@0
|
1116 |
|
sl@0
|
1117 |
// Resolve the client cookie and destroy the corresponding request.
// A stale cookie yields the (negative) lookup error.
TInt DDmaTestSession::DestroyDmaRequestByCookie(TUint aRequestCookie)
	{
	const TInt index = CookieToRequestIndex(aRequestCookie);
	if(index < 0)
		{
		return index;
		}
	DestroyDmaRequestByIndex(index);
	return KErrNone;
	}
|
sl@0
|
1127 |
|
sl@0
|
1128 |
// Remove the request at aIndex from iClientDmaReqs and delete it. Done
// under a critical section so the client thread cannot be killed between
// the remove and the delete (which would leak the request).
void DDmaTestSession::DestroyDmaRequestByIndex(TInt aIndex)
	{
	__KTRACE_OPT(KDMA, Kern::Printf("DestroyDmaRequestByIndex: %d", aIndex));
	__NK_ASSERT_DEBUG(aIndex < iClientDmaReqs.Count());
	NKern::ThreadEnterCS();

	DClientDmaRequest* request = iClientDmaReqs[aIndex];
	iClientDmaReqs.Remove(aIndex);
	delete request;

	NKern::ThreadLeaveCS();
	}
|
sl@0
|
1140 |
|
sl@0
|
1141 |
// Create the shared chunk used as the source/destination buffer for DMA
// test transfers, commit KMaxChunkSize of physically contiguous memory to
// it, and store the chunk pointer (iChunk) and its kernel linear base
// address (iChunkBase) for later use.
// Returns KErrNone or the error from chunk creation/commit.
TInt DDmaTestSession::CreateSharedChunk()
	{
	// Enter critical section so we can't die and leak the objects we are creating
	// I.e. the TChunkCleanup and DChunk (Shared Chunk)
	NKern::ThreadEnterCS();

	// Create the chunk
	TChunkCreateInfo info;
	info.iType = TChunkCreateInfo::ESharedKernelSingle;
	info.iMaxSize = KMaxChunkSize;
	// Fully blocking (uncached) mapping, user R/W - suitable for DMA test buffers.
	info.iMapAttr = EMapAttrFullyBlocking | EMapAttrUserRw;
	info.iOwnsMemory = ETrue;
	info.iDestroyedDfc = NULL;

	DChunk* chunk;
	TUint32 mapAttr;
	TInt r = Kern::ChunkCreate(info, chunk, iChunkBase, mapAttr);
	if(r!=KErrNone)
		{
		NKern::ThreadLeaveCS();
		return r;
		}

	// Map our device's memory into the chunk (at offset 0)
	TUint32 physicalAddr;
	r = Kern::ChunkCommitContiguous(chunk,0,KMaxChunkSize, physicalAddr);
	if(r!=KErrNone)
		{
		// Commit failed so tidy-up...
		// NOTE(review): iChunkBase keeps the address of the now-closed chunk
		// on this path - callers appear to treat a non-KErrNone return as
		// fatal, but confirm nothing reads iChunkBase afterwards.
		Kern::ChunkClose(chunk);
		}
	else
		{
		iChunk = chunk;
		}

	// Can leave critical section now that we have saved pointers to created objects
	NKern::ThreadLeaveCS();

	return r;
	}
|
sl@0
|
1182 |
|
sl@0
|
1183 |
// Create a handle to the shared chunk for the current process and return it.
// NOTE(review): Kern::MakeHandleAndOpen returns a negative TInt on failure,
// which is returned here through a TUint - the caller must interpret the
// value as signed to detect errors. Confirm the user side does so.
TUint DDmaTestSession::OpenSharedChunkHandle()
	{
	NKern::ThreadEnterCS();
	const TInt r = Kern::MakeHandleAndOpen(NULL, iChunk);
	NKern::ThreadLeaveCS();
	return r;
	}
|
sl@0
|
1190 |
|
sl@0
|
1191 |
// Client-side transfer addresses are offsets into the shared chunk;
// rebase both endpoints into absolute addresses by adding the chunk base.
void DDmaTestSession::FixupTransferArgs(TDmaTransferArgs& aTransferArgs) const
	{
	aTransferArgs.iDstConfig.iAddr += iChunkBase;
	aTransferArgs.iSrcConfig.iAddr += iChunkBase;
	}
|
sl@0
|
1196 |
|
sl@0
|
1197 |
#ifndef DMA_APIV2
// v1 fallback: DDmaRequest has no FragmentCount() accessor, so count the
// fragments by walking the request's descriptor-header list.
static TInt FragmentCount(DDmaRequest* aRequest)
	{
	TInt count = 0;
	for (SDmaDesHdr* pH = aRequest->iFirstHdr; pH != NULL; pH = pH->iNext)
		count++;
	return count;
	}
#endif
|
sl@0
|
1206 |
|
sl@0
|
1207 |
// Report how many fragments the request named by aRequestCookie has been
// split into, or a negative lookup error for a stale cookie.
TInt DDmaTestSession::RequestFragmentCount(TUint aRequestCookie)
	{
	TInt requestIndex = CookieToRequestIndex(aRequestCookie);
	if(requestIndex < 0)
		return requestIndex;
#ifdef DMA_APIV2
	// v2 API exposes the count directly.
	TInt r = iClientDmaReqs[requestIndex]->FragmentCount();
#else
	// v1 API does not; walk the descriptor list (see static FragmentCount).
	TInt r = FragmentCount(iClientDmaReqs[requestIndex]);
#endif

	return r;
	}
|
sl@0
|
1220 |
|
sl@0
|
1221 |
// Fragment the request named by aRequestCookie according to aTransferArgs.
// aLegacy selects the old (v1-style) Fragment overload; otherwise the
// v2 TDmaTransferArgs overload is used (KErrNotSupported on a v1 build).
// Returns a negative lookup error for a stale cookie, or the Fragment result.
TInt DDmaTestSession::FragmentRequest(TUint aRequestCookie, const TDmaTransferArgs& aTransferArgs, TBool aLegacy)
	{
	__KTRACE_OPT(KDMA, Kern::Printf(">FragmentRequest: cookie=0x%08x, legacy=%d", aRequestCookie, aLegacy));
	TInt requestIndex = CookieToRequestIndex(aRequestCookie);
	if(requestIndex < 0)
		return requestIndex;

	TInt r = KErrNotSupported;
	if(aLegacy)
		{
		// TODO we can extract the required info from the struct to
		// set flags
		// Legacy path always describes a memory-to-memory transfer with
		// incrementing addresses on both sides.
		TUint flags = KDmaMemSrc | KDmaIncSrc | KDmaMemDest | KDmaIncDest;

		const TUint src = aTransferArgs.iSrcConfig.iAddr;
		const TUint dst = aTransferArgs.iDstConfig.iAddr;
		r = iClientDmaReqs[requestIndex]->Fragment(src, dst, aTransferArgs.iTransferCount, flags, NULL);
		}
	else
		{
#ifdef DMA_APIV2
		r = iClientDmaReqs[requestIndex]->Fragment(aTransferArgs);
#else
		r = KErrNotSupported;
#endif
		}
	return r;
	}
|
sl@0
|
1249 |
|
sl@0
|
1250 |
/**
|
sl@0
|
1251 |
Queue the request refered to by aRequestCookie
|
sl@0
|
1252 |
|
sl@0
|
1253 |
@param aRequestCookie Client identifier for the DDmaRequest
|
sl@0
|
1254 |
@param aStatus Pointer to the client's TRequestStatus
|
sl@0
|
1255 |
@param aRecord Pointer to the client's TCallbackRecord
|
sl@0
|
1256 |
@return
|
sl@0
|
1257 |
- KErrNotFound - aRequestCookie was invalid
|
sl@0
|
1258 |
- KErrNone - Success
|
sl@0
|
1259 |
*/
|
sl@0
|
1260 |
TInt DDmaTestSession::QueueRequest(TUint aRequestCookie, TRequestStatus* aStatus, TCallbackRecord* aRecord, TUint64* aDurationMicroSecs)
|
sl@0
|
1261 |
{
|
sl@0
|
1262 |
__KTRACE_OPT(KDMA, Kern::Printf(">QueueRequest: 0x%08x", aRequestCookie));
|
sl@0
|
1263 |
|
sl@0
|
1264 |
DClientDmaRequest* request = RequestFromCookie(aRequestCookie);
|
sl@0
|
1265 |
if(request == NULL)
|
sl@0
|
1266 |
return KErrNotFound;
|
sl@0
|
1267 |
|
sl@0
|
1268 |
return request->Queue(aStatus, aRecord, aDurationMicroSecs);
|
sl@0
|
1269 |
}
|
sl@0
|
1270 |
|
sl@0
|
1271 |
// Map a client cookie onto the owning DClientDmaRequest, or NULL if the
// cookie does not name a live request.
DClientDmaRequest* DDmaTestSession::RequestFromCookie(TUint aRequestCookie) const
	{
	const TInt index = CookieToRequestIndex(aRequestCookie);
	return (index < 0) ? NULL : iClientDmaReqs[index];
	}
|
sl@0
|
1279 |
|
sl@0
|
1280 |
TDmaV2TestInfo DDmaTestSession::ConvertTestInfo(const TDmaTestInfo& aOldInfo) const
|
sl@0
|
1281 |
{
|
sl@0
|
1282 |
TDmaV2TestInfo newInfo;
|
sl@0
|
1283 |
newInfo.iMaxTransferSize = aOldInfo.iMaxTransferSize;
|
sl@0
|
1284 |
newInfo.iMemAlignMask = aOldInfo.iMemAlignMask;
|
sl@0
|
1285 |
newInfo.iMemMemPslInfo = aOldInfo.iMemMemPslInfo;
|
sl@0
|
1286 |
|
sl@0
|
1287 |
newInfo.iMaxSbChannels = aOldInfo.iMaxSbChannels;
|
sl@0
|
1288 |
for(TInt i=0; i<aOldInfo.iMaxSbChannels; i++)
|
sl@0
|
1289 |
newInfo.iSbChannels[i] = aOldInfo.iSbChannels[i];
|
sl@0
|
1290 |
|
sl@0
|
1291 |
newInfo.iMaxDbChannels = aOldInfo.iMaxDbChannels;
|
sl@0
|
1292 |
for(TInt i=0; i<aOldInfo.iMaxDbChannels; i++)
|
sl@0
|
1293 |
newInfo.iDbChannels[i] = aOldInfo.iDbChannels[i];
|
sl@0
|
1294 |
|
sl@0
|
1295 |
newInfo.iMaxSgChannels = aOldInfo.iMaxSgChannels;
|
sl@0
|
1296 |
for(TInt i=0; i<aOldInfo.iMaxSgChannels; i++)
|
sl@0
|
1297 |
newInfo.iSgChannels[i] = aOldInfo.iSgChannels[i];
|
sl@0
|
1298 |
|
sl@0
|
1299 |
//TODO will want to add initialisation for Asym channels
|
sl@0
|
1300 |
//when these are available
|
sl@0
|
1301 |
|
sl@0
|
1302 |
return newInfo;
|
sl@0
|
1303 |
}
|
sl@0
|
1304 |
//////////////////////////////////////////////////////////////////////////////
|
sl@0
|
1305 |
|
sl@0
|
1306 |
// Logical-device factory for the DMA test LDD: installs the driver name
// and creates a DDmaTestSession per client open.
class DDmaTestFactory : public DLogicalDevice
	{
public:
	DDmaTestFactory();
	// from DLogicalDevice
	virtual ~DDmaTestFactory()
		{
		__KTRACE_OPT(KDMA, Kern::Printf(">DDmaTestFactory::~DDmaTestFactory"));
		}
	virtual TInt Install();
	virtual void GetCaps(TDes8& aDes) const;
	virtual TInt Create(DLogicalChannelBase*& aChannel);
	};
|
sl@0
|
1319 |
|
sl@0
|
1320 |
|
sl@0
|
1321 |
// Set up the device version and parse mask (units allowed, no device info,
// no physical device required).
DDmaTestFactory::DDmaTestFactory()
    {
    iVersion = TestDmaLddVersion();
    iParseMask = KDeviceAllowUnit;							// no info, no PDD
    // iUnitsMask = 0;										// Only one thing
    }
|
sl@0
|
1327 |
|
sl@0
|
1328 |
|
sl@0
|
1329 |
// Factory hook: allocate a fresh session object for each client open.
TInt DDmaTestFactory::Create(DLogicalChannelBase*& aChannel)
    {
    DDmaTestSession* const session = new DDmaTestSession;
    aChannel = session;
    return (session != NULL) ? KErrNone : KErrNoMemory;
    }
|
sl@0
|
1334 |
|
sl@0
|
1335 |
|
sl@0
|
1336 |
// Register the name user-side code passes to RBusLogicalChannel to find
// this driver.
TInt DDmaTestFactory::Install()
    {
    return SetName(&KTestDmaLddName);
    }
|
sl@0
|
1340 |
|
sl@0
|
1341 |
|
sl@0
|
1342 |
// No device capabilities are reported by this test driver.
void DDmaTestFactory::GetCaps(TDes8& /*aDes*/) const
    {
    }
|
sl@0
|
1345 |
|
sl@0
|
1346 |
//////////////////////////////////////////////////////////////////////////////
|
sl@0
|
1347 |
|
sl@0
|
1348 |
// Standard LDD entry point: the kernel calls this to obtain the factory
// object when the driver DLL is loaded.
DECLARE_STANDARD_LDD()
	{
	return new DDmaTestFactory;
	}
|