1.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
1.2 +++ b/os/kernelhwsrv/kerneltest/e32test/dma/d_dma.cpp Fri Jun 15 03:10:57 2012 +0200
1.3 @@ -0,0 +1,474 @@
1.4 +// Copyright (c) 2002-2009 Nokia Corporation and/or its subsidiary(-ies).
1.5 +// All rights reserved.
1.6 +// This component and the accompanying materials are made available
1.7 +// under the terms of the License "Eclipse Public License v1.0"
1.8 +// which accompanies this distribution, and is available
1.9 +// at the URL "http://www.eclipse.org/legal/epl-v10.html".
1.10 +//
1.11 +// Initial Contributors:
1.12 +// Nokia Corporation - initial contribution.
1.13 +//
1.14 +// Contributors:
1.15 +//
1.16 +// Description:
1.17 +// e32test\dma\d_dma.cpp
1.18 +//
1.19 +//
1.20 +
1.21 +#include "platform.h"
1.22 +#include <kernel/kern_priv.h>
1.23 +#include <drivers/dma.h>
1.24 +#include "d_dma.h"
1.25 +
1.26 +_LIT(KClientPanicCat, "D_DMA");
1.27 +_LIT(KDFCThreadName,"D_DMA_DFC_THREAD");
1.28 +const TInt KDFCThreadPriority=26;
1.29 +
1.30 +//////////////////////////////////////////////////////////////////////////////
1.31 +
//
// Class abstracting the way DMA buffers are created and destroyed to
// allow tests to run both on WINS and hardware.  On WINS each buffer
// is a plain heap allocation; on hardware each buffer is physically
// contiguous RAM mapped through a DPlatChunkHw so real DMA transfers
// can address it.
//

class TBufferMgr
	{
public:
	// Allocates buffer aIdx (0..KMaxBuf-1) of aSize bytes.
	TInt Alloc(TInt aIdx, TInt aSize);
	// Releases every buffer currently allocated.
	void FreeAll();
	// Linear (virtual) address of buffer aIdx; the buffer must exist.
	TUint8* Addr(TInt aIdx) const;
	// Physical address of buffer aIdx (defined for hardware builds only).
	TPhysAddr PhysAddr(TInt aIdx) const;
	// Size in bytes of buffer aIdx; the buffer must exist.
	TInt Size(TInt aIdx) const;
	enum { KMaxBuf = 8 };
private:
#ifdef __WINS__
	struct {TUint8* iPtr; TInt iSize;} iBufs[KMaxBuf];
#else
	struct {DPlatChunkHw* iChunk; TInt iSize;} iBufs[KMaxBuf];
#endif
	};
1.53 +
1.54 +#ifdef __WINS__
1.55 +
1.56 +TUint8* TBufferMgr::Addr(TInt aIdx) const
1.57 + {
1.58 + __ASSERT_DEBUG(0 <= aIdx && aIdx < KMaxBuf, Kern::PanicCurrentThread(KClientPanicCat, __LINE__));
1.59 + __ASSERT_DEBUG(iBufs[aIdx].iPtr != NULL, Kern::PanicCurrentThread(KClientPanicCat, __LINE__));
1.60 + return iBufs[aIdx].iPtr;
1.61 + }
1.62 +
1.63 +
1.64 +TInt TBufferMgr::Size(TInt aIdx) const
1.65 + {
1.66 + __ASSERT_DEBUG(0 <= aIdx && aIdx < KMaxBuf, Kern::PanicCurrentThread(KClientPanicCat, __LINE__));
1.67 + __ASSERT_DEBUG(iBufs[aIdx].iPtr != NULL, Kern::PanicCurrentThread(KClientPanicCat, __LINE__));
1.68 + return iBufs[aIdx].iSize;
1.69 + }
1.70 +
1.71 +
1.72 +TInt TBufferMgr::Alloc(TInt aIdx, TInt aSize)
1.73 + {
1.74 + __ASSERT_DEBUG(0 <= aIdx && aIdx < KMaxBuf, Kern::PanicCurrentThread(KClientPanicCat, __LINE__));
1.75 + __ASSERT_DEBUG(iBufs[aIdx].iPtr == NULL, Kern::PanicCurrentThread(KClientPanicCat, __LINE__));
1.76 + NKern::ThreadEnterCS();
1.77 + iBufs[aIdx].iPtr = new TUint8[aSize];
1.78 + NKern::ThreadLeaveCS();
1.79 + iBufs[aIdx].iSize = aSize;
1.80 + return iBufs[aIdx].iPtr ? KErrNone : KErrNoMemory;
1.81 + }
1.82 +
1.83 +
1.84 +void TBufferMgr::FreeAll()
1.85 + {
1.86 + NKern::ThreadEnterCS();
1.87 + for (TInt i=0; i<KMaxBuf; ++i)
1.88 + {
1.89 + delete iBufs[i].iPtr;
1.90 + iBufs[i].iPtr = NULL;
1.91 + }
1.92 + NKern::ThreadLeaveCS();
1.93 + }
1.94 +
1.95 +#else
1.96 +
1.97 +TUint8* TBufferMgr::Addr(TInt aIdx) const
1.98 + {
1.99 + __ASSERT_DEBUG(0 <= aIdx && aIdx < KMaxBuf, Kern::PanicCurrentThread(KClientPanicCat, __LINE__));
1.100 + __ASSERT_DEBUG(iBufs[aIdx].iChunk != NULL, Kern::PanicCurrentThread(KClientPanicCat, __LINE__));
1.101 + return (TUint8*)iBufs[aIdx].iChunk->LinearAddress();
1.102 + }
1.103 +
1.104 +
1.105 +TPhysAddr TBufferMgr::PhysAddr(TInt aIdx) const
1.106 + {
1.107 + __ASSERT_DEBUG(0 <= aIdx && aIdx < KMaxBuf, Kern::PanicCurrentThread(KClientPanicCat, __LINE__));
1.108 + __ASSERT_DEBUG(iBufs[aIdx].iChunk != NULL, Kern::PanicCurrentThread(KClientPanicCat, __LINE__));
1.109 + return iBufs[aIdx].iChunk->PhysicalAddress();
1.110 + }
1.111 +
1.112 +
1.113 +TInt TBufferMgr::Size(TInt aIdx) const
1.114 + {
1.115 + __ASSERT_DEBUG(0 <= aIdx && aIdx < KMaxBuf, Kern::PanicCurrentThread(KClientPanicCat, __LINE__));
1.116 + __ASSERT_DEBUG(iBufs[aIdx].iChunk != NULL, Kern::PanicCurrentThread(KClientPanicCat, __LINE__));
1.117 + return iBufs[aIdx].iSize;
1.118 + }
1.119 +
1.120 +
1.121 +TInt TBufferMgr::Alloc(TInt aIdx, TInt aSize)
1.122 + {
1.123 + __ASSERT_DEBUG(0 <= aIdx && aIdx < KMaxBuf, Kern::PanicCurrentThread(KClientPanicCat, __LINE__));
1.124 + __ASSERT_DEBUG(iBufs[aIdx].iChunk == NULL, Kern::PanicCurrentThread(KClientPanicCat, __LINE__));
1.125 + NKern::ThreadEnterCS();
1.126 + TPhysAddr phys;
1.127 + TInt r = Epoc::AllocPhysicalRam(aSize, phys);
1.128 + if (r == KErrNone)
1.129 + {
1.130 + r = DPlatChunkHw::New(iBufs[aIdx].iChunk, phys, aSize, EMapAttrSupRw | EMapAttrFullyBlocking);
1.131 + if (r != KErrNone)
1.132 + Epoc::FreePhysicalRam(phys, aSize);
1.133 + iBufs[aIdx].iSize = aSize;
1.134 +
1.135 + __KTRACE_OPT(KDMA, Kern::Printf("TBufferMgr::Alloc buffer %d linAddr=0x%08x, physAddr=0x%08x, size=%d",
1.136 + aIdx, Addr(aIdx), PhysAddr(aIdx), Size(aIdx)));
1.137 + }
1.138 + NKern::ThreadLeaveCS();
1.139 + return r;
1.140 + }
1.141 +
1.142 +
1.143 +void TBufferMgr::FreeAll()
1.144 + {
1.145 + for (TInt i=0; i<KMaxBuf; ++i)
1.146 + {
1.147 + if (iBufs[i].iChunk)
1.148 + {
1.149 + TPhysAddr base = iBufs[i].iChunk->PhysicalAddress();
1.150 + TInt size = iBufs[i].iSize;
1.151 + __ASSERT_DEBUG(iBufs[i].iChunk->AccessCount() == 1,
1.152 + Kern::PanicCurrentThread(KClientPanicCat, __LINE__));
1.153 + NKern::ThreadEnterCS();
1.154 + iBufs[i].iChunk->Close(NULL);
1.155 + iBufs[i].iChunk = NULL;
1.156 + Epoc::FreePhysicalRam(base, size);
1.157 + NKern::ThreadLeaveCS();
1.158 + }
1.159 + }
1.160 + }
1.161 +
1.162 +#endif
1.163 +
1.164 +
1.165 +#ifndef DMA_APIV2
1.166 +static TInt FragmentCount(DDmaRequest* aRequest)
1.167 + {
1.168 + TInt count = 0;
1.169 + for (SDmaDesHdr* pH = aRequest->iFirstHdr; pH != NULL; pH = pH->iNext)
1.170 + count++;
1.171 + return count;
1.172 + }
1.173 +#endif
1.174 +
1.175 +
1.176 +//////////////////////////////////////////////////////////////////////////////
1.177 +
// Logical channel driven by the user-side DMA test harness.  Wraps one
// TDmaChannel plus up to KMaxRequests DMA requests, each paired with a
// TClientRequest used to signal completion back to the client thread.
class DDmaTestChannel : public DLogicalChannelBase
	{
public:
	virtual ~DDmaTestChannel();
protected:
	// from DLogicalChannelBase
	virtual TInt DoCreate(TInt aUnit, const TDesC8* anInfo, const TVersion& aVer);
	virtual TInt Request(TInt aFunction, TAny* a1, TAny* a2);
	virtual TInt RequestUserHandle(DThread* aThread, TOwnerType aType);
private:
	// Runs a command string from the client: 'Q<digit>' queues request
	// <digit>, 'C' cancels all transfers on the channel.
	TInt Execute(const TDesC8& aDes);
	// DMA completion callback; runs on the channel's DFC queue.
	static void Dfc(DDmaRequest::TResult aResult, TAny* aArg);
	// Copies the PSL test configuration out to the user-side TInfo.
	TInt DoGetInfo(TAny* aInfo);
private:
	TUint32 iCookie;                                // id from the client, passed to TDmaChannel::Open
	TBufferMgr iBufMgr;                             // source/destination test buffers
	TDmaChannel* iChannel;                          // the DMA channel under test
	enum { KMaxRequests = 8 };
	DDmaRequest* iRequests[KMaxRequests];           // pre-created transfer requests
	TClientRequest* iClientRequests[KMaxRequests];  // per-request completion notifiers
	DDmaTestChannel* iMap[KMaxRequests];            // every slot == this; &iMap[i] given to Dfc so it can recover (channel, i)
	TUint32 iMemMemPslInfo;                         // PSL info word passed to Fragment() for mem-to-mem transfers
	DThread* iClient;                               // thread that opened the channel
	TDynamicDfcQue* iDfcQ;                          // DFC queue servicing channel callbacks
	};
1.203 +
1.204 +
1.205 +TInt DDmaTestChannel::RequestUserHandle(DThread* aThread, TOwnerType aType)
1.206 + {
1.207 + if (aType!=EOwnerThread || aThread!=iClient)
1.208 + return KErrAccessDenied;
1.209 + return KErrNone;
1.210 + }
1.211 +
// Copies the PSL-provided DMA test configuration (max transfer size,
// alignment mask, and the lists of single-buffer, double-buffer and
// scatter-gather capable channels) to the user-side TInfo at aInfo.
//
// Returns KErrDied when the copy-out succeeds, KErrGeneral when it
// faults - never KErrNone.  NOTE(review): the non-KErrNone success
// code looks deliberate (DoCreate returns it directly, so an EGetInfo
// open never yields a usable channel) - confirm against the user-side
// test code before "fixing" it.
TInt DDmaTestChannel::DoGetInfo(TAny* aInfo)
	{
	RTestDma::TInfo uinfo;
	const TDmaTestInfo& kinfo = DmaTestInfo();
	uinfo.iMaxTransferSize = kinfo.iMaxTransferSize;
	uinfo.iMemAlignMask = kinfo.iMemAlignMask;
	uinfo.iMaxSbChannels = kinfo.iMaxSbChannels;
	// 4-byte elements - presumably 32-bit channel ids; verify (a
	// sizeof() would be safer than the literal 4).
	memcpy(&(uinfo.iSbChannels), kinfo.iSbChannels, 4 * kinfo.iMaxSbChannels);
	uinfo.iMaxDbChannels = kinfo.iMaxDbChannels;
	memcpy(&(uinfo.iDbChannels), kinfo.iDbChannels, 4 * kinfo.iMaxDbChannels);
	uinfo.iMaxSgChannels = kinfo.iMaxSgChannels;
	memcpy(&(uinfo.iSgChannels), kinfo.iSgChannels, 4 * kinfo.iMaxSgChannels);

	// aInfo is a raw user pointer: copy out under an exception trap.
	XTRAPD(r, XT_DEFAULT, kumemput(aInfo, &uinfo, sizeof(RTestDma::TInfo)));
	return r == KErrNone ? KErrDied : KErrGeneral;
	}
1.228 +
1.229 +
// Second-phase construction; called in thread critical section.
// aInfo carries an RTestDma::TOpenInfo that either requests the PSL
// test configuration (EGetInfo - in which case no channel is created)
// or the parameters for opening a DMA channel.
TInt DDmaTestChannel::DoCreate(TInt /*aUnit*/, const TDesC8* aInfo, const TVersion& /*aVer*/)
	{
	TPckgBuf<RTestDma::TOpenInfo> infoBuf;

	TInt r=Kern::ThreadDesRead(&Kern::CurrentThread(), aInfo, infoBuf, 0, KChunkShiftBy0);
	if (r != KErrNone)
		return r;

	if (infoBuf().iWhat == RTestDma::TOpenInfo::EGetInfo)
		return DoGetInfo(infoBuf().U.iInfo);   // NB: never returns KErrNone
	else
		{
		// Create the DFC queue that will service the channel callbacks.
		if (!iDfcQ)
			{
			r = Kern::DynamicDfcQCreate(iDfcQ, KDFCThreadPriority, KDFCThreadName);
			if (r != KErrNone)
				return r;
#ifdef CPU_AFFINITY_ANY
			NKern::ThreadSetCpuAffinity((NThread*)(iDfcQ->iThread), KCpuAffinityAny);
#endif
			}

		iMemMemPslInfo = DmaTestInfo().iMemMemPslInfo;
		iCookie = infoBuf().U.iOpen.iId;
		TDmaChannel::SCreateInfo info;
		info.iCookie = iCookie;
		info.iDfcQ = iDfcQ;
		info.iDfcPriority = 3;
		info.iDesCount = infoBuf().U.iOpen.iDesCount;
		r = TDmaChannel::Open(info, iChannel);
		if (r!= KErrNone)
			return r;
		iClient = &Kern::CurrentThread();
		// Pre-create one DMA request plus one client notifier per slot;
		// partial failures are cleaned up by the destructor (iChannel is
		// already set at this point).
		for (TInt i=0; i<KMaxRequests; ++i)
			{
			r = Kern::CreateClientRequest(iClientRequests[i]);
			if (r!=KErrNone)
				return r;
			// iMap[i] == this; passing &iMap[i] lets the static Dfc
			// recover both the channel and the request index i.
			iMap[i] = this;
			TInt max = infoBuf().U.iOpen.iMaxTransferSize;
			if (max)
				{
				// Exercise request with custom limit
				iRequests[i] = new DDmaRequest(*iChannel, Dfc, iMap+i, max);
				}
			else
				{
				// Exercise request with default limit
				iRequests[i] = new DDmaRequest(*iChannel, Dfc, iMap+i);
				}
			if (! iRequests[i])
				return KErrNoMemory;
			}
		return KErrNone;
		}
	}
1.287 +
1.288 +
// Tear-down.  Ordering matters: cancel outstanding transfers first,
// delete the requests while the channel they reference is still open,
// then close the channel, and only destroy the DFC queue after the
// channel (whose DFCs run on it) is gone.
DDmaTestChannel::~DDmaTestChannel()
	{
	if (iChannel)
		{
		iChannel->CancelAll();
		TInt i;
		for (i=0; i<KMaxRequests; ++i)
			delete iRequests[i];
		iChannel->Close();
		for (i=0; i<KMaxRequests; ++i)
			Kern::DestroyClientRequest(iClientRequests[i]);
		}
	if (iDfcQ)
		{
		iDfcQ->Destroy();
		}
	iBufMgr.FreeAll();
	}
1.307 +
1.308 +
// Synchronous request dispatcher.  a1/a2 are raw user-supplied
// arguments whose meaning depends on aFunction (see RTestDma).
TInt DDmaTestChannel::Request(TInt aFunction, TAny* a1, TAny* a2)
	{
	switch (aFunction)
		{
	case RTestDma::EAllocBuffer:
		// a1 = buffer index, a2 = size in bytes
		return iBufMgr.Alloc((TInt)a1, (TInt)a2);
	case RTestDma::EFreeAllBuffers:
		iBufMgr.FreeAll();
		return KErrNone;
	case RTestDma::EFillBuffer:
		{
		// Fill buffer a1 entirely with byte value a2.
		TInt i = (TInt)a1;
		TUint8 val = (TUint8)(TUint)a2;
		memset(iBufMgr.Addr(i), val, iBufMgr.Size(i));
		return KErrNone;
		}
	case RTestDma::ECheckBuffer:
		{
		// Verify buffer a1 contains only byte value a2.  NB: this case
		// returns ETrue/EFalse rather than a system-wide error code.
		TInt i = (TInt)a1;
		TUint8 val = (TUint8)(TUint)a2;
		TUint8* p = iBufMgr.Addr(i);
		TUint8* end = p + iBufMgr.Size(i);
		while (p < end)
			if (*p++ != val)
				{
#ifdef _DEBUG
				// Only used by the trace below; __KTRACE_OPT compiles
				// away in builds where prevValue is not defined.
				const TUint8 prevValue = *(p-1);
#endif
				__KTRACE_OPT(KDMA, Kern::Printf("Check DMA buffer number %d failed at offset: %d value: %d(%c)",
												i, p-iBufMgr.Addr(i)-1, prevValue, prevValue));
				return EFalse;
				}
		return ETrue;
		}
	case RTestDma::EFragment:
		{
		// Split a mem-to-mem transfer (described by the user-side
		// TFragmentInfo at a1) into fragments on request iRequestIdx.
		RTestDma::TFragmentInfo info;
		kumemget(&info, a1, sizeof info);
		__ASSERT_DEBUG(iBufMgr.Size(info.iSrcBufIdx) == iBufMgr.Size(info.iDestBufIdx),
					   Kern::PanicCurrentThread(KClientPanicCat, __LINE__));
		__ASSERT_DEBUG(info.iSize <= iBufMgr.Size(info.iSrcBufIdx),
					   Kern::PanicCurrentThread(KClientPanicCat, __LINE__));
		__ASSERT_DEBUG(0 <= info.iRequestIdx && info.iRequestIdx < KMaxRequests,
					   Kern::PanicCurrentThread(KClientPanicCat, __LINE__));
#ifdef __DMASIM__
		// DMASIM doesn't use physical addresses
		TUint32 src = (TUint32)iBufMgr.Addr(info.iSrcBufIdx);
		TUint32 dest = (TUint32)iBufMgr.Addr(info.iDestBufIdx);
		TUint KFlags = KDmaMemSrc | KDmaIncSrc | KDmaMemDest | KDmaIncDest;
#else
		TUint32 src = iBufMgr.PhysAddr(info.iSrcBufIdx);
		TUint32 dest = iBufMgr.PhysAddr(info.iDestBufIdx);
		TUint KFlags = KDmaMemSrc | KDmaIncSrc | KDmaPhysAddrSrc |
			KDmaMemDest | KDmaIncDest | KDmaPhysAddrDest | KDmaAltTransferLen;
#endif
		TInt r = iRequests[info.iRequestIdx]->Fragment(src, dest, info.iSize, KFlags, iMemMemPslInfo);
		// Arm the completion notifier only if the client supplied a
		// TRequestStatus; otherwise Dfc ignores the completion.
		if (r == KErrNone && info.iRs)
			r = iClientRequests[info.iRequestIdx]->SetStatus(info.iRs);
		return r;
		}
	case RTestDma::EExecute:
		return Execute(*(TDesC8*)a1);
	case RTestDma::EFailNext:
		// Test hook: make the next a1 transfer(s) on the channel fail.
		return iChannel->FailNext((TInt)a1);
	case RTestDma::EFragmentCount:
		{
		TInt reqIdx = (TInt)a1;
		__ASSERT_DEBUG(0 <= reqIdx && reqIdx < KMaxRequests, Kern::PanicCurrentThread(KClientPanicCat, __LINE__));
#ifdef DMA_APIV2
		return iRequests[reqIdx]->FragmentCount();
#else
		// Pre-v2 API has no FragmentCount(); count by hand (see above).
		return FragmentCount(iRequests[reqIdx]);
#endif
		}
	case RTestDma::EMissInterrupts:
		// Test hook: swallow the next a1 interrupts on the channel.
		return iChannel->MissNextInterrupts((TInt)a1);
	default:
		// Unknown function number: client programming error.
		Kern::PanicCurrentThread(KClientPanicCat, __LINE__);
		return KErrNone; // work-around spurious warning
		}
	}
1.390 +
1.391 +
1.392 +TInt DDmaTestChannel::Execute(const TDesC8& aDes)
1.393 + {
1.394 + TBuf8<64> cmd;
1.395 + Kern::KUDesGet(cmd, aDes);
1.396 + __KTRACE_OPT(KDMA, Kern::Printf("DDmaTestChannel::Execute cmd=%S", &cmd));
1.397 +
1.398 + const TText8* p = cmd.Ptr();
1.399 + const TText8* pEnd = p + cmd.Length();
1.400 + while (p<pEnd)
1.401 + {
1.402 + TText8 opcode = *p++;
1.403 + switch (opcode)
1.404 + {
1.405 + case 'Q':
1.406 + {
1.407 + TInt arg = *p++ - '0';
1.408 + __ASSERT_DEBUG(0 <= arg && arg < KMaxRequests, Kern::PanicCurrentThread(KClientPanicCat, __LINE__));
1.409 + iRequests[arg]->Queue();
1.410 + break;
1.411 + }
1.412 + case 'C':
1.413 + iChannel->CancelAll();
1.414 + break;
1.415 + default:
1.416 + Kern::PanicCurrentThread(KClientPanicCat, __LINE__);
1.417 + }
1.418 + }
1.419 + return KErrNone;
1.420 + }
1.421 +
1.422 +
// Static DMA completion callback; runs on the channel's DFC queue.
// aArg is &iMap[i] for some request index i: dereferencing the slot
// yields the owning channel (every iMap entry holds `this`), and the
// slot's offset from iMap recovers i.
void DDmaTestChannel::Dfc(DDmaRequest::TResult aResult, TAny* aArg)
	{
	DDmaTestChannel** ppC = (DDmaTestChannel**)aArg;
	DDmaTestChannel* pC = *ppC;
	TInt i = ppC - pC->iMap;
	TClientRequest* req = pC->iClientRequests[i];
	TInt r = (aResult==DDmaRequest::EOk) ? KErrNone : KErrGeneral;
	// Complete only if the client armed the request with a
	// TRequestStatus (EFragment); unarmed completions are dropped.
	if (req->IsReady())
		Kern::QueueRequestComplete(pC->iClient, req, r);
	}
1.433 +
1.434 +//////////////////////////////////////////////////////////////////////////////
1.435 +
// LDD factory object: creates DDmaTestChannel instances on behalf of
// user-side RTestDma handles.
class DDmaTestFactory : public DLogicalDevice
	{
public:
	DDmaTestFactory();
	// from DLogicalDevice
	virtual TInt Install();
	virtual void GetCaps(TDes8& aDes) const;
	virtual TInt Create(DLogicalChannelBase*& aChannel);
	};
1.445 +
1.446 +
1.447 +DDmaTestFactory::DDmaTestFactory()
1.448 + {
1.449 + iVersion = TestDmaLddVersion();
1.450 + iParseMask = KDeviceAllowUnit; // no info, no PDD
1.451 + // iUnitsMask = 0; // Only one thing
1.452 + }
1.453 +
1.454 +
1.455 +TInt DDmaTestFactory::Create(DLogicalChannelBase*& aChannel)
1.456 + {
1.457 + aChannel=new DDmaTestChannel;
1.458 + return aChannel ? KErrNone : KErrNoMemory;
1.459 + }
1.460 +
1.461 +
1.462 +TInt DDmaTestFactory::Install()
1.463 + {
1.464 + return SetName(&KTestDmaLddName);
1.465 + }
1.466 +
1.467 +
// No capabilities to report for this test driver.
void DDmaTestFactory::GetCaps(TDes8& /*aDes*/) const
	{
	}
1.471 +
1.472 +//////////////////////////////////////////////////////////////////////////////
1.473 +
// Standard LDD entry point: returns the factory object (NULL on
// allocation failure, which the loader treats as KErrNoMemory).
DECLARE_STANDARD_LDD()
	{
	return new DDmaTestFactory;
	}