1.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
1.2 +++ b/os/kernelhwsrv/kerneltest/e32test/defrag/d_ramdefrag.cpp Fri Jun 15 03:10:57 2012 +0200
1.3 @@ -0,0 +1,1765 @@
1.4 +// Copyright (c) 2007-2009 Nokia Corporation and/or its subsidiary(-ies).
1.5 +// All rights reserved.
1.6 +// This component and the accompanying materials are made available
1.7 +// under the terms of the License "Eclipse Public License v1.0"
1.8 +// which accompanies this distribution, and is available
1.9 +// at the URL "http://www.eclipse.org/legal/epl-v10.html".
1.10 +//
1.11 +// Initial Contributors:
1.12 +// Nokia Corporation - initial contribution.
1.13 +//
1.14 +// Contributors:
1.15 +//
1.16 +// Description:
1.17 +// e32test\defrag\d_ramdefrag.cpp
1.18 +//
1.19 +//
1.20 +
1.21 +//#define DEBUG_VER // Uncomment for tracing
1.22 +
1.23 +#include "platform.h"
1.24 +#include <kernel/kern_priv.h>
1.25 +#include <kernel/cache.h>
1.26 +#include "t_ramdefrag.h"
1.27 +
1.28 +//
1.29 +// Class definitions
1.30 +//
1.31 +const TInt KMajorVersionNumber=0;
1.32 +const TInt KMinorVersionNumber=1;
1.33 +const TInt KBuildVersionNumber=1;
1.34 +
1.35 +
1.36 +const TInt KDefragCompleteThreadPriority = 27;
1.37 +_LIT(KDefragCompleteThread,"DefragCompleteThread");
1.38 +
1.39 +class DRamDefragFuncTestFactory : public DLogicalDevice
1.40 + {
1.41 +public:
1.42 +
1.43 + DRamDefragFuncTestFactory();
1.44 + ~DRamDefragFuncTestFactory();
1.45 + virtual TInt Install();
1.46 + virtual void GetCaps(TDes8& aDes) const;
1.47 + virtual TInt Create(DLogicalChannelBase*& aChannel);
1.48 +
1.49 + TDynamicDfcQue* iDfcQ;
1.50 + };
1.51 +
1.52 +class DRamDefragFuncTestChannel : public DLogicalChannelBase
1.53 + {
1.54 +public:
1.55 + DRamDefragFuncTestChannel(TDfcQue* aDfcQ);
1.56 +
1.57 + DRamDefragFuncTestChannel();
1.58 + ~DRamDefragFuncTestChannel();
1.59 + virtual TInt DoCreate(TInt aUnit, const TDesC8* anInfo, const TVersion& aVer);
1.60 + virtual TInt Request(TInt aFunction, TAny* a1, TAny* a2);
1.61 +
1.62 + TInt FreeAllFixedPages();
1.63 + TInt AllocFixedPages(TInt aNumPages);
1.64 + TInt AllocFixedArray(TInt aNumPages);
1.65 + TInt AllocateFixed2(TInt aNumPages);
1.66 + TInt GetAllocDiff(TUint aNumPages);
1.67 + TInt FreeAllFixedPagesRead();
1.68 + TInt AllocFixedPagesWrite(TInt aNumPages);
1.69 + TInt ZoneAllocContiguous(TUint aZoneID, TUint aNumBytes);
1.70 + TInt ZoneAllocContiguous(TUint* aZoneIdList, TUint aZoneIdCount, TUint aNumBytes);
1.71 + TInt ZoneAllocDiscontiguous(TUint aZoneID, TInt aNumPages);
1.72 + TInt ZoneAllocDiscontiguous(TUint* aZoneIdList, TUint aZoneIdCount, TInt aNumPages);
1.73 + TInt ZoneAllocToMany(TInt aZoneIndex, TInt aNumPages);
1.74 + TInt ZoneAllocToManyArray(TInt aZoneIndex, TInt aNumPages);
1.75 + TInt ZoneAllocToMany2(TInt aZoneIndex, TInt aNumPages);
1.76 + TInt AllocContiguous(TUint aNumBytes);
1.77 + TInt FreeZone(TInt aNumPages);
1.78 + TInt FreeFromAllZones();
1.79 + TInt FreeFromAddr(TInt aNumPages, TUint32 aAddr);
1.80 + TInt PageCount(TUint aId, STestUserSidePageCount* aPageData);
1.81 + TInt CancelDefrag();
1.82 + TInt CheckCancel(STestParameters* aParams);
1.83 + TInt CallDefrag(STestParameters* aParams);
1.84 + TInt CheckPriorities(STestParameters* aParams);
1.85 + TInt SetZoneFlag(STestFlagParams* aParams);
1.86 + TInt GetDefragOrder();
1.87 + TInt FreeRam();
1.88 + TInt DoSetDebugFlag(TInt aState);
1.89 + TInt ResetDriver();
1.90 + TInt ZoneAllocDiscontiguous2(TUint aZoneID, TInt aNumPages);
1.91 +public:
1.92 + DRamDefragFuncTestFactory* iFactory;
1.93 +
1.94 +protected:
1.95 + static void DefragCompleteDfc(TAny* aSelf);
1.96 + void DefragComplete();
1.97 + static void Defrag2CompleteDfc(TAny* aSelf);
1.98 + void Defrag2Complete();
1.99 + static void Defrag3CompleteDfc(TAny* aSelf);
1.100 + void Defrag3Complete();
1.101 +private:
1.102 + TPhysAddr iContigAddr; /**< The base address of fixed contiguous allocations*/
1.103 + TUint iContigBytes; /**< The no. of contiguous fixed bytes allocated*/
1.104 + TPhysAddr* iAddrArray;
1.105 + TUint iAddrArrayPages;
1.106 + TUint iAddrArraySize;
1.107 + TPhysAddr** iAddrPtrArray;
1.108 + TInt* iNumPagesArray;
1.109 + TInt iDebug;
1.110 + TInt iThreadCounter;
1.111 + DChunk* iChunk;
1.112 + TLinAddr iKernAddrStart;
1.113 + TInt iPageSize;
1.114 + TUint iPageShift; /**< The system's page shift */
1.115 + TUint iZoneCount;
1.116 + TRamDefragRequest iDefragRequest; // Defrag request object
1.117 + TRamDefragRequest iDefragRequest2;
1.118 + TRamDefragRequest iDefragRequest3;
1.119 +	TUint* iZoneIdArray;			/**< Pointer to a kernel heap array of zone IDs*/
1.120 +
1.121 +
1.122 +	DSemaphore* iDefragSemaphore;	// Semaphore to ensure only one defrag operation is active per channel
1.123 + TRequestStatus* iCompleteReq; // Pointer to a request status that will signal to the user side client once the defrag has completed
1.124 + TRequestStatus* iCompleteReq2;
1.125 + TRequestStatus* iCompleteReq3;
1.126 + TRequestStatus iTmpRequestStatus1;
1.127 + TRequestStatus iTmpRequestStatus2;
1.128 + DThread* iRequestThread; // Pointer to the thread that made the defrag request
1.129 + DThread* iRequestThread2;
1.130 + DThread* iRequestThread3;
1.131 +
1.132 + TDfcQue* iDfcQ; // The DFC queue used for driver functions
1.133 + TDfc iDefragCompleteDfc; // DFC to be queued once a defrag operation has completed
1.134 + TDfc iDefragComplete2Dfc;
1.135 + TDfc iDefragComplete3Dfc;
1.136 + TInt iCounter; // Counts the number of defrags that have taken place
1.137 + TInt iOrder; // Stores the order in which queued defrags took place
1.138 + };
1.139 +
1.140 +
1.141 +
1.142 +//
1.143 +// DRamDefragFuncTestFactory
1.144 +//
1.145 +
1.146 +DRamDefragFuncTestFactory::DRamDefragFuncTestFactory()
1.147 +//
1.148 +// Constructor
1.149 +//
1.150 + {
1.151 + iVersion=TVersion(KMajorVersionNumber,KMinorVersionNumber,KBuildVersionNumber);
1.152 + //iParseMask=0;//No units, no info, no PDD
1.153 + //iUnitsMask=0;//Only one thing
1.154 + }
1.155 +
1.156 +TInt DRamDefragFuncTestFactory::Install()
1.157 + {
1.158 + return SetName(&KRamDefragFuncTestLddName);
1.159 + }
1.160 +
1.161 +DRamDefragFuncTestFactory::~DRamDefragFuncTestFactory()
1.162 + {
1.163 + if (iDfcQ != NULL)
1.164 +		{// Destroy the DFC queue created when this device driver was loaded.
1.165 + iDfcQ->Destroy();
1.166 + }
1.167 + }
1.168 +
1.169 +void DRamDefragFuncTestFactory::GetCaps(TDes8& /*aDes*/) const
1.170 + {
1.171 + // Not used but required as DLogicalDevice::GetCaps is pure virtual
1.172 + }
1.173 +
1.174 +TInt DRamDefragFuncTestFactory::Create(DLogicalChannelBase*& aChannel)
1.175 + {
1.176 + DRamDefragFuncTestChannel* channel=new DRamDefragFuncTestChannel(iDfcQ);
1.177 + if(!channel)
1.178 + return KErrNoMemory;
1.179 + channel->iFactory = this;
1.180 + aChannel = channel;
1.181 + return KErrNone;
1.182 + }
1.183 +
1.184 +DECLARE_STANDARD_LDD()
1.185 + {
1.186 + DRamDefragFuncTestFactory* factory = new DRamDefragFuncTestFactory;
1.187 + if (factory)
1.188 + {
1.189 + // Allocate a kernel thread to run the DFC
1.190 + TInt r = Kern::DynamicDfcQCreate(factory->iDfcQ, KDefragCompleteThreadPriority, KDefragCompleteThread);
1.191 +
1.192 + if (r != KErrNone)
1.193 + {// Must close rather than delete factory as it is a DObject object.
1.194 + factory->AsyncClose();
1.195 + return NULL;
1.196 + }
1.197 + }
1.198 + return factory;
1.199 + }
1.200 +
1.201 +//
1.202 +// DRamDefragFuncTestChannel
1.203 +//
1.204 +
1.205 +TInt DRamDefragFuncTestChannel::DoCreate(TInt /*aUnit*/, const TDesC8* /*aInfo*/, const TVersion& /*aVer*/)
1.206 + {
1.207 +
1.208 + TInt ret = Kern::HalFunction(EHalGroupRam, ERamHalGetZoneCount, (TAny*)&iZoneCount, NULL);
1.209 +
1.210 +
1.211 +	// Retrieve the page size and use it to determine the page shift (assumes a 32-bit system).
1.212 + TInt r = Kern::HalFunction(EHalGroupKernel, EKernelHalPageSizeInBytes, &iPageSize, 0);
1.213 + if (r != KErrNone)
1.214 + {
1.215 + TESTDEBUG(Kern::Printf("ERROR - Unable to determine page size"));
1.216 + return r;
1.217 + }
1.218 + TUint32 pageMask = iPageSize;
1.219 + TUint i = 0;
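+	// Find the index of the single set bit in the page size; that index is the
+	// page shift (e.g. a 4KB page size gives a shift of 12). A page size that
+	// is not a power of 2 is rejected.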
1.220 + for (; i < 32; i++)
1.221 + {
1.222 + if (pageMask & 1)
1.223 + {
1.224 + if (pageMask & ~1u)
1.225 + {
1.226 + TESTDEBUG(Kern::Printf("ERROR - page size not a power of 2"));
1.227 + return KErrNotSupported;
1.228 + }
1.229 + iPageShift = i;
1.230 + break;
1.231 + }
1.232 + pageMask >>= 1;
1.233 + }
1.234 +
1.235 + // Create a semaphore to protect defrag invocation. OK to just use one name as
1.236 +	// the semaphore is not global so its name doesn't need to be unique.
1.237 + ret = Kern::SemaphoreCreate(iDefragSemaphore, _L("DefragRefSem"), 1);
1.238 + if (ret != KErrNone)
1.239 + {
1.240 + return ret;
1.241 + }
1.242 + iDefragCompleteDfc.SetDfcQ(iDfcQ);
1.243 + iDefragComplete2Dfc.SetDfcQ(iDfcQ);
1.244 + iDefragComplete3Dfc.SetDfcQ(iDfcQ);
1.245 +
1.246 +	// Create an array to store some RAM zone IDs for use by the multi-zone
1.247 +	// specific allocation methods.
1.248 + NKern::ThreadEnterCS();
1.249 + iZoneIdArray = new TUint[KMaxRamZones];
1.250 + if (iZoneIdArray == NULL)
1.251 + {
1.252 + ret = KErrNoMemory;
1.253 + }
1.254 + NKern::ThreadLeaveCS();
1.255 +
1.256 + return ret;
1.257 + }
1.258 +
1.259 +DRamDefragFuncTestChannel::DRamDefragFuncTestChannel(TDfcQue* aDfcQ)
1.260 + :
1.261 + iContigAddr(KPhysAddrInvalid),
1.262 + iContigBytes(0),
1.263 + iAddrArray(NULL),
1.264 + iAddrArrayPages(0),
1.265 + iAddrArraySize(0),
1.266 + iAddrPtrArray(NULL),
1.267 + iNumPagesArray(NULL),
1.268 + iDebug(0),
1.269 + iThreadCounter(1),
1.270 + iChunk(NULL),
1.271 + iPageSize(0),
1.272 + iPageShift(0),
1.273 + iZoneCount(0),
1.274 + iZoneIdArray(NULL),
1.275 + iDefragSemaphore(NULL),
1.276 + iCompleteReq(NULL),
1.277 + iCompleteReq2(NULL),
1.278 + iCompleteReq3(NULL),
1.279 + iRequestThread(NULL),
1.280 + iRequestThread2(NULL),
1.281 + iRequestThread3(NULL),
1.282 + iDfcQ(aDfcQ),
1.283 + iDefragCompleteDfc(DefragCompleteDfc, (TAny*)this, 1),
1.284 + iDefragComplete2Dfc(Defrag2CompleteDfc, (TAny*)this, 1),
1.285 + iDefragComplete3Dfc(Defrag3CompleteDfc, (TAny*)this, 1),
1.286 + iCounter(0),
1.287 + iOrder(0)
1.288 + {
1.289 + }
1.290 +
1.291 +DRamDefragFuncTestChannel::~DRamDefragFuncTestChannel()
1.292 + {
1.293 + if (iDefragSemaphore != NULL)
1.294 + {
1.295 + iDefragSemaphore->Close(NULL);
1.296 + }
1.297 + if (iZoneIdArray != NULL)
1.298 + {
1.299 + NKern::ThreadEnterCS();
1.300 + delete[] iZoneIdArray;
1.301 + NKern::ThreadLeaveCS();
1.302 + }
1.303 + }
1.304 +
1.305 +TInt DRamDefragFuncTestChannel::Request(TInt aFunction, TAny* a1, TAny* a2)
1.306 + {
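+	// Atomically track how many threads are inside Request() at once; a message
+	// is printed below if more than one is ever seen concurrently.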
1.307 + TInt threadCount = __e32_atomic_tas_ord32(&iThreadCounter, 1, 1, 0);
1.308 + if (threadCount >= 2)
1.309 + {
1.310 + Kern::Printf("DRamDefragFuncTestChannel::Request threadCount = %d\n", threadCount);
1.311 + }
1.312 +
1.313 + Kern::SemaphoreWait(*iDefragSemaphore);
1.314 +
1.315 +
1.316 + TInt retVal = KErrNotSupported;
1.317 + switch(aFunction)
1.318 + {
1.319 + case RRamDefragFuncTestLdd::EAllocateFixed:
1.320 + retVal = DRamDefragFuncTestChannel::AllocFixedPages((TInt)a1);
1.321 + break;
1.322 +
1.323 + case RRamDefragFuncTestLdd::EAllocFixedArray:
1.324 + retVal = DRamDefragFuncTestChannel::AllocFixedArray((TInt)a1);
1.325 + break;
1.326 +
1.327 + case RRamDefragFuncTestLdd::EAllocateFixed2:
1.328 + retVal = DRamDefragFuncTestChannel::AllocateFixed2((TInt)a1);
1.329 + break;
1.330 +
1.331 + case RRamDefragFuncTestLdd::EGetAllocDiff:
1.332 + retVal = DRamDefragFuncTestChannel::GetAllocDiff((TUint)a1);
1.333 + break;
1.334 +
1.335 + case RRamDefragFuncTestLdd::EFreeAllFixed:
1.336 + retVal = DRamDefragFuncTestChannel::FreeAllFixedPages();
1.337 + break;
1.338 +
1.339 + case RRamDefragFuncTestLdd::EAllocateFixedWrite:
1.340 + retVal = DRamDefragFuncTestChannel::AllocFixedPagesWrite((TInt)a1);
1.341 + break;
1.342 +
1.343 + case RRamDefragFuncTestLdd::EFreeAllFixedRead:
1.344 + retVal = DRamDefragFuncTestChannel::FreeAllFixedPagesRead();
1.345 + break;
1.346 +
1.347 + case RRamDefragFuncTestLdd::EZoneAllocContiguous:
1.348 + retVal = DRamDefragFuncTestChannel::ZoneAllocContiguous((TUint)a1, (TUint)a2);
1.349 + break;
1.350 +
1.351 + case RRamDefragFuncTestLdd::EMultiZoneAllocContiguous:
1.352 + {
1.353 + SMultiZoneAlloc multiZone;
1.354 + kumemget(&multiZone, a1, sizeof(SMultiZoneAlloc));
1.355 + retVal = DRamDefragFuncTestChannel::ZoneAllocContiguous(multiZone.iZoneId, multiZone.iZoneIdSize, (TUint)a2);
1.356 + }
1.357 + break;
1.358 +
1.359 + case RRamDefragFuncTestLdd::EZoneAllocDiscontiguous:
1.360 + retVal = DRamDefragFuncTestChannel::ZoneAllocDiscontiguous((TUint)a1, (TUint)a2);
1.361 + break;
1.362 +
1.363 + case RRamDefragFuncTestLdd::EMultiZoneAllocDiscontiguous:
1.364 + {
1.365 + SMultiZoneAlloc multiZone;
1.366 + kumemget(&multiZone, a1, sizeof(SMultiZoneAlloc));
1.367 + retVal = DRamDefragFuncTestChannel::ZoneAllocDiscontiguous(multiZone.iZoneId, multiZone.iZoneIdSize, (TUint)a2);
1.368 + }
1.369 + break;
1.370 +
1.371 + case RRamDefragFuncTestLdd::EZoneAllocDiscontiguous2:
1.372 + retVal = DRamDefragFuncTestChannel::ZoneAllocDiscontiguous2((TUint)a1, (TUint)a2);
1.373 + break;
1.374 +
1.375 + case RRamDefragFuncTestLdd::EZoneAllocToMany:
1.376 + retVal = DRamDefragFuncTestChannel::ZoneAllocToMany((TUint)a1, (TInt)a2);
1.377 + break;
1.378 +
1.379 + case RRamDefragFuncTestLdd::EZoneAllocToManyArray:
1.380 + retVal = DRamDefragFuncTestChannel::ZoneAllocToManyArray((TUint)a1, (TInt)a2);
1.381 + break;
1.382 +
1.383 + case RRamDefragFuncTestLdd::EZoneAllocToMany2:
1.384 + retVal = DRamDefragFuncTestChannel::ZoneAllocToMany2((TUint)a1, (TInt)a2);
1.385 + break;
1.386 +
1.387 + case RRamDefragFuncTestLdd::EAllocContiguous:
1.388 + retVal = DRamDefragFuncTestChannel::AllocContiguous((TUint)a1);
1.389 + break;
1.390 +
1.391 + case RRamDefragFuncTestLdd::EFreeZone:
1.392 + retVal = DRamDefragFuncTestChannel::FreeZone((TInt)a1);
1.393 + break;
1.394 +
1.395 + case RRamDefragFuncTestLdd::EFreeFromAllZones:
1.396 + retVal = DRamDefragFuncTestChannel::FreeFromAllZones();
1.397 + break;
1.398 +
1.399 + case RRamDefragFuncTestLdd::EFreeFromAddr:
1.400 + retVal = DRamDefragFuncTestChannel::FreeFromAddr((TInt)a1, (TUint32)a2);
1.401 + break;
1.402 +
1.403 + case RRamDefragFuncTestLdd::EPageCount:
1.404 + retVal = DRamDefragFuncTestChannel::PageCount((TUint)a1, (STestUserSidePageCount*)a2);
1.405 + break;
1.406 +
1.407 + case RRamDefragFuncTestLdd::ECheckCancel:
1.408 + retVal = DRamDefragFuncTestChannel::CheckCancel((STestParameters*)a1);
1.409 + break;
1.410 +
1.411 + case RRamDefragFuncTestLdd::ECallDefrag:
1.412 + retVal = DRamDefragFuncTestChannel::CallDefrag((STestParameters*)a1);
1.413 + break;
1.414 +
1.415 + case RRamDefragFuncTestLdd::ESetZoneFlag:
1.416 + retVal = DRamDefragFuncTestChannel::SetZoneFlag((STestFlagParams*)a1);
1.417 + break;
1.418 +
1.419 + case RRamDefragFuncTestLdd::ECheckPriorities:
1.420 + retVal = DRamDefragFuncTestChannel::CheckPriorities((STestParameters*)a1);
1.421 + break;
1.422 +
1.423 + case RRamDefragFuncTestLdd::EGetDefragOrder:
1.424 + retVal = DRamDefragFuncTestChannel::GetDefragOrder();
1.425 + break;
1.426 +
1.427 + case RRamDefragFuncTestLdd::EDoSetDebugFlag:
1.428 + retVal = DoSetDebugFlag((TInt) a1);
1.429 + break;
1.430 +
1.431 + case RRamDefragFuncTestLdd::EResetDriver:
1.432 + retVal = ResetDriver();
1.433 + break;
1.434 +
1.435 + default:
1.436 + break;
1.437 + }
1.438 +
1.439 + Kern::SemaphoreSignal(*iDefragSemaphore);
1.440 + __e32_atomic_tas_ord32(&iThreadCounter, 1, -1, 0);
1.441 + return retVal;
1.442 + }
1.443 +
1.444 +
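+// CHECK() logs a failure and records the failing source line in the caller's
+// local retVal so the user side test can see exactly which verification failed.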
1.445 +#define CHECK(c) { if(!(c)) { Kern::Printf("Fail %d", __LINE__); retVal = __LINE__; } }
1.446 +
1.447 +
1.448 +//
1.449 +// FreeAllFixedPages
1.450 +//
1.451 +// Free ALL of the fixed pages that were allocated
1.452 +//
1.453 +TInt DRamDefragFuncTestChannel::FreeAllFixedPages()
1.454 + {
1.455 + NKern::ThreadEnterCS();
1.456 +
1.457 + TInt retVal = KErrNone;
1.458 +
1.459 + if (iAddrArray != NULL)
1.460 + {
1.461 + retVal = Epoc::FreePhysicalRam(iAddrArrayPages, iAddrArray);
1.462 + CHECK(retVal == KErrNone);
1.463 +
1.464 + delete[] iAddrArray;
1.465 + iAddrArray = NULL;
1.466 + iAddrArrayPages = 0;
1.467 + }
1.468 +
1.469 + if (iContigAddr != KPhysAddrInvalid)
1.470 + {
1.471 + retVal = Epoc::FreePhysicalRam(iContigAddr, iContigBytes);
1.472 + iContigAddr = KPhysAddrInvalid;
1.473 + iContigBytes = 0;
1.474 + CHECK(retVal == KErrNone);
1.475 + }
1.476 + NKern::ThreadLeaveCS();
1.477 +
1.478 + retVal = FreeFromAllZones();
1.479 + return retVal;
1.480 + }
1.481 +
1.482 +
1.483 +
1.484 +//
1.485 +// FreeAllFixedPagesRead()
1.486 +//
1.487 +// Read the fixed pages that were mapped to iChunk and verify that
1.488 +// the contents have not changed. Then free the fixed pages
1.489 +// that were allocated for iChunk.
1.490 +//
1.491 +TInt DRamDefragFuncTestChannel::FreeAllFixedPagesRead()
1.492 + {
1.493 +
1.494 + TInt retVal = KErrNone;
1.495 + TUint index;
1.496 +
1.497 + if (iAddrArray == NULL || iChunk == NULL || !iAddrArrayPages)
1.498 + {
1.499 + return KErrCorrupt;
1.500 + }
1.501 +
1.502 + TInt r = Kern::ChunkAddress(iChunk, 0, iAddrArrayPages << iPageShift, iKernAddrStart);
1.503 + if (r != KErrNone)
1.504 + {
1.505 + Kern::Printf("ERROR ? FreeAllFixedPages : Couldn't get linear address of iChunk! %d", r);
1.506 + }
1.507 + else
1.508 + {
1.509 + for (index = 0; index < iAddrArrayPages; index ++)
1.510 + {
1.511 + if (iAddrArray[index] != NULL)
1.512 + {
1.513 + TUint* pInt = (TUint *)(iKernAddrStart + (index << iPageShift));
1.514 + TUint* pIntEnd = pInt + (iPageSize / sizeof(TInt));
1.515 +					// Read each word in this page and verify that
1.516 +					// it is still the index of the current page in the chunk.
1.517 +					for (; pInt < pIntEnd; pInt++)
1.518 +						{
1.519 +						if (*pInt != index)
1.520 +							{
1.521 +							Kern::Printf("ERROR ? FreeAllFixedPages : page at index %d is corrupt! 0x%08x", index, *pInt);
1.522 + }
1.523 + }
1.524 + }
1.525 + }
1.526 + }
1.527 + NKern::ThreadEnterCS();
1.528 +
1.529 + // Must close chunk before we free memory otherwise it would still be
1.530 + // possible to access memory that has been freed and potentially reused.
1.531 + Kern::ChunkClose(iChunk);
1.532 + iChunk = NULL;
1.533 + retVal = Epoc::FreePhysicalRam(iAddrArrayPages, iAddrArray);
1.534 + delete[] iAddrArray;
1.535 +
1.536 + NKern::ThreadLeaveCS();
1.537 +
1.538 + iAddrArray = NULL;
1.539 + iAddrArrayPages = 0;
1.540 + return retVal;
1.541 + }
1.542 +
1.543 +//
1.544 +// AllocFixedPagesWrite
1.545 +//
1.546 +// Allocate a number of fixed pages, then create a shared chunk and map these pages into it
1.547 +//
1.548 +TInt DRamDefragFuncTestChannel::AllocFixedPagesWrite(TInt aNumPages)
1.549 + {
1.550 +
1.551 + TInt retVal = KErrNone;
1.552 + TUint index = 0;
1.553 + TChunkCreateInfo chunkInfo;
1.554 + TUint32 mapAttr;
1.555 +
1.556 + if (iAddrArray != NULL || iChunk != NULL)
1.557 + {
1.558 + return KErrInUse;
1.559 + }
1.560 +
1.561 + if (aNumPages == FILL_ALL_FIXED)
1.562 + {// Fill memory with fixed pages, leaving room for the kernel to expand.
1.563 + TUint freePages = FreeRam() >> iPageShift;
1.564 + // Calculate how many page tables will be required:
1.565 + // 1024 pages per page table
1.566 +		//	4 page tables per page
1.567 + TUint pageTablePages = (freePages >> 10) >> 2;
1.568 + TUint physAddrPages = (sizeof(TPhysAddr) * freePages) >> iPageShift;
1.569 + TESTDEBUG(Kern::Printf("pageTablePages %d physAddrPages %d", pageTablePages, physAddrPages));
1.570 +		// Determine how many heap pages will be required, with a generous (16x) margin of extra space.
1.571 + TUint fixedOverhead = (pageTablePages + physAddrPages) << 4;
1.572 + TESTDEBUG(Kern::Printf("freePages %d fixedOverhead %d", freePages, fixedOverhead));
1.573 + aNumPages = freePages - fixedOverhead;
1.574 + TESTDEBUG(Kern::Printf("aNumPages = %d", aNumPages));
1.575 + }
1.576 +
1.577 + NKern::ThreadEnterCS();
1.578 +
1.579 + iAddrArray = new TPhysAddr[aNumPages];
1.580 + if(!iAddrArray)
1.581 + {
1.582 + retVal = KErrNoMemory;
1.583 + goto exit;
1.584 + }
1.585 +
1.586 + TESTDEBUG(Kern::Printf("amount of free pages = %d", FreeRam() >> iPageShift));
1.587 +
1.588 + // create a shared chunk and map these pages into the chunk.
1.589 +
1.590 + chunkInfo.iType = TChunkCreateInfo::ESharedKernelSingle;
1.591 + chunkInfo.iMaxSize = aNumPages << iPageShift;
1.592 + chunkInfo.iMapAttr = EMapAttrFullyBlocking;
1.593 + chunkInfo.iOwnsMemory = EFalse;
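+	// iOwnsMemory is EFalse as the physical pages are allocated separately below
+	// with Epoc::AllocPhysicalRam() and committed into the chunk with
+	// Kern::ChunkCommitPhysical().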
1.594 +
1.595 + TESTDEBUG(Kern::Printf("Creating chunk - amount of free pages = %d\n", FreeRam() >> iPageShift));
1.596 + retVal = Kern::ChunkCreate(chunkInfo, iChunk, iKernAddrStart, mapAttr);
1.597 + if (retVal != KErrNone)
1.598 + {
1.599 + Kern::Printf("ChunkCreate failed retVal = %d", retVal);
1.600 + goto exit;
1.601 + }
1.602 +
1.603 + TESTDEBUG(Kern::Printf("Created chunk - amount of free pages = %d\n", FreeRam() >> iPageShift));
1.604 +
1.605 + retVal = Epoc::AllocPhysicalRam(aNumPages, iAddrArray);
1.606 + if (retVal != KErrNone)
1.607 + {
1.608 + TESTDEBUG(Kern::Printf("Alloc of %d pages was unsuccessful\n", aNumPages));
1.609 + goto exit;
1.610 + }
1.611 + iAddrArrayPages = aNumPages;
1.612 + TESTDEBUG(Kern::Printf("Committing chunk - amount of free pages = %d\n", FreeRam() >> iPageShift));
1.613 + retVal = Kern::ChunkCommitPhysical(iChunk, 0, iAddrArrayPages << iPageShift, iAddrArray);
1.614 + if (retVal != KErrNone)
1.615 + {
1.616 + Kern::Printf("Commit was bad retVal = %d", retVal);
1.617 + goto exit;
1.618 + }
1.619 + TESTDEBUG(Kern::Printf("Committed chunk - amount of free pages = %d\n", FreeRam() >> iPageShift));
1.620 + TESTDEBUG(Kern::Printf("Start - 0x%08x\n", iKernAddrStart));
1.621 + for (index = 0; index < iAddrArrayPages; index ++)
1.622 + {
1.623 + TInt* pInt = (TInt *)(iKernAddrStart + (index << iPageShift));
1.624 + TInt* pIntEnd = pInt + (iPageSize / sizeof(TInt));
1.625 + // write the index into all of the words of the page.
1.626 + while (pInt < pIntEnd)
1.627 + {
1.628 + *pInt++ = index;
1.629 + }
1.630 + }
1.631 +
1.632 + TESTDEBUG(Kern::Printf("Allocated %d pages\n", iAddrArrayPages));
1.633 +exit:
1.634 + if (retVal != KErrNone)
1.635 + {// Cleanup as something went wrong
1.636 + if (iChunk)
1.637 + {
1.638 + Kern::ChunkClose(iChunk);
1.639 + iChunk = NULL;
1.640 + }
1.641 + if (iAddrArray != NULL)
1.642 + {
1.643 + Epoc::FreePhysicalRam(iAddrArrayPages, iAddrArray);
1.644 + delete[] iAddrArray;
1.645 + iAddrArray = NULL;
1.646 + }
1.647 + iAddrArrayPages = 0;
1.648 + }
1.649 +
1.650 + NKern::ThreadLeaveCS();
1.651 + return retVal;
1.652 + }
1.653 +
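+//
+// GetAllocDiff
+//
+// Measure how many pages of free RAM are consumed by a kernel heap allocation
+// large enough to hold aNumPages physical addresses; the array is freed again
+// before the difference is returned (in pages).
+//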
1.654 +TInt DRamDefragFuncTestChannel::GetAllocDiff(TUint aNumPages)
1.655 + {
1.656 + TUint initialFreeRam = FreeRam();
1.657 + TInt ret = KErrNone;
1.658 + TInt ramDifference;
1.659 +
1.660 + NKern::ThreadEnterCS();
1.661 +
1.662 + if (iAddrArray != NULL)
1.663 + {
1.664 + ret = KErrInUse;
1.665 + goto exit;
1.666 + }
1.667 + iAddrArray = (TPhysAddr *)Kern::AllocZ(sizeof(TPhysAddr) * aNumPages);
1.668 +
1.669 + if(!iAddrArray)
1.670 + {
1.671 + ret = KErrNoMemory;
1.672 + goto exit;
1.673 + }
1.674 +
1.675 + ramDifference = initialFreeRam - FreeRam();
1.676 +
1.677 + Kern::Free(iAddrArray);
1.678 + iAddrArray = NULL;
1.679 +
1.680 + ret = ramDifference >> iPageShift;
1.681 +exit:
1.682 + NKern::ThreadLeaveCS();
1.683 + return ret;
1.684 + }
1.685 +//
1.686 +// AllocFixedPages
1.687 +//
1.688 +// Allocate a number of fixed pages to memory
1.689 +//
1.690 +TInt DRamDefragFuncTestChannel::AllocFixedPages(TInt aNumPages)
1.691 + {
1.692 + TInt r = AllocFixedArray(aNumPages);
1.693 + if (r != KErrNone)
1.694 + {
1.695 + return r;
1.696 + }
1.697 + return AllocateFixed2(aNumPages);
1.698 + }
1.699 +
1.700 +/**
1.701 +Allocate the array required to store the physical addresses of
1.702 +the number of fixed pages to be allocated.
1.703 +
1.704 +@param aNumPages The number of fixed pages to be allocated.
1.705 +@return KErrNone on success.
1.706 +*/
1.707 +TInt DRamDefragFuncTestChannel::AllocFixedArray(TInt aNumPages)
1.708 + {
1.709 + if (iAddrArray != NULL)
1.710 + {
1.711 + return KErrInUse;
1.712 + }
1.713 +
1.714 + if (aNumPages == FILL_ALL_FIXED)
1.715 + {// Fill memory with fixed pages.
1.716 + aNumPages = FreeRam() >> iPageShift;
1.717 + TESTDEBUG(Kern::Printf("aNumPages %d FreeRam() %d", aNumPages, FreeRam()));
1.718 + }
1.719 + NKern::ThreadEnterCS();
1.720 +
1.721 + iAddrArray = new TPhysAddr[aNumPages];
1.722 + iAddrArraySize = aNumPages; // Only required for AllocateFixed2() when aNumPages == FILL_ALL_FIXED.
1.723 + iAddrArrayPages = 0; // No physical pages have been allocated yet.
1.724 +
1.725 + NKern::ThreadLeaveCS();
1.726 +
1.727 + if (!iAddrArray)
1.728 + {
1.729 + return KErrNoMemory;
1.730 + }
1.731 + return KErrNone;
1.732 + }
1.733 +
1.734 +
1.735 +/**
1.736 +Allocate the specified number of fixed pages.
1.737 +This should only be invoked when iAddrArray has already been allocated
1.738 +
1.739 +@param aNumPages The number of pages to allocate.
1.740 +*/
1.741 +TInt DRamDefragFuncTestChannel::AllocateFixed2(TInt aNumPages)
1.742 + {
1.743 + if (iAddrArray == NULL)
1.744 + {
1.745 + return KErrGeneral;
1.746 + }
1.747 + TInt retVal = KErrNone;
1.748 + NKern::ThreadEnterCS();
1.749 + if (aNumPages == FILL_ALL_FIXED)
1.750 + {
1.751 +		// Allocate a number of fixed pages to RAM a page at a time so that the allocations
1.752 + // will always fill as much memory as possible.
1.753 + TPhysAddr* addrPtr = iAddrArray;
1.754 + TPhysAddr* addrPtrEnd = addrPtr + iAddrArraySize;
1.755 + while (addrPtr < addrPtrEnd)
1.756 + {
1.757 + retVal = Epoc::AllocPhysicalRam(1, addrPtr++);
1.758 + if (retVal != KErrNone)
1.759 + break;
1.760 + iAddrArrayPages++;
1.761 + }
1.762 + }
1.763 + else
1.764 + {
1.765 + retVal = Epoc::AllocPhysicalRam(aNumPages, iAddrArray);
1.766 + if (retVal != KErrNone)
1.767 + {
1.768 + TESTDEBUG(Kern::Printf("aNumPages %d FreeRam() %d", aNumPages, FreeRam()));
1.769 + delete[] iAddrArray;
1.770 + iAddrArray = NULL;
1.771 + TESTDEBUG(Kern::Printf("aNumPages %d FreeRam() %d", aNumPages, FreeRam()));
1.772 + TESTDEBUG(Kern::Printf("Fixed pages alloc was unsuccessful\n"));
1.773 + }
1.774 + else
1.775 + iAddrArrayPages = aNumPages;
1.776 + }
1.777 +
1.778 + NKern::ThreadLeaveCS();
1.779 + return retVal;
1.780 + }
1.781 +//
1.782 +// CheckCancel
1.783 +//
1.784 +// Check that when a defrag is cancelled, the correct return value is reported
1.785 +//
1.786 +TInt DRamDefragFuncTestChannel::CheckCancel(STestParameters* aParams)
1.787 + {
1.788 + TInt returnValue = KErrNone;
1.789 + STestParameters params;
1.790 +	kumemget(&params, aParams, sizeof(STestParameters));
1.791 +
1.792 + Kern::Printf( "defragtype = %d, defragversion = %d, priority = %d, maxpages = %d, ID = %d",
1.793 + params.iDefragType, params.iDefragVersion, params.iPriority, params.iMaxPages, params.iID);
1.794 +
1.795 +
1.796 + NFastSemaphore sem;
1.797 + NKern::FSSetOwner(&sem, 0);
1.798 + TPhysAddr zoneAddress;
1.799 + TInt maxPages = 0;
1.800 + TInt priority = (NKern::CurrentThread()->iPriority) - 2;
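+	// Queue the defrag at a lower priority than this thread so that the Cancel()
+	// call below can be issued before the operation gets a chance to complete.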
1.801 +
1.802 + if (params.iDefragType == DEFRAG_TYPE_GEN) // DefragRam
1.803 + {
1.804 + returnValue = iDefragRequest.DefragRam(&sem, priority, maxPages);
1.805 + }
1.806 + else if (params.iDefragType == DEFRAG_TYPE_EMPTY) // EmptyRamZone
1.807 + {
1.808 + returnValue = iDefragRequest.EmptyRamZone(params.iID, &sem, priority);
1.809 + }
1.810 + else if (params.iDefragType == DEFRAG_TYPE_CLAIM) // ClaimRamZone
1.811 + {
1.812 + returnValue = iDefragRequest.ClaimRamZone(params.iID, zoneAddress, &sem, priority);
1.813 + }
1.814 + else
1.815 + {
1.816 + Kern::Printf("A valid defrag type was not specified");
1.817 + return KErrGeneral;
1.818 + }
1.819 +
1.820 + iDefragRequest.Cancel();
1.821 + NKern::FSWait(&sem);
1.822 + returnValue = iDefragRequest.Result();
1.823 + return returnValue;
1.824 + }
1.825 +
1.826 +
1.827 +//
1.828 +// CheckPriorities
1.829 +//
1.830 +// Queue defrags with differing priorities and ensure they complete in the correct order
1.831 +//
1.832 +TInt DRamDefragFuncTestChannel::CheckPriorities(STestParameters* aParams)
1.833 + {
1.834 + STestParameters params;
1.835 +	kumemget(&params, aParams, sizeof(STestParameters));
1.836 +
1.837 + // Still have an outstanding defrag operation
1.838 +	if (iCompleteReq != NULL || iCompleteReq2 != NULL || iCompleteReq3 != NULL)
1.839 + {
1.840 + return KErrInUse;
1.841 + }
1.842 +
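+	// Queue three EmptyRamZone defrags on different RAM zones with priorities 1,
+	// 30 and 60; the user side then reads back iOrder via GetDefragOrder() to
+	// verify the queued operations completed in the correct order.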
1.843 + // Open a handle to the thread so that it isn't destroyed as defrag dfc may
1.844 + // then try to complete the request on a destroyed thread.
1.845 + iRequestThread = &Kern::CurrentThread();
1.846 + iRequestThread->Open();
1.847 + iCompleteReq = params.iReqStat;
1.848 +
1.849 + // Open a reference on this channel to stop the destructor running before
1.850 + // this defrag request has completed.
1.851 + Open();
1.852 + TUint defragZone = params.iID - 1;
1.853 + TInt returnValue = iDefragRequest.EmptyRamZone(defragZone, &iDefragCompleteDfc, 1);
1.854 + if (returnValue != KErrNone)
1.855 + {
1.856 + AsyncClose();
1.857 + iCompleteReq = NULL;
1.858 + iRequestThread->AsyncClose();
1.859 + iRequestThread = NULL;
1.860 + return returnValue;
1.861 + }
1.862 +
1.863 + // Open a handle to the thread so that it isn't destroyed as defrag dfc may
1.864 + // then try to complete the request on a destroyed thread.
1.865 + iRequestThread2 = &Kern::CurrentThread();
1.866 + iRequestThread2->Open();
1.867 + iCompleteReq2 = params.iReqStat2;
1.868 + // Open a reference on this channel to stop the destructor running before
1.869 + // this defrag request has completed.
1.870 + Open();
1.871 + defragZone = params.iID;
1.872 + returnValue = iDefragRequest2.EmptyRamZone(defragZone, &iDefragComplete2Dfc, 30);
1.873 + if (returnValue != KErrNone)
1.874 + {
1.875 + // Cancel any successfully queued operations.
1.876 + // Set dfcs to signal dummy request statuses as user side
1.877 + // request status shouldn't be signalled.
1.878 + iCompleteReq = &iTmpRequestStatus1;
1.879 + iDefragRequest.Cancel();
1.880 +
1.881 + // Clean up this operation.
1.882 + AsyncClose();
1.883 + iCompleteReq2 = NULL;
1.884 + iRequestThread2->AsyncClose();
1.885 + iRequestThread2 = NULL;
1.886 + return returnValue;
1.887 + }
1.888 +
1.889 + // Open a handle to the thread so that it isn't destroyed as defrag dfc may
1.890 + // then try to complete the request on a destroyed thread.
1.891 + iRequestThread3 = &Kern::CurrentThread();
1.892 + iRequestThread3->Open();
1.893 + iCompleteReq3 = params.iReqStat3;
1.894 + // Open a reference on this channel to stop the destructor running before
1.895 + // this defrag request has completed.
1.896 + Open();
1.897 + defragZone = params.iID + 2;
1.898 + returnValue = iDefragRequest3.EmptyRamZone(defragZone, &iDefragComplete3Dfc, 60);
1.899 + if (returnValue != KErrNone)
1.900 + {
1.901 + // Cancel any successfully queued operations.
1.902 + // Set dfcs to signal dummy request statuses as user side
1.903 + // request status shouldn't be signalled.
1.904 + iCompleteReq = &iTmpRequestStatus1;
1.905 + iCompleteReq2 = &iTmpRequestStatus2;
1.906 + iDefragRequest.Cancel();
1.907 + iDefragRequest2.Cancel();
1.908 +
1.909 + // clean up this defrag operation
1.910 + AsyncClose();
1.911 + iCompleteReq3 = NULL;
1.912 + iRequestThread3->AsyncClose();
1.913 + iRequestThread3 = NULL;
1.914 + return returnValue;
1.915 + }
1.916 + return returnValue;
1.917 + }
1.918 +
1.919 +//
1.920 +// GetDefragOrder
1.921 +//
1.922 +// Get the order in which the defrags were completed
1.923 +//
1.924 +TInt DRamDefragFuncTestChannel::GetDefragOrder()
1.925 + {
1.926 + Kern::Printf("order = %d", iOrder);
1.927 + return iOrder;
1.928 + }
1.929 +
1.930 +
1.931 +//
1.932 +// CallDefrag
1.933 +//
1.934 +// Call a specific defrag operation depending on the parameters it is called with
1.935 +//
1.936 +TInt DRamDefragFuncTestChannel::CallDefrag(STestParameters* aParams)
1.937 + {
1.938 + TInt returnValue = 0;
1.939 + STestParameters params;
1.940 +	kumemget(&params, aParams, sizeof(STestParameters));
1.941 +
1.942 + TESTDEBUG(Kern::Printf("defragtype = %d, defragversion = %d, priority = %d, maxpages = %d, ID = %d",
1.943 + params.iDefragType, params.iDefragVersion, params.iPriority, params.iMaxPages, params.iID));
1.944 +
1.945 +
1.946 + NFastSemaphore sem;
1.947 + NKern::FSSetOwner(&sem, 0);
1.948 +
1.949 + if (params.iDefragType == DEFRAG_TYPE_GEN) // DefragRam
1.950 + {
1.951 + switch(params.iDefragVersion)
1.952 + {
1.953 + case DEFRAG_VER_SYNC: // Sync
1.954 + returnValue = iDefragRequest.DefragRam(params.iPriority, params.iMaxPages);
1.955 + break;
1.956 +
1.957 + case DEFRAG_VER_SEM: // Semaphore
1.958 + returnValue = iDefragRequest.DefragRam(&sem, params.iPriority, params.iMaxPages);
1.959 + NKern::FSWait(&sem);
1.960 + returnValue = iDefragRequest.Result();
1.961 + break;
1.962 +
1.963 + case DEFRAG_VER_DFC: // Dfc
1.964 + // Open a handle to the thread so that it isn't destroyed as defrag dfc may
1.965 + // then try to complete the request on a destroyed thread.
1.966 + if (iCompleteReq == NULL)
1.967 + {
1.968 + iRequestThread = &Kern::CurrentThread();
1.969 + iRequestThread->Open();
1.970 + iCompleteReq = params.iReqStat;
1.971 + // Open a reference on this channel to stop the destructor running before
1.972 + // the defrag request has completed.
1.973 + Open();
1.974 +
1.975 + returnValue = iDefragRequest.DefragRam(&iDefragCompleteDfc, params.iPriority, params.iMaxPages);
1.976 + if (returnValue != KErrNone)
1.977 +					{// defrag operation didn't start so close all opened handles
1.978 + AsyncClose();
1.979 + iRequestThread->AsyncClose();
1.980 + iRequestThread = NULL;
1.981 + iCompleteReq = NULL;
1.982 + }
1.983 + }
1.984 + else
1.985 + {// Still have a pending defrag request
1.986 + returnValue = KErrInUse;
1.987 + }
1.988 + break;
1.989 +
1.990 + default:
1.991 + break;
1.992 + }
1.993 + }
1.994 +
1.995 + else if (params.iDefragType == DEFRAG_TYPE_EMPTY) // EmptyRamZone
1.996 + {
1.997 + switch(params.iDefragVersion)
1.998 + {
1.999 + case DEFRAG_VER_SYNC: // Sync
1.1000 +
1.1001 + returnValue = iDefragRequest.EmptyRamZone(params.iID, params.iPriority);
1.1002 + break;
1.1003 +
1.1004 + case DEFRAG_VER_SEM: // Semaphore
1.1005 + returnValue = iDefragRequest.EmptyRamZone(params.iID, &sem, params.iPriority);
1.1006 + NKern::FSWait(&sem);
1.1007 + returnValue = iDefragRequest.Result();
1.1008 + break;
1.1009 +
1.1010 + case DEFRAG_VER_DFC: // Dfc
1.1011 + if (iCompleteReq == NULL)
1.1012 + {
1.1013 + // Open a handle to the thread so that it isn't destroyed as defrag dfc may
1.1014 + // then try to complete the request on a destroyed thread.
1.1015 + iRequestThread = &Kern::CurrentThread();
1.1016 + iRequestThread->Open();
1.1017 + iCompleteReq = params.iReqStat;
1.1018 + // Open a reference on this channel to stop the destructor running before
1.1019 + // the defrag request has completed.
1.1020 + Open();
1.1021 +
1.1022 + returnValue = iDefragRequest.EmptyRamZone(params.iID, &iDefragCompleteDfc, params.iPriority);
1.1023 + if (returnValue != KErrNone)
1.1024 +					{// defrag operation didn't start so close all opened handles
1.1025 + AsyncClose();
1.1026 + iRequestThread->AsyncClose();
1.1027 + iRequestThread = NULL;
1.1028 + iCompleteReq = NULL;
1.1029 + }
1.1030 + }
1.1031 + else
1.1032 + {// Still have a pending defrag request
1.1033 + returnValue = KErrInUse;
1.1034 + }
1.1035 + break;
1.1036 +
1.1037 + default:
1.1038 + break;
1.1039 + }
1.1040 + }
1.1041 +
1.1042 + else if (params.iDefragType == DEFRAG_TYPE_CLAIM) // ClaimRamZone
1.1043 + {
1.1044 + if (iContigAddr != KPhysAddrInvalid)
1.1045 + {
1.1046 + return KErrInUse;
1.1047 + }
1.1048 + switch(params.iDefragVersion)
1.1049 + {
1.1050 + case DEFRAG_VER_SYNC: // Sync
1.1051 +
1.1052 + returnValue = iDefragRequest.ClaimRamZone(params.iID, iContigAddr, params.iPriority);
1.1053 + break;
1.1054 +
1.1055 + case DEFRAG_VER_SEM: // Semaphore
1.1056 + returnValue = iDefragRequest.ClaimRamZone(params.iID, iContigAddr, &sem, params.iPriority);
1.1057 + NKern::FSWait(&sem);
1.1058 + returnValue = iDefragRequest.Result();
1.1059 + break;
1.1060 +
1.1061 + case DEFRAG_VER_DFC: // Dfc
1.1062 + if (iCompleteReq == NULL)
1.1063 + {
1.1064 + // Open a handle to the thread so that it isn't destroyed as defrag dfc may
1.1065 + // then try to complete the request on a destroyed thread.
1.1066 + iRequestThread = &Kern::CurrentThread();
1.1067 + iRequestThread->Open();
1.1068 + iCompleteReq = params.iReqStat;
1.1069 + // Open a reference on this channel to stop the destructor running before
1.1070 + // the defrag request has completed.
1.1071 + Open();
1.1072 +
1.1073 + // If the claim is successful iContigAddr will be set just before the dfc
1.1074 + // callback function to the physical base address of the RAM zone claimed.
1.1075 + // Therefore, the check for iContigAddr is not necessarily safe so use
1.1076 +				// this DFC version with care and don't use it in combination with any
1.1077 + // contiguous allocation methods.
1.1078 + returnValue = iDefragRequest.ClaimRamZone(params.iID, iContigAddr, &iDefragCompleteDfc,
1.1079 + params.iPriority);
1.1080 + if (returnValue != KErrNone)
1.1081 +					{// defrag operation didn't start so close all opened handles
1.1082 + AsyncClose();
1.1083 + iRequestThread->AsyncClose();
1.1084 + iRequestThread = NULL;
1.1085 + iCompleteReq = NULL;
1.1086 + }
1.1087 + }
1.1088 + else
1.1089 + {// Still have a pending defrag request
1.1090 + returnValue = KErrInUse;
1.1091 + }
1.1092 + break;
1.1093 +
1.1094 + default:
1.1095 + break;
1.1096 + }
1.1097 + if (returnValue == KErrNone && params.iDefragVersion != DEFRAG_VER_DFC)
1.1098 + {
1.1099 + // Get the size of the zone just claimed so that it can be freed. Don't set
1.1100 +		// iContigBytes for the DFC method as it will be cleared by address in t_ramdefrag.
1.1101 +
1.1102 + NKern::ThreadEnterCS();
1.1103 +
1.1104 + SRamZonePageCount pageCount;
1.1105 + returnValue = Epoc::GetRamZonePageCount(params.iID, pageCount);
1.1106 +
1.1107 + NKern::ThreadLeaveCS();
1.1108 +
1.1109 + __NK_ASSERT_ALWAYS(returnValue == KErrNone); // If this fails something is seriously wrong
1.1110 + iContigBytes = pageCount.iFixedPages << iPageShift;
1.1111 + }
1.1112 + else
1.1113 + {// The claim failed so allow other contiguous allocations.
1.1114 + iContigAddr = KPhysAddrInvalid;
1.1115 + }
1.1116 + }
1.1117 +
1.1118 + return returnValue;
1.1119 + }
1.1120 +
1.1121 +
1.1122 +
1.1123 +//
1.1124 +// SetZoneFlag
1.1125 +//
1.1126 +// Change the flag settings of a zone
1.1127 +//
1.1128 +TInt DRamDefragFuncTestChannel::SetZoneFlag(STestFlagParams* aParams)
1.1129 + {
1.1130 +
1.1131 + TInt returnValue = 0;
1.1132 + STestFlagParams flagParams;
1.1133 + kumemget(&flagParams, aParams, sizeof(STestFlagParams));
1.1134 + TUint setFlag = 0x0;
1.1135 + switch(flagParams.iSetFlag)
1.1136 + {
1.1137 + case NO_FIXED_FLAG:
1.1138 + setFlag = KRamZoneFlagNoFixed;
1.1139 + break;
1.1140 +
1.1141 + case NO_MOVE_FLAG:
1.1142 + setFlag = KRamZoneFlagNoMovable;
1.1143 + break;
1.1144 +
1.1145 + case NO_DISCARD_FLAG:
1.1146 + setFlag = KRamZoneFlagNoDiscard;
1.1147 + break;
1.1148 +
1.1149 + case NO_ALLOC_FLAG:
1.1150 + setFlag = KRamZoneFlagNoAlloc;
1.1151 + break;
1.1152 +
1.1153 + case ONLY_DISCARD_FLAG:
1.1154 + setFlag = KRamZoneFlagDiscardOnly;
1.1155 + break;
1.1156 +
1.1157 + case RESET_FLAG:
1.1158 + setFlag = 0x00;
1.1159 + break;
1.1160 +
1.1161 + case ORIG_FLAG:
1.1162 + setFlag = flagParams.iOptSetFlag;
1.1163 + break;
1.1164 +
1.1165 + default:
1.1166 + break;
1.1167 + }
1.1168 +
1.1169 + NKern::ThreadEnterCS();
1.1170 +
1.1171 + returnValue = Epoc::ModifyRamZoneFlags(flagParams.iZoneID, flagParams.iZoneFlag, setFlag);
1.1172 +
1.1173 + NKern::ThreadLeaveCS();
1.1174 + return returnValue;
1.1175 + }
1.1176 +//
1.1177 +// PageCount
1.1178 +//
1.1179 +// Call the GetRamZonePageCount function
1.1180 +//
1.1181 +TInt DRamDefragFuncTestChannel::PageCount(TUint aId, STestUserSidePageCount* aPageData)
1.1182 + {
1.1183 + TInt returnValue = 0;
1.1184 + STestUserSidePageCount pageData;
1.1185 + SRamZonePageCount pageCount;
1.1186 +
1.1187 + NKern::ThreadEnterCS();
1.1188 +
1.1189 + returnValue = Epoc::GetRamZonePageCount(aId, pageCount);
1.1190 +
1.1191 + NKern::ThreadLeaveCS();
1.1192 +
1.1193 + pageData.iFreePages = pageCount.iFreePages;
1.1194 + pageData.iFixedPages = pageCount.iFixedPages;
1.1195 + pageData.iMovablePages = pageCount.iMovablePages;
1.1196 + pageData.iDiscardablePages = pageCount.iDiscardablePages;
1.1197 +
1.1198 + kumemput(aPageData, &pageData, sizeof(STestUserSidePageCount));
1.1199 + return returnValue;
1.1200 + }
1.1201 +
1.1202 +//
1.1203 +// ZoneAllocContiguous
1.1204 +//
1.1205 +// Call the contiguous overload of the Epoc::ZoneAllocPhysicalRam() function
1.1206 +//
1.1207 +TInt DRamDefragFuncTestChannel::ZoneAllocContiguous(TUint aZoneID, TUint aNumBytes)
1.1208 + {
1.1209 + TInt returnValue = KErrNone;
1.1210 +
1.1211 + if (iContigAddr != KPhysAddrInvalid)
1.1212 + {
1.1213 + return KErrInUse;
1.1214 + }
1.1215 + iContigBytes = aNumBytes;
1.1216 +
1.1217 + NKern::ThreadEnterCS();
1.1218 +
1.1219 + returnValue = Epoc::ZoneAllocPhysicalRam(aZoneID, iContigBytes, iContigAddr, 0);
1.1220 +
1.1221 + NKern::ThreadLeaveCS();
1.1222 +
1.1223 + if (returnValue != KErrNone)
1.1224 + {
1.1225 + iContigAddr = KPhysAddrInvalid;
1.1226 + }
1.1227 + return returnValue;
1.1228 + }
1.1229 +
1.1230 +//
1.1231 +// ZoneAllocContiguous
1.1232 +//
1.1233 +// Call the contiguous overload of the Epoc::ZoneAllocPhysicalRam() function
1.1234 +//
1.1235 +TInt DRamDefragFuncTestChannel::ZoneAllocContiguous(TUint* aZoneIdList, TUint aZoneIdCount, TUint aNumBytes)
1.1236 + {
1.1237 + TInt returnValue = KErrNone;
1.1238 +
1.1239 + if (iContigAddr != KPhysAddrInvalid)
1.1240 + {
1.1241 + return KErrInUse;
1.1242 + }
1.1243 + iContigBytes = aNumBytes;
1.1244 +
1.1245 + // Copy the RAM zone IDs from user side memory to kernel memory.
1.1246 + if (aZoneIdCount > KMaxRamZones)
1.1247 + {// Too many IDs.
1.1248 + return KErrArgument;
1.1249 + }
1.1250 + kumemget32(iZoneIdArray, aZoneIdList, sizeof(TUint) * aZoneIdCount);
1.1251 +
1.1252 + NKern::ThreadEnterCS();
1.1253 +
1.1254 + returnValue = Epoc::ZoneAllocPhysicalRam(iZoneIdArray, aZoneIdCount, iContigBytes, iContigAddr, 0);
1.1255 +
1.1256 + NKern::ThreadLeaveCS();
1.1257 +
1.1258 + if (returnValue != KErrNone)
1.1259 + {
1.1260 + iContigAddr = KPhysAddrInvalid;
1.1261 + }
1.1262 + return returnValue;
1.1263 + }
1.1264 +
1.1265 +//
1.1266 +// AllocContiguous
1.1267 +//
1.1268 +// Call the contiguous overload of Epoc::AllocPhysicalRam()
1.1269 +//
1.1270 +TInt DRamDefragFuncTestChannel::AllocContiguous(TUint aNumBytes)
1.1271 + {
1.1272 + TInt returnValue = 0;
1.1273 +
1.1274 + if (iContigAddr != KPhysAddrInvalid)
1.1275 + {
1.1276 + return KErrInUse;
1.1277 + }
1.1278 +
1.1279 + NKern::ThreadEnterCS();
1.1280 +
1.1281 + returnValue = Epoc::AllocPhysicalRam(aNumBytes, iContigAddr, 0);
1.1282 +
1.1283 + NKern::ThreadLeaveCS();
1.1284 +
1.1285 + if (returnValue != KErrNone)
1.1286 + {
1.1287 + iContigAddr = KPhysAddrInvalid;
1.1288 + }
1.1289 + iContigBytes = aNumBytes;
1.1290 + return returnValue;
1.1291 + }
1.1292 +
1.1293 +
1.1294 +//
1.1295 +// ZoneAllocDiscontiguous
1.1296 +//
1.1297 +// Call the discontiguous overload of Epoc::ZoneAllocPhysicalRam() function
1.1298 +//
1.1299 +TInt DRamDefragFuncTestChannel::ZoneAllocDiscontiguous(TUint aZoneId, TInt aNumPages)
1.1300 + {
1.1301 + TInt r = AllocFixedArray(aNumPages);
1.1302 + if (r != KErrNone)
1.1303 + {
1.1304 + return r;
1.1305 + }
1.1306 + return ZoneAllocDiscontiguous2(aZoneId, aNumPages);
1.1307 + }
1.1308 +
1.1309 +/**
1.1310 +Allocate the specified number of fixed pages from the specified RAM zone.
1.1311 +This should only be invoked when iAddrArray has already been allocated
1.1312 +
1.1313 +@param aZoneID The ID of the RAM zone to allocate from
1.1314 +@param aNumPages The number of pages to allocate.
1.1315 +*/
1.1316 +TInt DRamDefragFuncTestChannel::ZoneAllocDiscontiguous2(TUint aZoneID, TInt aNumPages)
1.1317 + {
1.1318 + if (iAddrArray == NULL)
1.1319 + {
1.1320 + return KErrGeneral;
1.1321 + }
1.1322 +
1.1323 + NKern::ThreadEnterCS();
1.1324 +
1.1325 + TESTDEBUG(Kern::Printf("Allocating fixed pages"));
1.1326 + TInt returnValue = Epoc::ZoneAllocPhysicalRam(aZoneID, aNumPages, iAddrArray);
1.1327 +
1.1328 + if (KErrNone != returnValue)
1.1329 + {
1.1330 + TESTDEBUG(Kern::Printf("Alloc was unsuccessful, r = %d\n", returnValue));
1.1331 + TESTDEBUG(Kern::Printf("aNumPages = %d, aZoneID = %d", aNumPages, aZoneID));
1.1332 + Kern::Free(iAddrArray);
1.1333 + iAddrArray = NULL;
1.1334 + goto exit;
1.1335 + }
1.1336 + iAddrArrayPages = aNumPages;
1.1337 + TESTDEBUG(Kern::Printf("iAddrArrayPages = %d, aZoneID = %d", iAddrArrayPages, aZoneID));
1.1338 +
1.1339 +exit:
1.1340 + NKern::ThreadLeaveCS();
1.1341 + return returnValue;
1.1342 + }
1.1343 +
1.1344 +
1.1345 +//
1.1346 +// ZoneAllocDiscontiguous
1.1347 +//
1.1348 +// Call the discontiguous overload of Epoc::ZoneAllocPhysicalRam() function
1.1349 +//
1.1350 +TInt DRamDefragFuncTestChannel::ZoneAllocDiscontiguous(TUint* aZoneIdList, TUint aZoneIdCount, TInt aNumPages)
1.1351 + {
1.1352 + TInt returnValue = 0;
1.1353 +
1.1354 +	if (iAddrArray != NULL)
1.1355 +		{
1.1356 +		return KErrInUse;
1.1357 +		}
1.1358 +
1.1359 +	// Check the RAM zone ID count before allocating iAddrArray so an early return cannot leak it.
1.1360 +	if (aZoneIdCount > KMaxRamZones)
1.1361 +		{// Too many IDs.
1.1362 +		return KErrArgument;
1.1363 +		}
1.1364 +
1.1365 +	NKern::ThreadEnterCS();
1.1366 +	iAddrArray = new TPhysAddr[aNumPages];
1.1367 +	NKern::ThreadLeaveCS();
1.1368 +
1.1369 +	if (iAddrArray == NULL)
1.1370 +		{
1.1371 +		return KErrNoMemory;
1.1372 +		}
1.1373 +	// Copy the RAM zone IDs from user side memory to the kernel side buffer.
1.1374 +	kumemget(iZoneIdArray, aZoneIdList, sizeof(TUint) * aZoneIdCount);
1.1375 +
1.1376 + NKern::ThreadEnterCS();
1.1377 +
1.1378 + TESTDEBUG(Kern::Printf("Allocating fixed pages"));
1.1379 + returnValue = Epoc::ZoneAllocPhysicalRam(iZoneIdArray, aZoneIdCount, aNumPages, iAddrArray);
1.1380 +
1.1381 + if (KErrNone != returnValue)
1.1382 + {
1.1383 + TESTDEBUG(Kern::Printf("Alloc was unsuccessful, r = %d\n", returnValue));
1.1384 + TESTDEBUG(Kern::Printf("aNumPages = %d, aZoneID = %d", aNumPages, aZoneIdCount));
1.1385 + delete[] iAddrArray;
1.1386 + iAddrArray = NULL;
1.1387 + goto exit;
1.1388 + }
1.1389 + iAddrArrayPages = aNumPages;
1.1390 + TESTDEBUG(Kern::Printf("iAddrArrayPages = %d, zones = %d", iAddrArrayPages, aZoneIdCount));
1.1391 +
1.1392 +exit:
1.1393 + NKern::ThreadLeaveCS();
1.1394 + return returnValue;
1.1395 + }
1.1396 +
1.1397 +//
1.1398 +// ZoneAllocToMany
1.1399 +//
1.1400 +// Call the overloaded Epoc::ZoneAllocPhysicalRam function on a number of zones
1.1401 +//
1.1402 +TInt DRamDefragFuncTestChannel::ZoneAllocToMany(TInt aZoneIndex, TInt aNumPages)
1.1403 + {
1.1404 + TInt r = ZoneAllocToManyArray(aZoneIndex, aNumPages);
1.1405 + if (r != KErrNone)
1.1406 + {
1.1407 + return r;
1.1408 + }
1.1409 + return ZoneAllocToMany2(aZoneIndex, aNumPages);
1.1410 + }
1.1411 +
1.1412 +//
1.1413 +// ZoneAllocToManyArray
1.1414 +//
1.1415 +// Allocate the arrays required to store the physical addresses of the different zones
1.1416 +// for the number of fixed pages to be allocated to that zone.
1.1417 +//
1.1418 +TInt DRamDefragFuncTestChannel::ZoneAllocToManyArray(TInt aZoneIndex, TInt aNumPages)
1.1419 + {
1.1420 + TInt returnValue = KErrNone;
1.1421 + NKern::ThreadEnterCS();
1.1422 +
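+	// iAddrPtrArray and iNumPagesArray are allocated lazily on first use; they
+	// hold, for every RAM zone on the system, the addresses of the pages
+	// allocated from that zone and how many pages were allocated.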
1.1423 +	if (iAddrPtrArray == NULL)
1.1424 +		iAddrPtrArray = (TPhysAddr**)Kern::AllocZ(sizeof(TPhysAddr*) * iZoneCount);
1.1425 +	if (iNumPagesArray == NULL)
1.1426 +		iNumPagesArray = (TInt *)Kern::AllocZ(sizeof(TInt) * iZoneCount);
1.1427 +	if (iAddrPtrArray == NULL || iNumPagesArray == NULL)
1.1428 +		{// Lazy allocation of a per-zone array failed.
1.1429 +		returnValue = KErrNoMemory;
1.1430 +		goto exit;
1.1431 +		}
1.1432 + if (iAddrPtrArray[aZoneIndex] != NULL)
1.1433 + {
1.1434 + returnValue = KErrInUse;
1.1435 + goto exit;
1.1436 + }
1.1437 +
1.1438 + iAddrPtrArray[aZoneIndex] = (TPhysAddr *)Kern::AllocZ(sizeof(TPhysAddr) * aNumPages);
1.1439 + if (iAddrPtrArray[aZoneIndex] == NULL)
1.1440 + {
1.1441 + returnValue = KErrNoMemory;
1.1442 + goto exit;
1.1443 + }
1.1444 +
1.1445 +exit:
1.1446 + NKern::ThreadLeaveCS();
1.1447 + return returnValue;
1.1448 + }
1.1449 +
1.1450 +//
1.1451 +// ZoneAllocToMany2
1.1452 +//
1.1453 +// Call the overloaded Epoc::ZoneAllocPhysicalRam function on a number of zones.
1.1454 +// This should only be invoked when iAddrPtrArray, iNumPagesArray and iAddrPtrArray[aZoneIndex]
1.1455 +// have already been allocated.
1.1456 +//
1.1457 +TInt DRamDefragFuncTestChannel::ZoneAllocToMany2(TInt aZoneIndex, TInt aNumPages)
1.1458 + {
1.1459 + TInt returnValue = KErrNone;
1.1460 + struct SRamZoneConfig zoneConfig;
1.1461 + TUint zoneID = KRamZoneInvalidId;
1.1462 +
1.1463 + if (iAddrPtrArray == NULL ||
1.1464 + iNumPagesArray == NULL ||
1.1465 + iAddrPtrArray[aZoneIndex] == NULL)
1.1466 + {
1.1467 + return KErrGeneral;
1.1468 + }
1.1469 +
1.1470 +
1.1471 + NKern::ThreadEnterCS();
1.1472 +
1.1473 + // Get the zone ID
1.1474 + Kern::HalFunction(EHalGroupRam,ERamHalGetZoneConfig,(TAny*)aZoneIndex, (TAny*)&zoneConfig);
1.1475 + zoneID = zoneConfig.iZoneId;
1.1476 + returnValue = Epoc::ZoneAllocPhysicalRam(zoneID, aNumPages, iAddrPtrArray[aZoneIndex]);
1.1477 +
1.1478 + if (KErrNone != returnValue)
1.1479 + {
1.1480 + TESTDEBUG(Kern::Printf("Alloc was unsuccessful, r = %d\n", returnValue));
1.1481 + Kern::Free(iAddrPtrArray[aZoneIndex]);
1.1482 + iAddrPtrArray[aZoneIndex] = NULL;
1.1483 + goto exit;
1.1484 + }
1.1485 + iNumPagesArray[aZoneIndex] = aNumPages;
1.1486 +
1.1487 +exit:
1.1488 + NKern::ThreadLeaveCS();
1.1489 + return returnValue;
1.1490 + }
1.1491 +
1.1492 +//
1.1493 +// FreeZone
1.1494 +//
1.1495 +// Call the overloaded Epoc::FreePhysicalRam function
1.1496 +//
1.1497 +TInt DRamDefragFuncTestChannel::FreeZone(TInt aNumPages)
1.1498 + {
1.1499 + TInt returnValue = 0;
1.1500 +
1.1501 + if (iAddrArray == NULL)
1.1502 + {
1.1503 + return KErrCorrupt;
1.1504 + }
1.1505 +
1.1506 + NKern::ThreadEnterCS();
1.1507 +
1.1508 + returnValue = Epoc::FreePhysicalRam(aNumPages, iAddrArray);
1.1509 +
1.1510 + Kern::Free(iAddrArray);
1.1511 + iAddrArray = NULL;
1.1512 +
1.1513 + NKern::ThreadLeaveCS();
1.1514 + return returnValue;
1.1515 + }
1.1516 +
1.1517 +//
1.1518 +// FreeFromAllZones
1.1519 +//
1.1520 +// Call the overloaded Epoc::FreePhysicalRam function
1.1521 +//
1.1522 +TInt DRamDefragFuncTestChannel::FreeFromAllZones()
1.1523 + {
1.1524 + TInt returnValue = 0;
1.1525 +
1.1526 + if (iAddrPtrArray == NULL)
1.1527 + {
1.1528 + return KErrCorrupt;
1.1529 + }
1.1530 +
1.1531 + NKern::ThreadEnterCS();
1.1532 +
1.1533 + for (TUint i=0; i<iZoneCount; i++)
1.1534 + {
1.1535 + if (iAddrPtrArray[i] != NULL)
1.1536 + {
1.1537 + returnValue = Epoc::FreePhysicalRam(iNumPagesArray[i], iAddrPtrArray[i]);
1.1538 + iAddrPtrArray[i] = NULL;
1.1539 + }
1.1540 + }
1.1541 + Kern::Free(iAddrPtrArray);
1.1542 + iAddrPtrArray = NULL;
1.1543 +
1.1544 + Kern::Free(iNumPagesArray);
1.1545 + iNumPagesArray = NULL;
1.1546 +
1.1547 + NKern::ThreadLeaveCS();
1.1548 + return returnValue;
1.1549 + }
1.1550 +//
1.1551 +// FreeFromAddr
1.1552 +//
1.1553 +// Free a specific number of pages starting from a specific address
1.1554 +//
1.1555 +TInt DRamDefragFuncTestChannel::FreeFromAddr(TInt aNumPages, TUint32 aAddr)
1.1556 + {
1.1557 + TInt returnValue = 0;
1.1558 + TPhysAddr address = aAddr;
1.1559 +
1.1560 + NKern::ThreadEnterCS();
1.1561 +
1.1562 + returnValue = Epoc::FreePhysicalRam(address, aNumPages << iPageShift);
1.1563 +
1.1564 + NKern::ThreadLeaveCS();
1.1565 +
1.1566 + return returnValue;
1.1567 + }
1.1568 +
1.1569 +//
1.1570 +// FreeRam
1.1571 +//
1.1572 +// Returns the current free RAM available in bytes
1.1573 +//
1.1574 +TInt DRamDefragFuncTestChannel::FreeRam()
1.1575 + {
1.1576 + return Kern::FreeRamInBytes();
1.1577 + }
1.1578 +
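+//
+// DoSetDebugFlag
+//
+// Store the debug state requested by the user side test in iDebug.
+//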
1.1579 +TInt DRamDefragFuncTestChannel::DoSetDebugFlag(TInt aState)
1.1580 + {
1.1581 + iDebug = aState;
1.1582 + return KErrNone;
1.1583 + }
1.1584 +
1.1585 +
1.1586 +//
1.1587 +// DefragCompleteDfc
1.1588 +//
1.1589 +// DFC callback called when a defrag operation has completed.
1.1590 +//
1.1591 +void DRamDefragFuncTestChannel::DefragCompleteDfc(TAny* aSelf)
1.1592 + {
1.1593 + // Just call non-static method
1.1594 + TESTDEBUG(Kern::Printf("Calling DefragCompleteDfc"));
1.1595 + ((DRamDefragFuncTestChannel*)aSelf)->DefragComplete();
1.1596 + }
1.1597 +
1.1598 +
1.1599 +//
1.1600 +// DefragComplete
1.1601 +//
1.1602 +// Invoked by the DFC callback which is called when a defrag
1.1603 +// operation has completed.
1.1604 +//
1.1605 +void DRamDefragFuncTestChannel::DefragComplete()
1.1606 + {
1.1607 + TESTDEBUG(Kern::Printf(">DDefragChannel::DefragComplete - First Defrag"));
1.1608 + TInt result = iDefragRequest.Result();
1.1609 + TESTDEBUG(Kern::Printf("complete code %d", result));
1.1610 +
1.1611 + // Complete the request and close the handle to the driver
1.1612 + Kern::SemaphoreWait(*iDefragSemaphore);
1.1613 +
1.1614 + Kern::RequestComplete(iRequestThread, iCompleteReq, result);
1.1615 + iCompleteReq = NULL;
1.1616 + iRequestThread->Close(NULL);
1.1617 + iRequestThread = NULL;
1.1618 +
1.1619 + Kern::SemaphoreSignal(*iDefragSemaphore);
1.1620 +
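+	// iOrder records the completion order of the queued defrags by appending
+	// this request's number as the least significant decimal digit, e.g. 321
+	// means request 3 finished first, then request 2, then this one.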
1.1621 + ++iCounter;
1.1622 + if (iCounter == 1)
1.1623 + iOrder = 1;
1.1624 + else if (iCounter == 2 && iOrder == 2)
1.1625 + iOrder = 21;
1.1626 + else if (iCounter == 2 && iOrder == 3)
1.1627 + iOrder = 31;
1.1628 + else if (iCounter == 3 && iOrder == 23)
1.1629 + iOrder = 231;
1.1630 + else if (iCounter == 3 && iOrder == 32)
1.1631 + iOrder = 321;
1.1632 + TESTDEBUG(Kern::Printf("order = %d", iOrder));
1.1633 + TESTDEBUG(Kern::Printf("<DDefragChannel::DefragComplete"));
1.1634 +
1.1635 + // Close the handle on this channel - WARNING this channel may be
1.1636 +	// deleted immediately after this call so don't access any members
1.1637 + AsyncClose();
1.1638 + }
1.1639 +
1.1640 +
1.1641 +//
1.1642 +// Defrag2CompleteDfc
1.1643 +//
1.1644 +// DFC callback called when a defrag operation has completed.
1.1645 +// This is used for a particular test case when 3
1.1646 +// defrags are queued at the same time.
1.1647 +//
1.1648 +void DRamDefragFuncTestChannel::Defrag2CompleteDfc(TAny* aSelf)
1.1649 + {
1.1650 + // Just call non-static method
1.1651 +	TESTDEBUG(Kern::Printf("Calling Defrag2CompleteDfc"));
1.1652 + ((DRamDefragFuncTestChannel*)aSelf)->Defrag2Complete();
1.1653 + }
1.1654 +
1.1655 +
1.1656 +//
1.1657 +// Defrag2Complete
1.1658 +//
1.1659 +// Invoked by the DFC callback which is called when a defrag
1.1660 +// operation has completed. This is used for a particular test case when 3
1.1661 +// defrags are queued at the same time.
1.1662 +//
1.1663 +void DRamDefragFuncTestChannel::Defrag2Complete()
1.1664 + {
1.1665 + TESTDEBUG(Kern::Printf(">DDefragChannel::Defrag2Complete - Second Defrag"));
1.1666 + TInt result = iDefragRequest2.Result();
1.1667 + TESTDEBUG(Kern::Printf("complete code %d", result));
1.1668 + // Complete the request and close the handle to the driver
1.1669 + Kern::SemaphoreWait(*iDefragSemaphore);
1.1670 +
1.1671 + Kern::RequestComplete(iRequestThread2, iCompleteReq2, result);
1.1672 + iCompleteReq2 = NULL;
1.1673 + iRequestThread2->Close(NULL);
1.1674 + iRequestThread2 = NULL;
1.1675 +
1.1676 + Kern::SemaphoreSignal(*iDefragSemaphore);
1.1677 +
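+	// Append '2' to the completion order; see DefragComplete() for the encoding.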
1.1678 + ++iCounter;
1.1679 + if (iCounter == 1)
1.1680 + iOrder = 2;
1.1681 + else if (iCounter == 2 && iOrder == 1)
1.1682 + iOrder = 12;
1.1683 + else if (iCounter == 2 && iOrder == 3)
1.1684 + iOrder = 32;
1.1685 + else if (iCounter == 3 && iOrder == 13)
1.1686 + iOrder = 132;
1.1687 + else if (iCounter == 3 && iOrder == 31)
1.1688 + iOrder = 312;
1.1689 + TESTDEBUG(Kern::Printf("order = %d", iOrder));
1.1690 +	TESTDEBUG(Kern::Printf("<DDefragChannel::Defrag2Complete"));
1.1691 +
1.1692 + // Close the handle on this channel - WARNING this channel may be
1.1693 +	// deleted immediately after this call so don't access any members
1.1694 + AsyncClose();
1.1695 + }
1.1696 +
1.1697 +
1.1698 +//
1.1699 +// Defrag3CompleteDfc
1.1700 +//
1.1701 +// DFC callback called when a defrag operation has completed.
1.1702 +// This is used for a particular test case when 3
1.1703 +// defrags are queued at the same time.
1.1704 +//
1.1705 +void DRamDefragFuncTestChannel::Defrag3CompleteDfc(TAny* aSelf)
1.1706 + {
1.1707 + // Just call non-static method
1.1708 +	TESTDEBUG(Kern::Printf("Calling Defrag3CompleteDfc"));
1.1709 + ((DRamDefragFuncTestChannel*)aSelf)->Defrag3Complete();
1.1710 + }
1.1711 +
1.1712 +//
1.1713 +// Defrag3Complete
1.1714 +//
1.1715 +// Invoked by the DFC callback which is called when a defrag
1.1716 +// operation has completed. This is used for a particular test case when 3
1.1717 +// defrags are queued at the same time.
1.1718 +//
1.1719 +void DRamDefragFuncTestChannel::Defrag3Complete()
1.1720 + {
1.1721 +	TESTDEBUG(Kern::Printf(">DDefragChannel::Defrag3Complete - Third Defrag"));
1.1722 + TInt result = iDefragRequest3.Result();
1.1723 + TESTDEBUG(Kern::Printf("complete code %d", result));
1.1724 +
1.1725 + Kern::SemaphoreWait(*iDefragSemaphore);
1.1726 +
1.1727 + Kern::RequestComplete(iRequestThread3, iCompleteReq3, result);
1.1728 + iCompleteReq3 = NULL;
1.1729 + iRequestThread3->Close(NULL);
1.1730 + iRequestThread3 = NULL;
1.1731 +
1.1732 + Kern::SemaphoreSignal(*iDefragSemaphore);
1.1733 +
1.1734 +
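+	// Append '3' to the completion order; see DefragComplete() for the encoding.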
1.1735 + ++iCounter;
1.1736 + if (iCounter == 1)
1.1737 + iOrder = 3;
1.1738 + else if (iCounter == 2 && iOrder == 1)
1.1739 + iOrder = 13;
1.1740 + else if (iCounter == 2 && iOrder == 2)
1.1741 + iOrder = 23;
1.1742 + else if (iCounter == 3 && iOrder == 12)
1.1743 + iOrder = 123;
1.1744 + else if (iCounter == 3 && iOrder == 21)
1.1745 + iOrder = 213;
1.1746 + TESTDEBUG(Kern::Printf("order = %d", iOrder));
1.1747 +	TESTDEBUG(Kern::Printf("<DDefragChannel::Defrag3Complete"));
1.1748 +
1.1749 + // Close the handle on this channel - WARNING this channel may be
1.1750 +	// deleted immediately after this call so don't access any members
1.1751 + AsyncClose();
1.1752 + }
1.1753 +
1.1754 +//
1.1755 +// ResetDriver
1.1756 +//
1.1757 +// Reset all the member variables in the driver
1.1758 +//
1.1759 +TInt DRamDefragFuncTestChannel::ResetDriver()
1.1760 + {
1.1761 + iDebug = 0;
1.1762 + iThreadCounter = 1;
1.1763 + iCounter = 0;
1.1764 + iOrder = 0;
1.1765 + FreeAllFixedPages();
1.1766 +
1.1767 + return KErrNone;
1.1768 + }