// Copyright (c) 2007-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32test\examples\defrag\d_defrag_ref.cpp
// Reference LDD for invoking defrag APIs.
//
//

#include <kernel/kern_priv.h>
#include "platform.h"
#include "nk_priv.h"
#include "d_defrag_ref.h"

const TInt KMajorVersionNumber=0;
const TInt KMinorVersionNumber=1;
const TInt KBuildVersionNumber=1;

#if 1  // Set true for tracing
#define TRACE(x) x
#else
#define TRACE(x)
#endif

const TInt KDefragCompleteThreadPriority = 27;
const TInt KDefragRamThreadPriority = 1;
_LIT(KDefragCompleteThread,"DefragCompleteThread");

class DDefragChannel;

/**
	Cleanup item responsible for ensuring all memory committed to a chunk is
	freed once the chunk is destroyed
*/
class TChunkCleanup : public TDfc
    {
public:
    TChunkCleanup(DDefragChannel* aDevice, TPhysAddr* aBufAddrs, TUint aBufPages);
	TChunkCleanup(DDefragChannel* aDevice, TPhysAddr aBufBase, TUint aBufBytes);
    static void ChunkDestroyed(TChunkCleanup* aSelf);
	void RemoveDevice();

private:
    void DoChunkDestroyed();

private:
	TPhysAddr* iBufAddrs;		/**< Pointer to an array of the addresses of discontiguous buffer pages*/
	TPhysAddr iBufBase;			/**< Physical base address of a physically contiguous buffer*/
	TUint iBufSize;				/**< The number of pages or bytes in the buffer, depending on whether the
								buffer is discontiguous or contiguous, respectively*/
	TBool iBufContiguous;		/**< ETrue when the memory to be freed is contiguous, EFalse otherwise*/
	DDefragChannel* iDevice; 	/**< The device to be informed when the chunk is destroyed */
    };
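
// Note: TChunkCleanup is a TDfc which is registered as TChunkCreateInfo::iDestroyedDfc
// when the buffer chunk is created (see DoAllocLowestZone() and DoClaimLowestZone()
// below). The kernel queues the DFC once the chunk is finally destroyed; the DFC then
// frees the physical RAM backing the chunk and deletes itself.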


/**
	Reference defrag LDD factory.
*/
class DDefragChannelFactory : public DLogicalDevice
	{
public:
	DDefragChannelFactory();
	~DDefragChannelFactory();
	virtual TInt Install();								//overriding pure virtual
	virtual void GetCaps(TDes8& aDes) const;			//overriding pure virtual
	virtual TInt Create(DLogicalChannelBase*& aChannel);//overriding pure virtual

	TDynamicDfcQue* iDfcQ;
	};


/**
	Reference defrag logical channel.
*/
class DDefragChannel : public DLogicalChannelBase
	{
public:
	DDefragChannel(TDfcQue* aDfcQ);
	~DDefragChannel();
	void ChunkDestroyed();
protected:
	virtual TInt DoCreate(TInt aUnit, const TDesC8* anInfo, const TVersion& aVer);
	virtual TInt Request(TInt aFunction, TAny* a1, TAny* a2);

	TInt DoAllocLowestZone();
	TInt DoClaimLowestZone();
	TInt DoChunkClose();
	TInt FindLowestPrefZone();

	static void DefragCompleteDfc(TAny* aSelf);
	void DefragComplete();

private:
	TInt iPageShift;			/**< The system's page shift */
	DSemaphore* iDefragSemaphore;/**< Semaphore to ensure only one defrag operation is active per channel*/
	TClientRequest* iCompleteReq;/**< Pointer to a request status that will signal to the user side client once the defrag has completed*/
	DThread* iRequestThread;	/**< Pointer to the thread that made the defrag request*/
	TRamDefragRequest iDefragReq;/**< The defrag request used to queue defrag operations*/
	DChunk* iBufChunk;			/**< Pointer to a chunk that can be mapped to a physical RAM area*/
	TChunkCleanup* iChunkCleanup;/**< Pointer to iBufChunk's cleanup object */
	TDfcQue* iDfcQ;				/**< The DFC queue used for driver functions */
	TDfc iDefragCompleteDfc;	/**< DFC to be queued once a defrag operation has completed */
	TBool iDefragDfcFree;		/**< Set to EFalse while a DFC defrag operation is still pending*/
	TUint iLowestPrefZoneId;	/**< The ID of the least preferable RAM zone*/
	TUint iLowestPrefZonePages;	/**< The number of pages in the least preferable RAM zone*/
	TUint iLowestPrefZoneIndex; /**< The test HAL function index of the least preferable RAM zone*/
	};
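
/*
	Typical flow for a user-side client of this channel (a sketch only: the
	RDefragChannel user-side wrapper is declared in d_defrag_ref.h, so the
	method names shown here are illustrative assumptions; the control enums
	are the ones handled in DDefragChannel::Request()):

		RDefragChannel defrag;
		TInt r = defrag.Open();                    // runs DoCreate()
		TRequestStatus status;
		defrag.GeneralDefragDfc(status);           // EControlGeneralDefragDfc
		User::WaitForRequest(status);              // completed via iCompleteReq
		defrag.GeneralDefragDfcComplete();         // EControlGeneralDefragDfcComplete
		defrag.Close();
*/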

/**
	Utility functions to wait for the chunk cleanup DFC to run, by waiting for
	the idle thread to run.
*/
void signal_sem(TAny* aPtr)
	{
	NKern::FSSignal((NFastSemaphore*)aPtr);
	}

TInt WaitForIdle()
	{// Wait for the chunk to be destroyed and then for the chunk cleanup DFC to run.
	for (TUint i = 0; i < 2; i++)
		{
		NFastSemaphore s(0);
		TDfc idler(&signal_sem, &s, Kern::SvMsgQue(), 0);	// supervisor thread, priority 0, so will run after destroyed DFC
		NTimer timer(&signal_sem, &s);
		idler.QueueOnIdle();
		timer.OneShot(NKern::TimerTicks(5000), ETrue);	// runs in DFCThread1
		NKern::FSWait(&s);	// wait for either idle DFC or timer
		TBool timeout = idler.Cancel();	// cancel idler, return TRUE if it hadn't run
		TBool tmc = timer.Cancel();	// cancel timer, return TRUE if it hadn't expired
		if (!timeout && !tmc)
			NKern::FSWait(&s);	// both the DFC and the timer went off - wait for the second one
		if (timeout)
			return KErrTimedOut;
		}
	return KErrNone;
	}
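
// WaitForIdle() is used by DoAllocLowestZone() and DoClaimLowestZone() to give a
// previously closed buffer chunk time to finish being destroyed (and so to run
// its cleanup DFC, which resets iBufChunk) before a new chunk is created.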


/**
	Standard logical device driver entry point.
	Called the first time this device driver is loaded.
*/
DECLARE_STANDARD_LDD()
	{
	DDefragChannelFactory* factory = new DDefragChannelFactory;
	if (factory)
		{
		// Allocate a kernel thread to run the DFC queue.
		TInt r = Kern::DynamicDfcQCreate(factory->iDfcQ, KDefragCompleteThreadPriority, KDefragCompleteThread);

		if (r != KErrNone)
			{
			// Must close rather than delete factory as it is a DObject-derived object.
			factory->AsyncClose();
			return NULL;
			}
		}
    return factory;
    }
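
/*
	A test application loads this driver before opening a channel. A minimal
	sketch (the LDD binary name is an assumption for illustration; KLddName
	itself is defined in d_defrag_ref.h):

		TInt r = User::LoadLogicalDevice(_L("d_defrag_ref.ldd"));
		if (r == KErrNone || r == KErrAlreadyExists)
			{
			// open a channel and make requests, see DDefragChannel::Request()
			}
*/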


/**
	Constructor
*/
DDefragChannelFactory::DDefragChannelFactory()
    {
    iVersion=TVersion(KMajorVersionNumber,KMinorVersionNumber,KBuildVersionNumber);
    }


/**
	Destructor
*/
DDefragChannelFactory::~DDefragChannelFactory()
    {
	if (iDfcQ != NULL)
		{// Destroy the DFC queue created when this device driver was loaded.
		iDfcQ->Destroy();
		}
    }


/**
	Create a new DDefragChannel on this logical device.

@param  aChannel On successful return this will point to the new channel.
@return KErrNone on success or KErrNoMemory if the channel couldn't be created.
*/
TInt DDefragChannelFactory::Create(DLogicalChannelBase*& aChannel)
    {
	aChannel = new DDefragChannel(iDfcQ);
	return (aChannel)? KErrNone : KErrNoMemory;
    }


/**
	Install the LDD - overriding pure virtual

@return KErrNone on success or one of the system wide error codes.
*/
TInt DDefragChannelFactory::Install()
    {
    return SetName(&KLddName);
    }


/**
	Get capabilities - overriding pure virtual

@param aDes A descriptor to be loaded with the capabilities.
*/
void DDefragChannelFactory::GetCaps(TDes8& aDes) const
    {
    TCapsDefragTestV01 b;
    b.iVersion=TVersion(KMajorVersionNumber,KMinorVersionNumber,KBuildVersionNumber);
    Kern::InfoCopy(aDes,(TUint8*)&b,sizeof(b));
    }


/**
	Constructor

@param aDfcQ The DFC queue to use for defrag completion DFCs.
*/
DDefragChannel::DDefragChannel(TDfcQue* aDfcQ) 
		:
		iDefragSemaphore(NULL),
		iCompleteReq(NULL),
		iBufChunk(NULL),
		iChunkCleanup(NULL),
		iDfcQ(aDfcQ),
		iDefragCompleteDfc(DefragCompleteDfc, (TAny*)this, 1)  // DFC priority is '1'; it is the only type of DFC on this queue.
    {
    }


/**
	Create channel.

@param aVer The version number required.
@return KErrNone on success, KErrNotSupported if the device doesn't support defragmentation.
*/
TInt DDefragChannel::DoCreate(TInt /*aUnit*/, const TDesC8* /*anInfo*/, const TVersion& aVer)
    {
	// Check the client has ECapabilityPowerMgmt capability.
	if(!Kern::CurrentThreadHasCapability(ECapabilityPowerMgmt, __PLATSEC_DIAGNOSTIC_STRING("Checked by DDefragChannel")))
		{
		return KErrPermissionDenied;
		}
	TInt pageSize;
	TInt r = Kern::HalFunction(EHalGroupKernel, EKernelHalPageSizeInBytes, &pageSize, 0);
	if (r != KErrNone)
		{
		TRACE(Kern::Printf("ERROR - Unable to determine page size"));
		return r;
		}
	TUint32 pageMask = pageSize;
	TUint i = 0;
	for (; i < 32; i++)
		{
		if (pageMask & 1)
			{
			if (pageMask & ~1u)
				{
				TRACE(Kern::Printf("ERROR - page size not a power of 2"));
				return KErrNotSupported;
				}
			iPageShift = i;
			break;
			}
		pageMask >>= 1;
		}
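	// For example, a 4KB page size (pageSize == 0x1000) gives iPageShift == 12;
	// DoAllocLowestZone() and DoClaimLowestZone() later use 'pages << iPageShift'
	// to convert a page count into a byte count.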

	// Check the client is a supported version.
    if (!Kern::QueryVersionSupported(TVersion(KMajorVersionNumber,KMinorVersionNumber,KBuildVersionNumber),aVer))
		{
    	return KErrNotSupported;
		}

	// Check this system has more than one RAM zone defined.
	// A real driver shouldn't need to do this as any driver that uses defrag should
	// only be loaded on devices that support it.
	TInt ret = FindLowestPrefZone();
	if (ret != KErrNone)
		{// Only one zone so can't move pages anywhere or empty a zone
		return KErrNotSupported;
		}

	// Create a semaphore to protect defrag invocation.  OK to just use one name as
	// the semaphore is not global so its name doesn't need to be unique.
	ret = Kern::SemaphoreCreate(iDefragSemaphore, _L("DefragRefSem"), 1);
	if (ret != KErrNone)
		{
		return ret;
		}

	// Create a client request for completing DFC defrag requests.
	ret = Kern::CreateClientRequest(iCompleteReq);
	if (ret != KErrNone)
		{
		iDefragSemaphore->Close(NULL);
		return ret;
		}

	// Set up a DFC to be invoked when a defrag operation completes.
	iDefragCompleteDfc.SetDfcQ(iDfcQ);
	iDefragDfcFree = ETrue;

	return KErrNone;
	}


/**
	Destructor
*/
DDefragChannel::~DDefragChannel()
    {
	// Clean up any heap objects.
	if (iDefragSemaphore != NULL)
		{
		iDefragSemaphore->Close(NULL);
		}

	// Unregister from any chunk cleanup object as we are to be deleted.
	if (iChunkCleanup != NULL)
		{
		iChunkCleanup->RemoveDevice();
		}
	// Clean up any client request object.
	if (iCompleteReq)
		{
		Kern::DestroyClientRequest(iCompleteReq);
		}
	// Free any existing chunk.
	DoChunkClose();
    }


/**
	Handle the requests for this channel.

@param aFunction 	The operation the LDD should perform.
@param a1 			The first argument for the operation.
@param a2 			The second argument for the operation.
@return KErrNone on success or one of the system wide error codes.
*/
TInt DDefragChannel::Request(TInt aFunction, TAny* a1, TAny* a2)
	{
	TInt r = KErrNone;
	NKern::ThreadEnterCS();

	Kern::SemaphoreWait(*iDefragSemaphore);
	if (!iDefragDfcFree && aFunction != RDefragChannel::EControlGeneralDefragDfcComplete)
		{// Only allow a single defrag operation at a time.
		r = KErrInUse;
		goto exit;
		}

	switch (aFunction)
		{
		case RDefragChannel::EControlGeneralDefragDfc:
			// Queue a defrag operation so that on completion it queues a
			// DFC on this driver.
			iRequestThread = &Kern::CurrentThread();
			iRequestThread->Open();

			// Open a reference on this channel to stop the destructor running before
			// the defrag request has completed.
			Open();
			r = iCompleteReq->SetStatus((TRequestStatus*)a1);
			if (r == KErrNone)
				r = iDefragReq.DefragRam(&iDefragCompleteDfc, KDefragRamThreadPriority);
			if (r != KErrNone)
				{// The defrag operation didn't start so close all opened handles.
				AsyncClose();
				iRequestThread->AsyncClose();
				iRequestThread = NULL;
				}
			else
				iDefragDfcFree = EFalse;
			break;

		case RDefragChannel::EControlGeneralDefragDfcComplete:
			if (iRequestThread != NULL)
				{// The defrag DFC hasn't completed so this shouldn't have been invoked.
				r = KErrGeneral;
				}
			else
				{
				iDefragDfcFree = ETrue;
				}
			break;

		case RDefragChannel::EControlGeneralDefragSem:
			{// Queue a defrag operation so that it will signal a fast semaphore once
			// it has completed.
			NFastSemaphore sem;
			NKern::FSSetOwner(&sem, 0);
			r = iDefragReq.DefragRam(&sem, KDefragRamThreadPriority);

			if (r != KErrNone)
				{// Error occurred attempting to queue the defrag operation.
				break;
				}

			// Defrag operation has now been queued so wait for it to finish.
			// Could do some extra kernel side work here before waiting on the
			// semaphore.
			NKern::FSWait(&sem);
			r = iDefragReq.Result();
			}
			break;

		case RDefragChannel::EControlGeneralDefrag:
			// Synchronously perform a defrag.
			{
			r = iDefragReq.DefragRam(KDefragRamThreadPriority);
			}
			break;

		case RDefragChannel::EControlAllocLowestZone:
			// Allocate from the lowest preference zone
			r = DoAllocLowestZone();
			break;

		case RDefragChannel::EControlClaimLowestZone:
			// Claim the lowest preference zone
			r = DoClaimLowestZone();
			break;

		case RDefragChannel::EControlCloseChunk:
			// Have finished with the chunk so close it then free the RAM mapped by it
			r = DoChunkClose();
			TRACE( if (r != KErrNone) {Kern::Printf("ChunkClose returns %d", r);});
			break;

		default:
			r=KErrNotSupported;
			break;
		}
exit:
	Kern::SemaphoreSignal(*iDefragSemaphore);
	NKern::ThreadLeaveCS();
	TRACE(if (r!=KErrNone)	{Kern::Printf("DDefragChannel::Request returns %d", r);	});
	return r;
	}
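
// The cases above demonstrate the three completion mechanisms offered by
// TRamDefragRequest::DefragRam(): queue a DFC (EControlGeneralDefragDfc), signal
// a fast semaphore (EControlGeneralDefragSem), or block synchronously until the
// defrag finishes (EControlGeneralDefrag).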


/**
	Allocates RAM from the lowest preference zone and maps it to a shared chunk.

	Real drivers would not need to determine which zone to allocate from as they
	will know the zone's ID.

@return KErrNone on success, otherwise one of the system wide error codes.
*/
TInt DDefragChannel::DoAllocLowestZone()
	{
	TInt r = KErrNone;
	TLinAddr chunkAddr = 0;
	TUint32 mapAttr = 0;
	TChunkCreateInfo createInfo;
	TLinAddr bufBaseAddr;
	TUint bufPages;
	TPhysAddr* bufAddrs;

	if (iBufChunk != NULL)
		{// The buffer chunk is already mapped so can't be used again until it is
		// freed/closed. Wait a short while for it to be freed as it may be in the
		// process of being destroyed.
		if (WaitForIdle() != KErrNone || iBufChunk != NULL)
			{// chunk still hasn't been freed so can't proceed.
			r = KErrInUse;
			goto exit;
			}
		}

	// Attempt to allocate all the pages it should be possible to allocate.
	// Real device drivers will know how much they need to allocate so they
	// wouldn't determine it here.
	SRamZoneUtilisation zoneUtil;
	Kern::HalFunction(EHalGroupRam, ERamHalGetZoneUtilisation, (TAny*)iLowestPrefZoneIndex, (TAny*)&zoneUtil);
	bufPages = iLowestPrefZonePages - (zoneUtil.iAllocFixed + zoneUtil.iAllocUnknown + zoneUtil.iAllocOther);
	bufAddrs = new TPhysAddr[bufPages];
	if (!bufAddrs)
		{
		TRACE(Kern::Printf("Failed to allocate an array for bufAddrs"));
		r = KErrNoMemory;
		goto exit;
		}

	// Update the page count as the bufAddrs allocation may have caused the kernel
	// heap to grow.
	Kern::HalFunction(EHalGroupRam, ERamHalGetZoneUtilisation, (TAny*)iLowestPrefZoneIndex, (TAny*)&zoneUtil);
	bufPages = iLowestPrefZonePages - (zoneUtil.iAllocFixed + zoneUtil.iAllocUnknown + zoneUtil.iAllocOther);

	// Allocate discontiguous pages from the zone
	r = Epoc::ZoneAllocPhysicalRam(iLowestPrefZoneId, bufPages, bufAddrs);
	if (r != KErrNone && r != KErrNoMemory)
		{
		TRACE(Kern::Printf("Zone Alloc returns %d bufPages %x", r, bufPages));
		goto exit;
		}
	// If we couldn't allocate all the required pages then empty the zone
	// and retry.
	if (r == KErrNoMemory)
		{
		r = iDefragReq.EmptyRamZone(iLowestPrefZoneId, TRamDefragRequest::KInheritPriority);
		if (r != KErrNone)
			{
			TRACE(Kern::Printf("Empty returns %d", r));
			goto exit;
			}
		r = Epoc::ZoneAllocPhysicalRam(iLowestPrefZoneId, bufPages, bufAddrs);
		if (r != KErrNone)
			{
			TRACE(Kern::Printf("ZoneAlloc1 returns %d bufPages %x", r, bufPages));
			goto exit;
			}
		}

	// Create a chunk cleanup object which will free the physical RAM when the
	// chunk is destroyed
	iChunkCleanup = new TChunkCleanup(this, bufAddrs, bufPages);
	if (!iChunkCleanup)
		{
		TRACE(Kern::Printf("iChunkCleanup creation failed"));
		r = Epoc::FreePhysicalRam(bufPages, bufAddrs);
		if (r != KErrNone)
			{
			TRACE(Kern::Printf("ERROR - freeing physical memory when chunkCleanup create failed"));
			}
		else
			{
			r = KErrNoMemory;
			}
		goto exit;
		}

	// Map the allocated buffer pages to a chunk so we can use it.
	createInfo.iType = TChunkCreateInfo::ESharedKernelSingle; // could also be ESharedKernelMultiple
	createInfo.iMaxSize = bufPages << iPageShift;
	createInfo.iMapAttr = EMapAttrFullyBlocking; // Non-cached - See TMappingAttributes for all options
	createInfo.iOwnsMemory = EFalse; // Must be false as the physical RAM has already been allocated
	createInfo.iDestroyedDfc = iChunkCleanup;
	r = Kern::ChunkCreate(createInfo, iBufChunk, chunkAddr, mapAttr);
	if (r != KErrNone)
		{
		TRACE(Kern::Printf("ChunkCreate returns %d size %x pages %x", r, createInfo.iMaxSize, bufPages));
		goto exit;
		}

	// Map the physical memory to the chunk
	r = Kern::ChunkCommitPhysical(iBufChunk, 0, createInfo.iMaxSize, bufAddrs);
	if (r != KErrNone)
		{
		TRACE(Kern::Printf("CommitPhys returns %d", r));
		goto exit;
		}

	// Now that the RAM is mapped into a chunk get the kernel-side virtual
	// base address of the buffer.
	r = Kern::ChunkAddress(iBufChunk, 0, createInfo.iMaxSize, bufBaseAddr);

	// Using bufBaseAddr a real driver may now do something with the buffer.  We'll just return.

exit:
	return r;
	}


/**
	Claims the lowest preference zone and maps it to a shared chunk.

	Real drivers would not need to determine which zone to allocate from as they
	will know the zone's ID.

@return KErrNone on success, otherwise one of the system wide error codes.
*/
TInt DDefragChannel::DoClaimLowestZone()
	{
	TInt r = KErrNone;
	TChunkCreateInfo createInfo;
	TLinAddr bufBaseAddr;
	TLinAddr chunkAddr;
	TUint32 mapAttr = 0;
	TPhysAddr bufBase;
	TUint bufBytes;

	if (iBufChunk != NULL)
		{// The buffer chunk is already mapped so can't be used again until it is
		// freed/closed. Wait a short while for it to be freed as it may be in the
		// process of being destroyed.
		if (WaitForIdle() != KErrNone || iBufChunk != NULL)
			{// chunk still hasn't been freed so can't proceed.
			r = KErrInUse;
			goto exit;
			}
		}

	// Claim the zone; its base address will be stored in bufBase.
	r = iDefragReq.ClaimRamZone(iLowestPrefZoneId, bufBase, TRamDefragRequest::KInheritPriority);
	if (r != KErrNone)
		{
		TRACE(Kern::Printf("Claim returns %d", r));
		goto exit;
		}

	// Create a chunk cleanup object which will free the physical RAM when the
	// chunk is destroyed
	bufBytes = iLowestPrefZonePages << iPageShift;
	iChunkCleanup = new TChunkCleanup(this, bufBase, bufBytes);
	if (!iChunkCleanup)
		{
		TRACE(Kern::Printf("chunkCleanup creation failed"));
		r = Epoc::FreePhysicalRam(bufBase, bufBytes);
		if (r != KErrNone)
			{
			TRACE(Kern::Printf("ERROR - freeing physical memory when chunkCleanup create failed"));
			}
		else
			{
			r = KErrNoMemory;
			}
		goto exit;
		}

	// Map the allocated buffer pages to a chunk so we can use it.
	createInfo.iType = TChunkCreateInfo::ESharedKernelSingle; // could also be ESharedKernelMultiple
	createInfo.iMaxSize = bufBytes;
	createInfo.iMapAttr = EMapAttrFullyBlocking; // Non-cached - See TMappingAttributes for all options
	createInfo.iOwnsMemory = EFalse; // Must be false as the physical RAM has already been allocated
	createInfo.iDestroyedDfc = iChunkCleanup;
	r = Kern::ChunkCreate(createInfo, iBufChunk, chunkAddr, mapAttr);
	if (r != KErrNone)
		{
		TRACE(Kern::Printf("ChunkCreate returns %d size %x bytes %x", r, createInfo.iMaxSize, bufBytes));
		goto exit;
		}

	// Map the physically contiguous memory to the chunk
	r = Kern::ChunkCommitPhysical(iBufChunk, 0, createInfo.iMaxSize, bufBase);
	if (r != KErrNone)
		{
		TRACE(Kern::Printf("CommitPhys returns %d", r));
		goto exit;
		}

	// Now that the RAM is mapped into a chunk get the kernel-side virtual
	// base address of the buffer.
	r = Kern::ChunkAddress(iBufChunk, 0, createInfo.iMaxSize, bufBaseAddr);

	// Using bufBaseAddr a real driver may now do something with the buffer.  We'll just return.

exit:
	return r;
	}


/**
	Determine the lowest preference zone.

@return KErrNone on success or KErrNotFound if there is only one zone.
*/
TInt DDefragChannel::FindLowestPrefZone()
	{
	TUint zoneCount;
	TInt r = Kern::HalFunction(EHalGroupRam, ERamHalGetZoneCount, (TAny*)&zoneCount, NULL);
	if(r!=KErrNone)
		return r;

	if (zoneCount == 1)
		{// Only one zone so can't move pages anywhere or empty a zone
		return KErrNotFound;
		}

	SRamZoneConfig zoneConfig;
	SRamZoneUtilisation zoneUtil;
	Kern::HalFunction(EHalGroupRam, ERamHalGetZoneConfig, (TAny*)0, (TAny*)&zoneConfig);
	Kern::HalFunction(EHalGroupRam, ERamHalGetZoneUtilisation, (TAny*)0, (TAny*)&zoneUtil);
	TUint lowestPref = zoneConfig.iPref;
	TUint lowestFreePages = zoneUtil.iFreePages;
	iLowestPrefZoneIndex = 0;
	iLowestPrefZoneId = zoneConfig.iZoneId;
	TUint i = 1;
	for (; i < zoneCount; i++)
		{
		Kern::HalFunction(EHalGroupRam, ERamHalGetZoneConfig, (TAny*)i, (TAny*)&zoneConfig);
		Kern::HalFunction(EHalGroupRam, ERamHalGetZoneUtilisation, (TAny*)i, (TAny*)&zoneUtil);
		// When zones have the same preference the zone with the most free pages
		// is picked, with ties going to the zone later in the zone list.
		if (zoneConfig.iPref > lowestPref || 
			(zoneConfig.iPref == lowestPref && zoneUtil.iFreePages >= lowestFreePages))
			{
			lowestPref = zoneConfig.iPref;
			lowestFreePages = zoneUtil.iFreePages;
			iLowestPrefZoneIndex = i;
			iLowestPrefZoneId = zoneConfig.iZoneId;
			}
		}
	// Now that we know the current least preferable zone store its size.
	Kern::HalFunction(EHalGroupRam, ERamHalGetZoneConfig, (TAny*)iLowestPrefZoneIndex, (TAny*)&zoneConfig);
	iLowestPrefZonePages = zoneConfig.iPhysPages;
	TRACE(Kern::Printf("LowestPrefZone %x size %x", iLowestPrefZoneId, iLowestPrefZonePages));
	return KErrNone;
	}
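
// Worked example with hypothetical values: three zones with preferences {1, 2, 2}
// and free pages {10, 50, 50}. Zone 1 replaces zone 0 (preference 2 > 1), then
// zone 2 replaces zone 1 (equal preference, and the '>=' lets the later zone win
// the tie on free pages), so zone 2 is recorded as the least preferable zone.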


/**
	DFC callback called when a defrag operation has completed.

@param aSelf A pointer to the DDefragChannel that requested the defrag operation
*/
void DDefragChannel::DefragCompleteDfc(TAny* aSelf)
	{
	// Just call non-static method
	((DDefragChannel*)aSelf)->DefragComplete();
	}


/**
	Invoked by the DFC callback which is called when a defrag 
	operation has completed.
*/
void DDefragChannel::DefragComplete()
	{
	TRACE(Kern::Printf(">DDefragChannel::DefragComplete"));
	TInt result = iDefragReq.Result();
	TRACE(Kern::Printf("complete code %d", result));

	Kern::SemaphoreWait(*iDefragSemaphore);

	Kern::QueueRequestComplete(iRequestThread, iCompleteReq, result);
	iRequestThread->AsyncClose();
	iRequestThread = NULL;

	Kern::SemaphoreSignal(*iDefragSemaphore);

	TRACE(Kern::Printf("<DDefragChannel::DefragComplete"));
	// Close the handle on this channel - WARNING this channel may be 
	// deleted immediately after this call so don't access any members
	AsyncClose();
	}


/**
	Close the chunk.

@return KErrNone on success or one of the system wide error codes.
*/
TInt DDefragChannel::DoChunkClose()
	{
	if (iBufChunk == NULL)
		{// Someone tried to close the chunk before using it
		return KErrNotFound;
		}

	// Rely on the chunk cleanup object being called as that
	// is what will actually free the physical RAM committed to the chunk.
	Kern::ChunkClose(iBufChunk);
	return KErrNone;
	}


/**
	The chunk has now been destroyed so reset the pointers to allow a new
	chunk to be created.
*/
void DDefragChannel::ChunkDestroyed()
	{
	__e32_atomic_store_ord_ptr(&iBufChunk, 0);
	__e32_atomic_store_ord_ptr(&iChunkCleanup, 0);
	}


/**
	Construct a Shared Chunk cleanup object which will free the chunk's discontiguous
	physical memory when the chunk is destroyed.

@param aDevice The device to inform when the chunk is destroyed.
@param aBufAddrs The physical base addresses of each of the chunk's memory pages.
@param aBufPages The total number of the chunk's pages.
*/
TChunkCleanup::TChunkCleanup(DDefragChannel* aDevice, TPhysAddr* aBufAddrs, TUint aBufPages)
    : TDfc((TDfcFn)TChunkCleanup::ChunkDestroyed,this,Kern::SvMsgQue(),0),
    iBufAddrs(aBufAddrs),
	iBufSize(aBufPages),
	iBufContiguous(EFalse),
	iDevice(aDevice)
    {}


/**
	Construct a Shared Chunk cleanup object which will free the chunk's contiguous 
	physical memory when the chunk is destroyed.

@param aDevice The device to inform when the chunk is destroyed.
@param aBufBase The physical base address of the chunk's memory.
@param aBufBytes The size of the chunk's memory in bytes.
*/
TChunkCleanup::TChunkCleanup(DDefragChannel* aDevice, TPhysAddr aBufBase, TUint aBufBytes)
    : TDfc((TDfcFn)TChunkCleanup::ChunkDestroyed,this,Kern::SvMsgQue(),0),
    iBufBase(aBufBase),
	iBufSize(aBufBytes),
	iBufContiguous(ETrue),
	iDevice(aDevice)
    {}

/**
	Callback function which is called when the DFC runs, i.e. when a chunk is destroyed,
	and frees the physical memory allocated when the chunk was created.

@param aSelf Pointer to the cleanup object associated with the chunk that has 
been destroyed.
*/
void TChunkCleanup::ChunkDestroyed(TChunkCleanup* aSelf)
	{
	aSelf->DoChunkDestroyed();

    // We've finished so now delete ourselves
    delete aSelf;
	}


/**
	The chunk has been destroyed so free the physical RAM that was allocated
	for its use and inform iDevice that it has been destroyed.
*/
void TChunkCleanup::DoChunkDestroyed()
    {
	if (iBufContiguous)
		{
		__NK_ASSERT_ALWAYS(Epoc::FreePhysicalRam(iBufBase, iBufSize) == KErrNone);
		}
	else
		{
		__NK_ASSERT_ALWAYS(Epoc::FreePhysicalRam(iBufSize, iBufAddrs) == KErrNone);
		}

	if (iDevice != NULL)
		{// Allow iDevice to perform any cleanup it requires for this chunk.
		iDevice->ChunkDestroyed();
		}
    }


/**
	Remove the device so its ChunkDestroyed() method isn't invoked when the chunk is 
	destroyed.
*/
void TChunkCleanup::RemoveDevice()
	{
	__e32_atomic_store_ord_ptr(&iDevice, 0);
	}