// Copyright (c) 2007-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32test\examples\defrag\d_defrag_ref.cpp
// Reference LDD for invoking defrag APIs.
//
//

#include <kernel/kern_priv.h>
#include "platform.h"
#include "nk_priv.h"
#include "d_defrag_ref.h"

const TInt KMajorVersionNumber=0;
const TInt KMinorVersionNumber=1;
const TInt KBuildVersionNumber=1;

#if 1  // Set true for tracing
#define TRACE(x) x
#else
#define TRACE(x)
#endif
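
// Note: when tracing is enabled TRACE(x) expands to the statement x itself,
// e.g. TRACE(Kern::Printf("...")) becomes Kern::Printf("..."); when disabled
// it expands to nothing, so traced statements must not carry side effects
// the driver relies on.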

const TInt KDefragCompleteThreadPriority = 27;
const TInt KDefragRamThreadPriority = 1;
_LIT(KDefragCompleteThread,"DefragCompleteThread");

class DDefragChannel;

/**
	Cleanup item responsible for ensuring all memory committed to a chunk is
	freed once the chunk is destroyed.
*/
class TChunkCleanup : public TDfc
	{
public:
	TChunkCleanup(DDefragChannel* aDevice, TPhysAddr* aBufAddrs, TUint aBufPages);
	TChunkCleanup(DDefragChannel* aDevice, TPhysAddr aBufBase, TUint aBufBytes);
	static void ChunkDestroyed(TChunkCleanup* aSelf);
	void RemoveDevice();

private:
	void DoChunkDestroyed();

private:
	TPhysAddr* iBufAddrs;		/**< Pointer to an array of the addresses of discontiguous buffer pages*/
	TPhysAddr iBufBase;			/**< Physical base address of the physically contiguous buffer*/
	TUint iBufSize;				/**< The number of pages or bytes in the buffer, depending on whether the
								buffer is discontiguous or contiguous, respectively*/
	TBool iBufContiguous;		/**< ETrue when the memory to be freed is contiguous, EFalse otherwise*/
	DDefragChannel* iDevice;	/**< The device to be informed when the chunk is destroyed */
	};


/**
	Reference defrag LDD factory.
*/
class DDefragChannelFactory : public DLogicalDevice
	{
public:
	DDefragChannelFactory();
	~DDefragChannelFactory();
	virtual TInt Install();								//overriding pure virtual
	virtual void GetCaps(TDes8& aDes) const;			//overriding pure virtual
	virtual TInt Create(DLogicalChannelBase*& aChannel);//overriding pure virtual

	TDynamicDfcQue* iDfcQ;
	};


/**
	Reference defrag logical channel.
*/
class DDefragChannel : public DLogicalChannelBase
	{
public:
	DDefragChannel(TDfcQue* aDfcQ);
	~DDefragChannel();
	void ChunkDestroyed();
protected:
	virtual TInt DoCreate(TInt aUnit, const TDesC8* anInfo, const TVersion& aVer);
	virtual TInt Request(TInt aFunction, TAny* a1, TAny* a2);

	TInt DoAllocLowestZone();
	TInt DoClaimLowestZone();
	TInt DoChunkClose();
	TInt FindLowestPrefZone();

	static void DefragCompleteDfc(TAny* aSelf);
	void DefragComplete();

private:
	TInt iPageShift;			/**< The system's page shift */
	DSemaphore* iDefragSemaphore;/**< Semaphore to ensure only one defrag operation is active per channel*/
	TClientRequest* iCompleteReq;/**< Pointer to a request status that will signal to the user side client once the defrag has completed*/
	DThread* iRequestThread;	/**< Pointer to the thread that made the defrag request*/
	TRamDefragRequest iDefragReq;/**< The defrag request used to queue defrag operations*/
	DChunk* iBufChunk;			/**< Pointer to a chunk that can be mapped to a physical RAM area*/
	TChunkCleanup* iChunkCleanup;/**< Pointer to iBufChunk's cleanup object */
	TDfcQue* iDfcQ;				/**< The DFC queue used for driver functions */
	TDfc iDefragCompleteDfc;	/**< DFC to be queued once a defrag operation has completed */
	TBool iDefragDfcFree;		/**< Set to EFalse while a DFC defrag operation is still pending*/
	TUint iLowestPrefZoneId;	/**< The ID of the least preferable RAM zone*/
	TUint iLowestPrefZonePages;	/**< The number of pages in the least preferable RAM zone*/
	TUint iLowestPrefZoneIndex; /**< The test HAL function index of the least preferable RAM zone*/
	};

/**
Utility functions used to wait for the chunk cleanup DFC to run, by waiting
for the system to go idle.
*/
void signal_sem(TAny* aPtr)
	{
	NKern::FSSignal((NFastSemaphore*)aPtr);
	}

TInt WaitForIdle()
	{// Wait for chunk to be destroyed and then for the chunk cleanup dfc to run.
	for (TUint i = 0; i < 2; i++)
		{
		NFastSemaphore s(0);
		TDfc idler(&signal_sem, &s, Kern::SvMsgQue(), 0);	// supervisor thread, priority 0, so will run after destroyed DFC
		NTimer timer(&signal_sem, &s);
		idler.QueueOnIdle();
		timer.OneShot(NKern::TimerTicks(5000), ETrue);	// runs in DFCThread1
		NKern::FSWait(&s);	// wait for either idle DFC or timer
		TBool timeout = idler.Cancel();	// cancel idler, return TRUE if it hadn't run
		TBool tmc = timer.Cancel();	// cancel timer, return TRUE if it hadn't expired
		if (!timeout && !tmc)
			NKern::FSWait(&s);	// both the DFC and the timer went off - wait for the second one
		if (timeout)
			return KErrTimedOut;
		}
	return KErrNone;
	}

/**
	Standard logical device driver entry point.
	Called the first time this device driver is loaded.
*/
DECLARE_STANDARD_LDD()
	{
	DDefragChannelFactory* factory = new DDefragChannelFactory;
	if (factory)
		{
		// Allocate a kernel thread to run the DFC.
		TInt r = Kern::DynamicDfcQCreate(factory->iDfcQ, KDefragCompleteThreadPriority, KDefragCompleteThread);

		if (r != KErrNone)
			{
			// Must close rather than delete the factory as it is a DObject object.
			factory->AsyncClose();
			return NULL;
			}
		}
	return factory;
	}


/**
	Constructor
*/
DDefragChannelFactory::DDefragChannelFactory()
	{
	iVersion=TVersion(KMajorVersionNumber,KMinorVersionNumber,KBuildVersionNumber);
	}


/**
	Destructor
*/
DDefragChannelFactory::~DDefragChannelFactory()
	{
	if (iDfcQ != NULL)
		{// Destroy the DFC queue created when this device driver was loaded.
		iDfcQ->Destroy();
		}
	}


/**
	Create a new DDefragChannel on this logical device.

@param  aChannel On successful return this will point to the new channel.
@return KErrNone on success or KErrNoMemory if the channel couldn't be created.
*/
TInt DDefragChannelFactory::Create(DLogicalChannelBase*& aChannel)
	{
	aChannel = new DDefragChannel(iDfcQ);
	return (aChannel)? KErrNone : KErrNoMemory;
	}


/**
	Install the LDD - overriding pure virtual

@return KErrNone on success or one of the system wide error codes.
*/
TInt DDefragChannelFactory::Install()
	{
	return SetName(&KLddName);
	}


/**
	Get capabilities - overriding pure virtual

@param aDes A descriptor to be loaded with the capabilities.
*/
void DDefragChannelFactory::GetCaps(TDes8& aDes) const
	{
	TCapsDefragTestV01 b;
	b.iVersion=TVersion(KMajorVersionNumber,KMinorVersionNumber,KBuildVersionNumber);
	Kern::InfoCopy(aDes,(TUint8*)&b,sizeof(b));
	}


/**
	Constructor

@param aDfcQ The DFC queue to use for defrag completion DFCs.
*/
DDefragChannel::DDefragChannel(TDfcQue* aDfcQ)
		:
		iDefragSemaphore(NULL),
		iCompleteReq(NULL),
		iBufChunk(NULL),
		iChunkCleanup(NULL),
		iDfcQ(aDfcQ),
		iDefragCompleteDfc(DefragCompleteDfc, (TAny*)this, 1)	// DFC priority is 1; it is the only type of DFC on this queue.
	{
	}


/**
	Create the channel.

@param aVer The version number required.
@return KErrNone on success, KErrPermissionDenied if the client lacks
		ECapabilityPowerMgmt, or KErrNotSupported if the device doesn't
		support defragmentation.
*/
TInt DDefragChannel::DoCreate(TInt /*aUnit*/, const TDesC8* /*anInfo*/, const TVersion& aVer)
	{
	// Check the client has ECapabilityPowerMgmt capability.
	if(!Kern::CurrentThreadHasCapability(ECapabilityPowerMgmt, __PLATSEC_DIAGNOSTIC_STRING("Checked by DDefragChannel")))
		{
		return KErrPermissionDenied;
		}
	TInt pageSize;
	TInt r = Kern::HalFunction(EHalGroupKernel, EKernelHalPageSizeInBytes, &pageSize, 0);
	if (r != KErrNone)
		{
		TRACE(Kern::Printf("ERROR - Unable to determine page size"));
		return r;
		}
	TUint32 pageMask = pageSize;
	TUint i = 0;
	for (; i < 32; i++)
		{
		if (pageMask & 1)
			{
			if (pageMask & ~1u)
				{
				TRACE(Kern::Printf("ERROR - page size not a power of 2"));
				return KErrNotSupported;
				}
			iPageShift = i;
			break;
			}
		pageMask >>= 1;
		}
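	// For example, a page size of 0x1000 (4096 bytes) yields iPageShift == 12.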

	// Check the client is a supported version.
	if (!Kern::QueryVersionSupported(TVersion(KMajorVersionNumber,KMinorVersionNumber,KBuildVersionNumber),aVer))
		{
		return KErrNotSupported;
		}

	// Check this system has more than one RAM zone defined.
	// A real driver shouldn't need to do this as any driver that uses defrag should
	// only be loaded on devices that support it.
	TInt ret = FindLowestPrefZone();
	if (ret != KErrNone)
		{// Only one zone so can't move pages anywhere or empty a zone.
		return KErrNotSupported;
		}

	// Create a semaphore to protect defrag invocation.  OK to just use one name as
	// the semaphore is not global, so its name doesn't need to be unique.
	ret = Kern::SemaphoreCreate(iDefragSemaphore, _L("DefragRefSem"), 1);
	if (ret != KErrNone)
		{
		return ret;
		}

	// Create a client request for completing DFC defrag requests.
	ret = Kern::CreateClientRequest(iCompleteReq);
	if (ret != KErrNone)
		{
		iDefragSemaphore->Close(NULL);
		return ret;
		}

	// Set up a DFC to be invoked when a defrag operation completes.
	iDefragCompleteDfc.SetDfcQ(iDfcQ);
	iDefragDfcFree = ETrue;

	return KErrNone;
	}


/**
	Destructor
*/
DDefragChannel::~DDefragChannel()
	{
	// Clean up any heap objects.
	if (iDefragSemaphore != NULL)
		{
		iDefragSemaphore->Close(NULL);
		}

	// Unregister from any chunk cleanup object as we are about to be deleted.
	if (iChunkCleanup != NULL)
		{
		iChunkCleanup->RemoveDevice();
		}
	// Clean up any client request object.
	if (iCompleteReq)
		{
		Kern::DestroyClientRequest(iCompleteReq);
		}
	// Free any existing chunk.
	DoChunkClose();
	}


/**
	Handle the requests for this channel.

@param aFunction	The operation the LDD should perform.
@param a1			The first argument for the operation.
@param a2			The second argument for the operation.
@return KErrNone on success or one of the system wide error codes.
*/
TInt DDefragChannel::Request(TInt aFunction, TAny* a1, TAny* a2)
	{
	TInt r = KErrNone;
	NKern::ThreadEnterCS();

	Kern::SemaphoreWait(*iDefragSemaphore);
	if (!iDefragDfcFree && aFunction != RDefragChannel::EControlGeneralDefragDfcComplete)
		{// Only allow a single defrag operation at a time.
		r = KErrInUse;
		goto exit;
		}

	switch (aFunction)
		{
		case RDefragChannel::EControlGeneralDefragDfc:
			// Queue a defrag operation so that on completion it queues a
			// DFC on this driver.
			iRequestThread = &Kern::CurrentThread();
			iRequestThread->Open();

			// Open a reference on this channel to stop the destructor running before
			// the defrag request has completed.
			Open();
			r = iCompleteReq->SetStatus((TRequestStatus*)a1);
			if (r == KErrNone)
				r = iDefragReq.DefragRam(&iDefragCompleteDfc, KDefragRamThreadPriority);
			if (r != KErrNone)
				{// The defrag operation didn't start, so close all opened handles.
				AsyncClose();
				iRequestThread->AsyncClose();
				iRequestThread = NULL;
				}
			else
				iDefragDfcFree = EFalse;
			break;

		case RDefragChannel::EControlGeneralDefragDfcComplete:
			if (iRequestThread != NULL)
				{// The defrag DFC hasn't completed so this shouldn't have been invoked.
				r = KErrGeneral;
				}
			else
				{
				iDefragDfcFree = ETrue;
				}
			break;

		case RDefragChannel::EControlGeneralDefragSem:
			{// Queue a defrag operation so that it will signal a fast semaphore once
			// it has completed.
			NFastSemaphore sem;
			NKern::FSSetOwner(&sem, 0);
			r = iDefragReq.DefragRam(&sem, KDefragRamThreadPriority);

			if (r != KErrNone)
				{// An error occurred attempting to queue the defrag operation.
				break;
				}

			// The defrag operation has now been queued, so wait for it to finish.
			// Could do some extra kernel-side work here before waiting on the
			// semaphore.
			NKern::FSWait(&sem);
			r = iDefragReq.Result();
			}
			break;

		case RDefragChannel::EControlGeneralDefrag:
			// Synchronously perform a defrag.
			{
			r = iDefragReq.DefragRam(KDefragRamThreadPriority);
			}
			break;

		case RDefragChannel::EControlAllocLowestZone:
			// Allocate from the lowest preference zone.
			r = DoAllocLowestZone();
			break;

		case RDefragChannel::EControlClaimLowestZone:
			// Claim the lowest preference zone.
			r = DoClaimLowestZone();
			break;

		case RDefragChannel::EControlCloseChunk:
			// Have finished with the chunk so close it, then free the RAM mapped by it.
			r = DoChunkClose();
			TRACE(if (r != KErrNone) {Kern::Printf("ChunkClose returns %d", r);});
			break;

		default:
			r=KErrNotSupported;
			break;
		}
exit:
	Kern::SemaphoreSignal(*iDefragSemaphore);
	NKern::ThreadLeaveCS();
	TRACE(if (r != KErrNone) {Kern::Printf("DDefragChannel::Request returns %d", r);});
	return r;
	}
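
// A minimal sketch of how a user-side client might exercise these requests.
// This assumes RDefragChannel in d_defrag_ref.h derives from RBusLogicalChannel
// and provides wrappers for the EControl* operations; the wrapper names and
// KDefragLddName below are illustrative assumptions, not part of this file.
//
//	RDefragChannel defrag;
//	TInt r = User::LoadLogicalDevice(KDefragLddName);
//	if (r == KErrNone || r == KErrAlreadyExists)
//		r = defrag.Open();						// leads to DDefragChannel::DoCreate()
//	if (r == KErrNone)
//		{
//		TRequestStatus status;
//		r = defrag.GeneralDefragDfc(&status);	// EControlGeneralDefragDfc
//		User::WaitForRequest(status);			// completed via iCompleteReq
//		defrag.GeneralDefragDfcComplete();		// EControlGeneralDefragDfcComplete
//		defrag.Close();
//		}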


/**
	Allocates RAM from the lowest preference zone and maps it to a shared chunk.

	Real drivers would not need to determine which zone to allocate from as they
	will know the zone's ID.

@return KErrNone on success, otherwise one of the system wide error codes.
*/
TInt DDefragChannel::DoAllocLowestZone()
	{
	TInt r = KErrNone;
	TLinAddr chunkAddr = 0;
	TUint32 mapAttr = 0;
	TChunkCreateInfo createInfo;
	TLinAddr bufBaseAddr;
	TUint bufPages;
	TPhysAddr* bufAddrs;

	if (iBufChunk != NULL)
		{// The buffer chunk is already mapped so it can't be used again until it is
		// freed/closed. Wait a short while for it to be freed as it may be in the
		// process of being destroyed.
		if (WaitForIdle() != KErrNone || iBufChunk != NULL)
			{// The chunk still hasn't been freed so can't proceed.
			r = KErrInUse;
			goto exit;
			}
		}

	// Attempt to allocate all the pages it should be possible to allocate.
	// Real device drivers will know how much they need to allocate so they
	// wouldn't determine it here.
	SRamZoneUtilisation zoneUtil;
	Kern::HalFunction(EHalGroupRam, ERamHalGetZoneUtilisation, (TAny*)iLowestPrefZoneIndex, (TAny*)&zoneUtil);
	bufPages = iLowestPrefZonePages - (zoneUtil.iAllocFixed + zoneUtil.iAllocUnknown + zoneUtil.iAllocOther);
	bufAddrs = new TPhysAddr[bufPages];
	if (!bufAddrs)
		{
		TRACE(Kern::Printf("Failed to allocate an array for bufAddrs"));
		r = KErrNoMemory;
		goto exit;
		}

	// Update the page count as bufAddrs allocation may have caused the kernel
	// heap to grow.
	Kern::HalFunction(EHalGroupRam, ERamHalGetZoneUtilisation, (TAny*)iLowestPrefZoneIndex, (TAny*)&zoneUtil);
	bufPages = iLowestPrefZonePages - (zoneUtil.iAllocFixed + zoneUtil.iAllocUnknown + zoneUtil.iAllocOther);

	// Allocate discontiguous pages from the zone.
	r = Epoc::ZoneAllocPhysicalRam(iLowestPrefZoneId, bufPages, bufAddrs);
	if (r != KErrNone && r != KErrNoMemory)
		{
		TRACE(Kern::Printf("Zone Alloc returns %d bufPages %x", r, bufPages));
		goto exit;
		}
	// If we couldn't allocate all the required pages then empty the zone
	// and retry.
	if (r == KErrNoMemory)
		{
		r = iDefragReq.EmptyRamZone(iLowestPrefZoneId, TRamDefragRequest::KInheritPriority);
		if (r != KErrNone)
			{
			TRACE(Kern::Printf("Empty returns %d", r));
			goto exit;
			}
		r = Epoc::ZoneAllocPhysicalRam(iLowestPrefZoneId, bufPages, bufAddrs);
		if (r != KErrNone)
			{
			TRACE(Kern::Printf("ZoneAlloc1 returns %d bufPages %x", r, bufPages));
			goto exit;
			}
		}

	// Create a chunk cleanup object which will free the physical RAM when the
	// chunk is destroyed.
	iChunkCleanup = new TChunkCleanup(this, bufAddrs, bufPages);
	if (!iChunkCleanup)
		{
		TRACE(Kern::Printf("iChunkCleanup creation failed"));
		r = Epoc::FreePhysicalRam(bufPages, bufAddrs);
		if (r != KErrNone)
			{
			TRACE(Kern::Printf("ERROR - freeing physical memory when chunkCleanup create failed"));
			}
		else
			{
			r = KErrNoMemory;
			}
		goto exit;
		}

	// Map the allocated buffer pages to a chunk so we can use it.
	createInfo.iType = TChunkCreateInfo::ESharedKernelSingle; // could also be ESharedKernelMultiple
	createInfo.iMaxSize = bufPages << iPageShift;
	createInfo.iMapAttr = EMapAttrFullyBlocking; // Non-cached - See TMappingAttributes for all options
	createInfo.iOwnsMemory = EFalse; // Must be false as the physical RAM has already been allocated
	createInfo.iDestroyedDfc = iChunkCleanup;
	r = Kern::ChunkCreate(createInfo, iBufChunk, chunkAddr, mapAttr);
	if (r != KErrNone)
		{
		TRACE(Kern::Printf("ChunkCreate returns %d size %x pages %x", r, createInfo.iMaxSize, bufPages));
		goto exit;
		}

	// Map the physical memory to the chunk.
	r = Kern::ChunkCommitPhysical(iBufChunk, 0, createInfo.iMaxSize, bufAddrs);
	if (r != KErrNone)
		{
		TRACE(Kern::Printf("CommitPhys returns %d", r));
		goto exit;
		}

	// Now that the RAM is mapped into a chunk, get the kernel-side virtual
	// base address of the buffer.
	r = Kern::ChunkAddress(iBufChunk, 0, createInfo.iMaxSize, bufBaseAddr);

	// Using bufBaseAddr a real driver may now do something with the buffer.  We'll just return.

exit:
	return r;
	}


/**
	Claims the lowest preference zone and maps it to a shared chunk.

	Real drivers would not need to determine which zone to claim as they
	will know the zone's ID.

@return KErrNone on success, otherwise one of the system wide error codes.
*/
TInt DDefragChannel::DoClaimLowestZone()
	{
	TInt r = KErrNone;
	TChunkCreateInfo createInfo;
	TLinAddr bufBaseAddr;
	TLinAddr chunkAddr;
	TUint32 mapAttr = 0;
	TPhysAddr bufBase;
	TUint bufBytes;

	if (iBufChunk != NULL)
		{// The buffer chunk is already mapped so it can't be used again until it is
		// freed/closed. Wait a short while for it to be freed as it may be in the
		// process of being destroyed.
		if (WaitForIdle() != KErrNone || iBufChunk != NULL)
			{// The chunk still hasn't been freed so can't proceed.
			r = KErrInUse;
			goto exit;
			}
		}

	// Claim the zone; its base address will be stored in bufBase.
	r = iDefragReq.ClaimRamZone(iLowestPrefZoneId, bufBase, TRamDefragRequest::KInheritPriority);
	if (r != KErrNone)
		{
		TRACE(Kern::Printf("Claim returns %d", r));
		goto exit;
		}

	// Create a chunk cleanup object which will free the physical RAM when the
	// chunk is destroyed.
	bufBytes = iLowestPrefZonePages << iPageShift;
	iChunkCleanup = new TChunkCleanup(this, bufBase, bufBytes);
	if (!iChunkCleanup)
		{
		TRACE(Kern::Printf("chunkCleanup creation failed"));
		// Free the claimed contiguous RAM; the contiguous overload of
		// Epoc::FreePhysicalRam() takes the base address first, then the size.
		r = Epoc::FreePhysicalRam(bufBase, bufBytes);
		if (r != KErrNone)
			{
			TRACE(Kern::Printf("ERROR - freeing physical memory when chunkCleanup create failed"));
			}
		else
			{
			r = KErrNoMemory;
			}
		goto exit;
		}

	// Map the claimed zone to a chunk so we can use it.
	createInfo.iType = TChunkCreateInfo::ESharedKernelSingle; // could also be ESharedKernelMultiple
	createInfo.iMaxSize = bufBytes;
	createInfo.iMapAttr = EMapAttrFullyBlocking; // Non-cached - See TMappingAttributes for all options
	createInfo.iOwnsMemory = EFalse; // Must be false as the physical RAM has already been allocated
	createInfo.iDestroyedDfc = iChunkCleanup;
	r = Kern::ChunkCreate(createInfo, iBufChunk, chunkAddr, mapAttr);
	if (r != KErrNone)
		{
		TRACE(Kern::Printf("ChunkCreate returns %d size %x bytes %x", r, createInfo.iMaxSize, bufBytes));
		goto exit;
		}

	// Map the physically contiguous memory to the chunk.
	r = Kern::ChunkCommitPhysical(iBufChunk, 0, createInfo.iMaxSize, bufBase);
	if (r != KErrNone)
		{
		TRACE(Kern::Printf("CommitPhys returns %d", r));
		goto exit;
		}

	// Now that the RAM is mapped into a chunk, get the kernel-side virtual
	// base address of the buffer.
	r = Kern::ChunkAddress(iBufChunk, 0, createInfo.iMaxSize, bufBaseAddr);

	// Using bufBaseAddr a real driver may now do something with the buffer.  We'll just return.

exit:
	return r;
	}


/**
	Determine the lowest preference zone.

@return KErrNone on success or KErrNotFound if there is only one zone.
*/
TInt DDefragChannel::FindLowestPrefZone()
	{
	TUint zoneCount;
	TInt r = Kern::HalFunction(EHalGroupRam, ERamHalGetZoneCount, (TAny*)&zoneCount, NULL);
	if (r != KErrNone)
		return r;

	if (zoneCount == 1)
		{// Only one zone so can't move pages anywhere or empty a zone.
		return KErrNotFound;
		}

	SRamZoneConfig zoneConfig;
	SRamZoneUtilisation zoneUtil;
	Kern::HalFunction(EHalGroupRam, ERamHalGetZoneConfig, (TAny*)0, (TAny*)&zoneConfig);
	Kern::HalFunction(EHalGroupRam, ERamHalGetZoneUtilisation, (TAny*)0, (TAny*)&zoneUtil);
	TUint lowestPref = zoneConfig.iPref;
	TUint lowestFreePages = zoneUtil.iFreePages;
	iLowestPrefZoneIndex = 0;
	iLowestPrefZoneId = zoneConfig.iZoneId;
	TUint i = 1;
	for (; i < zoneCount; i++)
		{
		Kern::HalFunction(EHalGroupRam, ERamHalGetZoneConfig, (TAny*)i, (TAny*)&zoneConfig);
		Kern::HalFunction(EHalGroupRam, ERamHalGetZoneUtilisation, (TAny*)i, (TAny*)&zoneUtil);
		// When zones have the same preference, the zone with the most free pages
		// is picked; if the free page counts are also equal, the zone later in
		// the zone list wins.
		if (zoneConfig.iPref > lowestPref ||
			(zoneConfig.iPref == lowestPref && zoneUtil.iFreePages >= lowestFreePages))
			{
			lowestPref = zoneConfig.iPref;
			lowestFreePages = zoneUtil.iFreePages;
			iLowestPrefZoneIndex = i;
			iLowestPrefZoneId = zoneConfig.iZoneId;
			}
		}
	// Now that we know the current least preferable zone, store its size.
	Kern::HalFunction(EHalGroupRam, ERamHalGetZoneConfig, (TAny*)iLowestPrefZoneIndex, (TAny*)&zoneConfig);
	iLowestPrefZonePages = zoneConfig.iPhysPages;
	TRACE(Kern::Printf("LowestPrefZone %x size %x", iLowestPrefZoneId, iLowestPrefZonePages));
	return KErrNone;
	}
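
// Worked example (assumed values): given three zones with preferences
// {1, 2, 2} where both preference-2 zones have equal free page counts,
// FindLowestPrefZone() selects the second preference-2 zone - the one
// later in the zone list - as the least preferable zone.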


/**
	DFC callback called when a defrag operation has completed.

@param aSelf A pointer to the DDefragChannel that requested the defrag operation.
*/
void DDefragChannel::DefragCompleteDfc(TAny* aSelf)
	{
	// Just call the non-static method.
	((DDefragChannel*)aSelf)->DefragComplete();
	}


/**
	Invoked by the DFC callback which is called when a defrag
	operation has completed.
*/
void DDefragChannel::DefragComplete()
	{
	TRACE(Kern::Printf(">DDefragChannel::DefragComplete"));
	TInt result = iDefragReq.Result();
	TRACE(Kern::Printf("complete code %d", result));

	Kern::SemaphoreWait(*iDefragSemaphore);

	Kern::QueueRequestComplete(iRequestThread, iCompleteReq, result);
	iRequestThread->AsyncClose();
	iRequestThread = NULL;

	Kern::SemaphoreSignal(*iDefragSemaphore);

	TRACE(Kern::Printf("<DDefragChannel::DefragComplete"));
	// Close the handle on this channel - WARNING this channel may be
	// deleted immediately after this call so don't access any members.
	AsyncClose();
	}


/**
	Close the chunk.

@return KErrNone on success or one of the system wide error codes.
*/
TInt DDefragChannel::DoChunkClose()
	{
	if (iBufChunk == NULL)
		{// Someone tried to close the chunk before using it.
		return KErrNotFound;
		}

	// Rely on the chunk cleanup object being called as that
	// is what will actually free the physical RAM committed to the chunk.
	Kern::ChunkClose(iBufChunk);
	return KErrNone;
	}


/**
	The chunk has now been destroyed so reset the pointers to allow a new
	chunk to be created.
*/
void DDefragChannel::ChunkDestroyed()
	{
	__e32_atomic_store_ord_ptr(&iBufChunk, 0);
	__e32_atomic_store_ord_ptr(&iChunkCleanup, 0);
	}


/**
	Construct a Shared Chunk cleanup object which will free the chunk's discontiguous
	physical memory when the chunk is destroyed.

@param aDevice The device to inform when the chunk is destroyed.
@param aBufAddrs The physical base addresses of each of the chunk's memory pages.
@param aBufPages The total number of the chunk's pages.
*/
TChunkCleanup::TChunkCleanup(DDefragChannel* aDevice, TPhysAddr* aBufAddrs, TUint aBufPages)
	: TDfc((TDfcFn)TChunkCleanup::ChunkDestroyed,this,Kern::SvMsgQue(),0),
	iBufAddrs(aBufAddrs),
	iBufSize(aBufPages),
	iBufContiguous(EFalse),
	iDevice(aDevice)
	{}


/**
	Construct a Shared Chunk cleanup object which will free the chunk's contiguous
	physical memory when the chunk is destroyed.

@param aDevice The device to inform when the chunk is destroyed.
@param aBufBase The physical base address of the chunk's memory.
@param aBufBytes The total number of the chunk's bytes.
*/
TChunkCleanup::TChunkCleanup(DDefragChannel* aDevice, TPhysAddr aBufBase, TUint aBufBytes)
	: TDfc((TDfcFn)TChunkCleanup::ChunkDestroyed,this,Kern::SvMsgQue(),0),
	iBufBase(aBufBase),
	iBufSize(aBufBytes),
	iBufContiguous(ETrue),
	iDevice(aDevice)
	{}

/**
	Callback function which is called when the DFC runs, i.e. when a chunk is
	destroyed; it frees the physical memory allocated when the chunk was created.

@param aSelf Pointer to the cleanup object associated with the chunk that has
been destroyed.
*/
void TChunkCleanup::ChunkDestroyed(TChunkCleanup* aSelf)
	{
	aSelf->DoChunkDestroyed();

	// We've finished, so now delete ourselves.
	delete aSelf;
	}


/**
	The chunk has been destroyed so free the physical RAM that was allocated
	for its use and inform iDevice that it has been destroyed.
*/
void TChunkCleanup::DoChunkDestroyed()
	{
	if (iBufContiguous)
		{
		__NK_ASSERT_ALWAYS(Epoc::FreePhysicalRam(iBufBase, iBufSize) == KErrNone);
		}
	else
		{
		__NK_ASSERT_ALWAYS(Epoc::FreePhysicalRam(iBufSize, iBufAddrs) == KErrNone);
		}

	if (iDevice != NULL)
		{// Allow iDevice to perform any cleanup it requires for this chunk.
		iDevice->ChunkDestroyed();
		}
	}


/**
	Remove the device so its ChunkDestroyed() method isn't invoked when the chunk is
	destroyed.
*/
void TChunkCleanup::RemoveDevice()
	{
	__e32_atomic_store_ord_ptr(&iDevice, 0);
	}