First public contribution.
1 // Copyright (c) 2007-2009 Nokia Corporation and/or its subsidiary(-ies).
2 // All rights reserved.
3 // This component and the accompanying materials are made available
4 // under the terms of the License "Eclipse Public License v1.0"
5 // which accompanies this distribution, and is available
6 // at the URL "http://www.eclipse.org/legal/epl-v10.html".
8 // Initial Contributors:
9 // Nokia Corporation - initial contribution.
14 // e32test\examples\defrag\d_defrag_ref.cpp
15 // Reference LDD for invoking defrag APIs.
19 #include <kernel/kern_priv.h>
22 #include "d_defrag_ref.h"
// Version of this LDD; checked against the client-requested version in DoCreate().
24 const TInt KMajorVersionNumber=0;
25 const TInt KMinorVersionNumber=1;
26 const TInt KBuildVersionNumber=1;
28 #if 1 // Set true for tracing
// Priority of the DFC-queue thread that runs defrag-completion DFCs.
34 const TInt KDefragCompleteThreadPriority = 27;
// Thread priority passed to the TRamDefragRequest APIs when queuing defrag operations.
35 const TInt KDefragRamThreadPriority = 1;
// Name of the dynamic DFC queue's thread (created in DECLARE_STANDARD_LDD).
36 _LIT(KDefragCompleteThread,"DefragCompleteThread");
41 Clean up item responsible for ensuring all memory committed to a chunk is
42 freed once the chunk is destroyed
44 class TChunkCleanup : public TDfc
47 TChunkCleanup(DDefragChannel* aDevice, TPhysAddr* aBufAddrs, TUint aBufPages);
48 TChunkCleanup(DDefragChannel* aDevice, TPhysAddr aBufBase, TUint aBufBytes);
49 static void ChunkDestroyed(TChunkCleanup* aSelf);
53 void DoChunkDestroyed();
56 TPhysAddr* iBufAddrs; /**< Pointer to an array of the addresses of discontiguous buffer pages*/
57 TPhysAddr iBufBase; /**< Physical base address of the physically contiguous buffer*/
58 TUint iBufSize; /**< The number of pages or bytes in the buffer depending if this is
59 discontiguous or contiguous buffer, respectively*/
60 TBool iBufContiguous; /**< ETrue when the memory to be freed is contiguous, EFalse otherwise*/
61 DDefragChannel* iDevice; /**< The device to be informed when the chunk is destroyed */
66 Reference defrag LDD factory.
68 class DDefragChannelFactory : public DLogicalDevice
71 DDefragChannelFactory();
72 ~DDefragChannelFactory();
73 virtual TInt Install(); //overriding pure virtual
74 virtual void GetCaps(TDes8& aDes) const; //overriding pure virtual
75 virtual TInt Create(DLogicalChannelBase*& aChannel);//overriding pure virtual
77 TDynamicDfcQue* iDfcQ; /**< DFC queue created at LDD load time; destroyed with the factory */
82 Reference defrag logical channel.
84 class DDefragChannel : public DLogicalChannelBase
87 DDefragChannel(TDfcQue* aDfcQ);
89 void ChunkDestroyed();
91 virtual TInt DoCreate(TInt aUnit, const TDesC8* anInfo, const TVersion& aVer);
92 virtual TInt Request(TInt aFunction, TAny* a1, TAny* a2);
94 TInt DoAllocLowestZone();
95 TInt DoClaimLowestZone();
97 TInt FindLowestPrefZone();
99 static void DefragCompleteDfc(TAny* aSelf);
100 void DefragComplete();
103 TInt iPageShift; /**< The system's page shift */
104 DSemaphore* iDefragSemaphore;/**< Semaphore to ensure only one defrag operation is active per channel*/
105 TClientRequest* iCompleteReq;/**< Pointer to a request status that will signal to the user side client once the defrag has completed*/
106 DThread* iRequestThread; /**< Pointer to the thread that made the defrag request*/
107 TRamDefragRequest iDefragReq;/**< The defrag request used to queue defrag operations*/
108 DChunk* iBufChunk; /**< Pointer to a chunk that can be mapped to a physical RAM area*/
109 TChunkCleanup* iChunkCleanup;/**< Pointer to iBufChunk's cleanup object */
110 TDfcQue* iDfcQ; /**< The DFC queue used for driver functions */
111 TDfc iDefragCompleteDfc; /**< DFC to be queued once a defrag operation has completed */
112 TBool iDefragDfcFree; /**< Set to EFalse while a dfc defrag operation is still pending*/
113 TUint iLowestPrefZoneId; /**< The ID of the least preferable RAM zone*/
114 TUint iLowestPrefZonePages; /**< The number of pages in the least preferable RAM zone*/
115 TUint iLowestPrefZoneIndex; /**< The test HAL function index of the least preferable RAM zone*/
119 Utility functions to wait for the chunk cleanup DFC to be queued, by waiting for the
120 idle thread to be queued.
122 void signal_sem(TAny* aPtr)
124 NKern::FSSignal((NFastSemaphore*)aPtr);
128 {// Wait for chunk to be destroyed and then for the chunk cleanup dfc to run.
129 for (TUint i = 0; i < 2; i++)
132 TDfc idler(&signal_sem, &s, Kern::SvMsgQue(), 0); // supervisor thread, priority 0, so will run after destroyed DFC
133 NTimer timer(&signal_sem, &s); // timeout guard in case the idle/cleanup DFC never runs
135 timer.OneShot(NKern::TimerTicks(5000), ETrue); // runs in DFCThread1
136 NKern::FSWait(&s); // wait for either idle DFC or timer
137 TBool timeout = idler.Cancel(); // cancel idler, return TRUE if it hadn't run
138 TBool tmc = timer.Cancel(); // cancel timer, return TRUE if it hadn't expired
139 if (!timeout && !tmc)
140 NKern::FSWait(&s); // both the DFC and the timer went off - wait for the second one
148 Standard logical device driver entry point.
149 Called the first time this device driver is loaded.
151 DECLARE_STANDARD_LDD()
153 DDefragChannelFactory* factory = new DDefragChannelFactory;
156 // Allocate a kernel thread to run the DFC
157 TInt r = Kern::DynamicDfcQCreate(factory->iDfcQ, KDefragCompleteThreadPriority, KDefragCompleteThread);
161 // Must close rather than delete the factory as it is a DObject-derived (reference-counted) object.
162 factory->AsyncClose();
// Constructor — record the version of this logical device.
173 DDefragChannelFactory::DDefragChannelFactory()
175 iVersion=TVersion(KMajorVersionNumber,KMinorVersionNumber,KBuildVersionNumber);
182 DDefragChannelFactory::~DDefragChannelFactory()
185 {// Destroy the DFC queue created when this device driver was loaded.
192 Create a new DDefragChannel on this logical device.
194 @param aChannel On successful return this will point to the new channel.
195 @return KErrNone on success or KErrNoMemory if the channel couldn't be created.
197 TInt DDefragChannelFactory::Create(DLogicalChannelBase*& aChannel)
// Kernel-side new returns NULL on allocation failure — reported as KErrNoMemory below.
199 aChannel = new DDefragChannel(iDfcQ);
200 return (aChannel)? KErrNone : KErrNoMemory;
205 Install the LDD - overriding pure virtual
207 @return KErrNone on success or one of the system wide error codes.
209 TInt DDefragChannelFactory::Install()
// Set the object name clients use to open this device (KLddName — presumably declared in d_defrag_ref.h; confirm).
211 return SetName(&KLddName);
216 Get capabilities - overriding pure virtual
218 @param aDes A descriptor to be loaded with the capabilities.
220 void DDefragChannelFactory::GetCaps(TDes8& aDes) const
222 TCapsDefragTestV01 b;
223 b.iVersion=TVersion(KMajorVersionNumber,KMinorVersionNumber,KBuildVersionNumber);
// InfoCopy copies at most the client descriptor's maximum length, so smaller client buffers are safe.
224 Kern::InfoCopy(aDes,(TUint8*)&b,sizeof(b));
231 @param aDfcQ The DFC queue to use for defrag completion DFCs.
233 DDefragChannel::DDefragChannel(TDfcQue* aDfcQ)
235 iDefragSemaphore(NULL),
240 iDefragCompleteDfc(DefragCompleteDfc, (TAny*)this, 1) // DFC priority '1' — this is the only kind of DFC queued on this queue, so the relative priority is irrelevant.
248 @param aVer The version number required.
249 @return KErrNone on success, KErrNotSupported if the device doesn't support defragmentation.
251 TInt DDefragChannel::DoCreate(TInt /*aUnit*/, const TDesC8* /*anInfo*/, const TVersion& aVer)
253 // Check the client has ECapabilityPowerMgmt capability.
254 if(!Kern::CurrentThreadHasCapability(ECapabilityPowerMgmt, __PLATSEC_DIAGNOSTIC_STRING("Checked by DDefragChannel")))
256 return KErrPermissionDenied;
259 TInt r = Kern::HalFunction(EHalGroupKernel, EKernelHalPageSizeInBytes, &pageSize, 0);
262 TRACE(Kern::Printf("ERROR - Unable to determine page size"));
265 TUint32 pageMask = pageSize;
273 TRACE(Kern::Printf("ERROR - page size not a power of 2"));
274 return KErrNotSupported;
282 // Check the client is a supported version.
283 if (!Kern::QueryVersionSupported(TVersion(KMajorVersionNumber,KMinorVersionNumber,KBuildVersionNumber),aVer))
285 return KErrNotSupported;
288 // Check this system has more than one RAM zone defined.
289 // A real driver shouldn't need to do this as any driver that uses defrag should
290 // only be loaded on devices that support it.
291 TInt ret = FindLowestPrefZone();
293 {// Only one zone so can't move pages anywhere or empty a zone
294 return KErrNotSupported;
297 // Create a semaphore to protect defrag invocation. OK to just use one name as
298 // the semaphore is not global so its name doesn't need to be unique.
299 ret = Kern::SemaphoreCreate(iDefragSemaphore, _L("DefragRefSem"), 1);
305 // Create a client request for completing dfc defrag requests.
306 ret = Kern::CreateClientRequest(iCompleteReq);
309 iDefragSemaphore->Close(NULL);
313 // Setup a DFC to be invoked when a defrag operation completes.
314 iDefragCompleteDfc.SetDfcQ(iDfcQ);
315 iDefragDfcFree = ETrue;
// Destructor — release the semaphore, detach from any outstanding chunk cleanup,
// destroy the client request object, and free any remaining chunk.
324 DDefragChannel::~DDefragChannel()
326 // Clean up any heap objects.
327 if (iDefragSemaphore != NULL)
329 iDefragSemaphore->Close(NULL);
332 // Unregister from any chunk cleanup object as we are to be deleted.
333 if (iChunkCleanup != NULL)
335 iChunkCleanup->RemoveDevice();
337 // Clean up any client request object.
340 Kern::DestroyClientRequest(iCompleteReq);
342 // Free any existing chunk.
348 Handle the requests for this channel.
350 @param aFunction The operation the LDD should perform.
351 @param a1 The first argument for the operation.
352 @param a2 The second argument for the operation.
353 @return KErrNone on success or one of the system wide error codes.
355 TInt DDefragChannel::Request(TInt aFunction, TAny* a1, TAny* a2)
358 NKern::ThreadEnterCS();
360 Kern::SemaphoreWait(*iDefragSemaphore);
361 if (!iDefragDfcFree && aFunction != RDefragChannel::EControlGeneralDefragDfcComplete)
362 {// Only allow a single defrag operation at a time.
369 case RDefragChannel::EControlGeneralDefragDfc:
370 // Queue a defrag operation so that on completion it queues a
371 // DFC on this driver.
372 iRequestThread = &Kern::CurrentThread();
373 iRequestThread->Open();
375 // Open a reference on this channel to stop the destructor running before
376 // the defrag request has completed.
378 r = iCompleteReq->SetStatus((TRequestStatus*)a1);
380 r = iDefragReq.DefragRam(&iDefragCompleteDfc, KDefragRamThreadPriority);
382 {// defrag operation didn't start so close all opened handles
384 iRequestThread->AsyncClose();
385 iRequestThread = NULL;
388 iDefragDfcFree = EFalse;
391 case RDefragChannel::EControlGeneralDefragDfcComplete:
392 if (iRequestThread != NULL)
393 {// The defrag dfc hasn't completed so this shouldn't have been invoked.
398 iDefragDfcFree = ETrue;
402 case RDefragChannel::EControlGeneralDefragSem:
403 {// Queue a defrag operation so that it will signal a fast semaphore once
406 NKern::FSSetOwner(&sem, 0);
407 r = iDefragReq.DefragRam(&sem, KDefragRamThreadPriority);
410 {// Error occurred attempting to queue the defrag operation.
414 // Defrag operation has now been queued so wait for it to finish.
415 // Could do some extra kernel side work here before waiting on the
418 r = iDefragReq.Result();
422 case RDefragChannel::EControlGeneralDefrag:
423 // Synchronously perform a defrag.
425 r = iDefragReq.DefragRam(KDefragRamThreadPriority);
429 case RDefragChannel::EControlAllocLowestZone:
430 // Allocate from the lowest preference zone
431 r = DoAllocLowestZone();
434 case RDefragChannel::EControlClaimLowestZone:
435 // Claims the lowest preference zone
436 r = DoClaimLowestZone();
439 case RDefragChannel::EControlCloseChunk:
440 // Have finished with the chunk so close it then free the RAM mapped by it
442 TRACE( if (r != KErrNone) {Kern::Printf("ChunkClose returns %d", r);});
450 Kern::SemaphoreSignal(*iDefragSemaphore);
451 NKern::ThreadLeaveCS();
452 TRACE(if (r!=KErrNone) {Kern::Printf("DDefragChannel::Request returns %d", r); });
458 Allocates RAM from the lowest preference zone and maps it to a shared chunk.
460 Real drivers would not need to determine which zone to allocate from as they
461 will know the zone's ID.
463 @return KErrNone on success, otherwise one of the system wide error codes.
465 TInt DDefragChannel::DoAllocLowestZone()
468 TLinAddr chunkAddr = NULL;
469 TUint32 mapAttr = NULL;
470 TChunkCreateInfo createInfo;
471 TLinAddr bufBaseAddr;
475 if (iBufChunk != NULL)
476 {// The buffer chunk is already mapped so can't use again until it is
477 // freed/closed. Wait a short while for it to be freed as it may be in the
478 // process of being destroyed.
479 if (WaitForIdle() != KErrNone || iBufChunk != NULL)
480 {// chunk still hasn't been freed so can't proceed.
486 // Attempt to allocate all the pages it should be possible to allocate.
487 // Real device drivers will know how much they need to allocate so they
488 // wouldn't determine it here.
489 SRamZoneUtilisation zoneUtil;
490 Kern::HalFunction(EHalGroupRam, ERamHalGetZoneUtilisation, (TAny*)iLowestPrefZoneIndex, (TAny*)&zoneUtil);
491 bufPages = iLowestPrefZonePages - (zoneUtil.iAllocFixed + zoneUtil.iAllocUnknown + zoneUtil.iAllocOther);
492 bufAddrs = new TPhysAddr[bufPages];
495 TRACE(Kern::Printf("Failed to allocate an array for bufAddrs"));
500 // Update the page count as bufAddrs allocation may have caused the kernel
502 Kern::HalFunction(EHalGroupRam, ERamHalGetZoneUtilisation, (TAny*)iLowestPrefZoneIndex, (TAny*)&zoneUtil);
503 bufPages = iLowestPrefZonePages - (zoneUtil.iAllocFixed + zoneUtil.iAllocUnknown + zoneUtil.iAllocOther);
505 // Allocate discontiguous pages from the zone
506 r = Epoc::ZoneAllocPhysicalRam(iLowestPrefZoneId, bufPages, bufAddrs);
507 if (r != KErrNone && r != KErrNoMemory)
509 TRACE(Kern::Printf("Zone Alloc returns %d bufPages %x", r, bufPages));
512 // If we couldn't allocate all the required pages then empty the zone
514 if (r == KErrNoMemory)
516 r = iDefragReq.EmptyRamZone(iLowestPrefZoneId, TRamDefragRequest::KInheritPriority);
519 TRACE(Kern::Printf("Empty returns %d", r));
522 r = Epoc::ZoneAllocPhysicalRam(iLowestPrefZoneId, bufPages, bufAddrs);
525 TRACE(Kern::Printf("ZoneAlloc1 returns %d bufPages %x", r, bufPages));
530 // Create a chunk cleanup object which will free the physical RAM when the
532 iChunkCleanup = new TChunkCleanup(this, bufAddrs, bufPages);
535 TRACE(Kern::Printf("iChunkCleanup creation failed"));
536 r = Epoc::FreePhysicalRam(bufPages, bufAddrs);
539 TRACE(Kern::Printf("ERROR - freeing physical memory when chunkCleanup create failed"));
548 // Map the allocated buffer pages to a chunk so we can use it.
549 createInfo.iType = TChunkCreateInfo::ESharedKernelSingle; // could also be ESharedKernelMultiple
550 createInfo.iMaxSize = bufPages << iPageShift;
551 createInfo.iMapAttr = EMapAttrFullyBlocking; // Non-cached - See TMappingAttributes for all options
552 createInfo.iOwnsMemory = EFalse; // Must be false as the physical RAM has already been allocated
553 createInfo.iDestroyedDfc = iChunkCleanup;
554 r = Kern::ChunkCreate(createInfo, iBufChunk, chunkAddr, mapAttr);
557 TRACE(Kern::Printf("ChunkCreate returns %d size %x pages %x", r, createInfo.iMaxSize, bufPages));
561 // Map the physical memory to the chunk
562 r = Kern::ChunkCommitPhysical(iBufChunk, 0, createInfo.iMaxSize, bufAddrs);
565 TRACE(Kern::Printf("CommitPhys returns %d", r));
569 // Now that the RAM is mapped into a chunk get the kernel-side virtual
570 // base address of the buffer.
571 r = Kern::ChunkAddress(iBufChunk, 0, createInfo.iMaxSize, bufBaseAddr);
573 // Using bufBaseAddr a real driver may now do something with the buffer. We'll just return.
581 Claims the lowest preference zone and maps it to a shared chunk.
583 Real drivers would not need to determine which zone to allocate from as they
584 will know the zone's ID.
586 @return KErrNone on success, otherwise one of the system wide error codes.
588 TInt DDefragChannel::DoClaimLowestZone()
591 TChunkCreateInfo createInfo;
592 TLinAddr bufBaseAddr;
594 TUint32 mapAttr = NULL;
598 if (iBufChunk != NULL)
599 {// The buffer chunk is already mapped so can't use again until it is
600 // freed/closed. Wait a short while for it to be freed as it may be in the
601 // process of being destroyed.
602 if (WaitForIdle() != KErrNone || iBufChunk != NULL)
603 {// chunk still hasn't been freed so can't proceed.
609 // Claim the zone, the base address of which will be stored in the local bufBase.
610 r = iDefragReq.ClaimRamZone(iLowestPrefZoneId, bufBase, TRamDefragRequest::KInheritPriority);
613 TRACE(Kern::Printf("Claim returns %d", r));
617 // Create a chunk cleanup object which will free the physical RAM when the
619 bufBytes = iLowestPrefZonePages << iPageShift;
620 iChunkCleanup = new TChunkCleanup(this, bufBase, bufBytes);
623 TRACE(Kern::Printf("chunkCleanup creation failed"));
624 r = Epoc::FreePhysicalRam(bufBytes, bufBase);
627 TRACE(Kern::Printf("ERROR - freeing physical memory when chunkCleanup create failed"));
636 // Map the allocated buffer pages to a chunk so we can use it.
637 createInfo.iType = TChunkCreateInfo::ESharedKernelSingle; // could also be ESharedKernelMultiple
638 createInfo.iMaxSize = bufBytes;
639 createInfo.iMapAttr = EMapAttrFullyBlocking; // Non-cached - See TMappingAttributes for all options
640 createInfo.iOwnsMemory = EFalse; // Must be false as the physical RAM has already been allocated
641 createInfo.iDestroyedDfc = iChunkCleanup;
642 r = Kern::ChunkCreate(createInfo, iBufChunk, chunkAddr, mapAttr);
645 TRACE(Kern::Printf("ChunkCreate returns %d size %x bytes %x", r, createInfo.iMaxSize, bufBytes));
649 // Map the physically contiguous memory to the chunk
650 r = Kern::ChunkCommitPhysical(iBufChunk, 0, createInfo.iMaxSize, bufBase);
653 TRACE(Kern::Printf("CommitPhys returns %d", r));
657 // Now that the RAM is mapped into a chunk get the kernel-side virtual
658 // base address of the buffer.
659 r = Kern::ChunkAddress(iBufChunk, 0, createInfo.iMaxSize, bufBaseAddr);
661 // Using bufBaseAddr a real driver may now do something with the buffer. We'll just return.
669 Determine the lowest preference zone.
671 @return KErrNone on success or KErrNotFound if there is only one zone.
673 TInt DDefragChannel::FindLowestPrefZone()
676 TInt r = Kern::HalFunction(EHalGroupRam, ERamHalGetZoneCount, (TAny*)&zoneCount, NULL);
681 {// Only one zone so can't move pages anywhere or empty a zone
685 SRamZoneConfig zoneConfig;
686 SRamZoneUtilisation zoneUtil;
687 Kern::HalFunction(EHalGroupRam, ERamHalGetZoneConfig, (TAny*)0, (TAny*)&zoneConfig);
688 Kern::HalFunction(EHalGroupRam, ERamHalGetZoneUtilisation, (TAny*)0, (TAny*)&zoneUtil);
689 TUint lowestPref = zoneConfig.iPref;
690 TUint lowestFreePages = zoneUtil.iFreePages;
691 iLowestPrefZoneIndex = 0;
692 iLowestPrefZoneId = zoneConfig.iZoneId;
694 for (; i < zoneCount; i++)
696 Kern::HalFunction(EHalGroupRam, ERamHalGetZoneConfig, (TAny*)i, (TAny*)&zoneConfig);
697 Kern::HalFunction(EHalGroupRam, ERamHalGetZoneUtilisation, (TAny*)i, (TAny*)&zoneUtil);
698 // When zones have the same preference the zone higher in the zone list is picked.
// NOTE: per the comparison below, a larger iPref value denotes a less-preferred zone.
699 if (zoneConfig.iPref > lowestPref ||
700 (zoneConfig.iPref == lowestPref && zoneUtil.iFreePages >= lowestFreePages))
702 lowestPref = zoneConfig.iPref;
703 lowestFreePages = zoneUtil.iFreePages;
704 iLowestPrefZoneIndex = i;
705 iLowestPrefZoneId = zoneConfig.iZoneId;
708 // Now that we know the current least preferable zone store its size.
709 Kern::HalFunction(EHalGroupRam, ERamHalGetZoneConfig, (TAny*)iLowestPrefZoneIndex, (TAny*)&zoneConfig);
710 iLowestPrefZonePages = zoneConfig.iPhysPages;
711 TRACE(Kern::Printf("LowestPrefZone %x size %x", iLowestPrefZoneId, iLowestPrefZonePages));
717 DFC callback called when a defrag operation has completed.
719 @param aSelf A pointer to the DDefragChannel that requested the defrag operation
721 void DDefragChannel::DefragCompleteDfc(TAny* aSelf)
723 // Just forward to the non-static method on the channel.
724 ((DDefragChannel*)aSelf)->DefragComplete();
729 Invoked by the DFC callback which is called when a defrag
730 operation has completed.
732 void DDefragChannel::DefragComplete()
734 TRACE(Kern::Printf(">DDefragChannel::DefragComplete"));
735 TInt result = iDefragReq.Result();
736 TRACE(Kern::Printf("complete code %d", result));
738 Kern::SemaphoreWait(*iDefragSemaphore);
740 Kern::QueueRequestComplete(iRequestThread, iCompleteReq, result);
741 iRequestThread->AsyncClose();
742 iRequestThread = NULL;
744 Kern::SemaphoreSignal(*iDefragSemaphore);
746 TRACE(Kern::Printf("<DDefragChannel::DefragComplete"));
747 // Close the handle on this channel - WARNING this channel may be
748 // deleted immediately after this call so don't access any members
756 @return KErrNone on success or one of the system wide error codes.
758 TInt DDefragChannel::DoChunkClose()
760 if (iBufChunk == NULL)
761 {// Someone tried to close the chunk before using it
765 // Rely on the chunk cleanup object being called as that
766 // is what will actually free the physical RAM committed to the chunk.
767 Kern::ChunkClose(iBufChunk);
773 The chunk has now been destroyed so reset the pointers to allow a new
776 void DDefragChannel::ChunkDestroyed()
// NOTE(review): atomic stores — presumably this runs on the cleanup DFC thread while
// channel methods may concurrently read these pointers; confirm the threading model.
778 __e32_atomic_store_ord_ptr(&iBufChunk, 0);
779 __e32_atomic_store_ord_ptr(&iChunkCleanup, 0);
784 Construct a Shared Chunk cleanup object which will free the chunk's discontiguous
785 physical memory when a chunk is destroyed.
787 @param aDevice The device to inform when the chunk is destroyed.
788 @param aBufAddrs The physical base addresses of each of the chunk's memory pages.
789 @param aBufPages The total number of the chunk's pages.
791 TChunkCleanup::TChunkCleanup(DDefragChannel* aDevice, TPhysAddr* aBufAddrs, TUint aBufPages)
792 : TDfc((TDfcFn)TChunkCleanup::ChunkDestroyed,this,Kern::SvMsgQue(),0),
793 iBufAddrs(aBufAddrs),
795 iBufContiguous(EFalse),
801 Construct a Shared Chunk cleanup object which will free the chunk's contiguous
802 physical memory when a chunk is destroyed.
804 @param aDevice The device to inform when the chunk is destroyed.
805 @param aBufBase The physical base address of the chunk's memory.
806 @param aBufBytes The total number of the chunk's bytes.
808 TChunkCleanup::TChunkCleanup(DDefragChannel* aDevice, TPhysAddr aBufBase, TUint aBufBytes)
809 : TDfc((TDfcFn)TChunkCleanup::ChunkDestroyed,this,Kern::SvMsgQue(),0),
812 iBufContiguous(ETrue),
817 Callback function which is called when the DFC runs, i.e. when a chunk is destroyed,
818 and frees the physical memory allocated when the chunk was created.
820 @param aSelf Pointer to the cleanup object associated with the chunk that has
823 void TChunkCleanup::ChunkDestroyed(TChunkCleanup* aSelf)
825 aSelf->DoChunkDestroyed();
827 // We've finished so now delete ourself
833 The chunk has been destroyed so free the physical RAM that was allocated
834 for its use and inform iDevice that it has been destroyed.
836 void TChunkCleanup::DoChunkDestroyed()
// Contiguous buffer: iBufSize holds the byte count (see the iBufSize member comment).
840 __NK_ASSERT_ALWAYS(Epoc::FreePhysicalRam(iBufBase, iBufSize) == KErrNone);
// Discontiguous buffer: iBufSize holds the page count for the iBufAddrs array.
844 __NK_ASSERT_ALWAYS(Epoc::FreePhysicalRam(iBufSize, iBufAddrs) == KErrNone);
848 {// Allow iDevice to perform any cleanup it requires for this chunk.
849 iDevice->ChunkDestroyed();
855 Remove the device so its ChunkDestroyed() method isn't invoked when the chunk is
858 void TChunkCleanup::RemoveDevice()
// NOTE(review): atomic store — presumably may race with DoChunkDestroyed() reading
// iDevice on the cleanup DFC thread; confirm.
860 __e32_atomic_store_ord_ptr(&iDevice, 0);