First public contribution.
1 // Copyright (c) 2008-2009 Nokia Corporation and/or its subsidiary(-ies).
2 // All rights reserved.
3 // This component and the accompanying materials are made available
4 // under the terms of the License "Eclipse Public License v1.0"
5 // which accompanies this distribution, and is available
6 // at the URL "http://www.eclipse.org/legal/epl-v10.html".
8 // Initial Contributors:
9 // Nokia Corporation - initial contribution.
16 #include <plat_priv.h>
Manages the swap via the data paging device.
// NOTE(review): fragment of the DSwapManager class declaration; the
// 'class DSwapManager' line and several enum/member lines fall outside
// this view (source line numbers are non-contiguous).
// Per-page swap state flags stored in each DMemoryObject's paging
// manager data word. EUninitialised marks a page whose swap space is
// reserved but which has never been written out.
37 EUninitialised = 1 << 1,
// The upper bits of the paging manager data word hold the index of the
// swap page assigned to the memory-object page (see ESwapIndexShift).
42 ESwapIndexMask = 0xffffffff << ESwapIndexShift,
// Second-phase construction: attaches this swap manager to the data
// paging device and builds the swap-usage tracking structures.
45 TInt Create(DPagingDevice* aDevice);
// Swap-space accounting for a region [aStartIndex, aStartIndex+aPageCount)
// of a memory object. Callers must hold the memory object's lock.
47 TInt ReserveSwap(DMemoryObject* aMemory, TUint aStartIndex, TUint aPageCount);
48 TInt UnreserveSwap(DMemoryObject* aMemory, TUint aStartIndex, TUint aPageCount);
49 TBool IsReserved(DMemoryObject* aMemory, TUint aStartIndex, TUint aPageCount);
// Page transfers between RAM and the swap backing store.
51 TInt ReadSwapPages(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TLinAddr aLinAddr, DPageReadRequest* aRequest, TPhysAddr* aPhysAddrs);
52 TInt WriteSwapPages(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TLinAddr aLinAddr, DPageWriteRequest* aRequest);
// NOTE(review): the definition names this parameter aSwapIndex (it is a
// swap page index, not the raw swap data word) - the name aSwapData in
// this declaration is misleading; consider renaming for consistency.
53 void DoDeleteNotify(TUint aSwapData);
// Swap usage reporting and low/good free-space threshold notification.
55 void GetSwapInfo(SVMSwapInfo& aInfoOut);
56 TInt SetSwapThresholds(const SVMSwapThresholds& aThresholds);
57 void CheckSwapThresholds(TUint aInitial, TUint aFinal);
// The data paging device used to access the swap backing store.
60 DPagingDevice* iDevice;
// One bit per swap page, tracking which swap pages are allocated.
61 TBitMapAllocator* iBitMap;
// NOTE(review): 'Theshold' is a misspelling of 'Threshold'; left as-is
// because renaming the members would touch every user of this class.
// Thresholds are in pages; crossing them triggers Kern::AsyncNotifyChanges.
64 TUint iSwapThesholdLow;
65 TUint iSwapThesholdGood;
// Thread message used for delete-notify calls to the media driver.
66 TThreadMessage iDelNotifyMsg;
Manager for demand paged memory objects which contain writeable data.
The contents of the memory are written to a backing store whenever its
pages are 'paged out'.
// NOTE(review): fragment - several declaration lines (access specifiers,
// braces) are not visible in this view.
77 class DDataPagedMemoryManager : public DPagedMemoryManager
80 // from DMemoryManager...
// Reserve/release swap space when pages are committed/decommitted.
81 virtual TInt Alloc(DMemoryObject* aMemory, TUint aIndex, TUint aCount);
82 virtual void Free(DMemoryObject* aMemory, TUint aIndex, TUint aCount);
// Not implemented for data paging (see definition: asserts and returns
// KErrNotSupported).
83 virtual TInt Wipe(DMemoryObject* aMemory);
// Write a dirty page to swap and mark it clean.
84 virtual TInt CleanPage(DMemoryObject* aMemory, SPageInfo* aPageInfo, TPhysAddr*& aPageArrayEntry);
86 // Methods inherited from DPagedMemoryManager
88 virtual TInt InstallPagingDevice(DPagingDevice* aDevice);
// Acquire request objects from the device's request pool; the pool
// blocks until a request is available.
89 virtual TInt AcquirePageReadRequest(DPageReadRequest*& aRequest, DMemoryObject* aMemory, TUint aIndex, TUint aCount);
90 virtual TInt AcquirePageWriteRequest(DPageWriteRequest*& aRequest, DMemoryObject* aMemory, TUint aIndex, TUint aCount);
// Page-in/page-out: map the physical pages temporarily and delegate the
// transfer to the swap manager.
91 virtual TInt ReadPages(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TPhysAddr* aPages, DPageReadRequest* aRequest);
92 virtual TInt WritePages(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TPhysAddr* aPages, DPageWriteRequest* aRequest);
93 virtual TBool IsAllocated(DMemoryObject* aMemory, TUint aIndex, TUint aCount);
// Thin wrappers around the swap manager, taking the RamAllocLock.
96 void GetSwapInfo(SVMSwapInfo& aInfoOut);
97 TInt SetSwapThresholds(const SVMSwapThresholds& aThresholds);
101 The paging device used for accessing the backing store.
102 This is set by #InstallPagingDevice.
104 DPagingDevice* iDevice;
107 The instance of #DSwapManager being used by this manager.
109 DSwapManager* iSwapManager;
113 The single instance of this manager class.
115 static DDataPagedMemoryManager TheManager;
// Definition of the singleton, plus the globally visible pointer used by
// the rest of the memory model to reach it.
119 DDataPagedMemoryManager DDataPagedMemoryManager::TheManager;
120 DPagedMemoryManager* TheDataPagedMemoryManager = &DDataPagedMemoryManager::TheManager;
124 Create a swap manager.
126 @param aDevice The demand paging device for access to the swap.
128 TInt DSwapManager::Create(DPagingDevice* aDevice)
// The swap-index field and the flag bits share one word, so they must
// not overlap - verified at compile time.
130 __ASSERT_COMPILE(!(ESwapIndexMask & ESwapFlagsMask));
// Create() must only run once per manager.
131 __NK_ASSERT_DEBUG(iDevice == NULL);
// NOTE(review): iDevice is presumably assigned from aDevice in lines not
// visible in this fragment, since it is dereferenced just below - confirm
// against the full source.
134 // Create the structures required to track the swap usage.
// iSwapSize is in read units; convert to a page count.
135 TUint swapPages = (iDevice->iSwapSize << iDevice->iReadUnitShift) >> KPageShift;
136 // Can't have more swap pages than we can map.
137 __NK_ASSERT_DEBUG(swapPages<=DMemoryObject::KMaxPagingManagerData);
// The swap index must also fit in the bits above ESwapIndexShift.
138 __NK_ASSERT_DEBUG(swapPages<=(KMaxTUint>>ESwapIndexShift));
140 if ((TheMmu.TotalPhysicalRamPages() << 2) < swapPages)
141 {// The swap is limited to a maximum of 4 times the amount of RAM.
// One bit per swap page; ETrue => all bits start free.
145 iBitMap = TBitMapAllocator::New(swapPages, ETrue);
147 {// Not enough RAM to keep track of the swap.
// Initially every swap page is unallocated.
150 iBitMapFree = swapPages;
157 Reserve some swap pages for the requested region of the memory object
159 @param aMemory The memory object to reserve pages for.
160 @param aStartIndex The page index in the memory object of the start of the region.
161 @param aPageCount The number of pages to reserve.
163 @return KErrNone on success, KErrNoMemory if not enough swap space available.
164 @pre aMemory's lock is held.
165 @post aMemory's lock is held.
167 TInt DSwapManager::ReserveSwap(DMemoryObject* aMemory, TUint aStartIndex, TUint aPageCount)
169 __NK_ASSERT_DEBUG(MemoryObjectLock::IsHeld(aMemory));
170 __NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
172 const TUint indexEnd = aStartIndex + aPageCount;
173 TUint index = aStartIndex;
// Debug-only sanity pass: no page in the region may already carry swap
// state.
176 for (; index < indexEnd; index++)
177 {// This page shouldn't already be in use.
179 __NK_ASSERT_DEBUG(!(aMemory->PagingManagerData(index) & ESwapFlagsMask));
// All-or-nothing: fail before touching any state if the whole region
// cannot be reserved, and tell the system swap is exhausted.
184 if (iBitMapFree < aPageCount)
186 Kern::AsyncNotifyChanges(EChangesOutOfMemory);
189 // Reserve the required swap space and mark each page as allocated and uninitialised.
190 TUint initFree = iBitMapFree;
191 iBitMapFree -= aPageCount;
192 for (index = aStartIndex; index < indexEnd; index++)
194 // Grab MmuLock to stop manager data being accessed.
196 TUint swapData = aMemory->PagingManagerData(index);
197 __NK_ASSERT_DEBUG(!(swapData & EAllocated));
// No swap location is assigned yet; a slot is only allocated when the
// page is first written out (see WriteSwapPages).
198 swapData = EAllocated | EUninitialised;
199 aMemory->SetPagingManagerData(index, swapData);
// Notify if the free-space count crossed the low threshold.
203 CheckSwapThresholds(initFree, iBitMapFree);
209 Unreserve swap pages for the requested region of the memory object.
211 @param aMemory The memory object to unreserve pages for.
212 @param aStartIndex The page index in the memory object of the start of the region.
213 @param aPageCount The number of pages to unreserve.
215 @return The number of pages freed.
216 @pre aMemory's lock is held.
217 @post aMemory's lock is held.
219 TInt DSwapManager::UnreserveSwap(DMemoryObject* aMemory, TUint aStartIndex, TUint aPageCount)
221 __NK_ASSERT_DEBUG(MemoryObjectLock::IsHeld(aMemory));
222 __NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
224 TUint initFree = iBitMapFree;
// NOTE(review): freedPages is presumably incremented per freed page in
// lines not visible in this fragment; it feeds iBitMapFree below and is
// documented as the return value - confirm against the full source.
225 TUint freedPages = 0;
226 const TUint indexEnd = aStartIndex + aPageCount;
227 for (TUint index = aStartIndex; index < indexEnd; index++)
229 // Grab MmuLock to stop manager data being accessed.
231 TUint swapData = aMemory->PagingManagerData(index);
232 TUint swapIndex = swapData >> ESwapIndexShift;
233 TBool notifyDelete = EFalse;
234 if (swapData & EAllocated)
236 if (swapData & ESaved)
// A copy of this page exists on the media: release its swap slot and
// remember to tell the media driver the data is no longer needed.
238 notifyDelete = ETrue;
239 iBitMap->Free(swapIndex);
// Clear all swap state for this page.
242 aMemory->SetPagingManagerData(index, 0);
// A page with no EAllocated flag must carry no stale swap state.
246 __NK_ASSERT_DEBUG(swapData == 0);
// Delete-notify may block on the media, so it is issued for the pages
// flagged above (presumably after dropping MmuLock - the lock release
// is in lines not shown here).
252 DoDeleteNotify(swapIndex);
254 iBitMapFree += freedPages;
255 CheckSwapThresholds(initFree, iBitMapFree);
261 Determine whether the specified pages in the memory object have swap reserved for them.
263 @param aMemory The memory object that owns the pages.
264 @param aStartIndex The first index of the pages to check.
265 @param aPageCount The number of pages to check.
267 @return ETrue if swap is reserved for all the pages, EFalse otherwise.
269 TBool DSwapManager::IsReserved(DMemoryObject* aMemory, TUint aStartIndex, TUint aPageCount)
270 {// MmuLock required to protect manager data.
271 __NK_ASSERT_DEBUG(MmuLock::IsHeld());
// The region must lie entirely within the memory object.
272 __NK_ASSERT_DEBUG(aStartIndex < aMemory->iSizeInPages);
273 __NK_ASSERT_DEBUG(aStartIndex + aPageCount <= aMemory->iSizeInPages);
275 const TUint indexEnd = aStartIndex + aPageCount;
// Every page in the region must have the EAllocated flag set; any one
// without it makes the whole region unreserved.
276 for (TUint index = aStartIndex; index < indexEnd; index++)
278 if (!(aMemory->PagingManagerData(index) & DSwapManager::EAllocated))
279 {// This page is not allocated by swap manager.
288 Read from the swap the specified pages associated with the memory object.
290 @param aMemory The memory object to read the pages for
291 @param aIndex The index of the first page within the memory object.
292 @param aCount The number of pages to read.
293 @param aLinAddr The address to copy the pages to.
294 @param aRequest The request to use for the read.
295 @param aPhysAddrs An array of the physical addresses for each page to read in.
297 TInt DSwapManager::ReadSwapPages(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TLinAddr aLinAddr, DPageReadRequest* aRequest, TPhysAddr* aPhysAddrs)
// Media transfers are sized in read units (1 << iReadUnitShift bytes).
300 const TUint readUnitShift = iDevice->iReadUnitShift;
301 TUint readSize = KPageSize >> readUnitShift;
// The request's message is reused for the device call; const_cast is
// needed because the request exposes it as const.
302 TThreadMessage* msg = const_cast<TThreadMessage*>(&aRequest->iMessage);
304 // Determine the wipe byte values for uninitialised pages.
305 TUint allocFlags = aMemory->RamAllocFlags();
306 TBool wipePages = !(allocFlags & Mmu::EAllocNoWipe);
// 0x03 is the default fill for uninitialised paged-out data - presumably
// a recognisable debug pattern; confirm against memory-model docs.
307 TUint8 wipeByte = (allocFlags & Mmu::EAllocUseCustomWipeByte) ? (allocFlags >> Mmu::EAllocWipeByteShift) & 0xff : 0x03;
// Process each page independently: advance the destination address and
// the physical-address cursor in step with the page index.
309 const TUint indexEnd = aIndex + aCount;
310 for (TUint index = aIndex; index < indexEnd; index++, aLinAddr += KPageSize, aPhysAddrs++)
312 START_PAGING_BENCHMARK;
314 MmuLock::Lock(); // MmuLock required for atomic access to manager data.
315 TUint swapData = aMemory->PagingManagerData(index);
317 if (!(swapData & EAllocated))
318 {// This page is not committed to the memory object
322 if (swapData & EUninitialised)
323 {// This page has not been written to yet so don't read from swap
324 // just wipe it if required.
328 memset((TAny*)aLinAddr, wipeByte, KPageSize);
// Only pages previously saved to swap reach this point.
333 __NK_ASSERT_DEBUG(swapData & ESaved);
334 TUint swapIndex = swapData >> ESwapIndexShift;
335 // OK to release as if the object's data is decommitted the pager
336 // will check that data is still valid before mapping it.
// Convert the swap page index to a media offset in read units.
338 TUint readStart = (swapIndex << KPageShift) >> readUnitShift;
339 START_PAGING_BENCHMARK;
340 r = iDevice->Read(msg, aLinAddr, readStart, readSize, DPagingDevice::EDriveDataPaging);
342 __KTRACE_OPT(KPANIC, Kern::Printf("DSwapManager::ReadSwapPages: error reading media at %08x + %x: %d", readStart << readUnitShift, readSize << readUnitShift, r));
343 __NK_ASSERT_DEBUG(r!=KErrNoMemory); // not allowed to allocate memory, therefore can't fail with KErrNoMemory
344 END_PAGING_BENCHMARK(EPagingBmReadDataMedia);
345 // TODO: Work out what to do if page in fails, unmap all pages????
// A failed page-in is currently fatal (asserts in all builds).
346 __NK_ASSERT_ALWAYS(r == KErrNone);
348 END_PAGING_BENCHMARK(EPagingBmReadDataPage);
356 Write the specified memory object's pages from the RAM into the swap.
358 @param aMemory The memory object who owns the pages.
359 @param aIndex The index within the memory object.
360 @param aCount The number of pages to write out.
361 @param aLinAddr The location of the pages to write out.
362 @param aRequest The demand paging request to use.
365 TInt DSwapManager::WriteSwapPages(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TLinAddr aLinAddr, DPageWriteRequest* aRequest)
366 {// The RamAllocLock prevents the object's swap pages being reassigned.
367 __NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
369 // Write the page out to the swap.
// Media transfers are sized in read units (1 << iReadUnitShift bytes).
371 const TUint readUnitShift = iDevice->iReadUnitShift;
372 TUint writeSize = KPageSize >> readUnitShift;
373 TThreadMessage* msg = const_cast<TThreadMessage*>(&aRequest->iMessage);
375 const TUint indexEnd = aIndex + aCount;
376 for (TUint index = aIndex; index < indexEnd; index++)
378 START_PAGING_BENCHMARK;
381 TUint swapData = aMemory->PagingManagerData(index);
382 // OK to release as ram alloc lock prevents manager data being updated.
384 if (!(swapData & EAllocated))
385 {// This page is being decommited from aMemory so it is clean/unrequired.
388 TInt swapIndex = swapData >> ESwapIndexShift;
389 if (swapData & ESaved)
390 {// An old version of this page has been saved to swap so free it now
391 // as it will be out of date.
392 iBitMap->Free(swapIndex);
393 DoDeleteNotify(swapIndex);
395 // Get a new swap location for this page.
// Pages are never overwritten in place: each write-out gets a fresh
// slot, allocated from a rotating offset to spread wear over the swap.
396 swapIndex = iBitMap->AllocFrom(iAllocOffset);
// Allocation cannot fail - swap space was reserved up front in
// ReserveSwap().
397 __NK_ASSERT_DEBUG(swapIndex != -1 && swapIndex < iBitMap->iSize);
398 iAllocOffset = swapIndex + 1;
// Wrap the rotating allocation offset (the reset to 0 is in a line not
// visible in this fragment).
399 if (iAllocOffset == (TUint)iBitMap->iSize)
402 TUint writeOffset = (swapIndex << KPageShift) >> readUnitShift;
404 START_PAGING_BENCHMARK;
405 r = iDevice->Write(msg, aLinAddr, writeOffset, writeSize, EFalse);
407 __KTRACE_OPT(KPANIC, Kern::Printf("DSwapManager::WriteSwapPages: error writing media at %08x + %x: %d", writeOffset << readUnitShift, writeSize << readUnitShift, r));
408 __NK_ASSERT_DEBUG(r!=KErrNoMemory); // not allowed to allocate memory, therefore can't fail with KErrNoMemory
409 END_PAGING_BENCHMARK(EPagingBmWriteDataMedia);
411 // TODO: Work out what to do if page out fails.
// A failed page-out is currently fatal (asserts in all builds).
412 __NK_ASSERT_ALWAYS(r == KErrNone);
414 // The swap data should not have been modified.
415 __NK_ASSERT_DEBUG(swapData == aMemory->PagingManagerData(index));
416 // Store the new swap location and mark the page as saved.
417 swapData &= ~(EUninitialised | ESwapIndexMask);
418 swapData |= (swapIndex << ESwapIndexShift) | ESaved;
419 aMemory->SetPagingManagerData(index, swapData);
422 END_PAGING_BENCHMARK(EPagingBmWriteDataPage);
430 Notify the media driver that the page written to swap is no longer required.
432 void DSwapManager::DoDeleteNotify(TUint aSwapIndex)
434 // Ram Alloc lock prevents the swap location being assigned to another page.
435 __NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
// Compiled out entirely unless delete-notify support is enabled.
437 #ifdef __PAGING_DELETE_NOTIFY_ENABLED
438 const TUint readUnitShift = iDevice->iReadUnitShift;
439 const TUint size = KPageSize >> readUnitShift;
// Convert the swap page index to a media offset in read units.
440 TUint offset = (aSwapIndex << KPageShift) >> readUnitShift;
442 START_PAGING_BENCHMARK;
443 // Ignore the return value as this is just an optimisation that is not supported on all media.
444 (void)iDevice->DeleteNotify(&iDelNotifyMsg, offset, size);
445 END_PAGING_BENCHMARK(EPagingBmDeleteNotifyDataPage);
450 // Check swap thresholds and notify (see K::CheckFreeMemoryLevel)
// Compares the free-page count before (aInitial) and after (aFinal) an
// operation and raises change notifications only on threshold crossings,
// so repeated operations on the same side of a threshold stay silent.
451 void DSwapManager::CheckSwapThresholds(TUint aInitial, TUint aFinal)
// Crossed downwards through the low threshold: free memory is now low.
454 if (aFinal < iSwapThesholdLow && aInitial >= iSwapThesholdLow)
455 changes |= (EChangesFreeMemory | EChangesLowMemory);
// Crossed upwards through the good threshold: free memory recovered.
456 if (aFinal >= iSwapThesholdGood && aInitial < iSwapThesholdGood)
457 changes |= EChangesFreeMemory;
458 Kern::AsyncNotifyChanges(changes);
// Report total and free swap, both converted from pages to bytes.
463 void DSwapManager::GetSwapInfo(SVMSwapInfo& aInfoOut)
465 __NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
466 aInfoOut.iSwapSize = iBitMap->iSize << KPageShift;
467 aInfoOut.iSwapFree = iBitMapFree << KPageShift;
// Validate and store the low/good swap thresholds, converting byte
// values to page counts (rounding up).
471 TInt DSwapManager::SetSwapThresholds(const SVMSwapThresholds& aThresholds)
473 __NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
// The low threshold must not exceed the good threshold.
474 if (aThresholds.iLowThreshold > aThresholds.iGoodThreshold)
476 TInt low = (aThresholds.iLowThreshold + KPageSize - 1) >> KPageShift;
477 TInt good = (aThresholds.iGoodThreshold + KPageSize - 1) >> KPageShift;
// Thresholds beyond the total swap size are rejected.
478 if (good > iBitMap->iSize)
480 iSwapThesholdLow = low;
481 iSwapThesholdGood = good;
// Register aDevice as the system's (single) data paging device, create
// the swap manager on it, and advertise data paging in the memory-model
// attributes. Respects the kernel's data-paging policy configuration.
487 TInt DDataPagedMemoryManager::InstallPagingDevice(DPagingDevice* aDevice)
489 TRACEB(("DDataPagedMemoryManager::InstallPagingDevice(0x%08x)",aDevice));
491 TUint dataPolicy = TheSuperPage().KernelConfigFlags() & EKernelConfigDataPagingPolicyMask;
492 TRACEB(("Data Paging Policy = %d", dataPolicy >> EKernelConfigDataPagingPolicyShift));
493 if (dataPolicy == EKernelConfigDataPagingPolicyNoPaging)
494 {// No paging allowed so don't register the device.
498 // Store the device, blocking any other devices from installing.
// Atomic compare-and-swap: only succeeds if no device was installed.
499 if (!NKern::CompareAndSwap((TAny*&)iDevice, (TAny*)NULL, (TAny*)aDevice))
500 {// Data paging device already installed.
501 __KTRACE_OPT2(KPAGING,KBOOT,Kern::Printf("**** Attempt to install more than one data paging device !!!!!!!! ****"));
502 return KErrAlreadyExists;
505 // Now we can determine the size of the swap, create the swap manager.
506 iSwapManager = new DSwapManager;
// Allocation failure here is fatal - paging cannot proceed without it.
507 __NK_ASSERT_ALWAYS(iSwapManager);
509 TInt r = iSwapManager->Create(iDevice);
511 {// Couldn't create the swap manager.
// Roll back the device registration so another device may install.
514 NKern::SafeSwap(NULL, (TAny*&)iDevice);
// Success: flag data paging as active in the memory-model attributes.
517 NKern::LockedSetClear(K::MemModelAttributes, 0, EMemModelAttrDataPaging);
// Obtain a page-read request object from the paging device's pool.
// The pool call blocks until a request object becomes available.
523 TInt DDataPagedMemoryManager::AcquirePageReadRequest(DPageReadRequest*& aRequest, DMemoryObject* aMemory, TUint aIndex, TUint aCount)
525 aRequest = iDevice->iRequestPool->AcquirePageReadRequest(aMemory,aIndex,aCount);
// Obtain a page-write request object from the paging device's pool.
530 TInt DDataPagedMemoryManager::AcquirePageWriteRequest(DPageWriteRequest*& aRequest, DMemoryObject* aMemory, TUint aIndex, TUint aCount)
532 aRequest = iDevice->iRequestPool->AcquirePageWriteRequest(aMemory,aIndex,aCount);
// Third-phase initialisation (body not visible in this fragment).
537 void DDataPagedMemoryManager::Init3()
// Commit pages to a data-paged memory object: revive any still-owned
// decommitted pages, then reserve backing swap space for the region.
542 TInt DDataPagedMemoryManager::Alloc(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
544 __NK_ASSERT_DEBUG(MemoryObjectLock::IsHeld(aMemory));
546 // re-initialise any decommitted pages which we may still own because they were pinned...
547 ReAllocDecommitted(aMemory,aIndex,aCount);
549 // Reserve the swap pages required.
// RamAllocLock is required by DSwapManager::ReserveSwap.
550 RamAllocLock::Lock();
551 TInt r = iSwapManager->ReserveSwap(aMemory, aIndex, aCount);
552 RamAllocLock::Unlock();
// Decommit pages from a data-paged memory object, releasing their swap
// space first and their RAM pages second.
558 void DDataPagedMemoryManager::Free(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
560 TRACE2(("DDataPagedMemoryManager::Free(0x%08x,0x%x,0x%x)", aMemory, aIndex, aCount));
561 __NK_ASSERT_DEBUG(MemoryObjectLock::IsHeld(aMemory));
563 // Unreserve the swap pages associated with the memory object. Do this before
564 // removing the page array entries to prevent a page fault reallocating these pages.
565 RamAllocLock::Lock();
566 TInt freed = iSwapManager->UnreserveSwap(aMemory, aIndex, aCount);
568 RamAllocLock::Unlock();
// Release the RAM pages themselves via the base-class helper.
570 DoFree(aMemory,aIndex,aCount);
575 @copydoc DMemoryManager::Wipe
576 @todo Not yet implemented.
577 Need to handle this smartly, e.g. throw RAM away and set to uninitialised
579 TInt DDataPagedMemoryManager::Wipe(DMemoryObject* aMemory)
// Panics in all builds; the return below is effectively unreachable and
// exists to satisfy the signature.
581 __NK_ASSERT_ALWAYS(0); // not implemented yet
583 return KErrNotSupported;
// Page-in entry point: temporarily map the physical pages and delegate
// the actual media read to the swap manager.
587 TInt DDataPagedMemoryManager::ReadPages(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TPhysAddr* aPages, DPageReadRequest* aRequest)
589 __NK_ASSERT_DEBUG(aRequest->CheckUse(aMemory,aIndex,aCount));
591 // Map pages temporarily so that we can copy into them.
592 const TLinAddr linAddr = aRequest->MapPages(aIndex, aCount, aPages);
594 TInt r = iSwapManager->ReadSwapPages(aMemory, aIndex, aCount, linAddr, aRequest, aPages);
596 // If the memory object allows executable mappings then an IMB (instruction memory barrier) is needed.
597 aRequest->UnmapPages(aMemory->IsExecutable());
// Page-out entry point: temporarily map the physical pages and delegate
// the actual media write to the swap manager.
603 TInt DDataPagedMemoryManager::WritePages(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TPhysAddr* aPages, DPageWriteRequest* aRequest)
605 __NK_ASSERT_DEBUG(aRequest->CheckUse(aMemory,aIndex,aCount));
607 // Map pages temporarily so that we can copy into them.
608 const TLinAddr linAddr = aRequest->MapPages(aIndex, aCount, aPages);
610 TInt r = iSwapManager->WriteSwapPages(aMemory, aIndex, aCount, linAddr, aRequest);
612 // If the memory object allows executable mappings then an IMB (instruction memory barrier) is needed.
613 aRequest->UnmapPages(aMemory->IsExecutable());
// Write a dirty page out to swap and mark it clean, detecting concurrent
// modification via the page-modifier token.
619 TInt DDataPagedMemoryManager::CleanPage(DMemoryObject* aMemory, SPageInfo* aPageInfo, TPhysAddr*& aPageArrayEntry)
// Nothing to do if the page is already clean.
621 if(aPageInfo->IsDirty()==false)
624 // shouldn't be asked to clean a page which is writable...
625 __NK_ASSERT_DEBUG(aPageInfo->IsWritable()==false);
627 // mark page as being modified by us...
628 TUint modifierInstance; // dummy variable used only for its storage address on the stack
629 aPageInfo->SetModifier(&modifierInstance);
631 // get info about page...
632 TUint index = aPageInfo->Index();
633 TPhysAddr physAddr = aPageInfo->PhysAddr();
635 // Release the mmu lock while we write out the page. This is safe as the
636 // RamAllocLock stops the physical address being freed from this object.
639 // get paging request object...
640 DPageWriteRequest* req;
641 TInt r = AcquirePageWriteRequest(req, aMemory, index, 1);
642 __NK_ASSERT_DEBUG(r==KErrNone); // we should always get a write request because the previous function blocks until it gets one
643 __NK_ASSERT_DEBUG(req); // we should always get a write request because the previous function blocks until it gets one
645 r = WritePages(aMemory, index, 1, &physAddr, req);
654 // check if page is clean...
// If another thread set a different modifier, or the page became
// writable again, the copy we wrote out may already be stale.
655 if(aPageInfo->CheckModified(&modifierInstance) || aPageInfo->IsWritable())
657 // someone else modified the page, or it became writable, so fail...
662 // page is now clean!
663 ThePager.SetClean(*aPageInfo);
// Query whether swap is reserved for the whole region; forwards to the
// swap manager.
670 TBool DDataPagedMemoryManager::IsAllocated(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
671 {// MmuLock required to protect manager data.
672 // DPagedMemoryManager::DoPageInDone() won't allow MmuLock to be released
673 // so can only cope with a maximum of KMaxPagesInOneGo.
674 __NK_ASSERT_DEBUG(MmuLock::IsHeld());
675 __NK_ASSERT_DEBUG(aCount <= KMaxPagesInOneGo);
677 return iSwapManager->IsReserved(aMemory, aIndex, aCount);
// Thread-safe wrapper: takes a critical section and the RamAllocLock
// around the swap manager's info query.
681 void DDataPagedMemoryManager::GetSwapInfo(SVMSwapInfo& aInfoOut)
683 NKern::ThreadEnterCS();
684 RamAllocLock::Lock();
685 iSwapManager->GetSwapInfo(aInfoOut);
686 RamAllocLock::Unlock();
687 NKern::ThreadLeaveCS();
// Thread-safe wrapper: takes a critical section and the RamAllocLock
// around the swap manager's threshold update.
691 TInt DDataPagedMemoryManager::SetSwapThresholds(const SVMSwapThresholds& aThresholds)
693 NKern::ThreadEnterCS();
694 RamAllocLock::Lock();
695 TInt r = iSwapManager->SetSwapThresholds(aThresholds);
696 RamAllocLock::Unlock();
697 NKern::ThreadLeaveCS();
// Kernel-global entry points: forward to the data-paged memory manager
// singleton. The downcast is safe because TheDataPagedMemoryManager
// always points at DDataPagedMemoryManager::TheManager.
702 void GetSwapInfo(SVMSwapInfo& aInfoOut)
704 ((DDataPagedMemoryManager*)TheDataPagedMemoryManager)->GetSwapInfo(aInfoOut);
708 TInt SetSwapThresholds(const SVMSwapThresholds& aThresholds)
710 return ((DDataPagedMemoryManager*)TheDataPagedMemoryManager)->SetSwapThresholds(aThresholds);