// Update contrib.
1 // Copyright (c) 2005-2009 Nokia Corporation and/or its subsidiary(-ies).
2 // All rights reserved.
3 // This component and the accompanying materials are made available
4 // under the terms of the License "Eclipse Public License v1.0"
5 // which accompanies this distribution, and is available
6 // at the URL "http://www.eclipse.org/legal/epl-v10.html".
8 // Initial Contributors:
9 // Nokia Corporation - initial contribution.
// Forward declarations used by the pager interfaces below.
25 class DMemModelThread;
26 class DMemoryMappingBase;
// Accessor: number of free pages currently held in the live page list.
// NOTE(review): the enclosing braces of this inline function are elided in
// this capture; the visible lines are retained verbatim.
35 FORCE_INLINE TUint NumberOfFreePages()
37 return iNumberOfFreePages;
// Accessor: number of dirty (modified, not yet cleaned) pages in the cache.
// NOTE(review): braces, the declaration of 'ret' and the return statement
// are elided in this capture; the visible lines are retained verbatim.
40 FORCE_INLINE TUint NumberOfDirtyPages()
44 ret = iNumberOfDirtyPages;
// Mark aPageInfo as writable. Only the first transition to dirty increments
// iNumberOfDirtyPages, so the counter tracks dirty pages, not mappings.
// NOTE(review): closing brace(s) of this inline function are elided in this
// capture; the visible lines are retained verbatim.
49 FORCE_INLINE void SetWritable(SPageInfo& aPageInfo)
51 if (!aPageInfo.IsDirty())
52 {// This is the first mapping to write to the page so increase the
54 aPageInfo.SetWritable();
55 iNumberOfDirtyPages++;
// Mark aPageInfo as clean and decrement the dirty-page count.
// Debug builds assert the counter is non-zero and that the page is dirty.
// NOTE(review): braces and the SetClean() call on the page info are elided
// in this capture; the visible lines are retained verbatim.
59 FORCE_INLINE void SetClean(SPageInfo& aPageInfo)
61 __NK_ASSERT_DEBUG(iNumberOfDirtyPages);
62 __NK_ASSERT_DEBUG(aPageInfo.IsDirty());
64 iNumberOfDirtyPages--;
68 Remove RAM pages from the cache and return them to the system's free pool.
71 This is called by class Mmu when it requires more free RAM to meet an
74 @param aNumPages The number of pages to free up.
75 @return True if all pages could be freed, false otherwise
76 @pre RamAlloc mutex held.
78 TBool GetFreePages(TInt aNumPages);
82 Attempts to rejuvenate or page in the page to the mapping that took the page fault.
84 @param aPc Address of instruction causing the fault.
85 @param aFaultAddress Address of memory access which faulted.
86 @param aFaultAsid The asid of the faulting thread's process.
87 @param aAccessPermissions Bitmask of values from enum TAccessPermissions, which
88 indicates the permissions required by faulting memory access.
89 @param aMapInstanceCount The instance count of the mapping when it took the page fault.
90 @param aThread The thread that took the page fault.
91 @param aExceptionInfo The processor specific exception info.
93 @return KErrNone if the page is now accessible, otherwise one of the system wide error codes.
95 TInt HandlePageFault( TLinAddr aPc, TLinAddr aFaultAddress, TUint aFaultAsid, TUint aFaultIndex,
96 TUint aAccessPermissions, DMemoryObject* aMemory, DMemoryMapping* aMapping,
97 TUint aMapInstanceCount, DThread* aThread, TAny* aExceptionInfo);
110 static void Fault(TFault aFault);
113 Get state of live page list.
115 void GetLiveListInfo(SVMCacheInfo& aInfo);
118 Resize the live page list.
120 TInt ResizeLiveList(TUint aMinimumPageCount, TUint aMaximumPageCount);
123 Recalculate live list size.
125 TInt ResizeLiveList();
128 Flush (unmap) all memory which is demand paged.
129 This reduces the live page list to a minimum.
134 Give pages to paging system for managing.
136 void DonatePages(TUint aCount, TPhysAddr* aPages);
139 Reclaim pages from paging system which were previously donated with DonatePages.
141 @param aCount Number of pages.
142 @param aPages Array of pages (as stored in an RPageArray).
144 @return KErrNone if successful.
145 KErrNoMemory if paging system doesn't have enough spare pages. This will leave some or all of the pages still managed by the pager.
146 KErrNotFound if some of the pages were not actually being managed by the pager.
148 TInt ReclaimPages(TUint aCount, TPhysAddr* aPages);
151 Called by class Mmu whenever a page of RAM is freed. The page state will be EUnused.
152 If the page was being used by the pager then this gives it the opportunity to update
153 any internal state. If the pager wishes to retain ownership of the page then it must
154 return the result KErrNone, any other value will cause the page to be returned to the
157 TInt PageFreed(SPageInfo* aPageInfo);
160 // following public members for use by memory managers...
164 Allocate a number of RAM pages to store demand paged content.
165 These pages are obtained from...
167 1. An unused page in the live page list.
168 2. The systems free pool.
169 3. The oldest page from the live page list.
171 TInt PageInAllocPages(TPhysAddr* aPages, TUint aCount, Mmu::TRamAllocFlags aAllocFlags);
174 Free a number of RAM pages allocated by PageInAllocPages.
176 void PageInFreePages(TPhysAddr* aPages, TUint aCount);
179 Called to add a new page to the live list after a fault has occurred.
181 @param aPageInfo The page.
184 @post MmuLock held (but may have been released by this function)
186 void PagedIn(SPageInfo* aPageInfo);
189 @param aPageInfo The page.
190 @param aPinArgs Owner of a replacement page which will be used to substitute for the pinned page.
193 @post MmuLock held (but may have been released by this function)
195 void PagedInPinned(SPageInfo* aPageInfo, TPinArgs& aPinArgs);
199 @post MmuLock left unchanged.
201 void PagedInUnneeded(SPageInfo* aPageInfo);
204 @param aPageInfo The page to unpin.
205 @param aPinArgs The resources used for pinning. The replacement pages allocated
206 to this will be increased for each page which became completely
210 @post MmuLock held (but may have been released by this function)
212 void Unpin(SPageInfo* aPageInfo, TPinArgs& aPinArgs);
215 @param aPageInfo The page to pin. Must be page being demand paged.
216 @param aPinArgs Owner of a replacement page which will be used to substitute for the pinned page.
219 @post MmuLock held (but may have been released by this function)
221 void Pin(SPageInfo* aPageInfo, TPinArgs& aPinArgs);
226 @post MmuLock held (but may have been released by this function)
228 void RejuvenatePageTable(TPte* aPt);
232 TBool ReservePages(TUint aRequiredCount, TUint& aCount);
236 void UnreservePages(TUint& aCount);
239 Enumeration of instrumented paging events which only require the
240 SPageInfo object as an argument.
246 EEventPageInUnneeded,
256 EEventPagePageTableAlloc
260 Signal the occurrence of an event of type TEventSimple.
262 void Event(TEventSimple aEvent, SPageInfo* aPageInfo);
265 Enumeration of instrumented paging events which require the faulting address
266 and program counter as arguments.
268 enum TEventWithAddresses
275 Signal the occurrence of an event of type TEventWithAddresses.
277 void Event(TEventWithAddresses aEvent, SPageInfo* aPageInfo, TLinAddr aPc, TLinAddr aFaultAddress, TUint aAccessPermissions);
280 Get the pager's event info data.
282 void GetEventInfo(SVMEventInfo& aInfoOut);
285 Reset the pager's event info data.
287 void ResetEventInfo();
290 Attempt to discard the specified page.
292 @param aOldPageInfo The page info of the page to discard.
293 @param aBlockZoneId The ID of the RAM zone not to allocate any required new page into.
294 @param aBlockRest Set to ETrue when we don't want the allocator to search for new pages if the RAM
295 zone with ID==aBlockZoneId is encountered, i.e. a general RAM defrag operation.
297 TInt DiscardPage(SPageInfo* aOldPageInfo, TUint aBlockZoneId, TBool aBlockRest);
301 Update any live list links to replace the old page with the new page.
302 This is to be used when a page has been moved.
304 @param aOldPageInfo The page info of the page to replace.
305 @param aNewPageInfo The page info of the page to be used instead of the old page.
307 void ReplacePage(SPageInfo& aOldPageInfo, SPageInfo& aNewPageInfo);
311 // following public members for use by TPinArgs...
316 TBool AllocPinReplacementPages(TUint aNumPages);
320 void FreePinReplacementPages(TUint aNumPages);
324 Add a page to the head of the live page list. I.e. make it the 'youngest' page.
327 @post MmuLock left unchanged.
329 void AddAsYoungestPage(SPageInfo* aPageInfo);
332 Mark a page as type EUnused and add it to the end of the live page list.
333 I.e. make it the 'oldest' page, so that it is the first page to be reused.
336 @post MmuLock left unchanged.
338 void AddAsFreePage(SPageInfo* aPageInfo);
341 Remove a page from live page list.
342 Its paged state is set to EUnpaged.
345 @post MmuLock left unchanged.
347 void RemovePage(SPageInfo* aPageInfo);
350 Remove the oldest page from the live page list and perform #StealPage.
353 @post MmuLock left unchanged.
355 SPageInfo* StealOldestPage();
358 Steal a page from the memory object (if any) which is using the page.
359 If successful the returned page will be in the EUnknown state and the
360 cache state for the page is indeterminate. This is the same state as
361 if the page had been allocated by Mmu::AllocRam.
363 @pre RamAlloc mutex held
365 @post MmuLock held (but may have been released by this function)
367 TInt StealPage(SPageInfo* aPageInfo);
370 Restrict the access permissions for a page.
372 @param aPageInfo The page.
373 @param aRestriction The restriction type to apply.
375 TInt RestrictPage(SPageInfo* aPageInfo, TRestrictPagesType aRestriction);
378 Get a RAM page from the system's free pool and add it to the live list as a free page.
380 @return False if out of memory;
381 true otherwise, though new free page may still have already been used.
384 @post MmuLock held (but may have been released by this function)
386 TBool TryGrowLiveList();
389 Get a RAM page from the system's free pool.
391 @pre RamAllocLock held.
393 @return The page or NULL if no page is available.
395 SPageInfo* GetPageFromSystem(Mmu::TRamAllocFlags aAllocFlags, TUint aBlockZoneId=KRamZoneInvalidId, TBool aBlockRest=EFalse);
398 Put a page back on the system's free pool.
400 @pre RamAllocLock held.
402 void ReturnPageToSystem();
405 Put a specific page back on the system's free pool.
407 @pre RamAllocLock held.
409 void ReturnPageToSystem(SPageInfo& aPageInfo);
412 Allocate a RAM page to store demand paged content.
413 This tries to obtain a RAM page from the following places:
414 1. An unused page in the live page list.
415 2. The systems free pool.
416 3. The oldest page from the live page list.
418 SPageInfo* PageInAllocPage(Mmu::TRamAllocFlags aAllocFlags);
421 If the number of young pages exceeds that specified by iYoungOldRatio then a
422 single page is made 'old'. Call this after adding a new 'young' page.
425 @post MmuLock held (but may have been released by this function)
430 If HaveTooManyPages() then return them to the system.
432 void RemoveExcessPages();
435 @return True if pager has too many pages, false otherwise.
437 TBool HaveTooManyPages();
440 @return True if pager has its maximum number of pages, false otherwise.
442 TBool HaveMaximumPages();
445 Attempt to rejuvenate a page in which a page fault occurred.
447 @param aOsAsid Address space ID in which fault occurred.
448 @param aAddress Address of memory access which faulted.
449 @param aAccessPermissions Bitmask of values from enum TAccessPermissions, which
450 indicates the permissions required by faulting memory access.
451 @param aPc Address of instruction causing the fault. (Used for tracing.)
452 @param aMapping The mapping that maps the page that took the fault.
453 @param aMapInstanceCount The instance count of the mapping when the page fault occurred.
454 @param aThread The thread that took the page fault.
455 @param aExceptionInfo The processor specific exception info.
457 @return KErrNone if the page was remapped, KErrAbort if the mapping has been reused or detached,
458 KErrNotFound if it may be possible to page in the page.
460 TInt TryRejuvenate( TInt aOsAsid, TLinAddr aAddress, TUint aAccessPermissions, TLinAddr aPc,
461 DMemoryMappingBase* aMapping, TUint aMapInstanceCount, DThread* aThread,
462 TAny* aExceptionInfo);
465 Reserve one page for guaranteed locking use.
466 Increments iReservePageCount if successful.
468 @return True if operation was successful.
473 Called when a realtime thread takes a paging fault.
474 Checks whether it's OK for the thread to take the fault.
475 @return KErrNone if the paging fault should be further processed
477 TInt CheckRealtimeThreadFault(DThread* aThread, TAny* aExceptionInfo);
480 Attempt to find the page table entry and page info for a page in the specified mapping.
482 @param aOsAsid The OsAsid of the process that owns the mapping.
483 @param aAddress The linear address of the page.
484 @param aMapping The mapping that maps the linear address.
485 @param aMapInstanceCount The instance count of the mapping.
486 @param[out] aPte Will return a pointer to the page table entry for the page.
487 @param[out] aPageInfo Will return a pointer to the page info for the page.
489 @return KErrNone on success, KErrAbort when the mapping is now invalid, KErrNotFound when
490 the page table or page info can't be found.
492 TInt PteAndInfoFromLinAddr( TInt aOsAsid, TLinAddr aAddress, DMemoryMappingBase* aMapping,
493 TUint aMapInstanceCount, TPte*& aPte, SPageInfo*& aPageInfo);
496 Check consistency of live list.
504 TUint iMinYoungPages; ///< Minimum number of young pages in live list required for correct functioning.
505 TUint iAbsoluteMinPageCount;///< Absolute minimum number of pages in live to meet algorithm constraints
507 TUint iMinimumPageCount; /**< Minimum size for the live page list, including locked pages */
508 TUint iMaximumPageCount; /**< Maximum size for the live page list, including locked pages */
509 TUint16 iYoungOldRatio; /**< Ratio of young to old pages in the live page list */
510 SDblQue iYoungList; /**< Head of 'young' page list. */
511 TUint iYoungCount; /**< Number of young pages */
512 SDblQue iOldList; /**< Head of 'old' page list. */
513 TUint iOldCount; /**< Number of old pages */
514 #ifdef _USE_OLDEST_LISTS
515 SDblQue iOldestCleanList; /**< Head of 'oldestClean' page list. */
516 TUint iOldestCleanCount; /**< Number of 'oldestClean' pages */
517 SDblQue iOldestDirtyList; /**< Head of 'oldestDirty' page list. */
518 TUint iOldestDirtyCount; /**< Number of 'oldestDirty' pages */
519 TUint16 iOldOldestRatio; /**< Ratio of old pages to oldest to clean and dirty in the live page list*/
521 TUint iNumberOfFreePages;
522 TUint iNumberOfDirtyPages; /**< The total number of dirty pages in the paging cache. Protected by MmuLock */
523 TUint iInitMinimumPageCount;/**< Initial value for iMinimumPageCount */
524 TUint iInitMaximumPageCount;/**< Initial value for iMaximumPageCount */
525 TUint iReservePageCount; /**< Number of pages reserved for locking */
526 TUint iMinimumPageLimit; /**< Minimum size for iMinimumPageCount, not including locked pages.
527 iMinimumPageCount >= iMinimumPageLimit + iReservePageCount */
528 SVMEventInfo iEventInfo;
530 #ifdef __DEMAND_PAGING_BENCHMARKS__
532 void RecordBenchmarkData(TPagingBenchmark aBm, TUint32 aStartTime, TUint32 aEndTime);
533 void ResetBenchmarkData(TPagingBenchmark aBm);
534 SPagingBenchmarkInfo iBenchmarkInfo[EMaxPagingBm];
535 #endif //__DEMAND_PAGING_BENCHMARKS__
// The single system-wide pager instance; defined in the implementation file.
538 extern DPager ThePager;
// Benchmark helpers: START_PAGING_BENCHMARK snapshots NKern::FastCounter()
// into a local, and END_PAGING_BENCHMARK(bm) records the elapsed interval
// against benchmark 'bm' via ThePager. When __DEMAND_PAGING_BENCHMARKS__ is
// not defined both macros expand to nothing.
// NOTE(review): the intervening '#else' line is elided in this capture.
541 #ifdef __DEMAND_PAGING_BENCHMARKS__
543 #define START_PAGING_BENCHMARK TUint32 _bmStart = NKern::FastCounter()
544 #define END_PAGING_BENCHMARK(bm) ThePager.RecordBenchmarkData(bm, _bmStart, NKern::FastCounter())
548 #define START_PAGING_BENCHMARK
549 #define END_PAGING_BENCHMARK(bm)
550 #endif // __DEMAND_PAGING_BENCHMARKS__
// Instrumentation hook for paging events that need only the SPageInfo.
// Each case emits a TRACEP line and a BTrace record; page-in events also
// bump the iEventInfo.iPageInReadCount counter. The final assert catches
// unhandled event values in debug builds.
// NOTE(review): the 'switch' header, 'break' statements and several
// '#ifdef/#endif' lines are elided in this capture; the visible case bodies
// are retained verbatim.
553 FORCE_INLINE void DPager::Event(TEventSimple aEvent, SPageInfo* aPageInfo)
557 case EEventPageInNew:
558 TRACEP(("DP: %O PageIn 0x%08x",TheCurrentThread,aPageInfo->PhysAddr()));
560 BTraceContext12(BTrace::EPaging,BTrace::EPagingPageIn,aPageInfo->PhysAddr(),aPageInfo->Owner(),aPageInfo->Index());
562 ++iEventInfo.iPageInReadCount;
565 case EEventPageInAgain:
566 TRACEP(("DP: %O PageIn (again) 0x%08x",TheCurrentThread,aPageInfo->PhysAddr()));
568 BTraceContext4(BTrace::EPaging,BTrace::EPagingMapPage,aPageInfo->PhysAddr());
572 case EEventPageInUnneeded:
573 TRACEP(("DP: %O PageIn (unneeded) 0x%08x",TheCurrentThread,aPageInfo->PhysAddr()));
575 BTraceContext0(BTrace::EPaging,BTrace::EPagingPageInUnneeded);
579 case EEventPageInFree:
580 TRACEP(("DP: %O PageInFree 0x%08x",TheCurrentThread,aPageInfo->PhysAddr()));
582 BTraceContext4(BTrace::EPaging,BTrace::EPagingPageInFree,aPageInfo->PhysAddr());
587 TRACEP(("DP: %O PageOut 0x%08x",TheCurrentThread,aPageInfo->PhysAddr()));
589 BTraceContext4(BTrace::EPaging,BTrace::EPagingPageOut,aPageInfo->PhysAddr());
594 TRACEP(("DP: %O Aged 0x%08x",TheCurrentThread,aPageInfo->PhysAddr()));
595 #ifdef BTRACE_PAGING_VERBOSE
596 BTraceContext4(BTrace::EPaging,BTrace::EPagingAged,aPageInfo->PhysAddr());
601 TRACEP(("DP: %O Pin 0x%08x count=%d",TheCurrentThread,aPageInfo->PhysAddr(),aPageInfo->PinCount()));
603 BTraceContext8(BTrace::EPaging,BTrace::EPagingPageLock,aPageInfo->PhysAddr(),aPageInfo->PinCount());
607 case EEventPageUnpin:
608 TRACEP(("DP: %O Unpin 0x%08x count=%d",TheCurrentThread,aPageInfo->PhysAddr(),aPageInfo->PinCount()));
610 BTraceContext8(BTrace::EPaging,BTrace::EPagingPageUnlock,aPageInfo->PhysAddr(),aPageInfo->PinCount());
614 case EEventPageDonate:
615 TRACEP(("DP: %O Donate 0x%08x",TheCurrentThread,aPageInfo->PhysAddr()));
617 BTraceContext12(BTrace::EPaging,BTrace::EPagingDonatePage,aPageInfo->PhysAddr(),aPageInfo->Owner(),aPageInfo->Index());
621 case EEventPageReclaim:
622 TRACEP(("DP: %O Reclaim 0x%08x",TheCurrentThread,aPageInfo->PhysAddr()));
624 BTraceContext4(BTrace::EPaging,BTrace::EPagingReclaimPage,aPageInfo->PhysAddr());
628 case EEventPageAgedClean:
629 TRACEP(("DP: %O AgedClean 0x%08x",TheCurrentThread,aPageInfo->PhysAddr()));
630 #ifdef BTRACE_PAGING_VERBOSE
631 BTraceContext4(BTrace::EPaging,BTrace::EPagingAgedClean,aPageInfo->PhysAddr());
635 case EEventPageAgedDirty:
636 TRACEP(("DP: %O AgedDirty 0x%08x",TheCurrentThread,aPageInfo->PhysAddr()));
637 #ifdef BTRACE_PAGING_VERBOSE
638 BTraceContext4(BTrace::EPaging,BTrace::EPagingAgedDirty,aPageInfo->PhysAddr());
642 case EEventPagePageTableAlloc:
643 TRACEP(("DP: %O PageTableAlloc 0x%08x",TheCurrentThread,aPageInfo->PhysAddr()));
645 BTraceContext4(BTrace::EPaging,BTrace::EPagingPageTableAlloc,aPageInfo->PhysAddr());
650 __NK_ASSERT_DEBUG(0);
// Instrumentation hook for paging events that also carry the faulting
// address and program counter. Both visible cases emit TRACEP/BTrace records
// and increment iEventInfo.iPageFaultCount; the final assert catches
// unhandled event values in debug builds.
// NOTE(review): the 'switch' header, 'break' statements and '#ifdef' guards
// are elided in this capture; the visible lines are retained verbatim.
657 FORCE_INLINE void DPager::Event(TEventWithAddresses aEvent, SPageInfo* aPageInfo, TLinAddr aPc, TLinAddr aFaultAddress, TUint aAccessPermissions)
661 case EEventPageInStart:
662 TRACEP(("DP: %O HandlePageFault 0x%08x 0x%08x %d",TheCurrentThread,aFaultAddress,aPc,aAccessPermissions));
664 BTraceContext12(BTrace::EPaging,BTrace::EPagingPageInBegin,aFaultAddress,aPc,aAccessPermissions);
666 ++iEventInfo.iPageFaultCount;
669 case EEventPageRejuvenate:
670 TRACEP(("DP: %O Rejuvenate 0x%08x 0x%08x 0x%08x %d",TheCurrentThread,aPageInfo->PhysAddr(),aFaultAddress,aPc,aAccessPermissions));
672 BTraceContext12(BTrace::EPaging,BTrace::EPagingRejuvenate,aPageInfo->PhysAddr(),aFaultAddress,aPc);
674 ++iEventInfo.iPageFaultCount;
678 __NK_ASSERT_DEBUG(0);
// Per-device multiplier for the request pool sized below (2 requests per
// drive that supports paging).
686 Multiplier for number of request objects in pool per drive that supports paging.
688 const TInt KPagingRequestsPerDevice = 2;
// Forward declarations for the paging-request classes defined below.
691 class DPagingRequest;
692 class DPageReadRequest;
693 class DPageWriteRequest;
// Pool of DPagingRequest objects owned by one paging device. Requests are
// split into two TGroup sub-pools, one for reads and one for writes; the
// Acquire* methods hand out a request covering (aMemory, aIndex, aCount).
// NOTE(review): class braces, access specifiers and the TGroup nested-class
// header are elided in this capture; the visible lines are retained verbatim.
696 A pool of paging requests for use by a single paging device.
698 class DPagingRequestPool : public DBase
701 DPagingRequestPool(TUint aNumPageReadRequest,TUint aNumPageWriteRequest);
702 DPageReadRequest* AcquirePageReadRequest(DMemoryObject* aMemory, TUint aIndex, TUint aCount);
703 DPageWriteRequest* AcquirePageWriteRequest(DMemoryObject* aMemory, TUint aIndex, TUint aCount);
705 ~DPagingRequestPool();
710 TGroup(TUint aNumRequests);
711 DPagingRequest* FindCollision(DMemoryObject* aMemory, TUint aIndex, TUint aCount);
712 DPagingRequest* GetRequest(DMemoryObject* aMemory, TUint aIndex, TUint aCount);
713 void Signal(DPagingRequest* aRequest);
716 DPagingRequest** iRequests;
719 TGroup iPageReadRequests;
720 TGroup iPageWriteRequests;
722 friend class DPagingRequest;
723 friend class DPageReadRequest;
724 friend class DPageWriteRequest;
// Base class for one in-flight paging request: a media-driver message, a
// mutex for synchronisation/priority inheritance, a temporary mapping for
// the pages being transferred, and the (memory, index, count) region the
// request currently covers (iUseRegion*).
// NOTE(review): class braces and access specifiers are elided in this
// capture; the visible lines are retained verbatim.
729 Resources needed to service a paging request.
731 class DPagingRequest : public SDblQueLink
734 DPagingRequest(DPagingRequestPool::TGroup& aPoolGroup);
738 void SetUse(DMemoryObject* aMemory, TUint aIndex, TUint aCount);
739 TBool CheckUse(DMemoryObject* aMemory, TUint aIndex, TUint aCount);
740 TBool IsCollision(DMemoryObject* aMemory, TUint aIndex, TUint aCount);
741 TLinAddr MapPages(TUint aColour, TUint aCount, TPhysAddr* aPages);
742 void UnmapPages(TBool aIMBRequired);
744 TThreadMessage iMessage; /**< Used by the media driver to queue requests */
745 DMutex* iMutex; /**< A mutex for synchronisation and priority inheritance. */
746 TInt iUsageCount;/**< How many threads are using or waiting for this object. */
747 TLinAddr iBuffer; /**< A buffer to read compressed data into. Size is EMaxPages+1 pages.*/
749 Mmu::TTempMapping iTempMapping;
751 DPagingRequestPool::TGroup& iPoolGroup;
752 // used to identify memory request is used for...
753 DMemoryObject* iUseRegionMemory;
754 TUint iUseRegionIndex;
755 TUint iUseRegionCount;
// Request object for paging content in (page-in). Instances are pooled and
// never deleted (private destructor); iAllocNext tracks how many have been
// created so ReservedPagesRequired() can size the reserve.
// NOTE(review): class braces, access specifiers and constructor body are
// elided in this capture; the visible lines are retained verbatim.
760 Resources needed to service a page in request.
762 class DPageReadRequest : public DPagingRequest
765 inline DPageReadRequest(DPagingRequestPool::TGroup& aPoolGroup)
766 : DPagingRequest(aPoolGroup)
773 static TUint ReservedPagesRequired();
775 ~DPageReadRequest(); // can't delete
777 TLinAddr iBuffer; /**< A buffer to read compressed data into. Size is EMaxPages+1 pages.*/
779 DMemoryObject* iMemory;
781 static TInt iAllocNext;
// Pages to reserve for read requests: one EMaxPages-sized buffer per
// request object allocated so far.
// NOTE(review): the enclosing braces of this inline function are elided in
// this capture; the visible lines are retained verbatim.
785 FORCE_INLINE TUint DPageReadRequest::ReservedPagesRequired()
787 return iAllocNext*EMaxPages;
// Request object for paging content out (page-out). Like DPageReadRequest,
// instances are pooled and never deleted (private destructor).
// NOTE(review): class braces, access specifiers and constructor body are
// elided in this capture; the visible lines are retained verbatim.
792 Resources needed to service a page out request.
794 class DPageWriteRequest : public DPagingRequest
797 inline DPageWriteRequest(DPagingRequestPool::TGroup& aPoolGroup)
798 : DPagingRequest(aPoolGroup)
806 ~DPageWriteRequest(); // can't delete
808 static TInt iAllocNext;