os/kernelhwsrv/kernel/eka/memmodel/epoc/flexible/mmu/mpager.h
     1 // Copyright (c) 2005-2009 Nokia Corporation and/or its subsidiary(-ies).
     2 // All rights reserved.
     3 // This component and the accompanying materials are made available
     4 // under the terms of the License "Eclipse Public License v1.0"
     5 // which accompanies this distribution, and is available
     6 // at the URL "http://www.eclipse.org/legal/epl-v10.html".
     7 //
     8 // Initial Contributors:
     9 // Nokia Corporation - initial contribution.
    10 //
    11 // Contributors:
    12 //
    13 // Description:
    14 //
    15 
    16 /**
    17  @file
    18  @internalComponent
    19 */
    20 
    21 #ifndef MPAGER_H
    22 #define MPAGER_H
    23 
    24 struct SVMCacheInfo;
    25 class DMemModelThread;
    26 class DMemoryMappingBase;
    27 
    28 class DPager
    29 	{
    30 public:
    31 	DPager();
    32 	void Init2();
    33 	void Init3();
    34 
    35 	FORCE_INLINE TUint NumberOfFreePages()
    36 		{
    37 		return iNumberOfFreePages;
    38 		}
    39 
    40 	FORCE_INLINE TUint NumberOfDirtyPages()
    41 		{
    42 		TUint ret;
    43 		MmuLock::Lock();
    44 		ret = iNumberOfDirtyPages;
    45 		MmuLock::Unlock();
    46 		return ret;
    47 		}
    48 	
    49 	FORCE_INLINE void SetWritable(SPageInfo& aPageInfo)
    50 		{
    51 		if (!aPageInfo.IsDirty())
    52 			{// This is the first mapping to write to the page so increase the 
    53 			// dirty page count.
    54 			aPageInfo.SetWritable();
    55 			iNumberOfDirtyPages++;
    56 			}
    57 		}
    58 	
    59 	FORCE_INLINE void SetClean(SPageInfo& aPageInfo)
    60 		{
    61 		__NK_ASSERT_DEBUG(iNumberOfDirtyPages);
    62 		__NK_ASSERT_DEBUG(aPageInfo.IsDirty());
    63 		aPageInfo.SetClean();
    64 		iNumberOfDirtyPages--;
    65 		}
    66 
    67 	/**
    68 	Remove RAM pages from the cache and return them to the system's free pool.
    69 	(Free them.)
    70 
    71 	This is called by class Mmu when it requires more free RAM to meet an
    72 	allocation request.
    73 
    74 	@param	aNumPages The number of pages to free up.
    75 	@return	True if all pages could be freed, false otherwise
    76 	@pre	RamAlloc mutex held.
    77 	*/
    78 	TBool GetFreePages(TInt aNumPages);
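	/*
	Example (an illustrative sketch, not part of the original header): the calling
	pattern implied by the documentation above. RamAllocLock is assumed to be the
	holder of the RamAlloc mutex used elsewhere in this memory model.

		RamAllocLock::Lock();
		TBool freed = ThePager.GetFreePages(aNumPages);	// try to release aNumPages to the free pool
		RamAllocLock::Unlock();
		if(!freed)
			{
			// not all pages could be freed, so the caller must fail or retry its allocation
			}
	*/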
    79 
    80 
    81 	/**
    82 	Attempts to rejuvenate the page, or page it in, for the mapping that took the page fault.
    83 
    84 	@param aPc					Address of instruction causing the fault.
    85 	@param aFaultAddress		Address of memory access which faulted.
    86 	@param aFaultAsid			The asid of the faulting thread's process.
       	@param aFaultIndex			The index of the faulting page within aMemory.
    87 	@param aAccessPermissions	Bitmask of values from enum TAccessPermissions, which
    88 								indicates the permissions required by the faulting memory access.
       	@param aMemory				The memory object which contains the faulting page.
       	@param aMapping				The mapping that maps the page which took the page fault.
    89 	@param aMapInstanceCount	The instance count of the mapping when it took the page fault.
    90 	@param aThread				The thread that took the page fault.
    91 	@param aExceptionInfo		The processor specific exception info.
    92 	
    93 	@return KErrNone if the page is now accessible, otherwise one of the system-wide error codes.
    94 	*/
    95 	TInt HandlePageFault(	TLinAddr aPc, TLinAddr aFaultAddress, TUint aFaultAsid, TUint aFaultIndex,
    96 							TUint aAccessPermissions, DMemoryObject* aMemory, DMemoryMapping* aMapping,
    97 							TUint aMapInstanceCount, DThread* aThread, TAny* aExceptionInfo);
    98 
    99 
   100 	/**
   101 	Fault enumeration
   102 	*/
   103 	enum TFault
   104 		{
   105 		};
   106 
   107 	/**
   108 	Fault the system.
   109 	*/
   110 	static void Fault(TFault aFault);
   111 
   112 	/**
   113 	Get state of live page list.
   114 	*/
   115 	void GetLiveListInfo(SVMCacheInfo& aInfo);
   116 
   117 	/**
   118 	Resize the live page list.
   119 	*/
   120 	TInt ResizeLiveList(TUint aMinimumPageCount, TUint aMaximumPageCount);
   121 
   122 	/**
   123 	Recalculate live list size.
   124 	*/
   125 	TInt ResizeLiveList();
   126 
   127 	/**
   128 	Flush (unmap) all memory which is demand paged.
   129 	This reduces the live page list to a minimum.
   130 	*/
   131 	void FlushAll();
   132 
   133 	/**
   134 	Give pages to paging system for managing.
   135 	*/
   136 	void DonatePages(TUint aCount, TPhysAddr* aPages);
   137 
   138 	/**
   139 	Reclaim pages from paging system which were previously donated with DonatePages.
   140 
   141 	@param aCount Number of pages.
   142 	@param aPages Array of pages (as stored in an RPageArray).
   143 
   144 	@return KErrNone if successful.
   145 			KErrNoMemory if the paging system does not have enough spare pages. This will leave some or all of the pages still managed by the pager.
   146 			KErrNotFound if some of the pages were not actually being managed by the pager.
   147 	*/
   148 	TInt ReclaimPages(TUint aCount, TPhysAddr* aPages);
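	/*
	Example (an illustrative sketch, not part of the original header): pairing
	DonatePages() with ReclaimPages(). The names count and pages are placeholders;
	the array is assumed to hold physical addresses as stored in an RPageArray.

		ThePager.DonatePages(count, pages);		// let the pager reuse the pages as cache
		// ... later, when the owner needs the page contents preserved again ...
		TInt r = ThePager.ReclaimPages(count, pages);
		if(r == KErrNoMemory)
			{
			// the pager could not replace every page; some remain managed by the pager
			}
	*/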
   149 
   150 	/**
   151 	Called by class Mmu whenever a page of RAM is freed. The page state will be EUnused.
   152 	If the page was being used by the pager then this gives it the opportunity to update
   153 	any internal state. If the pager wishes to retain ownership of the page then it must
   154 	return KErrNone; any other value will cause the page to be returned to the
   155 	system's free pool.
   156 	*/
   157 	TInt PageFreed(SPageInfo* aPageInfo);
   158 
   159 	//
   160 	// following public members for use by memory managers...
   161 	//
   162 
   163 	/**
   164 	Allocate a number of RAM pages to store demand paged content.
   165 	These pages are obtained from...
   166 
   167 	1. An unused page in the live page list.
   168 	2. The system's free pool.
   169 	3. The oldest page from the live page list.
   170 	*/
   171 	TInt PageInAllocPages(TPhysAddr* aPages, TUint aCount, Mmu::TRamAllocFlags aAllocFlags);
   172 
   173 	/**
   174 	Free a number of RAM pages allocated by PageInAllocPages.
   175 	*/
   176 	void PageInFreePages(TPhysAddr* aPages, TUint aCount);
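	/*
	Example (an illustrative sketch, not part of the original header): a memory
	manager allocating pages to hold paged-in content and releasing them again if
	the page-in fails. KMaxPagesInOneGo and the failure path are assumptions.

		TPhysAddr pages[KMaxPagesInOneGo];
		TInt r = ThePager.PageInAllocPages(pages, count, aAllocFlags);
		if(r == KErrNone)
			{
			// ... read the demand paged content into the pages; if that fails:
			ThePager.PageInFreePages(pages, count);
			}
	*/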
   177 
   178 	/**
   179 	Called to add a new page to the live list after a fault has occurred.
   180 
   181 	@param aPageInfo		The page.
   182 
   183 	@pre MmuLock held
   184 	@post MmuLock held (but may have been released by this function)
   185 	*/
   186 	void PagedIn(SPageInfo* aPageInfo);
   187 
   188 	/**
   189 	@param aPageInfo		The page.
   190 	@param aPinArgs			Owner of a replacement page which will be used to substitute for the pinned page.
   191 
   192 	@pre MmuLock held
   193 	@post MmuLock held (but may have been released by this function)
   194 	*/
   195 	void PagedInPinned(SPageInfo* aPageInfo, TPinArgs& aPinArgs);
   196 
   197 	/**
   198 	@pre MmuLock held
   199 	@post MmuLock left unchanged.
   200 	*/
   201 	void PagedInUnneeded(SPageInfo* aPageInfo);
   202 
   203 	/**
   204 	@param aPageInfo		The page to unpin.
   205 	@param aPinArgs			The resources used for pinning. The replacement pages allocated
   206 							to this will be increased for each page which became completely
   207 							unpinned.
   208 
   209 	@pre MmuLock held
   210 	@post MmuLock held (but may have been released by this function)
   211 	*/
   212 	void Unpin(SPageInfo* aPageInfo, TPinArgs& aPinArgs);
   213 
   214 	/**
   215 	@param aPageInfo		The page to pin. Must be a page that is being demand paged.
   216 	@param aPinArgs			Owner of a replacement page which will be used to substitute for the pinned page.
   217 
   218 	@pre MmuLock held
   219 	@post MmuLock held (but may have been released by this function)
   220 	*/
   221 	void Pin(SPageInfo* aPageInfo, TPinArgs& aPinArgs);
   222 
   223 
   224 	/**
   225 	@pre MmuLock held
   226 	@post MmuLock held (but may have been released by this function)
   227 	*/
   228 	void RejuvenatePageTable(TPte* aPt);
   229 
   230 	/**
   231 	*/
   232 	TBool ReservePages(TUint aRequiredCount, TUint& aCount);
   233 
   234 	/**
   235 	*/
   236 	void UnreservePages(TUint& aCount);
   237 
   238 	/**
   239 	Enumeration of instrumented paging events which only require the
   240 	SPageInfo object as an argument. 
   241 	*/
   242 	enum TEventSimple
   243 		{
   244 		EEventPageInNew,
   245 		EEventPageInAgain,
   246 		EEventPageInUnneeded,
   247 		EEventPageInFree,
   248 		EEventPageOut,
   249 		EEventPageAged,
   250 		EEventPagePin,
   251 		EEventPageUnpin,
   252 		EEventPageDonate,
   253 		EEventPageReclaim,
   254 		EEventPageAgedClean,
   255 		EEventPageAgedDirty,
   256 		EEventPagePageTableAlloc
   257 		};
   258 
   259 	/**
   260 	Signal the occurrence of an event of type TEventSimple.
   261 	*/
   262 	void Event(TEventSimple aEvent, SPageInfo* aPageInfo);
   263 
   264 	/**
   265 	Enumeration of instrumented paging events which require the faulting address
   266 	and program counter as arguments. 
   267 	*/
   268 	enum TEventWithAddresses
   269 		{
   270 		EEventPageInStart,
   271 		EEventPageRejuvenate
   272 		};
   273 
   274 	/**
   275 	Signal the occurrence of an event of type TEventWithAddresses.
   276 	*/
   277 	void Event(TEventWithAddresses aEvent, SPageInfo* aPageInfo, TLinAddr aPc, TLinAddr aFaultAddress, TUint aAccessPermissions);
   278 
   279 	/**
   280 	Get the pager's event info data.
   281 	*/
   282 	void GetEventInfo(SVMEventInfo& aInfoOut);
   283 
   284 	/**
   285 	Reset the pager's event info data.
   286 	*/
   287 	void ResetEventInfo();
   288 
   289 	/**
   290 	Attempt to discard the specified page.
   291 	
   292 	@param aOldPageInfo	The page info of the page to discard.
   293 	@param aBlockZoneId	The ID of the RAM zone that any required new page must not be allocated from.
   294 	@param aBlockRest	Set to ETrue to stop the allocator searching for new pages once the RAM
   295 						zone with ID==aBlockZoneId is encountered, e.g. during a general RAM defrag operation.
   296 	*/
   297 	TInt DiscardPage(SPageInfo* aOldPageInfo, TUint aBlockZoneId, TBool aBlockRest);
   298 
   299 
   300 	/**
   301 	Update any live list links to replace the old page with the new page.
   302 	This is to be used when a page has been moved.
   303 
   304 	@param aOldPageInfo	The page info of the page to replace.
   305 	@param aNewPageInfo	The page info of the page to be used instead of the old page.
   306 	*/
   307 	void ReplacePage(SPageInfo& aOldPageInfo, SPageInfo& aNewPageInfo);
   308 
   309 
   310 	//
   311 	// following public members for use by TPinArgs...
   312 	//
   313 
   314 	/**
   315 	*/
   316 	TBool AllocPinReplacementPages(TUint aNumPages);
   317 
   318 	/**
   319 	*/
   320 	void FreePinReplacementPages(TUint aNumPages);
   321 
   322 private:
   323 	/**
   324 	Add a page to the head of the live page list. I.e. make it the 'youngest' page.
   325 
   326 	@pre MmuLock held
   327 	@post MmuLock left unchanged.
   328 	*/
   329 	void AddAsYoungestPage(SPageInfo* aPageInfo);
   330 
   331 	/**
   332 	Mark a page as type EUnused and add it to the end of the live page list.
   333 	I.e. make it the 'oldest' page, so that it is the first page to be reused.
   334 
   335 	@pre MmuLock held
   336 	@post MmuLock left unchanged.
   337 	*/
   338 	void AddAsFreePage(SPageInfo* aPageInfo);
   339 
   340 	/**
   341 	Remove a page from the live page list.
   342 	Its paged state is set to EUnpaged.
   343 
   344 	@pre MmuLock held
   345 	@post MmuLock left unchanged.
   346 	*/
   347 	void RemovePage(SPageInfo* aPageInfo);
   348 
   349 	/**
   350 	Remove the oldest page from the live page list and perform #StealPage.
   351 
   352 	@pre MmuLock held
   353 	@post MmuLock left unchanged.
   354 	*/
   355 	SPageInfo* StealOldestPage();
   356 
   357 	/**
   358 	Steal a page from the memory object (if any) which is using the page.
   359 	If successful the returned page will be in the EUnknown state and the
   360 	cache state for the page is indeterminate. This is the same state as
   361 	if the page had been allocated by Mmu::AllocRam.
   362 
   363 	@pre RamAlloc mutex held
   364 	@pre MmuLock held
   365 	@post MmuLock held (but may have been released by this function)
   366 	*/
   367 	TInt StealPage(SPageInfo* aPageInfo);
   368 
   369 	/**
   370 	Restrict the access permissions for a page.
   371 
   372 	@param aPageInfo	The page.
   373 	@param aRestriction	The restriction type to apply.
   374 	*/
   375 	TInt RestrictPage(SPageInfo* aPageInfo, TRestrictPagesType aRestriction);
   376 
   377 	/**
   378 	Get a RAM page from the system's free pool and add it to the live list as a free page.
   379 
   380 	@return False if out of memory;
   381 			true otherwise, though the new free page may already have been used.
   382 
   383 	@pre MmuLock held
   384 	@post MmuLock held (but may have been released by this function)
   385 	*/
   386 	TBool TryGrowLiveList();
   387 
   388 	/**
   389 	Get a RAM page from the system's free pool.
   390 
   391  	@pre RamAllocLock held.
   392 
   393 	@return The page or NULL if no page is available.
   394 	*/
   395 	SPageInfo* GetPageFromSystem(Mmu::TRamAllocFlags aAllocFlags, TUint aBlockZoneId=KRamZoneInvalidId, TBool aBlockRest=EFalse);
   396 
   397 	/**
   398 	Put a page back on the system's free pool.
   399 
   400 	@pre RamAllocLock held.
   401 	*/
   402 	void ReturnPageToSystem();
   403 
   404 	/**
   405 	Put a specific page back on the system's free pool.
   406 
   407 	@pre RamAllocLock held.
   408 	*/
   409 	void ReturnPageToSystem(SPageInfo& aPageInfo);
   410 
   411 	/**
   412 	Allocate a RAM page to store demand paged content.
   413 	This tries to obtain a RAM page from the following places:
   414 	1. An unused page in the live page list.
   415 	2. The system's free pool.
   416 	3. The oldest page from the live page list.
   417 	*/
   418 	SPageInfo* PageInAllocPage(Mmu::TRamAllocFlags aAllocFlags);
   419 
   420 	/**
   421 	If the number of young pages exceeds that specified by iYoungOldRatio then a
   422 	single page is made 'old'. Call this after adding a new 'young' page.
   423 
   424 	@pre MmuLock held
   425 	@post MmuLock held (but may have been released by this function)
   426 	*/
   427 	void BalanceAges();
   428 
   429 	/**
   430 	If the pager has too many pages (HaveTooManyPages()) then return the excess to the system.
   431 	*/
   432 	void RemoveExcessPages();
   433 
   434 	/**
   435 	@return True if pager has too many pages, false otherwise.
   436 	*/
   437 	TBool HaveTooManyPages();
   438 
   439 	/**
   440 	@return True if pager has its maximum number of pages, false otherwise.
   441 	*/
   442 	TBool HaveMaximumPages();
   443 
   444 	/**
   445 	Attempt to rejuvenate a page in which a page fault occurred.
   446 
   447 	@param aOsAsid 				Address space ID in which fault occurred.
   448 	@param aAddress				Address of memory access which faulted.
   449 	@param aAccessPermissions 	Bitmask of values from enum TAccessPermissions, which
   450 								indicates the permissions required by the faulting memory access.
   451 	@param aPc				  	Address of instruction causing the fault. (Used for tracing.)
   452 	@param aMapping				The mapping that maps the page that took the fault.
   453 	@param aMapInstanceCount	The instance count of the mapping when the page fault occurred.
   454 	@param aThread				The thread that took the page fault.
   455 	@param aExceptionInfo		The processor specific exception info.
   456 	
   457 	@return KErrNone if the page was remapped, KErrAbort if the mapping has been reused or detached,
   458 	KErrNotFound if it may be possible to page in the page.
   459 	*/	
   460 	TInt TryRejuvenate(	TInt aOsAsid, TLinAddr aAddress, TUint aAccessPermissions, TLinAddr aPc,
   461 						DMemoryMappingBase* aMapping, TUint aMapInstanceCount, DThread* aThread, 
   462 						TAny* aExceptionInfo);
   463 
   464 	/**
   465 	Reserve one page for guaranteed locking use.
   466 	Increments iReservePageCount if successful.
   467 
   468 	@return True if operation was successful.
   469 	*/
   470 	TBool ReservePage();
   471 
   472 	/**
   473 	Called when a realtime thread takes a paging fault.
   474 	Checks whether it is OK for the thread to take the fault.
   475 	@return KErrNone if the paging fault should be further processed
   476 	*/
   477 	TInt CheckRealtimeThreadFault(DThread* aThread, TAny* aExceptionInfo);
   478 	
   479 	/**
   480 	Attempt to find the page table entry and page info for a page in the specified mapping.
   481 
   482 	@param aOsAsid				The OsAsid of the process that owns the mapping.
   483 	@param aAddress				The linear address of the page.
   484 	@param aMapping				The mapping that maps the linear address.
   485 	@param aMapInstanceCount	The instance count of the mapping.
   486 	@param[out] aPte			Will return a pointer to the page table entry for the page.
   487 	@param[out] aPageInfo		Will return a pointer to the page info for the page.
   488 
   489 	@return KErrNone on success, KErrAbort when the mapping is now invalid, KErrNotFound when
   490 	the page table or page info can't be found.
   491 	*/
   492 	TInt PteAndInfoFromLinAddr(	TInt aOsAsid, TLinAddr aAddress, DMemoryMappingBase* aMapping, 
   493 								TUint aMapInstanceCount, TPte*& aPte, SPageInfo*& aPageInfo);
   494 #ifdef _DEBUG
   495 	/**
   496 	Check consistency of live list.
   497 	*/
   498 	TBool CheckLists();
   499 
   500 	void TraceCounts();
   501 #endif
   502 
   503 private:
   504 	TUint iMinYoungPages;		///< Minimum number of young pages in the live list required for correct functioning.
   505 	TUint iAbsoluteMinPageCount;///< Absolute minimum number of pages in the live list to meet algorithm constraints
   506 private:
   507 	TUint iMinimumPageCount;	/**< Minimum size for the live page list, including locked pages */
   508 	TUint iMaximumPageCount;	/**< Maximum size for the live page list, including locked pages */
   509 	TUint16 iYoungOldRatio;		/**< Ratio of young to old pages in the live page list */
   510 	SDblQue iYoungList;			/**< Head of 'young' page list. */
   511 	TUint iYoungCount;			/**< Number of young pages */
   512 	SDblQue iOldList;			/**< Head of 'old' page list. */
   513 	TUint iOldCount;			/**< Number of old pages */
   514 #ifdef _USE_OLDEST_LISTS
   515 	SDblQue iOldestCleanList;	/**< Head of 'oldestClean' page list. */
   516 	TUint iOldestCleanCount;	/**< Number of 'oldestClean' pages */
   517 	SDblQue iOldestDirtyList;	/**< Head of 'oldestDirty' page list. */
   518 	TUint iOldestDirtyCount;	/**< Number of 'oldestDirty' pages */
   519 	TUint16 iOldOldestRatio;	/**< Ratio of old pages to the oldest (clean and dirty) pages in the live page list */
   520 #endif
   521 	TUint iNumberOfFreePages;
   522 	TUint iNumberOfDirtyPages;	/**< The total number of dirty pages in the paging cache. Protected by MmuLock */
   523 	TUint iInitMinimumPageCount;/**< Initial value for iMinimumPageCount */
   524 	TUint iInitMaximumPageCount;/**< Initial value for iMaximumPageCount  */
   525 	TUint iReservePageCount;	/**< Number of pages reserved for locking */
   526 	TUint iMinimumPageLimit;	/**< Minimum size for iMinimumPageCount, not including locked pages.
   527 								     iMinimumPageCount >= iMinimumPageLimit + iReservePageCount */
   528 	SVMEventInfo iEventInfo;
   529 
   530 #ifdef __DEMAND_PAGING_BENCHMARKS__
   531 public:
   532 	void RecordBenchmarkData(TPagingBenchmark aBm, TUint32 aStartTime, TUint32 aEndTime);
   533 	void ResetBenchmarkData(TPagingBenchmark aBm);
   534 	SPagingBenchmarkInfo iBenchmarkInfo[EMaxPagingBm];
   535 #endif //__DEMAND_PAGING_BENCHMARKS__
   536 	};
   537 
   538 extern DPager ThePager;
   539 
   540 
   541 #ifdef __DEMAND_PAGING_BENCHMARKS__
   542 
   543 #define START_PAGING_BENCHMARK TUint32 _bmStart = NKern::FastCounter()
   544 #define END_PAGING_BENCHMARK(bm) ThePager.RecordBenchmarkData(bm, _bmStart, NKern::FastCounter())
   545 
   546 #else
   547 
   548 #define START_PAGING_BENCHMARK
   549 #define END_PAGING_BENCHMARK(bm)
   550 #endif // __DEMAND_PAGING_BENCHMARKS__
   551 
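/*
Example (an illustrative sketch, not part of the original header): timing a paging
operation with the macros above. The benchmark index EPagingBmReadMedia and the
DoRead() call are placeholders; real indices come from the TPagingBenchmark
enumeration defined elsewhere.

	START_PAGING_BENCHMARK;
	TInt r = DoRead();	// hypothetical media read performed while paging in
	END_PAGING_BENCHMARK(EPagingBmReadMedia);

When __DEMAND_PAGING_BENCHMARKS__ is not defined both macros expand to nothing, so
the instrumentation compiles away.
*/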
   552 
   553 FORCE_INLINE void DPager::Event(TEventSimple aEvent, SPageInfo* aPageInfo)
   554 	{
   555 	switch(aEvent)
   556 		{
   557 	case EEventPageInNew:
   558 		TRACEP(("DP: %O PageIn 0x%08x",TheCurrentThread,aPageInfo->PhysAddr()));
   559 		#ifdef BTRACE_PAGING
   560 			BTraceContext12(BTrace::EPaging,BTrace::EPagingPageIn,aPageInfo->PhysAddr(),aPageInfo->Owner(),aPageInfo->Index());
   561 		#endif
   562 		++iEventInfo.iPageInReadCount;
   563 		break;
   564 
   565 	case EEventPageInAgain:
   566 		TRACEP(("DP: %O PageIn (again) 0x%08x",TheCurrentThread,aPageInfo->PhysAddr()));
   567 		#ifdef BTRACE_PAGING
   568 			BTraceContext4(BTrace::EPaging,BTrace::EPagingMapPage,aPageInfo->PhysAddr());
   569 		#endif
   570 		break;
   571 
   572 	case EEventPageInUnneeded:
   573 		TRACEP(("DP: %O PageIn (unneeded) 0x%08x",TheCurrentThread,aPageInfo->PhysAddr()));
   574 		#ifdef BTRACE_PAGING
   575 			BTraceContext0(BTrace::EPaging,BTrace::EPagingPageInUnneeded);
   576 		#endif
   577 		break;
   578 
   579 	case EEventPageInFree:
   580 		TRACEP(("DP: %O PageInFree 0x%08x",TheCurrentThread,aPageInfo->PhysAddr()));
   581 		#ifdef BTRACE_PAGING
   582 			BTraceContext4(BTrace::EPaging,BTrace::EPagingPageInFree,aPageInfo->PhysAddr());
   583 		#endif
   584 		break;
   585 
   586 	case EEventPageOut:
   587 		TRACEP(("DP: %O PageOut 0x%08x",TheCurrentThread,aPageInfo->PhysAddr()));
   588 		#ifdef BTRACE_PAGING
   589 			BTraceContext4(BTrace::EPaging,BTrace::EPagingPageOut,aPageInfo->PhysAddr());
   590 		#endif
   591 		break;
   592 
   593 	case EEventPageAged:
   594 		TRACEP(("DP: %O Aged 0x%08x",TheCurrentThread,aPageInfo->PhysAddr()));
   595 		#ifdef BTRACE_PAGING_VERBOSE
   596 			BTraceContext4(BTrace::EPaging,BTrace::EPagingAged,aPageInfo->PhysAddr());
   597 		#endif
   598 		break;
   599 
   600 	case EEventPagePin:
   601 		TRACEP(("DP: %O Pin 0x%08x count=%d",TheCurrentThread,aPageInfo->PhysAddr(),aPageInfo->PinCount()));
   602 		#ifdef BTRACE_PAGING
   603 			BTraceContext8(BTrace::EPaging,BTrace::EPagingPageLock,aPageInfo->PhysAddr(),aPageInfo->PinCount());
   604 		#endif
   605 		break;
   606 
   607 	case EEventPageUnpin:
   608 		TRACEP(("DP: %O Unpin 0x%08x count=%d",TheCurrentThread,aPageInfo->PhysAddr(),aPageInfo->PinCount()));
   609 		#ifdef BTRACE_PAGING
   610 			BTraceContext8(BTrace::EPaging,BTrace::EPagingPageUnlock,aPageInfo->PhysAddr(),aPageInfo->PinCount());
   611 		#endif
   612 		break;
   613 
   614 	case EEventPageDonate:
   615 		TRACEP(("DP: %O Donate 0x%08x",TheCurrentThread,aPageInfo->PhysAddr()));
   616 		#ifdef BTRACE_PAGING
   617 			BTraceContext12(BTrace::EPaging,BTrace::EPagingDonatePage,aPageInfo->PhysAddr(),aPageInfo->Owner(),aPageInfo->Index());
   618 		#endif
   619 		break;
   620 
   621 	case EEventPageReclaim:
   622 		TRACEP(("DP: %O Reclaim 0x%08x",TheCurrentThread,aPageInfo->PhysAddr()));
   623 		#ifdef BTRACE_PAGING
   624 			BTraceContext4(BTrace::EPaging,BTrace::EPagingReclaimPage,aPageInfo->PhysAddr());
   625 		#endif
   626 		break;
   627 
   628 	case EEventPageAgedClean:
   629 		TRACEP(("DP: %O AgedClean 0x%08x",TheCurrentThread,aPageInfo->PhysAddr()));
   630 		#ifdef BTRACE_PAGING_VERBOSE
   631 			BTraceContext4(BTrace::EPaging,BTrace::EPagingAgedClean,aPageInfo->PhysAddr());
   632 		#endif
   633 		break;
   634 
   635 	case EEventPageAgedDirty:
   636 		TRACEP(("DP: %O AgedDirty 0x%08x",TheCurrentThread,aPageInfo->PhysAddr()));
   637 		#ifdef BTRACE_PAGING_VERBOSE
   638 			BTraceContext4(BTrace::EPaging,BTrace::EPagingAgedDirty,aPageInfo->PhysAddr());
   639 		#endif
   640 		break;
   641 
   642 	case EEventPagePageTableAlloc:
   643 		TRACEP(("DP: %O PageTableAlloc 0x%08x",TheCurrentThread,aPageInfo->PhysAddr()));
   644 		#ifdef BTRACE_PAGING
   645 			BTraceContext4(BTrace::EPaging,BTrace::EPagingPageTableAlloc,aPageInfo->PhysAddr());
   646 		#endif
   647 		break;
   648 
   649 	default:
   650 		__NK_ASSERT_DEBUG(0);
   651 		break;
   652 		}
   653 	}
   654 
   655 
   656 
   657 FORCE_INLINE void DPager::Event(TEventWithAddresses aEvent, SPageInfo* aPageInfo, TLinAddr aPc, TLinAddr aFaultAddress, TUint aAccessPermissions)
   658 	{
   659 	switch(aEvent)
   660 		{
   661 	case EEventPageInStart:
   662 		TRACEP(("DP: %O HandlePageFault 0x%08x 0x%08x %d",TheCurrentThread,aFaultAddress,aPc,aAccessPermissions));
   663 		#ifdef BTRACE_PAGING
   664 			BTraceContext12(BTrace::EPaging,BTrace::EPagingPageInBegin,aFaultAddress,aPc,aAccessPermissions);
   665 		#endif
   666 		++iEventInfo.iPageFaultCount;
   667 		break;
   668 
   669 	case EEventPageRejuvenate:
   670 		TRACEP(("DP: %O Rejuvenate 0x%08x 0x%08x 0x%08x %d",TheCurrentThread,aPageInfo->PhysAddr(),aFaultAddress,aPc,aAccessPermissions));
   671 		#ifdef BTRACE_PAGING
   672 			BTraceContext12(BTrace::EPaging,BTrace::EPagingRejuvenate,aPageInfo->PhysAddr(),aFaultAddress,aPc);
   673 		#endif
   674 		++iEventInfo.iPageFaultCount;
   675 		break;
   676 
   677 	default:
   678 		__NK_ASSERT_DEBUG(0);
   679 		break;
   680 		}
   681 	}
   682 
   683 
   684 
   685 /**
   686 Multiplier for number of request objects in pool per drive that supports paging.
   687 */
   688 const TInt KPagingRequestsPerDevice = 2;
   689 
   690 
   691 class DPagingRequest;
   692 class DPageReadRequest;
   693 class DPageWriteRequest;
   694 
   695 /**
   696 A pool of paging requests for use by a single paging device.
   697 */
   698 class DPagingRequestPool : public DBase
   699 	{
   700 public:
   701 	DPagingRequestPool(TUint aNumPageReadRequest,TUint aNumPageWriteRequest);
   702 	DPageReadRequest* AcquirePageReadRequest(DMemoryObject* aMemory, TUint aIndex, TUint aCount);
   703 	DPageWriteRequest* AcquirePageWriteRequest(DMemoryObject* aMemory, TUint aIndex, TUint aCount);
   704 private:
   705 	~DPagingRequestPool();
   706 private:
   707 	class TGroup
   708 		{
   709 	public:
   710 		TGroup(TUint aNumRequests);
   711 		DPagingRequest* FindCollision(DMemoryObject* aMemory, TUint aIndex, TUint aCount);
   712 		DPagingRequest* GetRequest(DMemoryObject* aMemory, TUint aIndex, TUint aCount);
   713 		void Signal(DPagingRequest* aRequest);
   714 	public:
   715 		TUint iNumRequests;
   716 		DPagingRequest** iRequests;
   717 		SDblQue iFreeList;
   718 		};
   719 	TGroup iPageReadRequests;
   720 	TGroup iPageWriteRequests;
   721 
   722 	friend class DPagingRequest;
   723 	friend class DPageReadRequest;
   724 	friend class DPageWriteRequest;
   725 	};
   726 
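/*
Example (an illustrative sketch, not part of the original header): how paging code
might use the pool while servicing a page-in request. The exact sequence used in
mpager.cpp may differ; pool, aMemory, aIndex, aCount, colour and physAddrs are
placeholders.

	DPageReadRequest* req = pool->AcquirePageReadRequest(aMemory, aIndex, aCount);
	TLinAddr buffer = req->MapPages(colour, aCount, physAddrs);	// temporary mapping to read into
	// ... ask the paging device to read aCount pages into the buffer ...
	req->UnmapPages(ETrue);
	req->Release();
*/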
   727 
   728 /**
   729 Resources needed to service a paging request.
   730 */
   731 class DPagingRequest : public SDblQueLink
   732 	{
   733 public:
   734 	DPagingRequest(DPagingRequestPool::TGroup& aPoolGroup);
   735 	void Release();
   736 	void Wait();
   737 	void Signal();
   738 	void SetUse(DMemoryObject* aMemory, TUint aIndex, TUint aCount);
   739 	TBool CheckUse(DMemoryObject* aMemory, TUint aIndex, TUint aCount);
   740 	TBool IsCollision(DMemoryObject* aMemory, TUint aIndex, TUint aCount);
   741 	TLinAddr MapPages(TUint aColour, TUint aCount, TPhysAddr* aPages);
   742 	void UnmapPages(TBool aIMBRequired);
   743 public:
   744 	TThreadMessage	iMessage;	/**< Used by the media driver to queue requests */
   745 	DMutex*			iMutex;		/**< A mutex for synchronisation and priority inheritance. */
   746 	TInt			iUsageCount;/**< How many threads are using or waiting for this object. */
   747 	TLinAddr		iBuffer;	/**< A buffer to read compressed data into. Size is EMaxPages+1 pages.*/
   748 protected:
   749 	Mmu::TTempMapping	iTempMapping;
   750 private:
   751 	DPagingRequestPool::TGroup& iPoolGroup;
   752 	// used to identify the memory this request is being used for...
   753 	DMemoryObject*	iUseRegionMemory;
   754 	TUint			iUseRegionIndex;
   755 	TUint			iUseRegionCount;
   756 	};
   757 
   758 
   759 /**
   760 Resources needed to service a page in request.
   761 */
   762 class DPageReadRequest : public DPagingRequest
   763 	{
   764 public:
   765 	inline DPageReadRequest(DPagingRequestPool::TGroup& aPoolGroup)
   766 		: DPagingRequest(aPoolGroup)
   767 		{}
   768 	TInt Construct();
   769 	enum
   770 		{
   771 		EMaxPages = 4
   772 		};
   773 	static TUint ReservedPagesRequired();
   774 private:
   775 	~DPageReadRequest(); // can't delete
   776 public:
   777 	TLinAddr		iBuffer;	/**< A buffer to read compressed data into. Size is EMaxPages+1 pages.*/
   778 private:
   779 	DMemoryObject*	iMemory;
   780 private:
   781 	static TInt iAllocNext;
   782 	};
   783 
   784 
   785 FORCE_INLINE TUint DPageReadRequest::ReservedPagesRequired()
   786 	{
   787 	return iAllocNext*EMaxPages;
   788 	}
   789 
   790 
   791 /**
   792 Resources needed to service a page out request.
   793 */
   794 class DPageWriteRequest : public DPagingRequest
   795 	{
   796 public:
   797 	inline DPageWriteRequest(DPagingRequestPool::TGroup& aPoolGroup)
   798 		: DPagingRequest(aPoolGroup)
   799 		{}
   800 	TInt Construct();
   801 	enum
   802 		{
   803 		EMaxPages = 1
   804 		};
   805 private:
   806 	~DPageWriteRequest(); // can't delete
   807 private:
   808 	static TInt iAllocNext;
   809 	};
   810 
   811 
   812 #endif