os/kernelhwsrv/kernel/eka/memmodel/epoc/flexible/mmu/mpager.h
// Copyright (c) 2005-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
//

/**
 @file
 @internalComponent
*/

#ifndef MPAGER_H
#define MPAGER_H

struct SVMCacheInfo;
class DMemModelThread;
class DMemoryMappingBase;

class DPager
	{
public:
	DPager();
	void Init2();
	void Init3();

	FORCE_INLINE TUint NumberOfFreePages()
		{
		return iNumberOfFreePages;
		}

	FORCE_INLINE TUint NumberOfDirtyPages()
		{
		TUint ret;
		MmuLock::Lock();
		ret = iNumberOfDirtyPages;
		MmuLock::Unlock();
		return ret;
		}

	FORCE_INLINE void SetWritable(SPageInfo& aPageInfo)
		{
		if (!aPageInfo.IsDirty())
			{// This is the first mapping to write to the page so increase the
			// dirty page count.
			aPageInfo.SetWritable();
			iNumberOfDirtyPages++;
			}
		}

	FORCE_INLINE void SetClean(SPageInfo& aPageInfo)
		{
		__NK_ASSERT_DEBUG(iNumberOfDirtyPages);
		__NK_ASSERT_DEBUG(aPageInfo.IsDirty());
		aPageInfo.SetClean();
		iNumberOfDirtyPages--;
		}

	/**
	Remove RAM pages from the cache and return them to the system's free pool.
	(Free them.)

	This is called by class Mmu when it requires more free RAM to meet an
	allocation request.

	@param	aNumPages The number of pages to free up.
	@return	True if all pages could be freed, false otherwise
	@pre	RamAlloc mutex held.
	*/
	TBool GetFreePages(TInt aNumPages);
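
	/*
	A minimal sketch of the intended calling pattern, assuming the caller
	already holds the RamAlloc mutex required by the precondition;
	KPagesWanted is a hypothetical constant, not part of this header.

		const TInt KPagesWanted = 4;
		if (!ThePager.GetFreePages(KPagesWanted))
			{
			// the pager could not free enough pages; fail the allocation
			}
	*/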

	/**
	Attempts to rejuvenate or page in the page to the mapping that took the page fault.

	@param aPc					Address of instruction causing the fault.
	@param aFaultAddress		Address of memory access which faulted.
	@param aFaultAsid			The asid of the faulting thread's process.
	@param aAccessPermissions	Bitmask of values from enum TAccessPermissions, which
								indicates the permissions required by faulting memory access.
	@param aMapInstanceCount	The instance count of the mapping when it took the page fault.
	@param aThread				The thread that took the page fault.
	@param aExceptionInfo		The processor specific exception info.

	@return KErrNone if the page is now accessible, otherwise one of the system wide error codes.
	*/
	TInt HandlePageFault(	TLinAddr aPc, TLinAddr aFaultAddress, TUint aFaultAsid, TUint aFaultIndex,
							TUint aAccessPermissions, DMemoryObject* aMemory, DMemoryMapping* aMapping,
							TUint aMapInstanceCount, DThread* aThread, TAny* aExceptionInfo);

	/**
	Fault enumeration
	*/
	enum TFault
		{
		};

	/**
	Fault the system.
	*/
	static void Fault(TFault aFault);

	/**
	Get state of live page list.
	*/
	void GetLiveListInfo(SVMCacheInfo& aInfo);

	/**
	Resize the live page list.
	*/
	TInt ResizeLiveList(TUint aMinimumPageCount, TUint aMaximumPageCount);
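
	/*
	A minimal sketch of driving ResizeLiveList() from a cache size expressed
	in bytes, assuming the platform's KPageShift constant is in scope;
	aMinBytes and aMaxBytes are hypothetical inputs.

		TUint minPages = (TUint)(aMinBytes >> KPageShift);
		TUint maxPages = (TUint)(aMaxBytes >> KPageShift);
		TInt r = ThePager.ResizeLiveList(minPages, maxPages);
	*/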

	/**
	Recalculate live list size.
	*/
	TInt ResizeLiveList();

	/**
	Flush (unmap) all memory which is demand paged.
	This reduces the live page list to a minimum.
	*/
	void FlushAll();

	/**
	Give pages to paging system for managing.
	*/
	void DonatePages(TUint aCount, TPhysAddr* aPages);

	/**
	Reclaim pages from paging system which were previously donated with DonatePages.

	@param aCount Number of pages.
	@param aPages Array of pages (as stored in an RPageArray).

	@return KErrNone if successful.
			KErrNoMemory if paging system doesn't have enough spare pages. This will leave some or all of the pages still managed by the pager.
			KErrNotFound if some of the pages were not actually being managed by the pager.
	*/
	TInt ReclaimPages(TUint aCount, TPhysAddr* aPages);
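
	/*
	A minimal sketch of the donate/reclaim pairing, assuming aPages and aCount
	come from the owning memory object's RPageArray.

		ThePager.DonatePages(aCount, aPages);	// the pager may now reuse these pages
		// ... later, when the content is needed again ...
		TInt r = ThePager.ReclaimPages(aCount, aPages);
		if (r == KErrNoMemory)
			{
			// not enough spare pages; some or all pages remain managed by the pager
			}
	*/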

	/**
	Called by class Mmu whenever a page of RAM is freed. The page state will be EUnused.
	If the page was being used by the pager then this gives it the opportunity to update
	any internal state. If the pager wishes to retain ownership of the page then it must
	return KErrNone; any other value will cause the page to be returned to the
	system's free pool.
	*/
	TInt PageFreed(SPageInfo* aPageInfo);

	//
	// following public members for use by memory managers...
	//

	/**
	Allocate a number of RAM pages to store demand paged content.
	These pages are obtained from...

	1. An unused page in the live page list.
	2. The system's free pool.
	3. The oldest page from the live page list.
	*/
	TInt PageInAllocPages(TPhysAddr* aPages, TUint aCount, Mmu::TRamAllocFlags aAllocFlags);

	/**
	Free a number of RAM pages allocated by PageInAllocPages.
	*/
	void PageInFreePages(TPhysAddr* aPages, TUint aCount);
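
	/*
	A minimal sketch of how a memory manager might pair these two calls; the
	page count and aAllocFlags are assumed to be supplied by the caller.

		TPhysAddr pages[2];
		if (ThePager.PageInAllocPages(pages, 2, aAllocFlags) != KErrNone)
			return KErrNoMemory;
		// ... attempt to read the demand paged content into the pages ...
		TBool readFailed = EFalse;	// outcome of the (elided) read
		if (readFailed)
			ThePager.PageInFreePages(pages, 2);	// hand the pages back on failure
	*/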

	/**
	Called to add a new page to the live list after a fault has occurred.

	@param aPageInfo		The page.

	@pre MmuLock held
	@post MmuLock held (but may have been released by this function)
	*/
	void PagedIn(SPageInfo* aPageInfo);

	/**
	@param aPageInfo		The page.
	@param aPinArgs			Owner of a replacement page which will be used to substitute for the pinned page.

	@pre MmuLock held
	@post MmuLock held (but may have been released by this function)
	*/
	void PagedInPinned(SPageInfo* aPageInfo, TPinArgs& aPinArgs);

	/**
	@pre MmuLock held
	@post MmuLock left unchanged.
	*/
	void PagedInUnneeded(SPageInfo* aPageInfo);

	/**
	@param aPageInfo		The page to unpin.
	@param aPinArgs			The resources used for pinning. The replacement pages allocated
							to this will be increased for each page which became completely
							unpinned.

	@pre MmuLock held
	@post MmuLock held (but may have been released by this function)
	*/
	void Unpin(SPageInfo* aPageInfo, TPinArgs& aPinArgs);

	/**
	@param aPageInfo		The page to pin. Must be a page being demand paged.
	@param aPinArgs			Owner of a replacement page which will be used to substitute for the pinned page.

	@pre MmuLock held
	@post MmuLock held (but may have been released by this function)
	*/
	void Pin(SPageInfo* aPageInfo, TPinArgs& aPinArgs);

	/**
	@pre MmuLock held
	@post MmuLock held (but may have been released by this function)
	*/
	void RejuvenatePageTable(TPte* aPt);

	/**
	*/
	TBool ReservePages(TUint aRequiredCount, TUint& aCount);

	/**
	*/
	void UnreservePages(TUint& aCount);

	/**
	Enumeration of instrumented paging events which only require the
	SPageInfo object as an argument.
	*/
	enum TEventSimple
		{
		EEventPageInNew,
		EEventPageInAgain,
		EEventPageInUnneeded,
		EEventPageInFree,
		EEventPageOut,
		EEventPageAged,
		EEventPagePin,
		EEventPageUnpin,
		EEventPageDonate,
		EEventPageReclaim,
		EEventPageAgedClean,
		EEventPageAgedDirty,
		EEventPagePageTableAlloc
		};

	/**
	Signal the occurrence of an event of type TEventSimple.
	*/
	void Event(TEventSimple aEvent, SPageInfo* aPageInfo);

	/**
	Enumeration of instrumented paging events which require the faulting address
	and program counter as arguments.
	*/
	enum TEventWithAddresses
		{
		EEventPageInStart,
		EEventPageRejuvenate
		};

	/**
	Signal the occurrence of an event of type TEventWithAddresses.
	*/
	void Event(TEventWithAddresses aEvent, SPageInfo* aPageInfo, TLinAddr aPc, TLinAddr aFaultAddress, TUint aAccessPermissions);

	/**
	Get the pager's event info data.
	*/
	void GetEventInfo(SVMEventInfo& aInfoOut);

	/**
	Reset the pager's event info data.
	*/
	void ResetEventInfo();

	/**
	Attempt to discard the specified page.

	@param aOldPageInfo	The page info of the page to discard.
	@param aBlockZoneId	The ID of the RAM zone not to allocate any required new page into.
	@param aBlockRest	Set to ETrue when we don't want the allocator to search for new pages if the RAM
						zone with ID==aBlockZoneId is encountered, i.e. a general RAM defrag operation.
	*/
	TInt DiscardPage(SPageInfo* aOldPageInfo, TUint aBlockZoneId, TBool aBlockRest);

	/**
	Update any live list links to replace the old page with the new page.
	This is to be used when a page has been moved.

	@param aOldPageInfo	The page info of the page to replace.
	@param aNewPageInfo	The page info of the page to be used instead of the old page.
	*/
	void ReplacePage(SPageInfo& aOldPageInfo, SPageInfo& aNewPageInfo);
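
	/*
	A minimal sketch of the intended use when a physical page has been moved,
	for example during RAM defragmentation; oldPageInfo and newPageInfo are
	assumed to describe the old and new physical pages, with the content
	already copied across.

		ThePager.ReplacePage(*oldPageInfo, *newPageInfo);	// fix up the live list links
	*/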

	//
	// following public members for use by TPinArgs...
	//

	/**
	*/
	TBool AllocPinReplacementPages(TUint aNumPages);

	/**
	*/
	void FreePinReplacementPages(TUint aNumPages);

private:
	/**
	Add a page to the head of the live page list. I.e. make it the 'youngest' page.

	@pre MmuLock held
	@post MmuLock left unchanged.
	*/
	void AddAsYoungestPage(SPageInfo* aPageInfo);

	/**
	Mark a page as type EUnused and add it to the end of the live page list.
	I.e. make it the 'oldest' page, so that it is the first page to be reused.

	@pre MmuLock held
	@post MmuLock left unchanged.
	*/
	void AddAsFreePage(SPageInfo* aPageInfo);

	/**
	Remove a page from the live page list.
	Its paged state is set to EUnpaged.

	@pre MmuLock held
	@post MmuLock left unchanged.
	*/
	void RemovePage(SPageInfo* aPageInfo);

	/**
	Remove the oldest page from the live page list and perform #StealPage.

	@pre MmuLock held
	@post MmuLock left unchanged.
	*/
	SPageInfo* StealOldestPage();

	/**
	Steal a page from the memory object (if any) which is using the page.
	If successful the returned page will be in the EUnknown state and the
	cache state for the page is indeterminate. This is the same state as
	if the page had been allocated by Mmu::AllocRam.

	@pre RamAlloc mutex held
	@pre MmuLock held
	@post MmuLock held (but may have been released by this function)
	*/
	TInt StealPage(SPageInfo* aPageInfo);

	/**
	Restrict the access permissions for a page.

	@param aPageInfo	The page.
	@param aRestriction	The restriction type to apply.
	*/
	TInt RestrictPage(SPageInfo* aPageInfo, TRestrictPagesType aRestriction);

	/**
	Get a RAM page from the system's free pool and add it to the live list as a free page.

	@return False if out of memory;
			true otherwise, though the new free page may already have been used.

	@pre MmuLock held
	@post MmuLock held (but may have been released by this function)
	*/
	TBool TryGrowLiveList();

	/**
	Get a RAM page from the system's free pool.

	@pre RamAllocLock held.

	@return The page or NULL if no page is available.
	*/
	SPageInfo* GetPageFromSystem(Mmu::TRamAllocFlags aAllocFlags, TUint aBlockZoneId=KRamZoneInvalidId, TBool aBlockRest=EFalse);

	/**
	Put a page back on the system's free pool.

	@pre RamAllocLock held.
	*/
	void ReturnPageToSystem();

	/**
	Put a specific page back on the system's free pool.

	@pre RamAllocLock held.
	*/
	void ReturnPageToSystem(SPageInfo& aPageInfo);

	/**
	Allocate a RAM page to store demand paged content.
	This tries to obtain a RAM page from the following places:
	1. An unused page in the live page list.
	2. The system's free pool.
	3. The oldest page from the live page list.
	*/
	SPageInfo* PageInAllocPage(Mmu::TRamAllocFlags aAllocFlags);

	/**
	If the number of young pages exceeds that specified by iYoungOldRatio then a
	single page is made 'old'. Call this after adding a new 'young' page.

	@pre MmuLock held
	@post MmuLock held (but may have been released by this function)
	*/
	void BalanceAges();

	/**
	If HaveTooManyPages() then return them to the system.
	*/
	void RemoveExcessPages();

	/**
	@return True if pager has too many pages, false otherwise.
	*/
	TBool HaveTooManyPages();

	/**
	@return True if pager has its maximum number of pages, false otherwise.
	*/
	TBool HaveMaximumPages();

	/**
	Attempt to rejuvenate a page in which a page fault occurred.

	@param aOsAsid				Address space ID in which fault occurred.
	@param aAddress				Address of memory access which faulted.
	@param aAccessPermissions	Bitmask of values from enum TAccessPermissions, which
								indicates the permissions required by faulting memory access.
	@param aPc					Address of instruction causing the fault. (Used for tracing.)
	@param aMapping				The mapping that maps the page that took the fault.
	@param aMapInstanceCount	The instance count of the mapping when the page fault occurred.
	@param aThread				The thread that took the page fault.
	@param aExceptionInfo		The processor specific exception info.

	@return KErrNone if the page was remapped, KErrAbort if the mapping has been reused or detached,
	KErrNotFound if it may be possible to page in the page.
	*/
	TInt TryRejuvenate(	TInt aOsAsid, TLinAddr aAddress, TUint aAccessPermissions, TLinAddr aPc,
						DMemoryMappingBase* aMapping, TUint aMapInstanceCount, DThread* aThread,
						TAny* aExceptionInfo);

	/**
	Reserve one page for guaranteed locking use.
	Increments iReservePageCount if successful.

	@return True if the operation was successful.
	*/
	TBool ReservePage();

	/**
	Called when a realtime thread takes a paging fault.
	Checks whether it's OK for the thread to take the fault.
	@return KErrNone if the paging fault should be further processed.
	*/
	TInt CheckRealtimeThreadFault(DThread* aThread, TAny* aExceptionInfo);

	/**
	Attempt to find the page table entry and page info for a page in the specified mapping.

	@param aOsAsid				The OsAsid of the process that owns the mapping.
	@param aAddress				The linear address of the page.
	@param aMapping				The mapping that maps the linear address.
	@param aMapInstanceCount	The instance count of the mapping.
	@param[out] aPte			Will return a pointer to the page table entry for the page.
	@param[out] aPageInfo		Will return a pointer to the page info for the page.

	@return KErrNone on success, KErrAbort when the mapping is now invalid, KErrNotFound when
	the page table or page info can't be found.
	*/
	TInt PteAndInfoFromLinAddr(	TInt aOsAsid, TLinAddr aAddress, DMemoryMappingBase* aMapping,
								TUint aMapInstanceCount, TPte*& aPte, SPageInfo*& aPageInfo);
#ifdef _DEBUG
	/**
	Check consistency of live list.
	*/
	TBool CheckLists();

	void TraceCounts();
#endif

private:
	TUint iMinYoungPages;		///< Minimum number of young pages in live list required for correct functioning.
	TUint iAbsoluteMinPageCount;///< Absolute minimum number of pages in the live list to meet algorithm constraints.
private:
	TUint iMinimumPageCount;	/**< Minimum size for the live page list, including locked pages */
	TUint iMaximumPageCount;	/**< Maximum size for the live page list, including locked pages */
	TUint16 iYoungOldRatio;		/**< Ratio of young to old pages in the live page list */
	SDblQue iYoungList;			/**< Head of 'young' page list. */
	TUint iYoungCount;			/**< Number of young pages */
	SDblQue iOldList;			/**< Head of 'old' page list. */
	TUint iOldCount;			/**< Number of old pages */
#ifdef _USE_OLDEST_LISTS
	SDblQue iOldestCleanList;	/**< Head of 'oldestClean' page list. */
	TUint iOldestCleanCount;	/**< Number of 'oldestClean' pages */
	SDblQue iOldestDirtyList;	/**< Head of 'oldestDirty' page list. */
	TUint iOldestDirtyCount;	/**< Number of 'oldestDirty' pages */
	TUint16 iOldOldestRatio;	/**< Ratio of old pages to oldest (clean and dirty) pages in the live page list */
#endif
	TUint iNumberOfFreePages;
	TUint iNumberOfDirtyPages;	/**< The total number of dirty pages in the paging cache. Protected by MmuLock */
	TUint iInitMinimumPageCount;/**< Initial value for iMinimumPageCount */
	TUint iInitMaximumPageCount;/**< Initial value for iMaximumPageCount */
	TUint iReservePageCount;	/**< Number of pages reserved for locking */
	TUint iMinimumPageLimit;	/**< Minimum size for iMinimumPageCount, not including locked pages.
								     iMinimumPageCount >= iMinimumPageLimit + iReservePageCount */
	SVMEventInfo iEventInfo;

#ifdef __DEMAND_PAGING_BENCHMARKS__
public:
	void RecordBenchmarkData(TPagingBenchmark aBm, TUint32 aStartTime, TUint32 aEndTime);
	void ResetBenchmarkData(TPagingBenchmark aBm);
	SPagingBenchmarkInfo iBenchmarkInfo[EMaxPagingBm];
#endif //__DEMAND_PAGING_BENCHMARKS__
	};

extern DPager ThePager;


#ifdef __DEMAND_PAGING_BENCHMARKS__

#define START_PAGING_BENCHMARK TUint32 _bmStart = NKern::FastCounter()
#define END_PAGING_BENCHMARK(bm) ThePager.RecordBenchmarkData(bm, _bmStart, NKern::FastCounter())

#else

#define START_PAGING_BENCHMARK
#define END_PAGING_BENCHMARK(bm)
#endif // __DEMAND_PAGING_BENCHMARKS__
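
/*
A minimal sketch of how these macros are intended to bracket a measured
operation; EPagingBmExample stands in for a real TPagingBenchmark enumerator,
which is defined elsewhere.

	START_PAGING_BENCHMARK;
	// ... perform the paging operation being measured ...
	END_PAGING_BENCHMARK(EPagingBmExample);
*/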


FORCE_INLINE void DPager::Event(TEventSimple aEvent, SPageInfo* aPageInfo)
	{
	switch(aEvent)
		{
	case EEventPageInNew:
		TRACEP(("DP: %O PageIn 0x%08x",TheCurrentThread,aPageInfo->PhysAddr()));
		#ifdef BTRACE_PAGING
			BTraceContext12(BTrace::EPaging,BTrace::EPagingPageIn,aPageInfo->PhysAddr(),aPageInfo->Owner(),aPageInfo->Index());
		#endif
		++iEventInfo.iPageInReadCount;
		break;

	case EEventPageInAgain:
		TRACEP(("DP: %O PageIn (again) 0x%08x",TheCurrentThread,aPageInfo->PhysAddr()));
		#ifdef BTRACE_PAGING
			BTraceContext4(BTrace::EPaging,BTrace::EPagingMapPage,aPageInfo->PhysAddr());
		#endif
		break;

	case EEventPageInUnneeded:
		TRACEP(("DP: %O PageIn (unneeded) 0x%08x",TheCurrentThread,aPageInfo->PhysAddr()));
		#ifdef BTRACE_PAGING
			BTraceContext0(BTrace::EPaging,BTrace::EPagingPageInUnneeded);
		#endif
		break;

	case EEventPageInFree:
		TRACEP(("DP: %O PageInFree 0x%08x",TheCurrentThread,aPageInfo->PhysAddr()));
		#ifdef BTRACE_PAGING
			BTraceContext4(BTrace::EPaging,BTrace::EPagingPageInFree,aPageInfo->PhysAddr());
		#endif
		break;

	case EEventPageOut:
		TRACEP(("DP: %O PageOut 0x%08x",TheCurrentThread,aPageInfo->PhysAddr()));
		#ifdef BTRACE_PAGING
			BTraceContext4(BTrace::EPaging,BTrace::EPagingPageOut,aPageInfo->PhysAddr());
		#endif
		break;

	case EEventPageAged:
		TRACEP(("DP: %O Aged 0x%08x",TheCurrentThread,aPageInfo->PhysAddr()));
		#ifdef BTRACE_PAGING_VERBOSE
			BTraceContext4(BTrace::EPaging,BTrace::EPagingAged,aPageInfo->PhysAddr());
		#endif
		break;

	case EEventPagePin:
		TRACEP(("DP: %O Pin 0x%08x count=%d",TheCurrentThread,aPageInfo->PhysAddr(),aPageInfo->PinCount()));
		#ifdef BTRACE_PAGING
			BTraceContext8(BTrace::EPaging,BTrace::EPagingPageLock,aPageInfo->PhysAddr(),aPageInfo->PinCount());
		#endif
		break;

	case EEventPageUnpin:
		TRACEP(("DP: %O Unpin 0x%08x count=%d",TheCurrentThread,aPageInfo->PhysAddr(),aPageInfo->PinCount()));
		#ifdef BTRACE_PAGING
			BTraceContext8(BTrace::EPaging,BTrace::EPagingPageUnlock,aPageInfo->PhysAddr(),aPageInfo->PinCount());
		#endif
		break;

	case EEventPageDonate:
		TRACEP(("DP: %O Donate 0x%08x",TheCurrentThread,aPageInfo->PhysAddr()));
		#ifdef BTRACE_PAGING
			BTraceContext12(BTrace::EPaging,BTrace::EPagingDonatePage,aPageInfo->PhysAddr(),aPageInfo->Owner(),aPageInfo->Index());
		#endif
		break;

	case EEventPageReclaim:
		TRACEP(("DP: %O Reclaim 0x%08x",TheCurrentThread,aPageInfo->PhysAddr()));
		#ifdef BTRACE_PAGING
			BTraceContext4(BTrace::EPaging,BTrace::EPagingReclaimPage,aPageInfo->PhysAddr());
		#endif
		break;

	case EEventPageAgedClean:
		TRACEP(("DP: %O AgedClean 0x%08x",TheCurrentThread,aPageInfo->PhysAddr()));
		#ifdef BTRACE_PAGING_VERBOSE
			BTraceContext4(BTrace::EPaging,BTrace::EPagingAgedClean,aPageInfo->PhysAddr());
		#endif
		break;

	case EEventPageAgedDirty:
		TRACEP(("DP: %O AgedDirty 0x%08x",TheCurrentThread,aPageInfo->PhysAddr()));
		#ifdef BTRACE_PAGING_VERBOSE
			BTraceContext4(BTrace::EPaging,BTrace::EPagingAgedDirty,aPageInfo->PhysAddr());
		#endif
		break;

	case EEventPagePageTableAlloc:
		TRACEP(("DP: %O PageTableAlloc 0x%08x",TheCurrentThread,aPageInfo->PhysAddr()));
		#ifdef BTRACE_PAGING
			BTraceContext4(BTrace::EPaging,BTrace::EPagingPageTableAlloc,aPageInfo->PhysAddr());
		#endif
		break;

	default:
		__NK_ASSERT_DEBUG(0);
		break;
		}
	}


FORCE_INLINE void DPager::Event(TEventWithAddresses aEvent, SPageInfo* aPageInfo, TLinAddr aPc, TLinAddr aFaultAddress, TUint aAccessPermissions)
	{
	switch(aEvent)
		{
	case EEventPageInStart:
		TRACEP(("DP: %O HandlePageFault 0x%08x 0x%08x %d",TheCurrentThread,aFaultAddress,aPc,aAccessPermissions));
		#ifdef BTRACE_PAGING
			BTraceContext12(BTrace::EPaging,BTrace::EPagingPageInBegin,aFaultAddress,aPc,aAccessPermissions);
		#endif
		++iEventInfo.iPageFaultCount;
		break;

	case EEventPageRejuvenate:
		TRACEP(("DP: %O Rejuvenate 0x%08x 0x%08x 0x%08x %d",TheCurrentThread,aPageInfo->PhysAddr(),aFaultAddress,aPc,aAccessPermissions));
		#ifdef BTRACE_PAGING
			BTraceContext12(BTrace::EPaging,BTrace::EPagingRejuvenate,aPageInfo->PhysAddr(),aFaultAddress,aPc);
		#endif
		++iEventInfo.iPageFaultCount;
		break;

	default:
		__NK_ASSERT_DEBUG(0);
		break;
		}
	}


/**
Multiplier for number of request objects in pool per drive that supports paging.
*/
const TInt KPagingRequestsPerDevice = 2;


class DPagingRequest;
class DPageReadRequest;
class DPageWriteRequest;

/**
A pool of paging requests for use by a single paging device.
*/
class DPagingRequestPool : public DBase
	{
public:
	DPagingRequestPool(TUint aNumPageReadRequest,TUint aNumPageWriteRequest);
	DPageReadRequest* AcquirePageReadRequest(DMemoryObject* aMemory, TUint aIndex, TUint aCount);
	DPageWriteRequest* AcquirePageWriteRequest(DMemoryObject* aMemory, TUint aIndex, TUint aCount);
private:
	~DPagingRequestPool();
private:
	class TGroup
		{
	public:
		TGroup(TUint aNumRequests);
		DPagingRequest* FindCollision(DMemoryObject* aMemory, TUint aIndex, TUint aCount);
		DPagingRequest* GetRequest(DMemoryObject* aMemory, TUint aIndex, TUint aCount);
		void Signal(DPagingRequest* aRequest);
	public:
		TUint iNumRequests;
		DPagingRequest** iRequests;
		SDblQue iFreeList;
		};
	TGroup iPageReadRequests;
	TGroup iPageWriteRequests;

	friend class DPagingRequest;
	friend class DPageReadRequest;
	friend class DPageWriteRequest;
	};


/**
Resources needed to service a paging request.
*/
class DPagingRequest : public SDblQueLink
	{
public:
	DPagingRequest(DPagingRequestPool::TGroup& aPoolGroup);
	void Release();
	void Wait();
	void Signal();
	void SetUse(DMemoryObject* aMemory, TUint aIndex, TUint aCount);
	TBool CheckUse(DMemoryObject* aMemory, TUint aIndex, TUint aCount);
	TBool IsCollision(DMemoryObject* aMemory, TUint aIndex, TUint aCount);
	TLinAddr MapPages(TUint aColour, TUint aCount, TPhysAddr* aPages);
	void UnmapPages(TBool aIMBRequired);
public:
	TThreadMessage	iMessage;	/**< Used by the media driver to queue requests */
	DMutex*			iMutex;		/**< A mutex for synchronisation and priority inheritance. */
	TInt			iUsageCount;/**< How many threads are using or waiting for this object. */
	TLinAddr		iBuffer;	/**< A buffer to read compressed data into. Size is EMaxPages+1 pages.*/
protected:
	Mmu::TTempMapping	iTempMapping;
private:
	DPagingRequestPool::TGroup& iPoolGroup;
	// used to identify the memory region this request is being used for...
	DMemoryObject*	iUseRegionMemory;
	TUint			iUseRegionIndex;
	TUint			iUseRegionCount;
	};
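
/*
A minimal sketch of a page-in path using the request pool above; the pool
instance, memory object, index, count, page colour and page array are all
assumed to be supplied by the caller, and the choice of ETrue for
UnmapPages() is illustrative only.

	DPageReadRequest* req = aPool.AcquirePageReadRequest(aMemory, aIndex, aCount);
	if (req)
		{
		TLinAddr buffer = req->MapPages(aColour, aCount, aPages);	// temporary mapping for the transfer
		// ... ask the paging device to read the content into 'buffer' ...
		req->UnmapPages(ETrue);
		req->Release();
		}
*/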


/**
Resources needed to service a page in request.
*/
class DPageReadRequest : public DPagingRequest
	{
public:
	inline DPageReadRequest(DPagingRequestPool::TGroup& aPoolGroup)
		: DPagingRequest(aPoolGroup)
		{}
	TInt Construct();
	enum
		{
		EMaxPages = 4
		};
	static TUint ReservedPagesRequired();
private:
	~DPageReadRequest(); // can't delete
public:
	TLinAddr		iBuffer;	/**< A buffer to read compressed data into. Size is EMaxPages+1 pages.*/
private:
	DMemoryObject*	iMemory;
private:
	static TInt iAllocNext;
	};


FORCE_INLINE TUint DPageReadRequest::ReservedPagesRequired()
	{
	return iAllocNext*EMaxPages;
	}


/**
Resources needed to service a page out request.
*/
class DPageWriteRequest : public DPagingRequest
	{
public:
	inline DPageWriteRequest(DPagingRequestPool::TGroup& aPoolGroup)
		: DPagingRequest(aPoolGroup)
		{}
	TInt Construct();
	enum
		{
		EMaxPages = 1
		};
private:
	~DPageWriteRequest(); // can't delete
private:
	static TInt iAllocNext;
	};


#endif