os/kernelhwsrv/kernel/eka/memmodel/epoc/flexible/mmu/mmu.h
author sl@SLION-WIN7.fritz.box
Fri, 15 Jun 2012 03:10:57 +0200
changeset 0 bde4ae8d615e
permissions -rw-r--r--
First public contribution.
sl@0
     1
// Copyright (c) 1998-2009 Nokia Corporation and/or its subsidiary(-ies).
sl@0
     2
// All rights reserved.
sl@0
     3
// This component and the accompanying materials are made available
sl@0
     4
// under the terms of the License "Eclipse Public License v1.0"
sl@0
     5
// which accompanies this distribution, and is available
sl@0
     6
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
sl@0
     7
//
sl@0
     8
// Initial Contributors:
sl@0
     9
// Nokia Corporation - initial contribution.
sl@0
    10
//
sl@0
    11
// Contributors:
sl@0
    12
//
sl@0
    13
// Description:
sl@0
    14
//
sl@0
    15
sl@0
    16
/**
sl@0
    17
 @file
sl@0
    18
 @internalComponent
sl@0
    19
*/
sl@0
    20
sl@0
    21
#ifndef __MMU_H__
sl@0
    22
#define __MMU_H__
sl@0
    23
sl@0
    24
#define _USE_OLDEST_LISTS
sl@0
    25
sl@0
    26
#include "mm.h"
sl@0
    27
#include "mmboot.h"
sl@0
    28
#include <mmtypes.h>
sl@0
    29
#include <kern_priv.h>
sl@0
    30
sl@0
    31
sl@0
    32
class DCoarseMemory;
sl@0
    33
class DMemoryObject;
sl@0
    34
class DMemoryMapping;
sl@0
    35
sl@0
    36
/**
sl@0
    37
A page information structure giving the current use and state for a
sl@0
    38
RAM page being managed by the kernel.
sl@0
    39
sl@0
    40
Any modification to the contents of any SPageInfo structure requires the
sl@0
    41
#MmuLock to be held. The exceptions to this is when a page is unused (#Type()==#EUnused),
sl@0
    42
in this case only the #RamAllocLock is required to use #SetAllocated(), #SetUncached(),
sl@0
    43
and #CacheInvalidateCounter().
sl@0
    44
sl@0
    45
These structures are stored in an array at the virtual address #KPageInfoLinearBase
sl@0
    46
which is indexed by the physical address of the page they are associated with, divided
sl@0
    47
by #KPageSize. The memory for this array is allocated by the bootstrap and it has
sl@0
    48
unallocated regions where no memory is required to store SPageInfo structures.
sl@0
    49
These unallocated memory regions are indicated by zeros in the bitmap stored at
sl@0
    50
#KPageInfoMap.
sl@0
    51
*/
sl@0
    52
struct SPageInfo
	{
	/**
	Enumeration for the usage of a RAM page. This is stored in #iType.
	*/
	enum TType
		{
		/**
		No physical RAM exists for this page.

		This represents memory which doesn't exist or is not part of the physical
		address range being managed by the kernel.
		*/
		EInvalid,

		/**
		RAM fixed at boot time.

		This is for memory which was allocated by the bootstrap and which
		the kernel does not actively manage.
		*/
		EFixed,

		/**
		Page is unused.

		The page is either free memory in Mmu::iRamPageAllocator or the demand
		paging 'live' list.

		To change from or to this type the #RamAllocLock must be held.
		*/
		EUnused,

		/**
		Page is in an indeterminate state.

		A page is placed into this state by Mmu::PagesAllocated when it is
		allocated (ceases to be #EUnused). Once the page
		(NOTE(review): this sentence is truncated in the original source —
		presumably it is later set to its final type by one of the setters below.)
		*/
		EUnknown,

		/**
		Page was allocated with Mmu::AllocPhysicalRam, Mmu::ClaimPhysicalRam
		or is part of a reserved RAM bank set at system boot.
		*/
		EPhysAlloc,

		/**
		Page is owned by a memory object.

		#iOwner will point to the owning memory object and #iIndex will
		be the page index into its memory for this page.
		*/
		EManaged,

		/**
		Page is being used as a shadow page.

		@see DShadowPage.
		*/
		EShadow
		};


	/**
	Flags stored in #iFlags.

	The least significant bits of these flags are used for the #TMemoryAttributes
	value for the page.
	*/
	enum TFlags
		{
		// lower bits hold TMemoryAttribute value for this page

		/**
		Flag set to indicate that the page has writable mappings.
		(This is to facilitate demand paged memory.)
		*/
		EWritable			= 1<<(EMemoryAttributeShift),

		/**
		Flag set to indicate that the memory page contents may be different
		to those previously saved to backing store (contents are 'dirty').
		This is set whenever a page gains a writeable mapping and only ever
		cleared once a demand paging memory manager 'cleans' the page.
		*/
		EDirty				= 1<<(EMemoryAttributeShift+1)
		};


	/**
	State for the page when being used to contain demand paged content.
	*/
	enum TPagedState
		{
		/**
		Page is not being managed for demand paging purposes, or has been transiently
		removed from the demand paging live list.
		*/
		EUnpaged 			= 0x0,

		/**
		Page is in the live list as a young page.
		*/
		EPagedYoung 		= 0x1,

		/**
		Page is in the live list as an old page.
		*/
		EPagedOld 			= 0x2,

		/**
		Page was pinned but it has been moved but not yet freed.
		*/
		EPagedPinnedMoved	= 0x3,

		/**
		Page has been removed from live list to prevent contents being paged-out.
		*/
		// NOTE - This must be the same value as EStatePagedLocked as defined in mmubase.h
		EPagedPinned 		= 0x4,

#ifdef _USE_OLDEST_LISTS
		/**
		Page is in the live list as one of oldest pages that is clean.
		*/
		EPagedOldestClean	= 0x5,

		/**
		Page is in the live list as one of oldest pages that is dirty.
		*/
		EPagedOldestDirty 	= 0x6
#endif
		};


	/**
	Additional flags, stored in #iFlags2.
	*/
	enum TFlags2
		{
		/**
		When #iPagedState==#EPagedPinned this indicates the page is a 'reserved' page
		and does not increase free page count when returned to the live list.
		*/
		EPinnedReserve	= 1<<0,
		};

private:
	/**
	Value from enum #TType, returned by #Type().
	*/
	TUint8 iType;

	/**
	Bitmask of values from #TFlags, returned by #Flags().
	*/
	TUint8 iFlags;

	/**
	Value from enum #TPagedState, returned by #PagedState().
	*/
	TUint8 iPagedState;

	/**
	Bitmask of values from #TFlags2.
	*/
	TUint8 iFlags2;

	union
		{
		/**
		The memory object which owns this page.
		Always set for #EManaged pages and can be set for #EPhysAlloc pages.
		*/
		DMemoryObject* iOwner;

		/**
		A pointer to the SPageInfo of the page that is being shadowed.
		For use with #EShadow pages only.
		*/
		SPageInfo* iOriginalPageInfo;
		};

	/**
	The index for this page within the owning object's (#iOwner) memory.
	*/
	TUint32 iIndex;

	/**
	Pointer identifying the current modifier of the page. See #SetModifier.
	*/
	TAny* iModifier;

	/**
	Storage location for data specific to the memory manager object handling this page.
	See #SetPagingManagerData.
	*/
	TUint32 iPagingManagerData;

	/**
	Union of values which vary depending of the current value of #iType.
	*/
	union
		{
		/**
		When #iType==#EPhysAlloc, this stores a count of the number of memory objects
		this page has been added to.
		*/
		TUint32 iUseCount;

		/**
		When #iType==#EUnused, this stores the value of Mmu::iCacheInvalidateCounter
		at the time the page was freed. This is used for some cache maintenance optimisations.
		*/
		TUint32 iCacheInvalidateCounter;

		/**
		When #iType==#EManaged, this holds the count of the number of times the page was pinned.
		This will only be non-zero for demand paged memory.
		*/
		TUint32 iPinCount;
		};

public:
	/**
	Used for placing page into linked lists. E.g. the various demand paging live lists.
	*/
	SDblQueLink iLink;

public:
	/**
	Return the SPageInfo for a given page of physical RAM.
	*/
	static SPageInfo* FromPhysAddr(TPhysAddr aAddress);

	/**
	Return the SPageInfo for a given page of physical RAM.
	If the address has no SPageInfo, then a null pointer is returned.
	*/
	static SPageInfo* SafeFromPhysAddr(TPhysAddr aAddress);

	/**
	Return physical address of the RAM page with which this SPageInfo object is associated.
	*/
	FORCE_INLINE TPhysAddr PhysAddr();

	/**
	Return a SPageInfo by conversion from the address of its embedded link member #iLink.
	*/
	FORCE_INLINE static SPageInfo* FromLink(SDblQueLink* aLink)
		{
		return (SPageInfo*)((TInt)aLink-_FOFF(SPageInfo,iLink));
		}

	//
	// Getters...
	//

	/**
	Return the current #TType value stored in #iType.
	@pre #MmuLock held.
	*/
	FORCE_INLINE TType Type()
		{
		CheckAccess("Type");
		return (TType)iType;
		}

	/**
	Return the current value of #iFlags.
	@pre #MmuLock held (if \a aNoCheck false).
	*/
	FORCE_INLINE TUint Flags(TBool aNoCheck=false)
		{
		if(!aNoCheck)
			CheckAccess("Flags");
		return iFlags;
		}

	/**
	Return the current value of #iPagedState.
	@pre #MmuLock held.
	*/
	FORCE_INLINE TPagedState PagedState()
		{
		CheckAccess("PagedState");
		return (TPagedState)iPagedState;
		}

	/**
	Return the current value of #iOwner.
	@pre #MmuLock held.
	*/
	FORCE_INLINE DMemoryObject* Owner()
		{
		CheckAccess("Owner");
		return iOwner;
		}

	/**
	Return the current value of #iIndex.
	@pre #MmuLock held (if \a aNoCheck false).
	*/
	FORCE_INLINE TUint32 Index(TBool aNoCheck=false)
		{
		if(!aNoCheck)
			CheckAccess("Index");
		return iIndex;
		}

	/**
	Return the current value of #iModifier.
	@pre #MmuLock held (if \a aNoCheck false).
	*/
	FORCE_INLINE TAny* Modifier()
		{
		CheckAccess("Modifier");
		return iModifier;
		}


	//
	// Setters..
	//

	/**
	Set this page as type #EFixed.
	This is only used during boot by Mmu::Init2Common.
	*/
	inline void SetFixed(TUint32 aIndex=0)
		{
		CheckAccess("SetFixed");
		Set(EFixed,0,aIndex);
		}

	/**
	Set this page as type #EUnused.

	@pre #MmuLock held.
	@pre #RamAllocLock held if previous page type != #EUnknown.

	@post #iModifier==0 to indicate that page usage has changed.
	*/
	inline void SetUnused()
		{
		CheckAccess("SetUnused",ECheckNotUnused|((iType!=EUnknown)?(TInt)ECheckRamAllocLock:0));
		iType = EUnused;
		iModifier = 0;
		// do not modify iFlags or iIndex in this function because page allocating cache cleaning operations rely on using this value
		}

	/**
	Set this page as type #EUnknown.
	This is only used by Mmu::PagesAllocated.

	@pre #RamAllocLock held.

	@post #iModifier==0 to indicate that page usage has changed.
	*/
	inline void SetAllocated()
		{
		CheckAccess("SetAllocated",ECheckUnused|ECheckRamAllocLock|ENoCheckMmuLock);
		iType = EUnknown;
		iModifier = 0;
		// do not modify iFlags or iIndex in this function because cache cleaning operations rely on using this value
		}

	/**
	Set this page as type #EPhysAlloc.
	@param aOwner	 Optional value for #iOwner.
	@param aIndex	 Optional value for #iIndex.

	@pre #MmuLock held.

	@post #iModifier==0 to indicate that page usage has changed.
	*/
	inline void SetPhysAlloc(DMemoryObject* aOwner=0, TUint32 aIndex=0)
		{
		CheckAccess("SetPhysAlloc");
		Set(EPhysAlloc,aOwner,aIndex);
		iUseCount = 0;
		}

	/**
	Set this page as type #EManaged.

	@param aOwner	Value for #iOwner.
	@param aIndex 	Value for #iIndex.
	@param aFlags 	Value for #iFlags (aOwner->PageInfoFlags()).

	@pre #MmuLock held.

	@post #iModifier==0 to indicate that page usage has changed.
	*/
	inline void SetManaged(DMemoryObject* aOwner, TUint32 aIndex, TUint8 aFlags)
		{
		CheckAccess("SetManaged");
		Set(EManaged,aOwner,aIndex);
		iFlags = aFlags;
		iPinCount = 0;
		}

	/**
	Set this page as type #EShadow.

	This is for use by #DShadowPage.

	@param aIndex 	Value for #iIndex.
	@param aFlags 	Value for #iFlags.

	@pre #MmuLock held.

	@post #iModifier==0 to indicate that page usage has changed.
	*/
	inline void SetShadow(TUint32 aIndex, TUint8 aFlags)
		{
		CheckAccess("SetShadow");
		Set(EShadow,0,aIndex);
		iFlags = aFlags;
		}

	/**
	Store a pointer to the SPageInfo of the page that this page is shadowing.

	@param aOrigPageInfo	Pointer to the SPageInfo that this page is shadowing

	@pre #MmuLock held.
	*/
	inline void SetOriginalPage(SPageInfo* aOrigPageInfo)
		{
		CheckAccess("SetOriginalPage");
		__NK_ASSERT_DEBUG(iType == EShadow);
		__NK_ASSERT_DEBUG(!iOriginalPageInfo);
		iOriginalPageInfo = aOrigPageInfo;
		}

	/**
	Returns a pointer to the SPageInfo of the page that this page is shadowing.

	@return	A pointer to the SPageInfo that this page is shadowing

	@pre #MmuLock held.
	*/
	inline SPageInfo* GetOriginalPage()
		{
		CheckAccess("GetOriginalPage");
		__NK_ASSERT_DEBUG(iType == EShadow);
		__NK_ASSERT_DEBUG(iOriginalPageInfo);
		return iOriginalPageInfo;
		}


private:
	/** Internal implementation factor for methods which set page type. */
	FORCE_INLINE void Set(TType aType, DMemoryObject* aOwner, TUint32 aIndex)
		{
		CheckAccess("Set",ECheckNotAllocated|ECheckNotPaged);
		(TUint32&)iType = aType; // also clears iFlags, iFlags2 and iPagedState
		iOwner = aOwner;
		iIndex = aIndex;
		iModifier = 0;
		}

public:


	//
	//
	//

	/**
	Set #iFlags to indicate that the contents of this page have been removed from
	any caches.

	@pre #MmuLock held if #iType!=#EUnused, #RamAllocLock held if #iType==#EUnused.
	*/
	FORCE_INLINE void SetUncached()
		{
		CheckAccess("SetUncached",iType==EUnused ? ECheckRamAllocLock|ENoCheckMmuLock : 0);
		__NK_ASSERT_DEBUG(iType==EUnused || (iType==EPhysAlloc && iUseCount==0));
		iFlags = EMemAttNormalUncached;
		}

	/**
	Set memory attributes and colour for a page of type #EPhysAlloc.
	
	This is set the first time a page of type #EPhysAlloc is added to a memory
	object with DMemoryManager::AddPages or DMemoryManager::AddContiguous.
	The set values are used to check constraints are met if the page is
	also added to other memory objects.

	@param aIndex	The page index within a memory object at which this page
					has been added. This is stored in #iIndex and used to determine
					the page's 'colour'.
	@param aFlags 	Value for #iFlags. This sets the memory attributes for the page.

	@post #iModifier==0 to indicate that page usage has changed.
	*/
	inline void SetMapped(TUint32 aIndex, TUint aFlags)
		{
		CheckAccess("SetMapped");
		__NK_ASSERT_DEBUG(iType==EPhysAlloc);
		__NK_ASSERT_DEBUG(iUseCount==0); // check page not already added to an object
		iIndex = aIndex;
		iFlags = aFlags;
		iModifier = 0;
		}

	/**
	Set #iPagedState

	@pre #MmuLock held.

	@post #iModifier==0 to indicate that page state has changed.
	*/
	FORCE_INLINE void SetPagedState(TPagedState aPagedState)
		{
		CheckAccess("SetPagedState");
		__NK_ASSERT_DEBUG(aPagedState==iPagedState || iPagedState!=EPagedPinned || iPinCount==0); // make sure don't set an unpinned state if iPinCount!=0
		iPagedState = aPagedState;
		iModifier = 0;
		}

	/**
	Set the page's #iModifier value.

	#iModifier is cleared to zero whenever the usage or paging state of the page
	changes. So if a thread sets this to a suitable unique value (e.g. the address
	of a local variable) then it may perform a long running operation on the page
	and later check with #CheckModified that no other thread has changed the page
	state or used SetModifier in the intervening time.
	Example.

	@code
	TInt anyLocalVariable; // arbitrary local variable

	MmuLock::Lock();
	SPageInfo* thePageInfo = GetAPage();
	thePageInfo->SetModifier(&anyLocalVariable); // use &anyLocalVariable as value unique to this thread 
	MmuLock::Unlock();

	DoOperation(thePageInfo);

	MmuLock::Lock();
	TInt r;
	if(!thePageInfo->CheckModified(&anyLocalVariable))
		{
		// nobody else touched the page...
		OperationSucceeded(thePageInfo);
		r = KErrNone;
		}
	else
		{
		// somebody else changed our page...
		OperationInterrupted(thePageInfo);
		r = KErrAbort;
		}
	MmuLock::Unlock();

	return r;
	@endcode

	@pre #MmuLock held.
	*/
	FORCE_INLINE void SetModifier(TAny* aModifier)
		{
		CheckAccess("SetModifier");
		iModifier = aModifier;
		}

	/**
	Return true if the #iModifier value does not match a specified value.

	@param aModifier	A 'modifier' value previously set with #SetModifier.

	@pre #MmuLock held.

	@see SetModifier.
	*/
	FORCE_INLINE TBool CheckModified(TAny* aModifier)
		{
		CheckAccess("CheckModified");
		return iModifier!=aModifier;
		}

	/**
	Flag this page as having Page Table Entries which give writeable access permissions.
	This sets flags #EWritable and #EDirty.

	@pre #MmuLock held.
	*/
	FORCE_INLINE void SetWritable()
		{
		CheckAccess("SetWritable");
		// This should only be invoked on paged pages.
		__NK_ASSERT_DEBUG(PagedState() != EUnpaged);
		iFlags |= EWritable;
		SetDirty();
		}

	/**
	Flag this page as no longer having any Page Table Entries which give writeable
	access permissions.
	This clears the flag #EWritable.

	@pre #MmuLock held.
	*/
	FORCE_INLINE void SetReadOnly()
		{
		CheckAccess("SetReadOnly");
		iFlags &= ~EWritable;
		}

	/**
	Returns true if #SetWritable has been called without a subsequent #SetReadOnly.
	This returns the flag #EWritable.

	@pre #MmuLock held.
	*/
	FORCE_INLINE TBool IsWritable()
		{
		CheckAccess("IsWritable");
		return iFlags&EWritable;
		}

	/**
	Flag this page as 'dirty', indicating that its contents may no longer match those saved
	to a backing store. This sets the flag #EDirty.

	This is used in the management of demand paged memory.

	@pre #MmuLock held.
	*/
	FORCE_INLINE void SetDirty()
		{
		CheckAccess("SetDirty");
		iFlags |= EDirty;
		}

	/**
	Flag this page as 'clean', indicating that its contents now match those saved
	to a backing store. This clears the flag #EDirty.

	This is used in the management of demand paged memory.

	@pre #MmuLock held.
	*/
	FORCE_INLINE void SetClean()
		{
		CheckAccess("SetClean");
		iFlags &= ~EDirty;
		}

	/**
	Return the #EDirty flag. See #SetDirty and #SetClean.

	This is used in the management of demand paged memory.

	@pre #MmuLock held.
	*/
	FORCE_INLINE TBool IsDirty()
		{
		CheckAccess("IsDirty");
		return iFlags&EDirty;
		}


	//
	// Type specific...
	//

	/**
	Set #iCacheInvalidateCounter to the specified value.

	@pre #MmuLock held.
	@pre #iType==#EUnused.
	*/
	void SetCacheInvalidateCounter(TUint32 aCacheInvalidateCounter)
		{
		CheckAccess("SetCacheInvalidateCounter");
		__NK_ASSERT_DEBUG(iType==EUnused);
		iCacheInvalidateCounter = aCacheInvalidateCounter;
		}

	/**
	Return #iCacheInvalidateCounter.

	@pre #MmuLock held.
	@pre #iType==#EUnused.
	*/
	TUint32 CacheInvalidateCounter()
		{
		CheckAccess("CacheInvalidateCounter",ECheckRamAllocLock|ENoCheckMmuLock);
		__NK_ASSERT_DEBUG(iType==EUnused);
		return iCacheInvalidateCounter;
		}

	/**
	Increment #iUseCount to indicate that the page has been added to a memory object.

	@return New value of #iUseCount.

	@pre #MmuLock held.
	@pre #iType==#EPhysAlloc.
	*/
	TUint32 IncUseCount()
		{
		CheckAccess("IncUseCount");
		__NK_ASSERT_DEBUG(iType==EPhysAlloc);
		return ++iUseCount;
		}

	/**
	Decrement #iUseCount to indicate that the page has been removed from a memory object.

	@return New value of #iUseCount.

	@pre #MmuLock held.
	@pre #iType==#EPhysAlloc.
	*/
	TUint32 DecUseCount()
		{
		CheckAccess("DecUseCount");
		__NK_ASSERT_DEBUG(iType==EPhysAlloc);
		__NK_ASSERT_DEBUG(iUseCount);
		return --iUseCount;
		}

	/**
	Return #iUseCount, this indicates the number of times the page has been added to memory object(s).

	@return #iUseCount.

	@pre #MmuLock held.
	@pre #iType==#EPhysAlloc.
	*/
	TUint32 UseCount()
		{
		CheckAccess("UseCount");
		__NK_ASSERT_DEBUG(iType==EPhysAlloc);
		return iUseCount;
		}

	/**
	Increment #iPinCount to indicate that a mapping has pinned this page.
	This is only done for demand paged memory; unpaged memory does not have
	#iPinCount updated when it is pinned.

	@return New value of #iPinCount.

	@pre #MmuLock held.
	@pre #iType==#EManaged.
	*/
	TUint32 IncPinCount()
		{
		CheckAccess("IncPinCount");
		__NK_ASSERT_DEBUG(iType==EManaged);
		return ++iPinCount;
		}

	/**
	Decrement #iPinCount to indicate that a mapping which was pinning this page has been removed.
	This is only done for demand paged memory; unpaged memory does not have
	#iPinCount updated when it is unpinned.

	@return New value of #iPinCount.

	@pre #MmuLock held.
	@pre #iType==#EManaged.
	*/
	TUint32 DecPinCount()
		{
		CheckAccess("DecPinCount");
		__NK_ASSERT_DEBUG(iType==EManaged);
		__NK_ASSERT_DEBUG(iPinCount);
		return --iPinCount;
		}

	/**
	Clear #iPinCount to zero as this page is no longer being used for the 
	pinned page.
	This is only done for demand paged memory; unpaged memory does not have
	#iPinCount set.

	@pre #MmuLock held.
	@pre #iType==#EManaged.
	*/
	void ClearPinCount()
		{
		CheckAccess("ClearPinCount");
		__NK_ASSERT_DEBUG(iType==EManaged);
		__NK_ASSERT_DEBUG(iPinCount);
		iPinCount = 0;
		}

	/**
	Return #iPinCount which indicates the number of mappings that have pinned this page.
	This is only valid for demand paged memory; unpaged memory does not have
	#iPinCount updated when it is pinned.

	@return #iPinCount.

	@pre #MmuLock held.
	@pre #iType==#EManaged.
	*/
	TUint32 PinCount()
		{
		CheckAccess("PinCount");
		__NK_ASSERT_DEBUG(iType==EManaged);
		return iPinCount;
		}

	/**
	Set the #EPinnedReserve flag.
	@pre #MmuLock held.
	@see EPinnedReserve.
	*/
	void SetPinnedReserve()
		{
		CheckAccess("SetPinnedReserve");
		iFlags2 |= EPinnedReserve;
		}

	/**
	Clear the #EPinnedReserve flag.
	@return The previous state of the #EPinnedReserve flag (non-zero if it was set).
	@pre #MmuLock held.
	@see EPinnedReserve.
	*/
	TBool ClearPinnedReserve()
		{
		CheckAccess("ClearPinnedReserve");
		TUint oldFlags2 = iFlags2;
		iFlags2 = oldFlags2&~EPinnedReserve;
		return oldFlags2&EPinnedReserve;
		}

	/**
	Set #iPagingManagerData to the specified value.
	@pre #MmuLock held.
	@pre #iType==#EManaged.
	*/
	void SetPagingManagerData(TUint32 aPagingManagerData)
		{
		CheckAccess("SetPagingManagerData");
		__NK_ASSERT_DEBUG(iType==EManaged);
		iPagingManagerData = aPagingManagerData;
		}

	/**
	Return #iPagingManagerData.
	@pre #MmuLock held.
	@pre #iType==#EManaged.
	*/
	TUint32 PagingManagerData()
		{
		CheckAccess("PagingManagerData");
		__NK_ASSERT_DEBUG(iType==EManaged);
		return iPagingManagerData;
		}

	//
	// Debug...
	//

private:
	// Bit flags passed to CheckAccess to select which debug-build
	// precondition checks are performed before touching this object.
	enum TChecks
		{
		ECheckNotAllocated	= 1<<0,
		ECheckNotUnused		= 1<<1,
		ECheckUnused		= 1<<2,
		ECheckNotPaged		= 1<<3,
		ECheckRamAllocLock	= 1<<4,
		ENoCheckMmuLock		= 1<<5
		};
#ifdef _DEBUG
	// Debug-build precondition checker; aFlags is a bitmask of TChecks values.
	// Compiled to a no-op in release builds.
	void CheckAccess(const char* aMessage, TUint aFlags=0);
#else
	FORCE_INLINE void CheckAccess(const char* /*aMessage*/, TUint /*aFlags*/=0)
		{}
#endif

public:
#ifdef _DEBUG
	/**
	Debug function which outputs the contents of this object to the kernel debug port.
	*/
	void Dump();
#else
	FORCE_INLINE void Dump()
		{}
#endif
	};
sl@0
   945
sl@0
   946
sl@0
   947
/** Log2 of the number of SPageInfo structures which fit in one RAM page. */
const TInt KPageInfosPerPageShift = KPageShift-KPageInfoShift;
/** Number of SPageInfo structures which fit in one RAM page. */
const TInt KPageInfosPerPage = 1<<KPageInfosPerPageShift;
/** Log2 of the number of pages needed to hold SPageInfo structures for a full 32-bit physical address space. */
const TInt KNumPageInfoPagesShift = 32-KPageShift-KPageInfosPerPageShift;
/** Number of pages needed to hold SPageInfo structures for a full 32-bit physical address space. */
const TInt KNumPageInfoPages = 1<<KNumPageInfoPagesShift;
sl@0
   951
sl@0
   952
/**
Return the SPageInfo for the RAM page with physical address \a aAddress,
by indexing the SPageInfo array at #KPageInfoLinearBase with the page number.
*/
FORCE_INLINE SPageInfo* SPageInfo::FromPhysAddr(TPhysAddr aAddress)
	{
	return &((SPageInfo*)KPageInfoLinearBase)[aAddress>>KPageShift];
	}
sl@0
   956
sl@0
   957
/**
Return the physical address of the RAM page this SPageInfo describes.
NOTE(review): shifting the object's own address left by #KPageInfosPerPageShift
recovers (index << KPageShift) — this presumably relies on #KPageInfoLinearBase
being aligned so its bits shift out of the 32-bit result; confirm against the
memory model's linear address map.
*/
FORCE_INLINE TPhysAddr SPageInfo::PhysAddr()
	{
	return ((TPhysAddr)this)<<KPageInfosPerPageShift;
	}
sl@0
   961
sl@0
   962
sl@0
   963
sl@0
   964
/**
sl@0
   965
A page table information structure giving the current use and state for a
sl@0
   966
page table.
sl@0
   967
*/
sl@0
   968
struct SPageTableInfo
sl@0
   969
	{
sl@0
   970
public:
sl@0
   971
sl@0
   972
	/**
sl@0
   973
	Enumeration for the usage of a page table. This is stored in #iType.
sl@0
   974
	*/
sl@0
   975
	enum TType
sl@0
   976
		{
sl@0
   977
		/**
sl@0
   978
		Page table is unused (implementation assumes this enumeration == 0).
sl@0
   979
		@see #iUnused and #SPageTableInfo::TUnused.
sl@0
   980
		*/
sl@0
   981
		EUnused=0,
sl@0
   982
sl@0
   983
		/**
sl@0
   984
		Page table has undetermined use.
sl@0
   985
		(Either created by the bootstrap or is newly allocated but not yet assigned.)
sl@0
   986
		*/
sl@0
   987
		EUnknown=1,
sl@0
   988
sl@0
   989
		/**
sl@0
   990
		Page table is being used by a coarse memory object.
sl@0
   991
		@see #iCoarse and #SPageTableInfo::TCoarse.
sl@0
   992
		*/
sl@0
   993
		ECoarseMapping=2,
sl@0
   994
sl@0
   995
		/**
sl@0
   996
		Page table is being used for fine mappings.
sl@0
   997
		@see #iFine and #SPageTableInfo::TFine.
sl@0
   998
		*/
sl@0
   999
		EFineMapping=3
sl@0
  1000
		};
sl@0
  1001
sl@0
  1002
private:
sl@0
  1003
sl@0
  1004
	/**
sl@0
  1005
	Flags stored in #iFlags.
sl@0
  1006
	*/
sl@0
  1007
	enum TFlags
sl@0
  1008
		{
sl@0
  1009
		/**
sl@0
  1010
		Page table if for mapping demand paged content.
sl@0
  1011
		*/
sl@0
  1012
		EDemandPaged		= 	1<<0,
sl@0
  1013
		/**
sl@0
  1014
		Page table is in Page Table Allocator's cleanup list
sl@0
  1015
		(only set for first page table in a RAM page)
sl@0
  1016
		*/
sl@0
  1017
		EOnCleanupList		= 	1<<1,
sl@0
  1018
		/**
sl@0
  1019
		The page table cluster that this page table info refers to is currently allocated.
sl@0
  1020
		*/
sl@0
  1021
		EPtClusterAllocated 	=	1<<2
sl@0
  1022
		};
sl@0
  1023
sl@0
  1024
	/**
sl@0
  1025
	Value from enum #TType.
sl@0
  1026
	*/
sl@0
  1027
	TUint8 iType;				
sl@0
  1028
sl@0
  1029
	/**
sl@0
  1030
	Bitmask of values from #TFlags.
sl@0
  1031
	*/
sl@0
  1032
	TUint8 iFlags;
sl@0
  1033
sl@0
  1034
	/**
sl@0
  1035
	Spare member used for padding.
sl@0
  1036
	*/
sl@0
  1037
	TUint16 iSpare2;
sl@0
  1038
sl@0
  1039
	/**
sl@0
  1040
	Number of pages currently mapped by this page table.
sl@0
  1041
	Normally, when #iPageCount==0 and #iPermanenceCount==0, the page table is freed.
sl@0
  1042
	*/
sl@0
  1043
	TUint16 iPageCount;
sl@0
  1044
sl@0
  1045
	/**
sl@0
  1046
	Count for the number of uses of this page table which require it to be permanently allocated;
sl@0
  1047
	even when it maps no pages (#iPageCount==0).
sl@0
  1048
	*/
sl@0
  1049
	TUint16 iPermanenceCount;
sl@0
  1050
sl@0
  1051
	/**
sl@0
  1052
	Information about a page table when #iType==#EUnused.
sl@0
  1053
	*/
sl@0
  1054
	struct TUnused
sl@0
  1055
		{
sl@0
  1056
		/**
sl@0
  1057
		Cast this object to a SDblQueLink reference.
sl@0
  1058
		This is used for placing unused SPageTableInfo objects into free lists.
sl@0
  1059
		*/
sl@0
  1060
		FORCE_INLINE SDblQueLink& Link()
sl@0
  1061
			{ return *(SDblQueLink*)this; }
sl@0
  1062
	private:
sl@0
  1063
		SDblQueLink* iNext;	///< Next free page table
sl@0
  1064
		SDblQueLink* iPrev;	///< Previous free page table
sl@0
  1065
		};
sl@0
  1066
sl@0
  1067
	/**
sl@0
  1068
	Information about a page table when #iType==#ECoarseMapping.
sl@0
  1069
	*/
sl@0
  1070
	struct TCoarse
sl@0
  1071
		{
sl@0
  1072
		/**
sl@0
  1073
		Memory object which owns this page table.
sl@0
  1074
		*/
sl@0
  1075
		DCoarseMemory*	iMemoryObject;
sl@0
  1076
sl@0
  1077
		/**
sl@0
  1078
		The index of the page table, i.e. the offset, in 'chunks',
sl@0
  1079
		into the object's memory that the page table is being used to map.
sl@0
  1080
		*/
sl@0
  1081
		TUint16			iChunkIndex;
sl@0
  1082
sl@0
  1083
		/**
sl@0
  1084
		The #TPteType the page table is being used for.
sl@0
  1085
		*/
sl@0
  1086
		TUint8			iPteType;
sl@0
  1087
		};
sl@0
  1088
sl@0
  1089
	/**
sl@0
  1090
	Information about a page table when #iType==#EFineMapping.
sl@0
  1091
	*/
sl@0
  1092
	struct TFine
sl@0
  1093
		{
sl@0
  1094
		/**
sl@0
  1095
		Start of the virtual address region that this page table is currently
sl@0
  1096
		mapping memory at, ORed with the OS ASID of the address space this lies in.
sl@0
  1097
		*/
sl@0
  1098
		TLinAddr		iLinAddrAndOsAsid;
sl@0
  1099
		};
sl@0
  1100
sl@0
  1101
	/**
sl@0
  1102
	Union of type specific info.
sl@0
  1103
	*/
sl@0
  1104
	union
sl@0
  1105
		{
sl@0
  1106
		TUnused	iUnused; ///< Information about a page table when #iType==#EUnused.
sl@0
  1107
		TCoarse	iCoarse; ///< Information about a page table when #iType==#ECoarseMapping.
sl@0
  1108
		TFine	iFine;   ///< Information about a page table when #iType==#EFineMapping.
sl@0
  1109
		};
sl@0
  1110
sl@0
  1111
public:
sl@0
  1112
	/**
sl@0
  1113
	Return the SPageTableInfo for the page table in which a given PTE lies.
sl@0
  1114
	*/
sl@0
  1115
	static SPageTableInfo* FromPtPtr(TPte* aPtPte);
sl@0
  1116
sl@0
  1117
	/**
sl@0
  1118
	Return the page table with which this SPageTableInfo is associated.
sl@0
  1119
	*/
sl@0
  1120
	TPte* PageTable();
sl@0
  1121
sl@0
  1122
	/**
sl@0
  1123
	Used at boot time to initialise page tables which were allocated by the bootstrap. 
sl@0
  1124
sl@0
  1125
	@param aCount	The number of pages being mapped by this page table.
sl@0
  1126
	*/
sl@0
  1127
	FORCE_INLINE void Boot(TUint aCount)
sl@0
  1128
		{
sl@0
  1129
		CheckInit("Boot");
sl@0
  1130
		iPageCount = aCount;
sl@0
  1131
		iPermanenceCount = 1; // assume page table shouldn't be freed
sl@0
  1132
		iType = EUnknown;
sl@0
  1133
		iFlags = EPtClusterAllocated;
sl@0
  1134
		}
sl@0
  1135
sl@0
  1136
	/**
sl@0
  1137
	Initialise a page table after it has had memory allocated for it.
sl@0
  1138
sl@0
  1139
	@param aDemandPaged	True if this page table has been allocated for use with
sl@0
  1140
						demand paged memory.
sl@0
  1141
	*/
sl@0
  1142
	FORCE_INLINE void New(TBool aDemandPaged)
sl@0
  1143
		{
sl@0
  1144
		iType = EUnused;
sl@0
  1145
		iFlags = EPtClusterAllocated | (aDemandPaged ? EDemandPaged : 0);
sl@0
  1146
		}
sl@0
  1147
sl@0
  1148
	/**
sl@0
  1149
	Return true if the page table cluster that this page table info refers to has
sl@0
  1150
	been previously allocated.
sl@0
  1151
	*/
sl@0
  1152
	FORCE_INLINE TBool IsPtClusterAllocated()
sl@0
  1153
		{
sl@0
  1154
		return iFlags & EPtClusterAllocated;
sl@0
  1155
		}
sl@0
  1156
sl@0
  1157
	/**
sl@0
  1158
	The page table cluster that this page table info refers to has been freed.
sl@0
  1159
	*/
sl@0
  1160
	FORCE_INLINE void PtClusterFreed()
sl@0
  1161
		{
sl@0
  1162
		__NK_ASSERT_DEBUG(IsPtClusterAllocated());
sl@0
  1163
		iFlags &= ~EPtClusterAllocated;
sl@0
  1164
		}
sl@0
  1165
sl@0
  1166
	/**
sl@0
  1167
	The page table cluster that this page table info refers to has been allocated.
sl@0
  1168
	*/
sl@0
  1169
	FORCE_INLINE void PtClusterAlloc()
sl@0
  1170
		{
sl@0
  1171
		__NK_ASSERT_DEBUG(!IsPtClusterAllocated());
sl@0
  1172
		iFlags |= EPtClusterAllocated;
sl@0
  1173
		}
sl@0
  1174
sl@0
  1175
	/**
sl@0
  1176
	Initialilse a page table to type #EUnknown after it has been newly allocated.
sl@0
  1177
sl@0
  1178
	@pre #PageTablesLockIsHeld.
sl@0
  1179
	*/
sl@0
  1180
	FORCE_INLINE void Init()
sl@0
  1181
		{
sl@0
  1182
		__NK_ASSERT_DEBUG(IsPtClusterAllocated());
sl@0
  1183
		CheckInit("Init");
sl@0
  1184
		iPageCount = 0;
sl@0
  1185
		iPermanenceCount = 0;
sl@0
  1186
		iType = EUnknown;
sl@0
  1187
		}
sl@0
  1188
sl@0
  1189
	/**
sl@0
  1190
	Increment #iPageCount to account for newly mapped pages.
sl@0
  1191
sl@0
  1192
	@param aStep	Amount to add to #iPageCount. Default is one.
sl@0
  1193
sl@0
  1194
	@return New value of #iPageCount.
sl@0
  1195
sl@0
  1196
	@pre #MmuLock held.
sl@0
  1197
	*/
sl@0
  1198
	FORCE_INLINE TUint IncPageCount(TUint aStep=1)
sl@0
  1199
		{
sl@0
  1200
		CheckAccess("IncPageCount");
sl@0
  1201
		TUint count = iPageCount; // compiler handles half-word values stupidly, so give it a hand
sl@0
  1202
		count += aStep;
sl@0
  1203
		iPageCount = count;
sl@0
  1204
		return count;
sl@0
  1205
		}
sl@0
  1206
sl@0
  1207
	/**
sl@0
  1208
	Decrement #iPageCount to account for removed pages.
sl@0
  1209
sl@0
  1210
	@param aStep	Amount to subtract from #iPageCount. Default is one.
sl@0
  1211
sl@0
  1212
	@return New value of #iPageCount.
sl@0
  1213
sl@0
  1214
	@pre #MmuLock held.
sl@0
  1215
	*/
sl@0
  1216
	FORCE_INLINE TUint DecPageCount(TUint aStep=1)
sl@0
  1217
		{
sl@0
  1218
		CheckAccess("DecPageCount");
sl@0
  1219
		TUint count = iPageCount; // compiler handles half-word values stupidly, so give it a hand
sl@0
  1220
		count -= aStep;
sl@0
  1221
		iPageCount = count;
sl@0
  1222
		return count;
sl@0
  1223
		}
sl@0
  1224
sl@0
  1225
	/**
sl@0
  1226
	Return #iPageCount.
sl@0
  1227
	@pre #MmuLock held.
sl@0
  1228
	*/
sl@0
  1229
	FORCE_INLINE TUint PageCount()
sl@0
  1230
		{
sl@0
  1231
		CheckAccess("PageCount");
sl@0
  1232
		return iPageCount;
sl@0
  1233
		}
sl@0
  1234
sl@0
  1235
	/**
sl@0
  1236
	Increment #iPermanenceCount to indicate a new use of this page table which
sl@0
  1237
	requires it to be permanently allocated.
sl@0
  1238
sl@0
  1239
	@return New value of #iPermanenceCount.
sl@0
  1240
sl@0
  1241
	@pre #MmuLock held.
sl@0
  1242
	*/
sl@0
  1243
	FORCE_INLINE TUint IncPermanenceCount()
sl@0
  1244
		{
sl@0
  1245
		CheckAccess("IncPermanenceCount");
sl@0
  1246
		TUint count = iPermanenceCount; // compiler handles half-word values stupidly, so give it a hand
sl@0
  1247
		++count;
sl@0
  1248
		iPermanenceCount = count;
sl@0
  1249
		return count;
sl@0
  1250
		}
sl@0
  1251
sl@0
  1252
	/**
sl@0
  1253
	Decrement #iPermanenceCount to indicate the removal of a use added by #IncPermanenceCount.
sl@0
  1254
sl@0
  1255
	@return New value of #iPermanenceCount.
sl@0
  1256
sl@0
  1257
	@pre #MmuLock held.
sl@0
  1258
	*/
sl@0
  1259
	FORCE_INLINE TUint DecPermanenceCount()
sl@0
  1260
		{
sl@0
  1261
		CheckAccess("DecPermanenceCount");
sl@0
  1262
		TUint count = iPermanenceCount; // compiler handles half-word values stupidly, so give it a hand
sl@0
  1263
		__NK_ASSERT_DEBUG(count);
sl@0
  1264
		--count;
sl@0
  1265
		iPermanenceCount = count;
sl@0
  1266
		return count;
sl@0
  1267
		}
sl@0
  1268
sl@0
  1269
	/**
sl@0
  1270
	Return #iPermanenceCount.
sl@0
  1271
sl@0
  1272
	@pre #MmuLock held.
sl@0
  1273
	*/
sl@0
  1274
	FORCE_INLINE TUint PermanenceCount()
sl@0
  1275
		{
sl@0
  1276
		CheckAccess("PermanenceCount");
sl@0
  1277
		return iPermanenceCount;
sl@0
  1278
		}
sl@0
  1279
sl@0
  1280
	/**
sl@0
  1281
	Set page table to the #EUnused state.
sl@0
  1282
	This is only intended for use by #PageTableAllocator.
sl@0
  1283
sl@0
  1284
	@pre #MmuLock held and #PageTablesLockIsHeld.
sl@0
  1285
	*/
sl@0
  1286
	FORCE_INLINE void SetUnused()
sl@0
  1287
		{
sl@0
  1288
		CheckChangeUse("SetUnused");
sl@0
  1289
		iType = EUnused;
sl@0
  1290
		}
sl@0
  1291
sl@0
  1292
	/**
sl@0
  1293
	Return true if the page table is in the #EUnused state.
sl@0
  1294
	This is only intended for use by #PageTableAllocator.
sl@0
  1295
sl@0
  1296
	@pre #MmuLock held or #PageTablesLockIsHeld.
sl@0
  1297
	*/
sl@0
  1298
	FORCE_INLINE TBool IsUnused()
sl@0
  1299
		{
sl@0
  1300
		CheckCheckUse("IsUnused");
sl@0
  1301
		return iType==EUnused;
sl@0
  1302
		}
sl@0
  1303
sl@0
  1304
	/**
sl@0
  1305
	Set page table as being used by a coarse memory object.
sl@0
  1306
sl@0
  1307
	@param aMemory		Memory object which owns this page table.
sl@0
  1308
	@param aChunkIndex	The index of the page table, i.e. the offset, in 'chunks',
sl@0
  1309
						into the object's memory that the page table is being used to map.
sl@0
  1310
	@param aPteType		The #TPteType the page table is being used for.
sl@0
  1311
sl@0
  1312
	@pre #MmuLock held and #PageTablesLockIsHeld.
sl@0
  1313
sl@0
  1314
	@see TCoarse.
sl@0
  1315
	*/
sl@0
  1316
	inline void SetCoarse(DCoarseMemory* aMemory, TUint aChunkIndex, TUint aPteType)
sl@0
  1317
		{
sl@0
  1318
		CheckChangeUse("SetCoarse");
sl@0
  1319
		iPageCount = 0;
sl@0
  1320
		iPermanenceCount = 0;
sl@0
  1321
		iType = ECoarseMapping;
sl@0
  1322
		iCoarse.iMemoryObject = aMemory;
sl@0
  1323
		iCoarse.iChunkIndex = aChunkIndex;
sl@0
  1324
		iCoarse.iPteType = aPteType;
sl@0
  1325
		}
sl@0
  1326
sl@0
  1327
	/**
sl@0
  1328
	Return true if this page table is currently being used by a coarse memory object
sl@0
  1329
	matching the specified arguments.
sl@0
  1330
	For arguments, see #SetCoarse.
sl@0
  1331
sl@0
  1332
	@pre #MmuLock held or #PageTablesLockIsHeld.
sl@0
  1333
	*/
sl@0
  1334
	inline TBool CheckCoarse(DCoarseMemory* aMemory, TUint aChunkIndex, TUint aPteType)
sl@0
  1335
		{
sl@0
  1336
		CheckCheckUse("CheckCoarse");
sl@0
  1337
		return iType==ECoarseMapping
sl@0
  1338
			&& iCoarse.iMemoryObject==aMemory
sl@0
  1339
			&& iCoarse.iChunkIndex==aChunkIndex
sl@0
  1340
			&& iCoarse.iPteType==aPteType;
sl@0
  1341
		}
sl@0
  1342
sl@0
  1343
	/**
sl@0
  1344
	Set page table as being used for fine mappings.
sl@0
  1345
sl@0
  1346
	@param aLinAddr	Start of the virtual address region that the page table is
sl@0
  1347
					mapping memory at.
sl@0
  1348
	@param aOsAsid	The OS ASID of the address space which \a aLinAddr lies in.
sl@0
  1349
sl@0
  1350
	@pre #MmuLock held and #PageTablesLockIsHeld.
sl@0
  1351
	*/
sl@0
  1352
	inline void SetFine(TLinAddr aLinAddr, TUint aOsAsid)
sl@0
  1353
		{
sl@0
  1354
		CheckChangeUse("SetFine");
sl@0
  1355
		__NK_ASSERT_DEBUG((aLinAddr&KPageMask)==0);
sl@0
  1356
		iPageCount = 0;
sl@0
  1357
		iPermanenceCount = 0;
sl@0
  1358
		iType = EFineMapping;
sl@0
  1359
		iFine.iLinAddrAndOsAsid = aLinAddr|aOsAsid;
sl@0
  1360
		}
sl@0
  1361
sl@0
  1362
	/**
sl@0
  1363
	Return true if this page table is currently being used for fine mappings
sl@0
  1364
	matching the specified arguments.
sl@0
  1365
	For arguments, see #SetFine.
sl@0
  1366
sl@0
  1367
	@pre #MmuLock held or #PageTablesLockIsHeld.
sl@0
  1368
	*/
sl@0
  1369
	inline TBool CheckFine(TLinAddr aLinAddr, TUint aOsAsid)
sl@0
  1370
		{
sl@0
  1371
		CheckCheckUse("CheckFine");
sl@0
  1372
		__NK_ASSERT_DEBUG((aLinAddr&KPageMask)==0);
sl@0
  1373
		return iType==EFineMapping
sl@0
  1374
			&& iFine.iLinAddrAndOsAsid==(aLinAddr|aOsAsid);
sl@0
  1375
		}
sl@0
  1376
sl@0
  1377
	/**
sl@0
  1378
	Set a previously unknown page table as now being used for fine mappings.
sl@0
  1379
	This is used during the boot process by DFineMemory::ClaimInitialPages
sl@0
  1380
	to initialise the state of a page table allocated by the bootstrap.
sl@0
  1381
sl@0
  1382
	@param aLinAddr	Start of the virtual address region that the page table is
sl@0
  1383
					mapping memory at.
sl@0
  1384
	@param aOsAsid	The OS ASID of the address space which \a aLinAddr lies in.
sl@0
  1385
					(This should be KKernelOsAsid.)
sl@0
  1386
sl@0
  1387
	@pre #MmuLock held and #PageTablesLockIsHeld.
sl@0
  1388
	*/
sl@0
  1389
	inline TBool ClaimFine(TLinAddr aLinAddr, TUint aOsAsid)
sl@0
  1390
		{
sl@0
  1391
		CheckChangeUse("ClaimFine");
sl@0
  1392
		__NK_ASSERT_DEBUG((aLinAddr&KPageMask)==0);
sl@0
  1393
		if(iType==EFineMapping)
sl@0
  1394
			return CheckFine(aLinAddr,aOsAsid);
sl@0
  1395
		if(iType!=EUnknown)
sl@0
  1396
			return false;
sl@0
  1397
		iType = EFineMapping;
sl@0
  1398
		iFine.iLinAddrAndOsAsid = aLinAddr|aOsAsid;
sl@0
  1399
		return true;
sl@0
  1400
		}
sl@0
  1401
sl@0
  1402
	/**
sl@0
  1403
	Return true if page table was allocated for use with demand paged memory.
sl@0
  1404
	*/
sl@0
  1405
	FORCE_INLINE TBool IsDemandPaged()
sl@0
  1406
		{
sl@0
  1407
		return iFlags&EDemandPaged;
sl@0
  1408
		}
sl@0
  1409
sl@0
  1410
#ifdef _DEBUG
sl@0
  1411
	/**
sl@0
  1412
	Debug check returning true if the value of #iPageCount is consistent with
sl@0
  1413
	the PTEs in this page table.
sl@0
  1414
sl@0
  1415
	@pre #MmuLock held.
sl@0
  1416
	*/
sl@0
  1417
	TBool CheckPageCount();
sl@0
  1418
#endif
sl@0
  1419
sl@0
  1420
	/**
sl@0
  1421
	Return a reference to an embedded SDblQueLink which is used for placing this
sl@0
  1422
	SPageTableInfo objects into free lists.
sl@0
  1423
	@pre #PageTablesLockIsHeld.
sl@0
  1424
	@pre #iType==#EUnused.
sl@0
  1425
	*/
sl@0
  1426
	inline SDblQueLink& FreeLink()
sl@0
  1427
		{
sl@0
  1428
		__NK_ASSERT_DEBUG(IsUnused());
sl@0
  1429
		return iUnused.Link();
sl@0
  1430
		}
sl@0
  1431
sl@0
  1432
	/**
sl@0
  1433
	Return a pointer to a SPageTableInfo by conversion from the address
sl@0
  1434
	of its embedded link as returned by #FreeLink.
sl@0
  1435
	*/
sl@0
  1436
	FORCE_INLINE static SPageTableInfo* FromFreeLink(SDblQueLink* aLink)
sl@0
  1437
		{
sl@0
  1438
		return (SPageTableInfo*)((TInt)aLink-_FOFF(SPageTableInfo,iUnused));
sl@0
  1439
		}
sl@0
  1440
sl@0
  1441
	/**
sl@0
  1442
	Return the SPageTableInfo for the first page table in the same
sl@0
  1443
	physical ram page as the page table for this SPageTableInfo.
sl@0
  1444
	*/
sl@0
  1445
	FORCE_INLINE SPageTableInfo* FirstInPage()
sl@0
  1446
		{
sl@0
  1447
		return (SPageTableInfo*)(TLinAddr(this)&~(KPtClusterMask*sizeof(SPageTableInfo)));
sl@0
  1448
		}
sl@0
  1449
sl@0
  1450
	/**
sl@0
  1451
	Return the SPageTableInfo for the last page table in the same
sl@0
  1452
	physical ram page as the page table for this SPageTableInfo.
sl@0
  1453
	*/
sl@0
  1454
	FORCE_INLINE SPageTableInfo* LastInPage()
sl@0
  1455
		{
sl@0
  1456
		return (SPageTableInfo*)(TLinAddr(this)|(KPtClusterMask*sizeof(SPageTableInfo)));
sl@0
  1457
		}
sl@0
  1458
sl@0
  1459
	/**
sl@0
  1460
	Return true if the page table for this SPageTableInfo is
sl@0
  1461
	the first page table in the physical page it occupies.
sl@0
  1462
	*/
sl@0
  1463
	FORCE_INLINE TBool IsFirstInPage()
sl@0
  1464
		{
sl@0
  1465
		return (TLinAddr(this)&(KPtClusterMask*sizeof(SPageTableInfo)))==0;
sl@0
  1466
		}
sl@0
  1467
sl@0
  1468
	/**
sl@0
  1469
	Return true if this page table has been added to the cleanup list with
sl@0
  1470
	#AddToCleanupList.
sl@0
  1471
	Must only be used for page tables which return true for #IsFirstInPage.
sl@0
  1472
sl@0
  1473
	@pre #PageTablesLockIsHeld.
sl@0
  1474
	*/
sl@0
  1475
	FORCE_INLINE TBool IsOnCleanupList()
sl@0
  1476
		{
sl@0
  1477
		__NK_ASSERT_DEBUG(IsFirstInPage());
sl@0
  1478
		return iFlags&EOnCleanupList;
sl@0
  1479
		}
sl@0
  1480
sl@0
  1481
	/**
sl@0
  1482
	Add the RAM page containing this page table to the specified cleanup list.
sl@0
  1483
	Must only be used for page tables which return true for #IsFirstInPage.
sl@0
  1484
sl@0
  1485
	@pre #PageTablesLockIsHeld.
sl@0
  1486
	*/
sl@0
  1487
	FORCE_INLINE void AddToCleanupList(SDblQue& aCleanupList)
sl@0
  1488
		{
sl@0
  1489
		__NK_ASSERT_DEBUG(IsUnused());
sl@0
  1490
		__NK_ASSERT_DEBUG(IsFirstInPage());
sl@0
  1491
		__NK_ASSERT_DEBUG(!IsOnCleanupList());
sl@0
  1492
		aCleanupList.Add(&FreeLink());
sl@0
  1493
		iFlags |= EOnCleanupList;
sl@0
  1494
		}
sl@0
  1495
sl@0
  1496
	/**
sl@0
  1497
	Remove the RAM page containing this page table from a cleanup list it
sl@0
  1498
	was added to with aCleanupList.
sl@0
  1499
	Must only be used for page tables which return true for #IsFirstInPage.
sl@0
  1500
sl@0
  1501
	@pre #PageTablesLockIsHeld.
sl@0
  1502
	*/
sl@0
  1503
	FORCE_INLINE void RemoveFromCleanupList()
sl@0
  1504
		{
sl@0
  1505
		__NK_ASSERT_DEBUG(IsUnused());
sl@0
  1506
		__NK_ASSERT_DEBUG(IsFirstInPage());
sl@0
  1507
		__NK_ASSERT_DEBUG(IsOnCleanupList());
sl@0
  1508
		iFlags &= ~EOnCleanupList;
sl@0
  1509
		FreeLink().Deque();
sl@0
  1510
		}
sl@0
  1511
sl@0
  1512
	/**
sl@0
  1513
	Remove this page table from its owner and free it.
sl@0
  1514
	This is only used with page tables which map demand paged memory
sl@0
  1515
	and is intended for use in implementing #DPageTableMemoryManager.
sl@0
  1516
sl@0
  1517
	@return KErrNone if successful,
sl@0
  1518
			otherwise one of the system wide error codes.
sl@0
  1519
sl@0
  1520
	@pre #MmuLock held and #PageTablesLockIsHeld.
sl@0
  1521
	*/
sl@0
  1522
	TInt ForcedFree();
sl@0
  1523
sl@0
  1524
private:
sl@0
  1525
sl@0
  1526
#ifdef _DEBUG
sl@0
  1527
	void CheckChangeUse(const char* aName);
sl@0
  1528
	void CheckCheckUse(const char* aName);
sl@0
  1529
	void CheckAccess(const char* aName);
sl@0
  1530
	void CheckInit(const char* aName);
sl@0
  1531
#else
sl@0
  1532
	FORCE_INLINE void CheckChangeUse(const char* /*aName*/)
sl@0
  1533
		{}
sl@0
  1534
	FORCE_INLINE void CheckCheckUse(const char* /*aName*/)
sl@0
  1535
		{}
sl@0
  1536
	FORCE_INLINE void CheckAccess(const char* /*aName*/)
sl@0
  1537
		{}
sl@0
  1538
	FORCE_INLINE void CheckInit(const char* /*aName*/)
sl@0
  1539
		{}
sl@0
  1540
#endif
sl@0
  1541
	};
sl@0
  1542
sl@0
  1543
sl@0
  1544
const TInt KPageTableInfoShift = 4;
sl@0
  1545
__ASSERT_COMPILE(sizeof(SPageTableInfo)==(1<<KPageTableInfoShift));
sl@0
  1546
sl@0
  1547
/**
Return the SPageTableInfo for the page table containing the PTE \a aPtPte,
by converting the PTE's position within the page table area into an index
into the SPageTableInfo array at #KPageTableInfoBase.
*/
FORCE_INLINE SPageTableInfo* SPageTableInfo::FromPtPtr(TPte* aPtPte)
	{
	TUint ptIndex = ((TLinAddr)aPtPte-KPageTableBase)>>KPageTableShift;
	return (SPageTableInfo*)KPageTableInfoBase+ptIndex;
	}
sl@0
  1552
sl@0
  1553
/**
Return the page table associated with this SPageTableInfo, by scaling this
object's offset within the SPageTableInfo array up to a page table offset
within the page table area at #KPageTableBase.
*/
FORCE_INLINE TPte* SPageTableInfo::PageTable()
	{
	TLinAddr infoOffset = (TLinAddr)this-(TLinAddr)KPageTableInfoBase;
	return (TPte*)(KPageTableBase+(infoOffset<<(KPageTableShift-KPageTableInfoShift)));
	}
sl@0
  1563
sl@0
  1564
sl@0
  1565
sl@0
  1566
/**
sl@0
  1567
Class providing access to the mutex used to protect memory allocation operations;
sl@0
  1568
this is the mutex Mmu::iRamAllocatorMutex.
sl@0
  1569
In addition to providing locking, these functions monitor the system's free RAM
sl@0
  1570
levels and call K::CheckFreeMemoryLevel to notify the system of changes.
sl@0
  1571
*/
sl@0
  1572
class RamAllocLock
sl@0
  1573
	{
sl@0
  1574
public:
sl@0
  1575
	/**
sl@0
  1576
	Acquire the lock.
sl@0
  1577
	The lock may be acquired multiple times by a thread, and will remain locked
sl@0
  1578
	until #Unlock has been used enough times to balance this.
sl@0
  1579
	*/
sl@0
  1580
	static void Lock();
sl@0
  1581
sl@0
  1582
	/**
sl@0
  1583
	Release the lock.
sl@0
  1584
sl@0
  1585
	@pre The current thread has previously acquired the lock.
sl@0
  1586
	*/
sl@0
  1587
	static void Unlock();
sl@0
  1588
sl@0
  1589
	/**
sl@0
  1590
	Allow another thread to acquire the lock.
sl@0
  1591
	This is equivalent to #Unlock followed by #Lock, but optimised
sl@0
  1592
	to only do this if there is another thread waiting on the lock.
sl@0
  1593
sl@0
  1594
	@return True if the lock was released by this function.
sl@0
  1595
sl@0
  1596
	@pre The current thread has previously acquired the lock.
sl@0
  1597
	*/
sl@0
  1598
	static TBool Flash();
sl@0
  1599
sl@0
  1600
	/**
sl@0
  1601
	Return true if the current thread holds the lock.
sl@0
  1602
	This is used for debug checks.
sl@0
  1603
	*/
sl@0
  1604
	static TBool IsHeld();
sl@0
  1605
	};
sl@0
  1606
sl@0
  1607
sl@0
  1608
sl@0
  1609
/**
sl@0
  1610
Return true if the PageTableLock is held by the current thread.
sl@0
  1611
This lock is the mutex used to protect page table allocation; it is acquired
sl@0
  1612
with
sl@0
  1613
@code
sl@0
  1614
	::PageTables.Lock();
sl@0
  1615
@endcode
sl@0
  1616
and released with
sl@0
  1617
@code
sl@0
  1618
	::PageTables.Unlock();
sl@0
  1619
@endcode
sl@0
  1620
*/
sl@0
  1621
TBool PageTablesLockIsHeld();
sl@0
  1622
sl@0
  1623
sl@0
  1624
sl@0
  1625
/**
sl@0
  1626
Class providing access to the fast mutex used to protect various
sl@0
  1627
low level memory operations.
sl@0
  1628
sl@0
  1629
This lock must only be held for a very short and bounded time.
sl@0
  1630
*/
sl@0
  1631
class MmuLock
sl@0
  1632
	{
sl@0
  1633
public:
sl@0
  1634
	/**
sl@0
  1635
	Acquire the lock.
sl@0
  1636
	*/
sl@0
  1637
	static void Lock();
sl@0
  1638
sl@0
  1639
	/**
sl@0
  1640
	Release the lock.
sl@0
  1641
sl@0
  1642
	@pre The current thread has previously acquired the lock.
sl@0
  1643
	*/
sl@0
  1644
	static void Unlock();
sl@0
  1645
sl@0
  1646
	/**
sl@0
  1647
	Allow another thread to acquire the lock.
sl@0
  1648
	This is equivalent to #Unlock followed by #Lock, but optimised
sl@0
  1649
	to only do this if there is another thread waiting on the lock.
sl@0
  1650
sl@0
  1651
	@return True if the lock was released by this function.
sl@0
  1652
sl@0
  1653
	@pre The current thread has previously acquired the lock.
sl@0
  1654
	*/
sl@0
  1655
	static TBool Flash();
sl@0
  1656
sl@0
  1657
	/**
sl@0
  1658
	Return true if the current thread holds the lock.
sl@0
  1659
	This is used for debug checks.
sl@0
  1660
	*/
sl@0
  1661
	static TBool IsHeld();
sl@0
  1662
sl@0
  1663
	/**
sl@0
  1664
	Increment a counter and perform the action of #Flash() once a given threshold
sl@0
  1665
	value is reached. After flashing the counter is reset.
sl@0
  1666
sl@0
  1667
	This is typically used in long running loops to periodically flash the lock
sl@0
  1668
	and so avoid holding it for too long, e.g.
sl@0
  1669
sl@0
  1670
	@code
sl@0
  1671
	MmuLock::Lock();
sl@0
  1672
	TUint flash = 0;
sl@0
  1673
	const TUint KMaxInterationsWithLock = 10;
sl@0
  1674
	while(WorkToDo)
sl@0
  1675
		{
sl@0
  1676
		DoSomeWork();
sl@0
  1677
		MmuLock::Flash(flash,KMaxInterationsWithLock); // flash every N loops
sl@0
  1678
		}
sl@0
  1679
	MmuLock::Unlock();
sl@0
  1680
	@endcode
sl@0
  1681
sl@0
  1682
	@param aCounter			Reference to the counter.
sl@0
  1683
	@param aFlashThreshold	Value \a aCounter must reach before flashing the lock.
sl@0
  1684
	@param aStep			Value to add to \a aCounter.
sl@0
  1685
sl@0
  1686
	@return True if the lock was released by this function.
sl@0
  1687
sl@0
  1688
	@pre The current thread has previously acquired the lock.
sl@0
  1689
	*/
sl@0
  1690
	static FORCE_INLINE TBool Flash(TUint& aCounter, TUint aFlashThreshold, TUint aStep=1)
sl@0
  1691
		{
sl@0
  1692
		UnlockGuardCheck();
sl@0
  1693
		if((aCounter+=aStep)<aFlashThreshold)
sl@0
  1694
			return EFalse;
sl@0
  1695
		aCounter -= aFlashThreshold;
sl@0
  1696
		return MmuLock::Flash();
sl@0
  1697
		}
sl@0
  1698
sl@0
  1699
	/**
sl@0
  1700
	Begin a debug check to test that the MmuLock is not unlocked unexpectedly.
sl@0
  1701
sl@0
  1702
	This is used in situations where a series of operation must be performed
sl@0
  1703
	atomically with the MmuLock held. It is usually used via the
sl@0
  1704
	#__UNLOCK_GUARD_START macro, e.g.
sl@0
  1705
sl@0
  1706
	@code
sl@0
  1707
	__UNLOCK_GUARD_START(MmuLock);
sl@0
  1708
	SomeCode();
sl@0
  1709
	SomeMoreCode();
sl@0
  1710
	__UNLOCK_GUARD_END(MmuLock); // fault if MmuLock released by SomeCode or SomeMoreCode
sl@0
  1711
	@endcode
sl@0
  1712
	*/
sl@0
  1713
	static FORCE_INLINE void UnlockGuardStart()
sl@0
  1714
		{
sl@0
  1715
		#ifdef _DEBUG
sl@0
  1716
			++UnlockGuardNest;
sl@0
  1717
		#endif
sl@0
  1718
		}
sl@0
  1719
sl@0
  1720
	/**
sl@0
  1721
	End a debug check testing that the MmuLock is not unlocked unexpectedly.
sl@0
  1722
	This is usually used via the #__UNLOCK_GUARD_END which faults if true is returned.
sl@0
  1723
sl@0
  1724
	@see UnlockGuardStart
sl@0
  1725
sl@0
  1726
	@return True if the MmuLock was released between a previous #UnlockGuardStart
sl@0
  1727
			and the call this function.
sl@0
  1728
	*/
sl@0
  1729
	static FORCE_INLINE TBool UnlockGuardEnd()
sl@0
  1730
		{
sl@0
  1731
		#ifdef _DEBUG
sl@0
  1732
			__NK_ASSERT_DEBUG(UnlockGuardNest);
sl@0
  1733
			--UnlockGuardNest;
sl@0
  1734
			return UnlockGuardFail==0;
sl@0
  1735
		#else
sl@0
  1736
			return true;
sl@0
  1737
		#endif
sl@0
  1738
		}
sl@0
  1739
sl@0
  1740
private:
sl@0
  1741
	/**
sl@0
  1742
	Exectued whenever the lock is released to check that
sl@0
  1743
	#UnlockGuardStart and #UnlockGuardEnd are balanced.
sl@0
  1744
	*/
sl@0
  1745
	static FORCE_INLINE void UnlockGuardCheck()
sl@0
  1746
		{
sl@0
  1747
		#ifdef _DEBUG
sl@0
  1748
			if(UnlockGuardNest)
sl@0
  1749
				UnlockGuardFail = true;
sl@0
  1750
		#endif
sl@0
  1751
		}
sl@0
  1752
sl@0
  1753
private:
sl@0
  1754
	/** The lock */
sl@0
  1755
	static NFastMutex iLock;
sl@0
  1756
sl@0
  1757
#ifdef _DEBUG
sl@0
  1758
	static TUint UnlockGuardNest;
sl@0
  1759
	static TUint UnlockGuardFail;
sl@0
  1760
#endif
sl@0
  1761
	};
sl@0
  1762
sl@0
  1763
sl@0
  1764
sl@0
  1765
/**
sl@0
  1766
Interface for accessing the lock mutex being used to serialise
sl@0
  1767
explicit modifications to a specified memory object.
sl@0
  1768
sl@0
  1769
The lock mutex is either the one which was previously assigned with
sl@0
  1770
DMemoryObject::SetLock. Or, if none was set, a dynamically assigned
sl@0
  1771
mutex from #MemoryObjectMutexPool will be of 'order' #KMutexOrdMemoryObject.
sl@0
  1772
*/
sl@0
  1773
class MemoryObjectLock
sl@0
  1774
	{
sl@0
  1775
public:
sl@0
  1776
	/**
sl@0
  1777
	Acquire the lock for the specified memory object.
sl@0
  1778
	If the object has no lock, one is assigned from #MemoryObjectMutexPool.
sl@0
  1779
	*/
sl@0
  1780
	static void Lock(DMemoryObject* aMemory);
sl@0
  1781
sl@0
  1782
	/**
sl@0
  1783
	Release the lock for the specified memory object, which was acquired
sl@0
  1784
	with #Lock. If the lock was one which was dynamically assigned, and there
sl@0
  1785
	are no threads waiting for it, the the lock is unassigned from the memory
sl@0
  1786
	object.
sl@0
  1787
	*/
sl@0
  1788
	static void Unlock(DMemoryObject* aMemory);
sl@0
  1789
sl@0
  1790
	/**
sl@0
  1791
	Return true if the current thread holds lock for the specified memory object.
sl@0
  1792
	This is used for debug checks.
sl@0
  1793
	*/
sl@0
  1794
	static TBool IsHeld(DMemoryObject* aMemory);
sl@0
  1795
	};
sl@0
  1796
sl@0
  1797
sl@0
  1798
#define __UNLOCK_GUARD_START(_l) __DEBUG_ONLY(_l::UnlockGuardStart())
sl@0
  1799
#define __UNLOCK_GUARD_END(_l) __NK_ASSERT_DEBUG(_l::UnlockGuardEnd())
sl@0
  1800
sl@0
  1801
sl@0
  1802
const TUint KMutexOrdAddresSpace = KMutexOrdKernelHeap + 2;
sl@0
  1803
const TUint KMutexOrdMemoryObject = KMutexOrdKernelHeap + 1;
sl@0
  1804
const TUint KMutexOrdMmuAlloc = KMutexOrdRamAlloc + 1;
sl@0
  1805
sl@0
  1806
sl@0
  1807
#ifdef _DEBUG
// Uncomment to force the corresponding trace category on in debug builds,
// regardless of the kernel trace flags.
//#define FORCE_TRACE
//#define FORCE_TRACE2
//#define FORCE_TRACEB
//#define FORCE_TRACEP
#endif



// All tracing goes through the kernel debug port.
#define TRACE_printf Kern::Printf

// Unconditional trace.
#define TRACE_ALWAYS(t) TRACE_printf t

// General MMU trace, gated on KMMU2 unless forced.
#ifdef FORCE_TRACE
#define TRACE(t) TRACE_printf t
#else
#define TRACE(t) __KTRACE_OPT(KMMU2,TRACE_printf t)
#endif

// Secondary MMU trace, gated on KMMU2 unless forced.
#ifdef FORCE_TRACE2
#define TRACE2(t) TRACE_printf t
#else
#define TRACE2(t) __KTRACE_OPT(KMMU2,TRACE_printf t)
#endif

// Boot-time trace, gated on KMMU or KBOOT unless forced.
#ifdef FORCE_TRACEB
#define TRACEB(t) TRACE_printf t
#else
#define TRACEB(t) __KTRACE_OPT2(KMMU,KBOOT,TRACE_printf t)
#endif

// Demand paging trace, gated on KPAGING unless forced.
#ifdef FORCE_TRACEP
#define TRACEP(t) TRACE_printf t
#else
#define TRACEP(t) __KTRACE_OPT(KPAGING,TRACE_printf t)
#endif
sl@0
  1843
sl@0
  1844
sl@0
  1845
/**
The maximum number of consecutive updates to #SPageInfo structures which
should be executed without releasing the #MmuLock.

This value must be an integer power of two.
*/
const TUint KMaxPageInfoUpdatesInOneGo = 64;

/**
The maximum number of simple operations on memory page state which should
occur without releasing the #MmuLock. Examples of the operations are
read-modify-write of a Page Table Entry (PTE) or entries in a memory
object's RPageArray.

This value must be an integer power of two.
*/
const TUint KMaxPagesInOneGo = KMaxPageInfoUpdatesInOneGo/2;

/**
The maximum number of Page Directory Entries which should be updated
without releasing the #MmuLock.

This value must be an integer power of two.
*/
const TUint KMaxPdesInOneGo = KMaxPageInfoUpdatesInOneGo;
sl@0
  1870
sl@0
  1871
sl@0
  1872
/********************************************
 * MMU stuff
 ********************************************/

// Forward declarations; the full definitions live in other kernel source files.
class DRamAllocator;
class TPinArgs;
class Defrag;
sl@0
  1879
sl@0
  1880
/**
Interface to RAM allocation and MMU data structure manipulation.

There is a single instance of this class, #TheMmu.
*/
class Mmu
	{
public:
	/**
	Reason codes used when this class faults the system via #Panic.
	*/
	enum TPanic
		{
		EInvalidRamBankAtBoot,
		EInvalidReservedBankAtBoot,
		EInvalidPageTableAtBoot,
		EInvalidPdeAtBoot,
		EBadMappedPageAfterBoot,
		ERamAllocMutexCreateFailed,
		EBadFreePhysicalRam,
		EUnsafePageInfoAccess,
		EUnsafePageTableInfoAccess,
		EPhysMemSyncMutexCreateFailed,
		EDefragAllocFailed
		};

	/**
	Attribute flags used when allocating RAM pages.
	See #AllocRam etc.

	The least significant bits of these flags are used for the #TMemoryType
	value for the memory.
	*/
	enum TRamAllocFlags
		{
		// lower bits hold TMemoryType

		/**
		If this flag is set, don't wipe the contents of the memory when allocated.
		By default, for security and confidentiality reasons, the memory is filled
		with a 'wipe' value to erase the previous contents.
		*/
		EAllocNoWipe			= 1<<(KMemoryTypeShift),

		/**
		If this flag is set, any memory wiping will fill memory with the byte
		value starting at bit position #EAllocWipeByteShift in these flags.
		*/
		EAllocUseCustomWipeByte	= 1<<(KMemoryTypeShift+1),

		/**
		If this flag is set, memory allocation won't attempt to reclaim pages
		from the demand paging system.
		This is used to prevent deadlock when the paging system itself attempts
		to allocate memory for itself.
		*/
		EAllocNoPagerReclaim	= 1<<(KMemoryTypeShift+2),

		/**
		@internal
		*/
		EAllocFlagLast,

		/*
		Bit position within these flags, for the least significant bit of the
		byte value used when #EAllocUseCustomWipeByte is set.
		*/
		EAllocWipeByteShift		= 8
		};

public:
	// Initialisation, performed in numbered phases during kernel boot;
	// the *Common() variants hold the parts shared between the memory
	// model's CPU-specific implementations.
	void Init1();
	void Init1Common();
	void Init2();
	void Init2Common();
	void Init2Final();
	void Init2FinalCommon();
	void Init3();

	// Fault the system with one of the #TPanic reason codes.
	static void Panic(TPanic aPanic);

	// Page fault handler entry point, called from CPU-specific exception code.
	static TInt HandlePageFault(TLinAddr aPc, TLinAddr aFaultAddress, TUint aAccessPermissions, TAny* aExceptionInfo);

	TUint FreeRamInPages();
	TUint TotalPhysicalRamPages();

	// General RAM page allocation...
	TInt AllocRam(	TPhysAddr* aPages, TUint aCount, TRamAllocFlags aFlags, TZonePageType aZonePageType, 
					TUint aBlockZoneId=KRamZoneInvalidId, TBool aBlockRest=EFalse);
	void FreeRam(TPhysAddr* aPages, TUint aCount, TZonePageType aZonePageType);
	TInt AllocContiguousRam(TPhysAddr& aPhysAddr, TUint aCount, TUint aAlign, TRamAllocFlags aFlags);
	void FreeContiguousRam(TPhysAddr aPhysAddr, TUint aCount);

	// RAM zone configuration and queries...
	const SRamZone* RamZoneConfig(TRamZoneCallback& aCallback) const;
	void SetRamZoneConfig(const SRamZone* aZones, TRamZoneCallback aCallback);
	TInt ModifyRamZoneFlags(TUint aId, TUint aClearMask, TUint aSetMask);
	TInt GetRamZonePageCount(TUint aId, SRamZonePageCount& aPageData);
	TInt ZoneAllocPhysicalRam(TUint* aZoneIdList, TUint aZoneIdCount, TInt aBytes, TPhysAddr& aPhysAddr, TInt aAlign);
	TInt ZoneAllocPhysicalRam(TUint* aZoneIdList, TUint aZoneIdCount, TInt aNumPages, TPhysAddr* aPageList);
	TInt RamHalFunction(TInt aFunction, TAny* a1, TAny* a2);	
	void ChangePageType(SPageInfo* aPageInfo, TZonePageType aOldPageType, TZonePageType aNewPageType);

	// Allocation of physical RAM addressed directly by its physical address...
	TInt AllocPhysicalRam(TPhysAddr* aPages, TUint aCount, TRamAllocFlags aFlags);
	void FreePhysicalRam(TPhysAddr* aPages, TUint aCount);
	TInt AllocPhysicalRam(TPhysAddr& aPhysAddr, TUint aCount, TUint aAlign, TRamAllocFlags aFlags);
	void FreePhysicalRam(TPhysAddr aPhysAddr, TUint aCount);
	TInt ClaimPhysicalRam(TPhysAddr aPhysAddr, TUint aCount, TRamAllocFlags aFlags);
	void AllocatedPhysicalRam(TPhysAddr aPhysAddr, TUint aCount, TRamAllocFlags aFlags);

	// Temporary mappings; see #TTempMapping and the inline definitions
	// of MapTemp/UnmapTemp later in this header...
	TLinAddr MapTemp(TPhysAddr aPage, TUint aColour, TUint aSlot=0);
	void UnmapTemp(TUint aSlot=0);
	void RemoveAliasesForPageTable(TPhysAddr aPageTable);

	// Batch manipulation of Page Table Entries...
	static TBool MapPages(TPte* const aPtePtr, const TUint aCount, TPhysAddr* aPages, TPte aBlankPte);
	static TBool UnmapPages(TPte* const aPtePtr, TUint aCount);
	static TBool UnmapPages(TPte* const aPtePtr, TUint aCount, TPhysAddr* aPages);
	static void RemapPage(TPte* const aPtePtr, TPhysAddr& aPage, TPte aBlankPte);
	static void RestrictPagesNA(TPte* const aPtePtr, TUint aCount, TPhysAddr* aPages);
	static TBool PageInPages(TPte* const aPtePtr, const TUint aCount, TPhysAddr* aPages, TPte aBlankPte);

	// implemented in CPU-specific code...
	static TUint PteType(TMappingPermissions aPermissions, TBool aGlobal);
	static TUint PdeType(TMemoryAttributes aAttributes);
	static TPte BlankPte(TMemoryAttributes aAttributes, TUint aPteType);
	static TPde BlankPde(TMemoryAttributes aAttributes);
	static TPde BlankSectionPde(TMemoryAttributes aAttributes, TUint aPteType);
	static TBool CheckPteTypePermissions(TUint aPteType, TUint aAccessPermissions);
	static TMappingPermissions PermissionsFromPteType(TUint aPteType);
	void PagesAllocated(TPhysAddr* aPageList, TUint aCount, TRamAllocFlags aFlags, TBool aReallocate=false);
	void PageFreed(SPageInfo* aPageInfo);
	void CleanAndInvalidatePages(TPhysAddr* aPages, TUint aCount, TMemoryAttributes aAttributes, TUint aColour);
public:
	// utils, implemented in CPU-specific code...
	static TPde* PageDirectory(TInt aOsAsid);
	static TPde* PageDirectoryEntry(TInt aOsAsid, TLinAddr aAddress);
	static TPhysAddr PdePhysAddr(TPde aPde);
	static TPhysAddr PtePhysAddr(TPte aPte, TUint aPteIndex);
	static TPte* PageTableFromPde(TPde aPde);
	static TPte* SafePageTableFromPde(TPde aPde);
	static TPhysAddr SectionBaseFromPde(TPde aPde);
	static TPte* PtePtrFromLinAddr(TLinAddr aAddress, TInt aOsAsid);
	static TPte* SafePtePtrFromLinAddr(TLinAddr aAddress, TInt aOsAsid);
	static TPhysAddr PageTablePhysAddr(TPte* aPt);
	static TPhysAddr LinearToPhysical(TLinAddr aAddr, TInt aOsAsid=KKernelOsAsid);
	static TPhysAddr UncheckedLinearToPhysical(TLinAddr aAddr, TInt aOsAsid);
	static TPte MakePteInaccessible(TPte aPte, TBool aReadOnly);
	static TPte MakePteAccessible(TPte aPte, TBool aWrite);
	static TBool IsPteReadOnly(TPte aPte);
	static TBool IsPteMoreAccessible(TPte aNewPte, TPte aOldPte);
	static TBool IsPteInaccessible(TPte aPte);
	static TBool PdeMapsPageTable(TPde aPde);
	static TBool PdeMapsSection(TPde aPde);

	// Cache maintenance around DMA transfers...
	void SyncPhysicalMemoryBeforeDmaWrite(TPhysAddr* aPages, TUint aColour, TUint aOffset, TUint aSize, TUint32 aMapAttr);
	void SyncPhysicalMemoryBeforeDmaRead (TPhysAddr* aPages, TUint aColour, TUint aOffset, TUint aSize, TUint32 aMapAttr);
	void SyncPhysicalMemoryAfterDmaRead  (TPhysAddr* aPages, TUint aColour, TUint aOffset, TUint aSize, TUint32 aMapAttr);

	static TPte SectionToPageEntry(TPde& aPde);
	static TPde PageToSectionEntry(TPte aPte, TPde aPde);
	static TMemoryAttributes CanonicalMemoryAttributes(TMemoryAttributes aAttr);

public:
	/**
	Class representing the resources and methods required to create temporary
	mappings of physical memory pages in order to make them accessible to
	software.
	These are required by various memory model functions and are created only
	during system boot.
	*/
	class TTempMapping
		{
	public:
		void Alloc(TUint aNumPages);
		TLinAddr Map(TPhysAddr aPage, TUint aColour);
		TLinAddr Map(TPhysAddr aPage, TUint aColour, TPte aBlankPte);
		TLinAddr Map(TPhysAddr* aPages, TUint aCount, TUint aColour);
		void Unmap();
		void Unmap(TBool aIMBRequired);
		FORCE_INLINE TTempMapping()
			: iSize(0)
			{}
	public:
		TLinAddr iLinAddr;		///< Virtual address of the memory page mapped by #iPtePtr.
		TPte* iPtePtr;			///< Pointer to first PTE allocated to this object.
	private:
		TPte iBlankPte;			///< PTE value to use for mapping pages, with the physical address component equal to zero.
		TUint8 iSize;			///< Maximum number of pages which can be mapped in one go.
		TUint8 iCount;			///< Number of pages currently mapped.
		TUint8 iColour;			///< Colour of any pages mapped (acts as index from #iLinAddr and #iPtePtr).
		TUint8 iSpare1;			///< Unused padding.
	private:
		static TLinAddr iNextLinAddr;	///< Next virtual address available to #Alloc (shared by all instances).
		};
private:
	enum { KNumTempMappingSlots=2 };	///< Number of slots available to #MapTemp / #UnmapTemp.
	/**
	Temporary mappings used by various functions.
	Use of these is serialised by the #RamAllocLock.
	*/
	TTempMapping iTempMap[KNumTempMappingSlots];

	TTempMapping iPhysMemSyncTemp;	///< Temporary mapping used for physical memory sync.
	DMutex* 	 iPhysMemSyncMutex;	///< Mutex used to serialise use of #iPhysMemSyncTemp.

public:
	TPte iTempPteCached;			///< PTE value for cached temporary mappings
	TPte iTempPteUncached;			///< PTE value for uncached temporary mappings
	TPte iTempPteCacheMaintenance;	///< PTE value for temporary mapping of cache maintenance
private:
	DRamAllocator* iRamPageAllocator;			///< The RAM allocator used for managing free RAM pages.
	const SRamZone* iRamZones;					///< A pointer to the RAM zone configuration from the variant.
	TRamZoneCallback iRamZoneCallback;			///< Pointer to the RAM zone callback function.
	Defrag* iDefrag;							///< The RAM defrag class implementation.

	/**
	A counter incremented every time Mmu::PagesAllocated invalidates the L1 cache.
	This is used as part of a cache maintenance optimisation.
	*/
	TInt iCacheInvalidateCounter;

	/**
	Number of free RAM pages which are cached at L1 and have
	SPageInfo::CacheInvalidateCounter()==#iCacheInvalidateCounter.
	This is used as part of a cache maintenance optimisation.
	*/
	TInt iCacheInvalidatePageCount;

public:
	/**
	Linked list of threads which have an active IPC alias. I.e. have called
	DMemModelThread::Alias. Threads are linked by their DMemModelThread::iAliasLink member.
	Updates to this list are protected by the #MmuLock.
	*/
	SDblQue iAliasList;

	/**
	The mutex used to protect RAM allocation.
	This is the mutex #RamAllocLock operates on.
	*/
	DMutex* iRamAllocatorMutex;

private:
	/**
	Number of nested calls to RamAllocLock::Lock.
	*/
	TUint iRamAllocLockCount;

	/**
	Set by various memory allocation routines to indicate that a memory allocation
	has failed. This is used by #RamAllocLock in its management of out-of-memory
	notifications.
	*/
	TBool iRamAllocFailed;

	/**
	Saved value for #FreeRamInPages which is used by #RamAllocLock in its management
	of memory level change notifications.
	*/
	TUint iRamAllocInitialFreePages;

	friend class RamAllocLock;
private:
	// NOTE(review): implemented elsewhere; presumably a debug consistency
	// check of RAM allocator/page state - confirm against the implementation.
	void VerifyRam();
	};
sl@0
  2138
sl@0
  2139
/**
The single instance of class #Mmu.
*/
extern Mmu TheMmu;
sl@0
  2143
sl@0
  2144
sl@0
  2145
#ifndef _DEBUG
/**
Perform a page table walk to return the physical address of
the memory mapped at virtual address \a aLinAddr in the
address space \a aOsAsid.

If the page table used was not one allocated by the kernel
then the results are unpredictable and may cause a system fault.

@pre #MmuLock held.
*/
FORCE_INLINE TPhysAddr Mmu::LinearToPhysical(TLinAddr aAddr, TInt aOsAsid)
	{
	// Release builds forward straight to the unchecked walk. Debug builds
	// omit this inline version; presumably a checked out-of-line
	// implementation is provided elsewhere - confirm in the .cpp sources.
	return Mmu::UncheckedLinearToPhysical(aAddr,aOsAsid);
	}
#endif
sl@0
  2161
sl@0
  2162
sl@0
  2163
// Compile-time check that all TRamAllocFlags flag bits lie below
// EAllocWipeByteShift, so a custom wipe byte can never alias a flag bit.
__ASSERT_COMPILE((Mmu::EAllocFlagLast>>Mmu::EAllocWipeByteShift)==0); // make sure flags don't run into wipe byte value
sl@0
  2164
sl@0
  2165
sl@0
  2166
/**
sl@0
  2167
Create a temporary mapping of a physical page.
sl@0
  2168
The RamAllocatorMutex must be held before this function is called and not released
sl@0
  2169
until after UnmapTemp has been called.
sl@0
  2170
sl@0
  2171
@param aPage	The physical address of the page to be mapped.
sl@0
  2172
@param aColour	The 'colour' of the page if relevant.
sl@0
  2173
@param aSlot	Slot number to use, must be less than Mmu::KNumTempMappingSlots.
sl@0
  2174
sl@0
  2175
@return The linear address of where the page has been mapped.
sl@0
  2176
*/
sl@0
  2177
FORCE_INLINE TLinAddr Mmu::MapTemp(TPhysAddr aPage, TUint aColour, TUint aSlot)
sl@0
  2178
	{
sl@0
  2179
//	Kern::Printf("Mmu::MapTemp(0x%08x,%d,%d)",aPage,aColour,aSlot);
sl@0
  2180
	__NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
sl@0
  2181
	__NK_ASSERT_DEBUG(aSlot<KNumTempMappingSlots);
sl@0
  2182
	return iTempMap[aSlot].Map(aPage,aColour);
sl@0
  2183
	}
sl@0
  2184
sl@0
  2185
sl@0
  2186
/**
sl@0
  2187
Remove the temporary mapping created with MapTemp.
sl@0
  2188
sl@0
  2189
@param aSlot	Slot number which was used when temp mapping was made.
sl@0
  2190
*/
sl@0
  2191
FORCE_INLINE void Mmu::UnmapTemp(TUint aSlot)
sl@0
  2192
	{
sl@0
  2193
//	Kern::Printf("Mmu::UnmapTemp(%d)",aSlot);
sl@0
  2194
	__NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
sl@0
  2195
	__NK_ASSERT_DEBUG(aSlot<KNumTempMappingSlots);
sl@0
  2196
	iTempMap[aSlot].Unmap();
sl@0
  2197
	}
sl@0
  2198
sl@0
  2199
sl@0
  2200
/**
Class representing the resources and arguments needed for various
memory pinning operations.

The term 'replacement pages' in this documentation means excess
RAM pages which have been allocated to the demand paging pool so
that when a demand paged memory is pinned and removed the pool
does not become too small.

Replacement pages are allocated with #AllocReplacementPages and their
number remembered in #iReplacementPages. When a memory pinning operation
removes pages from the paging pool it will reduce #iReplacementPages
accordingly. At the end of the pinning operation, #FreeReplacementPages
is used to free any unused replacement pages.
*/
class TPinArgs
	{
public:
	/**
	Boolean value set to true if the requester of the pinning operation
	will only read from the pinned memory, not write to it.
	This is used as an optimisation to avoid unnecessarily marking
	demand paged memory as dirty.
	*/
	TBool iReadOnly;

	/**
	Boolean value set to true if sufficient replacement pages already exist
	in the demand paging pool and #AllocReplacementPages does not need
	to actually allocate any.
	*/
	TBool iUseReserve;

	/**
	The number of replacement pages allocated to this object by #AllocReplacementPages.
	A value of #EUseReserveForPinReplacementPages indicates that #iUseReserve
	was true, and there is sufficient RAM already reserved for the operation
	being attempted.
	*/
	TUint iReplacementPages;

	/**
	The number of page tables which have been pinned during the course
	of an operation. This is the number of valid entries written to
	#iPinnedPageTables.
	*/
	TUint iNumPinnedPageTables;

	/**
	Pointer to the location to store the addresses of any page tables
	which have been pinned during the course of an operation. This is
	incremented as entries are added.

	The null-pointer indicates that page tables do not require pinning.
	*/
	TPte** iPinnedPageTables;

public:
	/**
	Construct an empty TPinArgs, one which owns no resources.
	*/
	inline TPinArgs()
		: iReadOnly(0), iUseReserve(0), iReplacementPages(0), iNumPinnedPageTables(0), iPinnedPageTables(0)
		{
		}

	/**
	Return true if this TPinArgs has at least \a aRequired number of
	replacement pages allocated.
	*/
	FORCE_INLINE TBool HaveSufficientPages(TUint aRequired)
		{
		return iReplacementPages>=aRequired; // Note, EUseReserveForPinReplacementPages will always return true.
		}

	/**
	Allocate replacement pages for this TPinArgs so that it has at least
	\a aNumPages.
	*/
	TInt AllocReplacementPages(TUint aNumPages);

	/**
	Free all replacement pages which this TPinArgs still owns.
	*/
	void FreeReplacementPages();

#ifdef _DEBUG
	~TPinArgs();
#endif

	/**
	Value used to indicate that replacement pages are to come
	from an already allocated reserve and don't need specially
	allocating.
	*/
	enum { EUseReserveForPinReplacementPages = 0xffffffffu };
	};
sl@0
  2297
sl@0
  2298
sl@0
  2299
#ifdef _DEBUG
sl@0
  2300
inline TPinArgs::~TPinArgs()
sl@0
  2301
	{
sl@0
  2302
	__NK_ASSERT_DEBUG(!iReplacementPages);
sl@0
  2303
	}
sl@0
  2304
#endif
sl@0
  2305
sl@0
  2306
sl@0
  2307
/**
Enumeration used in various RestrictPages APIs to specify the type of restriction to apply.
*/
enum TRestrictPagesType
	{
	/**
	Make all mappings of the page not accessible.
	Pinned mappings will veto this operation.
	*/
	ERestrictPagesNoAccess			 = 1,

	/**
	Demand paged memory being made 'old'.
	Specific case of ERestrictPagesNoAccess.
	*/
	ERestrictPagesNoAccessForOldPage = ERestrictPagesNoAccess|0x80000000,

	/**
	For page moving, pinned mappings always veto the moving operation.
	*/
	ERestrictPagesForMovingFlag  = 0x40000000,

	/**
	Movable memory being made no-access whilst it is being copied.
	Special case of ERestrictPagesNoAccess where pinned mappings always veto 
	this operation even if they are read-only mappings.
	*/
	ERestrictPagesNoAccessForMoving  = ERestrictPagesNoAccess|ERestrictPagesForMovingFlag,
	};
sl@0
  2336
sl@0
  2337
#include "xmmu.h"
sl@0
  2338
sl@0
  2339
#endif