os/kernelhwsrv/kernel/eka/memmodel/epoc/flexible/mmu/mmu.h
changeset 0 bde4ae8d615e
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/os/kernelhwsrv/kernel/eka/memmodel/epoc/flexible/mmu/mmu.h	Fri Jun 15 03:10:57 2012 +0200
     1.3 @@ -0,0 +1,2339 @@
     1.4 +// Copyright (c) 1998-2009 Nokia Corporation and/or its subsidiary(-ies).
     1.5 +// All rights reserved.
     1.6 +// This component and the accompanying materials are made available
     1.7 +// under the terms of the License "Eclipse Public License v1.0"
     1.8 +// which accompanies this distribution, and is available
     1.9 +// at the URL "http://www.eclipse.org/legal/epl-v10.html".
    1.10 +//
    1.11 +// Initial Contributors:
    1.12 +// Nokia Corporation - initial contribution.
    1.13 +//
    1.14 +// Contributors:
    1.15 +//
    1.16 +// Description:
    1.17 +//
    1.18 +
    1.19 +/**
    1.20 + @file
    1.21 + @internalComponent
    1.22 +*/
    1.23 +
    1.24 +#ifndef __MMU_H__
    1.25 +#define __MMU_H__
    1.26 +
    1.27 +#define _USE_OLDEST_LISTS
    1.28 +
    1.29 +#include "mm.h"
    1.30 +#include "mmboot.h"
    1.31 +#include <mmtypes.h>
    1.32 +#include <kern_priv.h>
    1.33 +
    1.34 +
    1.35 +class DCoarseMemory;
    1.36 +class DMemoryObject;
    1.37 +class DMemoryMapping;
    1.38 +
    1.39 +/**
    1.40 +A page information structure giving the current use and state for a
    1.41 +RAM page being managed by the kernel.
    1.42 +
    1.43 +Any modification to the contents of any SPageInfo structure requires the
     1.44 +#MmuLock to be held. The exception to this is when a page is unused (#Type()==#EUnused);
     1.45 +in that case only the #RamAllocLock is required to use #SetAllocated(), #SetUncached(),
    1.46 +and #CacheInvalidateCounter().
    1.47 +
    1.48 +These structures are stored in an array at the virtual address #KPageInfoLinearBase
    1.49 +which is indexed by the physical address of the page they are associated with, divided
    1.50 +by #KPageSize. The memory for this array is allocated by the bootstrap and it has
    1.51 +unallocated regions where no memory is required to store SPageInfo structures.
    1.52 +These unallocated memory regions are indicated by zeros in the bitmap stored at
    1.53 +#KPageInfoMap.
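          +
          +For example, the lookup performed by #FromPhysAddr amounts to simple pointer
          +arithmetic; a minimal sketch, where 'physAddr' is a hypothetical physical
          +address for a page which has an SPageInfo:
          +
          +@code
          +SPageInfo* info = ((SPageInfo*)KPageInfoLinearBase) + (physAddr >> KPageShift);
          +@endcode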
    1.54 +*/
    1.55 +struct SPageInfo
    1.56 +	{
    1.57 +	/**
    1.58 +	Enumeration for the usage of a RAM page. This is stored in #iType.
    1.59 +	*/
    1.60 +	enum TType
    1.61 +		{
    1.62 +		/**
    1.63 +		No physical RAM exists for this page.
    1.64 +
    1.65 +		This represents memory which doesn't exist or is not part of the physical
    1.66 +		address range being managed by the kernel.
    1.67 +		*/
    1.68 +		EInvalid,
    1.69 +
    1.70 +		/**
    1.71 +		RAM fixed at boot time.
    1.72 +
    1.73 +		This is for memory which was allocated by the bootstrap and which
    1.74 +		the kernel does not actively manage.
    1.75 +		*/
    1.76 +		EFixed,
    1.77 +
    1.78 +		/**
    1.79 +		Page is unused.
    1.80 +
    1.81 +		The page is either free memory in Mmu::iRamPageAllocator or the demand
    1.82 +		paging 'live' list.
    1.83 +
    1.84 +		To change from or to this type the #RamAllocLock must be held.
    1.85 +		*/
    1.86 +		EUnused,
    1.87 +
    1.88 +		/**
    1.89 +		Page is in an indeterminate state.
    1.90 +
     1.91 +		A page is placed into this state by Mmu::PagesAllocated when it is allocated
     1.92 +		(ceases to be #EUnused); it leaves this state once its new use is determined, e.g. via #SetPhysAlloc or #SetManaged.
    1.93 +		*/
    1.94 +		EUnknown,
    1.95 +
    1.96 +		/**
    1.97 +		Page was allocated with Mmu::AllocPhysicalRam, Mmu::ClaimPhysicalRam
    1.98 +		or is part of a reserved RAM bank set at system boot.
    1.99 +		*/
   1.100 +		EPhysAlloc,
   1.101 +
   1.102 +		/**
   1.103 +		Page is owned by a memory object.
   1.104 +
   1.105 +		#iOwner will point to the owning memory object and #iIndex will
   1.106 +		be the page index into its memory for this page.
   1.107 +		*/
   1.108 +		EManaged,
   1.109 +
   1.110 +		/**
   1.111 +		Page is being used as a shadow page.
   1.112 +
   1.113 +		@see DShadowPage.
   1.114 +		*/
   1.115 +		EShadow
   1.116 +		};
   1.117 +
   1.118 +
   1.119 +	/**
   1.120 +	Flags stored in #iFlags.
   1.121 +
   1.122 +	The least significant bits of these flags are used for the #TMemoryAttributes
   1.123 +	value for the page.
   1.124 +	*/
   1.125 +	enum TFlags
   1.126 +		{
   1.127 +		// lower bits hold TMemoryAttribute value for this page
   1.128 +
   1.129 +		/**
   1.130 +		Flag set to indicate that the page has writable mappings.
   1.131 +		(This is to facilitate demand paged memory.)
   1.132 +		*/
   1.133 +		EWritable			= 1<<(EMemoryAttributeShift),
   1.134 +
   1.135 +		/**
   1.136 +		Flag set to indicate that the memory page contents may be different
   1.137 +		to those previously saved to backing store (contents are 'dirty').
    1.138 +		This is set whenever a page gains a writeable mapping and is only ever
    1.139 +		cleared once a demand paging memory manager 'cleans' the page.
   1.140 +		*/
   1.141 +		EDirty				= 1<<(EMemoryAttributeShift+1)
   1.142 +		};
   1.143 +
   1.144 +
   1.145 +	/**
   1.146 +	State for the page when being used to contain demand paged content.
   1.147 +	*/
   1.148 +	enum TPagedState
   1.149 +		{
   1.150 +		/**
    1.151 +		Page is not being managed for demand paging purposes, or it has been transiently
    1.152 +		removed from the demand paging live list.
   1.153 +		*/
   1.154 +		EUnpaged 			= 0x0,
   1.155 +
   1.156 +		/**
   1.157 +		Page is in the live list as a young page.
   1.158 +		*/
   1.159 +		EPagedYoung 		= 0x1,
   1.160 +
   1.161 +		/**
   1.162 +		Page is in the live list as an old page.
   1.163 +		*/
   1.164 +		EPagedOld 			= 0x2,
   1.165 +
   1.166 +		/**
    1.167 +		Page was pinned, but it has since been moved and not yet freed.
   1.168 +		*/
   1.169 +		EPagedPinnedMoved	= 0x3,
   1.170 +
   1.171 +		/**
   1.172 +		Page has been removed from live list to prevent contents being paged-out.
   1.173 +		*/
   1.174 +		// NOTE - This must be the same value as EStatePagedLocked as defined in mmubase.h
   1.175 +		EPagedPinned 		= 0x4,
   1.176 +
   1.177 +#ifdef _USE_OLDEST_LISTS
   1.178 +		/**
   1.179 +		Page is in the live list as one of oldest pages that is clean.
   1.180 +		*/
   1.181 +		EPagedOldestClean	= 0x5,
   1.182 +
   1.183 +		/**
   1.184 +		Page is in the live list as one of oldest pages that is dirty.
   1.185 +		*/
   1.186 +		EPagedOldestDirty 	= 0x6
   1.187 +#endif
   1.188 +		};
   1.189 +
   1.190 +
   1.191 +	/**
   1.192 +	Additional flags, stored in #iFlags2.
   1.193 +	*/
   1.194 +	enum TFlags2
   1.195 +		{
   1.196 +		/**
   1.197 +		When #iPagedState==#EPagedPinned this indicates the page is a 'reserved' page
    1.198 +		and does not increase the free page count when returned to the live list.
   1.199 +		*/
   1.200 +		EPinnedReserve	= 1<<0,
   1.201 +		};
   1.202 +
   1.203 +private:
   1.204 +	/**
   1.205 +	Value from enum #TType, returned by #Type().
   1.206 +	*/
   1.207 +	TUint8 iType;
   1.208 +
   1.209 +	/**
   1.210 +	Bitmask of values from #TFlags, returned by #Flags().
   1.211 +	*/
   1.212 +	TUint8 iFlags;
   1.213 +
   1.214 +	/**
   1.215 +	Value from enum #TPagedState, returned by #PagedState().
   1.216 +	*/
   1.217 +	TUint8 iPagedState;
   1.218 +
   1.219 +	/**
   1.220 +	Bitmask of values from #TFlags2.
   1.221 +	*/
   1.222 +	TUint8 iFlags2;
   1.223 +
   1.224 +	union
   1.225 +		{
   1.226 +		/**
   1.227 +		The memory object which owns this page.
    1.228 +		Always set for #EManaged pages; can also be set for #EPhysAlloc pages.
   1.229 +		*/
   1.230 +		DMemoryObject* iOwner;
   1.231 +
   1.232 +		/**
   1.233 +		A pointer to the SPageInfo of the page that is being shadowed.
   1.234 +		For use with #EShadow pages only.
   1.235 +		*/
   1.236 +		SPageInfo* iOriginalPageInfo;
   1.237 +		};
   1.238 +
   1.239 +	/**
    1.240 +	The index for this page within the owning object's (#iOwner) memory.
   1.241 +	*/
   1.242 +	TUint32 iIndex;
   1.243 +
   1.244 +	/**
   1.245 +	Pointer identifying the current modifier of the page. See #SetModifier.
   1.246 +	*/
   1.247 +	TAny* iModifier;
   1.248 +
   1.249 +	/**
   1.250 +	Storage location for data specific to the memory manager object handling this page.
   1.251 +	See #SetPagingManagerData.
   1.252 +	*/
   1.253 +	TUint32 iPagingManagerData;
   1.254 +
   1.255 +	/**
    1.256 +	Union of values which vary depending on the current value of #iType.
   1.257 +	*/
   1.258 +	union
   1.259 +		{
   1.260 +		/**
   1.261 +		When #iType==#EPhysAlloc, this stores a count of the number of memory objects
   1.262 +		this page has been added to.
   1.263 +		*/
   1.264 +		TUint32 iUseCount;
   1.265 +
   1.266 +		/**
   1.267 +		When #iType==#EUnused, this stores the value of Mmu::iCacheInvalidateCounter
   1.268 +		at the time the page was freed. This is used for some cache maintenance optimisations.
   1.269 +		*/
   1.270 +		TUint32 iCacheInvalidateCounter;
   1.271 +
   1.272 +		/**
   1.273 +		When #iType==#EManaged, this holds the count of the number of times the page was pinned.
   1.274 +		This will only be non-zero for demand paged memory.
   1.275 +		*/
   1.276 +		TUint32 iPinCount;
   1.277 +		};
   1.278 +
   1.279 +public:
   1.280 +	/**
   1.281 +	Used for placing page into linked lists. E.g. the various demand paging live lists.
   1.282 +	*/
   1.283 +	SDblQueLink iLink;
   1.284 +
   1.285 +public:
   1.286 +	/**
   1.287 +	Return the SPageInfo for a given page of physical RAM.
   1.288 +	*/
   1.289 +	static SPageInfo* FromPhysAddr(TPhysAddr aAddress);
   1.290 +
   1.291 +	/**
    1.292 +	Return the SPageInfo for a given page of physical RAM.
   1.293 +	If the address has no SPageInfo, then a null pointer is returned.
   1.294 +	*/
   1.295 +	static SPageInfo* SafeFromPhysAddr(TPhysAddr aAddress);
   1.296 +
   1.297 +	/**
    1.298 +	Return the physical address of the RAM page with which this SPageInfo object is associated.
   1.299 +	*/
   1.300 +	FORCE_INLINE TPhysAddr PhysAddr();
   1.301 +
   1.302 +	/**
   1.303 +	Return a SPageInfo by conversion from the address of its embedded link member #iLink.
   1.304 +	*/
   1.305 +	FORCE_INLINE static SPageInfo* FromLink(SDblQueLink* aLink)
   1.306 +		{
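          +		// Standard 'container of' conversion: subtract the offset of the
          +		// embedded iLink member from the link's address.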
   1.307 +		return (SPageInfo*)((TInt)aLink-_FOFF(SPageInfo,iLink));
   1.308 +		}
   1.309 +
   1.310 +	//
   1.311 +	// Getters...
   1.312 +	//
   1.313 +
   1.314 +	/**
   1.315 +	Return the current #TType value stored in #iType.
   1.316 +	@pre #MmuLock held.
   1.317 +	*/
   1.318 +	FORCE_INLINE TType Type()
   1.319 +		{
   1.320 +		CheckAccess("Type");
   1.321 +		return (TType)iType;
   1.322 +		}
   1.323 +
   1.324 +	/**
   1.325 +	Return the current value of #iFlags.
   1.326 +	@pre #MmuLock held (if \a aNoCheck false).
   1.327 +	*/
   1.328 +	FORCE_INLINE TUint Flags(TBool aNoCheck=false)
   1.329 +		{
   1.330 +		if(!aNoCheck)
   1.331 +			CheckAccess("Flags");
   1.332 +		return iFlags;
   1.333 +		}
   1.334 +
   1.335 +	/**
   1.336 +	Return the current value of #iPagedState.
   1.337 +	@pre #MmuLock held.
   1.338 +	*/
   1.339 +	FORCE_INLINE TPagedState PagedState()
   1.340 +		{
   1.341 +		CheckAccess("PagedState");
   1.342 +		return (TPagedState)iPagedState;
   1.343 +		}
   1.344 +
   1.345 +	/**
   1.346 +	Return the current value of #iOwner.
   1.347 +	@pre #MmuLock held.
   1.348 +	*/
   1.349 +	FORCE_INLINE DMemoryObject* Owner()
   1.350 +		{
   1.351 +		CheckAccess("Owner");
   1.352 +		return iOwner;
   1.353 +		}
   1.354 +
   1.355 +	/**
   1.356 +	Return the current value of #iIndex.
   1.357 +	@pre #MmuLock held (if \a aNoCheck false).
   1.358 +	*/
   1.359 +	FORCE_INLINE TUint32 Index(TBool aNoCheck=false)
   1.360 +		{
   1.361 +		if(!aNoCheck)
   1.362 +			CheckAccess("Index");
   1.363 +		return iIndex;
   1.364 +		}
   1.365 +
   1.366 +	/**
   1.367 +	Return the current value of #iModifier.
    1.368 +	@pre #MmuLock held.
   1.369 +	*/
   1.370 +	FORCE_INLINE TAny* Modifier()
   1.371 +		{
   1.372 +		CheckAccess("Modifier");
   1.373 +		return iModifier;
   1.374 +		}
   1.375 +
   1.376 +
   1.377 +	//
   1.378 +	// Setters..
   1.379 +	//
   1.380 +
   1.381 +	/**
   1.382 +	Set this page as type #EFixed.
   1.383 +	This is only used during boot by Mmu::Init2Common.
   1.384 +	*/
   1.385 +	inline void SetFixed(TUint32 aIndex=0)
   1.386 +		{
   1.387 +		CheckAccess("SetFixed");
   1.388 +		Set(EFixed,0,aIndex);
   1.389 +		}
   1.390 +
   1.391 +	/**
   1.392 +	Set this page as type #EUnused.
   1.393 +
   1.394 +	@pre #MmuLock held.
   1.395 +	@pre #RamAllocLock held if previous page type != #EUnknown.
   1.396 +
   1.397 +	@post #iModifier==0 to indicate that page usage has changed.
   1.398 +	*/
   1.399 +	inline void SetUnused()
   1.400 +		{
   1.401 +		CheckAccess("SetUnused",ECheckNotUnused|((iType!=EUnknown)?(TInt)ECheckRamAllocLock:0));
   1.402 +		iType = EUnused;
   1.403 +		iModifier = 0;
    1.404 +	// do not modify iFlags or iIndex in this function because cache cleaning operations performed during page allocation rely on these values
   1.405 +		}
   1.406 +
   1.407 +	/**
   1.408 +	Set this page as type #EUnknown.
   1.409 +	This is only used by Mmu::PagesAllocated.
   1.410 +
   1.411 +	@pre #RamAllocLock held.
   1.412 +
   1.413 +	@post #iModifier==0 to indicate that page usage has changed.
   1.414 +	*/
   1.415 +	inline void SetAllocated()
   1.416 +		{
   1.417 +		CheckAccess("SetAllocated",ECheckUnused|ECheckRamAllocLock|ENoCheckMmuLock);
   1.418 +		iType = EUnknown;
   1.419 +		iModifier = 0;
   1.420 +		// do not modify iFlags or iIndex in this function because cache cleaning operations rely on using this value
   1.421 +		}
   1.422 +
   1.423 +	/**
   1.424 +	Set this page as type #EPhysAlloc.
   1.425 +	@param aOwner	 Optional value for #iOwner.
   1.426 +	@param aIndex	 Optional value for #iIndex.
   1.427 +
   1.428 +	@pre #MmuLock held.
   1.429 +
   1.430 +	@post #iModifier==0 to indicate that page usage has changed.
   1.431 +	*/
   1.432 +	inline void SetPhysAlloc(DMemoryObject* aOwner=0, TUint32 aIndex=0)
   1.433 +		{
   1.434 +		CheckAccess("SetPhysAlloc");
   1.435 +		Set(EPhysAlloc,aOwner,aIndex);
   1.436 +		iUseCount = 0;
   1.437 +		}
   1.438 +
   1.439 +	/**
   1.440 +	Set this page as type #EManaged.
   1.441 +
   1.442 +	@param aOwner	Value for #iOwner.
   1.443 +	@param aIndex 	Value for #iIndex.
   1.444 +	@param aFlags 	Value for #iFlags (aOwner->PageInfoFlags()).
   1.445 +
   1.446 +	@pre #MmuLock held.
   1.447 +
   1.448 +	@post #iModifier==0 to indicate that page usage has changed.
   1.449 +	*/
   1.450 +	inline void SetManaged(DMemoryObject* aOwner, TUint32 aIndex, TUint8 aFlags)
   1.451 +		{
   1.452 +		CheckAccess("SetManaged");
   1.453 +		Set(EManaged,aOwner,aIndex);
   1.454 +		iFlags = aFlags;
   1.455 +		iPinCount = 0;
   1.456 +		}
   1.457 +
   1.458 +	/**
   1.459 +	Set this page as type #EShadow.
   1.460 +
   1.461 +	This is for use by #DShadowPage.
   1.462 +
   1.463 +	@param aIndex 	Value for #iIndex.
   1.464 +	@param aFlags 	Value for #iFlags.
   1.465 +
   1.466 +	@pre #MmuLock held.
   1.467 +
   1.468 +	@post #iModifier==0 to indicate that page usage has changed.
   1.469 +	*/
   1.470 +	inline void SetShadow(TUint32 aIndex, TUint8 aFlags)
   1.471 +		{
   1.472 +		CheckAccess("SetShadow");
   1.473 +		Set(EShadow,0,aIndex);
   1.474 +		iFlags = aFlags;
   1.475 +		}
   1.476 +
   1.477 +	/**
   1.478 +	Store a pointer to the SPageInfo of the page that this page is shadowing.
   1.479 +
   1.480 +	@param aOrigPageInfo	Pointer to the SPageInfo that this page is shadowing
   1.481 +
   1.482 +	@pre #MmuLock held.
   1.483 +	*/
   1.484 +	inline void SetOriginalPage(SPageInfo* aOrigPageInfo)
   1.485 +		{
   1.486 +		CheckAccess("SetOriginalPage");
   1.487 +		__NK_ASSERT_DEBUG(iType == EShadow);
   1.488 +		__NK_ASSERT_DEBUG(!iOriginalPageInfo);
   1.489 +		iOriginalPageInfo = aOrigPageInfo;
   1.490 +		}
   1.491 +
   1.492 +	/**
    1.493 +	Returns a pointer to the SPageInfo of the page that this page is shadowing.
   1.494 +
   1.495 +	@return	A pointer to the SPageInfo that this page is shadowing
   1.496 +
   1.497 +	@pre #MmuLock held.
   1.498 +	*/
   1.499 +	inline SPageInfo* GetOriginalPage()
   1.500 +		{
   1.501 +		CheckAccess("GetOriginalPage");
   1.502 +		__NK_ASSERT_DEBUG(iType == EShadow);
   1.503 +		__NK_ASSERT_DEBUG(iOriginalPageInfo);
   1.504 +		return iOriginalPageInfo;
   1.505 +		}
   1.506 +
   1.507 +
   1.508 +private:
   1.509 +	/** Internal implementation factor for methods which set page type. */
   1.510 +	FORCE_INLINE void Set(TType aType, DMemoryObject* aOwner, TUint32 aIndex)
   1.511 +		{
   1.512 +		CheckAccess("Set",ECheckNotAllocated|ECheckNotPaged);
   1.513 +		(TUint32&)iType = aType; // also clears iFlags, iFlags2 and iPagedState
   1.514 +		iOwner = aOwner;
   1.515 +		iIndex = aIndex;
   1.516 +		iModifier = 0;
   1.517 +		}
   1.518 +
   1.519 +public:
   1.520 +
   1.521 +
   1.522 +	//
   1.523 +	//
   1.524 +	//
   1.525 +
   1.526 +	/**
   1.527 +	Set #iFlags to indicate that the contents of this page have been removed from
   1.528 +	any caches.
   1.529 +
   1.530 +	@pre #MmuLock held if #iType!=#EUnused, #RamAllocLock held if #iType==#EUnused.
   1.531 +	*/
   1.532 +	FORCE_INLINE void SetUncached()
   1.533 +		{
   1.534 +		CheckAccess("SetUncached",iType==EUnused ? ECheckRamAllocLock|ENoCheckMmuLock : 0);
   1.535 +		__NK_ASSERT_DEBUG(iType==EUnused || (iType==EPhysAlloc && iUseCount==0));
   1.536 +		iFlags = EMemAttNormalUncached;
   1.537 +		}
   1.538 +
   1.539 +	/**
   1.540 +	Set memory attributes and colour for a page of type #EPhysAlloc.
   1.541 +	
   1.542 +	This is set the first time a page of type #EPhysAlloc is added to a memory
   1.543 +	object with DMemoryManager::AddPages or DMemoryManager::AddContiguous.
    1.544 +	The set values are used to check that constraints are met if the page is
   1.545 +	also added to other memory objects.
   1.546 +
   1.547 +	@param aIndex	The page index within a memory object at which this page
   1.548 +					has been added. This is stored in #iIndex and used to determine
   1.549 +					the page's 'colour'.
   1.550 +	@param aFlags 	Value for #iFlags. This sets the memory attributes for the page.
   1.551 +
   1.552 +	@post #iModifier==0 to indicate that page usage has changed.
   1.553 +	*/
   1.554 +	inline void SetMapped(TUint32 aIndex, TUint aFlags)
   1.555 +		{
   1.556 +		CheckAccess("SetMapped");
   1.557 +		__NK_ASSERT_DEBUG(iType==EPhysAlloc);
   1.558 +		__NK_ASSERT_DEBUG(iUseCount==0); // check page not already added to an object
   1.559 +		iIndex = aIndex;
   1.560 +		iFlags = aFlags;
   1.561 +		iModifier = 0;
   1.562 +		}
   1.563 +
   1.564 +	/**
   1.565 +	Set #iPagedState
   1.566 +
   1.567 +	@pre #MmuLock held.
   1.568 +
   1.569 +	@post #iModifier==0 to indicate that page state has changed.
   1.570 +	*/
   1.571 +	FORCE_INLINE void SetPagedState(TPagedState aPagedState)
   1.572 +		{
   1.573 +		CheckAccess("SetPagedState");
   1.574 +		__NK_ASSERT_DEBUG(aPagedState==iPagedState || iPagedState!=EPagedPinned || iPinCount==0); // make sure don't set an unpinned state if iPinCount!=0
   1.575 +		iPagedState = aPagedState;
   1.576 +		iModifier = 0;
   1.577 +		}
   1.578 +
   1.579 +	/**
    1.580 +	Set the page's #iModifier value.
   1.581 +
   1.582 +	#iModifier is cleared to zero whenever the usage or paging state of the page
   1.583 +	changes. So if a thread sets this to a suitable unique value (e.g. the address
   1.584 +	of a local variable) then it may perform a long running operation on the page
   1.585 +	and later check with #CheckModified that no other thread has changed the page
   1.586 +	state or used SetModifier in the intervening time.
   1.587 +	Example.
   1.588 +
   1.589 +	@code
   1.590 +	TInt anyLocalVariable; // arbitrary local variable
   1.591 +
   1.592 +	MmuLock::Lock();
   1.593 +	SPageInfo* thePageInfo = GetAPage();
   1.594 +	thePageInfo->SetModifier(&anyLocalVariable); // use &anyLocalVariable as value unique to this thread 
   1.595 +	MmuLock::Unlock();
   1.596 +
   1.597 +	DoOperation(thePageInfo);
   1.598 +
   1.599 +	MmuLock::Lock();
   1.600 +	TInt r;
    1.601 +	if(!thePageInfo->CheckModified(&anyLocalVariable))
   1.602 +		{
   1.603 +		// nobody else touched the page...
   1.604 +		OperationSucceeded(thePageInfo);
   1.605 +		r = KErrNone;
   1.606 +		}
   1.607 +	else
   1.608 +		{
   1.609 +		// somebody else changed our page...
   1.610 +		OperationInterrupted(thePageInfo);
   1.611 +		r = KErrAbort;
   1.612 +		}
   1.613 +	MmuLock::Unlock();
   1.614 +
   1.615 +	return r;
   1.616 +	@endcode
   1.617 +
   1.618 +	@pre #MmuLock held.
   1.619 +	*/
   1.620 +	FORCE_INLINE void SetModifier(TAny* aModifier)
   1.621 +		{
   1.622 +		CheckAccess("SetModifier");
   1.623 +		iModifier = aModifier;
   1.624 +		}
   1.625 +
   1.626 +	/**
   1.627 +	Return true if the #iModifier value does not match a specified value.
   1.628 +
   1.629 +	@param aModifier	A 'modifier' value previously set with #SetModifier.
   1.630 +
   1.631 +	@pre #MmuLock held.
   1.632 +
   1.633 +	@see SetModifier.
   1.634 +	*/
   1.635 +	FORCE_INLINE TBool CheckModified(TAny* aModifier)
   1.636 +		{
   1.637 +		CheckAccess("CheckModified");
   1.638 +		return iModifier!=aModifier;
   1.639 +		}
   1.640 +
   1.641 +	/**
   1.642 +	Flag this page as having Page Table Entries which give writeable access permissions.
   1.643 +	This sets flags #EWritable and #EDirty.
   1.644 +
   1.645 +	@pre #MmuLock held.
   1.646 +	*/
   1.647 +	FORCE_INLINE void SetWritable()
   1.648 +		{
   1.649 +		CheckAccess("SetWritable");
   1.650 +		// This should only be invoked on paged pages.
   1.651 +		__NK_ASSERT_DEBUG(PagedState() != EUnpaged);
   1.652 +		iFlags |= EWritable;
   1.653 +		SetDirty();
   1.654 +		}
   1.655 +
   1.656 +	/**
   1.657 +	Flag this page as having no longer having any Page Table Entries which give writeable
   1.658 +	access permissions.
   1.659 +	This clears the flag #EWritable.
   1.660 +
   1.661 +	@pre #MmuLock held.
   1.662 +	*/
   1.663 +	FORCE_INLINE void SetReadOnly()
   1.664 +		{
   1.665 +		CheckAccess("SetReadOnly");
   1.666 +		iFlags &= ~EWritable;
   1.667 +		}
   1.668 +
   1.669 +	/**
   1.670 +	Returns true if #SetWritable has been called without a subsequent #SetReadOnly.
   1.671 +	This returns the flag #EWritable.
   1.672 +
   1.673 +	@pre #MmuLock held.
   1.674 +	*/
   1.675 +	FORCE_INLINE TBool IsWritable()
   1.676 +		{
   1.677 +		CheckAccess("IsWritable");
   1.678 +		return iFlags&EWritable;
   1.679 +		}
   1.680 +
   1.681 +	/**
   1.682 +	Flag this page as 'dirty', indicating that its contents may no longer match those saved
    1.683 +	to a backing store. This sets the flag #EDirty.
   1.684 +
   1.685 +	This is used in the management of demand paged memory.
   1.686 +
   1.687 +	@pre #MmuLock held.
   1.688 +	*/
   1.689 +	FORCE_INLINE void SetDirty()
   1.690 +		{
   1.691 +		CheckAccess("SetDirty");
   1.692 +		iFlags |= EDirty;
   1.693 +		}
   1.694 +
   1.695 +	/**
   1.696 +	Flag this page as 'clean', indicating that its contents now match those saved
    1.697 +	to a backing store. This clears the flag #EDirty.
   1.698 +
   1.699 +	This is used in the management of demand paged memory.
   1.700 +
   1.701 +	@pre #MmuLock held.
   1.702 +	*/
   1.703 +	FORCE_INLINE void SetClean()
   1.704 +		{
   1.705 +		CheckAccess("SetClean");
   1.706 +		iFlags &= ~EDirty;
   1.707 +		}
   1.708 +
   1.709 +	/**
    1.710 +	Return the #EDirty flag. See #SetDirty and #SetClean.
   1.711 +
   1.712 +	This is used in the management of demand paged memory.
   1.713 +
   1.714 +	@pre #MmuLock held.
   1.715 +	*/
   1.716 +	FORCE_INLINE TBool IsDirty()
   1.717 +		{
   1.718 +		CheckAccess("IsDirty");
   1.719 +		return iFlags&EDirty;
   1.720 +		}
   1.721 +
   1.722 +
   1.723 +	//
   1.724 +	// Type specific...
   1.725 +	//
   1.726 +
   1.727 +	/**
   1.728 +	Set #iCacheInvalidateCounter to the specified value.
   1.729 +
   1.730 +	@pre #MmuLock held.
   1.731 +	@pre #iType==#EUnused.
   1.732 +	*/
   1.733 +	void SetCacheInvalidateCounter(TUint32 aCacheInvalidateCounter)
   1.734 +		{
   1.735 +		CheckAccess("SetCacheInvalidateCounter");
   1.736 +		__NK_ASSERT_DEBUG(iType==EUnused);
   1.737 +		iCacheInvalidateCounter = aCacheInvalidateCounter;
   1.738 +		}
   1.739 +
   1.740 +	/**
   1.741 +	Return #iCacheInvalidateCounter.
   1.742 +
    1.743 +	@pre #RamAllocLock held.
   1.744 +	@pre #iType==#EUnused.
   1.745 +	*/
   1.746 +	TUint32 CacheInvalidateCounter()
   1.747 +		{
   1.748 +		CheckAccess("CacheInvalidateCounter",ECheckRamAllocLock|ENoCheckMmuLock);
   1.749 +		__NK_ASSERT_DEBUG(iType==EUnused);
   1.750 +		return iCacheInvalidateCounter;
   1.751 +		}
   1.752 +
   1.753 +	/**
   1.754 +	Increment #iUseCount to indicate that the page has been added to a memory object.
   1.755 +
   1.756 +	@return New value of #iUseCount.
   1.757 +
   1.758 +	@pre #MmuLock held.
   1.759 +	@pre #iType==#EPhysAlloc.
   1.760 +	*/
   1.761 +	TUint32 IncUseCount()
   1.762 +		{
   1.763 +		CheckAccess("IncUseCount");
   1.764 +		__NK_ASSERT_DEBUG(iType==EPhysAlloc);
   1.765 +		return ++iUseCount;
   1.766 +		}
   1.767 +
   1.768 +	/**
   1.769 +	Decrement #iUseCount to indicate that the page has been removed from a memory object.
   1.770 +
   1.771 +	@return New value of #iUseCount.
   1.772 +
   1.773 +	@pre #MmuLock held.
   1.774 +	@pre #iType==#EPhysAlloc.
   1.775 +	*/
   1.776 +	TUint32 DecUseCount()
   1.777 +		{
   1.778 +		CheckAccess("DecUseCount");
   1.779 +		__NK_ASSERT_DEBUG(iType==EPhysAlloc);
   1.780 +		__NK_ASSERT_DEBUG(iUseCount);
   1.781 +		return --iUseCount;
   1.782 +		}
   1.783 +
   1.784 +	/**
   1.785 +	Return #iUseCount, this indicates the number of times the page has been added to memory object(s).
   1.786 +
   1.787 +	@return #iUseCount.
   1.788 +
   1.789 +	@pre #MmuLock held.
   1.790 +	@pre #iType==#EPhysAlloc.
   1.791 +	*/
   1.792 +	TUint32 UseCount()
   1.793 +		{
   1.794 +		CheckAccess("UseCount");
   1.795 +		__NK_ASSERT_DEBUG(iType==EPhysAlloc);
   1.796 +		return iUseCount;
   1.797 +		}
   1.798 +
   1.799 +	/**
   1.800 +	Increment #iPinCount to indicate that a mapping has pinned this page.
   1.801 +	This is only done for demand paged memory; unpaged memory does not have
   1.802 +	#iPinCount updated when it is pinned.
   1.803 +
   1.804 +	@return New value of #iPinCount.
   1.805 +
   1.806 +	@pre #MmuLock held.
   1.807 +	@pre #iType==#EManaged.
   1.808 +	*/
   1.809 +	TUint32 IncPinCount()
   1.810 +		{
   1.811 +		CheckAccess("IncPinCount");
   1.812 +		__NK_ASSERT_DEBUG(iType==EManaged);
   1.813 +		return ++iPinCount;
   1.814 +		}
   1.815 +
   1.816 +	/**
   1.817 +	Decrement #iPinCount to indicate that a mapping which was pinning this page has been removed.
   1.818 +	This is only done for demand paged memory; unpaged memory does not have
   1.819 +	#iPinCount updated when it is unpinned.
   1.820 +
   1.821 +	@return New value of #iPinCount.
   1.822 +
   1.823 +	@pre #MmuLock held.
   1.824 +	@pre #iType==#EManaged.
   1.825 +	*/
   1.826 +	TUint32 DecPinCount()
   1.827 +		{
   1.828 +		CheckAccess("DecPinCount");
   1.829 +		__NK_ASSERT_DEBUG(iType==EManaged);
   1.830 +		__NK_ASSERT_DEBUG(iPinCount);
   1.831 +		return --iPinCount;
   1.832 +		}
   1.833 +
   1.834 +	/**
    1.835 +	Clear #iPinCount to zero as this page is no longer being used as a
    1.836 +	pinned page.
   1.837 +	This is only done for demand paged memory; unpaged memory does not have
   1.838 +	#iPinCount set.
   1.839 +
   1.840 +	@pre #MmuLock held.
   1.841 +	@pre #iType==#EManaged.
   1.842 +	*/
   1.843 +	void ClearPinCount()
   1.844 +		{
   1.845 +		CheckAccess("ClearPinCount");
   1.846 +		__NK_ASSERT_DEBUG(iType==EManaged);
   1.847 +		__NK_ASSERT_DEBUG(iPinCount);
   1.848 +		iPinCount = 0;
   1.849 +		}
   1.850 +
   1.851 +	/**
   1.852 +	Return #iPinCount which indicates the number of mappings that have pinned this page.
   1.853 +	This is only valid for demand paged memory; unpaged memory does not have
   1.854 +	#iPinCount updated when it is pinned.
   1.855 +
   1.856 +	@return #iPinCount.
   1.857 +
   1.858 +	@pre #MmuLock held.
   1.859 +	@pre #iType==#EManaged.
   1.860 +	*/
   1.861 +	TUint32 PinCount()
   1.862 +		{
   1.863 +		CheckAccess("PinCount");
   1.864 +		__NK_ASSERT_DEBUG(iType==EManaged);
   1.865 +		return iPinCount;
   1.866 +		}
   1.867 +
   1.868 +	/**
   1.869 +	Set the #EPinnedReserve flag.
   1.870 +	@pre #MmuLock held.
   1.871 +	@see EPinnedReserve.
   1.872 +	*/
   1.873 +	void SetPinnedReserve()
   1.874 +		{
   1.875 +		CheckAccess("SetPinnedReserve");
   1.876 +		iFlags2 |= EPinnedReserve;
   1.877 +		}
   1.878 +
   1.879 +	/**
   1.880 +	Clear the #EPinnedReserve flag.
    1.881 +	@return True if the flag was previously set.
    1.882 +	@pre #MmuLock held.
   1.882 +	@see EPinnedReserve.
   1.883 +	*/
   1.884 +	TBool ClearPinnedReserve()
   1.885 +		{
   1.886 +		CheckAccess("ClearPinnedReserve");
   1.887 +		TUint oldFlags2 = iFlags2;
   1.888 +		iFlags2 = oldFlags2&~EPinnedReserve;
   1.889 +		return oldFlags2&EPinnedReserve;
   1.890 +		}
   1.891 +
   1.892 +	/**
   1.893 +	Set #iPagingManagerData to the specified value.
   1.894 +	@pre #MmuLock held.
   1.895 +	@pre #iType==#EManaged.
   1.896 +	*/
   1.897 +	void SetPagingManagerData(TUint32 aPagingManagerData)
   1.898 +		{
   1.899 +		CheckAccess("SetPagingManagerData");
   1.900 +		__NK_ASSERT_DEBUG(iType==EManaged);
   1.901 +		iPagingManagerData = aPagingManagerData;
   1.902 +		}
   1.903 +
   1.904 +	/**
   1.905 +	Return #iPagingManagerData.
   1.906 +	@pre #MmuLock held.
   1.907 +	@pre #iType==#EManaged.
   1.908 +	*/
   1.909 +	TUint32 PagingManagerData()
   1.910 +		{
   1.911 +		CheckAccess("PagingManagerData");
   1.912 +		__NK_ASSERT_DEBUG(iType==EManaged);
   1.913 +		return iPagingManagerData;
   1.914 +		}
   1.915 +
   1.916 +	//
   1.917 +	// Debug...
   1.918 +	//
   1.919 +
   1.920 +private:
   1.921 +	enum TChecks
   1.922 +		{
   1.923 +		ECheckNotAllocated	= 1<<0,
   1.924 +		ECheckNotUnused		= 1<<1,
   1.925 +		ECheckUnused		= 1<<2,
   1.926 +		ECheckNotPaged		= 1<<3,
   1.927 +		ECheckRamAllocLock	= 1<<4,
   1.928 +		ENoCheckMmuLock		= 1<<5
   1.929 +		};
   1.930 +#ifdef _DEBUG
   1.931 +	void CheckAccess(const char* aMessage, TUint aFlags=0);
   1.932 +#else
   1.933 +	FORCE_INLINE void CheckAccess(const char* /*aMessage*/, TUint /*aFlags*/=0)
   1.934 +		{}
   1.935 +#endif
   1.936 +
   1.937 +public:
   1.938 +#ifdef _DEBUG
   1.939 +	/**
   1.940 +	Debug function which outputs the contents of this object to the kernel debug port.
   1.941 +	*/
   1.942 +	void Dump();
   1.943 +#else
   1.944 +	FORCE_INLINE void Dump()
   1.945 +		{}
   1.946 +#endif
   1.947 +	};
   1.948 +
   1.949 +
   1.950 +const TInt KPageInfosPerPageShift = KPageShift-KPageInfoShift;
   1.951 +const TInt KPageInfosPerPage = 1<<KPageInfosPerPageShift;
   1.952 +const TInt KNumPageInfoPagesShift = 32-KPageShift-KPageInfosPerPageShift;
   1.953 +const TInt KNumPageInfoPages = 1<<KNumPageInfoPagesShift;
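          +
          +// For example, assuming 4KB pages (KPageShift==12) and 32-byte SPageInfo
          +// structures (KPageInfoShift==5), KPageInfosPerPage==128 and the SPageInfo
          +// array needs at most KNumPageInfoPages==8192 pages of virtual address space.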
   1.954 +
   1.955 +FORCE_INLINE SPageInfo* SPageInfo::FromPhysAddr(TPhysAddr aAddress)
   1.956 +	{
   1.957 +	return ((SPageInfo*)KPageInfoLinearBase)+(aAddress>>KPageShift);
   1.958 +	}
   1.959 +
   1.960 +FORCE_INLINE TPhysAddr SPageInfo::PhysAddr()
   1.961 +	{
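          +	// Inverse of FromPhysAddr: since sizeof(SPageInfo)==(KPageSize>>KPageInfosPerPageShift),
          +	// shifting this object's address left by KPageInfosPerPageShift recovers the page's
          +	// physical address (this assumes KPageInfoLinearBase is aligned so that it shifts
          +	// out of the 32-bit result).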
   1.962 +	return ((TPhysAddr)this)<<KPageInfosPerPageShift;
   1.963 +	}
   1.964 +
   1.965 +
   1.966 +
   1.967 +/**
   1.968 +A page table information structure giving the current use and state for a
   1.969 +page table.
   1.970 +*/
   1.971 +struct SPageTableInfo
   1.972 +	{
   1.973 +public:
   1.974 +
   1.975 +	/**
   1.976 +	Enumeration for the usage of a page table. This is stored in #iType.
   1.977 +	*/
   1.978 +	enum TType
   1.979 +		{
   1.980 +		/**
   1.981 +		Page table is unused (implementation assumes this enumeration == 0).
   1.982 +		@see #iUnused and #SPageTableInfo::TUnused.
   1.983 +		*/
   1.984 +		EUnused=0,
   1.985 +
   1.986 +		/**
   1.987 +		Page table has undetermined use.
   1.988 +		(Either created by the bootstrap or is newly allocated but not yet assigned.)
   1.989 +		*/
   1.990 +		EUnknown=1,
   1.991 +
   1.992 +		/**
   1.993 +		Page table is being used by a coarse memory object.
   1.994 +		@see #iCoarse and #SPageTableInfo::TCoarse.
   1.995 +		*/
   1.996 +		ECoarseMapping=2,
   1.997 +
   1.998 +		/**
   1.999 +		Page table is being used for fine mappings.
  1.1000 +		@see #iFine and #SPageTableInfo::TFine.
  1.1001 +		*/
  1.1002 +		EFineMapping=3
  1.1003 +		};
  1.1004 +
  1.1005 +private:
  1.1006 +
  1.1007 +	/**
  1.1008 +	Flags stored in #iFlags.
  1.1009 +	*/
  1.1010 +	enum TFlags
  1.1011 +		{
  1.1012 +		/**
   1.1013 +		Page table is for mapping demand paged content.
  1.1014 +		*/
  1.1015 +		EDemandPaged		= 	1<<0,
  1.1016 +		/**
  1.1017 +		Page table is in Page Table Allocator's cleanup list
  1.1018 +		(only set for first page table in a RAM page)
  1.1019 +		*/
  1.1020 +		EOnCleanupList		= 	1<<1,
  1.1021 +		/**
  1.1022 +		The page table cluster that this page table info refers to is currently allocated.
  1.1023 +		*/
  1.1024 +		EPtClusterAllocated 	=	1<<2
  1.1025 +		};
  1.1026 +
  1.1027 +	/**
  1.1028 +	Value from enum #TType.
  1.1029 +	*/
  1.1030 +	TUint8 iType;				
  1.1031 +
  1.1032 +	/**
  1.1033 +	Bitmask of values from #TFlags.
  1.1034 +	*/
  1.1035 +	TUint8 iFlags;
  1.1036 +
  1.1037 +	/**
  1.1038 +	Spare member used for padding.
  1.1039 +	*/
  1.1040 +	TUint16 iSpare2;
  1.1041 +
  1.1042 +	/**
  1.1043 +	Number of pages currently mapped by this page table.
  1.1044 +	Normally, when #iPageCount==0 and #iPermanenceCount==0, the page table is freed.
  1.1045 +	*/
  1.1046 +	TUint16 iPageCount;
  1.1047 +
  1.1048 +	/**
  1.1049 +	Count for the number of uses of this page table which require it to be permanently allocated;
  1.1050 +	even when it maps no pages (#iPageCount==0).
  1.1051 +	*/
  1.1052 +	TUint16 iPermanenceCount;
  1.1053 +
  1.1054 +	/**
  1.1055 +	Information about a page table when #iType==#EUnused.
  1.1056 +	*/
  1.1057 +	struct TUnused
  1.1058 +		{
  1.1059 +		/**
  1.1060 +		Cast this object to a SDblQueLink reference.
  1.1061 +		This is used for placing unused SPageTableInfo objects into free lists.
  1.1062 +		*/
  1.1063 +		FORCE_INLINE SDblQueLink& Link()
  1.1064 +			{ return *(SDblQueLink*)this; }
  1.1065 +	private:
  1.1066 +		SDblQueLink* iNext;	///< Next free page table
  1.1067 +		SDblQueLink* iPrev;	///< Previous free page table
  1.1068 +		};
  1.1069 +
  1.1070 +	/**
  1.1071 +	Information about a page table when #iType==#ECoarseMapping.
  1.1072 +	*/
  1.1073 +	struct TCoarse
  1.1074 +		{
  1.1075 +		/**
  1.1076 +		Memory object which owns this page table.
  1.1077 +		*/
  1.1078 +		DCoarseMemory*	iMemoryObject;
  1.1079 +
  1.1080 +		/**
  1.1081 +		The index of the page table, i.e. the offset, in 'chunks',
  1.1082 +		into the object's memory that the page table is being used to map.
  1.1083 +		*/
  1.1084 +		TUint16			iChunkIndex;
  1.1085 +
  1.1086 +		/**
  1.1087 +		The #TPteType the page table is being used for.
  1.1088 +		*/
  1.1089 +		TUint8			iPteType;
  1.1090 +		};
  1.1091 +
  1.1092 +	/**
  1.1093 +	Information about a page table when #iType==#EFineMapping.
  1.1094 +	*/
  1.1095 +	struct TFine
  1.1096 +		{
  1.1097 +		/**
  1.1098 +		Start of the virtual address region that this page table is currently
  1.1099 +		mapping memory at, ORed with the OS ASID of the address space this lies in.
  1.1100 +		*/
  1.1101 +		TLinAddr		iLinAddrAndOsAsid;
  1.1102 +		};
  1.1103 +
  1.1104 +	/**
  1.1105 +	Union of type specific info.
  1.1106 +	*/
  1.1107 +	union
  1.1108 +		{
  1.1109 +		TUnused	iUnused; ///< Information about a page table when #iType==#EUnused.
  1.1110 +		TCoarse	iCoarse; ///< Information about a page table when #iType==#ECoarseMapping.
  1.1111 +		TFine	iFine;   ///< Information about a page table when #iType==#EFineMapping.
  1.1112 +		};
  1.1113 +
  1.1114 +public:
  1.1115 +	/**
  1.1116 +	Return the SPageTableInfo for the page table in which a given PTE lies.
  1.1117 +	*/
  1.1118 +	static SPageTableInfo* FromPtPtr(TPte* aPtPte);
  1.1119 +
  1.1120 +	/**
  1.1121 +	Return the page table with which this SPageTableInfo is associated.
  1.1122 +	*/
  1.1123 +	TPte* PageTable();
  1.1124 +
  1.1125 +	/**
  1.1126 +	Used at boot time to initialise page tables which were allocated by the bootstrap. 
  1.1127 +
  1.1128 +	@param aCount	The number of pages being mapped by this page table.
  1.1129 +	*/
  1.1130 +	FORCE_INLINE void Boot(TUint aCount)
  1.1131 +		{
  1.1132 +		CheckInit("Boot");
  1.1133 +		iPageCount = aCount;
  1.1134 +		iPermanenceCount = 1; // assume page table shouldn't be freed
  1.1135 +		iType = EUnknown;
  1.1136 +		iFlags = EPtClusterAllocated;
  1.1137 +		}
  1.1138 +
  1.1139 +	/**
  1.1140 +	Initialise a page table after it has had memory allocated for it.
  1.1141 +
  1.1142 +	@param aDemandPaged	True if this page table has been allocated for use with
  1.1143 +						demand paged memory.
  1.1144 +	*/
  1.1145 +	FORCE_INLINE void New(TBool aDemandPaged)
  1.1146 +		{
  1.1147 +		iType = EUnused;
  1.1148 +		iFlags = EPtClusterAllocated | (aDemandPaged ? EDemandPaged : 0);
  1.1149 +		}
  1.1150 +
  1.1151 +	/**
  1.1152 +	Return true if the page table cluster that this page table info refers to has
  1.1153 +	been previously allocated.
  1.1154 +	*/
  1.1155 +	FORCE_INLINE TBool IsPtClusterAllocated()
  1.1156 +		{
  1.1157 +		return iFlags & EPtClusterAllocated;
  1.1158 +		}
  1.1159 +
  1.1160 +	/**
  1.1161 +	The page table cluster that this page table info refers to has been freed.
  1.1162 +	*/
  1.1163 +	FORCE_INLINE void PtClusterFreed()
  1.1164 +		{
  1.1165 +		__NK_ASSERT_DEBUG(IsPtClusterAllocated());
  1.1166 +		iFlags &= ~EPtClusterAllocated;
  1.1167 +		}
  1.1168 +
  1.1169 +	/**
  1.1170 +	The page table cluster that this page table info refers to has been allocated.
  1.1171 +	*/
  1.1172 +	FORCE_INLINE void PtClusterAlloc()
  1.1173 +		{
  1.1174 +		__NK_ASSERT_DEBUG(!IsPtClusterAllocated());
  1.1175 +		iFlags |= EPtClusterAllocated;
  1.1176 +		}
  1.1177 +
  1.1178 +	/**
   1.1179 +	Initialise a page table to type #EUnknown after it has been newly allocated.
  1.1180 +
  1.1181 +	@pre #PageTablesLockIsHeld.
  1.1182 +	*/
  1.1183 +	FORCE_INLINE void Init()
  1.1184 +		{
  1.1185 +		__NK_ASSERT_DEBUG(IsPtClusterAllocated());
  1.1186 +		CheckInit("Init");
  1.1187 +		iPageCount = 0;
  1.1188 +		iPermanenceCount = 0;
  1.1189 +		iType = EUnknown;
  1.1190 +		}
  1.1191 +
  1.1192 +	/**
  1.1193 +	Increment #iPageCount to account for newly mapped pages.
  1.1194 +
  1.1195 +	@param aStep	Amount to add to #iPageCount. Default is one.
  1.1196 +
  1.1197 +	@return New value of #iPageCount.
  1.1198 +
  1.1199 +	@pre #MmuLock held.
  1.1200 +	*/
  1.1201 +	FORCE_INLINE TUint IncPageCount(TUint aStep=1)
  1.1202 +		{
  1.1203 +		CheckAccess("IncPageCount");
  1.1204 +		TUint count = iPageCount; // compiler handles half-word values stupidly, so give it a hand
  1.1205 +		count += aStep;
  1.1206 +		iPageCount = count;
  1.1207 +		return count;
  1.1208 +		}
  1.1209 +
  1.1210 +	/**
  1.1211 +	Decrement #iPageCount to account for removed pages.
  1.1212 +
  1.1213 +	@param aStep	Amount to subtract from #iPageCount. Default is one.
  1.1214 +
  1.1215 +	@return New value of #iPageCount.
  1.1216 +
  1.1217 +	@pre #MmuLock held.
  1.1218 +	*/
  1.1219 +	FORCE_INLINE TUint DecPageCount(TUint aStep=1)
  1.1220 +		{
  1.1221 +		CheckAccess("DecPageCount");
  1.1222 +		TUint count = iPageCount; // compiler handles half-word values stupidly, so give it a hand
  1.1223 +		count -= aStep;
  1.1224 +		iPageCount = count;
  1.1225 +		return count;
  1.1226 +		}
  1.1227 +
  1.1228 +	/**
  1.1229 +	Return #iPageCount.
  1.1230 +	@pre #MmuLock held.
  1.1231 +	*/
  1.1232 +	FORCE_INLINE TUint PageCount()
  1.1233 +		{
  1.1234 +		CheckAccess("PageCount");
  1.1235 +		return iPageCount;
  1.1236 +		}
  1.1237 +
  1.1238 +	/**
  1.1239 +	Increment #iPermanenceCount to indicate a new use of this page table which
  1.1240 +	requires it to be permanently allocated.
  1.1241 +
  1.1242 +	@return New value of #iPermanenceCount.
  1.1243 +
  1.1244 +	@pre #MmuLock held.
  1.1245 +	*/
  1.1246 +	FORCE_INLINE TUint IncPermanenceCount()
  1.1247 +		{
  1.1248 +		CheckAccess("IncPermanenceCount");
  1.1249 +		TUint count = iPermanenceCount; // compiler handles half-word values stupidly, so give it a hand
  1.1250 +		++count;
  1.1251 +		iPermanenceCount = count;
  1.1252 +		return count;
  1.1253 +		}
  1.1254 +
  1.1255 +	/**
  1.1256 +	Decrement #iPermanenceCount to indicate the removal of a use added by #IncPermanenceCount.
  1.1257 +
  1.1258 +	@return New value of #iPermanenceCount.
  1.1259 +
  1.1260 +	@pre #MmuLock held.
  1.1261 +	*/
  1.1262 +	FORCE_INLINE TUint DecPermanenceCount()
  1.1263 +		{
  1.1264 +		CheckAccess("DecPermanenceCount");
  1.1265 +		TUint count = iPermanenceCount; // compiler handles half-word values stupidly, so give it a hand
  1.1266 +		__NK_ASSERT_DEBUG(count);
  1.1267 +		--count;
  1.1268 +		iPermanenceCount = count;
  1.1269 +		return count;
  1.1270 +		}
  1.1271 +
  1.1272 +	/**
  1.1273 +	Return #iPermanenceCount.
  1.1274 +
  1.1275 +	@pre #MmuLock held.
  1.1276 +	*/
  1.1277 +	FORCE_INLINE TUint PermanenceCount()
  1.1278 +		{
  1.1279 +		CheckAccess("PermanenceCount");
  1.1280 +		return iPermanenceCount;
  1.1281 +		}
  1.1282 +
  1.1283 +	/**
  1.1284 +	Set page table to the #EUnused state.
  1.1285 +	This is only intended for use by #PageTableAllocator.
  1.1286 +
  1.1287 +	@pre #MmuLock held and #PageTablesLockIsHeld.
  1.1288 +	*/
  1.1289 +	FORCE_INLINE void SetUnused()
  1.1290 +		{
  1.1291 +		CheckChangeUse("SetUnused");
  1.1292 +		iType = EUnused;
  1.1293 +		}
  1.1294 +
  1.1295 +	/**
  1.1296 +	Return true if the page table is in the #EUnused state.
  1.1297 +	This is only intended for use by #PageTableAllocator.
  1.1298 +
  1.1299 +	@pre #MmuLock held or #PageTablesLockIsHeld.
  1.1300 +	*/
  1.1301 +	FORCE_INLINE TBool IsUnused()
  1.1302 +		{
  1.1303 +		CheckCheckUse("IsUnused");
  1.1304 +		return iType==EUnused;
  1.1305 +		}
  1.1306 +
  1.1307 +	/**
  1.1308 +	Set page table as being used by a coarse memory object.
  1.1309 +
  1.1310 +	@param aMemory		Memory object which owns this page table.
  1.1311 +	@param aChunkIndex	The index of the page table, i.e. the offset, in 'chunks',
  1.1312 +						into the object's memory that the page table is being used to map.
  1.1313 +	@param aPteType		The #TPteType the page table is being used for.
  1.1314 +
  1.1315 +	@pre #MmuLock held and #PageTablesLockIsHeld.
  1.1316 +
  1.1317 +	@see TCoarse.
  1.1318 +	*/
  1.1319 +	inline void SetCoarse(DCoarseMemory* aMemory, TUint aChunkIndex, TUint aPteType)
  1.1320 +		{
  1.1321 +		CheckChangeUse("SetCoarse");
  1.1322 +		iPageCount = 0;
  1.1323 +		iPermanenceCount = 0;
  1.1324 +		iType = ECoarseMapping;
  1.1325 +		iCoarse.iMemoryObject = aMemory;
  1.1326 +		iCoarse.iChunkIndex = aChunkIndex;
  1.1327 +		iCoarse.iPteType = aPteType;
  1.1328 +		}
  1.1329 +
  1.1330 +	/**
  1.1331 +	Return true if this page table is currently being used by a coarse memory object
  1.1332 +	matching the specified arguments.
  1.1333 +	For arguments, see #SetCoarse.
  1.1334 +
  1.1335 +	@pre #MmuLock held or #PageTablesLockIsHeld.
  1.1336 +	*/
  1.1337 +	inline TBool CheckCoarse(DCoarseMemory* aMemory, TUint aChunkIndex, TUint aPteType)
  1.1338 +		{
  1.1339 +		CheckCheckUse("CheckCoarse");
  1.1340 +		return iType==ECoarseMapping
  1.1341 +			&& iCoarse.iMemoryObject==aMemory
  1.1342 +			&& iCoarse.iChunkIndex==aChunkIndex
  1.1343 +			&& iCoarse.iPteType==aPteType;
  1.1344 +		}
  1.1345 +
  1.1346 +	/**
  1.1347 +	Set page table as being used for fine mappings.
  1.1348 +
  1.1349 +	@param aLinAddr	Start of the virtual address region that the page table is
  1.1350 +					mapping memory at.
  1.1351 +	@param aOsAsid	The OS ASID of the address space which \a aLinAddr lies in.
  1.1352 +
  1.1353 +	@pre #MmuLock held and #PageTablesLockIsHeld.
  1.1354 +	*/
  1.1355 +	inline void SetFine(TLinAddr aLinAddr, TUint aOsAsid)
  1.1356 +		{
  1.1357 +		CheckChangeUse("SetFine");
  1.1358 +		__NK_ASSERT_DEBUG((aLinAddr&KPageMask)==0);
  1.1359 +		iPageCount = 0;
  1.1360 +		iPermanenceCount = 0;
  1.1361 +		iType = EFineMapping;
  1.1362 +		iFine.iLinAddrAndOsAsid = aLinAddr|aOsAsid;
  1.1363 +		}
  1.1364 +
  1.1365 +	/**
  1.1366 +	Return true if this page table is currently being used for fine mappings
  1.1367 +	matching the specified arguments.
  1.1368 +	For arguments, see #SetFine.
  1.1369 +
  1.1370 +	@pre #MmuLock held or #PageTablesLockIsHeld.
  1.1371 +	*/
  1.1372 +	inline TBool CheckFine(TLinAddr aLinAddr, TUint aOsAsid)
  1.1373 +		{
  1.1374 +		CheckCheckUse("CheckFine");
  1.1375 +		__NK_ASSERT_DEBUG((aLinAddr&KPageMask)==0);
  1.1376 +		return iType==EFineMapping
  1.1377 +			&& iFine.iLinAddrAndOsAsid==(aLinAddr|aOsAsid);
  1.1378 +		}
  1.1379 +
  1.1380 +	/**
  1.1381 +	Set a previously unknown page table as now being used for fine mappings.
  1.1382 +	This is used during the boot process by DFineMemory::ClaimInitialPages
  1.1383 +	to initialise the state of a page table allocated by the bootstrap.
  1.1384 +
  1.1385 +	@param aLinAddr	Start of the virtual address region that the page table is
  1.1386 +					mapping memory at.
  1.1387 +	@param aOsAsid	The OS ASID of the address space which \a aLinAddr lies in.
  1.1388 +					(This should be KKernelOsAsid.)
  1.1389 +
  1.1390 +	@pre #MmuLock held and #PageTablesLockIsHeld.
  1.1391 +	*/
  1.1392 +	inline TBool ClaimFine(TLinAddr aLinAddr, TUint aOsAsid)
  1.1393 +		{
  1.1394 +		CheckChangeUse("ClaimFine");
  1.1395 +		__NK_ASSERT_DEBUG((aLinAddr&KPageMask)==0);
  1.1396 +		if(iType==EFineMapping)
  1.1397 +			return CheckFine(aLinAddr,aOsAsid);
  1.1398 +		if(iType!=EUnknown)
  1.1399 +			return false;
  1.1400 +		iType = EFineMapping;
  1.1401 +		iFine.iLinAddrAndOsAsid = aLinAddr|aOsAsid;
  1.1402 +		return true;
  1.1403 +		}
  1.1404 +
  1.1405 +	/**
  1.1406 +	Return true if page table was allocated for use with demand paged memory.
  1.1407 +	*/
  1.1408 +	FORCE_INLINE TBool IsDemandPaged()
  1.1409 +		{
  1.1410 +		return iFlags&EDemandPaged;
  1.1411 +		}
  1.1412 +
  1.1413 +#ifdef _DEBUG
  1.1414 +	/**
  1.1415 +	Debug check returning true if the value of #iPageCount is consistent with
  1.1416 +	the PTEs in this page table.
  1.1417 +
  1.1418 +	@pre #MmuLock held.
  1.1419 +	*/
  1.1420 +	TBool CheckPageCount();
  1.1421 +#endif
  1.1422 +
  1.1423 +	/**
  1.1424 +	Return a reference to an embedded SDblQueLink which is used for placing this
   1.1425 +	SPageTableInfo object into free lists.
  1.1426 +	@pre #PageTablesLockIsHeld.
  1.1427 +	@pre #iType==#EUnused.
  1.1428 +	*/
  1.1429 +	inline SDblQueLink& FreeLink()
  1.1430 +		{
  1.1431 +		__NK_ASSERT_DEBUG(IsUnused());
  1.1432 +		return iUnused.Link();
  1.1433 +		}
  1.1434 +
  1.1435 +	/**
  1.1436 +	Return a pointer to a SPageTableInfo by conversion from the address
  1.1437 +	of its embedded link as returned by #FreeLink.
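          +
          +	For example, a sketch of how a free list consumer might recover the info for
          +	the first unused page table ('freeList' is a hypothetical SDblQue of unused
          +	page tables):
          +
          +	@code
          +	SPageTableInfo* pti = SPageTableInfo::FromFreeLink(freeList.First());
          +	@endcode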
  1.1438 +	*/
  1.1439 +	FORCE_INLINE static SPageTableInfo* FromFreeLink(SDblQueLink* aLink)
  1.1440 +		{
  1.1441 +		return (SPageTableInfo*)((TInt)aLink-_FOFF(SPageTableInfo,iUnused));
  1.1442 +		}
  1.1443 +
  1.1444 +	/**
  1.1445 +	Return the SPageTableInfo for the first page table in the same
  1.1446 +	physical ram page as the page table for this SPageTableInfo.
  1.1447 +	*/
  1.1448 +	FORCE_INLINE SPageTableInfo* FirstInPage()
  1.1449 +		{
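          +		// 'this' is always aligned to sizeof(SPageTableInfo), so clearing the
          +		// address bits which select a page table within its cluster yields the
          +		// info for the first page table in the RAM page.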
  1.1450 +		return (SPageTableInfo*)(TLinAddr(this)&~(KPtClusterMask*sizeof(SPageTableInfo)));
  1.1451 +		}
  1.1452 +
  1.1453 +	/**
  1.1454 +	Return the SPageTableInfo for the last page table in the same
  1.1455 +	physical ram page as the page table for this SPageTableInfo.
  1.1456 +	*/
  1.1457 +	FORCE_INLINE SPageTableInfo* LastInPage()
  1.1458 +		{
  1.1459 +		return (SPageTableInfo*)(TLinAddr(this)|(KPtClusterMask*sizeof(SPageTableInfo)));
  1.1460 +		}
  1.1461 +
  1.1462 +	/**
  1.1463 +	Return true if the page table for this SPageTableInfo is
  1.1464 +	the first page table in the physical page it occupies.
  1.1465 +	*/
  1.1466 +	FORCE_INLINE TBool IsFirstInPage()
  1.1467 +		{
  1.1468 +		return (TLinAddr(this)&(KPtClusterMask*sizeof(SPageTableInfo)))==0;
  1.1469 +		}
  1.1470 +
  1.1471 +	/**
  1.1472 +	Return true if this page table has been added to the cleanup list with
  1.1473 +	#AddToCleanupList.
  1.1474 +	Must only be used for page tables which return true for #IsFirstInPage.
  1.1475 +
  1.1476 +	@pre #PageTablesLockIsHeld.
  1.1477 +	*/
  1.1478 +	FORCE_INLINE TBool IsOnCleanupList()
  1.1479 +		{
  1.1480 +		__NK_ASSERT_DEBUG(IsFirstInPage());
  1.1481 +		return iFlags&EOnCleanupList;
  1.1482 +		}
  1.1483 +
  1.1484 +	/**
  1.1485 +	Add the RAM page containing this page table to the specified cleanup list.
  1.1486 +	Must only be used for page tables which return true for #IsFirstInPage.
  1.1487 +
  1.1488 +	@pre #PageTablesLockIsHeld.
  1.1489 +	*/
  1.1490 +	FORCE_INLINE void AddToCleanupList(SDblQue& aCleanupList)
  1.1491 +		{
  1.1492 +		__NK_ASSERT_DEBUG(IsUnused());
  1.1493 +		__NK_ASSERT_DEBUG(IsFirstInPage());
  1.1494 +		__NK_ASSERT_DEBUG(!IsOnCleanupList());
  1.1495 +		aCleanupList.Add(&FreeLink());
  1.1496 +		iFlags |= EOnCleanupList;
  1.1497 +		}
  1.1498 +
  1.1499 +	/**
  1.1500 +	Remove the RAM page containing this page table from a cleanup list it
   1.1501 +	was added to with #AddToCleanupList.
  1.1502 +	Must only be used for page tables which return true for #IsFirstInPage.
  1.1503 +
  1.1504 +	@pre #PageTablesLockIsHeld.
  1.1505 +	*/
  1.1506 +	FORCE_INLINE void RemoveFromCleanupList()
  1.1507 +		{
  1.1508 +		__NK_ASSERT_DEBUG(IsUnused());
  1.1509 +		__NK_ASSERT_DEBUG(IsFirstInPage());
  1.1510 +		__NK_ASSERT_DEBUG(IsOnCleanupList());
  1.1511 +		iFlags &= ~EOnCleanupList;
  1.1512 +		FreeLink().Deque();
  1.1513 +		}
  1.1514 +
  1.1515 +	/**
  1.1516 +	Remove this page table from its owner and free it.
  1.1517 +	This is only used with page tables which map demand paged memory
  1.1518 +	and is intended for use in implementing #DPageTableMemoryManager.
  1.1519 +
  1.1520 +	@return KErrNone if successful,
  1.1521 +			otherwise one of the system wide error codes.
  1.1522 +
  1.1523 +	@pre #MmuLock held and #PageTablesLockIsHeld.
  1.1524 +	*/
  1.1525 +	TInt ForcedFree();
  1.1526 +
  1.1527 +private:
  1.1528 +
  1.1529 +#ifdef _DEBUG
  1.1530 +	void CheckChangeUse(const char* aName);
  1.1531 +	void CheckCheckUse(const char* aName);
  1.1532 +	void CheckAccess(const char* aName);
  1.1533 +	void CheckInit(const char* aName);
  1.1534 +#else
  1.1535 +	FORCE_INLINE void CheckChangeUse(const char* /*aName*/)
  1.1536 +		{}
  1.1537 +	FORCE_INLINE void CheckCheckUse(const char* /*aName*/)
  1.1538 +		{}
  1.1539 +	FORCE_INLINE void CheckAccess(const char* /*aName*/)
  1.1540 +		{}
  1.1541 +	FORCE_INLINE void CheckInit(const char* /*aName*/)
  1.1542 +		{}
  1.1543 +#endif
  1.1544 +	};
  1.1545 +
  1.1546 +
  1.1547 +const TInt KPageTableInfoShift = 4;
  1.1548 +__ASSERT_COMPILE(sizeof(SPageTableInfo)==(1<<KPageTableInfoShift));
  1.1549 +
  1.1550 +FORCE_INLINE SPageTableInfo* SPageTableInfo::FromPtPtr(TPte* aPtPte)
  1.1551 +	{
  1.1552 +	TUint id = ((TLinAddr)aPtPte-KPageTableBase)>>KPageTableShift;
  1.1553 +	return (SPageTableInfo*)KPageTableInfoBase+id;
  1.1554 +	}
  1.1555 +
  1.1556 +FORCE_INLINE TPte* SPageTableInfo::PageTable()
  1.1557 +	{
  1.1558 +	return (TPte*)
  1.1559 +		(KPageTableBase+
  1.1560 +			(
  1.1561 +			((TLinAddr)this-(TLinAddr)KPageTableInfoBase)
  1.1562 +			<<(KPageTableShift-KPageTableInfoShift)
  1.1563 +			)
  1.1564 +		);
  1.1565 +	}
  1.1566 +
  1.1567 +
  1.1568 +
  1.1569 +/**
  1.1570 +Class providing access to the mutex used to protect memory allocation operations;
  1.1571 +this is the mutex Mmu::iRamAllocatorMutex.
  1.1572 +In addition to providing locking, these functions monitor the system's free RAM
  1.1573 +levels and call K::CheckFreeMemoryLevel to notify the system of changes.
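          +
          +A minimal usage sketch (the work done under the lock is illustrative only):
          +
          +@code
          +RamAllocLock::Lock();
          +// ... allocate or free physical RAM ...
          +RamAllocLock::Unlock();
          +@endcode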
  1.1574 +*/
  1.1575 +class RamAllocLock
  1.1576 +	{
  1.1577 +public:
  1.1578 +	/**
  1.1579 +	Acquire the lock.
  1.1580 +	The lock may be acquired multiple times by a thread, and will remain locked
  1.1581 +	until #Unlock has been used enough times to balance this.
  1.1582 +	*/
  1.1583 +	static void Lock();
  1.1584 +
  1.1585 +	/**
  1.1586 +	Release the lock.
  1.1587 +
  1.1588 +	@pre The current thread has previously acquired the lock.
  1.1589 +	*/
  1.1590 +	static void Unlock();
  1.1591 +
  1.1592 +	/**
  1.1593 +	Allow another thread to acquire the lock.
  1.1594 +	This is equivalent to #Unlock followed by #Lock, but optimised
  1.1595 +	to only do this if there is another thread waiting on the lock.
  1.1596 +
  1.1597 +	@return True if the lock was released by this function.
  1.1598 +
  1.1599 +	@pre The current thread has previously acquired the lock.
  1.1600 +	*/
  1.1601 +	static TBool Flash();
  1.1602 +
  1.1603 +	/**
  1.1604 +	Return true if the current thread holds the lock.
  1.1605 +	This is used for debug checks.
  1.1606 +	*/
  1.1607 +	static TBool IsHeld();
  1.1608 +	};
  1.1609 +
  1.1610 +
  1.1611 +
  1.1612 +/**
  1.1613 +Return true if the PageTableLock is held by the current thread.
  1.1614 +This lock is the mutex used to protect page table allocation; it is acquired
  1.1615 +with
  1.1616 +@code
  1.1617 +	::PageTables.Lock();
  1.1618 +@endcode
  1.1619 +and released with
  1.1620 +@code
  1.1621 +	::PageTables.Unlock();
  1.1622 +@endcode
  1.1623 +*/
  1.1624 +TBool PageTablesLockIsHeld();
  1.1625 +
  1.1626 +
  1.1627 +
  1.1628 +/**
  1.1629 +Class providing access to the fast mutex used to protect various
  1.1630 +low level memory operations.
  1.1631 +
  1.1632 +This lock must only be held for a very short and bounded time.
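          +
          +A minimal usage sketch; long running loops should use the #Flash overload
          +taking a counter, as shown in its description:
          +
          +@code
          +MmuLock::Lock();
          +// ... briefly examine or update page state ...
          +MmuLock::Unlock();
          +@endcode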
  1.1633 +*/
  1.1634 +class MmuLock
  1.1635 +	{
  1.1636 +public:
  1.1637 +	/**
  1.1638 +	Acquire the lock.
  1.1639 +	*/
  1.1640 +	static void Lock();
  1.1641 +
  1.1642 +	/**
  1.1643 +	Release the lock.
  1.1644 +
  1.1645 +	@pre The current thread has previously acquired the lock.
  1.1646 +	*/
  1.1647 +	static void Unlock();
  1.1648 +
  1.1649 +	/**
  1.1650 +	Allow another thread to acquire the lock.
  1.1651 +	This is equivalent to #Unlock followed by #Lock, but optimised
  1.1652 +	to only do this if there is another thread waiting on the lock.
  1.1653 +
  1.1654 +	@return True if the lock was released by this function.
  1.1655 +
  1.1656 +	@pre The current thread has previously acquired the lock.
  1.1657 +	*/
  1.1658 +	static TBool Flash();
  1.1659 +
  1.1660 +	/**
  1.1661 +	Return true if the current thread holds the lock.
  1.1662 +	This is used for debug checks.
  1.1663 +	*/
  1.1664 +	static TBool IsHeld();
  1.1665 +
  1.1666 +	/**
  1.1667 +	Increment a counter and perform the action of #Flash() once a given threshold
  1.1668 +	value is reached. After flashing the counter is reset.
  1.1669 +
  1.1670 +	This is typically used in long running loops to periodically flash the lock
  1.1671 +	and so avoid holding it for too long, e.g.
  1.1672 +
  1.1673 +	@code
  1.1674 +	MmuLock::Lock();
  1.1675 +	TUint flash = 0;
   1.1676 +	const TUint KMaxIterationsWithLock = 10;
  1.1677 +	while(WorkToDo)
  1.1678 +		{
  1.1679 +		DoSomeWork();
   1.1680 +		MmuLock::Flash(flash,KMaxIterationsWithLock); // flash every N loops
  1.1681 +		}
  1.1682 +	MmuLock::Unlock();
  1.1683 +	@endcode
  1.1684 +
  1.1685 +	@param aCounter			Reference to the counter.
  1.1686 +	@param aFlashThreshold	Value \a aCounter must reach before flashing the lock.
  1.1687 +	@param aStep			Value to add to \a aCounter.
  1.1688 +
  1.1689 +	@return True if the lock was released by this function.
  1.1690 +
  1.1691 +	@pre The current thread has previously acquired the lock.
  1.1692 +	*/
  1.1693 +	static FORCE_INLINE TBool Flash(TUint& aCounter, TUint aFlashThreshold, TUint aStep=1)
  1.1694 +		{
  1.1695 +		UnlockGuardCheck();
  1.1696 +		if((aCounter+=aStep)<aFlashThreshold)
  1.1697 +			return EFalse;
  1.1698 +		aCounter -= aFlashThreshold;
  1.1699 +		return MmuLock::Flash();
  1.1700 +		}
  1.1701 +
  1.1702 +	/**
  1.1703 +	Begin a debug check to test that the MmuLock is not unlocked unexpectedly.
  1.1704 +
   1.1705 +	This is used in situations where a series of operations must be performed
  1.1706 +	atomically with the MmuLock held. It is usually used via the
  1.1707 +	#__UNLOCK_GUARD_START macro, e.g.
  1.1708 +
  1.1709 +	@code
  1.1710 +	__UNLOCK_GUARD_START(MmuLock);
  1.1711 +	SomeCode();
  1.1712 +	SomeMoreCode();
  1.1713 +	__UNLOCK_GUARD_END(MmuLock); // fault if MmuLock released by SomeCode or SomeMoreCode
  1.1714 +	@endcode
  1.1715 +	*/
  1.1716 +	static FORCE_INLINE void UnlockGuardStart()
  1.1717 +		{
  1.1718 +		#ifdef _DEBUG
  1.1719 +			++UnlockGuardNest;
  1.1720 +		#endif
  1.1721 +		}
  1.1722 +
  1.1723 +	/**
  1.1724 +	End a debug check testing that the MmuLock is not unlocked unexpectedly.
   1.1725 +	This is usually used via the #__UNLOCK_GUARD_END macro, which faults if false is returned.
  1.1726 +
  1.1727 +	@see UnlockGuardStart
  1.1728 +
   1.1729 +	@return False if the MmuLock was released between a previous #UnlockGuardStart
   1.1730 +			and the call to this function; true otherwise.
  1.1731 +	*/
  1.1732 +	static FORCE_INLINE TBool UnlockGuardEnd()
  1.1733 +		{
  1.1734 +		#ifdef _DEBUG
  1.1735 +			__NK_ASSERT_DEBUG(UnlockGuardNest);
  1.1736 +			--UnlockGuardNest;
  1.1737 +			return UnlockGuardFail==0;
  1.1738 +		#else
  1.1739 +			return true;
  1.1740 +		#endif
  1.1741 +		}
  1.1742 +
  1.1743 +private:
  1.1744 +	/**
   1.1745 +	Executed whenever the lock is released to check that
  1.1746 +	#UnlockGuardStart and #UnlockGuardEnd are balanced.
  1.1747 +	*/
  1.1748 +	static FORCE_INLINE void UnlockGuardCheck()
  1.1749 +		{
  1.1750 +		#ifdef _DEBUG
  1.1751 +			if(UnlockGuardNest)
  1.1752 +				UnlockGuardFail = true;
  1.1753 +		#endif
  1.1754 +		}
  1.1755 +
  1.1756 +private:
  1.1757 +	/** The lock */
  1.1758 +	static NFastMutex iLock;
  1.1759 +
  1.1760 +#ifdef _DEBUG
  1.1761 +	static TUint UnlockGuardNest;
  1.1762 +	static TUint UnlockGuardFail;
  1.1763 +#endif
  1.1764 +	};
  1.1765 +
  1.1766 +
  1.1767 +
  1.1768 +/**
  1.1769 +Interface for accessing the lock mutex being used to serialise
  1.1770 +explicit modifications to a specified memory object.
  1.1771 +
  1.1772 +The lock mutex is either the one which was previously assigned with
   1.1773 +DMemoryObject::SetLock or, if none was set, a mutex dynamically assigned
   1.1774 +from #MemoryObjectMutexPool; such a mutex has 'order' #KMutexOrdMemoryObject.
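          +
          +For example, an explicit modification to a memory object is serialised
          +like this (a minimal sketch; UpdateCommittedPages() is an illustrative
          +placeholder, not a real kernel function):
          +
          +@code
          +	MemoryObjectLock::Lock(aMemory);
          +	UpdateCommittedPages(aMemory); // explicit modification of aMemory
          +	MemoryObjectLock::Unlock(aMemory);
          +@endcode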
  1.1775 +*/
  1.1776 +class MemoryObjectLock
  1.1777 +	{
  1.1778 +public:
  1.1779 +	/**
  1.1780 +	Acquire the lock for the specified memory object.
  1.1781 +	If the object has no lock, one is assigned from #MemoryObjectMutexPool.
  1.1782 +	*/
  1.1783 +	static void Lock(DMemoryObject* aMemory);
  1.1784 +
  1.1785 +	/**
  1.1786 +	Release the lock for the specified memory object, which was acquired
  1.1787 +	with #Lock. If the lock was one which was dynamically assigned, and there
   1.1788 +	are no threads waiting for it, the lock is unassigned from the memory
  1.1789 +	object.
  1.1790 +	*/
  1.1791 +	static void Unlock(DMemoryObject* aMemory);
  1.1792 +
  1.1793 +	/**
   1.1794 +	Return true if the current thread holds the lock for the specified memory object.
  1.1795 +	This is used for debug checks.
  1.1796 +	*/
  1.1797 +	static TBool IsHeld(DMemoryObject* aMemory);
  1.1798 +	};
  1.1799 +
  1.1800 +
  1.1801 +#define __UNLOCK_GUARD_START(_l) __DEBUG_ONLY(_l::UnlockGuardStart())
  1.1802 +#define __UNLOCK_GUARD_END(_l) __NK_ASSERT_DEBUG(_l::UnlockGuardEnd())
  1.1803 +
  1.1804 +
  1.1805 +const TUint KMutexOrdAddresSpace = KMutexOrdKernelHeap + 2;
  1.1806 +const TUint KMutexOrdMemoryObject = KMutexOrdKernelHeap + 1;
  1.1807 +const TUint KMutexOrdMmuAlloc = KMutexOrdRamAlloc + 1;
  1.1808 +
  1.1809 +
  1.1810 +#ifdef _DEBUG
  1.1811 +//#define FORCE_TRACE
  1.1812 +//#define FORCE_TRACE2
  1.1813 +//#define FORCE_TRACEB
  1.1814 +//#define FORCE_TRACEP
  1.1815 +#endif
  1.1816 +
  1.1817 +
  1.1818 +
  1.1819 +#define TRACE_printf Kern::Printf
  1.1820 +
  1.1821 +#define TRACE_ALWAYS(t) TRACE_printf t
  1.1822 +
  1.1823 +#ifdef FORCE_TRACE
  1.1824 +#define TRACE(t) TRACE_printf t
  1.1825 +#else
   1.1826 +#define TRACE(t) __KTRACE_OPT(KMMU,TRACE_printf t)
  1.1827 +#endif
  1.1828 +
  1.1829 +#ifdef FORCE_TRACE2
  1.1830 +#define TRACE2(t) TRACE_printf t
  1.1831 +#else
  1.1832 +#define TRACE2(t) __KTRACE_OPT(KMMU2,TRACE_printf t)
  1.1833 +#endif
  1.1834 +
  1.1835 +#ifdef FORCE_TRACEB
  1.1836 +#define TRACEB(t) TRACE_printf t
  1.1837 +#else
  1.1838 +#define TRACEB(t) __KTRACE_OPT2(KMMU,KBOOT,TRACE_printf t)
  1.1839 +#endif
  1.1840 +
  1.1841 +#ifdef FORCE_TRACEP
  1.1842 +#define TRACEP(t) TRACE_printf t
  1.1843 +#else
  1.1844 +#define TRACEP(t) __KTRACE_OPT(KPAGING,TRACE_printf t)
  1.1845 +#endif
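          +
          +// The argument to each of these macros is a complete bracketed argument list
          +// for TRACE_printf, hence the double parentheses at the point of use, e.g.
          +// (a sketch; the format string and value are illustrative):
          +//
          +//	TRACE(("Mmu::AllocRam returns %d",r));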
  1.1846 +
  1.1847 +
  1.1848 +/**
  1.1849 +The maximum number of consecutive updates to #SPageInfo structures which
  1.1850 +should be executed without releasing the #MmuLock.
  1.1851 +
  1.1852 +This value must be an integer power of two.
  1.1853 +*/
  1.1854 +const TUint KMaxPageInfoUpdatesInOneGo = 64;
  1.1855 +
  1.1856 +/**
  1.1857 +The maximum number of simple operations on memory page state which should
  1.1858 +occur without releasing the #MmuLock. Examples of the operations are
   1.1859 +read-modify-write of a Page Table Entry (PTE) or entries in a memory object's
  1.1860 +RPageArray.
  1.1861 +
  1.1862 +This value must be an integer power of two.
  1.1863 +*/
  1.1864 +const TUint KMaxPagesInOneGo = KMaxPageInfoUpdatesInOneGo/2;
  1.1865 +
  1.1866 +/**
  1.1867 +The maximum number of Page Directory Entries which should be updated
  1.1868 +without releasing the #MmuLock.
  1.1869 +
  1.1870 +This value must be an integer power of two.
  1.1871 +*/
  1.1872 +const TUint KMaxPdesInOneGo = KMaxPageInfoUpdatesInOneGo;
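          +
          +// A long running loop over many pages typically combines one of these limits
          +// with MmuLock::Flash, e.g. (a sketch; DoPageOperation() is an illustrative
          +// placeholder, not a real kernel function):
          +//
          +//	TUint flash = 0;
          +//	MmuLock::Lock();
          +//	for(TUint i=0; i<aCount; ++i)
          +//		{
          +//		DoPageOperation(aPages[i]);
          +//		MmuLock::Flash(flash,KMaxPagesInOneGo); // release the lock periodically
          +//		}
          +//	MmuLock::Unlock();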
  1.1873 +
  1.1874 +
  1.1875 +/********************************************
  1.1876 + * MMU stuff
  1.1877 + ********************************************/
  1.1878 +
  1.1879 +class DRamAllocator;
  1.1880 +class TPinArgs;
  1.1881 +class Defrag;
  1.1882 +
  1.1883 +/**
  1.1884 +Interface to RAM allocation and MMU data structure manipulation.
  1.1885 +*/
  1.1886 +class Mmu
  1.1887 +	{
  1.1888 +public:
  1.1889 +	enum TPanic
  1.1890 +		{
  1.1891 +		EInvalidRamBankAtBoot,
  1.1892 +		EInvalidReservedBankAtBoot,
  1.1893 +		EInvalidPageTableAtBoot,
  1.1894 +		EInvalidPdeAtBoot,
  1.1895 +		EBadMappedPageAfterBoot,
  1.1896 +		ERamAllocMutexCreateFailed,
  1.1897 +		EBadFreePhysicalRam,
  1.1898 +		EUnsafePageInfoAccess,
  1.1899 +		EUnsafePageTableInfoAccess,
  1.1900 +		EPhysMemSyncMutexCreateFailed,
  1.1901 +		EDefragAllocFailed
  1.1902 +		};
  1.1903 +
  1.1904 +	/**
  1.1905 +	Attribute flags used when allocating RAM pages.
  1.1906 +	See #AllocRam etc.
  1.1907 +
  1.1908 +	The least significant bits of these flags are used for the #TMemoryType
  1.1909 +	value for the memory.
  1.1910 +	*/
  1.1911 +	enum TRamAllocFlags
  1.1912 +		{
  1.1913 +		// lower bits hold TMemoryType
  1.1914 +
  1.1915 +		/**
  1.1916 +		If this flag is set, don't wipe the contents of the memory when allocated.
  1.1917 +		By default, for security and confidentiality reasons, the memory is filled
  1.1918 +		with a 'wipe' value to erase the previous contents.
  1.1919 +		*/
  1.1920 +		EAllocNoWipe			= 1<<(KMemoryTypeShift),
  1.1921 +
  1.1922 +		/**
  1.1923 +		If this flag is set, any memory wiping will fill memory with the byte
  1.1924 +		value starting at bit position #EAllocWipeByteShift in these flags.
  1.1925 +		*/
  1.1926 +		EAllocUseCustomWipeByte	= 1<<(KMemoryTypeShift+1),
  1.1927 +
  1.1928 +		/**
  1.1929 +		If this flag is set, memory allocation won't attempt to reclaim pages
  1.1930 +		from the demand paging system.
  1.1931 +		This is used to prevent deadlock when the paging system itself attempts
  1.1932 +		to allocate memory for itself.
  1.1933 +		*/
  1.1934 +		EAllocNoPagerReclaim	= 1<<(KMemoryTypeShift+2),
  1.1935 +
  1.1936 +		/**
  1.1937 +		@internal
  1.1938 +		*/
  1.1939 +		EAllocFlagLast,
  1.1940 +
   1.1941 +		/**
  1.1942 +		Bit position within these flags, for the least significant bit of the
  1.1943 +		byte value used when #EAllocUseCustomWipeByte is set.
  1.1944 +		*/
  1.1945 +		EAllocWipeByteShift		= 8
  1.1946 +		};
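          +
          +	// The TMemoryType value occupies the least significant bits of these flags,
          +	// so, for example, flags requesting memory of type aType which is wiped with
          +	// the custom byte value 0xaa might be composed as follows (a sketch):
          +	//
          +	//	TRamAllocFlags flags = (TRamAllocFlags)
          +	//		(aType | EAllocUseCustomWipeByte | (0xaau<<EAllocWipeByteShift));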
  1.1947 +
  1.1948 +public:
  1.1949 +	void Init1();
  1.1950 +	void Init1Common();
  1.1951 +	void Init2();
  1.1952 +	void Init2Common();
  1.1953 +	void Init2Final();
  1.1954 +	void Init2FinalCommon();
  1.1955 +	void Init3();
  1.1956 +
  1.1957 +	static void Panic(TPanic aPanic);
  1.1958 +
  1.1959 +	static TInt HandlePageFault(TLinAddr aPc, TLinAddr aFaultAddress, TUint aAccessPermissions, TAny* aExceptionInfo);
  1.1960 +
  1.1961 +	TUint FreeRamInPages();
  1.1962 +	TUint TotalPhysicalRamPages();
  1.1963 +
  1.1964 +	TInt AllocRam(	TPhysAddr* aPages, TUint aCount, TRamAllocFlags aFlags, TZonePageType aZonePageType, 
  1.1965 +					TUint aBlockZoneId=KRamZoneInvalidId, TBool aBlockRest=EFalse);
  1.1966 +	void FreeRam(TPhysAddr* aPages, TUint aCount, TZonePageType aZonePageType);
  1.1967 +	TInt AllocContiguousRam(TPhysAddr& aPhysAddr, TUint aCount, TUint aAlign, TRamAllocFlags aFlags);
  1.1968 +	void FreeContiguousRam(TPhysAddr aPhysAddr, TUint aCount);
  1.1969 +
  1.1970 +	const SRamZone* RamZoneConfig(TRamZoneCallback& aCallback) const;
  1.1971 +	void SetRamZoneConfig(const SRamZone* aZones, TRamZoneCallback aCallback);
  1.1972 +	TInt ModifyRamZoneFlags(TUint aId, TUint aClearMask, TUint aSetMask);
  1.1973 +	TInt GetRamZonePageCount(TUint aId, SRamZonePageCount& aPageData);
  1.1974 +	TInt ZoneAllocPhysicalRam(TUint* aZoneIdList, TUint aZoneIdCount, TInt aBytes, TPhysAddr& aPhysAddr, TInt aAlign);
  1.1975 +	TInt ZoneAllocPhysicalRam(TUint* aZoneIdList, TUint aZoneIdCount, TInt aNumPages, TPhysAddr* aPageList);
  1.1976 +	TInt RamHalFunction(TInt aFunction, TAny* a1, TAny* a2);	
  1.1977 +	void ChangePageType(SPageInfo* aPageInfo, TZonePageType aOldPageType, TZonePageType aNewPageType);
  1.1978 +
  1.1979 +	TInt AllocPhysicalRam(TPhysAddr* aPages, TUint aCount, TRamAllocFlags aFlags);
  1.1980 +	void FreePhysicalRam(TPhysAddr* aPages, TUint aCount);
  1.1981 +	TInt AllocPhysicalRam(TPhysAddr& aPhysAddr, TUint aCount, TUint aAlign, TRamAllocFlags aFlags);
  1.1982 +	void FreePhysicalRam(TPhysAddr aPhysAddr, TUint aCount);
  1.1983 +	TInt ClaimPhysicalRam(TPhysAddr aPhysAddr, TUint aCount, TRamAllocFlags aFlags);
  1.1984 +	void AllocatedPhysicalRam(TPhysAddr aPhysAddr, TUint aCount, TRamAllocFlags aFlags);
  1.1985 +
  1.1986 +	TLinAddr MapTemp(TPhysAddr aPage, TUint aColour, TUint aSlot=0);
  1.1987 +	void UnmapTemp(TUint aSlot=0);
  1.1988 +	void RemoveAliasesForPageTable(TPhysAddr aPageTable);
  1.1989 +
  1.1990 +	static TBool MapPages(TPte* const aPtePtr, const TUint aCount, TPhysAddr* aPages, TPte aBlankPte);
  1.1991 +	static TBool UnmapPages(TPte* const aPtePtr, TUint aCount);
  1.1992 +	static TBool UnmapPages(TPte* const aPtePtr, TUint aCount, TPhysAddr* aPages);
  1.1993 +	static void RemapPage(TPte* const aPtePtr, TPhysAddr& aPage, TPte aBlankPte);
  1.1994 +	static void RestrictPagesNA(TPte* const aPtePtr, TUint aCount, TPhysAddr* aPages);
  1.1995 +	static TBool PageInPages(TPte* const aPtePtr, const TUint aCount, TPhysAddr* aPages, TPte aBlankPte);
  1.1996 +
  1.1997 +	// implemented in CPU-specific code...
  1.1998 +	static TUint PteType(TMappingPermissions aPermissions, TBool aGlobal);
  1.1999 +	static TUint PdeType(TMemoryAttributes aAttributes);
  1.2000 +	static TPte BlankPte(TMemoryAttributes aAttributes, TUint aPteType);
  1.2001 +	static TPde BlankPde(TMemoryAttributes aAttributes);
  1.2002 +	static TPde BlankSectionPde(TMemoryAttributes aAttributes, TUint aPteType);
  1.2003 +	static TBool CheckPteTypePermissions(TUint aPteType, TUint aAccessPermissions);
  1.2004 +	static TMappingPermissions PermissionsFromPteType(TUint aPteType);
  1.2005 +	void PagesAllocated(TPhysAddr* aPageList, TUint aCount, TRamAllocFlags aFlags, TBool aReallocate=false);
  1.2006 +	void PageFreed(SPageInfo* aPageInfo);
  1.2007 +	void CleanAndInvalidatePages(TPhysAddr* aPages, TUint aCount, TMemoryAttributes aAttributes, TUint aColour);
  1.2008 +public:
  1.2009 +	// utils, implemented in CPU-specific code...
  1.2010 +	static TPde* PageDirectory(TInt aOsAsid);
  1.2011 +	static TPde* PageDirectoryEntry(TInt aOsAsid, TLinAddr aAddress);
  1.2012 +	static TPhysAddr PdePhysAddr(TPde aPde);
  1.2013 +	static TPhysAddr PtePhysAddr(TPte aPte, TUint aPteIndex);
  1.2014 +	static TPte* PageTableFromPde(TPde aPde);
  1.2015 +	static TPte* SafePageTableFromPde(TPde aPde);
  1.2016 +	static TPhysAddr SectionBaseFromPde(TPde aPde);
  1.2017 +	static TPte* PtePtrFromLinAddr(TLinAddr aAddress, TInt aOsAsid);
  1.2018 +	static TPte* SafePtePtrFromLinAddr(TLinAddr aAddress, TInt aOsAsid);
  1.2019 +	static TPhysAddr PageTablePhysAddr(TPte* aPt);
  1.2020 +	static TPhysAddr LinearToPhysical(TLinAddr aAddr, TInt aOsAsid=KKernelOsAsid);
  1.2021 +	static TPhysAddr UncheckedLinearToPhysical(TLinAddr aAddr, TInt aOsAsid);
  1.2022 +	static TPte MakePteInaccessible(TPte aPte, TBool aReadOnly);
  1.2023 +	static TPte MakePteAccessible(TPte aPte, TBool aWrite);
  1.2024 +	static TBool IsPteReadOnly(TPte aPte);
  1.2025 +	static TBool IsPteMoreAccessible(TPte aNewPte, TPte aOldPte);
  1.2026 +	static TBool IsPteInaccessible(TPte aPte);
  1.2027 +	static TBool PdeMapsPageTable(TPde aPde);
  1.2028 +	static TBool PdeMapsSection(TPde aPde);
  1.2029 +
  1.2030 +	void SyncPhysicalMemoryBeforeDmaWrite(TPhysAddr* aPages, TUint aColour, TUint aOffset, TUint aSize, TUint32 aMapAttr);
  1.2031 +	void SyncPhysicalMemoryBeforeDmaRead (TPhysAddr* aPages, TUint aColour, TUint aOffset, TUint aSize, TUint32 aMapAttr);
  1.2032 +	void SyncPhysicalMemoryAfterDmaRead  (TPhysAddr* aPages, TUint aColour, TUint aOffset, TUint aSize, TUint32 aMapAttr);
  1.2033 +
  1.2034 +	static TPte SectionToPageEntry(TPde& aPde);
  1.2035 +	static TPde PageToSectionEntry(TPte aPte, TPde aPde);
  1.2036 +	static TMemoryAttributes CanonicalMemoryAttributes(TMemoryAttributes aAttr);
  1.2037 +
  1.2038 +public:
  1.2039 +	/**
  1.2040 +	Class representing the resources and methods required to create temporary
  1.2041 +	mappings of physical memory pages in order to make them accessible to
  1.2042 +	software.
   1.2043 +	These are required by various memory model functions and are created only
  1.2044 +	during system boot.
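          +
          +	Expected use follows this pattern (a minimal sketch; the physical address
          +	and colour arguments are illustrative):
          +
          +	@code
          +	TTempMapping tempMap;
          +	tempMap.Alloc(1);							// reserve virtual space, boot-time only
          +	TLinAddr va = tempMap.Map(physAddr,colour);	// map one physical page
          +	// ... access the page through va ...
          +	tempMap.Unmap();
          +	@endcode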
  1.2045 +	*/
  1.2046 +	class TTempMapping
  1.2047 +		{
  1.2048 +	public:
  1.2049 +		void Alloc(TUint aNumPages);
  1.2050 +		TLinAddr Map(TPhysAddr aPage, TUint aColour);
  1.2051 +		TLinAddr Map(TPhysAddr aPage, TUint aColour, TPte aBlankPte);
  1.2052 +		TLinAddr Map(TPhysAddr* aPages, TUint aCount, TUint aColour);
  1.2053 +		void Unmap();
  1.2054 +		void Unmap(TBool aIMBRequired);
  1.2055 +		FORCE_INLINE TTempMapping()
  1.2056 +			: iSize(0)
  1.2057 +			{}
  1.2058 +	public:
  1.2059 +		TLinAddr iLinAddr;		///< Virtual address of the memory page mapped by #iPtePtr.
  1.2060 +		TPte* iPtePtr;			///< Pointer to first PTE allocated to this object.
  1.2061 +	private:
  1.2062 +		TPte iBlankPte;			///< PTE value to use for mapping pages, with the physical address component equal to zero.
  1.2063 +		TUint8 iSize;			///< Maximum number of pages which can be mapped in one go.
  1.2064 +		TUint8 iCount;			///< Number of pages currently mapped.
  1.2065 +		TUint8 iColour;			///< Colour of any pages mapped (acts as index from #iLinAddr and #iPtePtr).
  1.2066 +		TUint8 iSpare1;
  1.2067 +	private:
  1.2068 +		static TLinAddr iNextLinAddr;
  1.2069 +		};
  1.2070 +private:
  1.2071 +	enum { KNumTempMappingSlots=2 };
  1.2072 +	/**
  1.2073 +	Temporary mappings used by various functions.
  1.2074 +	Use of these is serialised by the #RamAllocLock.
  1.2075 +	*/
  1.2076 +	TTempMapping iTempMap[KNumTempMappingSlots];
  1.2077 +
  1.2078 +	TTempMapping iPhysMemSyncTemp;	///< Temporary mapping used for physical memory sync.
  1.2079 +	DMutex* 	 iPhysMemSyncMutex;	///< Mutex used to serialise use of #iPhysMemSyncTemp.
  1.2080 +
  1.2081 +public:
  1.2082 +	TPte iTempPteCached;			///< PTE value for cached temporary mappings
  1.2083 +	TPte iTempPteUncached;			///< PTE value for uncached temporary mappings
  1.2084 +	TPte iTempPteCacheMaintenance;	///< PTE value for temporary mapping of cache maintenance
  1.2085 +private:
  1.2086 +	DRamAllocator* iRamPageAllocator;			///< The RAM allocator used for managing free RAM pages.
  1.2087 +	const SRamZone* iRamZones;					///< A pointer to the RAM zone configuration from the variant.
  1.2088 +	TRamZoneCallback iRamZoneCallback;			///< Pointer to the RAM zone callback function.
  1.2089 +	Defrag* iDefrag;							///< The RAM defrag class implementation.
  1.2090 +
  1.2091 +	/**
  1.2092 +	A counter incremented every time Mmu::PagesAllocated invalidates the L1 cache.
  1.2093 +	This is used as part of a cache maintenance optimisation.
  1.2094 +	*/
  1.2095 +	TInt iCacheInvalidateCounter;
  1.2096 +
  1.2097 +	/**
  1.2098 +	Number of free RAM pages which are cached at L1 and have
  1.2099 +	SPageInfo::CacheInvalidateCounter()==#iCacheInvalidateCounter.
  1.2100 +	This is used as part of a cache maintenance optimisation.
  1.2101 +	*/
  1.2102 +	TInt iCacheInvalidatePageCount;
  1.2103 +
  1.2104 +public:
  1.2105 +	/**
   1.2106 +	Linked list of threads which have an active IPC alias, i.e. which have called
  1.2107 +	DMemModelThread::Alias. Threads are linked by their DMemModelThread::iAliasLink member.
  1.2108 +	Updates to this list are protected by the #MmuLock.
  1.2109 +	*/
  1.2110 +	SDblQue iAliasList;
  1.2111 +
  1.2112 +	/**
  1.2113 +	The mutex used to protect RAM allocation.
  1.2114 +	This is the mutex #RamAllocLock operates on.
  1.2115 +	*/
  1.2116 +	DMutex* iRamAllocatorMutex;
  1.2117 +
  1.2118 +private:
  1.2119 +	/**
  1.2120 +	Number of nested calls to RamAllocLock::Lock.
  1.2121 +	*/
  1.2122 +	TUint iRamAllocLockCount;
  1.2123 +
  1.2124 +	/**
  1.2125 +	Set by various memory allocation routines to indicate that a memory allocation
  1.2126 +	has failed. This is used by #RamAllocLock in its management of out-of-memory
  1.2127 +	notifications.
  1.2128 +	*/
  1.2129 +	TBool iRamAllocFailed;
  1.2130 +
  1.2131 +	/**
  1.2132 +	Saved value for #FreeRamInPages which is used by #RamAllocLock in its management
  1.2133 +	of memory level change notifications.
  1.2134 +	*/
  1.2135 +	TUint iRamAllocInitialFreePages;
  1.2136 +
  1.2137 +	friend class RamAllocLock;
  1.2138 +private:
  1.2139 +	void VerifyRam();
  1.2140 +	};
  1.2141 +
  1.2142 +/**
  1.2143 +The single instance of class #Mmu.
  1.2144 +*/
  1.2145 +extern Mmu TheMmu;
  1.2146 +
  1.2147 +
  1.2148 +#ifndef _DEBUG
  1.2149 +/**
  1.2150 +Perform a page table walk to return the physical address of
   1.2151 +the memory mapped at virtual address \a aAddr in the
  1.2152 +address space \a aOsAsid.
  1.2153 +
  1.2154 +If the page table used was not one allocated by the kernel
  1.2155 +then the results are unpredictable and may cause a system fault.
  1.2156 +
  1.2157 +@pre #MmuLock held.
  1.2158 +*/
  1.2159 +FORCE_INLINE TPhysAddr Mmu::LinearToPhysical(TLinAddr aAddr, TInt aOsAsid)
  1.2160 +	{
  1.2161 +	return Mmu::UncheckedLinearToPhysical(aAddr,aOsAsid);
  1.2162 +	}
  1.2163 +#endif
  1.2164 +
  1.2165 +
  1.2166 +__ASSERT_COMPILE((Mmu::EAllocFlagLast>>Mmu::EAllocWipeByteShift)==0); // make sure flags don't run into wipe byte value
  1.2167 +
  1.2168 +
  1.2169 +/**
  1.2170 +Create a temporary mapping of a physical page.
   1.2171 +The #RamAllocLock must be held before this function is called and not released
   1.2172 +until after #UnmapTemp has been called.
  1.2173 +
  1.2174 +@param aPage	The physical address of the page to be mapped.
  1.2175 +@param aColour	The 'colour' of the page if relevant.
  1.2176 +@param aSlot	Slot number to use, must be less than Mmu::KNumTempMappingSlots.
  1.2177 +
  1.2178 +@return The linear address of where the page has been mapped.
  1.2179 +*/
  1.2180 +FORCE_INLINE TLinAddr Mmu::MapTemp(TPhysAddr aPage, TUint aColour, TUint aSlot)
  1.2181 +	{
  1.2182 +//	Kern::Printf("Mmu::MapTemp(0x%08x,%d,%d)",aPage,aColour,aSlot);
  1.2183 +	__NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
  1.2184 +	__NK_ASSERT_DEBUG(aSlot<KNumTempMappingSlots);
  1.2185 +	return iTempMap[aSlot].Map(aPage,aColour);
  1.2186 +	}
  1.2187 +
  1.2188 +
  1.2189 +/**
  1.2190 +Remove the temporary mapping created with MapTemp.
  1.2191 +
  1.2192 +@param aSlot	Slot number which was used when temp mapping was made.
  1.2193 +*/
  1.2194 +FORCE_INLINE void Mmu::UnmapTemp(TUint aSlot)
  1.2195 +	{
  1.2196 +//	Kern::Printf("Mmu::UnmapTemp(%d)",aSlot);
  1.2197 +	__NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
  1.2198 +	__NK_ASSERT_DEBUG(aSlot<KNumTempMappingSlots);
  1.2199 +	iTempMap[aSlot].Unmap();
  1.2200 +	}
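          +
          +// A typical use of the temporary mapping functions looks like this
          +// (a sketch; the page address, colour and use of memclr are illustrative):
          +//
          +//	RamAllocLock::Lock();
          +//	TLinAddr va = TheMmu.MapTemp(physAddr,colour);
          +//	memclr((TAny*)va,KPageSize); // access the page via the mapping
          +//	TheMmu.UnmapTemp();
          +//	RamAllocLock::Unlock();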
  1.2201 +
  1.2202 +
  1.2203 +/**
  1.2204 +Class representing the resources and arguments needed for various
  1.2205 +memory pinning operations.
  1.2206 +
  1.2207 +The term 'replacement pages' in this documentation means excess
  1.2208 +RAM pages which have been allocated to the demand paging pool so
   1.2209 +that when demand paged memory is pinned and removed from the pool,
   1.2210 +the pool does not become too small.
  1.2211 +
   1.2212 +Replacement pages are allocated with #AllocReplacementPages and their
  1.2213 +number remembered in #iReplacementPages. When a memory pinning operation
  1.2214 +removes pages from the paging pool it will reduce #iReplacementPages
  1.2215 +accordingly. At the end of the pinning operation, #FreeReplacementPages
  1.2216 +is used to free any unused replacement pages.
  1.2217 +*/
  1.2218 +class TPinArgs
  1.2219 +	{
  1.2220 +public:
  1.2221 +	/**
  1.2222 +	Boolean value set to true if the requester of the pinning operation
  1.2223 +	will only read from the pinned memory, not write to it.
  1.2224 +	This is used as an optimisation to avoid unnecessarily marking
  1.2225 +	demand paged memory as dirty.
  1.2226 +	*/
  1.2227 +	TBool iReadOnly;
  1.2228 +
  1.2229 +	/**
   1.2230 +	Boolean value set to true if sufficient replacement pages already exist
   1.2231 +	in the demand paging pool, so that #AllocReplacementPages does not need
   1.2232 +	to actually allocate any.
  1.2233 +	*/
  1.2234 +	TBool iUseReserve;
  1.2235 +
  1.2236 +	/**
  1.2237 +	The number of replacement pages allocated to this object by #AllocReplacementPages.
  1.2238 +	A value of #EUseReserveForPinReplacementPages indicates that #iUseReserve
  1.2239 +	was true, and there is sufficient RAM already reserved for the operation
  1.2240 +	being attempted.
  1.2241 +	*/
  1.2242 +	TUint iReplacementPages;
  1.2243 +
  1.2244 +	/**
  1.2245 +	The number of page tables which have been pinned during the course
  1.2246 +	of an operation. This is the number of valid entries written to
  1.2247 +	#iPinnedPageTables.
  1.2248 +	*/
  1.2249 +	TUint iNumPinnedPageTables;
  1.2250 +
  1.2251 +	/**
  1.2252 +	Pointer to the location to store the addresses of any page tables
  1.2253 +	which have been pinned during the course of an operation. This is
  1.2254 +	incremented as entries are added.
  1.2255 +
   1.2256 +	A null pointer indicates that page tables do not require pinning.
  1.2257 +	*/
  1.2258 +	TPte** iPinnedPageTables;
  1.2259 +
  1.2260 +public:
  1.2261 +	/**
  1.2262 +	Construct an empty TPinArgs, one which owns no resources.
  1.2263 +	*/
  1.2264 +	inline TPinArgs()
  1.2265 +		: iReadOnly(0), iUseReserve(0), iReplacementPages(0), iNumPinnedPageTables(0), iPinnedPageTables(0)
  1.2266 +		{
  1.2267 +		}
  1.2268 +
  1.2269 +	/**
  1.2270 +	Return true if this TPinArgs has at least \a aRequired number of
  1.2271 +	replacement pages allocated.
  1.2272 +	*/
  1.2273 +	FORCE_INLINE TBool HaveSufficientPages(TUint aRequired)
  1.2274 +		{
   1.2275 +		return iReplacementPages>=aRequired; // Note, EUseReserveForPinReplacementPages (0xffffffffu) always passes this test.
  1.2276 +		}
  1.2277 +
  1.2278 +	/**
  1.2279 +	Allocate replacement pages for this TPinArgs so that it has at least
  1.2280 +	\a aNumPages.
  1.2281 +	*/
  1.2282 +	TInt AllocReplacementPages(TUint aNumPages);
  1.2283 +
  1.2284 +	/**
  1.2285 +	Free all replacement pages which this TPinArgs still owns.
  1.2286 +	*/
  1.2287 +	void FreeReplacementPages();
  1.2288 +
  1.2289 +#ifdef _DEBUG
  1.2290 +	~TPinArgs();
  1.2291 +#endif
  1.2292 +
  1.2293 +	/**
  1.2294 +	Value used to indicate that replacement pages are to come
  1.2295 +	from an already allocated reserve and don't need specially
  1.2296 +	allocating.
  1.2297 +	*/
  1.2298 +	enum { EUseReserveForPinReplacementPages = 0xffffffffu };
  1.2299 +	};
  1.2300 +
  1.2301 +
  1.2302 +#ifdef _DEBUG
  1.2303 +inline TPinArgs::~TPinArgs()
  1.2304 +	{
  1.2305 +	__NK_ASSERT_DEBUG(!iReplacementPages);
  1.2306 +	}
  1.2307 +#endif
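          +
          +// The expected usage pattern for TPinArgs is (a sketch; PinPages() is an
          +// illustrative placeholder for a pinning operation which consumes
          +// replacement pages):
          +//
          +//	TPinArgs pinArgs;
          +//	pinArgs.iReadOnly = ETrue;
          +//	TInt r = pinArgs.AllocReplacementPages(aNumPages);
          +//	if(r==KErrNone)
          +//		{
          +//		PinPages(pinArgs);				// reduces pinArgs.iReplacementPages
          +//		pinArgs.FreeReplacementPages();	// free any pages left unused
          +//		}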
  1.2308 +
  1.2309 +
  1.2310 +/**
  1.2311 +Enumeration used in various RestrictPages APIs to specify the type of restrictions to apply.
  1.2312 +*/
  1.2313 +enum TRestrictPagesType
  1.2314 +	{
  1.2315 +	/**
   1.2316 +	Make all mappings of the page not accessible.
  1.2317 +	Pinned mappings will veto this operation.
  1.2318 +	*/
  1.2319 +	ERestrictPagesNoAccess			 = 1,
  1.2320 +
  1.2321 +	/**
  1.2322 +	Demand paged memory being made 'old'.
  1.2323 +	Specific case of ERestrictPagesNoAccess.
  1.2324 +	*/
  1.2325 +	ERestrictPagesNoAccessForOldPage = ERestrictPagesNoAccess|0x80000000,
  1.2326 +
  1.2327 +	/**
   1.2328 +	Flag indicating that, for page moving operations, pinned mappings always veto the move.
  1.2329 +	*/
  1.2330 +	ERestrictPagesForMovingFlag  = 0x40000000,
  1.2331 +
  1.2332 +	/**
   1.2333 +	Movable memory being made 'no access' whilst it is being copied.
  1.2334 +	Special case of ERestrictPagesNoAccess where pinned mappings always veto 
  1.2335 +	this operation even if they are read-only mappings.
  1.2336 +	*/
  1.2337 +	ERestrictPagesNoAccessForMoving  = ERestrictPagesNoAccess|ERestrictPagesForMovingFlag,
  1.2338 +	};
  1.2339 +
  1.2340 +#include "xmmu.h"
  1.2341 +
  1.2342 +#endif