os/kernelhwsrv/kernel/eka/memmodel/epoc/flexible/mmu/mmapping.h
changeset 0 bde4ae8d615e
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/os/kernelhwsrv/kernel/eka/memmodel/epoc/flexible/mmu/mmapping.h	Fri Jun 15 03:10:57 2012 +0200
     1.3 @@ -0,0 +1,1136 @@
     1.4 +// Copyright (c) 2007-2009 Nokia Corporation and/or its subsidiary(-ies).
     1.5 +// All rights reserved.
     1.6 +// This component and the accompanying materials are made available
     1.7 +// under the terms of the License "Eclipse Public License v1.0"
     1.8 +// which accompanies this distribution, and is available
     1.9 +// at the URL "http://www.eclipse.org/legal/epl-v10.html".
    1.10 +//
    1.11 +// Initial Contributors:
    1.12 +// Nokia Corporation - initial contribution.
    1.13 +//
    1.14 +// Contributors:
    1.15 +//
    1.16 +// Description:
    1.17 +//
    1.18 +
    1.19 +/**
    1.20 + @file
    1.21 + @internalComponent
    1.22 +*/
    1.23 +
    1.24 +#ifndef MMAPPING_H
    1.25 +#define MMAPPING_H
    1.26 +
    1.27 +#include "mrefcntobj.h"
    1.28 +#include "mmappinglist.h"
    1.29 +#include "mpagearray.h"
    1.30 +
    1.31 +
    1.32 +
    1.33 +/**
    1.34 +Base class for memory mappings.
    1.35 +
    1.36 +This provides the methods for linking a mapping to a memory object
    1.37 +as well as the interface for updating the MMU page tables associated
    1.38 +with a mapping when the memory state changes.
    1.39 +*/
    1.40 +class DMemoryMappingBase : public DReferenceCountedObject
    1.41 +	{
    1.42 +private:
    1.43 +	/**
    1.44 +	Memory object to which this mapping is currently attached.
     1.45 +	Updates to this pointer are protected by the MmuLock.
    1.46 +	*/
    1.47 +	DMemoryObject*	 iMemory;
    1.48 +
    1.49 +public:
    1.50 +	/**
    1.51 +	Link used to maintain list of mappings attached to a memory object.
    1.52 +	*/
    1.53 +	TMappingListLink iLink;
    1.54 +
    1.55 +	/**
    1.56 +	Offset, in page units, within the memory object's memory for start of this mapping.
    1.57 +	*/
    1.58 +	TUint			 iStartIndex;
    1.59 +
    1.60 +	/**
    1.61 +	Size of this mapping, in page units.
    1.62 +	*/
    1.63 +	TUint			 iSizeInPages;
    1.64 +
    1.65 +private:
    1.66 +	/**
    1.67 +	Instance count which is incremented every time a mapping is attached to a memory object.
    1.68 +	When code is manipulating mappings, the instance count is used to detect that a
     1.69 +	mapping has been reused and that the operation it is performing is no longer needed.
    1.70 +	*/
    1.71 +	TUint			 iMapInstanceCount;
    1.72 +
    1.73 +public:
    1.74 +
    1.75 +	/**
    1.76 +	Bit flags stored in #Flags giving various state and attributes of the mapping.
    1.77 +	*/
    1.78 +	enum TFlags
    1.79 +		{
    1.80 +		/**
    1.81 +		Flag set during object construction to indicate that this mapping is of
    1.82 +		class #DCoarseMapping.
    1.83 +		*/
    1.84 +		ECoarseMapping			= 1<<0,
    1.85 +
    1.86 +		/**
    1.87 +		Flag set during object construction to indicate that this mapping will pin
    1.88 +		any memory pages it maps. This may not be used with coarse memory mappings.
    1.89 +		*/
    1.90 +		EPinned					= 1<<1,
    1.91 +
    1.92 +		/**
    1.93 +		Pages have already been reserved for pinning, so when this mapping is attached
    1.94 +		to a memory object no additional pages need to be reserved. Pre-reserving pages
    1.95 +		is used to prevent the possibility of failing to pin due to an out of memory
    1.96 +		condition. It is essential that the users of these mappings ensure that there
    1.97 +		are enough reserved pages in the paging pool to meet the maximum mapping size
    1.98 +		used.
    1.99 +		*/
   1.100 +		EPinningPagesReserved	= 1<<2,
   1.101 +
   1.102 +		/**
   1.103 +		Pages have been successfully pinned by this mapping. This is set after demand
    1.104 +		paged memory has been successfully pinned and is used to indicate that the pages
   1.105 +		need unpinning again when the mapping is later unmapped.
   1.106 +		*/
   1.107 +		EPagesPinned			= 1<<3,
   1.108 +
   1.109 +		/**
   1.110 +		Flag set during object construction to indicate that MMU page tables are to
   1.111 +		be permanently allocated for use by this mapping. Normally, page tables are
   1.112 +		allocated as needed to map memory which can result in out-of-memory errors
   1.113 +		when mapping memory pages.
   1.114 +		*/
   1.115 +		EPermanentPageTables	= 1<<4,
   1.116 +
   1.117 +		/**
    1.118 +		Permanent page tables have been successfully allocated for this mapping.
    1.119 +		This flag is used to track the allocation so that the page tables can be released when the mapping
   1.120 +		is destroyed.
   1.121 +		*/
   1.122 +		EPageTablesAllocated	= 1<<5,
   1.123 +
   1.124 +		/**
   1.125 +		For pinned mappings (EPinned) this flag is set whenever the mapping prevents
   1.126 +		any pages of memory from being fully decommitted from a memory object. When a
   1.127 +		mapping is finally unmapped from the memory object this flag is checked, and,
    1.128 +		if set, further cleanup of the decommitted pages is triggered.
   1.129 +		*/
   1.130 +		EPageUnmapVetoed		= 1<<6,
   1.131 +
   1.132 +		/**
   1.133 +		Mapping is being, or has been, detached from a memory object.
   1.134 +		When set, operations on the mapping should act as though the mapping is no
   1.135 +		longer attached to a memory object. Specifically, no further pages of memory
   1.136 +		should be mapped into this mapping.
   1.137 +
   1.138 +		This flag is only set when the MmuLock is held.
   1.139 +		*/
   1.140 +		EDetaching				= 1<<7,
   1.141 +
   1.142 +		/**
   1.143 +		This mapping is a physical pinning mapping.  The pages it pins
   1.144 +		cannot be paged out or moved.
   1.145 +
   1.146 +		This flag is set when DPhysicalPinMapping objects are created.
   1.147 +		*/
   1.148 +		EPhysicalPinningMapping = 1<<8,
   1.149 +
   1.150 +		/**
   1.151 +		Flag set during object construction to indicate that this mapping is of
   1.152 +		class #DLargeMapping.
   1.153 +
   1.154 +		Note that #DLargeMapping is derived from #DCoarseMapping, therefore presence of this flag
   1.155 +		implies presence of #ECoarseMapping as well.
   1.156 +		*/
   1.157 +		ELargeMapping			= 1<<9,
   1.158 +		};
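          +
          +	/*
          +	Illustrative sketch: a subclass constructor might combine these flags when
          +	passing the initial value of #Flags to the DMemoryMappingBase constructor.
          +	This is a hypothetical example based on the flag descriptions above; the
          +	combinations used by the real subclasses may differ.
          +
          +	@code
          +	DPhysicalPinMapping::DPhysicalPinMapping()
          +		: DMemoryMappingBase(EPinned|EPhysicalPinningMapping)
          +		{
          +		}
          +	@endcode
          +	*/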
   1.159 +
   1.160 +	/**
   1.161 +	Bitmask of values from enum #TPteType which will be used to calculate
   1.162 +	the correct attributes for any page table entries this mapping uses.
   1.163 +	*/
   1.164 +	FORCE_INLINE TUint8& PteType()
   1.165 +		{ return iLink.iSpare1; }
   1.166 +
   1.167 +	/**
   1.168 +	Bitmask of values from enum #TFlags.
    1.169 +	The flags are 16 bits wide and are stored in iLink.iSpare2 and iLink.iSpare3.
   1.170 +	*/
   1.171 +	FORCE_INLINE TUint16& Flags()
   1.172 +		{ return (TUint16&)iLink.iSpare2; }
   1.173 +
   1.174 +public:
   1.175 +	/**
   1.176 +	Return the memory object to which this mapping is currently attached.
   1.177 +
    1.178 +	@pre MmuLock is held (if aNoCheck==false).
   1.179 +	*/
   1.180 +	FORCE_INLINE DMemoryObject* Memory(TBool aNoCheck=false)
   1.181 +		{
   1.182 +		if(!aNoCheck)
   1.183 +			__NK_ASSERT_DEBUG(MmuLock::IsHeld());
   1.184 +		return iMemory;
   1.185 +		}
   1.186 +
   1.187 +	/**
   1.188 +	Return true if the mapping is currently attached to a memory object.
   1.189 +	*/
   1.190 +	FORCE_INLINE TBool IsAttached()
   1.191 +		{ return iLink.IsLinked(); }
   1.192 +
   1.193 +	/**
   1.194 +	Return true if the mapping is being, or has been, detached from a memory object.
   1.195 +	The mapping may or may not still be attached to a memory object, i.e. #IsAttached
   1.196 +	is indeterminate.
   1.197 +	*/
   1.198 +	FORCE_INLINE TBool BeingDetached()
   1.199 +		{ return Flags()&EDetaching; }
   1.200 +
   1.201 +	/**
   1.202 +	Return the mapping instance count.
   1.203 +	@see #iMapInstanceCount.
   1.204 +	*/
   1.205 +	FORCE_INLINE TUint MapInstanceCount()
   1.206 +		{ return iMapInstanceCount; }
   1.207 +
   1.208 +	/**
   1.209 +	Return true if this mapping provides read only access to memory.
   1.210 +	*/
   1.211 +	FORCE_INLINE TBool IsReadOnly()
   1.212 +		{ return !(PteType()&EPteTypeWritable); }
   1.213 +
   1.214 +#ifdef MMU_SUPPORTS_EXECUTE_NEVER
   1.215 +	/**
   1.216 +	Return true if this mapping provides access to memory which allows
   1.217 +	code to be executed from it.
   1.218 +	*/
   1.219 +	FORCE_INLINE TBool IsExecutable()
   1.220 +		{ return (PteType()&EPteTypeExecutable); }
   1.221 +#endif
   1.222 +
   1.223 +	/**
   1.224 +	Return true if this is a coarse mapping, in other words it is an instance of #DCoarseMapping or
   1.225 +	#DLargeMapping.
   1.226 +	*/
   1.227 +	FORCE_INLINE TBool IsCoarse()
   1.228 +		{ return Flags()&ECoarseMapping; }
   1.229 +
   1.230 +	/**
   1.231 +	Return true if this mapping is a large mapping, in other words an instance of #DLargeMapping.
   1.232 +
   1.233 +	Note that all large mappings are also coarse mappings.
   1.234 +	*/
   1.235 +	FORCE_INLINE TBool IsLarge()
   1.236 +		{ return Flags()&ELargeMapping; }
   1.237 +
   1.238 +	/**
   1.239 +	Return true if this mapping pins the memory it maps.
   1.240 +	*/
   1.241 +	FORCE_INLINE TBool IsPinned()
   1.242 +		{ return Flags()&EPinned; }
   1.243 +		
   1.244 +	/**
   1.245 +	Return true if this mapping physically pins the memory it maps.
   1.246 +	*/
   1.247 +	FORCE_INLINE TBool IsPhysicalPinning()
   1.248 +		{ return Flags()&EPhysicalPinningMapping; }
   1.249 +
   1.250 +	/**
    1.251 +	Return the access permissions which this mapping uses to map memory.
   1.252 +	*/
   1.253 +	FORCE_INLINE TMappingPermissions Permissions()
   1.254 +		{ return Mmu::PermissionsFromPteType(PteType()); }
   1.255 +
   1.256 +	/**
   1.257 +	Link this mapping to a memory object.
   1.258 +
   1.259 +	This is called by the memory object during processing of #Attach.
   1.260 +
   1.261 +	@param aMemory		The memory object the mapping is being attached to.
   1.262 +	@param aMappingList	The list to add this mapping to.
   1.263 +
   1.264 +	@pre MmuLock is held.
   1.265 +	@pre Mapping list lock is held.
   1.266 +	*/
   1.267 +	void LinkToMemory(DMemoryObject* aMemory, TMappingList& aMappingList);
   1.268 +
   1.269 +	/**
   1.270 +	Unlink this mapping from the memory object it was previously linked to with
   1.271 +	#LinkToMemory.
   1.272 +
   1.273 +	This is called by the memory object during processing of #Detach.
   1.274 +
   1.275 +	@param aMappingList	The list that the mapping appears on.
   1.276 +	*/
   1.277 +	void UnlinkFromMemory(TMappingList& aMappingList);
   1.278 +
   1.279 +	/**
   1.280 +	Get the physical address(es) for a region of pages in this mapping.
   1.281 +
   1.282 +	@param aIndex			Page index, within the mapping, for start of the region.
   1.283 +	@param aCount			Number of pages in the region.
   1.284 +	@param aPhysicalAddress	On success, this value is set to one of two values.
   1.285 +							If the specified region is physically contiguous,
   1.286 +							the value is the physical address of the first page
   1.287 +							in the region. If the region is discontiguous, the
   1.288 +							value is set to KPhysAddrInvalid.
   1.289 +	@param aPhysicalPageList If not zero, this points to an array of TPhysAddr
   1.290 +							objects. On success, this array will be filled
   1.291 +							with the addresses of the physical pages which
    1.292 +							contain the specified region. If aPhysicalPageList is
   1.293 +							zero, then the function will fail with
   1.294 +							KErrNotFound if the specified region is not
   1.295 +							physically contiguous.
   1.296 +
   1.297 +	@return 0 if successful and the whole region is physically contiguous.
   1.298 +			1 if successful but the region isn't physically contiguous.
   1.299 +			KErrNotFound, if any page in the region is not present,
   1.300 +			otherwise one of the system wide error codes.
   1.301 +
   1.302 +	@pre This mapping must have been attached to a memory object with #Pin.
   1.303 +	*/
   1.304 +	TInt PhysAddr(TUint aIndex, TUint aCount, TPhysAddr& aPhysicalAddress, TPhysAddr* aPhysicalPageList);
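          +
          +	/*
          +	Illustrative sketch of a hypothetical caller interpreting the result of
          +	#PhysAddr. The variables 'mapping', 'index', 'count' and 'pageList' are
          +	placeholders; 'pageList' must have room for 'count' entries.
          +
          +	@code
          +	TPhysAddr physAddr;
          +	TInt r = mapping->PhysAddr(index, count, physAddr, pageList);
          +	if(r==0)
          +		{
          +		// whole region is physically contiguous, starting at physAddr...
          +		}
          +	else if(r==1)
          +		{
          +		// region is discontiguous, pageList holds the address of each page...
          +		}
          +	else
          +		{
          +		// r is KErrNotFound (a page wasn't present) or another system wide error code...
          +		}
          +	@endcode
          +	*/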
   1.305 +
   1.306 +protected:
   1.307 +	/**
   1.308 +	@param aType Initial value for #Flags.
   1.309 +	*/
   1.310 +	DMemoryMappingBase(TUint aType);
   1.311 +
   1.312 +	/**
   1.313 +	Attach this mapping to a memory object so that it maps a specified region of its memory.
   1.314 +
   1.315 +	@param aMemory	The memory object.
   1.316 +	@param aIndex	The page index of the first page of memory to be mapped by the mapping.
   1.317 +	@param aCount	The number of pages of memory to be mapped by the mapping.
   1.318 +
   1.319 +	@return KErrNone if successful, otherwise one of the system wide error codes.
   1.320 +	*/
   1.321 +	TInt Attach(DMemoryObject* aMemory, TUint aIndex, TUint aCount);
   1.322 +
   1.323 +	/**
   1.324 +	Remove this mapping from the memory object it was previously attached to by #Attach.
   1.325 +	*/
   1.326 +	void Detach();
   1.327 +
   1.328 +public:
   1.329 +	/**
   1.330 +	Update the page table entries corresponding to this mapping to add entries for
   1.331 +	a specified set of memory pages.
   1.332 +
   1.333 +	This method is called by DMemoryObject::MapPages to update each mapping attached
   1.334 +	to a memory object whenever new pages of memory are added. However, it won't be
   1.335 +	called for any mapping with the #EPinned attribute as such mappings are unchanging.
   1.336 +
   1.337 +	@param aPages				An RPageArray::TIter which refers to a range of pages
   1.338 +								in a memory object. This has been clipped to fit within
   1.339 +								the range of pages mapped by this mapping.
   1.340 +								Only array entries which have state RPageArray::ECommitted
   1.341 +								should be mapped into the mapping's page tables.
   1.342 +
   1.343 +	@param aMapInstanceCount	The instance of this mapping which is to be updated.
   1.344 +								Whenever this no longer matches the current #MapInstanceCount
   1.345 +								the function must not update any more of the mapping's
    1.346 +								page table entries (but must still return KErrNone).
   1.347 +
   1.348 +	@return KErrNone if successful, otherwise one of the system wide error codes.	
   1.349 +	*/
   1.350 +	virtual TInt MapPages(RPageArray::TIter aPages, TUint aMapInstanceCount) =0;
   1.351 +
   1.352 +	/**
   1.353 +	Update the page table entries corresponding to this mapping to remove entries for
   1.354 +	a specified set of memory pages.
   1.355 +
   1.356 +	This method is called by DMemoryObject::UnmapPages to update each mapping attached
   1.357 +	to a memory object whenever pages of memory are removed.
   1.358 +
   1.359 +	@param aPages				An RPageArray::TIter which refers to a range of pages
   1.360 +								in a memory object. This has been clipped to fit within
   1.361 +								the range of pages mapped by this mapping.
   1.362 +								Only array entries which return true for
   1.363 +								RPageArray::TargetStateIsDecommitted should be unmapped
   1.364 +								from the mapping's page tables.
   1.365 +
   1.366 +	@param aMapInstanceCount	The instance of this mapping which is to be updated.
   1.367 +								Whenever this no longer matches the current #MapInstanceCount
   1.368 +								the function must not update any more of the mapping's
   1.369 +								page table entries.
   1.370 +	*/
   1.371 +	virtual void UnmapPages(RPageArray::TIter aPages, TUint aMapInstanceCount) =0;
   1.372 +
   1.373 +	/**
    1.374 +	Update the page table entry corresponding to this mapping for a specified
   1.375 +	page that has just been moved or shadowed.
   1.376 +
    1.377 +	@param aPageArray			The page array entry of the page in a memory object.
   1.378 +								Only array entries which have a target state of 
   1.379 +								RPageArray::ECommitted should be mapped into the 
   1.380 +								mapping's page tables.
   1.381 +
   1.382 +	@param aIndex				The index of the page in the memory object.
   1.383 +
   1.384 +	@param aMapInstanceCount	The instance of this mapping which is to be updated.
   1.385 +								Whenever this no longer matches the current #MapInstanceCount
   1.386 +								the function must not update any more of the mapping's
    1.387 +								page table entries (but must still return KErrNone).
   1.388 +
   1.389 +	@param	aInvalidateTLB		Set to ETrue when the TLB entries associated with this page
   1.390 +								should be invalidated.  This must be done when there is 
   1.391 +								already a valid pte for this page, i.e. if the page is still 
   1.392 +								mapped.
   1.393 +	*/
   1.394 +	virtual void RemapPage(TPhysAddr& aPageArray, TUint aIndex, TUint aMapInstanceCount, TBool aInvalidateTLB)=0;
   1.395 +
   1.396 +	/**
   1.397 +	Update the page table entries corresponding to this mapping to apply access restrictions
   1.398 +	to a specified set of memory pages.
   1.399 +
   1.400 +	This method is called by DMemoryObject::RestrictPages to update each mapping attached
   1.401 +	to a memory object whenever pages of memory are restricted.
   1.402 +
   1.403 +	@param aPages				An RPageArray::TIter which refers to a range of pages
   1.404 +								in a memory object. This has been clipped to fit within
   1.405 +								the range of pages mapped by this mapping.
   1.406 +								Only array entries which return true for
   1.407 +								RPageArray::TargetStateIsDecommitted should be unmapped
   1.408 +								from the mapping's page tables.
   1.409 +
   1.410 +	@param aMapInstanceCount	The instance of this mapping which is to be updated.
   1.411 +								Whenever this no longer matches the current #MapInstanceCount
   1.412 +								the function must not update any more of the mapping's
   1.413 +								page table entries.
   1.414 +	*/
   1.415 +	virtual void RestrictPagesNA(RPageArray::TIter aPages, TUint aMapInstanceCount) =0;
   1.416 +
   1.417 +	/**
   1.418 +	Update the page table entries corresponding to this mapping to add entries for
   1.419 +	a specified set of demand paged memory pages following a 'page in' or memory
   1.420 +	pinning operation.
   1.421 +
   1.422 +	@param aPages				An RPageArray::TIter which refers to a range of pages
   1.423 +								in a memory object. This will be within the range of pages
   1.424 +								mapped by this mapping.
   1.425 +								Only array entries which have state RPageArray::ECommitted
   1.426 +								should be mapped into the mapping's page tables.
   1.427 +
   1.428 +	@param aPinArgs				The resources required to pin any page tables the mapping uses.
    1.429 +								Page tables must be pinned if \a aPinArgs.iPinnedPageTables is
    1.430 +								not the null pointer, in which case the virtual addresses of the
    1.431 +								pinned page tables must be stored in the array this points to.
   1.432 +								\a aPinArgs.iReadOnly is true if write access permissions
   1.433 +								are not needed.
   1.434 +
   1.435 +	@return KErrNone if successful, otherwise one of the system wide error codes.	
   1.436 +	*/
   1.437 +	virtual TInt PageIn(RPageArray::TIter aPages, TPinArgs& aPinArgs, TUint aMapInstanceCount) =0;
   1.438 +
   1.439 +
   1.440 +	/**
   1.441 +	Update the page table entry corresponding to this mapping to add an entry for
   1.442 +	a specified page which is in the process of being moved.
   1.443 +
   1.444 +	@param aPageArrayPtr		The page array entry for the page to be mapped which must be
    1.445 +								within this mapping's range of pages.
   1.446 +								Only array entries which have a target state of
   1.447 +								RPageArray::ECommitted should be mapped into the mapping's 
   1.448 +								page tables.
   1.449 +
   1.450 +	@param	aIndex				The index of the page.
   1.451 +
   1.452 +	@return ETrue if successful, EFalse otherwise.
   1.453 +	*/
   1.454 +	virtual TBool MovingPageIn(TPhysAddr& aPageArrayPtr, TUint aIndex)=0;
   1.455 +
   1.456 +
   1.457 +	/**
   1.458 +	In debug builds, dump information about this mapping to the kernel trace port.
   1.459 +	*/
   1.460 +	virtual void Dump();
   1.461 +
   1.462 +private:
   1.463 +	/**
   1.464 +	Update this mapping's MMU data structures to map all pages of memory
   1.465 +	currently committed to the memory object (#iMemory) in the region covered
   1.466 +	by this mapping.
   1.467 +
   1.468 +	This method is called by #Attach after the mapping has been linked
   1.469 +	into the memory object.
   1.470 +
   1.471 +	@return KErrNone if successful, otherwise one of the system wide error codes.
   1.472 +	*/
   1.473 +	virtual TInt DoMap() =0;
   1.474 +
   1.475 +	/**
   1.476 +	Update this mapping's MMU data structures to unmap all pages of memory.
   1.477 +
   1.478 +	This method is called by #Detach before the mapping has been unlinked
   1.479 +	from the memory object but after the #EDetaching flag has been set.
   1.480 +	*/
   1.481 +	virtual void DoUnmap() =0;
   1.482 +
   1.483 +protected:
   1.484 +	/**
    1.485 +	For pinned mappings, this virtual method is called by #Attach in order to pin
   1.486 +	pages of memory if required. This is called after the mapping has been linked
   1.487 +	into the memory object but before #DoMap.
   1.488 +
   1.489 +	The default implementation of this method simply calls DMemoryManager::Pin.
   1.490 +
   1.491 +	@param aPinArgs	The resources to use for pinning. This has sufficient replacement
   1.492 +					pages allocated to pin every page the mapping covers, and the
   1.493 +					value of \a aPinArgs.iReadOnly has been set to correspond to the
    1.494 +					mapping's access permissions.
   1.495 +
   1.496 +	@return KErrNone if successful, otherwise one of the system wide error codes.
   1.497 +	*/
   1.498 +	virtual TInt DoPin(TPinArgs& aPinArgs);
   1.499 +
   1.500 +	/**
    1.501 +	For pinned mappings, this virtual method is called by #Detach in order to unpin
   1.502 +	pages of memory if required. This is called before the mapping has been unlinked
   1.503 +	from the memory object but after #DoUnmap.
   1.504 +
   1.505 +	The default implementation of this method simply calls DMemoryManager::Unpin.
   1.506 +
   1.507 +	@param aPinArgs	The resources used for pinning. The replacement pages allocated
    1.508 +					to this will be increased for each page which became completely
   1.509 +					unpinned.
   1.510 +	*/
   1.511 +	virtual void DoUnpin(TPinArgs& aPinArgs);
   1.512 +	};
   1.513 +
   1.514 +
   1.515 +
   1.516 +/**
    1.517 +Base class for memory mappings which map memory contents into an address space.
   1.518 +
   1.519 +This provides methods for allocating virtual memory and holds the attributes needed
   1.520 +for MMU page table entries.
   1.521 +*/
   1.522 +class DMemoryMapping : public DMemoryMappingBase
   1.523 +	{
   1.524 +protected:
   1.525 +	/**
   1.526 +	The page directory entry (PDE) value for use when mapping this mapping's page tables.
    1.527 +	The physical address component of this value is zero, so a page table's physical
    1.528 +	address can simply be ORed in.
   1.529 +
   1.530 +	This could potentially be removed (see DMemoryMapping::PdeType()).
   1.531 +	*/
   1.532 +	TPde			iBlankPde;
   1.533 +
   1.534 +	/**
   1.535 +	The page table entry (PTE) value for use when mapping pages into this mapping.
    1.536 +	The physical address component of this value is zero, so a page's physical
    1.537 +	address can simply be ORed in.
   1.538 +	*/
   1.539 +	TPte			iBlankPte;
   1.540 +
   1.541 +	/**
   1.542 +	Start of the virtual address region allocated for use by this mapping
   1.543 +	ORed with the OS ASID of the address space this lies in.
   1.544 +
   1.545 +	Note, the address at which memory is mapped (#iLinAddrAndOsAsid) may be different
   1.546 +	to this allocated address due to page colouring restrictions.
   1.547 +
   1.548 +	@see iAllocatedSize
   1.549 +	*/
   1.550 +	TLinAddr		iAllocatedLinAddrAndOsAsid;
   1.551 +
   1.552 +	/**
    1.553 +	Size of the virtual address region allocated for use by this mapping.
   1.554 +
   1.555 +	@see iAllocatedLinAddrAndOsAsid
   1.556 +	*/
   1.557 +	TUint			iAllocatedSize;
   1.558 +
   1.559 +private:
   1.560 +	/**
   1.561 +	Start of the virtual address region that this mapping is currently
   1.562 +	mapping memory at, ORed with the OS ASID of the address space this lies in.
   1.563 +
   1.564 +	This value is set by #Map which is called from #Attach when the mapping
   1.565 +	is attached to a memory object. The address used may be different to
   1.566 +	#iAllocatedLinAddrAndOsAsid due to page colouring restrictions.
   1.567 +
   1.568 +	The size of the region mapped is #iSizeInPages.
   1.569 +
   1.570 +	Note, access to this value is through #Base() and #OsAsid().
   1.571 +	*/
   1.572 +	TLinAddr		iLinAddrAndOsAsid;
   1.573 +
   1.574 +public:
   1.575 +	/**
   1.576 +	Second phase constructor.
   1.577 +
   1.578 +	The main function of this is to allocate a virtual address region for the mapping
   1.579 +	and to add it to an address space.
   1.580 +
   1.581 +	@param aAttributes		The attributes of the memory which this mapping is intended to map.
    1.582 +							This is only needed to set up #PdeType, which is required for correct
    1.583 +							virtual address allocation, so in practice the only relevant attribute
    1.584 +							to set is EMemoryAttributeUseECC if required; otherwise use
   1.585 +							EMemoryAttributeStandard.
   1.586 +
   1.587 +	@param aFlags			A combination of the options from enum TMappingCreateFlags.
   1.588 +
   1.589 +	@param aOsAsid			The OS ASID of the address space the mapping is to be added to.
   1.590 +
   1.591 +	@param aAddr			The virtual address to use for the mapping, or zero if this is
   1.592 +							to be allocated by this function.
   1.593 +
   1.594 +	@param aSize			The maximum size of memory, in bytes, this mapping will be used to
   1.595 +							map. This determines the size of the virtual address region the
   1.596 +							mapping will use.
   1.597 +
    1.598 +	@param aColourOffset	The byte offset within a memory object's memory at which this mapping
   1.599 +							is to start. This is used to adjust virtual memory allocation to
   1.600 +							meet page colouring restrictions. If this value is not known leave
   1.601 +							this argument unspecified; however, it must be specified if \a aAddr
   1.602 +							is specified.
   1.603 +
   1.604 +	@return KErrNone if successful, otherwise one of the system wide error codes.	
   1.605 +	*/
   1.606 +	TInt Construct(TMemoryAttributes aAttributes, TMappingCreateFlags aFlags, TInt aOsAsid, TLinAddr aAddr, TUint aSize, TLinAddr aColourOffset=~(TLinAddr)0);
   1.607 +
   1.608 +	/**
   1.609 +	Add this mapping to a memory object so that it maps a specified region of its memory.
   1.610 +
   1.611 +	Most of the action of this method is performed by #Attach.
   1.612 +
   1.613 +	@param aMemory		The memory object.
   1.614 +	@param aIndex		The page index of the first page of memory to be mapped by the mapping.
   1.615 +	@param aCount		The number of pages of memory to be mapped by the mapping.
   1.616 +	@param aPermissions	The memory access permissions to apply to the mapping.
   1.617 +
   1.618 +	@return KErrNone if successful, otherwise one of the system wide error codes.
   1.619 +	*/
   1.620 +	TInt Map(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TMappingPermissions aPermissions);
   1.621 +
   1.622 +	/**
   1.623 +	Remove this mapping from the memory object it was previously added to by #Map.
   1.624 +
   1.625 +	Most of the action of this method is performed by #Detach.
   1.626 +	*/
   1.627 +	void Unmap();
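          +
          +	/*
          +	Illustrative lifecycle sketch using a concrete subclass such as DFineMapping.
          +	The variables 'attributes', 'flags', 'osAsid', 'memory', 'pageCount' and
          +	'permissions' are placeholders for values a real caller would supply.
          +
          +	@code
          +	DFineMapping* mapping = new DFineMapping;
          +	if(mapping)
          +		{
          +		// reserve a virtual address region, letting Construct choose the address...
          +		TInt r = mapping->Construct(attributes, flags, osAsid, 0, pageCount<<KPageShift);
          +		if(r==KErrNone)
          +			r = mapping->Map(memory, 0, pageCount, permissions); // performs Attach
          +		// ... memory contents are now accessible at mapping->Base() ...
          +		mapping->Unmap(); // performs Detach
          +		}
          +	@endcode
          +	*/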
   1.628 +
   1.629 +	/**
   1.630 +	Return the OS ASID for the address space that this mapping is currently mapping memory in.
   1.631 +	*/
   1.632 +	FORCE_INLINE TInt OsAsid()
   1.633 +		{
   1.634 +		__NK_ASSERT_DEBUG(iLinAddrAndOsAsid); // check mapping has been added to an address space
   1.635 +		return iLinAddrAndOsAsid&KPageMask;
   1.636 +		}
   1.637 +
   1.638 +	/**
    1.639 +	Return the starting virtual address at which this mapping is currently mapping memory.
   1.640 +	The size of the region mapped is #iSizeInPages.
   1.641 +	*/
   1.642 +	FORCE_INLINE TLinAddr Base()
   1.643 +		{
   1.644 +		__NK_ASSERT_DEBUG(iLinAddrAndOsAsid); // check mapping has been added to an address space
   1.645 +		return iLinAddrAndOsAsid&~KPageMask;
   1.646 +		}
   1.647 +
   1.648 +	/**
   1.649 +	Return #Base()|#OsAsid()
   1.650 +	*/
   1.651 +	FORCE_INLINE TLinAddr LinAddrAndOsAsid()
   1.652 +		{
   1.653 +		__NK_ASSERT_DEBUG(iLinAddrAndOsAsid); // check mapping has been added to an address space
   1.654 +		return iLinAddrAndOsAsid;
   1.655 +		}
   1.656 +
   1.657 +	FORCE_INLINE TBool IsUserMapping()
   1.658 +		{
   1.659 +		// Note: must be usable before the mapping has been added to an address space
   1.660 +		return (PteType() & (EPteTypeUserAccess|EPteTypeGlobal)) == EPteTypeUserAccess;
   1.661 +		}
   1.662 +
   1.663 +	/**
   1.664 +	Return #iBlankPde.
   1.665 +	*/
   1.666 +	FORCE_INLINE TPde BlankPde()
   1.667 +		{
   1.668 +		return iBlankPde;
   1.669 +		}
   1.670 +
   1.671 +	/**
    1.672 +	Emit BTrace traces identifying this mapping's virtual address usage.
   1.673 +	*/
   1.674 +	void BTraceCreate();
   1.675 +
   1.676 +	/**
   1.677 +	In debug builds, dump information about this mapping to the kernel trace port.
   1.678 +	*/
   1.679 +	virtual void Dump();
   1.680 +
   1.681 +	/**
    1.682 +	Return a pointer to the page table entry for the specified linear address and
    1.683 +	memory object index within this mapping.
   1.684 +
   1.685 +	This is called by #Epoc::MovePhysicalPage when moving page table or page table info pages.
   1.686 +	
   1.687 +	@param aLinAddr		The linear address to find the page table entry for.
   1.688 +	@param aMemoryIndex	The memory object index of the page to find the page 
   1.689 +						table entry for.
   1.690 +	
    1.691 +	@return A pointer to the page table entry, or NULL if the page table
    1.692 +			entry couldn't be found.
   1.693 +	*/
   1.694 +	virtual TPte* FindPageTable(TLinAddr aLinAddr, TUint aMemoryIndex)=0;
   1.695 +
   1.696 +protected:
   1.697 +	/**
   1.698 +	@param aType Initial value for #Flags.
   1.699 +	*/
   1.700 +	DMemoryMapping(TUint aType);
   1.701 +
   1.702 +	/**
   1.703 +	This destructor removes the mapping from any address space it was added to and
   1.704 +	frees any virtual addresses allocated to it.
   1.705 +	*/
   1.706 +	~DMemoryMapping();
   1.707 +
   1.708 +	/**
   1.709 +	Free any resources owned by this mapping, i.e. allow Construct() to be used
   1.710 +	on this mapping at a new address etc.
   1.711 +	*/
   1.712 +	void Destruct();
   1.713 +
   1.714 +	/**
    1.715 +	Allocate virtual addresses for this mapping to use.
   1.716 +	This is called from #Construct and the arguments to this function are the same.
   1.717 +
   1.718 +	On success, iAllocatedLinAddrAndOsAsid and iAllocatedSize will be initialised.
   1.719 +	*/
   1.720 +	virtual TInt AllocateVirtualMemory(TMappingCreateFlags aFlags, TInt aOsAsid, TLinAddr aAddr, TUint aSize, TLinAddr aColourOffset);
   1.721 +
   1.722 +	/**
   1.723 +	Free the virtual addresses allocated to this mapping with AllocateVirtualMemory.
   1.724 +	*/
   1.725 +	virtual void FreeVirtualMemory();
   1.726 +	};
   1.727 +
   1.728 +
   1.729 +
   1.730 +/**
   1.731 +A memory mapping to map a 'chunk' aligned region of a DCoarseMemory object into
   1.732 +an address space. A 'chunk' is the size of memory mapped by a whole MMU page table
   1.733 +and is #KChunkSize bytes.
   1.734 +
   1.735 +These mappings make use of page tables owned by a DCoarseMemory and when
   1.736 +they are attached to a memory object they are linked into
   1.737 +DCoarseMemory::DPageTables::iMappings not DCoarseMemory::iMappings.
   1.738 +*/
   1.739 +class DCoarseMapping : public DMemoryMapping
   1.740 +	{
   1.741 +public:
   1.742 +	DCoarseMapping();
   1.743 +	~DCoarseMapping();
   1.744 +
   1.745 +protected:
   1.746 +	DCoarseMapping(TUint aFlags);
   1.747 +	
   1.748 +protected:
   1.749 +	// from DMemoryMappingBase...
   1.750 +	virtual TInt MapPages(RPageArray::TIter aPages, TUint aMapInstanceCount); ///< Not implemented. Faults in debug builds.
   1.751 +	virtual void UnmapPages(RPageArray::TIter aPages, TUint aMapInstanceCount); ///< Not implemented. Faults in debug builds.
   1.752 +	virtual void RemapPage(TPhysAddr& aPageArray, TUint aIndex, TUint aMapInstanceCount, TBool aInvalidateTLB); ///< Not implemented. Faults in debug builds.
   1.753 +	virtual void RestrictPagesNA(RPageArray::TIter aPages, TUint aMapInstanceCount); ///< Not implemented. Faults in debug builds.
   1.754 +	virtual TInt PageIn(RPageArray::TIter aPages, TPinArgs& aPinArgs, TUint aMapInstanceCount);
   1.755 +	virtual TBool MovingPageIn(TPhysAddr& aPageArrayPtr, TUint aIndex);
   1.756 +	virtual TInt DoMap();
   1.757 +	virtual void DoUnmap();
   1.758 +	
   1.759 +	// from DMemoryMapping...
   1.760 +	virtual TPte* FindPageTable(TLinAddr aLinAddr, TUint aMemoryIndex);
   1.761 +	};
   1.762 +
   1.763 +
   1.764 +
   1.765 +/**
   1.766 +A memory mapping to map a page aligned region of a memory object into
    1.767 +an address space. This may be used with any memory object: DFineMemory or DCoarseMemory.
   1.768 +*/
   1.769 +class DFineMapping : public DMemoryMapping
   1.770 +	{
   1.771 +public:
   1.772 +	DFineMapping();
   1.773 +	~DFineMapping();
   1.774 +
   1.775 +private:
   1.776 +	// from DMemoryMappingBase...
   1.777 +	virtual TInt MapPages(RPageArray::TIter aPages, TUint aMapInstanceCount);
   1.778 +	virtual void UnmapPages(RPageArray::TIter aPages, TUint aMapInstanceCount);
   1.779 +	virtual void RemapPage(TPhysAddr& aPageArray, TUint aIndex, TUint aMapInstanceCount, TBool aInvalidateTLB);
   1.780 +	virtual void RestrictPagesNA(RPageArray::TIter aPages, TUint aMapInstanceCount);
   1.781 +	virtual TInt PageIn(RPageArray::TIter aPages, TPinArgs& aPinArgs, TUint aMapInstanceCount);
   1.782 +	virtual TBool MovingPageIn(TPhysAddr& aPageArrayPtr, TUint aIndex);
   1.783 +	virtual TInt DoMap();
   1.784 +	virtual void DoUnmap();
   1.785 +
   1.786 +	// from DMemoryMapping...
   1.787 +
   1.788 +	/**
    1.789 +	Allocate virtual addresses for this mapping to use.
   1.790 +
   1.791 +	In addition to performing the action of DMemoryMapping::AllocateVirtualMemory
   1.792 +	this will also allocate all permanent page tables for the mapping if it has attribute
   1.793 +	#EPermanentPageTables.
   1.794 +	*/
   1.795 +	virtual TInt AllocateVirtualMemory(TMappingCreateFlags aFlags, TInt aOsAsid, TLinAddr aAddr, TUint aSize, TLinAddr aColourOffset);
   1.796 +
   1.797 +	/**
   1.798 +	Free the virtual addresses and permanent page tables allocated to this mapping with
   1.799 +	AllocateVirtualMemory.
   1.800 +	*/
   1.801 +	virtual void FreeVirtualMemory();
   1.802 +
   1.803 +	virtual TPte* FindPageTable(TLinAddr aLinAddr, TUint aMemoryIndex);
   1.804 +
   1.805 +	// new...
   1.806 +
   1.807 +	/**
   1.808 +	Allocate all the page tables required for this mapping. This is called by
   1.809 +	AllocateVirtualMemory if the #EPermanentPageTables attribute is set.
   1.810 +
   1.811 +	Each page table for the virtual address region used by the mapping is
   1.812 +	allocated if not already present. The permanence count of any page table
   1.813 +	(SPageTableInfo::iPermanenceCount) is then incremented so that it is not
   1.814 +	freed even when it no longer maps any pages.
   1.815 +
   1.816 +	If successful, the #EPageTablesAllocated flag in #Flags will be set.
   1.817 +
   1.818 +	@return KErrNone if successful, otherwise one of the system wide error codes.
   1.819 +	*/
   1.820 +	TInt AllocatePermanentPageTables();
   1.821 +
   1.822 +	/**
   1.823 +	Free all permanent page tables allocated to this mapping.
   1.824 +
   1.825 +	This reverses the action of #AllocatePermanentPageTables by decrementing
    1.826 +	the permanence count for each page table and freeing it if it is no longer in use.
   1.827 +	*/
   1.828 +	void FreePermanentPageTables();
   1.829 +
   1.830 +	/**
   1.831 +	Free a range of permanent page tables.
   1.832 +
   1.833 +	This is an implementation factor for FreePermanentPageTables and
   1.834 +	AllocatePermanentPageTables. It decrements the permanence count
    1.835 +	for each page table and frees it if it is no longer in use.
   1.836 +
   1.837 +	@param aFirstPde	The address of the page directory entry which refers to
   1.838 +						the first page table to be freed.
   1.839 +	@param aLastPde		The address of the page directory entry which refers to
   1.840 +						the last page table to be freed.
   1.841 +	*/
   1.842 +	void FreePermanentPageTables(TPde* aFirstPde, TPde* aLastPde);
   1.843 +
   1.844 +#ifdef _DEBUG
   1.845 +	/**
    1.846 +	Check that the contents of the page table are valid.
   1.847 +
   1.848 +	@param aPt	The page table to validate.
   1.849 +	*/
   1.850 +	void ValidatePageTable(TPte* aPt, TLinAddr aAddr);
   1.851 +#endif
   1.852 +
   1.853 +	/**
   1.854 +	Get the page table being used to map a specified virtual address if it exists.
   1.855 +
   1.856 +	@param aAddr	A virtual address in the region allocated to this mapping.
   1.857 +
   1.858 +	@return The virtual address of the page table mapping \a aAddr,
   1.859 +			or the null pointer if one wasn't found.
   1.860 +	*/
   1.861 +	TPte* GetPageTable(TLinAddr aAddr);
   1.862 +
   1.863 +	/**
   1.864 +	Get the page table being used to map a specified virtual address; allocating
   1.865 +	a new one if it didn't previously exist.
   1.866 +
   1.867 +	@param aAddr	A virtual address in the region allocated to this mapping.
   1.868 +
   1.869 +	@return The virtual address of the page table mapping \a aAddr,
   1.870 +			or the null pointer if one wasn't found and couldn't be allocated.
   1.871 +	*/
   1.872 +	TPte* GetOrAllocatePageTable(TLinAddr aAddr);
   1.873 +
   1.874 +	/**
   1.875 +	Get and pin the page table being used to map a specified virtual address;
   1.876 +	allocating a new one if it didn't previously exist.
   1.877 +
   1.878 +	@param aAddr	A virtual address in the region allocated to this mapping.
   1.879 +	@param aPinArgs	The resources required to pin the page table.
   1.880 +					On success, the page table will have been appended to
   1.881 +					\a aPinArgs.iPinnedPageTables.
   1.882 +
   1.883 +	@return The virtual address of the page table mapping \a aAddr,
   1.884 +			or the null pointer if one wasn't found and couldn't be allocated.
   1.885 +	*/
   1.886 +	TPte* GetOrAllocatePageTable(TLinAddr aAddr, TPinArgs& aPinArgs);
   1.887 +
   1.888 +	/**
   1.889 +	Allocate a single page table.
   1.890 +
   1.891 +	@param aAddr		The virtual address the page table will be used to map.
   1.892 +	@param aPdeAddress	Address of the page directory entry which is to map
   1.893 +						the newly allocated page table.
   1.894 +	@param aPermanent	True, if the page table's permanence count is to be incremented.
   1.895 +
   1.896 +	@return The virtual address of the page table if it was successfully allocated,
   1.897 +			otherwise the null pointer.
   1.898 +	*/
   1.899 +	TPte* AllocatePageTable(TLinAddr aAddr, TPde* aPdeAddress, TBool aPermanent=false);
   1.900 +
   1.901 +	/**
   1.902 +	Free a single page table if it is unused.
   1.903 +
   1.904 +	@param aPdeAddress	Address of the page directory entry (PDE) which maps the page table.
   1.905 +						If the page table is freed, this PDE will be set to an 'unallocated' value.
   1.906 +	*/
   1.907 +	void FreePageTable(TPde* aPdeAddress);
   1.908 +	};
   1.909 +
   1.910 +
   1.911 +/**
   1.912 +A mapping which maps any memory into the kernel address space and provides access to 
   1.913 +the physical address used by a memory object.
   1.914 +
   1.915 +These mappings are always of the 'pinned' type to prevent the obtained physical addresses
   1.916 +from becoming invalid.
   1.917 +*/
   1.918 +class DKernelPinMapping : public DFineMapping
   1.919 +	{
   1.920 +public:
   1.921 +	DKernelPinMapping();
   1.922 +	TInt Construct(TUint aReserveSize);
   1.923 +	TInt MapAndPin(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TMappingPermissions aPermissions);
   1.924 +	void UnmapAndUnpin();
   1.925 +
   1.926 +public:
    1.927 +	TInt iReservePages;		///< The number of pages this mapping is able to map with its reserved resources (page tables etc.).
   1.928 +	};
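          +
          +/*
          +Illustrative sketch of a DKernelPinMapping giving the kernel temporary access to
          +a memory object's pages. The variables 'reserveSize', 'memory', 'index', 'count'
          +and 'permissions' are placeholders for values a real caller would supply.
          +
          +@code
          +DKernelPinMapping mapping;
          +TInt r = mapping.Construct(reserveSize); // pre-reserve resources for the largest expected mapping
          +if(r==KErrNone)
          +	r = mapping.MapAndPin(memory, index, count, permissions);
          +if(r==KErrNone)
          +	{
          +	// the pinned memory is mapped into the kernel address space at mapping.Base()...
          +	mapping.UnmapAndUnpin();
          +	}
          +@endcode
          +*/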
   1.929 +
   1.930 +
   1.931 +/**
   1.932 +A mapping which provides access to the physical address used by a memory object
   1.933 +without mapping these at any virtual address accessible to software.
   1.934 +
   1.935 +These mappings are always of the 'pinned' type to prevent the obtained physical addresses
   1.936 +from becoming invalid.
   1.937 +*/
   1.938 +class DPhysicalPinMapping : public DMemoryMappingBase
   1.939 +	{
   1.940 +public:
   1.941 +	DPhysicalPinMapping();
   1.942 +
   1.943 +	/**
   1.944 +	Attach this mapping to a memory object so that it pins a specified region of its memory.
   1.945 +
   1.946 +	Most of the action of this method is performed by #Attach.
   1.947 +
   1.948 +	@param aMemory		The memory object.
   1.949 +	@param aIndex		The page index of the first page of memory to be pinned by the mapping.
   1.950 +	@param aCount		The number of pages of memory to be pinned by the mapping.
   1.951 +	@param aPermissions	The memory access permissions appropriate to the intended use
   1.952 +						of the physical addresses. E.g. if the memory contents will be
    1.953 +						changed, use EReadWrite. These permissions are used for error
   1.954 +						checking, e.g. detecting attempted writes to read-only memory.
    1.955 +						They are also used for optimising access to demand paged memory,
   1.956 +						which is more efficient if only read-only access is required.
   1.957 +
   1.958 +	@return KErrNone if successful,
   1.959 +			KErrNotFound if any part of the memory to be pinned was not present,
   1.960 +			KErrNoMemory if there was insufficient memory,
   1.961 +			otherwise one of the system wide error codes.
   1.962 +	*/
   1.963 +	TInt Pin(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TMappingPermissions aPermissions);
   1.964 +
   1.965 +	/**
   1.966 +	Remove this mapping from the memory object it was previously added to by #Pin.
   1.967 +
   1.968 +	Most of the action of this method is performed by #Detach.
   1.969 +	*/
   1.970 +	virtual void Unpin();
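          +
          +	/*
          +	Illustrative sketch of pinning a region of a memory object to obtain stable
          +	physical addresses. The variables 'pinMapping', 'memory', 'index', 'count'
          +	and 'pageList' are placeholders for values a real caller would supply.
          +
          +	@code
          +	TInt r = pinMapping->Pin(memory, index, count, EReadWrite);
          +	if(r==KErrNone)
          +		{
          +		TPhysAddr physAddr;
          +		r = pinMapping->PhysAddr(0, count, physAddr, pageList);
          +		// ... physical addresses remain valid while the memory is pinned ...
          +		pinMapping->Unpin();
          +		}
          +	@endcode
          +	*/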
   1.971 +
   1.972 +private:
   1.973 +	// from DMemoryMappingBase...
   1.974 +	virtual TInt MapPages(RPageArray::TIter aPages, TUint aMapInstanceCount); ///< Not implemented. Faults in debug builds.
   1.975 +	virtual void UnmapPages(RPageArray::TIter aPages, TUint aMapInstanceCount); ///< Does nothing
   1.976 +	virtual void RemapPage(TPhysAddr& aPageArray, TUint aIndex, TUint aMapInstanceCount, TBool aInvalidateTLB); ///< Not implemented. Faults in debug builds.
   1.977 +	virtual void RestrictPagesNA(RPageArray::TIter aPages, TUint aMapInstanceCount); ///< Does nothing
   1.978 +	virtual TInt PageIn(RPageArray::TIter aPages, TPinArgs& aPinArgs, TUint aMapInstanceCount); ///< Does nothing
   1.979 +	virtual TBool MovingPageIn(TPhysAddr& aPageArrayPtr, TUint aIndex);///< Not implemented. Faults in debug builds.
   1.980 +	virtual TInt DoMap(); ///< Does nothing
   1.981 +	virtual void DoUnmap(); ///< Does nothing
   1.982 +	}; 
   1.983 +
   1.984 +
   1.985 +
   1.986 +/**
   1.987 +A mapping which pins memory in order to prevent demand paging related
   1.988 +page faults from occurring.
   1.989 +*/
   1.990 +class DVirtualPinMapping : public DPhysicalPinMapping
   1.991 +	{
   1.992 +public:
   1.993 +	DVirtualPinMapping();
   1.994 +	~DVirtualPinMapping();
   1.995 +
   1.996 +	/**
   1.997 +	Create a new DVirtualPinMapping object suitable for pinning a specified number of pages.
   1.998 +
   1.999 +	If no maximum is specified (\a aMaxCount==0) then this object may be used to pin
   1.1000 +	any number of pages; however, this will require dynamic allocation of storage for
  1.1001 +	page table references.
  1.1002 +
  1.1003 +	@param aMaxCount The maximum number of pages which can be pinned, or zero for no maximum.
  1.1004 +
  1.1005 +	@return The newly created DVirtualPinMapping or the null pointer if there was
  1.1006 +			insufficient memory.
  1.1007 +	*/
  1.1008 +	static DVirtualPinMapping* New(TUint aMaxCount);
  1.1009 +
  1.1010 +	/**
  1.1011 +	Attach this mapping to a memory object so that it pins a specified region of its memory.
  1.1012 +
  1.1013 +	Additionally, pin the page tables in a specified mapping (\a aMapping) which
  1.1014 +	are being used to map these pages.
  1.1015 +
  1.1016 +	The result of this function is that access to the pinned memory through the virtual
  1.1017 +	addresses used by \a aMapping will not generate any demand paging related page faults.
  1.1018 +
  1.1019 +	@param aMemory		The memory object.
  1.1020 +	@param aIndex		The page index of the first page of memory to be pinned by the mapping.
  1.1021 +	@param aCount		The number of pages of memory to be pinned by the mapping.
  1.1022 +	@param aPermissions	The memory access permissions appropriate to the intended use
  1.1023 +						of the physical addresses. E.g. if the memory contents will be
   1.1024 +						changed, use EReadWrite. These permissions are used for error
  1.1025 +						checking, e.g. detecting attempted writes to read-only memory.
   1.1026 +						They are also used for optimising access to demand paged memory,
  1.1027 +						which is more efficient if only read-only access is required.
  1.1028 +	@param aMapping		The mapping whose page tables are to be pinned. This must be
  1.1029 +						currently mapping the specified region of memory pages.
   1.1030 +	@param aMapInstanceCount	The instance count of the mapping whose page tables are to be pinned.
  1.1031 +
  1.1032 +	@return KErrNone if successful,
  1.1033 +			KErrNotFound if any part of the memory to be pinned was not present,
  1.1034 +			KErrNoMemory if there was insufficient memory,
  1.1035 +			otherwise one of the system wide error codes.
  1.1036 +	*/
  1.1037 +	TInt Pin(	DMemoryObject* aMemory, TUint aIndex, TUint aCount, TMappingPermissions aPermissions, 
  1.1038 +				DMemoryMappingBase* aMapping, TUint aMapInstanceCount);
  1.1039 +
  1.1040 +	/**
  1.1041 +	Remove this mapping from the memory object it was previously added to by #Pin.
   1.1042 +	This will unpin any memory pages and page tables that were pinned.
  1.1043 +	*/
  1.1044 +	void Unpin();
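          +
          +	/*
          +	Illustrative sketch of pinning pages, and the page tables another mapping uses
          +	to map them, so that access through that mapping cannot take demand paging
          +	related page faults. The variables 'memory', 'index', 'count' and 'userMapping'
          +	are placeholders for values a real caller would supply.
          +
          +	@code
          +	DVirtualPinMapping* pin = DVirtualPinMapping::New(count);
          +	if(pin)
          +		{
          +		TInt r = pin->Pin(memory, index, count, EReadWrite,
          +						  userMapping, userMapping->MapInstanceCount());
          +		if(r==KErrNone)
          +			{
          +			// ... access through userMapping will not take paging faults ...
          +			pin->Unpin();
          +			}
          +		}
          +	@endcode
          +	*/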
  1.1045 +
  1.1046 +	/**
  1.1047 +	Return the maximum number of page tables which could be required to map
  1.1048 +	\a aPageCount pages. This is used by various resource reserving calculations.
  1.1049 +	*/
  1.1050 +	static TUint MaxPageTables(TUint aPageCount);
  1.1051 +
  1.1052 +	/**
  1.1053 +	In debug builds, dump information about this mapping to the kernel trace port.
  1.1054 +	*/
  1.1055 +	virtual void Dump();
  1.1056 +
  1.1057 +private:
  1.1058 +	// from DMemoryMappingBase...
  1.1059 +	virtual void RemapPage(TPhysAddr& aPageArray, TUint aIndex, TUint aMapInstanceCount, TBool aInvalidateTLB); ///< Does nothing.
  1.1060 +	virtual TInt PageIn(RPageArray::TIter aPages, TPinArgs& aPinArgs, TUint aMapInstanceCount);
  1.1061 +	virtual TBool MovingPageIn(TPhysAddr& aPageArrayPtr, TUint aIndex);///< Not implemented. Faults in debug builds.
  1.1062 +	virtual TInt DoPin(TPinArgs& aPinArgs);
  1.1063 +	virtual void DoUnpin(TPinArgs& aPinArgs);
  1.1064 +
  1.1065 +private:
  1.1066 +	/**
   1.1067 +	Allocate memory to store pointers to all the page tables which map
  1.1068 +	\a aCount pages of memory. The pointer to the allocated memory
  1.1069 +	is stored at iAllocatedPinnedPageTables.
  1.1070 +
  1.1071 +	If iSmallPinnedPageTablesArray is large enough, this function doesn't
  1.1072 +	allocate any memory.
  1.1073 +
  1.1074 +	@return KErrNone if successful, otherwise KErrNoMemory.
  1.1075 +	*/
  1.1076 +	TInt AllocPageTableArray(TUint aCount);
  1.1077 +
  1.1078 +	/**
  1.1079 +	Delete iAllocatedPinnedPageTables.
  1.1080 +	*/
  1.1081 +	void FreePageTableArray();
  1.1082 +
  1.1083 +	/**
  1.1084 +	Return the address of the array storing pinned page tables.
  1.1085 +	This is either iSmallPinnedPageTablesArray or iAllocatedPinnedPageTables.
  1.1086 +	*/
  1.1087 +	TPte** PageTableArray();
  1.1088 +
  1.1089 +	/**
  1.1090 +	Unpin all the page tables which have been pinned by this mapping.
  1.1091 +
  1.1092 +	@param aPinArgs	The resources used for pinning. The replacement pages allocated
   1.1093 +					to this will be increased for each page which became completely
  1.1094 +					unpinned.
  1.1095 +	*/
  1.1096 +	void UnpinPageTables(TPinArgs& aPinArgs);
  1.1097 +private:
  1.1098 +	/**
  1.1099 +	Temporary store for the mapping passed to #Pin
  1.1100 +	*/
  1.1101 +	DMemoryMappingBase* iPinVirtualMapping;
  1.1102 +
  1.1103 +	/**
  1.1104 +	Temporary store for the mapping instance count passed to #Pin
  1.1105 +	*/
  1.1106 +	TUint iPinVirtualMapInstanceCount;
  1.1107 +
  1.1108 +	/**
  1.1109 +	The number of page tables which are currently being pinned by this mapping.
  1.1110 +	This is the number of valid entries stored at PageTableArray.
  1.1111 +	*/
  1.1112 +	TUint iNumPinnedPageTables;
  1.1113 +
  1.1114 +	/**
  1.1115 +	The maximum number of pages which can be pinned by this mapping.
  1.1116 +	If this is zero, there is no maximum.
  1.1117 +	*/
  1.1118 +	TUint iMaxCount;
  1.1119 +
  1.1120 +	/**
   1.1121 +	The memory allocated by this object for storing pointers to the page tables
  1.1122 +	it has pinned.
  1.1123 +	*/
  1.1124 +	TPte** iAllocatedPinnedPageTables;
  1.1125 +
  1.1126 +	enum
  1.1127 +		{
  1.1128 +		KSmallPinnedPageTableCount = 2 ///< Number of entries in iSmallPinnedPageTablesArray
  1.1129 +		};
  1.1130 +
  1.1131 +	/**
  1.1132 +	A small array to use for storing pinned page tables.
  1.1133 +	This is an optimisation used for the typical case of pinning a small number of pages
  1.1134 +	to avoid dynamic allocation of memory.
  1.1135 +	*/
  1.1136 +	TPte* iSmallPinnedPageTablesArray[KSmallPinnedPageTableCount];
  1.1137 +	}; 
  1.1138 +
  1.1139 +#endif