os/kernelhwsrv/kernel/eka/memmodel/epoc/flexible/mmu/mmapping.h
author sl
Tue, 10 Jun 2014 14:32:02 +0200
changeset 1 260cb5ec6c19
permissions -rw-r--r--
Update contrib.
     1 // Copyright (c) 2007-2009 Nokia Corporation and/or its subsidiary(-ies).
     2 // All rights reserved.
     3 // This component and the accompanying materials are made available
     4 // under the terms of the License "Eclipse Public License v1.0"
     5 // which accompanies this distribution, and is available
     6 // at the URL "http://www.eclipse.org/legal/epl-v10.html".
     7 //
     8 // Initial Contributors:
     9 // Nokia Corporation - initial contribution.
    10 //
    11 // Contributors:
    12 //
    13 // Description:
    14 //
    15 
    16 /**
    17  @file
    18  @internalComponent
    19 */
    20 
    21 #ifndef MMAPPING_H
    22 #define MMAPPING_H
    23 
    24 #include "mrefcntobj.h"
    25 #include "mmappinglist.h"
    26 #include "mpagearray.h"
    27 
    28 
    29 
    30 /**
    31 Base class for memory mappings.
    32 
    33 This provides the methods for linking a mapping to a memory object
    34 as well as the interface for updating the MMU page tables associated
    35 with a mapping when the memory state changes.
    36 */
class DMemoryMappingBase : public DReferenceCountedObject
	{
private:
	/**
	Memory object to which this mapping is currently attached.
	Updates to this are protected by the MmuLock.
	*/
	DMemoryObject*	 iMemory;

public:
	/**
	Link used to maintain the list of mappings attached to a memory object.
	*/
	TMappingListLink iLink;

	/**
	Offset, in page units, within the memory object's memory for the start of this mapping.
	*/
	TUint			 iStartIndex;

	/**
	Size of this mapping, in page units.
	*/
	TUint			 iSizeInPages;

private:
	/**
	Instance count which is incremented every time a mapping is attached to a memory object.
	When code is manipulating mappings, the instance count is used to detect that a
	mapping has been reused and that the operation it is performing is no longer needed.
	*/
	TUint			 iMapInstanceCount;

public:

	/**
	Bit flags stored in #Flags giving various state and attributes of the mapping.
	*/
	enum TFlags
		{
		/**
		Flag set during object construction to indicate that this mapping is of
		class #DCoarseMapping.
		*/
		ECoarseMapping			= 1<<0,

		/**
		Flag set during object construction to indicate that this mapping will pin
		any memory pages it maps. This may not be used with coarse memory mappings.
		*/
		EPinned					= 1<<1,

		/**
		Pages have already been reserved for pinning, so when this mapping is attached
		to a memory object no additional pages need to be reserved. Pre-reserving pages
		is used to prevent the possibility of failing to pin due to an out of memory
		condition. It is essential that the users of these mappings ensure that there
		are enough reserved pages in the paging pool to meet the maximum mapping size
		used.
		*/
		EPinningPagesReserved	= 1<<2,

		/**
		Pages have been successfully pinned by this mapping. This is set after demand
		paged memory has been successfully pinned and is used to indicate that the pages
		need unpinning again when the mapping is later unmapped.
		*/
		EPagesPinned			= 1<<3,

		/**
		Flag set during object construction to indicate that MMU page tables are to
		be permanently allocated for use by this mapping. Normally, page tables are
		allocated as needed to map memory which can result in out-of-memory errors
		when mapping memory pages.
		*/
		EPermanentPageTables	= 1<<4,

		/**
		Permanent page tables have been successfully allocated for this mapping.
		This flag is used to track the allocation so the tables can be released when
		the mapping is destroyed.
		*/
		EPageTablesAllocated	= 1<<5,

		/**
		For pinned mappings (EPinned) this flag is set whenever the mapping prevents
		any pages of memory from being fully decommitted from a memory object. When a
		mapping is finally unmapped from the memory object this flag is checked, and,
		if set, further cleanup of the decommitted pages is triggered.
		*/
		EPageUnmapVetoed		= 1<<6,

		/**
		Mapping is being, or has been, detached from a memory object.
		When set, operations on the mapping should act as though the mapping is no
		longer attached to a memory object. Specifically, no further pages of memory
		should be mapped into this mapping.

		This flag is only set when the MmuLock is held.
		*/
		EDetaching				= 1<<7,

		/**
		This mapping is a physical pinning mapping.  The pages it pins
		cannot be paged out or moved.

		This flag is set when DPhysicalPinMapping objects are created.
		*/
		EPhysicalPinningMapping = 1<<8,

		/**
		Flag set during object construction to indicate that this mapping is of
		class #DLargeMapping.

		Note that #DLargeMapping is derived from #DCoarseMapping, therefore presence of this flag
		implies presence of #ECoarseMapping as well.
		*/
		ELargeMapping			= 1<<9,
		};

	/**
	Bitmask of values from enum #TPteType which will be used to calculate
	the correct attributes for any page table entries this mapping uses.
	Stored in iLink.iSpare1.
	*/
	FORCE_INLINE TUint8& PteType()
		{ return iLink.iSpare1; }

	/**
	Bitmask of values from enum #TFlags.
	The flags are 16 bits and are stored in iLink.iSpare2 and iLink.iSpare3.
	*/
	FORCE_INLINE TUint16& Flags()
		{ return (TUint16&)iLink.iSpare2; }

public:
	/**
	Return the memory object to which this mapping is currently attached.

	@param aNoCheck	If false (the default), assert in debug builds that the MmuLock is held.

	@pre MmuLock is held. (If aNoCheck==false)
	*/
	FORCE_INLINE DMemoryObject* Memory(TBool aNoCheck=false)
		{
		if(!aNoCheck)
			__NK_ASSERT_DEBUG(MmuLock::IsHeld());
		return iMemory;
		}

	/**
	Return true if the mapping is currently attached to a memory object.
	*/
	FORCE_INLINE TBool IsAttached()
		{ return iLink.IsLinked(); }

	/**
	Return true if the mapping is being, or has been, detached from a memory object.
	The mapping may or may not still be attached to a memory object, i.e. #IsAttached
	is indeterminate.
	*/
	FORCE_INLINE TBool BeingDetached()
		{ return Flags()&EDetaching; }

	/**
	Return the mapping instance count.
	@see #iMapInstanceCount.
	*/
	FORCE_INLINE TUint MapInstanceCount()
		{ return iMapInstanceCount; }

	/**
	Return true if this mapping provides read only access to memory.
	*/
	FORCE_INLINE TBool IsReadOnly()
		{ return !(PteType()&EPteTypeWritable); }

#ifdef MMU_SUPPORTS_EXECUTE_NEVER
	/**
	Return true if this mapping provides access to memory which allows
	code to be executed from it.
	*/
	FORCE_INLINE TBool IsExecutable()
		{ return (PteType()&EPteTypeExecutable); }
#endif

	/**
	Return true if this is a coarse mapping, in other words it is an instance of #DCoarseMapping or
	#DLargeMapping.
	*/
	FORCE_INLINE TBool IsCoarse()
		{ return Flags()&ECoarseMapping; }

	/**
	Return true if this mapping is a large mapping, in other words an instance of #DLargeMapping.

	Note that all large mappings are also coarse mappings.
	*/
	FORCE_INLINE TBool IsLarge()
		{ return Flags()&ELargeMapping; }

	/**
	Return true if this mapping pins the memory it maps.
	*/
	FORCE_INLINE TBool IsPinned()
		{ return Flags()&EPinned; }

	/**
	Return true if this mapping physically pins the memory it maps.
	*/
	FORCE_INLINE TBool IsPhysicalPinning()
		{ return Flags()&EPhysicalPinningMapping; }

	/**
	Return the access permissions which this mapping uses to map memory.
	*/
	FORCE_INLINE TMappingPermissions Permissions()
		{ return Mmu::PermissionsFromPteType(PteType()); }

	/**
	Link this mapping to a memory object.

	This is called by the memory object during processing of #Attach.

	@param aMemory		The memory object the mapping is being attached to.
	@param aMappingList	The list to add this mapping to.

	@pre MmuLock is held.
	@pre Mapping list lock is held.
	*/
	void LinkToMemory(DMemoryObject* aMemory, TMappingList& aMappingList);

	/**
	Unlink this mapping from the memory object it was previously linked to with
	#LinkToMemory.

	This is called by the memory object during processing of #Detach.

	@param aMappingList	The list that the mapping appears on.
	*/
	void UnlinkFromMemory(TMappingList& aMappingList);

	/**
	Get the physical address(es) for a region of pages in this mapping.

	@param aIndex			Page index, within the mapping, for start of the region.
	@param aCount			Number of pages in the region.
	@param aPhysicalAddress	On success, this value is set to one of two values.
							If the specified region is physically contiguous,
							the value is the physical address of the first page
							in the region. If the region is discontiguous, the
							value is set to KPhysAddrInvalid.
	@param aPhysicalPageList If not zero, this points to an array of TPhysAddr
							objects. On success, this array will be filled
							with the addresses of the physical pages which
							contain the specified region. If aPageList is
							zero, then the function will fail with
							KErrNotFound if the specified region is not
							physically contiguous.

	@return 0 if successful and the whole region is physically contiguous.
			1 if successful but the region isn't physically contiguous.
			KErrNotFound, if any page in the region is not present,
			otherwise one of the system wide error codes.

	@pre This mapping must have been attached to a memory object with #Pin.
	*/
	TInt PhysAddr(TUint aIndex, TUint aCount, TPhysAddr& aPhysicalAddress, TPhysAddr* aPhysicalPageList);

protected:
	/**
	@param aType Initial value for #Flags.
	*/
	DMemoryMappingBase(TUint aType);

	/**
	Attach this mapping to a memory object so that it maps a specified region of its memory.

	@param aMemory	The memory object.
	@param aIndex	The page index of the first page of memory to be mapped by the mapping.
	@param aCount	The number of pages of memory to be mapped by the mapping.

	@return KErrNone if successful, otherwise one of the system wide error codes.
	*/
	TInt Attach(DMemoryObject* aMemory, TUint aIndex, TUint aCount);

	/**
	Remove this mapping from the memory object it was previously attached to by #Attach.
	*/
	void Detach();

public:
	/**
	Update the page table entries corresponding to this mapping to add entries for
	a specified set of memory pages.

	This method is called by DMemoryObject::MapPages to update each mapping attached
	to a memory object whenever new pages of memory are added. However, it won't be
	called for any mapping with the #EPinned attribute as such mappings are unchanging.

	@param aPages				An RPageArray::TIter which refers to a range of pages
								in a memory object. This has been clipped to fit within
								the range of pages mapped by this mapping.
								Only array entries which have state RPageArray::ECommitted
								should be mapped into the mapping's page tables.

	@param aMapInstanceCount	The instance of this mapping which is to be updated.
								Whenever this no longer matches the current #MapInstanceCount
								the function must not update any more of the mapping's
								page table entries, (but must still return KErrNone).

	@return KErrNone if successful, otherwise one of the system wide error codes.
	*/
	virtual TInt MapPages(RPageArray::TIter aPages, TUint aMapInstanceCount) =0;

	/**
	Update the page table entries corresponding to this mapping to remove entries for
	a specified set of memory pages.

	This method is called by DMemoryObject::UnmapPages to update each mapping attached
	to a memory object whenever pages of memory are removed.

	@param aPages				An RPageArray::TIter which refers to a range of pages
								in a memory object. This has been clipped to fit within
								the range of pages mapped by this mapping.
								Only array entries which return true for
								RPageArray::TargetStateIsDecommitted should be unmapped
								from the mapping's page tables.

	@param aMapInstanceCount	The instance of this mapping which is to be updated.
								Whenever this no longer matches the current #MapInstanceCount
								the function must not update any more of the mapping's
								page table entries.
	*/
	virtual void UnmapPages(RPageArray::TIter aPages, TUint aMapInstanceCount) =0;

	/**
	Update the page table entry corresponding to this mapping to update an entry for a specified
	page that has just been moved or shadowed.

	@param aPageArray			The page array entry of the page in a memory object.
								Only array entries which have a target state of
								RPageArray::ECommitted should be mapped into the
								mapping's page tables.

	@param aIndex				The index of the page in the memory object.

	@param aMapInstanceCount	The instance of this mapping which is to be updated.
								Whenever this no longer matches the current #MapInstanceCount
								the function must not update any more of the mapping's
								page table entries, (but must still return KErrNone).

	@param	aInvalidateTLB		Set to ETrue when the TLB entries associated with this page
								should be invalidated.  This must be done when there is 
								already a valid pte for this page, i.e. if the page is still 
								mapped.
	*/
	virtual void RemapPage(TPhysAddr& aPageArray, TUint aIndex, TUint aMapInstanceCount, TBool aInvalidateTLB)=0;

	/**
	Update the page table entries corresponding to this mapping to apply access restrictions
	to a specified set of memory pages.

	This method is called by DMemoryObject::RestrictPages to update each mapping attached
	to a memory object whenever pages of memory are restricted.

	@param aPages				An RPageArray::TIter which refers to a range of pages
								in a memory object. This has been clipped to fit within
								the range of pages mapped by this mapping.
								Only array entries which return true for
								RPageArray::TargetStateIsDecommitted should be unmapped
								from the mapping's page tables.

	@param aMapInstanceCount	The instance of this mapping which is to be updated.
								Whenever this no longer matches the current #MapInstanceCount
								the function must not update any more of the mapping's
								page table entries.
	*/
	virtual void RestrictPagesNA(RPageArray::TIter aPages, TUint aMapInstanceCount) =0;

	/**
	Update the page table entries corresponding to this mapping to add entries for
	a specified set of demand paged memory pages following a 'page in' or memory
	pinning operation.

	@param aPages				An RPageArray::TIter which refers to a range of pages
								in a memory object. This will be within the range of pages
								mapped by this mapping.
								Only array entries which have state RPageArray::ECommitted
								should be mapped into the mapping's page tables.

	@param aPinArgs				The resources required to pin any page tables the mapping uses.
								Page tables must be pinned if \a aPinArgs.iPinnedPageTables is
								not the null pointer, in which case the virtual addresses
								of the pinned page tables must be stored in the array this
								points to.
								\a aPinArgs.iReadOnly is true if write access permissions
								are not needed.

	@param aMapInstanceCount	The instance of this mapping which is to be updated.

	@return KErrNone if successful, otherwise one of the system wide error codes.
	*/
	virtual TInt PageIn(RPageArray::TIter aPages, TPinArgs& aPinArgs, TUint aMapInstanceCount) =0;


	/**
	Update the page table entry corresponding to this mapping to add an entry for
	a specified page which is in the process of being moved.

	@param aPageArrayPtr		The page array entry for the page to be mapped which must be
								within this mapping's range of pages.
								Only array entries which have a target state of
								RPageArray::ECommitted should be mapped into the mapping's
								page tables.

	@param	aIndex				The index of the page.

	@return ETrue if successful, EFalse otherwise.
	*/
	virtual TBool MovingPageIn(TPhysAddr& aPageArrayPtr, TUint aIndex)=0;


	/**
	In debug builds, dump information about this mapping to the kernel trace port.
	*/
	virtual void Dump();

private:
	/**
	Update this mapping's MMU data structures to map all pages of memory
	currently committed to the memory object (#iMemory) in the region covered
	by this mapping.

	This method is called by #Attach after the mapping has been linked
	into the memory object.

	@return KErrNone if successful, otherwise one of the system wide error codes.
	*/
	virtual TInt DoMap() =0;

	/**
	Update this mapping's MMU data structures to unmap all pages of memory.

	This method is called by #Detach before the mapping has been unlinked
	from the memory object but after the #EDetaching flag has been set.
	*/
	virtual void DoUnmap() =0;

protected:
	/**
	For pinned mappings, this virtual method is called by #Attach in order to pin
	pages of memory if required. This is called after the mapping has been linked
	into the memory object but before #DoMap.

	The default implementation of this method simply calls DMemoryManager::Pin.

	@param aPinArgs	The resources to use for pinning. This has sufficient replacement
					pages allocated to pin every page the mapping covers, and the
					value of \a aPinArgs.iReadOnly has been set to correspond to the
					mapping's access permissions.

	@return KErrNone if successful, otherwise one of the system wide error codes.
	*/
	virtual TInt DoPin(TPinArgs& aPinArgs);

	/**
	For pinned mappings, this virtual method is called by #Detach in order to unpin
	pages of memory if required. This is called before the mapping has been unlinked
	from the memory object but after #DoUnmap.

	The default implementation of this method simply calls DMemoryManager::Unpin.

	@param aPinArgs	The resources used for pinning. The replacement pages allocated
					to this will be increased for each page which became completely
					unpinned.
	*/
	virtual void DoUnpin(TPinArgs& aPinArgs);
	};
   510 
   511 
   512 
   513 /**
Base class for memory mappings which map memory contents into an address space.
   515 
   516 This provides methods for allocating virtual memory and holds the attributes needed
   517 for MMU page table entries.
   518 */
class DMemoryMapping : public DMemoryMappingBase
	{
protected:
	/**
	The page directory entry (PDE) value for use when mapping this mapping's page tables.
	This value has the physical address component being zero, so a page table's physical
	address can be simply ORed in.

	This could potentially be removed (see DMemoryMapping::PdeType()).
	*/
	TPde			iBlankPde;

	/**
	The page table entry (PTE) value for use when mapping pages into this mapping.
	This value has the physical address component being zero, so a page's physical
	address can be simply ORed in.
	*/
	TPte			iBlankPte;

	/**
	Start of the virtual address region allocated for use by this mapping
	ORed with the OS ASID of the address space this lies in.

	Note, the address at which memory is mapped (#iLinAddrAndOsAsid) may be different
	to this allocated address due to page colouring restrictions.

	@see iAllocatedSize
	*/
	TLinAddr		iAllocatedLinAddrAndOsAsid;

	/**
	Size of virtual address region memory allocated for use by this mapping.

	@see iAllocatedLinAddrAndOsAsid
	*/
	TUint			iAllocatedSize;

private:
	/**
	Start of the virtual address region that this mapping is currently
	mapping memory at, ORed with the OS ASID of the address space this lies in.

	This value is set by #Map which is called from #Attach when the mapping
	is attached to a memory object. The address used may be different to
	#iAllocatedLinAddrAndOsAsid due to page colouring restrictions.

	The size of the region mapped is #iSizeInPages.

	Note, access to this value is through #Base() and #OsAsid().
	*/
	TLinAddr		iLinAddrAndOsAsid;

public:
	/**
	Second phase constructor.

	The main function of this is to allocate a virtual address region for the mapping
	and to add it to an address space.

	@param aAttributes		The attributes of the memory which this mapping is intended to map.
							This is only needed to setup #PdeType which is required for correct
							virtual address allocation so in practice the only relevant attribute
							is to set EMemoryAttributeUseECC if required, else use
							EMemoryAttributeStandard.

	@param aFlags			A combination of the options from enum TMappingCreateFlags.

	@param aOsAsid			The OS ASID of the address space the mapping is to be added to.

	@param aAddr			The virtual address to use for the mapping, or zero if this is
							to be allocated by this function.

	@param aSize			The maximum size of memory, in bytes, this mapping will be used to
							map. This determines the size of the virtual address region the
							mapping will use.

	@param aColourOffset	The byte offset within a memory object's memory which this mapping
							is to start. This is used to adjust virtual memory allocation to
							meet page colouring restrictions. If this value is not known leave
							this argument unspecified; however, it must be specified if \a aAddr
							is specified.

	@return KErrNone if successful, otherwise one of the system wide error codes.
	*/
	TInt Construct(TMemoryAttributes aAttributes, TMappingCreateFlags aFlags, TInt aOsAsid, TLinAddr aAddr, TUint aSize, TLinAddr aColourOffset=~(TLinAddr)0);

	/**
	Add this mapping to a memory object so that it maps a specified region of its memory.

	Most of the action of this method is performed by #Attach.

	@param aMemory		The memory object.
	@param aIndex		The page index of the first page of memory to be mapped by the mapping.
	@param aCount		The number of pages of memory to be mapped by the mapping.
	@param aPermissions	The memory access permissions to apply to the mapping.

	@return KErrNone if successful, otherwise one of the system wide error codes.
	*/
	TInt Map(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TMappingPermissions aPermissions);

	/**
	Remove this mapping from the memory object it was previously added to by #Map.

	Most of the action of this method is performed by #Detach.
	*/
	void Unmap();

	/**
	Return the OS ASID for the address space that this mapping is currently mapping memory in.
	*/
	FORCE_INLINE TInt OsAsid()
		{
		__NK_ASSERT_DEBUG(iLinAddrAndOsAsid); // check mapping has been added to an address space
		return iLinAddrAndOsAsid&KPageMask;
		}

	/**
	Return the starting virtual address that this mapping is currently mapping memory at.
	The size of the region mapped is #iSizeInPages.
	*/
	FORCE_INLINE TLinAddr Base()
		{
		__NK_ASSERT_DEBUG(iLinAddrAndOsAsid); // check mapping has been added to an address space
		return iLinAddrAndOsAsid&~KPageMask;
		}

	/**
	Return #Base()|#OsAsid()
	*/
	FORCE_INLINE TLinAddr LinAddrAndOsAsid()
		{
		__NK_ASSERT_DEBUG(iLinAddrAndOsAsid); // check mapping has been added to an address space
		return iLinAddrAndOsAsid;
		}

	/**
	Return true if this mapping grants user-mode access without being global,
	i.e. it maps memory into a user process's address space.
	*/
	FORCE_INLINE TBool IsUserMapping()
		{
		// Note: must be usable before the mapping has been added to an address space
		return (PteType() & (EPteTypeUserAccess|EPteTypeGlobal)) == EPteTypeUserAccess;
		}

	/**
	Return #iBlankPde.
	*/
	FORCE_INLINE TPde BlankPde()
		{
		return iBlankPde;
		}

	/**
	Emit BTrace traces identifying this mapping's virtual address usage.
	*/
	void BTraceCreate();

	/**
	In debug builds, dump information about this mapping to the kernel trace port.
	*/
	virtual void Dump();

	/**
	Function to return a page table pointer for the specified linear address and
	index to this mapping.

	This is called by #Epoc::MovePhysicalPage when moving page table or page table info pages.

	@param aLinAddr		The linear address to find the page table entry for.
	@param aMemoryIndex	The memory object index of the page to find the page
						table entry for.

	@return A pointer to the page table entry; if the page table entry couldn't
			be found this will be NULL.
	*/
	virtual TPte* FindPageTable(TLinAddr aLinAddr, TUint aMemoryIndex)=0;

protected:
	/**
	@param aType Initial value for #Flags.
	*/
	DMemoryMapping(TUint aType);

	/**
	This destructor removes the mapping from any address space it was added to and
	frees any virtual addresses allocated to it.
	*/
	~DMemoryMapping();

	/**
	Free any resources owned by this mapping, i.e. allow Construct() to be used
	on this mapping at a new address etc.
	*/
	void Destruct();

	/**
	Allocate virtual addresses for this mapping to use.
	This is called from #Construct and the arguments to this function are the same.

	On success, iAllocatedLinAddrAndOsAsid and iAllocatedSize will be initialised.
	*/
	virtual TInt AllocateVirtualMemory(TMappingCreateFlags aFlags, TInt aOsAsid, TLinAddr aAddr, TUint aSize, TLinAddr aColourOffset);

	/**
	Free the virtual addresses allocated to this mapping with AllocateVirtualMemory.
	*/
	virtual void FreeVirtualMemory();
	};
   724 
   725 
   726 
   727 /**
   728 A memory mapping to map a 'chunk' aligned region of a DCoarseMemory object into
   729 an address space. A 'chunk' is the size of memory mapped by a whole MMU page table
   730 and is #KChunkSize bytes.
   731 
   732 These mappings make use of page tables owned by a DCoarseMemory and when
   733 they are attached to a memory object they are linked into
   734 DCoarseMemory::DPageTables::iMappings not DCoarseMemory::iMappings.
   735 */
class DCoarseMapping : public DMemoryMapping
	{
public:
	DCoarseMapping();
	~DCoarseMapping();

protected:
	/**
	@param aFlags Initial value for #Flags.
	*/
	DCoarseMapping(TUint aFlags);

protected:
	// from DMemoryMappingBase...
	virtual TInt MapPages(RPageArray::TIter aPages, TUint aMapInstanceCount); ///< Not implemented. Faults in debug builds.
	virtual void UnmapPages(RPageArray::TIter aPages, TUint aMapInstanceCount); ///< Not implemented. Faults in debug builds.
	virtual void RemapPage(TPhysAddr& aPageArray, TUint aIndex, TUint aMapInstanceCount, TBool aInvalidateTLB); ///< Not implemented. Faults in debug builds.
	virtual void RestrictPagesNA(RPageArray::TIter aPages, TUint aMapInstanceCount); ///< Not implemented. Faults in debug builds.
	virtual TInt PageIn(RPageArray::TIter aPages, TPinArgs& aPinArgs, TUint aMapInstanceCount); ///< See DMemoryMappingBase::PageIn.
	virtual TBool MovingPageIn(TPhysAddr& aPageArrayPtr, TUint aIndex); ///< See DMemoryMappingBase::MovingPageIn.
	virtual TInt DoMap(); ///< See DMemoryMappingBase::DoMap.
	virtual void DoUnmap(); ///< See DMemoryMappingBase::DoUnmap.

	// from DMemoryMapping...
	virtual TPte* FindPageTable(TLinAddr aLinAddr, TUint aMemoryIndex); ///< See DMemoryMapping::FindPageTable.
	};
   759 
   760 
   761 
   762 /**
   763 A memory mapping to map a page aligned region of a memory object into
an address space. This may be used with any memory object: DFineMemory or DCoarseMemory.
   765 */
   766 class DFineMapping : public DMemoryMapping
   767 	{
   768 public:
   769 	DFineMapping();
   770 	~DFineMapping();
   771 
   772 private:
   773 	// from DMemoryMappingBase...
   774 	virtual TInt MapPages(RPageArray::TIter aPages, TUint aMapInstanceCount);
   775 	virtual void UnmapPages(RPageArray::TIter aPages, TUint aMapInstanceCount);
   776 	virtual void RemapPage(TPhysAddr& aPageArray, TUint aIndex, TUint aMapInstanceCount, TBool aInvalidateTLB);
   777 	virtual void RestrictPagesNA(RPageArray::TIter aPages, TUint aMapInstanceCount);
   778 	virtual TInt PageIn(RPageArray::TIter aPages, TPinArgs& aPinArgs, TUint aMapInstanceCount);
   779 	virtual TBool MovingPageIn(TPhysAddr& aPageArrayPtr, TUint aIndex);
   780 	virtual TInt DoMap();
   781 	virtual void DoUnmap();
   782 
   783 	// from DMemoryMapping...
   784 
   785 	/**
   786 	Allocatate virtual addresses for this mapping to use.
   787 
   788 	In addition to performing the action of DMemoryMapping::AllocateVirtualMemory
   789 	this will also allocate all permanent page tables for the mapping if it has attribute
   790 	#EPermanentPageTables.
   791 	*/
   792 	virtual TInt AllocateVirtualMemory(TMappingCreateFlags aFlags, TInt aOsAsid, TLinAddr aAddr, TUint aSize, TLinAddr aColourOffset);
   793 
   794 	/**
   795 	Free the virtual addresses and permanent page tables allocated to this mapping with
   796 	AllocateVirtualMemory.
   797 	*/
   798 	virtual void FreeVirtualMemory();
   799 
   800 	virtual TPte* FindPageTable(TLinAddr aLinAddr, TUint aMemoryIndex);
   801 
   802 	// new...
   803 
   804 	/**
   805 	Allocate all the page tables required for this mapping. This is called by
   806 	AllocateVirtualMemory if the #EPermanentPageTables attribute is set.
   807 
   808 	Each page table for the virtual address region used by the mapping is
   809 	allocated if not already present. The permanence count of any page table
   810 	(SPageTableInfo::iPermanenceCount) is then incremented so that it is not
   811 	freed even when it no longer maps any pages.
   812 
   813 	If successful, the #EPageTablesAllocated flag in #Flags will be set.
   814 
   815 	@return KErrNone if successful, otherwise one of the system wide error codes.
   816 	*/
   817 	TInt AllocatePermanentPageTables();
   818 
   819 	/**
   820 	Free all permanent page tables allocated to this mapping.
   821 
   822 	This reverses the action of #AllocatePermanentPageTables by decrementing
   823 	the permanence count for each page table and freeing it if is no longer in use.
   824 	*/
   825 	void FreePermanentPageTables();
   826 
   827 	/**
   828 	Free a range of permanent page tables.
   829 
   830 	This is an implementation factor for FreePermanentPageTables and
   831 	AllocatePermanentPageTables. It decrements the permanence count
   832 	for each page table and frees it if is no longer in use
   833 
   834 	@param aFirstPde	The address of the page directory entry which refers to
   835 						the first page table to be freed.
   836 	@param aLastPde		The address of the page directory entry which refers to
   837 						the last page table to be freed.
   838 	*/
   839 	void FreePermanentPageTables(TPde* aFirstPde, TPde* aLastPde);
   840 
   841 #ifdef _DEBUG
   842 	/**
   843 	Validate the contents of the page table are valid.
   844 
   845 	@param aPt	The page table to validate.
   846 	*/
   847 	void ValidatePageTable(TPte* aPt, TLinAddr aAddr);
   848 #endif
   849 
   850 	/**
   851 	Get the page table being used to map a specified virtual address if it exists.
   852 
   853 	@param aAddr	A virtual address in the region allocated to this mapping.
   854 
   855 	@return The virtual address of the page table mapping \a aAddr,
   856 			or the null pointer if one wasn't found.
   857 	*/
   858 	TPte* GetPageTable(TLinAddr aAddr);
   859 
   860 	/**
   861 	Get the page table being used to map a specified virtual address; allocating
   862 	a new one if it didn't previously exist.
   863 
   864 	@param aAddr	A virtual address in the region allocated to this mapping.
   865 
   866 	@return The virtual address of the page table mapping \a aAddr,
   867 			or the null pointer if one wasn't found and couldn't be allocated.
   868 	*/
   869 	TPte* GetOrAllocatePageTable(TLinAddr aAddr);
   870 
   871 	/**
   872 	Get and pin the page table being used to map a specified virtual address;
   873 	allocating a new one if it didn't previously exist.
   874 
   875 	@param aAddr	A virtual address in the region allocated to this mapping.
   876 	@param aPinArgs	The resources required to pin the page table.
   877 					On success, the page table will have been appended to
   878 					\a aPinArgs.iPinnedPageTables.
   879 
   880 	@return The virtual address of the page table mapping \a aAddr,
   881 			or the null pointer if one wasn't found and couldn't be allocated.
   882 	*/
   883 	TPte* GetOrAllocatePageTable(TLinAddr aAddr, TPinArgs& aPinArgs);
   884 
   885 	/**
   886 	Allocate a single page table.
   887 
   888 	@param aAddr		The virtual address the page table will be used to map.
   889 	@param aPdeAddress	Address of the page directory entry which is to map
   890 						the newly allocated page table.
   891 	@param aPermanent	True, if the page table's permanence count is to be incremented.
   892 
   893 	@return The virtual address of the page table if it was successfully allocated,
   894 			otherwise the null pointer.
   895 	*/
   896 	TPte* AllocatePageTable(TLinAddr aAddr, TPde* aPdeAddress, TBool aPermanent=false);
   897 
   898 	/**
   899 	Free a single page table if it is unused.
   900 
   901 	@param aPdeAddress	Address of the page directory entry (PDE) which maps the page table.
   902 						If the page table is freed, this PDE will be set to an 'unallocated' value.
   903 	*/
   904 	void FreePageTable(TPde* aPdeAddress);
   905 	};
   906 
   907 
/**
A mapping which maps any memory into the kernel address space and provides access to 
the physical address used by a memory object.

These mappings are always of the 'pinned' type to prevent the obtained physical addresses
from becoming invalid.
*/
class DKernelPinMapping : public DFineMapping
	{
public:
	DKernelPinMapping();

	/**
	Second phase constructor: reserve the resources (virtual address space,
	page tables, etc.) this mapping requires.

	@param aReserveSize	The size of the region to reserve resources for.
						NOTE(review): presumably in bytes - confirm against
						the implementation and callers.

	@return KErrNone if successful, otherwise one of the system wide error codes.
	*/
	TInt Construct(TUint aReserveSize);

	/**
	Map a region of a memory object into the kernel address space and pin it
	so the physical addresses remain valid.

	@param aMemory		The memory object.
	@param aIndex		The page index of the first page of memory to be mapped and pinned.
	@param aCount		The number of pages of memory to be mapped and pinned.
	@param aPermissions	The memory access permissions for the mapping.

	@return KErrNone if successful, otherwise one of the system wide error codes.
	*/
	TInt MapAndPin(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TMappingPermissions aPermissions);

	/**
	Reverse the action of #MapAndPin: unmap the memory and release the pinning.
	*/
	void UnmapAndUnpin();

public:
	TInt iReservePages;		///< The number of pages this mapping is able to map with its reserved resources (page tables etc).
	};
   926 
   927 
/**
A mapping which provides access to the physical address used by a memory object
without mapping these at any virtual address accessible to software.

These mappings are always of the 'pinned' type to prevent the obtained physical addresses
from becoming invalid.
*/
class DPhysicalPinMapping : public DMemoryMappingBase
	{
public:
	DPhysicalPinMapping();

	/**
	Attach this mapping to a memory object so that it pins a specified region of its memory.

	Most of the action of this method is performed by #Attach.

	@param aMemory		The memory object.
	@param aIndex		The page index of the first page of memory to be pinned by the mapping.
	@param aCount		The number of pages of memory to be pinned by the mapping.
	@param aPermissions	The memory access permissions appropriate to the intended use
						of the physical addresses. E.g. if the memory contents will be
						changed, use EReadWrite. These permissions are used for error
						checking, e.g. detecting attempted writes to read-only memory.
						They are also used for optimising access to demand paged memory;
						which is more efficient if only read-only access is required.

	@return KErrNone if successful,
			KErrNotFound if any part of the memory to be pinned was not present,
			KErrNoMemory if there was insufficient memory,
			otherwise one of the system wide error codes.
	*/
	TInt Pin(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TMappingPermissions aPermissions);

	/**
	Remove this mapping from the memory object it was previously added to by #Pin.

	Most of the action of this method is performed by #Detach.
	*/
	virtual void Unpin();

private:
	// from DMemoryMappingBase...
	virtual TInt MapPages(RPageArray::TIter aPages, TUint aMapInstanceCount); ///< Not implemented. Faults in debug builds.
	virtual void UnmapPages(RPageArray::TIter aPages, TUint aMapInstanceCount); ///< Does nothing
	virtual void RemapPage(TPhysAddr& aPageArray, TUint aIndex, TUint aMapInstanceCount, TBool aInvalidateTLB); ///< Not implemented. Faults in debug builds.
	virtual void RestrictPagesNA(RPageArray::TIter aPages, TUint aMapInstanceCount); ///< Does nothing
	virtual TInt PageIn(RPageArray::TIter aPages, TPinArgs& aPinArgs, TUint aMapInstanceCount); ///< Does nothing
	virtual TBool MovingPageIn(TPhysAddr& aPageArrayPtr, TUint aIndex);///< Not implemented. Faults in debug builds.
	virtual TInt DoMap(); ///< Does nothing
	virtual void DoUnmap(); ///< Does nothing
	};
   980 
   981 
   982 
/**
A mapping which pins memory in order to prevent demand paging related
page faults from occurring.
*/
class DVirtualPinMapping : public DPhysicalPinMapping
	{
public:
	DVirtualPinMapping();
	~DVirtualPinMapping();

	/**
	Create a new DVirtualPinMapping object suitable for pinning a specified number of pages.

	If no maximum is specified (\a aMaxCount==0) then this object may be used to pin
	any number of pages, however this will require dynamic allocation of storage for
	page table references.

	@param aMaxCount The maximum number of pages which can be pinned, or zero for no maximum.

	@return The newly created DVirtualPinMapping or the null pointer if there was
			insufficient memory.
	*/
	static DVirtualPinMapping* New(TUint aMaxCount);

	/**
	Attach this mapping to a memory object so that it pins a specified region of its memory.

	Additionally, pin the page tables in a specified mapping (\a aMapping) which
	are being used to map these pages.

	The result of this function is that access to the pinned memory through the virtual
	addresses used by \a aMapping will not generate any demand paging related page faults.

	@param aMemory		The memory object.
	@param aIndex		The page index of the first page of memory to be pinned by the mapping.
	@param aCount		The number of pages of memory to be pinned by the mapping.
	@param aPermissions	The memory access permissions appropriate to the intended use
						of the physical addresses. E.g. if the memory contents will be
						changed, use EReadWrite. These permissions are used for error
						checking, e.g. detecting attempted writes to read-only memory.
						They are also used for optimising access to demand paged memory;
						which is more efficient if only read-only access is required.
	@param aMapping		The mapping whose page tables are to be pinned. This must be
						currently mapping the specified region of memory pages.
	@param aMapInstanceCount	The instance count of the mapping whose page tables are to be pinned.

	@return KErrNone if successful,
			KErrNotFound if any part of the memory to be pinned was not present,
			KErrNoMemory if there was insufficient memory,
			otherwise one of the system wide error codes.
	*/
	TInt Pin(	DMemoryObject* aMemory, TUint aIndex, TUint aCount, TMappingPermissions aPermissions, 
				DMemoryMappingBase* aMapping, TUint aMapInstanceCount);

	/**
	Remove this mapping from the memory object it was previously added to by #Pin.
	This will unpin any memory pages and page tables that were pinned.
	*/
	void Unpin();

	/**
	Return the maximum number of page tables which could be required to map
	\a aPageCount pages. This is used by various resource reserving calculations.
	*/
	static TUint MaxPageTables(TUint aPageCount);

	/**
	In debug builds, dump information about this mapping to the kernel trace port.
	*/
	virtual void Dump();

private:
	// from DMemoryMappingBase...
	virtual void RemapPage(TPhysAddr& aPageArray, TUint aIndex, TUint aMapInstanceCount, TBool aInvalidateTLB); ///< Does nothing.
	virtual TInt PageIn(RPageArray::TIter aPages, TPinArgs& aPinArgs, TUint aMapInstanceCount);
	virtual TBool MovingPageIn(TPhysAddr& aPageArrayPtr, TUint aIndex);///< Not implemented. Faults in debug builds.
	virtual TInt DoPin(TPinArgs& aPinArgs);
	virtual void DoUnpin(TPinArgs& aPinArgs);

private:
	/**
	Allocate memory to store pointers to all the page tables which map
	\a aCount pages of memory. The pointer to the allocated memory
	is stored at iAllocatedPinnedPageTables.

	If iSmallPinnedPageTablesArray is large enough, this function doesn't
	allocate any memory.

	@return KErrNone if successful, otherwise KErrNoMemory.
	*/
	TInt AllocPageTableArray(TUint aCount);

	/**
	Delete iAllocatedPinnedPageTables.
	*/
	void FreePageTableArray();

	/**
	Return the address of the array storing pinned page tables.
	This is either iSmallPinnedPageTablesArray or iAllocatedPinnedPageTables.
	*/
	TPte** PageTableArray();

	/**
	Unpin all the page tables which have been pinned by this mapping.

	@param aPinArgs	The resources used for pinning. The replacement pages allocated
					to this will be increased for each page which became completely
					unpinned.
	*/
	void UnpinPageTables(TPinArgs& aPinArgs);
private:
	/**
	Temporary store for the mapping passed to #Pin
	*/
	DMemoryMappingBase* iPinVirtualMapping;

	/**
	Temporary store for the mapping instance count passed to #Pin
	*/
	TUint iPinVirtualMapInstanceCount;

	/**
	The number of page tables which are currently being pinned by this mapping.
	This is the number of valid entries stored at PageTableArray.
	*/
	TUint iNumPinnedPageTables;

	/**
	The maximum number of pages which can be pinned by this mapping.
	If this is zero, there is no maximum.
	*/
	TUint iMaxCount;

	/**
	The memory allocated by this object for storing pointers to the page tables
	it has pinned.
	*/
	TPte** iAllocatedPinnedPageTables;

	enum
		{
		KSmallPinnedPageTableCount = 2 ///< Number of entries in iSmallPinnedPageTablesArray
		};

	/**
	A small array to use for storing pinned page tables.
	This is an optimisation used for the typical case of pinning a small number of pages
	to avoid dynamic allocation of memory.
	*/
	TPte* iSmallPinnedPageTablesArray[KSmallPinnedPageTableCount];
	};
  1135 
  1136 #endif