os/kernelhwsrv/kernel/eka/memmodel/epoc/flexible/mmu/mmapping.h
// Copyright (c) 2007-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
//

/**
 @file
 @internalComponent
*/

#ifndef MMAPPING_H
#define MMAPPING_H

#include "mrefcntobj.h"
#include "mmappinglist.h"
#include "mpagearray.h"



/**
Base class for memory mappings.

This provides the methods for linking a mapping to a memory object
as well as the interface for updating the MMU page tables associated
with a mapping when the memory state changes.
*/
class DMemoryMappingBase : public DReferenceCountedObject
	{
private:
	/**
	Memory object to which this mapping is currently attached.
	Updates to this are protected by the MmuLock.
	*/
	DMemoryObject*	 iMemory;

public:
	/**
	Link used to maintain list of mappings attached to a memory object.
	*/
	TMappingListLink iLink;

	/**
	Offset, in page units, within the memory object's memory for start of this mapping.
	*/
	TUint			 iStartIndex;

	/**
	Size of this mapping, in page units.
	*/
	TUint			 iSizeInPages;

private:
	/**
	Instance count which is incremented every time a mapping is attached to a memory object.
	When code is manipulating mappings, the instance count is used to detect that a
	mapping has been reused and that the operation it is performing is no longer needed.
	*/
	TUint			 iMapInstanceCount;

public:

	/**
	Bit flags stored in #Flags giving various state and attributes of the mapping.
	*/
	enum TFlags
		{
		/**
		Flag set during object construction to indicate that this mapping is of
		class #DCoarseMapping.
		*/
		ECoarseMapping			= 1<<0,

		/**
		Flag set during object construction to indicate that this mapping will pin
		any memory pages it maps. This may not be used with coarse memory mappings.
		*/
		EPinned					= 1<<1,

		/**
		Pages have already been reserved for pinning, so when this mapping is attached
		to a memory object no additional pages need to be reserved. Pre-reserving pages
		is used to prevent the possibility of failing to pin due to an out of memory
		condition. It is essential that the users of these mappings ensure that there
		are enough reserved pages in the paging pool to meet the maximum mapping size
		used.
		*/
		EPinningPagesReserved	= 1<<2,

		/**
		Pages have been successfully pinned by this mapping. This is set after demand
		paged memory has been successfully pinned and is used to indicate that the pages
		need unpinning again when the mapping is later unmapped.
		*/
		EPagesPinned			= 1<<3,

		/**
		Flag set during object construction to indicate that MMU page tables are to
		be permanently allocated for use by this mapping. Normally, page tables are
		allocated as needed to map memory, which can result in out-of-memory errors
		when mapping memory pages.
		*/
		EPermanentPageTables	= 1<<4,

		/**
		Permanent page tables have been successfully allocated for this mapping.
		This flag is used to track allocation so they can be released when the mapping
		is destroyed.
		*/
		EPageTablesAllocated	= 1<<5,

		/**
		For pinned mappings (EPinned) this flag is set whenever the mapping prevents
		any pages of memory from being fully decommitted from a memory object. When a
		mapping is finally unmapped from the memory object this flag is checked, and,
		if set, further cleanup of the decommitted pages is triggered.
		*/
		EPageUnmapVetoed		= 1<<6,

		/**
		Mapping is being, or has been, detached from a memory object.
		When set, operations on the mapping should act as though the mapping is no
		longer attached to a memory object. Specifically, no further pages of memory
		should be mapped into this mapping.

		This flag is only set when the MmuLock is held.
		*/
		EDetaching				= 1<<7,

		/**
		This mapping is a physical pinning mapping. The pages it pins
		cannot be paged out or moved.

		This flag is set when DPhysicalPinMapping objects are created.
		*/
		EPhysicalPinningMapping = 1<<8,

		/**
		Flag set during object construction to indicate that this mapping is of
		class #DLargeMapping.

		Note that #DLargeMapping is derived from #DCoarseMapping, therefore presence of this flag
		implies presence of #ECoarseMapping as well.
		*/
		ELargeMapping			= 1<<9,
		};

	/**
	Bitmask of values from enum #TPteType which will be used to calculate
	the correct attributes for any page table entries this mapping uses.
	*/
	FORCE_INLINE TUint8& PteType()
		{ return iLink.iSpare1; }

	/**
	Bitmask of values from enum #TFlags.
	The flags are 16 bits and are stored in iLink.iSpare2 and iLink.iSpare3.
	*/
	FORCE_INLINE TUint16& Flags()
		{ return (TUint16&)iLink.iSpare2; }

public:
	/**
	Return the memory object to which this mapping is currently attached.

	@pre MmuLock is held. (If aNoCheck==false)
	*/
	FORCE_INLINE DMemoryObject* Memory(TBool aNoCheck=false)
		{
		if(!aNoCheck)
			__NK_ASSERT_DEBUG(MmuLock::IsHeld());
		return iMemory;
		}

	/**
	Return true if the mapping is currently attached to a memory object.
	*/
	FORCE_INLINE TBool IsAttached()
		{ return iLink.IsLinked(); }

	/**
	Return true if the mapping is being, or has been, detached from a memory object.
	The mapping may or may not still be attached to a memory object, i.e. #IsAttached
	is indeterminate.
	*/
	FORCE_INLINE TBool BeingDetached()
		{ return Flags()&EDetaching; }

	/**
	Return the mapping instance count.
	@see #iMapInstanceCount.
	*/
	FORCE_INLINE TUint MapInstanceCount()
		{ return iMapInstanceCount; }

	/**
	Return true if this mapping provides read-only access to memory.
	*/
	FORCE_INLINE TBool IsReadOnly()
		{ return !(PteType()&EPteTypeWritable); }

#ifdef MMU_SUPPORTS_EXECUTE_NEVER
	/**
	Return true if this mapping provides access to memory which allows
	code to be executed from it.
	*/
	FORCE_INLINE TBool IsExecutable()
		{ return (PteType()&EPteTypeExecutable); }
#endif

	/**
	Return true if this is a coarse mapping, in other words it is an instance of #DCoarseMapping or
	#DLargeMapping.
	*/
	FORCE_INLINE TBool IsCoarse()
		{ return Flags()&ECoarseMapping; }

	/**
	Return true if this mapping is a large mapping, in other words an instance of #DLargeMapping.

	Note that all large mappings are also coarse mappings.
	*/
	FORCE_INLINE TBool IsLarge()
		{ return Flags()&ELargeMapping; }

	/**
	Return true if this mapping pins the memory it maps.
	*/
	FORCE_INLINE TBool IsPinned()
		{ return Flags()&EPinned; }

	/**
	Return true if this mapping physically pins the memory it maps.
	*/
	FORCE_INLINE TBool IsPhysicalPinning()
		{ return Flags()&EPhysicalPinningMapping; }

	/**
	Return the access permissions which this mapping uses to map memory.
	*/
	FORCE_INLINE TMappingPermissions Permissions()
		{ return Mmu::PermissionsFromPteType(PteType()); }

	/**
	Link this mapping to a memory object.

	This is called by the memory object during processing of #Attach.

	@param aMemory		The memory object the mapping is being attached to.
	@param aMappingList	The list to add this mapping to.

	@pre MmuLock is held.
	@pre Mapping list lock is held.
	*/
	void LinkToMemory(DMemoryObject* aMemory, TMappingList& aMappingList);

	/**
	Unlink this mapping from the memory object it was previously linked to with
	#LinkToMemory.

	This is called by the memory object during processing of #Detach.

	@param aMappingList	The list that the mapping appears on.
	*/
	void UnlinkFromMemory(TMappingList& aMappingList);

	/**
	Get the physical address(es) for a region of pages in this mapping.

	@param aIndex			Page index, within the mapping, for start of the region.
	@param aCount			Number of pages in the region.
	@param aPhysicalAddress	On success, this value is set to one of two values.
							If the specified region is physically contiguous,
							the value is the physical address of the first page
							in the region. If the region is discontiguous, the
							value is set to KPhysAddrInvalid.
	@param aPhysicalPageList If not zero, this points to an array of TPhysAddr
							objects. On success, this array will be filled
							with the addresses of the physical pages which
							contain the specified region. If aPhysicalPageList is
							zero, then the function will fail with
							KErrNotFound if the specified region is not
							physically contiguous.

	@return 0 if successful and the whole region is physically contiguous.
			1 if successful but the region isn't physically contiguous.
			KErrNotFound if any page in the region is not present,
			otherwise one of the system wide error codes.

	@pre This mapping must have been attached to a memory object with #Pin.
	*/
	TInt PhysAddr(TUint aIndex, TUint aCount, TPhysAddr& aPhysicalAddress, TPhysAddr* aPhysicalPageList);

protected:
	/**
	@param aType Initial value for #Flags.
	*/
	DMemoryMappingBase(TUint aType);

	/**
	Attach this mapping to a memory object so that it maps a specified region of its memory.

	@param aMemory	The memory object.
	@param aIndex	The page index of the first page of memory to be mapped by the mapping.
	@param aCount	The number of pages of memory to be mapped by the mapping.

	@return KErrNone if successful, otherwise one of the system wide error codes.
	*/
	TInt Attach(DMemoryObject* aMemory, TUint aIndex, TUint aCount);

	/**
	Remove this mapping from the memory object it was previously attached to by #Attach.
	*/
	void Detach();

public:
	/**
	Update the page table entries corresponding to this mapping to add entries for
	a specified set of memory pages.

	This method is called by DMemoryObject::MapPages to update each mapping attached
	to a memory object whenever new pages of memory are added. However, it won't be
	called for any mapping with the #EPinned attribute as such mappings are unchanging.

	@param aPages				An RPageArray::TIter which refers to a range of pages
								in a memory object. This has been clipped to fit within
								the range of pages mapped by this mapping.
								Only array entries which have state RPageArray::ECommitted
								should be mapped into the mapping's page tables.

	@param aMapInstanceCount	The instance of this mapping which is to be updated.
								Whenever this no longer matches the current #MapInstanceCount
								the function must not update any more of the mapping's
								page table entries (but must still return KErrNone).

	@return KErrNone if successful, otherwise one of the system wide error codes.
	*/
	virtual TInt MapPages(RPageArray::TIter aPages, TUint aMapInstanceCount) =0;

	/**
	Update the page table entries corresponding to this mapping to remove entries for
	a specified set of memory pages.

	This method is called by DMemoryObject::UnmapPages to update each mapping attached
	to a memory object whenever pages of memory are removed.

	@param aPages				An RPageArray::TIter which refers to a range of pages
								in a memory object. This has been clipped to fit within
								the range of pages mapped by this mapping.
								Only array entries which return true for
								RPageArray::TargetStateIsDecommitted should be unmapped
								from the mapping's page tables.

	@param aMapInstanceCount	The instance of this mapping which is to be updated.
								Whenever this no longer matches the current #MapInstanceCount
								the function must not update any more of the mapping's
								page table entries.
	*/
	virtual void UnmapPages(RPageArray::TIter aPages, TUint aMapInstanceCount) =0;

	/**
	Update the page table entry corresponding to this mapping to update an entry for a specified
	page that has just been moved or shadowed.

	@param aPageArray			The page array entry of the page in a memory object.
								Only array entries which have a target state of
								RPageArray::ECommitted should be mapped into the
								mapping's page tables.

	@param aIndex				The index of the page in the memory object.

	@param aMapInstanceCount	The instance of this mapping which is to be updated.
								Whenever this no longer matches the current #MapInstanceCount
								the function must not update any more of the mapping's
								page table entries.

	@param aInvalidateTLB		Set to ETrue when the TLB entries associated with this page
								should be invalidated. This must be done when there is
								already a valid pte for this page, i.e. if the page is still
								mapped.
	*/
	virtual void RemapPage(TPhysAddr& aPageArray, TUint aIndex, TUint aMapInstanceCount, TBool aInvalidateTLB)=0;

	/**
	Update the page table entries corresponding to this mapping to apply access restrictions
	to a specified set of memory pages.

	This method is called by DMemoryObject::RestrictPages to update each mapping attached
	to a memory object whenever pages of memory are restricted.

	@param aPages				An RPageArray::TIter which refers to a range of pages
								in a memory object. This has been clipped to fit within
								the range of pages mapped by this mapping.
								Only array entries which return true for
								RPageArray::TargetStateIsDecommitted should be unmapped
								from the mapping's page tables.

	@param aMapInstanceCount	The instance of this mapping which is to be updated.
								Whenever this no longer matches the current #MapInstanceCount
								the function must not update any more of the mapping's
								page table entries.
	*/
	virtual void RestrictPagesNA(RPageArray::TIter aPages, TUint aMapInstanceCount) =0;

	/**
	Update the page table entries corresponding to this mapping to add entries for
	a specified set of demand paged memory pages following a 'page in' or memory
	pinning operation.

	@param aPages				An RPageArray::TIter which refers to a range of pages
								in a memory object. This will be within the range of pages
								mapped by this mapping.
								Only array entries which have state RPageArray::ECommitted
								should be mapped into the mapping's page tables.

	@param aPinArgs				The resources required to pin any page tables the mapping uses.
								Page tables must be pinned if \a aPinArgs.iPinnedPageTables is
								not the null pointer, in which case the virtual address of each
								pinned page table must be stored in the array this points to.
								\a aPinArgs.iReadOnly is true if write access permissions
								are not needed.

	@return KErrNone if successful, otherwise one of the system wide error codes.
	*/
	virtual TInt PageIn(RPageArray::TIter aPages, TPinArgs& aPinArgs, TUint aMapInstanceCount) =0;


	/**
	Update the page table entry corresponding to this mapping to add an entry for
	a specified page which is in the process of being moved.

	@param aPageArrayPtr		The page array entry for the page to be mapped, which must be
								within this mapping's range of pages.
								Only array entries which have a target state of
								RPageArray::ECommitted should be mapped into the mapping's
								page tables.

	@param aIndex				The index of the page.

	@return ETrue if successful, EFalse otherwise.
	*/
	virtual TBool MovingPageIn(TPhysAddr& aPageArrayPtr, TUint aIndex)=0;


	/**
	In debug builds, dump information about this mapping to the kernel trace port.
	*/
	virtual void Dump();

private:
	/**
	Update this mapping's MMU data structures to map all pages of memory
	currently committed to the memory object (#iMemory) in the region covered
	by this mapping.

	This method is called by #Attach after the mapping has been linked
	into the memory object.

	@return KErrNone if successful, otherwise one of the system wide error codes.
	*/
	virtual TInt DoMap() =0;

	/**
	Update this mapping's MMU data structures to unmap all pages of memory.

	This method is called by #Detach before the mapping has been unlinked
	from the memory object but after the #EDetaching flag has been set.
	*/
	virtual void DoUnmap() =0;

protected:
	/**
	For pinned mappings, this virtual method is called by #Attach in order to pin
	pages of memory if required. This is called after the mapping has been linked
	into the memory object but before #DoMap.

	The default implementation of this method simply calls DMemoryManager::Pin.

	@param aPinArgs	The resources to use for pinning. This has sufficient replacement
					pages allocated to pin every page the mapping covers, and the
					value of \a aPinArgs.iReadOnly has been set to correspond to the
					mapping's access permissions.

	@return KErrNone if successful, otherwise one of the system wide error codes.
	*/
	virtual TInt DoPin(TPinArgs& aPinArgs);

	/**
	For pinned mappings, this virtual method is called by #Detach in order to unpin
	pages of memory if required. This is called before the mapping has been unlinked
	from the memory object but after #DoUnmap.

	The default implementation of this method simply calls DMemoryManager::Unpin.

	@param aPinArgs	The resources used for pinning. The replacement pages allocated
					to this will be increased for each page which became completely
					unpinned.
	*/
	virtual void DoUnpin(TPinArgs& aPinArgs);
	};
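
/*
Illustrative note, not part of the original header: implementations of the page table
update methods above (MapPages, UnmapPages, RemapPage, RestrictPagesNA) are expected to
use the aMapInstanceCount argument as a guard against the mapping having been reused.
A minimal sketch of that guard, using only members declared above and assuming MmuLock
exposes static Lock()/Unlock() alongside the IsHeld() already used in this header:

@code
	// inside a hypothetical derived class's MapPages()...
	MmuLock::Lock();
	if(aMapInstanceCount != MapInstanceCount() || BeingDetached())
		{
		// mapping was reused or is being detached; stop updating page table
		// entries (MapPages must still report success in this case)
		MmuLock::Unlock();
		return KErrNone;
		}
	// ...update page table entries while the instance count still matches...
	MmuLock::Unlock();
@endcode
*/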


/**
Base class for memory mappings which map memory contents into an address space.

This provides methods for allocating virtual memory and holds the attributes needed
for MMU page table entries.
*/
class DMemoryMapping : public DMemoryMappingBase
	{
protected:
	/**
	The page directory entry (PDE) value for use when mapping this mapping's page tables.
	This value has a zero physical address component, so a page table's physical
	address can be simply ORed in.

	This could potentially be removed (see DMemoryMapping::PdeType()).
	*/
	TPde			iBlankPde;

	/**
	The page table entry (PTE) value for use when mapping pages into this mapping.
	This value has a zero physical address component, so a page's physical
	address can be simply ORed in.
	*/
	TPte			iBlankPte;

	/**
	Start of the virtual address region allocated for use by this mapping,
	ORed with the OS ASID of the address space this lies in.

	Note, the address at which memory is mapped (#iLinAddrAndOsAsid) may be different
	to this allocated address due to page colouring restrictions.

	@see iAllocatedSize
	*/
	TLinAddr		iAllocatedLinAddrAndOsAsid;

	/**
	Size of the virtual address region allocated for use by this mapping.

	@see iAllocatedLinAddrAndOsAsid
	*/
	TUint			iAllocatedSize;

private:
	/**
	Start of the virtual address region that this mapping is currently
	mapping memory at, ORed with the OS ASID of the address space this lies in.

	This value is set by #Map which is called from #Attach when the mapping
	is attached to a memory object. The address used may be different to
	#iAllocatedLinAddrAndOsAsid due to page colouring restrictions.

	The size of the region mapped is #iSizeInPages.

	Note, access to this value is through #Base() and #OsAsid().
	*/
	TLinAddr		iLinAddrAndOsAsid;

public:
	/**
	Second phase constructor.

	The main function of this is to allocate a virtual address region for the mapping
	and to add it to an address space.

	@param aAttributes		The attributes of the memory which this mapping is intended to map.
							This is only needed to set up #PdeType, which is required for correct
							virtual address allocation, so in practice the only relevant attribute
							is to set EMemoryAttributeUseECC if required, else use
							EMemoryAttributeStandard.

	@param aFlags			A combination of the options from enum TMappingCreateFlags.

	@param aOsAsid			The OS ASID of the address space the mapping is to be added to.

	@param aAddr			The virtual address to use for the mapping, or zero if this is
							to be allocated by this function.

	@param aSize			The maximum size of memory, in bytes, this mapping will be used to
							map. This determines the size of the virtual address region the
							mapping will use.

	@param aColourOffset	The byte offset within a memory object's memory at which this mapping
							is to start. This is used to adjust virtual memory allocation to
							meet page colouring restrictions. If this value is not known leave
							this argument unspecified; however, it must be specified if \a aAddr
							is specified.

	@return KErrNone if successful, otherwise one of the system wide error codes.
	*/
	TInt Construct(TMemoryAttributes aAttributes, TMappingCreateFlags aFlags, TInt aOsAsid, TLinAddr aAddr, TUint aSize, TLinAddr aColourOffset=~(TLinAddr)0);

	/**
	Add this mapping to a memory object so that it maps a specified region of its memory.

	Most of the action of this method is performed by #Attach.

	@param aMemory		The memory object.
	@param aIndex		The page index of the first page of memory to be mapped by the mapping.
	@param aCount		The number of pages of memory to be mapped by the mapping.
	@param aPermissions	The memory access permissions to apply to the mapping.

	@return KErrNone if successful, otherwise one of the system wide error codes.
	*/
	TInt Map(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TMappingPermissions aPermissions);

	/**
	Remove this mapping from the memory object it was previously added to by #Map.

	Most of the action of this method is performed by #Detach.
	*/
	void Unmap();

	/**
	Return the OS ASID for the address space that this mapping is currently mapping memory in.
	*/
	FORCE_INLINE TInt OsAsid()
		{
		__NK_ASSERT_DEBUG(iLinAddrAndOsAsid); // check mapping has been added to an address space
		return iLinAddrAndOsAsid&KPageMask;
		}

	/**
	Return the starting virtual address that this mapping is currently mapping memory at.
	The size of the region mapped is #iSizeInPages.
	*/
	FORCE_INLINE TLinAddr Base()
		{
		__NK_ASSERT_DEBUG(iLinAddrAndOsAsid); // check mapping has been added to an address space
		return iLinAddrAndOsAsid&~KPageMask;
		}

	/**
	Return #Base()|#OsAsid().
	*/
	FORCE_INLINE TLinAddr LinAddrAndOsAsid()
		{
		__NK_ASSERT_DEBUG(iLinAddrAndOsAsid); // check mapping has been added to an address space
		return iLinAddrAndOsAsid;
		}

	FORCE_INLINE TBool IsUserMapping()
		{
		// Note: must be usable before the mapping has been added to an address space
		return (PteType() & (EPteTypeUserAccess|EPteTypeGlobal)) == EPteTypeUserAccess;
		}

	/**
	Return #iBlankPde.
	*/
	FORCE_INLINE TPde BlankPde()
		{
		return iBlankPde;
		}

	/**
	Emit BTrace traces identifying this mapping's virtual address usage.
	*/
	void BTraceCreate();

	/**
	In debug builds, dump information about this mapping to the kernel trace port.
	*/
	virtual void Dump();

	/**
	Return a page table pointer for the specified linear address and memory object
	index in this mapping.

	This is called by #Epoc::MovePhysicalPage when moving page table or page table info pages.

	@param aLinAddr		The linear address to find the page table entry for.
	@param aMemoryIndex	The memory object index of the page to find the page
						table entry for.

	@return A pointer to the page table entry; if the page table entry couldn't
			be found this will be NULL.
	*/
	virtual TPte* FindPageTable(TLinAddr aLinAddr, TUint aMemoryIndex)=0;

protected:
	/**
	@param aType Initial value for #Flags.
	*/
	DMemoryMapping(TUint aType);

	/**
	This destructor removes the mapping from any address space it was added to and
	frees any virtual addresses allocated to it.
	*/
	~DMemoryMapping();

	/**
	Free any resources owned by this mapping, i.e. allow Construct() to be used
	on this mapping at a new address etc.
	*/
	void Destruct();

	/**
	Allocate virtual addresses for this mapping to use.
	This is called from #Construct and the arguments to this function are the same.

	On success, iAllocatedLinAddrAndOsAsid and iAllocatedSize will be initialised.
	*/
	virtual TInt AllocateVirtualMemory(TMappingCreateFlags aFlags, TInt aOsAsid, TLinAddr aAddr, TUint aSize, TLinAddr aColourOffset);

	/**
	Free the virtual addresses allocated to this mapping with AllocateVirtualMemory.
	*/
	virtual void FreeVirtualMemory();
	};
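
/*
Illustrative usage sketch, not part of the original header. It shows the intended
Construct/Map/Unmap lifecycle of a DMemoryMapping through a concrete subclass
(DFineMapping, declared further down). The memory object 'memory', the page count,
OS ASID, create flags and permissions are assumed to come from elsewhere, and
Close() is assumed to be provided by the reference counted base class:

@code
	DFineMapping* mapping = new DFineMapping;
	if(!mapping)
		return KErrNoMemory;
	// reserve a virtual address region big enough for the pages to be mapped
	TInt r = mapping->Construct(EMemoryAttributeStandard, aCreateFlags, aOsAsid, 0, aPageCount<<KPageShift);
	if(r==KErrNone)
		r = mapping->Map(memory, 0, aPageCount, aPermissions);	// attach to the memory object
	if(r==KErrNone)
		{
		TLinAddr addr = mapping->Base();	// virtual address the memory is now mapped at
		// ... use the mapping ...
		mapping->Unmap();					// detach from the memory object
		}
	mapping->Close();						// release the (reference counted) mapping object
@endcode
*/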


/**
A memory mapping to map a 'chunk' aligned region of a DCoarseMemory object into
an address space. A 'chunk' is the size of memory mapped by a whole MMU page table
and is #KChunkSize bytes.

These mappings make use of page tables owned by a DCoarseMemory and when
they are attached to a memory object they are linked into
DCoarseMemory::DPageTables::iMappings not DCoarseMemory::iMappings.
*/
class DCoarseMapping : public DMemoryMapping
	{
public:
	DCoarseMapping();
	~DCoarseMapping();

protected:
	DCoarseMapping(TUint aFlags);

protected:
	// from DMemoryMappingBase...
	virtual TInt MapPages(RPageArray::TIter aPages, TUint aMapInstanceCount); ///< Not implemented. Faults in debug builds.
	virtual void UnmapPages(RPageArray::TIter aPages, TUint aMapInstanceCount); ///< Not implemented. Faults in debug builds.
	virtual void RemapPage(TPhysAddr& aPageArray, TUint aIndex, TUint aMapInstanceCount, TBool aInvalidateTLB); ///< Not implemented. Faults in debug builds.
	virtual void RestrictPagesNA(RPageArray::TIter aPages, TUint aMapInstanceCount); ///< Not implemented. Faults in debug builds.
	virtual TInt PageIn(RPageArray::TIter aPages, TPinArgs& aPinArgs, TUint aMapInstanceCount);
	virtual TBool MovingPageIn(TPhysAddr& aPageArrayPtr, TUint aIndex);
	virtual TInt DoMap();
	virtual void DoUnmap();

	// from DMemoryMapping...
	virtual TPte* FindPageTable(TLinAddr aLinAddr, TUint aMemoryIndex);
	};
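
/*
Illustrative note, not part of the original header: since a 'chunk' is the amount of
memory covered by one MMU page table, a coarse mapping deals in units of KChunkSize
bytes, i.e. KChunkSize>>KPageShift pages per page table (for example 1MB / 4KB = 256
pages on a typical ARM configuration; the exact values are platform dependent).
*/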


/**
A memory mapping to map a page aligned region of a memory object into
an address space. This may be used with any memory object: DFineMemory or DCoarseMemory.
*/
class DFineMapping : public DMemoryMapping
	{
public:
	DFineMapping();
	~DFineMapping();

private:
	// from DMemoryMappingBase...
	virtual TInt MapPages(RPageArray::TIter aPages, TUint aMapInstanceCount);
	virtual void UnmapPages(RPageArray::TIter aPages, TUint aMapInstanceCount);
	virtual void RemapPage(TPhysAddr& aPageArray, TUint aIndex, TUint aMapInstanceCount, TBool aInvalidateTLB);
	virtual void RestrictPagesNA(RPageArray::TIter aPages, TUint aMapInstanceCount);
	virtual TInt PageIn(RPageArray::TIter aPages, TPinArgs& aPinArgs, TUint aMapInstanceCount);
	virtual TBool MovingPageIn(TPhysAddr& aPageArrayPtr, TUint aIndex);
	virtual TInt DoMap();
	virtual void DoUnmap();

	// from DMemoryMapping...

	/**
	Allocate virtual addresses for this mapping to use.

	In addition to performing the action of DMemoryMapping::AllocateVirtualMemory
	this will also allocate all permanent page tables for the mapping if it has attribute
	#EPermanentPageTables.
	*/
	virtual TInt AllocateVirtualMemory(TMappingCreateFlags aFlags, TInt aOsAsid, TLinAddr aAddr, TUint aSize, TLinAddr aColourOffset);

	/**
	Free the virtual addresses and permanent page tables allocated to this mapping with
	AllocateVirtualMemory.
	*/
	virtual void FreeVirtualMemory();

	virtual TPte* FindPageTable(TLinAddr aLinAddr, TUint aMemoryIndex);

	// new...

	/**
	Allocate all the page tables required for this mapping. This is called by
	AllocateVirtualMemory if the #EPermanentPageTables attribute is set.

	Each page table for the virtual address region used by the mapping is
	allocated if not already present. The permanence count of any page table
	(SPageTableInfo::iPermanenceCount) is then incremented so that it is not
	freed even when it no longer maps any pages.

	If successful, the #EPageTablesAllocated flag in #Flags will be set.

	@return KErrNone if successful, otherwise one of the system wide error codes.
	*/
	TInt AllocatePermanentPageTables();

	/**
	Free all permanent page tables allocated to this mapping.

	This reverses the action of #AllocatePermanentPageTables by decrementing
	the permanence count for each page table and freeing it if it is no longer in use.
	*/
	void FreePermanentPageTables();

	/**
	Free a range of permanent page tables.

	This is an implementation factor for FreePermanentPageTables and
	AllocatePermanentPageTables. It decrements the permanence count
	for each page table and frees it if it is no longer in use.

	@param aFirstPde	The address of the page directory entry which refers to
						the first page table to be freed.
	@param aLastPde		The address of the page directory entry which refers to
						the last page table to be freed.
	*/
	void FreePermanentPageTables(TPde* aFirstPde, TPde* aLastPde);

#ifdef _DEBUG
	/**
	Check that the contents of the page table are valid.

	@param aPt	The page table to validate.
	*/
	void ValidatePageTable(TPte* aPt, TLinAddr aAddr);
#endif

	/**
	Get the page table being used to map a specified virtual address, if it exists.

	@param aAddr	A virtual address in the region allocated to this mapping.

	@return The virtual address of the page table mapping \a aAddr,
			or the null pointer if one wasn't found.
	*/
	TPte* GetPageTable(TLinAddr aAddr);

	/**
	Get the page table being used to map a specified virtual address, allocating
	a new one if it didn't previously exist.

	@param aAddr	A virtual address in the region allocated to this mapping.

	@return The virtual address of the page table mapping \a aAddr,
			or the null pointer if one wasn't found and couldn't be allocated.
	*/
	TPte* GetOrAllocatePageTable(TLinAddr aAddr);

	/**
	Get and pin the page table being used to map a specified virtual address,
	allocating a new one if it didn't previously exist.

	@param aAddr	A virtual address in the region allocated to this mapping.
	@param aPinArgs	The resources required to pin the page table.
					On success, the page table will have been appended to
					\a aPinArgs.iPinnedPageTables.

	@return The virtual address of the page table mapping \a aAddr,
			or the null pointer if one wasn't found and couldn't be allocated.
	*/
	TPte* GetOrAllocatePageTable(TLinAddr aAddr, TPinArgs& aPinArgs);

	/**
	Allocate a single page table.

	@param aAddr		The virtual address the page table will be used to map.
	@param aPdeAddress	Address of the page directory entry which is to map
						the newly allocated page table.
	@param aPermanent	True if the page table's permanence count is to be incremented.

	@return The virtual address of the page table if it was successfully allocated,
			otherwise the null pointer.
	*/
	TPte* AllocatePageTable(TLinAddr aAddr, TPde* aPdeAddress, TBool aPermanent=false);

	/**
	Free a single page table if it is unused.

	@param aPdeAddress	Address of the page directory entry (PDE) which maps the page table.
						If the page table is freed, this PDE will be set to an 'unallocated' value.
	*/
	void FreePageTable(TPde* aPdeAddress);
	};


/**
A mapping which maps any memory into the kernel address space and provides access to
the physical addresses used by a memory object.

These mappings are always of the 'pinned' type to prevent the obtained physical addresses
from becoming invalid.
*/
class DKernelPinMapping : public DFineMapping
	{
public:
	DKernelPinMapping();
	TInt Construct(TUint aReserveSize);
	TInt MapAndPin(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TMappingPermissions aPermissions);
	void UnmapAndUnpin();

public:
	TInt iReservePages;		///< The number of pages this mapping is able to map with its reserved resources (page tables etc).
	};
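
/*
Illustrative usage sketch, not part of the original header. 'memory', 'index', 'count',
the reserve size and the permissions are assumed to come from elsewhere, and Close()
is assumed from the reference counted base class:

@code
	DKernelPinMapping* pin = new DKernelPinMapping;
	if(pin && pin->Construct(aReserveSize)==KErrNone)	// pre-reserve virtual address space etc.
		{
		if(pin->MapAndPin(memory, index, count, aPermissions)==KErrNone)
			{
			TLinAddr addr = pin->Base();	// kernel virtual address of the pinned memory
			// ... access the memory through 'addr' ...
			pin->UnmapAndUnpin();
			}
		}
	if(pin)
		pin->Close();
@endcode
*/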


/**
A mapping which provides access to the physical addresses used by a memory object
without mapping them at any virtual address accessible to software.

These mappings are always of the 'pinned' type to prevent the obtained physical addresses
from becoming invalid.
*/
class DPhysicalPinMapping : public DMemoryMappingBase
	{
public:
	DPhysicalPinMapping();

	/**
	Attach this mapping to a memory object so that it pins a specified region of its memory.

	Most of the action of this method is performed by #Attach.

	@param aMemory		The memory object.
	@param aIndex		The page index of the first page of memory to be pinned by the mapping.
	@param aCount		The number of pages of memory to be pinned by the mapping.
	@param aPermissions	The memory access permissions appropriate to the intended use
						of the physical addresses. E.g. if the memory contents will be
						changed, use EReadWrite. These permissions are used for error
						checking, e.g. detecting attempted writes to read-only memory.
						They are also used for optimising access to demand paged memory,
						which is more efficient if only read-only access is required.

	@return KErrNone if successful,
			KErrNotFound if any part of the memory to be pinned was not present,
			KErrNoMemory if there was insufficient memory,
			otherwise one of the system wide error codes.
	*/
	TInt Pin(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TMappingPermissions aPermissions);

	/**
	Remove this mapping from the memory object it was previously added to by #Pin.

	Most of the action of this method is performed by #Detach.
	*/
	virtual void Unpin();

private:
	// from DMemoryMappingBase...
	virtual TInt MapPages(RPageArray::TIter aPages, TUint aMapInstanceCount); ///< Not implemented. Faults in debug builds.
	virtual void UnmapPages(RPageArray::TIter aPages, TUint aMapInstanceCount); ///< Does nothing.
	virtual void RemapPage(TPhysAddr& aPageArray, TUint aIndex, TUint aMapInstanceCount, TBool aInvalidateTLB); ///< Not implemented. Faults in debug builds.
	virtual void RestrictPagesNA(RPageArray::TIter aPages, TUint aMapInstanceCount); ///< Does nothing.
	virtual TInt PageIn(RPageArray::TIter aPages, TPinArgs& aPinArgs, TUint aMapInstanceCount); ///< Does nothing.
	virtual TBool MovingPageIn(TPhysAddr& aPageArrayPtr, TUint aIndex); ///< Not implemented. Faults in debug builds.
	virtual TInt DoMap(); ///< Does nothing.
	virtual void DoUnmap(); ///< Does nothing.
	};
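
/*
Illustrative usage sketch, not part of the original header: pin a region of a memory
object and retrieve its physical address(es) with the inherited PhysAddr(). 'memory',
'index', 'count' and the permissions are assumed to come from elsewhere, Close() is
assumed from the reference counted base class, and the fixed-size page list buffer is
hypothetical:

@code
	DPhysicalPinMapping* pin = new DPhysicalPinMapping;
	if(!pin)
		return KErrNoMemory;
	TInt r = pin->Pin(memory, index, count, aPermissions);
	if(r==KErrNone)
		{
		TPhysAddr physAddr;
		TPhysAddr pageList[16];		// hypothetical buffer; needs one entry per pinned page (count<=16 here)
		r = pin->PhysAddr(0, count, physAddr, pageList);
		// r==0: region physically contiguous, physAddr is its start
		// r==1: region discontiguous, per-page addresses are in pageList
		pin->Unpin();
		}
	pin->Close();
@endcode
*/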


/**
A mapping which pins memory in order to prevent demand paging related
page faults from occurring.
*/
class DVirtualPinMapping : public DPhysicalPinMapping
	{
public:
	DVirtualPinMapping();
	~DVirtualPinMapping();

	/**
	Create a new DVirtualPinMapping object suitable for pinning a specified number of pages.

	If no maximum is specified (\a aMaxCount==0) then this object may be used to pin
	any number of pages; however, this will require dynamic allocation of storage for
	page table references.

	@param aMaxCount The maximum number of pages which can be pinned, or zero for no maximum.

	@return The newly created DVirtualPinMapping or the null pointer if there was
			insufficient memory.
	*/
	static DVirtualPinMapping* New(TUint aMaxCount);

	/**
	Attach this mapping to a memory object so that it pins a specified region of its memory.

	Additionally, pin the page tables in a specified mapping (\a aMapping) which
	are being used to map these pages.

	The result of this function is that access to the pinned memory through the virtual
	addresses used by \a aMapping will not generate any demand paging related page faults.

	@param aMemory		The memory object.
	@param aIndex		The page index of the first page of memory to be pinned by the mapping.
	@param aCount		The number of pages of memory to be pinned by the mapping.
	@param aPermissions	The memory access permissions appropriate to the intended use
						of the physical addresses. E.g. if the memory contents will be
						changed, use EReadWrite. These permissions are used for error
						checking, e.g. detecting attempted writes to read-only memory.
						They are also used for optimising access to demand paged memory,
						which is more efficient if only read-only access is required.
	@param aMapping		The mapping whose page tables are to be pinned. This must be
						currently mapping the specified region of memory pages.
	@param aMapInstanceCount	The instance count of the mapping whose page tables are to be pinned.

	@return KErrNone if successful,
			KErrNotFound if any part of the memory to be pinned was not present,
			KErrNoMemory if there was insufficient memory,
			otherwise one of the system wide error codes.
	*/
	TInt Pin(	DMemoryObject* aMemory, TUint aIndex, TUint aCount, TMappingPermissions aPermissions,
				DMemoryMappingBase* aMapping, TUint aMapInstanceCount);

	/**
	Remove this mapping from the memory object it was previously added to by #Pin.
	This will unpin any memory pages and page tables that were pinned.
	*/
	void Unpin();

	/**
	Return the maximum number of page tables which could be required to map
	\a aPageCount pages. This is used by various resource reserving calculations.
	*/
	static TUint MaxPageTables(TUint aPageCount);

	/**
	In debug builds, dump information about this mapping to the kernel trace port.
	*/
	virtual void Dump();

private:
	// from DMemoryMappingBase...
	virtual void RemapPage(TPhysAddr& aPageArray, TUint aIndex, TUint aMapInstanceCount, TBool aInvalidateTLB); ///< Does nothing.
	virtual TInt PageIn(RPageArray::TIter aPages, TPinArgs& aPinArgs, TUint aMapInstanceCount);
	virtual TBool MovingPageIn(TPhysAddr& aPageArrayPtr, TUint aIndex); ///< Not implemented. Faults in debug builds.
	virtual TInt DoPin(TPinArgs& aPinArgs);
	virtual void DoUnpin(TPinArgs& aPinArgs);

private:
	/**
	Allocate memory to store pointers to all the page tables which map
	\a aCount pages of memory. The pointer to the allocated memory
	is stored at iAllocatedPinnedPageTables.

	If iSmallPinnedPageTablesArray is large enough, this function doesn't
	allocate any memory.

	@return KErrNone if successful, otherwise KErrNoMemory.
	*/
	TInt AllocPageTableArray(TUint aCount);

	/**
	Delete iAllocatedPinnedPageTables.
	*/
	void FreePageTableArray();

	/**
	Return the address of the array storing pinned page tables.
	This is either iSmallPinnedPageTablesArray or iAllocatedPinnedPageTables.
	*/
	TPte** PageTableArray();

	/**
	Unpin all the page tables which have been pinned by this mapping.

	@param aPinArgs	The resources used for pinning. The replacement pages allocated
					to this will be increased for each page which became completely
					unpinned.
	*/
	void UnpinPageTables(TPinArgs& aPinArgs);

private:
	/**
	Temporary store for the mapping passed to #Pin.
	*/
	DMemoryMappingBase* iPinVirtualMapping;

	/**
	Temporary store for the mapping instance count passed to #Pin.
	*/
	TUint iPinVirtualMapInstanceCount;

	/**
	The number of page tables which are currently being pinned by this mapping.
	This is the number of valid entries stored at PageTableArray.
	*/
	TUint iNumPinnedPageTables;

	/**
	The maximum number of pages which can be pinned by this mapping.
	If this is zero, there is no maximum.
	*/
	TUint iMaxCount;

	/**
	The memory allocated by this object for storing pointers to the page tables
	it has pinned.
	*/
	TPte** iAllocatedPinnedPageTables;

	enum
		{
		KSmallPinnedPageTableCount = 2 ///< Number of entries in iSmallPinnedPageTablesArray.
		};

	/**
	A small array to use for storing pinned page tables.
	This is an optimisation used for the typical case of pinning a small number of pages
	to avoid dynamic allocation of memory.
	*/
	TPte* iSmallPinnedPageTablesArray[KSmallPinnedPageTableCount];
	};
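
/*
Illustrative usage sketch, not part of the original header: prevent demand paging
faults on memory already mapped by another mapping. 'memory', 'index', 'count', the
permissions and the existing mapping 'userMapping' are assumed to come from elsewhere,
and Close() is assumed from the reference counted base class:

@code
	DVirtualPinMapping* pin = DVirtualPinMapping::New(count);	// reserve for up to 'count' pages
	if(!pin)
		return KErrNoMemory;
	TInt r = pin->Pin(memory, index, count, aPermissions,
						userMapping, userMapping->MapInstanceCount());
	if(r==KErrNone)
		{
		// accesses through userMapping's addresses won't take demand paging faults now
		// ...
		pin->Unpin();
		}
	pin->Close();
@endcode
*/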
#endif