// Copyright (c) 2007-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
// Initial Contributors:
// Nokia Corporation - initial contribution.
#include "mrefcntobj.h"
#include "mmappinglist.h"
#include "mpagearray.h"
Base class for memory mappings.
This provides the methods for linking a mapping to a memory object
as well as the interface for updating the MMU page tables associated
with a mapping when the memory state changes.
class DMemoryMappingBase : public DReferenceCountedObject
Memory object to which this mapping is currently attached.
Updates to this are protected by the MmuLock.
DMemoryObject* iMemory;
Link used to maintain list of mappings attached to a memory object.
TMappingListLink iLink;
Offset, in page units, within the memory object's memory for start of this mapping.
Size of this mapping, in page units.
Instance count which is incremented every time a mapping is attached to a memory object.
When code is manipulating mappings, the instance count is used to detect that a
mapping has been reused and that the operation it is performing is no longer needed.
TUint iMapInstanceCount;
Bit flags stored in #Flags giving various state and attributes of the mapping.
Flag set during object construction to indicate that this mapping is of
class #DCoarseMapping.
ECoarseMapping = 1<<0,
Flag set during object construction to indicate that this mapping will pin
any memory pages it maps. This may not be used with coarse memory mappings.
EPinned = 1<<1,
Pages have already been reserved for pinning, so when this mapping is attached
to a memory object no additional pages need to be reserved. Pre-reserving pages
is used to prevent the possibility of failing to pin due to an out of memory
condition. It is essential that the users of these mappings ensure that there
are enough reserved pages in the paging pool to meet the maximum mapping size
used.
EPinningPagesReserved = 1<<2,
Pages have been successfully pinned by this mapping. This is set after demand
paged memory has been successfully pinned and is used to indicate that the pages
need unpinning again when the mapping is later unmapped.
Flag set during object construction to indicate that MMU page tables are to
be permanently allocated for use by this mapping. Normally, page tables are
allocated as needed to map memory, which can result in out-of-memory errors
when mapping memory pages.
EPermanentPageTables = 1<<4,
Permanent page tables have been successfully allocated for this mapping.
This flag is used to track allocation so they can be released when the mapping
is destroyed.
EPageTablesAllocated = 1<<5,
For pinned mappings (EPinned) this flag is set whenever the mapping prevents
any pages of memory from being fully decommitted from a memory object. When a
mapping is finally unmapped from the memory object this flag is checked, and,
if set, further cleanup of the decommitted pages is triggered.
EPageUnmapVetoed = 1<<6,
Mapping is being, or has been, detached from a memory object.
When set, operations on the mapping should act as though the mapping is no
longer attached to a memory object. Specifically, no further pages of memory
should be mapped into this mapping.
This flag is only set when the MmuLock is held.
EDetaching = 1<<7,
This mapping is a physical pinning mapping. The pages it pins
cannot be paged out or moved.
This flag is set when DPhysicalPinMapping objects are created.
EPhysicalPinningMapping = 1<<8,
Flag set during object construction to indicate that this mapping is of
class #DLargeMapping.
Note that #DLargeMapping is derived from #DCoarseMapping, therefore presence of this flag
implies presence of #ECoarseMapping as well.
ELargeMapping = 1<<9,
Bitmask of values from enum #TPteType which will be used to calculate
the correct attributes for any page table entries this mapping uses.
FORCE_INLINE TUint8& PteType()
	{ return iLink.iSpare1; }
Bitmask of values from enum #TFlags.
The flags occupy 16 bits and are stored in iLink.iSpare2 and iLink.iSpare3.
FORCE_INLINE TUint16& Flags()
	{ return (TUint16&)iLink.iSpare2; }
Return the memory object to which this mapping is currently attached.
@pre MmuLock is held. (If aNoCheck==false)
FORCE_INLINE DMemoryObject* Memory(TBool aNoCheck=false)
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
Return true if the mapping is currently attached to a memory object.
FORCE_INLINE TBool IsAttached()
	{ return iLink.IsLinked(); }
Return true if the mapping is being, or has been, detached from a memory object.
The mapping may or may not still be attached to a memory object, i.e. #IsAttached
may return either true or false.
FORCE_INLINE TBool BeingDetached()
	{ return Flags()&EDetaching; }
Return the mapping instance count.
@see #iMapInstanceCount.
FORCE_INLINE TUint MapInstanceCount()
	{ return iMapInstanceCount; }
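The accessor above supports the reuse-detection pattern described for
#iMapInstanceCount. A minimal illustrative sketch, not part of this header
('mapping' and the surrounding locking are assumptions):
	TUint instanceCount = mapping->MapInstanceCount();
	// ... work which may temporarily release the MmuLock ...
	if(instanceCount != mapping->MapInstanceCount())
		return; // the mapping was detached and reused, so abandon the operation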
Return true if this mapping provides read only access to memory.
FORCE_INLINE TBool IsReadOnly()
	{ return !(PteType()&EPteTypeWritable); }
#ifdef MMU_SUPPORTS_EXECUTE_NEVER
Return true if this mapping provides access to memory which allows
code to be executed from it.
FORCE_INLINE TBool IsExecutable()
	{ return (PteType()&EPteTypeExecutable); }
Return true if this is a coarse mapping, in other words it is an instance of #DCoarseMapping or
#DLargeMapping.
FORCE_INLINE TBool IsCoarse()
	{ return Flags()&ECoarseMapping; }
Return true if this mapping is a large mapping, in other words an instance of #DLargeMapping.
Note that all large mappings are also coarse mappings.
FORCE_INLINE TBool IsLarge()
	{ return Flags()&ELargeMapping; }
Return true if this mapping pins the memory it maps.
FORCE_INLINE TBool IsPinned()
	{ return Flags()&EPinned; }
Return true if this mapping physically pins the memory it maps.
FORCE_INLINE TBool IsPhysicalPinning()
	{ return Flags()&EPhysicalPinningMapping; }
Return the access permissions which this mapping uses to map memory.
FORCE_INLINE TMappingPermissions Permissions()
	{ return Mmu::PermissionsFromPteType(PteType()); }
Link this mapping to a memory object.
This is called by the memory object during processing of #Attach.
@param aMemory The memory object the mapping is being attached to.
@param aMappingList The list to add this mapping to.
@pre MmuLock is held.
@pre Mapping list lock is held.
void LinkToMemory(DMemoryObject* aMemory, TMappingList& aMappingList);
Unlink this mapping from the memory object it was previously linked to with
#LinkToMemory.
This is called by the memory object during processing of #Detach.
@param aMappingList The list that the mapping appears on.
void UnlinkFromMemory(TMappingList& aMappingList);
Get the physical address(es) for a region of pages in this mapping.
@param aIndex Page index, within the mapping, for start of the region.
@param aCount Number of pages in the region.
@param aPhysicalAddress On success, this value is set to one of two values.
If the specified region is physically contiguous,
the value is the physical address of the first page
in the region. If the region is discontiguous, the
value is set to KPhysAddrInvalid.
@param aPhysicalPageList If not zero, this points to an array of TPhysAddr
objects. On success, this array will be filled
with the addresses of the physical pages which
contain the specified region. If aPhysicalPageList is
zero, then the function will fail with
KErrNotFound if the specified region is not
physically contiguous.
@return 0 if successful and the whole region is physically contiguous.
1 if successful but the region isn't physically contiguous.
KErrNotFound, if any page in the region is not present,
otherwise one of the system wide error codes.
@pre This mapping must have been attached to a memory object with #Pin.
TInt PhysAddr(TUint aIndex, TUint aCount, TPhysAddr& aPhysicalAddress, TPhysAddr* aPhysicalPageList);
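One way a caller might interpret #PhysAddr's return value, as documented above.
This is an illustrative sketch only; 'mapping', 'count' and 'pageList' are
assumptions, and the mapping is assumed to have been attached with #Pin:
	TPhysAddr physAddr;
	TInt r = mapping->PhysAddr(0, count, physAddr, pageList);
	if(r==0)
		{
		// whole region is physically contiguous, starting at physAddr
		}
	else if(r==1)
		{
		// region is discontiguous; physAddr==KPhysAddrInvalid and the
		// individual page addresses are in pageList
		}
	else
		{
		// r is KErrNotFound or another system wide error code
		}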
@param aType Initial value for #Flags.
DMemoryMappingBase(TUint aType);
Attach this mapping to a memory object so that it maps a specified region of its memory.
@param aMemory The memory object.
@param aIndex The page index of the first page of memory to be mapped by the mapping.
@param aCount The number of pages of memory to be mapped by the mapping.
@return KErrNone if successful, otherwise one of the system wide error codes.
TInt Attach(DMemoryObject* aMemory, TUint aIndex, TUint aCount);
Remove this mapping from the memory object it was previously attached to by #Attach.
Update the page table entries corresponding to this mapping to add entries for
a specified set of memory pages.
This method is called by DMemoryObject::MapPages to update each mapping attached
to a memory object whenever new pages of memory are added. However, it won't be
called for any mapping with the #EPinned attribute as such mappings are unchanging.
@param aPages An RPageArray::TIter which refers to a range of pages
in a memory object. This has been clipped to fit within
the range of pages mapped by this mapping.
Only array entries which have state RPageArray::ECommitted
should be mapped into the mapping's page tables.
@param aMapInstanceCount The instance of this mapping which is to be updated.
Whenever this no longer matches the current #MapInstanceCount
the function must not update any more of the mapping's
page table entries (but must still return KErrNone).
@return KErrNone if successful, otherwise one of the system wide error codes.
virtual TInt MapPages(RPageArray::TIter aPages, TUint aMapInstanceCount) =0;
Update the page table entries corresponding to this mapping to remove entries for
a specified set of memory pages.
This method is called by DMemoryObject::UnmapPages to update each mapping attached
to a memory object whenever pages of memory are removed.
@param aPages An RPageArray::TIter which refers to a range of pages
in a memory object. This has been clipped to fit within
the range of pages mapped by this mapping.
Only array entries which return true for
RPageArray::TargetStateIsDecommitted should be unmapped
from the mapping's page tables.
@param aMapInstanceCount The instance of this mapping which is to be updated.
Whenever this no longer matches the current #MapInstanceCount
the function must not update any more of the mapping's
page table entries.
virtual void UnmapPages(RPageArray::TIter aPages, TUint aMapInstanceCount) =0;
Update the page table entry corresponding to this mapping to update an entry for a specified
page that has just been moved or shadowed.
@param aPageArray The page array entry of the page in a memory object.
Only array entries which have a target state of
RPageArray::ECommitted should be mapped into the
mapping's page tables.
@param aIndex The index of the page in the memory object.
@param aMapInstanceCount The instance of this mapping which is to be updated.
Whenever this no longer matches the current #MapInstanceCount
the function must not update any more of the mapping's
page table entries.
@param aInvalidateTLB Set to ETrue when the TLB entries associated with this page
should be invalidated. This must be done when there is
already a valid pte for this page, i.e. if the page is still
mapped.
virtual void RemapPage(TPhysAddr& aPageArray, TUint aIndex, TUint aMapInstanceCount, TBool aInvalidateTLB)=0;
Update the page table entries corresponding to this mapping to apply access restrictions
to a specified set of memory pages.
This method is called by DMemoryObject::RestrictPages to update each mapping attached
to a memory object whenever pages of memory are restricted.
@param aPages An RPageArray::TIter which refers to a range of pages
in a memory object. This has been clipped to fit within
the range of pages mapped by this mapping.
Only array entries which return true for
RPageArray::TargetStateIsDecommitted should be unmapped
from the mapping's page tables.
@param aMapInstanceCount The instance of this mapping which is to be updated.
Whenever this no longer matches the current #MapInstanceCount
the function must not update any more of the mapping's
page table entries.
virtual void RestrictPagesNA(RPageArray::TIter aPages, TUint aMapInstanceCount) =0;
Update the page table entries corresponding to this mapping to add entries for
a specified set of demand paged memory pages following a 'page in' or memory
pinning operation.
@param aPages An RPageArray::TIter which refers to a range of pages
in a memory object. This will be within the range of pages
mapped by this mapping.
Only array entries which have state RPageArray::ECommitted
should be mapped into the mapping's page tables.
@param aPinArgs The resources required to pin any page tables the mapping uses.
Page tables must be pinned if \a aPinArgs.iPinnedPageTables is
not the null pointer, in which case the virtual addresses
of the pinned page tables must be stored in the array this points to.
\a aPinArgs.iReadOnly is true if write access permissions
are not needed.
@return KErrNone if successful, otherwise one of the system wide error codes.
virtual TInt PageIn(RPageArray::TIter aPages, TPinArgs& aPinArgs, TUint aMapInstanceCount) =0;
Update the page table entry corresponding to this mapping to add an entry for
a specified page which is in the process of being moved.
@param aPageArrayPtr The page array entry for the page to be mapped, which must be
within this mapping's range of pages.
Only array entries which have a target state of
RPageArray::ECommitted should be mapped into the mapping's
page tables.
@param aIndex The index of the page.
@return ETrue if successful, EFalse otherwise.
virtual TBool MovingPageIn(TPhysAddr& aPageArrayPtr, TUint aIndex)=0;
In debug builds, dump information about this mapping to the kernel trace port.
Update this mapping's MMU data structures to map all pages of memory
currently committed to the memory object (#iMemory) in the region covered
by this mapping.
This method is called by #Attach after the mapping has been linked
into the memory object.
@return KErrNone if successful, otherwise one of the system wide error codes.
virtual TInt DoMap() =0;
Update this mapping's MMU data structures to unmap all pages of memory.
This method is called by #Detach before the mapping has been unlinked
from the memory object but after the #EDetaching flag has been set.
virtual void DoUnmap() =0;
For pinned mappings, this virtual method is called by #Attach in order to pin
pages of memory if required. This is called after the mapping has been linked
into the memory object but before #DoMap.
The default implementation of this method simply calls DMemoryManager::Pin.
@param aPinArgs The resources to use for pinning. This has sufficient replacement
pages allocated to pin every page the mapping covers, and the
value of \a aPinArgs.iReadOnly has been set to correspond to the
mapping's access permissions.
@return KErrNone if successful, otherwise one of the system wide error codes.
virtual TInt DoPin(TPinArgs& aPinArgs);
For pinned mappings, this virtual method is called by #Detach in order to unpin
pages of memory if required. This is called before the mapping has been unlinked
from the memory object but after #DoUnmap.
The default implementation of this method simply calls DMemoryManager::Unpin.
@param aPinArgs The resources used for pinning. The replacement pages allocated
to this will be increased for each page which became completely
unpinned.
virtual void DoUnpin(TPinArgs& aPinArgs);
Base class for memory mappings which map memory contents into an address space.
This provides methods for allocating virtual memory and holds the attributes needed
for MMU page table entries.
class DMemoryMapping : public DMemoryMappingBase
The page directory entry (PDE) value for use when mapping this mapping's page tables.
The physical address component of this value is zero, so a page table's physical
address can be simply ORed in.
This could potentially be removed (see DMemoryMapping::PdeType()).
The page table entry (PTE) value for use when mapping pages into this mapping.
The physical address component of this value is zero, so a page's physical
address can be simply ORed in.
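A sketch of the 'OR in the physical address' usage described above. Illustrative
only: 'mapping', 'pageTablePhys' and 'pagePhys' are assumptions, and 'blankPte'
stands for the blank PTE member documented above (name assumed):
	TPde pde = mapping.BlankPde() | pageTablePhys; // PDE which maps a page table
	TPte pte = blankPte | pagePhys;                // PTE which maps a page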
Start of the virtual address region allocated for use by this mapping,
ORed with the OS ASID of the address space this lies in.
Note, the address at which memory is mapped (#iLinAddrAndOsAsid) may be different
to this allocated address due to page colouring restrictions.
TLinAddr iAllocatedLinAddrAndOsAsid;
Size of the virtual address region allocated for use by this mapping.
@see iAllocatedLinAddrAndOsAsid
TUint iAllocatedSize;
Start of the virtual address region that this mapping is currently
mapping memory at, ORed with the OS ASID of the address space this lies in.
This value is set by #Map which is called from #Attach when the mapping
is attached to a memory object. The address used may be different to
#iAllocatedLinAddrAndOsAsid due to page colouring restrictions.
The size of the region mapped is #iSizeInPages.
Note, access to this value is through #Base() and #OsAsid().
TLinAddr iLinAddrAndOsAsid;
Second phase constructor.
The main function of this is to allocate a virtual address region for the mapping
and to add it to an address space.
@param aAttributes The attributes of the memory which this mapping is intended to map.
This is only needed to set up #PdeType, which is required for correct
virtual address allocation, so in practice the only relevant attribute
is to set EMemoryAttributeUseECC if required, else use
EMemoryAttributeStandard.
@param aFlags A combination of the options from enum TMappingCreateFlags.
@param aOsAsid The OS ASID of the address space the mapping is to be added to.
@param aAddr The virtual address to use for the mapping, or zero if this is
to be allocated by this function.
@param aSize The maximum size of memory, in bytes, this mapping will be used to
map. This determines the size of the virtual address region the
mapping allocates.
@param aColourOffset The byte offset within a memory object's memory at which this mapping
is to start. This is used to adjust virtual memory allocation to
meet page colouring restrictions. If this value is not known leave
this argument unspecified; however, it must be specified if \a aAddr
is specified.
@return KErrNone if successful, otherwise one of the system wide error codes.
TInt Construct(TMemoryAttributes aAttributes, TMappingCreateFlags aFlags, TInt aOsAsid, TLinAddr aAddr, TUint aSize, TLinAddr aColourOffset=~(TLinAddr)0);
Add this mapping to a memory object so that it maps a specified region of its memory.
Most of the action of this method is performed by #Attach.
@param aMemory The memory object.
@param aIndex The page index of the first page of memory to be mapped by the mapping.
@param aCount The number of pages of memory to be mapped by the mapping.
@param aPermissions The memory access permissions to apply to the mapping.
@return KErrNone if successful, otherwise one of the system wide error codes.
TInt Map(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TMappingPermissions aPermissions);
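A hedged sketch of the construct-then-map sequence described above, for some
concrete DMemoryMapping-derived object 'mapping' (how it is created, and the
values of 'createFlags', 'osAsid', 'maxSize', 'memory', 'index', 'count' and
'permissions', are assumptions for illustration):
	TInt r = mapping->Construct(EMemoryAttributeStandard, createFlags, osAsid,
								0, maxSize); // aAddr==0: allocate a virtual address
	if(r==KErrNone)
		r = mapping->Map(memory, index, count, permissions);
	// on success, the memory is accessible at mapping->Base() in that address space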
Remove this mapping from the memory object it was previously added to by #Map.
Most of the action of this method is performed by #Detach.
Return the OS ASID for the address space that this mapping is currently mapping memory in.
FORCE_INLINE TInt OsAsid()
	__NK_ASSERT_DEBUG(iLinAddrAndOsAsid); // check mapping has been added to an address space
	return iLinAddrAndOsAsid&KPageMask;
Return the starting virtual address that this mapping is currently mapping memory at.
The size of the region mapped is #iSizeInPages.
FORCE_INLINE TLinAddr Base()
	__NK_ASSERT_DEBUG(iLinAddrAndOsAsid); // check mapping has been added to an address space
	return iLinAddrAndOsAsid&~KPageMask;
Return #Base()|#OsAsid()
FORCE_INLINE TLinAddr LinAddrAndOsAsid()
	__NK_ASSERT_DEBUG(iLinAddrAndOsAsid); // check mapping has been added to an address space
	return iLinAddrAndOsAsid;
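The packing used by the three accessors above: the page-aligned base address and
the OS ASID share a single TLinAddr value. Illustrative only ('mapping' assumed):
	TLinAddr v = mapping.LinAddrAndOsAsid();
	TLinAddr base = v & ~KPageMask; // same result as mapping.Base()
	TInt asid     = v &  KPageMask; // same result as mapping.OsAsid()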
FORCE_INLINE TBool IsUserMapping()
	// Note: must be usable before the mapping has been added to an address space
	return (PteType() & (EPteTypeUserAccess|EPteTypeGlobal)) == EPteTypeUserAccess;
FORCE_INLINE TPde BlankPde()
Emit BTrace traces identifying this mapping's virtual address usage.
In debug builds, dump information about this mapping to the kernel trace port.
Function to return a page table pointer for the specified linear address and
index to this mapping.
This is called by #Epoc::MovePhysicalPage when moving page table or page table info pages.
@param aLinAddr The linear address to find the page table entry for.
@param aMemoryIndex The memory object index of the page to find the page
table entry for.
@return A pointer to the page table entry; if the page table entry couldn't
be found this will be NULL.
virtual TPte* FindPageTable(TLinAddr aLinAddr, TUint aMemoryIndex)=0;
@param aType Initial value for #Flags.
DMemoryMapping(TUint aType);
This destructor removes the mapping from any address space it was added to and
frees any virtual addresses allocated to it.
Free any resources owned by this mapping, i.e. allow Construct() to be used
on this mapping at a new address etc.
Allocate virtual addresses for this mapping to use.
This is called from #Construct and the arguments to this function are the same.
On success, iAllocatedLinAddrAndOsAsid and iAllocatedSize will be initialised.
virtual TInt AllocateVirtualMemory(TMappingCreateFlags aFlags, TInt aOsAsid, TLinAddr aAddr, TUint aSize, TLinAddr aColourOffset);
Free the virtual addresses allocated to this mapping with AllocateVirtualMemory.
virtual void FreeVirtualMemory();
A memory mapping to map a 'chunk' aligned region of a DCoarseMemory object into
an address space. A 'chunk' is the size of memory mapped by a whole MMU page table
and is #KChunkSize bytes.
These mappings make use of page tables owned by a DCoarseMemory and when
they are attached to a memory object they are linked into
DCoarseMemory::DPageTables::iMappings not DCoarseMemory::iMappings.
class DCoarseMapping : public DMemoryMapping
DCoarseMapping(TUint aFlags);
// from DMemoryMappingBase...
virtual TInt MapPages(RPageArray::TIter aPages, TUint aMapInstanceCount); ///< Not implemented. Faults in debug builds.
virtual void UnmapPages(RPageArray::TIter aPages, TUint aMapInstanceCount); ///< Not implemented. Faults in debug builds.
virtual void RemapPage(TPhysAddr& aPageArray, TUint aIndex, TUint aMapInstanceCount, TBool aInvalidateTLB); ///< Not implemented. Faults in debug builds.
virtual void RestrictPagesNA(RPageArray::TIter aPages, TUint aMapInstanceCount); ///< Not implemented. Faults in debug builds.
virtual TInt PageIn(RPageArray::TIter aPages, TPinArgs& aPinArgs, TUint aMapInstanceCount);
virtual TBool MovingPageIn(TPhysAddr& aPageArrayPtr, TUint aIndex);
virtual TInt DoMap();
virtual void DoUnmap();
// from DMemoryMapping...
virtual TPte* FindPageTable(TLinAddr aLinAddr, TUint aMemoryIndex);
A memory mapping to map a page aligned region of a memory object into
an address space. This may be used with any memory object: DFineMemory or DCoarseMemory.
class DFineMapping : public DMemoryMapping
// from DMemoryMappingBase...
virtual TInt MapPages(RPageArray::TIter aPages, TUint aMapInstanceCount);
virtual void UnmapPages(RPageArray::TIter aPages, TUint aMapInstanceCount);
virtual void RemapPage(TPhysAddr& aPageArray, TUint aIndex, TUint aMapInstanceCount, TBool aInvalidateTLB);
virtual void RestrictPagesNA(RPageArray::TIter aPages, TUint aMapInstanceCount);
virtual TInt PageIn(RPageArray::TIter aPages, TPinArgs& aPinArgs, TUint aMapInstanceCount);
virtual TBool MovingPageIn(TPhysAddr& aPageArrayPtr, TUint aIndex);
virtual TInt DoMap();
virtual void DoUnmap();
// from DMemoryMapping...
Allocate virtual addresses for this mapping to use.
In addition to performing the action of DMemoryMapping::AllocateVirtualMemory
this will also allocate all permanent page tables for the mapping if it has attribute
#EPermanentPageTables.
virtual TInt AllocateVirtualMemory(TMappingCreateFlags aFlags, TInt aOsAsid, TLinAddr aAddr, TUint aSize, TLinAddr aColourOffset);
Free the virtual addresses and permanent page tables allocated to this mapping with
AllocateVirtualMemory.
virtual void FreeVirtualMemory();
virtual TPte* FindPageTable(TLinAddr aLinAddr, TUint aMemoryIndex);
Allocate all the page tables required for this mapping. This is called by
AllocateVirtualMemory if the #EPermanentPageTables attribute is set.
Each page table for the virtual address region used by the mapping is
allocated if not already present. The permanence count of any page table
(SPageTableInfo::iPermanenceCount) is then incremented so that it is not
freed even when it no longer maps any pages.
If successful, the #EPageTablesAllocated flag in #Flags will be set.
@return KErrNone if successful, otherwise one of the system wide error codes.
TInt AllocatePermanentPageTables();
Free all permanent page tables allocated to this mapping.
This reverses the action of #AllocatePermanentPageTables by decrementing
the permanence count for each page table and freeing it if it is no longer in use.
void FreePermanentPageTables();
Free a range of permanent page tables.
This is an implementation factor for FreePermanentPageTables and
AllocatePermanentPageTables. It decrements the permanence count
for each page table and frees it if it is no longer in use.
@param aFirstPde The address of the page directory entry which refers to
the first page table to be freed.
@param aLastPde The address of the page directory entry which refers to
the last page table to be freed.
void FreePermanentPageTables(TPde* aFirstPde, TPde* aLastPde);
Check that the contents of the page table are valid.
@param aPt The page table to validate.
void ValidatePageTable(TPte* aPt, TLinAddr aAddr);
Get the page table being used to map a specified virtual address, if it exists.
@param aAddr A virtual address in the region allocated to this mapping.
@return The virtual address of the page table mapping \a aAddr,
or the null pointer if one wasn't found.
TPte* GetPageTable(TLinAddr aAddr);
Get the page table being used to map a specified virtual address, allocating
a new one if it didn't previously exist.
@param aAddr A virtual address in the region allocated to this mapping.
@return The virtual address of the page table mapping \a aAddr,
or the null pointer if one wasn't found and couldn't be allocated.
TPte* GetOrAllocatePageTable(TLinAddr aAddr);
Get and pin the page table being used to map a specified virtual address,
allocating a new one if it didn't previously exist.
@param aAddr A virtual address in the region allocated to this mapping.
@param aPinArgs The resources required to pin the page table.
On success, the page table will have been appended to
\a aPinArgs.iPinnedPageTables.
@return The virtual address of the page table mapping \a aAddr,
or the null pointer if one wasn't found and couldn't be allocated.
TPte* GetOrAllocatePageTable(TLinAddr aAddr, TPinArgs& aPinArgs);
Allocate a single page table.
@param aAddr The virtual address the page table will be used to map.
@param aPdeAddress Address of the page directory entry which is to map
the newly allocated page table.
@param aPermanent True if the page table's permanence count is to be incremented.
@return The virtual address of the page table if it was successfully allocated,
otherwise the null pointer.
TPte* AllocatePageTable(TLinAddr aAddr, TPde* aPdeAddress, TBool aPermanent=false);
Free a single page table if it is unused.
@param aPdeAddress Address of the page directory entry (PDE) which maps the page table.
If the page table is freed, this PDE will be set to an 'unallocated' value.
void FreePageTable(TPde* aPdeAddress);
A mapping which maps any memory into the kernel address space and provides access to
the physical addresses used by a memory object.
These mappings are always of the 'pinned' type to prevent the obtained physical addresses
from becoming invalid.
class DKernelPinMapping : public DFineMapping
TInt Construct(TUint aReserveSize);
TInt MapAndPin(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TMappingPermissions aPermissions);
void UnmapAndUnpin();
TInt iReservePages; ///< The number of pages this mapping is able to map with its reserved resources (page tables etc.).
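A hedged usage sketch for DKernelPinMapping, based on the description above:
construct with reserved resources, pin and map a region of a memory object into
the kernel address space, then release it. 'mapping' is an already-created
DKernelPinMapping, and 'reserveSize', 'memory', 'index', 'count' and
'permissions' are assumptions:
	TInt r = mapping.Construct(reserveSize);
	if(r==KErrNone)
		r = mapping.MapAndPin(memory, index, count, permissions);
	if(r==KErrNone)
		{
		// the pinned memory is now accessible at mapping.Base()
		mapping.UnmapAndUnpin();
		}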
A mapping which provides access to the physical addresses used by a memory object
without mapping these at any virtual address accessible to software.
These mappings are always of the 'pinned' type to prevent the obtained physical addresses
from becoming invalid.
class DPhysicalPinMapping : public DMemoryMappingBase
DPhysicalPinMapping();
Attach this mapping to a memory object so that it pins a specified region of its memory.
Most of the action of this method is performed by #Attach.
@param aMemory The memory object.
@param aIndex The page index of the first page of memory to be pinned by the mapping.
@param aCount The number of pages of memory to be pinned by the mapping.
@param aPermissions The memory access permissions appropriate to the intended use
of the physical addresses. E.g. if the memory contents will be
changed, use EReadWrite. These permissions are used for error
checking, e.g. detecting attempted writes to read-only memory.
They are also used for optimising access to demand paged memory,
which is more efficient if only read-only access is required.
@return KErrNone if successful,
KErrNotFound if any part of the memory to be pinned was not present,
KErrNoMemory if there was insufficient memory,
otherwise one of the system wide error codes.
TInt Pin(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TMappingPermissions aPermissions);
Remove this mapping from the memory object it was previously added to by #Pin.
Most of the action of this method is performed by #Detach.
virtual void Unpin();
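A hedged sketch combining #Pin, #PhysAddr and #Unpin as documented above to
obtain stable physical addresses for a region of a memory object. 'pinMapping',
'memory', 'index', 'count', 'permissions' and 'pageList' are assumptions:
	TInt r = pinMapping.Pin(memory, index, count, permissions);
	if(r==KErrNone)
		{
		TPhysAddr physAddr;
		r = pinMapping.PhysAddr(0, count, physAddr, pageList);
		// ... use the physical address(es), e.g. to set up DMA ...
		pinMapping.Unpin();
		}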
// from DMemoryMappingBase...
virtual TInt MapPages(RPageArray::TIter aPages, TUint aMapInstanceCount); ///< Not implemented. Faults in debug builds.
virtual void UnmapPages(RPageArray::TIter aPages, TUint aMapInstanceCount); ///< Does nothing
virtual void RemapPage(TPhysAddr& aPageArray, TUint aIndex, TUint aMapInstanceCount, TBool aInvalidateTLB); ///< Not implemented. Faults in debug builds.
virtual void RestrictPagesNA(RPageArray::TIter aPages, TUint aMapInstanceCount); ///< Does nothing
virtual TInt PageIn(RPageArray::TIter aPages, TPinArgs& aPinArgs, TUint aMapInstanceCount); ///< Does nothing
virtual TBool MovingPageIn(TPhysAddr& aPageArrayPtr, TUint aIndex); ///< Not implemented. Faults in debug builds.
virtual TInt DoMap(); ///< Does nothing
virtual void DoUnmap(); ///< Does nothing
A mapping which pins memory in order to prevent demand paging related
page faults from occurring.
class DVirtualPinMapping : public DPhysicalPinMapping
DVirtualPinMapping();
~DVirtualPinMapping();
Create a new DVirtualPinMapping object suitable for pinning a specified number of pages.
If no maximum is specified (\a aMaxCount==0) then this object may be used to pin
any number of pages; however, this will require dynamic allocation of storage for
page table references.
@param aMaxCount The maximum number of pages which can be pinned, or zero for no maximum.
@return The newly created DVirtualPinMapping or the null pointer if there was
insufficient memory.
static DVirtualPinMapping* New(TUint aMaxCount);
Attach this mapping to a memory object so that it pins a specified region of its memory.
Additionally, pin the page tables in a specified mapping (\a aMapping) which
are being used to map these pages.
The result of this function is that access to the pinned memory through the virtual
addresses used by \a aMapping will not generate any demand paging related page faults.
@param aMemory The memory object.
@param aIndex The page index of the first page of memory to be pinned by the mapping.
@param aCount The number of pages of memory to be pinned by the mapping.
@param aPermissions The memory access permissions appropriate to the intended use
of the physical addresses. E.g. if the memory contents will be
changed, use EReadWrite. These permissions are used for error
checking, e.g. detecting attempted writes to read-only memory.
They are also used for optimising access to demand paged memory,
which is more efficient if only read-only access is required.
@param aMapping The mapping whose page tables are to be pinned. This must be
currently mapping the specified region of memory pages.
@param aMapInstanceCount The instance count of the mapping whose page tables are to be pinned.
@return KErrNone if successful,
KErrNotFound if any part of the memory to be pinned was not present,
KErrNoMemory if there was insufficient memory,
otherwise one of the system wide error codes.
TInt Pin(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TMappingPermissions aPermissions,
	DMemoryMappingBase* aMapping, TUint aMapInstanceCount);
Remove this mapping from the memory object it was previously added to by #Pin.
This will unpin any memory pages and page tables that were pinned.
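A hedged sketch of the virtual pinning sequence described above: create a
DVirtualPinMapping, pin a region through the mapping which currently maps it,
then unpin when paging faults are acceptable again. 'memory', 'index', 'count',
'permissions' and 'userMapping' are assumptions:
	DVirtualPinMapping* pin = DVirtualPinMapping::New(count);
	if(pin)
		{
		TInt r = pin->Pin(memory, index, count, permissions,
							userMapping, userMapping->MapInstanceCount());
		if(r==KErrNone)
			{
			// accesses to this region through 'userMapping' will not now
			// take demand paging related page faults
			pin->Unpin();
			}
		pin->Close(); // release the reference (Close() assumed from DReferenceCountedObject)
		}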
Return the maximum number of page tables which could be required to map
\a aPageCount pages. This is used by various resource reserving calculations.
static TUint MaxPageTables(TUint aPageCount);
In debug builds, dump information about this mapping to the kernel trace port.
virtual void Dump();
// from DMemoryMappingBase...
virtual void RemapPage(TPhysAddr& aPageArray, TUint aIndex, TUint aMapInstanceCount, TBool aInvalidateTLB); ///< Does nothing.
virtual TInt PageIn(RPageArray::TIter aPages, TPinArgs& aPinArgs, TUint aMapInstanceCount);
virtual TBool MovingPageIn(TPhysAddr& aPageArrayPtr, TUint aIndex); ///< Not implemented. Faults in debug builds.
virtual TInt DoPin(TPinArgs& aPinArgs);
virtual void DoUnpin(TPinArgs& aPinArgs);
Allocate memory to store pointers to all the page tables which map
\a aCount pages of memory. The pointer to the allocated memory
is stored at iAllocatedPinnedPageTables.
If iSmallPinnedPageTablesArray is large enough, this function doesn't
allocate any memory.
@return KErrNone if successful, otherwise KErrNoMemory.
TInt AllocPageTableArray(TUint aCount);
Delete iAllocatedPinnedPageTables.
void FreePageTableArray();
Return the address of the array storing pinned page tables.
This is either iSmallPinnedPageTablesArray or iAllocatedPinnedPageTables.
TPte** PageTableArray();
Unpin all the page tables which have been pinned by this mapping.
@param aPinArgs The resources used for pinning. The replacement pages allocated
to this will be increased for each page which became completely
unpinned.
void UnpinPageTables(TPinArgs& aPinArgs);
Temporary store for the mapping passed to #Pin.
DMemoryMappingBase* iPinVirtualMapping;
Temporary store for the mapping instance count passed to #Pin.
TUint iPinVirtualMapInstanceCount;
The number of page tables which are currently being pinned by this mapping.
This is the number of valid entries stored at PageTableArray.
TUint iNumPinnedPageTables;
The maximum number of pages which can be pinned by this mapping.
If this is zero, there is no maximum.
The memory allocated by this object for storing pointers to the page tables.
TPte** iAllocatedPinnedPageTables;
KSmallPinnedPageTableCount = 2 ///< Number of entries in iSmallPinnedPageTablesArray
A small array to use for storing pinned page tables.
This is an optimisation used for the typical case of pinning a small number of pages
to avoid dynamic allocation of memory.
TPte* iSmallPinnedPageTablesArray[KSmallPinnedPageTableCount];