sl@0: // Copyright (c) 2007-2009 Nokia Corporation and/or its subsidiary(-ies). sl@0: // All rights reserved. sl@0: // This component and the accompanying materials are made available sl@0: // under the terms of the License "Eclipse Public License v1.0" sl@0: // which accompanies this distribution, and is available sl@0: // at the URL "http://www.eclipse.org/legal/epl-v10.html". sl@0: // sl@0: // Initial Contributors: sl@0: // Nokia Corporation - initial contribution. sl@0: // sl@0: // Contributors: sl@0: // sl@0: // Description: sl@0: // sl@0: sl@0: /** sl@0: @file sl@0: @internalComponent sl@0: */ sl@0: sl@0: #ifndef MMAPPING_H sl@0: #define MMAPPING_H sl@0: sl@0: #include "mrefcntobj.h" sl@0: #include "mmappinglist.h" sl@0: #include "mpagearray.h" sl@0: sl@0: sl@0: sl@0: /** sl@0: Base class for memory mappings. sl@0: sl@0: This provides the methods for linking a mapping to a memory object sl@0: as well as the interface for updating the MMU page tables associated sl@0: with a mapping when the memory state changes. sl@0: */ sl@0: class DMemoryMappingBase : public DReferenceCountedObject sl@0: { sl@0: private: sl@0: /** sl@0: Memory object to which this mapping is currently attached. sl@0: Updates to the are protected by the MmuLock. sl@0: */ sl@0: DMemoryObject* iMemory; sl@0: sl@0: public: sl@0: /** sl@0: Link used to maintain list of mappings attached to a memory object. sl@0: */ sl@0: TMappingListLink iLink; sl@0: sl@0: /** sl@0: Offset, in page units, within the memory object's memory for start of this mapping. sl@0: */ sl@0: TUint iStartIndex; sl@0: sl@0: /** sl@0: Size of this mapping, in page units. sl@0: */ sl@0: TUint iSizeInPages; sl@0: sl@0: private: sl@0: /** sl@0: Instance count which is incremented every time a mapping is attached to a memory object. sl@0: When code is manipulating mappings, the instance count is used to detect that a sl@0: mapping has been reused and that the operation it is performing is no long needed. 
sl@0: */ sl@0: TUint iMapInstanceCount; sl@0: sl@0: public: sl@0: sl@0: /** sl@0: Bit flags stored in #Flags giving various state and attributes of the mapping. sl@0: */ sl@0: enum TFlags sl@0: { sl@0: /** sl@0: Flag set during object construction to indicate that this mapping is of sl@0: class #DCoarseMapping. sl@0: */ sl@0: ECoarseMapping = 1<<0, sl@0: sl@0: /** sl@0: Flag set during object construction to indicate that this mapping will pin sl@0: any memory pages it maps. This may not be used with coarse memory mappings. sl@0: */ sl@0: EPinned = 1<<1, sl@0: sl@0: /** sl@0: Pages have already been reserved for pinning, so when this mapping is attached sl@0: to a memory object no additional pages need to be reserved. Pre-reserving pages sl@0: is used to prevent the possibility of failing to pin due to an out of memory sl@0: condition. It is essential that the users of these mappings ensure that there sl@0: are enough reserved pages in the paging pool to meet the maximum mapping size sl@0: used. sl@0: */ sl@0: EPinningPagesReserved = 1<<2, sl@0: sl@0: /** sl@0: Pages have been successfully pinned by this mapping. This is set after demand sl@0: paged memory has been succeeded pinned and is used to indicate that the pages sl@0: need unpinning again when the mapping is later unmapped. sl@0: */ sl@0: EPagesPinned = 1<<3, sl@0: sl@0: /** sl@0: Flag set during object construction to indicate that MMU page tables are to sl@0: be permanently allocated for use by this mapping. Normally, page tables are sl@0: allocated as needed to map memory which can result in out-of-memory errors sl@0: when mapping memory pages. sl@0: */ sl@0: EPermanentPageTables = 1<<4, sl@0: sl@0: /** sl@0: Permanent page tables have been successfully been allocated for this mapping. sl@0: This flag is used to track allocation so they can be released when the mapping sl@0: is destroyed. 
sl@0: */ sl@0: EPageTablesAllocated = 1<<5, sl@0: sl@0: /** sl@0: For pinned mappings (EPinned) this flag is set whenever the mapping prevents sl@0: any pages of memory from being fully decommitted from a memory object. When a sl@0: mapping is finally unmapped from the memory object this flag is checked, and, sl@0: if set, further cleanup of the decommitted pages triggered. sl@0: */ sl@0: EPageUnmapVetoed = 1<<6, sl@0: sl@0: /** sl@0: Mapping is being, or has been, detached from a memory object. sl@0: When set, operations on the mapping should act as though the mapping is no sl@0: longer attached to a memory object. Specifically, no further pages of memory sl@0: should be mapped into this mapping. sl@0: sl@0: This flag is only set when the MmuLock is held. sl@0: */ sl@0: EDetaching = 1<<7, sl@0: sl@0: /** sl@0: This mapping is a physical pinning mapping. The pages it pins sl@0: cannot be paged out or moved. sl@0: sl@0: This flag is set when DPhysicalPinMapping objects are created. sl@0: */ sl@0: EPhysicalPinningMapping = 1<<8, sl@0: sl@0: /** sl@0: Flag set during object construction to indicate that this mapping is of sl@0: class #DLargeMapping. sl@0: sl@0: Note that #DLargeMapping is derived from #DCoarseMapping, therefore presence of this flag sl@0: implies presence of #ECoarseMapping as well. sl@0: */ sl@0: ELargeMapping = 1<<9, sl@0: }; sl@0: sl@0: /** sl@0: Bitmask of values from enum #TPteType which will be used to calculate sl@0: the correct attributes for any page table entries this mapping uses. sl@0: */ sl@0: FORCE_INLINE TUint8& PteType() sl@0: { return iLink.iSpare1; } sl@0: sl@0: /** sl@0: Bitmask of values from enum #TFlags. sl@0: The flags 16 bits and are stored in iLink.iSpare2 and iLink.iSpare3. sl@0: */ sl@0: FORCE_INLINE TUint16& Flags() sl@0: { return (TUint16&)iLink.iSpare2; } sl@0: sl@0: public: sl@0: /** sl@0: Return the memory object to which this mapping is currently attached. sl@0: sl@0: @pre MmuLock is held. 
(If aNoCheck==false) sl@0: */ sl@0: FORCE_INLINE DMemoryObject* Memory(TBool aNoCheck=false) sl@0: { sl@0: if(!aNoCheck) sl@0: __NK_ASSERT_DEBUG(MmuLock::IsHeld()); sl@0: return iMemory; sl@0: } sl@0: sl@0: /** sl@0: Return true if the mapping is currently attached to a memory object. sl@0: */ sl@0: FORCE_INLINE TBool IsAttached() sl@0: { return iLink.IsLinked(); } sl@0: sl@0: /** sl@0: Return true if the mapping is being, or has been, detached from a memory object. sl@0: The mapping may or may not still be attached to a memory object, i.e. #IsAttached sl@0: is indeterminate. sl@0: */ sl@0: FORCE_INLINE TBool BeingDetached() sl@0: { return Flags()&EDetaching; } sl@0: sl@0: /** sl@0: Return the mapping instance count. sl@0: @see #iMapInstanceCount. sl@0: */ sl@0: FORCE_INLINE TUint MapInstanceCount() sl@0: { return iMapInstanceCount; } sl@0: sl@0: /** sl@0: Return true if this mapping provides read only access to memory. sl@0: */ sl@0: FORCE_INLINE TBool IsReadOnly() sl@0: { return !(PteType()&EPteTypeWritable); } sl@0: sl@0: #ifdef MMU_SUPPORTS_EXECUTE_NEVER sl@0: /** sl@0: Return true if this mapping provides access to memory which allows sl@0: code to be executed from it. sl@0: */ sl@0: FORCE_INLINE TBool IsExecutable() sl@0: { return (PteType()&EPteTypeExecutable); } sl@0: #endif sl@0: sl@0: /** sl@0: Return true if this is a coarse mapping, in other words it is an instance of #DCoarseMapping or sl@0: #DLargeMapping. sl@0: */ sl@0: FORCE_INLINE TBool IsCoarse() sl@0: { return Flags()&ECoarseMapping; } sl@0: sl@0: /** sl@0: Return true if this mapping is a large mapping, in other words an instance of #DLargeMapping. sl@0: sl@0: Note that all large mappings are also coarse mappings. sl@0: */ sl@0: FORCE_INLINE TBool IsLarge() sl@0: { return Flags()&ELargeMapping; } sl@0: sl@0: /** sl@0: Return true if this mapping pins the memory it maps. 
sl@0: */ sl@0: FORCE_INLINE TBool IsPinned() sl@0: { return Flags()&EPinned; } sl@0: sl@0: /** sl@0: Return true if this mapping physically pins the memory it maps. sl@0: */ sl@0: FORCE_INLINE TBool IsPhysicalPinning() sl@0: { return Flags()&EPhysicalPinningMapping; } sl@0: sl@0: /** sl@0: Return the access permissions which this mapping uses to maps memory. sl@0: */ sl@0: FORCE_INLINE TMappingPermissions Permissions() sl@0: { return Mmu::PermissionsFromPteType(PteType()); } sl@0: sl@0: /** sl@0: Link this mapping to a memory object. sl@0: sl@0: This is called by the memory object during processing of #Attach. sl@0: sl@0: @param aMemory The memory object the mapping is being attached to. sl@0: @param aMappingList The list to add this mapping to. sl@0: sl@0: @pre MmuLock is held. sl@0: @pre Mapping list lock is held. sl@0: */ sl@0: void LinkToMemory(DMemoryObject* aMemory, TMappingList& aMappingList); sl@0: sl@0: /** sl@0: Unlink this mapping from the memory object it was previously linked to with sl@0: #LinkToMemory. sl@0: sl@0: This is called by the memory object during processing of #Detach. sl@0: sl@0: @param aMappingList The list that the mapping appears on. sl@0: */ sl@0: void UnlinkFromMemory(TMappingList& aMappingList); sl@0: sl@0: /** sl@0: Get the physical address(es) for a region of pages in this mapping. sl@0: sl@0: @param aIndex Page index, within the mapping, for start of the region. sl@0: @param aCount Number of pages in the region. sl@0: @param aPhysicalAddress On success, this value is set to one of two values. sl@0: If the specified region is physically contiguous, sl@0: the value is the physical address of the first page sl@0: in the region. If the region is discontiguous, the sl@0: value is set to KPhysAddrInvalid. sl@0: @param aPhysicalPageList If not zero, this points to an array of TPhysAddr sl@0: objects. On success, this array will be filled sl@0: with the addresses of the physical pages which sl@0: contain the specified region. 
If aPageList is sl@0: zero, then the function will fail with sl@0: KErrNotFound if the specified region is not sl@0: physically contiguous. sl@0: sl@0: @return 0 if successful and the whole region is physically contiguous. sl@0: 1 if successful but the region isn't physically contiguous. sl@0: KErrNotFound, if any page in the region is not present, sl@0: otherwise one of the system wide error codes. sl@0: sl@0: @pre This mapping must have been attached to a memory object with #Pin. sl@0: */ sl@0: TInt PhysAddr(TUint aIndex, TUint aCount, TPhysAddr& aPhysicalAddress, TPhysAddr* aPhysicalPageList); sl@0: sl@0: protected: sl@0: /** sl@0: @param aType Initial value for #Flags. sl@0: */ sl@0: DMemoryMappingBase(TUint aType); sl@0: sl@0: /** sl@0: Attach this mapping to a memory object so that it maps a specified region of its memory. sl@0: sl@0: @param aMemory The memory object. sl@0: @param aIndex The page index of the first page of memory to be mapped by the mapping. sl@0: @param aCount The number of pages of memory to be mapped by the mapping. sl@0: sl@0: @return KErrNone if successful, otherwise one of the system wide error codes. sl@0: */ sl@0: TInt Attach(DMemoryObject* aMemory, TUint aIndex, TUint aCount); sl@0: sl@0: /** sl@0: Remove this mapping from the memory object it was previously attached to by #Attach. sl@0: */ sl@0: void Detach(); sl@0: sl@0: public: sl@0: /** sl@0: Update the page table entries corresponding to this mapping to add entries for sl@0: a specified set of memory pages. sl@0: sl@0: This method is called by DMemoryObject::MapPages to update each mapping attached sl@0: to a memory object whenever new pages of memory are added. However, it won't be sl@0: called for any mapping with the #EPinned attribute as such mappings are unchanging. sl@0: sl@0: @param aPages An RPageArray::TIter which refers to a range of pages sl@0: in a memory object. This has been clipped to fit within sl@0: the range of pages mapped by this mapping. 
sl@0: Only array entries which have state RPageArray::ECommitted sl@0: should be mapped into the mapping's page tables. sl@0: sl@0: @param aMapInstanceCount The instance of this mapping which is to be updated. sl@0: Whenever this no longer matches the current #MapInstanceCount sl@0: the function must not update any more of the mapping's sl@0: page table entries, (but must still return KErrNone). sl@0: sl@0: @return KErrNone if successful, otherwise one of the system wide error codes. sl@0: */ sl@0: virtual TInt MapPages(RPageArray::TIter aPages, TUint aMapInstanceCount) =0; sl@0: sl@0: /** sl@0: Update the page table entries corresponding to this mapping to remove entries for sl@0: a specified set of memory pages. sl@0: sl@0: This method is called by DMemoryObject::UnmapPages to update each mapping attached sl@0: to a memory object whenever pages of memory are removed. sl@0: sl@0: @param aPages An RPageArray::TIter which refers to a range of pages sl@0: in a memory object. This has been clipped to fit within sl@0: the range of pages mapped by this mapping. sl@0: Only array entries which return true for sl@0: RPageArray::TargetStateIsDecommitted should be unmapped sl@0: from the mapping's page tables. sl@0: sl@0: @param aMapInstanceCount The instance of this mapping which is to be updated. sl@0: Whenever this no longer matches the current #MapInstanceCount sl@0: the function must not update any more of the mapping's sl@0: page table entries. sl@0: */ sl@0: virtual void UnmapPages(RPageArray::TIter aPages, TUint aMapInstanceCount) =0; sl@0: sl@0: /** sl@0: Update the page table entry corresponding to this mapping to update an entry for a specified sl@0: page that has just been moved or shadowed. sl@0: sl@0: @param aPages The page array entry of the page in a memory object. sl@0: Only array entries which have a target state of sl@0: RPageArray::ECommitted should be mapped into the sl@0: mapping's page tables. 
sl@0: sl@0: @param aIndex The index of the page in the memory object. sl@0: sl@0: @param aMapInstanceCount The instance of this mapping which is to be updated. sl@0: Whenever this no longer matches the current #MapInstanceCount sl@0: the function must not update any more of the mapping's sl@0: page table entries, (but must still return KErrNone). sl@0: sl@0: @param aInvalidateTLB Set to ETrue when the TLB entries associated with this page sl@0: should be invalidated. This must be done when there is sl@0: already a valid pte for this page, i.e. if the page is still sl@0: mapped. sl@0: */ sl@0: virtual void RemapPage(TPhysAddr& aPageArray, TUint aIndex, TUint aMapInstanceCount, TBool aInvalidateTLB)=0; sl@0: sl@0: /** sl@0: Update the page table entries corresponding to this mapping to apply access restrictions sl@0: to a specified set of memory pages. sl@0: sl@0: This method is called by DMemoryObject::RestrictPages to update each mapping attached sl@0: to a memory object whenever pages of memory are restricted. sl@0: sl@0: @param aPages An RPageArray::TIter which refers to a range of pages sl@0: in a memory object. This has been clipped to fit within sl@0: the range of pages mapped by this mapping. sl@0: Only array entries which return true for sl@0: RPageArray::TargetStateIsDecommitted should be unmapped sl@0: from the mapping's page tables. sl@0: sl@0: @param aMapInstanceCount The instance of this mapping which is to be updated. sl@0: Whenever this no longer matches the current #MapInstanceCount sl@0: the function must not update any more of the mapping's sl@0: page table entries. sl@0: */ sl@0: virtual void RestrictPagesNA(RPageArray::TIter aPages, TUint aMapInstanceCount) =0; sl@0: sl@0: /** sl@0: Update the page table entries corresponding to this mapping to add entries for sl@0: a specified set of demand paged memory pages following a 'page in' or memory sl@0: pinning operation. 
sl@0: sl@0: @param aPages An RPageArray::TIter which refers to a range of pages sl@0: in a memory object. This will be within the range of pages sl@0: mapped by this mapping. sl@0: Only array entries which have state RPageArray::ECommitted sl@0: should be mapped into the mapping's page tables. sl@0: sl@0: @param aPinArgs The resources required to pin any page tables the mapping uses. sl@0: Page table must be pinned if \a aPinArgs.iPinnedPageTables is sl@0: not the null pointer, in which case this the virtual address sl@0: of the pinned must be stored in the array this points to. sl@0: \a aPinArgs.iReadOnly is true if write access permissions sl@0: are not needed. sl@0: sl@0: @return KErrNone if successful, otherwise one of the system wide error codes. sl@0: */ sl@0: virtual TInt PageIn(RPageArray::TIter aPages, TPinArgs& aPinArgs, TUint aMapInstanceCount) =0; sl@0: sl@0: sl@0: /** sl@0: Update the page table entry corresponding to this mapping to add an entry for sl@0: a specified page which is in the process of being moved. sl@0: sl@0: @param aPageArrayPtr The page array entry for the page to be mapped which must be sl@0: within this mapping range of pages. sl@0: Only array entries which have a target state of sl@0: RPageArray::ECommitted should be mapped into the mapping's sl@0: page tables. sl@0: sl@0: @param aIndex The index of the page. sl@0: sl@0: @return ETrue if successful, EFalse otherwise. sl@0: */ sl@0: virtual TBool MovingPageIn(TPhysAddr& aPageArrayPtr, TUint aIndex)=0; sl@0: sl@0: sl@0: /** sl@0: In debug builds, dump information about this mapping to the kernel trace port. sl@0: */ sl@0: virtual void Dump(); sl@0: sl@0: private: sl@0: /** sl@0: Update this mapping's MMU data structures to map all pages of memory sl@0: currently committed to the memory object (#iMemory) in the region covered sl@0: by this mapping. sl@0: sl@0: This method is called by #Attach after the mapping has been linked sl@0: into the memory object. 
sl@0: sl@0: @return KErrNone if successful, otherwise one of the system wide error codes. sl@0: */ sl@0: virtual TInt DoMap() =0; sl@0: sl@0: /** sl@0: Update this mapping's MMU data structures to unmap all pages of memory. sl@0: sl@0: This method is called by #Detach before the mapping has been unlinked sl@0: from the memory object but after the #EDetaching flag has been set. sl@0: */ sl@0: virtual void DoUnmap() =0; sl@0: sl@0: protected: sl@0: /** sl@0: For pinned mapping, this virtual method is called by #Attach in order to pin sl@0: pages of memory if required. This is called after the mapping has been linked sl@0: into the memory object but before #DoMap. sl@0: sl@0: The default implementation of this method simply calls DMemoryManager::Pin. sl@0: sl@0: @param aPinArgs The resources to use for pinning. This has sufficient replacement sl@0: pages allocated to pin every page the mapping covers, and the sl@0: value of \a aPinArgs.iReadOnly has been set to correspond to the sl@0: mappings access permissions. sl@0: sl@0: @return KErrNone if successful, otherwise one of the system wide error codes. sl@0: */ sl@0: virtual TInt DoPin(TPinArgs& aPinArgs); sl@0: sl@0: /** sl@0: For pinned mapping, this virtual method is called by #Detach in order to unpin sl@0: pages of memory if required. This is called before the mapping has been unlinked sl@0: from the memory object but after #DoUnmap. sl@0: sl@0: The default implementation of this method simply calls DMemoryManager::Unpin. sl@0: sl@0: @param aPinArgs The resources used for pinning. The replacement pages allocated sl@0: to this will be increased for each page which was became completely sl@0: unpinned. sl@0: */ sl@0: virtual void DoUnpin(TPinArgs& aPinArgs); sl@0: }; sl@0: sl@0: sl@0: sl@0: /** sl@0: Base class for memory mappings which map memory contents into a address space. sl@0: sl@0: This provides methods for allocating virtual memory and holds the attributes needed sl@0: for MMU page table entries. 
sl@0: */ sl@0: class DMemoryMapping : public DMemoryMappingBase sl@0: { sl@0: protected: sl@0: /** sl@0: The page directory entry (PDE) value for use when mapping this mapping's page tables. sl@0: This value has the physical address component being zero, so a page table's physical sl@0: address can be simply ORed in. sl@0: sl@0: This could potentially be removed (see DMemoryMapping::PdeType()). sl@0: */ sl@0: TPde iBlankPde; sl@0: sl@0: /** sl@0: The page table entry (PTE) value for use when mapping pages into this mapping. sl@0: This value has the physical address component being zero, so a page's physical sl@0: address can be simply ORed in. sl@0: */ sl@0: TPte iBlankPte; sl@0: sl@0: /** sl@0: Start of the virtual address region allocated for use by this mapping sl@0: ORed with the OS ASID of the address space this lies in. sl@0: sl@0: Note, the address at which memory is mapped (#iLinAddrAndOsAsid) may be different sl@0: to this allocated address due to page colouring restrictions. sl@0: sl@0: @see iAllocatedSize sl@0: */ sl@0: TLinAddr iAllocatedLinAddrAndOsAsid; sl@0: sl@0: /** sl@0: Size of virtual address region memory allocated for use by this mapping. sl@0: sl@0: @see iAllocatedLinAddrAndOsAsid sl@0: */ sl@0: TUint iAllocatedSize; sl@0: sl@0: private: sl@0: /** sl@0: Start of the virtual address region that this mapping is currently sl@0: mapping memory at, ORed with the OS ASID of the address space this lies in. sl@0: sl@0: This value is set by #Map which is called from #Attach when the mapping sl@0: is attached to a memory object. The address used may be different to sl@0: #iAllocatedLinAddrAndOsAsid due to page colouring restrictions. sl@0: sl@0: The size of the region mapped is #iSizeInPages. sl@0: sl@0: Note, access to this value is through #Base() and #OsAsid(). sl@0: */ sl@0: TLinAddr iLinAddrAndOsAsid; sl@0: sl@0: public: sl@0: /** sl@0: Second phase constructor. 
sl@0: sl@0: The main function of this is to allocate a virtual address region for the mapping sl@0: and to add it to an address space. sl@0: sl@0: @param aAttributes The attributes of the memory which this mapping is intended to map. sl@0: This is only needed to setup #PdeType which is required for correct sl@0: virtual address allocation so in practice the only relevant attribute sl@0: is to set EMemoryAttributeUseECC if required, else use sl@0: EMemoryAttributeStandard. sl@0: sl@0: @param aFlags A combination of the options from enum TMappingCreateFlags. sl@0: sl@0: @param aOsAsid The OS ASID of the address space the mapping is to be added to. sl@0: sl@0: @param aAddr The virtual address to use for the mapping, or zero if this is sl@0: to be allocated by this function. sl@0: sl@0: @param aSize The maximum size of memory, in bytes, this mapping will be used to sl@0: map. This determines the size of the virtual address region the sl@0: mapping will use. sl@0: sl@0: @param aColourOffset The byte offset within a memory object's memory which this mapping sl@0: is to start. This is used to adjust virtual memory allocation to sl@0: meet page colouring restrictions. If this value is not known leave sl@0: this argument unspecified; however, it must be specified if \a aAddr sl@0: is specified. sl@0: sl@0: @return KErrNone if successful, otherwise one of the system wide error codes. sl@0: */ sl@0: TInt Construct(TMemoryAttributes aAttributes, TMappingCreateFlags aFlags, TInt aOsAsid, TLinAddr aAddr, TUint aSize, TLinAddr aColourOffset=~(TLinAddr)0); sl@0: sl@0: /** sl@0: Add this mapping to a memory object so that it maps a specified region of its memory. sl@0: sl@0: Most of the action of this method is performed by #Attach. sl@0: sl@0: @param aMemory The memory object. sl@0: @param aIndex The page index of the first page of memory to be mapped by the mapping. sl@0: @param aCount The number of pages of memory to be mapped by the mapping. 
sl@0: @param aPermissions The memory access permissions to apply to the mapping. sl@0: sl@0: @return KErrNone if successful, otherwise one of the system wide error codes. sl@0: */ sl@0: TInt Map(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TMappingPermissions aPermissions); sl@0: sl@0: /** sl@0: Remove this mapping from the memory object it was previously added to by #Map. sl@0: sl@0: Most of the action of this method is performed by #Detach. sl@0: */ sl@0: void Unmap(); sl@0: sl@0: /** sl@0: Return the OS ASID for the address space that this mapping is currently mapping memory in. sl@0: */ sl@0: FORCE_INLINE TInt OsAsid() sl@0: { sl@0: __NK_ASSERT_DEBUG(iLinAddrAndOsAsid); // check mapping has been added to an address space sl@0: return iLinAddrAndOsAsid&KPageMask; sl@0: } sl@0: sl@0: /** sl@0: Return starting virtual address that this mapping is currently mapping memory at. sl@0: The size of the region mapped is #iSizeInPages. sl@0: */ sl@0: FORCE_INLINE TLinAddr Base() sl@0: { sl@0: __NK_ASSERT_DEBUG(iLinAddrAndOsAsid); // check mapping has been added to an address space sl@0: return iLinAddrAndOsAsid&~KPageMask; sl@0: } sl@0: sl@0: /** sl@0: Return #Base()|#OsAsid() sl@0: */ sl@0: FORCE_INLINE TLinAddr LinAddrAndOsAsid() sl@0: { sl@0: __NK_ASSERT_DEBUG(iLinAddrAndOsAsid); // check mapping has been added to an address space sl@0: return iLinAddrAndOsAsid; sl@0: } sl@0: sl@0: FORCE_INLINE TBool IsUserMapping() sl@0: { sl@0: // Note: must be usable before the mapping has been added to an address space sl@0: return (PteType() & (EPteTypeUserAccess|EPteTypeGlobal)) == EPteTypeUserAccess; sl@0: } sl@0: sl@0: /** sl@0: Return #iBlankPde. sl@0: */ sl@0: FORCE_INLINE TPde BlankPde() sl@0: { sl@0: return iBlankPde; sl@0: } sl@0: sl@0: /** sl@0: Emit BTrace traces identifying this mappings virtual address usage. sl@0: */ sl@0: void BTraceCreate(); sl@0: sl@0: /** sl@0: In debug builds, dump information about this mapping to the kernel trace port. 
sl@0: */ sl@0: virtual void Dump(); sl@0: sl@0: /** sl@0: Function to return a page table pointer for the specified linear address and sl@0: index to this mapping. sl@0: sl@0: This is called by #Epoc::MovePhysicalPage when moving page table or page table info pages. sl@0: sl@0: @param aLinAddr The linear address to find the page table entry for. sl@0: @param aMemoryIndex The memory object index of the page to find the page sl@0: table entry for. sl@0: sl@0: @return A pointer to the page table entry, if the page table entry couldn't sl@0: be found this will be NULL sl@0: */ sl@0: virtual TPte* FindPageTable(TLinAddr aLinAddr, TUint aMemoryIndex)=0; sl@0: sl@0: protected: sl@0: /** sl@0: @param aType Initial value for #Flags. sl@0: */ sl@0: DMemoryMapping(TUint aType); sl@0: sl@0: /** sl@0: This destructor removes the mapping from any address space it was added to and sl@0: frees any virtual addresses allocated to it. sl@0: */ sl@0: ~DMemoryMapping(); sl@0: sl@0: /** sl@0: Free any resources owned by this mapping, i.e. allow Construct() to be used sl@0: on this mapping at a new address etc. sl@0: */ sl@0: void Destruct(); sl@0: sl@0: /** sl@0: Allocatate virtual addresses for this mapping to use. sl@0: This is called from #Construct and the arguments to this function are the same. sl@0: sl@0: On success, iAllocatedLinAddrAndOsAsid and iAllocatedSize will be initialised. sl@0: */ sl@0: virtual TInt AllocateVirtualMemory(TMappingCreateFlags aFlags, TInt aOsAsid, TLinAddr aAddr, TUint aSize, TLinAddr aColourOffset); sl@0: sl@0: /** sl@0: Free the virtual addresses allocated to this mapping with AllocateVirtualMemory. sl@0: */ sl@0: virtual void FreeVirtualMemory(); sl@0: }; sl@0: sl@0: sl@0: sl@0: /** sl@0: A memory mapping to map a 'chunk' aligned region of a DCoarseMemory object into sl@0: an address space. A 'chunk' is the size of memory mapped by a whole MMU page table sl@0: and is #KChunkSize bytes. 
sl@0: sl@0: These mappings make use of page tables owned by a DCoarseMemory and when sl@0: they are attached to a memory object they are linked into sl@0: DCoarseMemory::DPageTables::iMappings not DCoarseMemory::iMappings. sl@0: */ sl@0: class DCoarseMapping : public DMemoryMapping sl@0: { sl@0: public: sl@0: DCoarseMapping(); sl@0: ~DCoarseMapping(); sl@0: sl@0: protected: sl@0: DCoarseMapping(TUint aFlags); sl@0: sl@0: protected: sl@0: // from DMemoryMappingBase... sl@0: virtual TInt MapPages(RPageArray::TIter aPages, TUint aMapInstanceCount); ///< Not implemented. Faults in debug builds. sl@0: virtual void UnmapPages(RPageArray::TIter aPages, TUint aMapInstanceCount); ///< Not implemented. Faults in debug builds. sl@0: virtual void RemapPage(TPhysAddr& aPageArray, TUint aIndex, TUint aMapInstanceCount, TBool aInvalidateTLB); ///< Not implemented. Faults in debug builds. sl@0: virtual void RestrictPagesNA(RPageArray::TIter aPages, TUint aMapInstanceCount); ///< Not implemented. Faults in debug builds. sl@0: virtual TInt PageIn(RPageArray::TIter aPages, TPinArgs& aPinArgs, TUint aMapInstanceCount); sl@0: virtual TBool MovingPageIn(TPhysAddr& aPageArrayPtr, TUint aIndex); sl@0: virtual TInt DoMap(); sl@0: virtual void DoUnmap(); sl@0: sl@0: // from DMemoryMapping... sl@0: virtual TPte* FindPageTable(TLinAddr aLinAddr, TUint aMemoryIndex); sl@0: }; sl@0: sl@0: sl@0: sl@0: /** sl@0: A memory mapping to map a page aligned region of a memory object into sl@0: an address space. The may be used with any memory object: DFineMemory or DCoarseMemory. sl@0: */ sl@0: class DFineMapping : public DMemoryMapping sl@0: { sl@0: public: sl@0: DFineMapping(); sl@0: ~DFineMapping(); sl@0: sl@0: private: sl@0: // from DMemoryMappingBase... 
	virtual TInt MapPages(RPageArray::TIter aPages, TUint aMapInstanceCount);
	virtual void UnmapPages(RPageArray::TIter aPages, TUint aMapInstanceCount);
	virtual void RemapPage(TPhysAddr& aPageArray, TUint aIndex, TUint aMapInstanceCount, TBool aInvalidateTLB);
	virtual void RestrictPagesNA(RPageArray::TIter aPages, TUint aMapInstanceCount);
	virtual TInt PageIn(RPageArray::TIter aPages, TPinArgs& aPinArgs, TUint aMapInstanceCount);
	virtual TBool MovingPageIn(TPhysAddr& aPageArrayPtr, TUint aIndex);
	virtual TInt DoMap();
	virtual void DoUnmap();

	// from DMemoryMapping...

	/**
	Allocate virtual addresses for this mapping to use.

	In addition to performing the action of DMemoryMapping::AllocateVirtualMemory
	this will also allocate all permanent page tables for the mapping if it has attribute
	#EPermanentPageTables.
	*/
	virtual TInt AllocateVirtualMemory(TMappingCreateFlags aFlags, TInt aOsAsid, TLinAddr aAddr, TUint aSize, TLinAddr aColourOffset);

	/**
	Free the virtual addresses and permanent page tables allocated to this mapping with
	AllocateVirtualMemory.
	*/
	virtual void FreeVirtualMemory();

	virtual TPte* FindPageTable(TLinAddr aLinAddr, TUint aMemoryIndex);

	// new...

	/**
	Allocate all the page tables required for this mapping. This is called by
	AllocateVirtualMemory if the #EPermanentPageTables attribute is set.

	Each page table for the virtual address region used by the mapping is
	allocated if not already present. The permanence count of any page table
	(SPageTableInfo::iPermanenceCount) is then incremented so that it is not
	freed even when it no longer maps any pages.

	If successful, the #EPageTablesAllocated flag in #Flags will be set.

	@return KErrNone if successful, otherwise one of the system wide error codes.
	*/
	TInt AllocatePermanentPageTables();

	/**
	Free all permanent page tables allocated to this mapping.

	This reverses the action of #AllocatePermanentPageTables by decrementing
	the permanence count for each page table and freeing it if it is no longer in use.
	*/
	void FreePermanentPageTables();

	/**
	Free a range of permanent page tables.

	This is an implementation factor for FreePermanentPageTables and
	AllocatePermanentPageTables. It decrements the permanence count
	for each page table and frees it if it is no longer in use.

	@param aFirstPde	The address of the page directory entry which refers to
						the first page table to be freed.
	@param aLastPde		The address of the page directory entry which refers to
						the last page table to be freed.
	*/
	void FreePermanentPageTables(TPde* aFirstPde, TPde* aLastPde);

#ifdef _DEBUG
	/**
	Check that the contents of the page table are valid.

	@param aPt		The page table to validate.
	@param aAddr	The virtual address corresponding to \a aPt.
	*/
	void ValidatePageTable(TPte* aPt, TLinAddr aAddr);
#endif

	/**
	Get the page table being used to map a specified virtual address if it exists.

	@param aAddr	A virtual address in the region allocated to this mapping.

	@return The virtual address of the page table mapping \a aAddr,
			or the null pointer if one wasn't found.
	*/
	TPte* GetPageTable(TLinAddr aAddr);

	/**
	Get the page table being used to map a specified virtual address; allocating
	a new one if it didn't previously exist.

	@param aAddr	A virtual address in the region allocated to this mapping.

	@return The virtual address of the page table mapping \a aAddr,
			or the null pointer if one wasn't found and couldn't be allocated.
	*/
	TPte* GetOrAllocatePageTable(TLinAddr aAddr);

	/**
	Get and pin the page table being used to map a specified virtual address;
	allocating a new one if it didn't previously exist.

	@param aAddr	A virtual address in the region allocated to this mapping.
	@param aPinArgs	The resources required to pin the page table.
					On success, the page table will have been appended to
					\a aPinArgs.iPinnedPageTables.

	@return The virtual address of the page table mapping \a aAddr,
			or the null pointer if one wasn't found and couldn't be allocated.
	*/
	TPte* GetOrAllocatePageTable(TLinAddr aAddr, TPinArgs& aPinArgs);

	/**
	Allocate a single page table.

	@param aAddr		The virtual address the page table will be used to map.
	@param aPdeAddress	Address of the page directory entry which is to map
						the newly allocated page table.
	@param aPermanent	True, if the page table's permanence count is to be incremented.

	@return The virtual address of the page table if it was successfully allocated,
			otherwise the null pointer.
	*/
	TPte* AllocatePageTable(TLinAddr aAddr, TPde* aPdeAddress, TBool aPermanent=false);

	/**
	Free a single page table if it is unused.

	@param aPdeAddress	Address of the page directory entry (PDE) which maps the page table.
						If the page table is freed, this PDE will be set to an 'unallocated' value.
	*/
	void FreePageTable(TPde* aPdeAddress);
	};


/**
A mapping which maps any memory into the kernel address space and provides access to
the physical address used by a memory object.

These mappings are always of the 'pinned' type to prevent the obtained physical addresses
from becoming invalid.
*/
class DKernelPinMapping : public DFineMapping
	{
public:
	DKernelPinMapping();

	/**
	Pre-reserve the resources this mapping needs, sized for \a aReserveSize,
	so that later MapAndPin calls within that size have their resources available
	(see #iReservePages).
	*/
	TInt Construct(TUint aReserveSize);

	/**
	Map \a aCount pages of \a aMemory, starting at page index \a aIndex, into the
	kernel address space and pin them, with access permissions \a aPermissions.
	*/
	TInt MapAndPin(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TMappingPermissions aPermissions);

	/**
	Reverse the action of #MapAndPin.
	*/
	void UnmapAndUnpin();

public:
	TInt iReservePages; ///< The number of pages this mapping is able to map with its reserved resources(page tables etc).
	};


/**
A mapping which provides access to the physical address used by a memory object
without mapping these at any virtual address accessible to software.

These mappings are always of the 'pinned' type to prevent the obtained physical addresses
from becoming invalid.
*/
class DPhysicalPinMapping : public DMemoryMappingBase
	{
public:
	DPhysicalPinMapping();

	/**
	Attach this mapping to a memory object so that it pins a specified region of its memory.

	Most of the action of this method is performed by #Attach.

	@param aMemory		The memory object.
	@param aIndex		The page index of the first page of memory to be pinned by the mapping.
	@param aCount		The number of pages of memory to be pinned by the mapping.
	@param aPermissions	The memory access permissions appropriate to the intended use
						of the physical addresses. E.g. if the memory contents will be
						changed, use EReadWrite. These permissions are used for error
						checking, e.g. detecting attempted writes to read-only memory.
						They are also used for optimising access to demand paged memory,
						which is more efficient if only read-only access is required.

	@return KErrNone if successful,
			KErrNotFound if any part of the memory to be pinned was not present,
			KErrNoMemory if there was insufficient memory,
			otherwise one of the system wide error codes.
	*/
	TInt Pin(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TMappingPermissions aPermissions);

	/**
	Remove this mapping from the memory object it was previously added to by #Pin.

	Most of the action of this method is performed by #Detach.
	*/
	virtual void Unpin();

private:
	// from DMemoryMappingBase...
	virtual TInt MapPages(RPageArray::TIter aPages, TUint aMapInstanceCount); ///< Not implemented. Faults in debug builds.
	virtual void UnmapPages(RPageArray::TIter aPages, TUint aMapInstanceCount); ///< Does nothing
	virtual void RemapPage(TPhysAddr& aPageArray, TUint aIndex, TUint aMapInstanceCount, TBool aInvalidateTLB); ///< Not implemented. Faults in debug builds.
	virtual void RestrictPagesNA(RPageArray::TIter aPages, TUint aMapInstanceCount); ///< Does nothing
	virtual TInt PageIn(RPageArray::TIter aPages, TPinArgs& aPinArgs, TUint aMapInstanceCount); ///< Does nothing
	virtual TBool MovingPageIn(TPhysAddr& aPageArrayPtr, TUint aIndex); ///< Not implemented. Faults in debug builds.
	virtual TInt DoMap(); ///< Does nothing
	virtual void DoUnmap(); ///< Does nothing
	};



/**
A mapping which pins memory in order to prevent demand paging related
page faults from occurring.
*/
class DVirtualPinMapping : public DPhysicalPinMapping
	{
public:
	DVirtualPinMapping();
	~DVirtualPinMapping();

	/**
	Create a new DVirtualPinMapping object suitable for pinning a specified number of pages.

	If no maximum is specified (\a aMaxCount==0) then this object may be used to pin
	any number of pages, however this will require dynamic allocation of storage for
	page table references.

	@param aMaxCount	The maximum number of pages which can be pinned, or zero for no maximum.

	@return The newly created DVirtualPinMapping or the null pointer if there was
			insufficient memory.
	*/
	static DVirtualPinMapping* New(TUint aMaxCount);

	/**
	Attach this mapping to a memory object so that it pins a specified region of its memory.

	Additionally, pin the page tables in a specified mapping (\a aMapping) which
	are being used to map these pages.

	The result of this function is that access to the pinned memory through the virtual
	addresses used by \a aMapping will not generate any demand paging related page faults.

	@param aMemory		The memory object.
	@param aIndex		The page index of the first page of memory to be pinned by the mapping.
	@param aCount		The number of pages of memory to be pinned by the mapping.
	@param aPermissions	The memory access permissions appropriate to the intended use
						of the physical addresses. E.g. if the memory contents will be
						changed, use EReadWrite. These permissions are used for error
						checking, e.g. detecting attempted writes to read-only memory.
						They are also used for optimising access to demand paged memory,
						which is more efficient if only read-only access is required.
	@param aMapping		The mapping whose page tables are to be pinned. This must be
						currently mapping the specified region of memory pages.
	@param aMapInstanceCount	The instance count of the mapping whose page tables are to be pinned.

	@return KErrNone if successful,
			KErrNotFound if any part of the memory to be pinned was not present,
			KErrNoMemory if there was insufficient memory,
			otherwise one of the system wide error codes.
	*/
	TInt Pin(	DMemoryObject* aMemory, TUint aIndex, TUint aCount, TMappingPermissions aPermissions,
				DMemoryMappingBase* aMapping, TUint aMapInstanceCount);

	/**
	Remove this mapping from the memory object it was previously added to by #Pin.
	This will unpin any memory pages and page tables that were pinned.
	*/
	void Unpin();

	/**
	Return the maximum number of page tables which could be required to map
	\a aPageCount pages. This is used by various resource reserving calculations.
	*/
	static TUint MaxPageTables(TUint aPageCount);

	/**
	In debug builds, dump information about this mapping to the kernel trace port.
	*/
	virtual void Dump();

private:
	// from DMemoryMappingBase...
	virtual void RemapPage(TPhysAddr& aPageArray, TUint aIndex, TUint aMapInstanceCount, TBool aInvalidateTLB); ///< Does nothing.
	virtual TInt PageIn(RPageArray::TIter aPages, TPinArgs& aPinArgs, TUint aMapInstanceCount);
	virtual TBool MovingPageIn(TPhysAddr& aPageArrayPtr, TUint aIndex); ///< Not implemented. Faults in debug builds.
	virtual TInt DoPin(TPinArgs& aPinArgs);
	virtual void DoUnpin(TPinArgs& aPinArgs);

private:
	/**
	Allocate memory to store pointers to all the page tables which map
	\a aCount pages of memory. The pointer to the allocated memory
	is stored at iAllocatedPinnedPageTables.

	If iSmallPinnedPageTablesArray is large enough, this function doesn't
	allocate any memory.

	@return KErrNone if successful, otherwise KErrNoMemory.
	*/
	TInt AllocPageTableArray(TUint aCount);

	/**
	Delete iAllocatedPinnedPageTables.
	*/
	void FreePageTableArray();

	/**
	Return the address of the array storing pinned page tables.
	This is either iSmallPinnedPageTablesArray or iAllocatedPinnedPageTables.
	*/
	TPte** PageTableArray();

	/**
	Unpin all the page tables which have been pinned by this mapping.

	@param aPinArgs	The resources used for pinning. The replacement pages allocated
					to this will be increased for each page which became completely
					unpinned.
	*/
	void UnpinPageTables(TPinArgs& aPinArgs);

private:
	/**
	Temporary store for the mapping passed to #Pin.
	*/
	DMemoryMappingBase* iPinVirtualMapping;

	/**
	Temporary store for the mapping instance count passed to #Pin.
	*/
	TUint iPinVirtualMapInstanceCount;

	/**
	The number of page tables which are currently being pinned by this mapping.
	This is the number of valid entries stored at PageTableArray.
	*/
	TUint iNumPinnedPageTables;

	/**
	The maximum number of pages which can be pinned by this mapping.
	If this is zero, there is no maximum.
	*/
	TUint iMaxCount;

	/**
	The memory allocated by this object for storing pointers to the page tables
	it has pinned.
	*/
	TPte** iAllocatedPinnedPageTables;

	enum
		{
		KSmallPinnedPageTableCount = 2 ///< Number of entries in iSmallPinnedPageTablesArray
		};

	/**
	A small array to use for storing pinned page tables.
	This is an optimisation used for the typical case of pinning a small number of pages
	to avoid dynamic allocation of memory.
	*/
	TPte* iSmallPinnedPageTablesArray[KSmallPinnedPageTableCount];
	};

#endif