os/kernelhwsrv/kernel/eka/memmodel/epoc/flexible/mmu/mvalloc.h
// Copyright (c) 2007-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
//

/**
 @file
 @internalComponent
*/

#ifndef MVALLOC_H
#define MVALLOC_H


class RVirtualAllocSlabSet;

/**
Allocator for virtual addresses.

Each address managed by the allocator has a 'slab type' (#TVirtualSlabType).
The allocator ensures that addresses of different slab types never overlap
within the same 'chunk' (the region covered by a single MMU page table).

Addresses are allocated from lower address regions first, subject to slab type
and allocation algorithm constraints. See #RBackwardsVirtualAllocator for an
allocator which works from higher addresses downwards. An illustrative usage
sketch follows the class definition below.
*/
class RVirtualAllocator
	{
public:
	RVirtualAllocator();
	~RVirtualAllocator();

	/**
	Second phase constructor.

	@param aStart			The starting virtual address of the region to be covered
							by this allocator.
							Must be an integer multiple of #KVirtualAllocSlabSize.
	@param aEnd				The end virtual address (last valid address plus one) of the region
							to be covered by this allocator.
							Must be an integer multiple of #KVirtualAllocSlabSize.
	@param aNumSlabTypes	The number of different 'slab types' to be allocated.
							This will normally be #ENumVirtualAllocTypes.
	@param aWriteLock		Reference to the mutex which is being used to protect allocations
							with this object. This is only used for debug checks and may be
							a mutex assigned by #DMutexPool. In practice, this will usually be an
							address space lock DAddressSpace::iLock.

	@return KErrNone if successful, otherwise one of the system wide error codes.
	*/
	TInt Construct(TLinAddr aStart, TLinAddr aEnd, TUint aNumSlabTypes, DMutex*& aWriteLock);

	/**
	Allocate a region of virtual addresses.

	The returned region may have a start address and/or size different from
	those requested, due to various alignment requirements in the implementation.
	However, the returned region will always include all of the addresses requested.

	@param[out] aAddr			Returns the start address of the region which was allocated.
	@param[out] aSize			Returns the size, in bytes, of the region which was allocated.
								This will always be aligned to a multiple of the page colouring
								size: #KPageColourCount*#KPageSize.
	@param		aRequestedAddr	The requested start address of the region to allocate,
								or zero if no specific address is required.
	@param		aRequestedSize	The requested size, in bytes, of the region to allocate.
	@param		aSlabType		The 'slab type' of the address to be allocated.
								Addresses of different slab types will not overlap in the
								same 'chunk' (region covered by a single MMU page table).
								This value must be less than the \a aNumSlabTypes argument
								used in #Construct.

	@return KErrNone if successful.
			KErrAlreadyExists if a specific address was supplied and this was already
			allocated, or exists in a slab already used for a different slab type.
			Otherwise, one of the system wide error codes.

	@pre The write lock must be held. (See \a aWriteLock argument for #Construct.)
	*/
	TInt Alloc(TLinAddr& aAddr, TUint& aSize, TLinAddr aRequestedAddr, TUint aRequestedSize, TUint aSlabType);

	/**
	Free a virtual address region which was allocated with #Alloc.
	The region supplied to this function must be either the region originally
	requested in a previous call to #Alloc or the (possibly adjusted) region
	returned by that call.

	@param aAddr	Start address of the region to be freed.
	@param aSize	Size, in bytes, of the region to be freed.

	@pre The write lock must be held. (See \a aWriteLock argument for #Construct.)
	*/
	void Free(TLinAddr aAddr, TUint aSize);

	/**
	Return true if the address region specified by \a aAddr and \a aSize is
	entirely within the region of addresses covered by this allocator.
	*/
	TBool InRange(TLinAddr aAddr, TUint aSize);

	/**
	Return true if the address region specified by \a aAddr and \a aSize was
	allocated by this allocator using the specified \a aSlabType.

	@pre The write lock must be held. (See \a aWriteLock argument for #Construct.)
	*/
	TBool CheckSlabType(TLinAddr aAddr, TUint aSize, TUint aSlabType);

private:
	/**
	If required, expand the region specified by \a aAddr and \a aSize
	to meet size and alignment requirements of the allocator.
	This also returns log2 of the address alignment required.
	*/
	static TUint AdjustRegion(TLinAddr& aAddr, TUint& aSize);

protected:
	/**
	The starting virtual address of the region covered by this allocator.
	*/
	TLinAddr iBase;

	/**
	The size, in bytes, of the virtual address region covered by this allocator.
	*/
	TUint iSize;

private:
	/**
	Bitmap of used virtual address regions, each a 'slab' size (#KVirtualAllocSlabSize).
	*/
	TBitMapAllocator* iAllocator;

	/**
	Pointer to allocator object used for sizes less than #KVirtualAllocSlabSize.
	*/
	RVirtualAllocSlabSet* iSlabSet;
	};
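
/*
Illustrative usage sketch only: a minimal outline of the allocation flow
documented above. The methods are those declared by this class; the region
bounds, lock, requested size and slab type below are assumptions made purely
for the example.

	RVirtualAllocator allocator;
	DMutex* writeLock = NULL;	// assumed to be assigned elsewhere, e.g. by DMutexPool

	// Cover an assumed slab-aligned region [KExampleRegionStart,KExampleRegionEnd)
	// with the allocator (both bounds are hypothetical example constants).
	TInt r = allocator.Construct(KExampleRegionStart, KExampleRegionEnd,
								 ENumVirtualAllocTypes, writeLock);

	// With the write lock held, allocate an anonymous 0x3000 byte region;
	// the returned aAddr/aSize may be larger than requested due to alignment.
	TLinAddr addr;
	TUint size;
	TUint slabType = EVirtualSlabTypeDemandPaged;	// assumed slab type for the example
	if(r==KErrNone)
		r = allocator.Alloc(addr, size, 0, 0x3000, slabType);

	// ...use the region, then free exactly what Alloc returned
	// (still with the write lock held)...
	if(r==KErrNone)
		allocator.Free(addr, size);
*/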


inline TBool RVirtualAllocator::InRange(TLinAddr aAddr, TUint aSize)
	{
	aAddr -= iBase;
	return aAddr<iSize && aAddr+aSize>=aAddr && aAddr+aSize<=iSize;
	}
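
/*
A brief worked example of the range check above, assuming for illustration a
32-bit TLinAddr, iBase=0x40000000 and iSize=0x10000000:

	- aAddr=0x48000000, aSize=0x1000: the base-relative address is 0x08000000
	  and the end is 0x08001000, both within iSize, so InRange() returns true.
	- aAddr=0x48000000, aSize=0xFFFFF000: the base-relative end wraps around
	  past zero, so the 'aAddr+aSize>=aAddr' overflow guard fails and
	  InRange() returns false.
*/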


/**
Allocator for virtual addresses which is identical to #RVirtualAllocator
except that addresses are allocated from higher address regions first,
subject to 'slab type' and allocation algorithm constraints.
*/
class RBackwardsVirtualAllocator : public RVirtualAllocator
	{
public:
	// overriding RVirtualAllocator...
	TInt Alloc(TLinAddr& aAddr, TUint& aSize, TLinAddr aRequestedAddr, TUint aRequestedSize, TUint aSlabType);
	void Free(TLinAddr aAddr, TUint aSize);
	};
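
/*
A minimal sketch of the difference in allocation direction, assuming a simple
array of free/used slab flags. This is illustrative only; it is not the data
structure or search algorithm used by the real allocators.

	// Forward policy (RVirtualAllocator): scan from the lowest slab upwards.
	TInt AllocForwardSketch(const TBool* aUsed, TUint aCount)
		{
		for(TUint i=0; i<aCount; ++i)
			if(!aUsed[i])
				return i;		// first free slab with the lowest address
		return KErrNoMemory;
		}

	// Backward policy (RBackwardsVirtualAllocator): scan from the highest slab downwards.
	TInt AllocBackwardSketch(const TBool* aUsed, TUint aCount)
		{
		for(TUint i=aCount; i>0; --i)
			if(!aUsed[i-1])
				return i-1;		// first free slab with the highest address
		return KErrNoMemory;
		}
*/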


/**
Enumeration of the different virtual address allocation types which may not
overlap in the same 'chunk' (region covered by a single MMU page table).

This includes all #TPdeType values, plus additional address types.
*/
enum TVirtualSlabType
	{
	/**
	Bit flag used to distinguish common virtual addresses allocated with
	DAddressSpace::AllocateUserCommonVirtualMemory.

	It is important that these addresses reside in their own slab type;
	otherwise, normally allocated local addresses would tend to be placed
	adjacent to them, clogging up the 'common' address region.
	*/
	EVirtualSlabTypeCommonVirtual		= ENumPdeTypes<<0,

	/**
	Bit flag used to distinguish virtual addresses allocated for use in
	mapping demand paged memory.

	This ensures that page tables used for demand paged memory are not
	used for other memory types, which means they may be freed once the
	memory is paged out.
	*/
	EVirtualSlabTypeDemandPaged			= ENumPdeTypes<<1,

	/**
	Total number of different 'kinds' of virtual address which may need to be allocated.
	*/
	ENumVirtualAllocTypes				= ENumPdeTypes<<2
	};
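
/*
A worked example of the values above, assuming purely for illustration that
ENumPdeTypes is 2 (its real value is defined elsewhere and may differ):

	EVirtualSlabTypeCommonVirtual	= ENumPdeTypes<<0 = 0b0010 (2)
	EVirtualSlabTypeDemandPaged		= ENumPdeTypes<<1 = 0b0100 (4)
	ENumVirtualAllocTypes			= ENumPdeTypes<<2 = 0b1000 (8)

Under that assumption the low bit of a slab type can carry a #TPdeType value
(0 or 1) while the flags above occupy higher bits, so every combination stays
below ENumVirtualAllocTypes, the count normally passed as aNumSlabTypes to
RVirtualAllocator::Construct().
*/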


#endif