// Copyright (c) 2007-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
//

/**
 @file
 @internalComponent
*/

#ifndef MPTALLOC_H
#define MPTALLOC_H

#include "mcleanup.h"

/**
Number of #SPageTableInfo structures which will fit into a page of RAM.
*/
const TUint KPageTableInfosPerPage = KPageSize/sizeof(SPageTableInfo);

/**
Number of pages of page tables which correspond to a page of page infos.
*/
const TUint KPageTableGroupSize = KPageTableInfosPerPage/KPtClusterSize;

/**
Maximum number of RAM pages which can be used for page tables.
*/
const TUint KMaxPageTablePages = (KPageTableEnd-KPageTableBase)/KPageSize;


/**
The maximum number of pages required to pin a single page table.
*/
const TUint KNumPagesToPinOnePageTable = 2; // 1 page table page + 1 page table info page

/**
The minimum number of unpinned paged page table pages required so that a page fault
can never fail to allocate a page table.
*/
const TUint KMinUnpinnedPagedPtPages = KMaxCpus;

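// For orientation, a worked example with illustrative values (platform values
// may differ; nothing here is asserted by this header): if KPageSize is 4096
// bytes and sizeof(SPageTableInfo) is 16, then KPageTableInfosPerPage == 256;
// with KPtClusterSize == 4 page tables per RAM page, KPageTableGroupSize == 64,
// i.e. one page of SPageTableInfo structures covers 64 pages of page tables.
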

/**
Class for allocating MMU page tables.
*/
class PageTableAllocator
	{
public:
	void Init2(DMutex* aLock);
	void Init2B();

	/**
	Allocate a page table.

	@param aDemandPaged True if the page table will be used to map demand paged memory;
						false otherwise.

	@return Virtual address of the allocated page table,
			or the null pointer if there was insufficient memory.

	@pre #PageTablesLockIsHeld, i.e. the current thread has called #Lock().
	*/
	TPte* Alloc(TBool aDemandPaged);

	/**
	Free a page table previously acquired with #Alloc.

	@param aPageTable Virtual address of the page table.

	@pre #PageTablesLockIsHeld, i.e. the current thread has called #Lock().
	*/
	void Free(TPte* aPageTable);

	/**
	Acquire the mutex used to protect page table allocation.
	*/
	void Lock();

	/**
	Release the mutex used to protect page table allocation.
	*/
	void Unlock();

	/**
	Return true if the current thread has acquired the mutex used to protect
	page table allocation, i.e. has called #Lock().
	*/
	TBool LockIsHeld();

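	// Illustrative usage sketch (an addition for clarity, not original API
	// documentation): allocating and freeing an unpaged page table under the
	// allocation lock, using the single PageTables instance declared at the
	// end of this header. The surrounding kernel-side context is assumed.
	//
	// @code
	// PageTables.Lock();
	// TPte* pt = PageTables.Alloc(EFalse);	// EFalse => not demand paged
	// if(pt)
	// 	{
	// 	// ... map memory using the page table ...
	// 	PageTables.Free(pt);
	// 	}
	// PageTables.Unlock();
	// @endcode
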
	/**
	Steal a RAM page which is currently being used to store demand paged page tables
	or page table infos.

	This removes the page tables contained in the RAM page from any objects which own
	them and then returns the RAM to the demand paging system as a free page. It
	never returns KErrNone to indicate that the page has been successfully stolen.

	This is only intended for use by DPageTableMemoryManager::StealPage.

	@param aPageInfo The page information structure of the page to be stolen.

	@return KErrCompletion to indicate that the page was stolen but has been
			returned to the demand paging live list as a free page.
			Otherwise, KErrInUse if the page could not be freed.

	@pre #PageTablesLockIsHeld, i.e. the current thread has called #Lock().
	@pre MmuLock held.
	*/
	TInt StealPage(SPageInfo* aPageInfo);

	/**
	Discard a page of page tables or page table infos, but only if it is demand paged.

	This will only be invoked on page table info pages or pinned paged page tables,
	as they aren't on the live list and so M::MovePage() will not know that the pager
	can discard them.

	@param aMemory		This should always be the page table info memory object.
	@param aOldPageInfo	The page info of the page to discard.
	@param aBlockZoneId	The ID of any RAM zone not to be allocated into.
	@param aBlockRest	ETrue if allocation should stop when the blocked zone is hit.

	@return KErrNone if the page could be successfully discarded and its RAM page freed.
	*/
	TInt MovePage(DMemoryObject* aMemory, SPageInfo* aOldPageInfo,
					TUint aBlockZoneId, TBool aBlockRest);

#ifdef _DEBUG
	/**
	Debug function for use by DPager::RemovePage() to allow it to remove
	pages with paged state == EUnpaged.

	A page table page may be stolen while it is unpaged because it may have been
	allocated via DMemoryMapping::AllocatePageTable() but not yet rejuvenated
	by Mmu::PageInPages(), as the MmuLock is released between these stages.

	A page table info page is never added to the live list, so it will always
	be unpaged, but it can be stolen, so allow it to be removed.

	@param aPageInfo The page info of the page.

	@return ETrue if the page is a page table info page, EFalse otherwise.
	*/
	TBool IsPageTableUnpagedRemoveAllowed(SPageInfo* aPageInfo);
#endif

	/**
	Pin the RAM page containing a page table, as well as the RAM page
	containing its #SPageTableInfo structure.

	@param aPageTable	Virtual address of the page table.
	@param aPinArgs		The resources to use for pinning. This must have
						at least #KNumPagesToPinOnePageTable replacement
						pages available.
	*/
	TInt PinPageTable(TPte* aPageTable, TPinArgs& aPinArgs);

	/**
	Unpin the RAM page containing a page table, as well as the RAM page
	containing its #SPageTableInfo structure.
	This reverses the action of #PinPageTable.

	@param aPageTable	Virtual address of the page table.
	@param aPinArgs		The resources used for pinning. The replacement
						pages count in this is incremented for each page
						which is completely unpinned, i.e. those which can
						be reused as new replacement pages or freed.
	*/
	void UnpinPageTable(TPte* aPageTable, TPinArgs& aPinArgs);

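	// Illustrative pin/unpin pairing (an addition for clarity, not original
	// API documentation; how aPinArgs is prepared with replacement pages is
	// kernel context assumed here):
	//
	// @code
	// // aPinArgs must hold at least KNumPagesToPinOnePageTable replacement pages.
	// if(PageTables.PinPageTable(pt, aPinArgs) == KErrNone)
	// 	{
	// 	// ... page table and its SPageTableInfo RAM are guaranteed resident ...
	// 	PageTables.UnpinPageTable(pt, aPinArgs);
	// 	}
	// @endcode
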
private:
	/**
	Sub-allocator used for managing page tables of a given 'pagedness' (paged/not-paged).
	Each allocator maintains a list of free page tables (#iFreeList) from which it can
	allocate, as well as a separate list of RAM pages which have no allocated page
	tables, #iCleanupList.
	Page tables in the RAM on #iCleanupList do not appear in #iFreeList.
	*/
	class TSubAllocator
		{
	public:
		void Init2(PageTableAllocator* aAllocator, TUint aReserveCount, TBool aDemandPaged);

		/**
		Allocate a page table from this sub-allocator.

		@return The #SPageTableInfo structure of the page table,
				or the null pointer if none could be allocated.
		*/
		SPageTableInfo* Alloc();

		/**
		Free a page table back to this sub-allocator.

		@param aPageTableInfo The #SPageTableInfo structure of the page table.
		*/
		void Free(SPageTableInfo* aPageTableInfo);

		/**
		Add a single page of page tables to this allocator for management.

		@param aPageTableInfo The #SPageTableInfo structure of the first page table
							  contained in the page.
		*/
		void AllocPage(SPageTableInfo* aPageTableInfo);

		/**
		Attempt to remove a single unused page of page tables from this allocator.

		@return The #SPageTableInfo structure of the first page table contained
				in the removed page, or the null pointer if there was no unused
				memory to be freed.
		*/
		SPageTableInfo* FreePage();

		/**
		Move a page of RAM containing page tables to #iCleanupList.
		All page tables in the page must be currently unused.

		@param aPageTableInfo The #SPageTableInfo structure of the first page table
							  contained in the page.
		*/
		void MoveToCleanup(SPageTableInfo* aPageTableInfo);

		/**
		Return true if there are whole RAM pages which can be freed from this
		sub-allocator without reducing #iFreeCount below #iReserveCount.
		*/
		TBool IsCleanupRequired();

		/**
		Debug check returning true if this object's lists are in a valid state.
		*/
		TBool CheckFreeList();
	public:
		SDblQue iFreeList;		///< List of unused page tables, linked by their SPageTableInfo::FreeLink.
		SDblQue iCleanupList;	///< List of unused pages, linked by the SPageTableInfo::FreeLink of the first page table in the page.
		TUint iFreeCount;		///< Total free page tables in pages on #iFreeList and #iCleanupList.
		TUint iReserveCount;	///< Minimum number of page tables to keep in reserve.
		TBool iDemandPaged;		///< True if this allocator is used for demand paged page tables.
		};

	/**
	Implementation of #Alloc.
	*/
	TPte* DoAlloc(TBool aDemandPaged);

	/**
	Implementation of #Free.
	*/
	void DoFree(TPte* aPageTable);

	/**
	Allocate resources for a page's worth of page tables owned by \a aSubAllocator.

	@return True if the resources were successfully allocated.

	@pre #PageTablesLockIsHeld, i.e. the current thread has called #Lock().
	*/
	TBool AllocReserve(TSubAllocator& aSubAllocator);

	/**
	Free the resources taken up by a page's worth of unused page tables
	owned by \a aSubAllocator.

	@return True if any resources were freed.
			False if there are no more unused resources.

	@pre #PageTablesLockIsHeld, i.e. the current thread has called #Lock().
	*/
	TBool FreeReserve(TSubAllocator& aSubAllocator);

	/**
	Steal a RAM page which is currently being used to store demand paged page
	table infos.

	This removes all the page tables referenced by the page table infos contained in
	the RAM page from any objects which own them and then returns the RAM to the
	demand paging system as a free page. It never returns KErrNone to indicate that
	the page has been successfully stolen.

	This is only intended for use by PageTableAllocator::StealPage.

	@param aPageInfo The page information structure of the page to be stolen.

	@return KErrCompletion to indicate that the page was stolen but has been
			returned to the demand paging live list as a free page.
			Otherwise, KErrInUse if the page could not be freed.

	@pre #PageTablesLockIsHeld, i.e. the current thread has called #Lock().
	@pre MmuLock held.
	*/
	TInt StealPageTableInfo(SPageInfo* aPageInfo);

	/**
	Free all unused resources taken up for page table management.
	*/
	void Cleanup();

	/**
	Trampoline function for use with iCleanup which redirects to #Cleanup().
	*/
	static void CleanupTrampoline(TAny* aSelf);

private:
	/**
	Sub-allocator for allocating unpaged page tables.
	*/
	TSubAllocator iUnpagedAllocator;

	/**
	Sub-allocator for allocating demand paged page tables.
	*/
	TSubAllocator iPagedAllocator;

	/**
	Object used for queueing cleanup callbacks to #CleanupTrampoline.
	*/
	TMemoryCleanup iCleanup;

	/**
	Recursion count for #Alloc.
	*/
	TUint iAllocating;

	/**
	The mutex used to protect page table allocation.
	*/
	DMutex* iLock;

	/**
	The memory object used to store the memory containing page tables.
	*/
	DMemoryObject* iPageTableMemory;

	/**
	The memory object used to store #SPageTableInfo structures.
	*/
	DMemoryObject* iPageTableInfoMemory;

	/**
	Helper class for allocating page index values within #iPageTableMemory.
	This wraps two bitmap allocators, one each for paged and unpaged
	page tables.

	Page indexes are allocated in a way which ensures that there will not be
	any SPageTableInfo structures for unpaged page tables in the same RAM page
	as the structures for paged page tables.
	*/
	class TPtPageAllocator
		{
	public:
		void Init2(TUint aNumInitPages);
		TInt Alloc(TBool aDemandPaged);
		void Free(TUint aPageIndex, TBool aDemandPaged);

		/**
		Determine whether a page table info page is paged.

		@param aPageInfo	Pointer to the SPageInfo of the page table info page.
		@return ETrue if the page table info page is paged, EFalse otherwise.
		@pre MmuLock is held.
		*/
		inline TBool IsDemandPagedPtInfo(SPageInfo* aPageInfo)
			{
			// Is the highest page table index that this page table info page can
			// reference allocated within the demand paged region of the page table
			// address space?
			TUint groupIndex = aPageInfo->Index();
			return ((groupIndex+1) * KPageTableGroupSize)-1 >= iUpperWaterMark;
			}

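		// Worked example with illustrative numbers (an addition for clarity;
		// the values are not asserted by this header): with
		// KPageTableGroupSize == 4 and iUpperWaterMark == 10, an info page
		// with Index() == 2 covers page table pages 8..11; since 11 >= 10,
		// part of that range lies in the demand paged region, so the info
		// page is treated as paged.
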
		/**
		Determine whether a page table page is paged.

		@param aPageInfo	Pointer to the SPageInfo of the page table page.
		@return ETrue if the page table page is paged, EFalse otherwise.
		@pre MmuLock is held.
		*/
		inline TBool IsDemandPagedPt(SPageInfo* aPageInfo)
			{
			return aPageInfo->Index() >= iUpperWaterMark;
			}

		/**
		Get a random paged page table page.

		@return The index of a paged page table page.
		@pre All paged page table pages are allocated.
		@pre Page tables lock is held.
		*/
		TUint RandomPagedPtPage();

		/**
		Increase the count of pinned paged page table pages.

		@return KErrNone on success, KErrNoMemory if too many pages are already pinned.
		@pre MmuLock is held.
		*/
		inline TInt PtPagePinCountInc()
			{
			if (AtPinnedPagedPtsLimit(iUpperWaterMark, iLowerWaterMark, iPinnedPageTablePages + 1))
				{
				return KErrNoMemory;
				}
			iPinnedPageTablePages++;
			return KErrNone;
			}

		/**
		Decrease the count of pinned paged page table pages.

		@pre MmuLock is held.
		*/
		inline void PtPagePinCountDec()
			{
			__NK_ASSERT_DEBUG(iPinnedPageTablePages);	// Can't be zero.
			iPinnedPageTablePages--;
			}

	private:
		/**
		Check whether it is safe to pin a paged page table, or to reduce the
		amount of virtual address space available to paged page tables, by
		checking that we either have spare virtual address space in which to
		increase the number of paged page tables, or that there are already
		enough unpinned paged page tables.

		@return ETrue if the limit has been reached, EFalse if it is OK to pin
				more paged page tables or to increase the number of unpaged
				page tables.
		*/
		TBool AtPinnedPagedPtsLimit(TUint aUpperWaterMark, TUint aLowerWaterMark, TUint aPinnedPtPages)
			{
			TUint adjustedUpperWaterMark = aUpperWaterMark & ~(KPageTableGroupSize - 1);
			TUint availPagedPtPages = KMaxPageTablePages - adjustedUpperWaterMark;
			TUint availUnpinnedPagedPtPages = availPagedPtPages - aPinnedPtPages;
			// This check is sufficient as we only increase the pinned paged page table
			// pages or unpaged page table pages one at a time.
			return (aLowerWaterMark + 1 == adjustedUpperWaterMark &&
					availUnpinnedPagedPtPages < KMinUnpinnedPagedPtPages);
			}

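		// Worked example with illustrative numbers (an addition for clarity;
		// the values are not asserted by this header): with
		// KPageTableGroupSize == 4, aUpperWaterMark == 9 rounds down to
		// adjustedUpperWaterMark == 8; if KMaxPageTablePages == 16 there are
		// 16 - 8 == 8 paged page table pages available, and with 7 of them
		// pinned only 1 unpinned page remains. If aLowerWaterMark == 7 the two
		// regions have met (7 + 1 == 8), so with KMinUnpinnedPagedPtPages == 2
		// the function returns ETrue and further pinning is refused.
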
	private:
		TBitMapAllocator* iLowerAllocator;	///< Allocator for unpaged page tables.
		TUint iLowerWaterMark;				///< Highest page index allocated by iLowerAllocator.
		TBitMapAllocator* iUpperAllocator;	///< Allocator for demand paged page tables.
		TUint iUpperWaterMark;				///< Lowest page index allocated by iUpperAllocator.
		TUint iPinnedPageTablePages;		///< The number of pinned paged page table pages.
		};

	/**
	Allocator for page indexes within #iPageTableMemory.
	*/
	TPtPageAllocator iPtPageAllocator;

	/**
	Array which contains usage counts for the pages of #SPageTableInfo structures.
	When a count is zero, there are no structures in use in the corresponding
	page of memory in #iPageTableInfoMemory, indicating that the memory may be
	freed.
	*/
	TUint16 iPageTableGroupCounts[KMaxPageTablePages/KPageTableGroupSize];

	friend class TSubAllocator;
	};


/**
The single instance of the #PageTableAllocator.
*/
extern PageTableAllocator PageTables;


#endif