// Copyright (c) 1998-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\include\memmodel\epoc\mmubase\demand_paging.h
//
//

#include <ramcache.h>
#include <memmodel.h>

#ifdef _DEBUG
#define __CONCURRENT_PAGING_INSTRUMENTATION__
#endif

class DDemandPagingLock;

/**
Maximum number of paging devices supported.
@internalComponent
*/
const TInt KMaxPagingDevices = 1 + KMaxLocalDrives;

/**
Multiplier for number of request objects in pool per drive that supports code paging.
@internalComponent
*/
const TInt KPagingRequestsPerDevice = 2;

/**
Maximum number of paging request objects supported.
@internalComponent
*/
const TInt KMaxPagingRequests = KMaxPagingDevices * KPagingRequestsPerDevice;
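
/*
Worked example (illustrative only, not part of the original header): KMaxLocalDrives is
platform-specific; assuming it is 16, the pool sizes work out as:

    KMaxPagingDevices  = 1 + 16 = 17    // one slot for ROM paging plus one per local drive
    KMaxPagingRequests = 17 * 2 = 34    // KPagingRequestsPerDevice request objects per device
*/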

/**
Base class for the demand paging object.

The main functionality provided by this class is:
- Management of the live page list.
- Interface to paging devices (media drivers).

The 'live page list' contains those pages which are currently present or 'paged in'.
This is a doubly linked list of SPageInfo objects which each represent a page of physical RAM.

The list is split into two parts; 'young' pages (iYoungList) which are marked as accessible
by the MMU and 'old' pages (iOldList) which are not marked as accessible.
The page at the head of iYoungList is the 'youngest' page, and that at the tail
of iOldList is the 'oldest' page.

This arrangement enables a pseudo Most Recently Used (MRU) algorithm to be implemented,
where access to an old page will cause a data abort and the fault handler will then
move this page to the head of the 'young' list; the last young page will then be made
the first old page.

When a data abort occurs because of an access to a page which is not live, a new RAM page
is obtained from the system's free pool; this is then filled with the correct data and
added to the start of the live page list. (It is made the 'youngest' page.) If there are
no RAM pages left in the system's free pool, or the live page list is at its maximum
size (iMinimumPageCount), then the oldest page from the live page list is recycled and
used for the new page.

If the OS requests RAM pages from the system's free pool and there are not enough, then
pages are removed from the live page list to satisfy this request - as long as this does
not make the live page list smaller than the limit specified by iMinimumPageCount.

@internalComponent
*/
class DemandPaging : public RamCacheBase
	{
public:
	/**
	Fault enumeration
	*/
	enum TFault
		{
		EInitialiseFailed = 0,		/**< Error occurred during initialisation */
		EInitialiseBadArgs = 1,		/**< Arguments used during initialisation were bad */
		ERamPageLocked = 2,			/**< A page in the live page list was found to be locked */
		EUnexpectedPageType = 3,	/**< A page in the live page list had an unexpected type (SPageInfo::Attribs) */
		EPageInFailed = 4,			/**< An error occurred whilst reading data for a 'page in' operation */
		ELockTwice = 5,				/**< DDemandPagingLock::Lock was used twice without an intervening DDemandPagingLock::Unlock. */
		ELockTooBig = 6,			/**< DDemandPagingLock::Lock was used with a size greater than that reserved with DDemandPagingLock::Alloc. */
		EInvalidPagingDevice = 7,	/**< Raised by InstallPagingDevice when the given device is found to have invalid parameters */
		EDeviceAlreadyExists = 8,	/**< Attempt to install a paging device when one already exists for the ROM or the specified drive. */
		EDeviceMissing = 9,			/**< A paging fault occurred and the device required to service it was found to be missing. */
		EPageFaultWhilstFMHeld = 10,/**< A paging fault occurred whilst the current thread held a fast mutex. */
		EPageFreeContiguousPages = 11,/**< An error occurred when finding pages to free to make contiguous memory blocks. */
		};

	class DPagingRequest;

	//
	// Configuration and initialisation...
	//

	/**
	Tests whether ROM paging has been requested.
	*/
	static TBool RomPagingRequested();

	/**
	Tests whether code paging has been requested.
	*/
	static TBool CodePagingRequested();

	static DemandPaging* New();

	DemandPaging();

	virtual ~DemandPaging();

	/**
	Initialisation called during MmuBase::Init2.
	*/
	virtual void Init2();

	/**
	Initialisation called from M::DemandPagingInit.
	*/
	virtual TInt Init3();

	//
	// Live list management...
	//

	/**
	Called by a thread whenever it takes an exception.

	If this function returns KErrNone then the thread will continue execution at the
	instruction which caused the exception. Any other return value will cause the normal
	thread exception handling to continue.

	The implementation of this function should determine whether the exception was caused
	by access to pagable memory; if it wasn't, then it should return KErrUnknown.

	Otherwise it should perform the actions necessary to make the memory accessible
	and return KErrNone. The implementation should also call the thread's
	iPagingExcTrap->PagingException.
	*/
	virtual TInt Fault(TAny* aExceptionInfo)=0;

	/**
	Make a page in the live page list not accessible. (By changing its page table entries.)

	@pre System Lock held
	@post System Lock held (but may have been released by this function)
	*/
	virtual void SetOld(SPageInfo* aPageInfo)=0;

	/**
	Make a page in the live page list free to use for other purposes.
	I.e. unmap it from all of the page tables which map it and flush the cache.

	@pre RamAlloc mutex held
	@pre System Lock held
	@post System Lock held (but may have been released by this function)
	*/
	virtual void SetFree(SPageInfo* aPageInfo)=0;

	/**
	If the number of young pages exceeds that specified by iYoungOldRatio then a
	single page is made 'old'. Call this after adding a new 'young' page.

	@pre System Lock held
	@post System Lock left unchanged.
	*/
	void BalanceAges();
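
	/*
	Illustrative sketch (not the kernel's actual implementation) of the kind of transfer
	BalanceAges performs, expressed only in terms of members and methods declared in this
	class; the list-manipulation steps in angle brackets are assumed helpers:

		if(iYoungCount > TUint(iYoungOldRatio) * iOldCount)
			{
			SPageInfo* page = <last page on iYoungList>;		// least recently used young page
			<move page from iYoungList to the head of iOldList>;
			--iYoungCount;
			++iOldCount;
			SetOld(page);	// MMU mapping made inaccessible, so the next access faults
			}
	*/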

	/**
	Add a page to the head of the live page list. I.e. make it the 'youngest' page.

	@pre System Lock held
	@post System Lock left unchanged.
	*/
	void AddAsYoungest(SPageInfo* aPageInfo);

	/**
	Mark a page as unused (EPagedFree) and add it to the end of the live page list.
	I.e. make it the 'oldest' page, so that it is the first page to be reused.

	@pre System Lock held
	@post System Lock left unchanged.
	*/
	void AddAsFreePage(SPageInfo* aPageInfo);

	/**
	Remove a page from the live page list.
	It is set to the state EStatePagedDead.

	@pre System Lock held
	@post System Lock left unchanged.
	*/
	void RemovePage(SPageInfo* aPageInfo);

	/**
	Remove the oldest page from the live page list.
	The returned page is no longer mapped by any page table and is marked as unused.

	@pre System Lock held
	@post System Lock left unchanged.
	*/
	SPageInfo* GetOldestPage();

	/**
	Remove pages from the live page list and return them to the system's free pool. (Free them.)

	@param aNumPages The number of pages to free up.
	@return True if all pages could be freed, false otherwise.
	@pre RamAlloc mutex held.
	*/
	TBool GetFreePages(TInt aNumPages);

	/**
	Give a RAM cache page to the paging system for managing.
	This page of RAM may be reused for any purpose.
	If the page has already been donated then no action is taken.

	@param aPageInfo The page info for the donated page.

	@see ReclaimRamCachePage.

	@pre System Lock held
	@post System Lock left unchanged.
	*/
	void DonateRamCachePage(SPageInfo* aPageInfo);

	/**
	Attempt to reclaim a RAM cache page given to the paging system with #DonateRamCachePage.
	If the RAM page has not been reused for other purposes then the page is
	removed from the paging system's management.
	If the page has not previously been donated then no action is taken.

	@param aPageInfo The page info for the page to reclaim.

	@return True if the page was successfully reclaimed, false otherwise.

	@pre System Lock held
	@post System Lock left unchanged.
	*/
	TBool ReclaimRamCachePage(SPageInfo* aPageInfo);
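
	/*
	Illustrative usage sketch (hypothetical caller, not from the original source): how a RAM
	cache owner might donate a page to the pager and later try to take it back. The caller is
	assumed to hold the System Lock, as the preconditions above require.

		DemandPaging* pager = DemandPaging::ThePager;
		pager->DonateRamCachePage(pageInfo);		// pager may now reuse this page at any time
		// ... later ...
		if(pager->ReclaimRamCachePage(pageInfo))
			{
			// page contents still intact; it is no longer managed by the pager
			}
		else
			{
			// page was recycled for other data; the cached contents must be regenerated
			}
	*/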


	/**
	Check whether the specified page can be discarded by the RAM cache.

	@param aPageInfo The page info of the page being queried.
	@return ETrue when the page can be discarded, EFalse otherwise.
	@pre System lock held.
	@post System lock held.
	*/
	TBool IsPageDiscardable(SPageInfo& aPageInfo);

	/**
	Discard the specified page.
	Should only be called on a page if a previous call to IsPageDiscardable()
	returned ETrue and the system lock hasn't been released between the calls.
	The cache will not be reduced beyond the minimum size as a new page will
	be allocated if necessary.

	@param aPageInfo The page info of the page to be discarded.
	@param aBlockedZoneId The ID of the RAM zone that shouldn't be allocated into.
	@param aBlockRest Set to ETrue to stop allocation as soon as aBlockedZoneId is reached
	in preference ordering. EFalse otherwise.
	@return ETrue if the page could be discarded, EFalse otherwise.

	@pre System lock held.
	@post System lock held.
	*/
	TBool DoDiscardPage(SPageInfo& aPageInfo, TUint aBlockedZoneId, TBool aBlockRest);

	/**
	First stage in discarding a list of pages.

	Must ensure that the pages will still be discardable even if the system lock
	is released after this method has completed.
	To be used in conjunction with DemandPaging::DoDiscardPages1().

	@param aPageList A NULL terminated list of the pages to be discarded.
	@return KErrNone on success.

	@pre System lock held
	@post System lock held
	*/
	TInt DoDiscardPages0(SPageInfo** aPageList);


	/**
	Final stage in discarding a list of pages.
	Finish discarding the pages previously removed by DemandPaging::DoDiscardPages0().

	@param aPageList A NULL terminated list of the pages to be discarded.
	@return KErrNone on success.

	@pre System lock held
	@post System lock held
	*/
	TInt DoDiscardPages1(SPageInfo** aPageList);
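
	/*
	Illustrative sketch of the two-stage discard protocol (hypothetical caller, not from
	the original source); the array size of 4 is arbitrary:

		SPageInfo* pages[4];
		// ... fill pages[] with discardable pages and NULL terminate the list ...
		TInt r = DoDiscardPages0(pages);	// stage 0: pages stay discardable even if the
											// system lock is briefly released afterwards
		if(r == KErrNone)
			r = DoDiscardPages1(pages);		// stage 1: finish discarding the same list
	*/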

	/**
	Get a RAM page for use by a new page to be added to the live page list.
	This tries to obtain a RAM page from the following places:
	1. An unused page in the live page list.
	2. The system's free pool.
	3. The oldest page from the live page list.

	@pre Calling thread must be in a critical section.
	@pre System Lock held
	@post System Lock held
	*/
	SPageInfo* AllocateNewPage();
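
	/*
	Sketch (illustrative only) of the search order listed above, using only methods declared
	in this class; the first two steps are summarised as comments because they involve
	implementation details not visible in this header:

		// 1. Try an unused (EPagedFree) page already in the live page list.
		// 2. Otherwise try to allocate a fresh page from the system's free pool.
		// 3. Otherwise recycle the oldest live page:
		SPageInfo* page = GetOldestPage();
	*/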

	/**
	Move an old page to become the new youngest page. I.e. move it to the head of the live
	page list and use the MMU to mark it accessible.
	*/
	void Rejuvenate(SPageInfo* aPageInfo);

	/**
	Reserve one page for locking.
	Increments the reserved page count. May increase the size of the live list, and the minimum and
	maximum page counts. To unreserve a page, simply decrement the reserved page count.
	@return Whether the operation was successful.
	*/
	TBool ReservePage();

	/**
	Ensure all pages in the given region are present and 'lock' them so that they will not
	be paged out.
	To enable the pages to be paged out again, call UnlockRegion.
	@param aProcess The process to which the linear addresses refer, or NULL for global memory.
	@pre Paging mutex held
	*/
	TInt LockRegion(TLinAddr aStart,TInt aSize,DProcess* aProcess);

	/**
	Mark all pages in the given region as no longer locked.
	This reverses the action of LockRegion.
	@param aProcess The process to which the linear addresses refer, or NULL for global memory.
	*/
	TInt UnlockRegion(TLinAddr aStart,TInt aSize,DProcess* aProcess);
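
	/*
	Illustrative usage sketch (hypothetical driver code, not from the original source): pin a
	client buffer for the duration of a transfer and unpin it afterwards. aAddr, aSize and
	aProcess are assumed to describe the client buffer; the paging mutex is assumed to be held
	as LockRegion's precondition requires.

		TInt r = DemandPaging::ThePager->LockRegion(aAddr, aSize, aProcess);
		if(r == KErrNone)
			{
			// ... access the buffer; its pages cannot be paged out here ...
			DemandPaging::ThePager->UnlockRegion(aAddr, aSize, aProcess);
			}
	*/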

	/**
	Flush (unmap) all memory which is demand paged.
	This reduces the live page list to a minimum.
	*/
	void FlushAll();

	/**
	Page in the specified page and 'lock' it so it will not be paged out.
	To enable the page to be paged out again, call UnlockPage.
	@param aPage The linear address of the page to be locked.
	@param aProcess The process which the page is mapped in.
	@param aPhysAddr The physical address of the page which was locked.
	@pre System Lock held
	@post System Lock held (but may have been released by this function)
	*/
	TInt LockPage(TLinAddr aPage, DProcess* aProcess, TPhysAddr& aPhysAddr);

	/**
	Mark the specified page as no longer locked.
	This reverses the action of LockPage.
	@param aPage The linear address of the page to be unlocked.
	@param aProcess The process which the page is mapped in.
	@param aPhysAddr The physical address of the page which was originally locked. (Or KPhysAddrInvalid.)
	@pre System Lock held
	@post System Lock held
	*/
	TInt UnlockPage(TLinAddr aPage, DProcess* aProcess, TPhysAddr aPhysAddr);
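
	/*
	Illustrative sketch (hypothetical caller, not from the original source) of pairing
	LockPage with UnlockPage; pageAddr and process are assumed, and the System Lock is
	assumed to be held as the preconditions require.

		TPhysAddr phys;
		TInt r = LockPage(pageAddr, process, phys);		// page in and pin a single page
		if(r == KErrNone)
			{
			// ... use phys, e.g. hand it to hardware that needs a fixed physical address ...
			UnlockPage(pageAddr, process, phys);		// allow the page to be paged out again
			}
	*/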

	/**
	Implementation of DDemandPagingLock::Alloc
	*/
	TInt ReserveAlloc(TInt aSize, DDemandPagingLock& aLock);

	/**
	Implementation of DDemandPagingLock::Free
	*/
	void ReserveFree(DDemandPagingLock& aLock);

	/**
	Implementation of DDemandPagingLock::Lock
	*/
	TBool ReserveLock(DThread* aThread, TLinAddr aStart, TInt aSize, DDemandPagingLock& aLock);

	/**
	Implementation of DDemandPagingLock::Unlock
	*/
	void ReserveUnlock(DDemandPagingLock& aLock);
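
	/*
	Sketch (illustrative only, based solely on the mapping stated in the comments above) of
	how a DDemandPagingLock client ends up driving these methods; the exact signatures of
	DDemandPagingLock's members are assumed:

		DDemandPagingLock lock;
		if(lock.Alloc(aSize) == KErrNone)				// -> DemandPaging::ReserveAlloc
			{
			if(lock.Lock(aThread, aStart, aSize))		// -> DemandPaging::ReserveLock
				{
				// paged memory in [aStart, aStart + aSize) is pinned here
				lock.Unlock();							// -> DemandPaging::ReserveUnlock
				}
			lock.Free();								// -> DemandPaging::ReserveFree
			}
	*/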

	/**
	Ensure a page is present, paging it in if necessary. Used in the implementation of LockPage.
	@param aPage The linear address of the page.
	@param aProcess The process the page is mapped in.
	*/
	virtual TInt EnsurePagePresent(TLinAddr aPage, DProcess* aProcess)=0;

	/**
	Get the physical address of a page. Used in the implementation of LockPage and UnlockPage.
	@param aPage The linear address of the page.
	@param aProcess The process the page is mapped in.
	*/
	virtual TPhysAddr LinearToPhysical(TLinAddr aPage, DProcess* aProcess)=0;

	/**
	Install the specified paging device.
	@param aDevice The device.
	@return KErrNone or standard error code.
	@post The device's DPagingDevice::iDeviceId has been set.
	*/
	TInt InstallPagingDevice(DPagingDevice* aDevice);

	/**
	Pure virtual function to allocate the virtual address space for temporary page mapping, for a
	paging request object. This is called by DoInstallPagingDevice after the object is created.
	@param aReq The paging request object.
	@param aReqId A small integer unique to the supplied paging request object.
	*/
	virtual void AllocLoadAddress(DPagingRequest& aReq, TInt aReqId)=0;

	/**
	Notify the paging system that a page of physical RAM that was used for demand paging is no
	longer mapped into any processes and is about to be freed. This is called on the multiple
	memory model when a code segment is unloaded. It is not implemented on the moving memory model.
	*/
	virtual void NotifyPageFree(TPhysAddr aPage)=0;

	/**
	Called when a realtime thread takes a paging fault.
	Checks whether it is OK for the thread to take the fault.
	@return KErrNone if the paging fault should be further processed.
	*/
	TInt CheckRealtimeThreadFault(DThread* aThread, TAny* aContext);

	/**
	Memory-model specific method to indicate if an address range might contain paged memory.

	Implementations may return false positives but not false negatives - in other words this method
	may say the range contains paged memory when it does not, but not the other way around.

	This is used when pinning to determine whether memory could actually be paged.
	*/
	virtual TBool MayBePaged(TLinAddr aStartAddr, TUint aLength);

public:
	TUint iMinimumPageCount;	/**< Minimum size for the live page list, including locked pages */
	TUint iMaximumPageCount;	/**< Maximum size for the live page list, including locked pages */
	TUint16 iYoungOldRatio;		/**< Ratio of young to old pages in the live page list */
	SDblQue iYoungList;			/**< Head of 'young' page list. */
	TUint iYoungCount;			/**< Number of young pages */
	SDblQue iOldList;			/**< Head of 'old' page list. */
	TUint iOldCount;			/**< Number of old pages */
	TUint iReservePageCount;	/**< Number of pages reserved for locking */
	TUint iMinimumPageLimit;	/**< Minimum size for iMinimumPageCount, not including locked pages.
									 iMinimumPageCount >= iMinimumPageLimit + iReservePageCount */

	TLinAddr iTempPages;		/**< Uncached memory location in kernel memory which may be used
									 to map RAM pages whilst they are being paged in. */

	static DemandPaging* ThePager;	/**< Pointer to the single instance of this class */

	TUint iInitMinimumPageCount;	/**< Initial value for iMinimumPageCount */
	TUint iInitMaximumPageCount;	/**< Initial value for iMaximumPageCount */

	TLinAddr iRomLinearBase;		/**< Linear address of ROM start. */
	TUint iRomSize;					/**< ROM size in bytes. */
	TLinAddr iRomPagedLinearBase;	/**< Linear address for the start of pagable ROM. */
	TUint iRomPagedSize;			/**< The size of pagable ROM in bytes.
										 (Zero indicates ROM is not pagable.) */
	SRomPageInfo* iRomPageIndex;	/**< Pointer to ROM page index. */

	TLinAddr iCodeLinearBase;		/**< Linear address of start of user code area. */
	TUint iCodeSize;				/**< Size of user code area in bytes. */

public:
	//
	// Paging device management...
	//

	/**
	Information for a paging device.
	*/
	struct SPagingDevice
		{
		TBool iInstalled;		/**< True, if this device has been installed. */
		DPagingDevice* iDevice;	/**< Pointer to device object */
		};

	TInt DoInstallPagingDevice(DPagingDevice* aDevice, TInt aId);
	TInt ReadRomPage(const DPagingRequest* aReq, TLinAddr aRomAddress);
	TInt ReadCodePage(const DPagingRequest* aReq, DMmuCodeSegMemory* aCodeSegMemory, TLinAddr aCodeAddress);
	TInt Decompress(TInt aCompressionType,TLinAddr aDst,TLinAddr aSrc,TUint aSrcSize);

	inline SPagingDevice& RomPagingDevice()
		{ return iPagingDevices[0]; }

	inline SPagingDevice& CodePagingDevice(TInt aLocalDriveNumber)
		{ return iPagingDevices[aLocalDriveNumber + 1]; }
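
	/*
	Illustrative note (not in the original header): the index convention implied by the two
	accessors above is

		iPagingDevices[0]                     -> ROM paging device  (RomPagingDevice())
		iPagingDevices[aLocalDriveNumber + 1] -> code paging device for that local drive
		                                         (CodePagingDevice(aLocalDriveNumber))

	so, for example, CodePagingDevice(2) refers to iPagingDevices[3].
	*/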

public:
	SPagingDevice iPagingDevices[KMaxPagingDevices];	/**< Array of paging devices. The first device is used for ROM paging. */
	DChunk* iDeviceBuffersChunk;	/**< Shared Chunk used to contain buffers for paging devices */
	TLinAddr iDeviceBuffers;		/**< Address for start of iDeviceBuffersChunk */
	TUint iDeviceBufferSize;		/**< Size of each individual buffer within iDeviceBuffers */

public:
	//
	// Paging request management...
	//

	/**
	Resources needed to service a paging request.
	*/
	class DPagingRequest : public SDblQueLink
		{
	public:
		~DPagingRequest();
	public:
		TThreadMessage	iMessage;	/**< Used by the media driver to queue requests */
		DMutex*			iMutex;		/**< A mutex for synchronisation and priority inheritance. */
		TInt			iUsageCount;/**< How many threads are using or waiting for this object. */
		TLinAddr		iBuffer;	/**< A two-page buffer to read compressed data into. */
		TLinAddr		iLoadAddr;	/**< Virtual address to map page at while it's being loaded. */
		TPte*			iLoadPte;	/**< PTE corresponding to iLoadAddr. */
		};

	/**
	Creates a new DPagingRequest object and adds it to the list and free pool.
	Called from DoInstallPagingDevice.
	*/
	TInt CreateRequestObject();

	/**
	Get a paging request object, waiting if necessary for one to become available.
	@pre The system lock must be held.
	*/
	DPagingRequest* AcquireRequestObject();

	/**
	Release a previously acquired paging request object.
	@pre The system lock must be held.
	*/
	void ReleaseRequestObject(DPagingRequest* aReq);
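
	/*
	Sketch (illustrative only, not from the original source) of the acquire/use/release
	pattern for paging request objects; the system lock is assumed to be held as the
	preconditions above require:

		DPagingRequest* req = AcquireRequestObject();	// may wait for a free object
		// ... use req->iLoadAddr / req->iBuffer to read and map the faulting page ...
		ReleaseRequestObject(req);
	*/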

public:
	/** Count of the number of paging requests created. */
	TUint iPagingRequestCount;

	/** Array of paging request objects. */
	DPagingRequest* iPagingRequests[KMaxPagingRequests];

	/** Pool of unused paging request objects. */
	SDblQue iFreeRequestPool;

	/**
	Count of the number of paging requests created or currently being created. Used to allocate
	request object IDs and to communicate the eventual paging request count to ResizeLiveList.
	*/
	TInt iNextPagingRequestCount;


public:
	//
	// Test and debug...
	//

	/**
	Resize the live page list.
	*/
	TInt ResizeLiveList(TUint aMinimumPageCount,TUint aMaximumPageCount);

	/**
	Return state information about a page of memory at the given address in the current process.
	*/
	virtual TInt PageState(TLinAddr aAddr)=0;

	/**
	Debug check to see if the current thread can safely acquire the PagingMutex.
	Use this check in locations where paged memory may be accessed. It will detect code which
	would fault if paged memory were accessed.
	@return The held mutex that prohibits acquiring the PagingMutex, or NULL.
	*/
	DMutex* CheckMutexOrder();

	/**
	Memory-model specific method to indicate if a read from an address range requires a mutex order
	check. Used in the implementation of CheckMutexOrder.
	*/
	virtual TBool NeedsMutexOrderCheck(TLinAddr aStartAddr, TUint aLength)=0;

	/**
	Fault the system.
	*/
	static void Panic(TFault aFault);

private:

	/**
	Non-cryptographically secure linear congruential pseudo random number generator -
	developed for speed with the use of a single multiply accumulate instruction.
	Derived from section "7.8 RANDOM NUMBER GENERATION" in the ARM System Developer's
	Guide.
	*/
	static TUint32 FastPseudoRand()
		{
		// Make sure the seed has been lazily initialised.
		FastPseudoRandomise();

		TUint32 oldX;
		TUint32 newX;

		// Keep trying to generate the next value in the pseudo random sequence until we
		// are sure no race has been caused by another thread which has entered the same
		// code.
		do
			{
			oldX = PseudoRandSeed;
			newX = 69069 * oldX + 41; // should compile to a single multiply accumulate instruction under ARM
			}
		while(!__e32_atomic_cas_acq32(&PseudoRandSeed, &oldX, newX));

		return newX;
		}

	/**
	Initialises the seed value for the pseudo random number generator.
	*/
	static void FastPseudoRandomise()
		{
		// Create the initial seed value for the pseudo random number generator using
		// the current system time.
		if(!PseudoRandInitialised) // race-prone but harmless - worst that can happen is that the seed is initialised more than once until PseudoRandInitialised is set to true
			{
			Int64 t = Kern::SystemTime();
			PseudoRandSeed = (TUint32)t ^ (TUint32)(t >> 32); // combine the two words for maximum entropy

			PseudoRandInitialised = ETrue;
			}
		}
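
	/*
	Worked example (illustrative only, not from the original source) of the recurrence used
	by FastPseudoRand, plus one hypothetical use. Starting from a seed of 1:

		x1 = 69069*1 + 41     = 69110
		x2 = 69069*69110 + 41 = 4773358631 mod 2^32 = 478391335

	A typical (assumed) use would be to pick a random victim among the old pages:

		TUint32 victim = FastPseudoRand() % iOldCount;	// requires iOldCount != 0
	*/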

public:
#ifdef __SUPPORT_DEMAND_PAGING_EMULATION__
	TInt iOriginalRomPageCount;
	TPhysAddr* iOriginalRomPages;
#endif

	SVMEventInfo iEventInfo;

#ifdef __CONCURRENT_PAGING_INSTRUMENTATION__
	TInt iWaitingCount;		///< number of threads waiting to acquire request object
	TInt iPagingCount;		///< number of threads holding request object
	TInt iMaxWaitingCount;	///< maximum historical value of iWaitingCount
	TInt iMaxPagingCount;	///< maximum historical value of iPagingCount
#endif

#ifdef __DEMAND_PAGING_BENCHMARKS__
	void RecordBenchmarkData(TPagingBenchmark aBm, TUint32 aStartTime, TUint32 aEndTime);
	void ResetBenchmarkData(TPagingBenchmark aBm);
	SPagingBenchmarkInfo iBenchmarkInfo[EMaxPagingBm];
#endif

private:
	static TBool PseudoRandInitialised;		// flag to check whether FastPseudoRand has been lazily initialised yet
	static volatile TUint32 PseudoRandSeed;	// current random seed for FastPseudoRand()
	};

#ifdef __DEMAND_PAGING_BENCHMARKS__

#define START_PAGING_BENCHMARK TUint32 _bmStart = NKern::FastCounter()
#define END_PAGING_BENCHMARK(pager, bm) pager->RecordBenchmarkData(bm, _bmStart, NKern::FastCounter())
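
// Illustrative usage of the benchmark macros (hypothetical call site; the benchmark id
// EPagingBmReadRomPage is assumed here purely for illustration):
//
//     START_PAGING_BENCHMARK;
//     TInt r = ReadRomPage(req, romAddress);               // the operation being timed
//     END_PAGING_BENCHMARK(DemandPaging::ThePager, EPagingBmReadRomPage);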

#else

#define START_PAGING_BENCHMARK
#define END_PAGING_BENCHMARK(pager, bm)

#endif