// Copyright (c) 2007-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
//

#include <plat_priv.h>
#include "mm.h"
#include "mmu.h"

#include "mpagearray.h"
#include "mslaballoc.h"


static RStaticSlabAllocator<RPageArray::TSegment,KPageArraySegmentBase,KPageArraySegmentEnd> PageSegmentAllocator;
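
// Note: TSegment objects are carved from the fixed kernel virtual region
// [KPageArraySegmentBase,KPageArraySegmentEnd) by the slab allocator above;
// Init2B() below attaches the memory object that provides its backing store.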

//
// RPageArray::TSegment
//

RPageArray::TSegment* RPageArray::TSegment::New()
    {
    __NK_ASSERT_DEBUG(!MmuLock::IsHeld());

    // allocate segment...
    TSegment* s = PageSegmentAllocator.Alloc();
    if(!s)
        return s;

    // initialise segment...
    s->iCounts = 1; // lock count = 1, alloc count = 0
    TPhysAddr* p = s->iPages;
    TPhysAddr* pEnd = p+KPageArraySegmentSize;
    TPhysAddr nullPage = EEmptyEntry;
    do *p++ = nullPage;
    while(p<pEnd);

    return s;
    }
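
// A sketch of the iCounts packing assumed throughout this file: the lock
// count occupies the low bits (selected by KPageArraySegmentLockCountMask)
// and the alloc count the high bits (at KPageArraySegmentAllocCountShift),
// so a single atomic add can adjust either field. E.g., assuming a 16-bit
// lock field (illustrative only; the real layout is defined in mpagearray.h)
// iCounts==0x00030001 would mean alloc count 3, lock count 1. New() above
// returns with lock count 1 and alloc count 0.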

RPageArray::TSegment* RPageArray::TSegment::Delete(TSegment* aSegment)
    {
    __NK_ASSERT_DEBUG(MmuLock::IsHeld());
    __NK_ASSERT_DEBUG(aSegment->iCounts==0);
#ifdef _DEBUG
    TPhysAddr* p = aSegment->iPages;
    TPhysAddr* pEnd = p+KPageArraySegmentSize;
    do
        {
        TPhysAddr a = *p++;
        if(IsPresent(a))
            {
            Kern::Printf("TSegment Delete with allocated pages! [%d]=0x%08x",p-aSegment->iPages-1,a);
            __NK_ASSERT_DEBUG(0);
            }
        }
    while(p<pEnd);
#endif
    PageSegmentAllocator.Free(aSegment);
    return 0;
    }

FORCE_INLINE void RPageArray::TSegment::Lock(TUint aCount)
    {
    __NK_ASSERT_DEBUG(MmuLock::IsHeld());
    __e32_atomic_add_ord32(&iCounts, (TUint32)aCount);
    __NK_ASSERT_DEBUG((iCounts&KPageArraySegmentLockCountMask));
    }


/**
@return True if segment still exists, false if segment was deleted.
*/
TBool RPageArray::TSegment::Unlock(TSegment*& aSegment, TUint aCount)
    {
    __NK_ASSERT_DEBUG(MmuLock::IsHeld());

    TSegment* s = aSegment;
    __NK_ASSERT_DEBUG(s);

    TUint oldCounts = (TUint)__e32_atomic_add_ord32(&s->iCounts, (TUint32)-(TInt)aCount);
    __NK_ASSERT_DEBUG(oldCounts&KPageArraySegmentLockCountMask); // lock count must have been non-zero before decrementing

#ifdef _DEBUG
    if((oldCounts&KPageArraySegmentLockCountMask)==aCount)
        {
        // check alloc count is consistent...
        TUint allocCount = s->iCounts>>KPageArraySegmentAllocCountShift;
        __NK_ASSERT_DEBUG(allocCount<=KPageArraySegmentSize);
        TUint realAllocCount = 0;
        TPhysAddr* p = s->iPages;
        TPhysAddr* pEnd = p+KPageArraySegmentSize;
        do
            {
            if(IsPresent(*p++))
                ++realAllocCount;
            }
        while(p<pEnd);
        if(realAllocCount!=allocCount)
            {
            Kern::Printf("TSegment::Unlock alloc count mismatch %u!=%u",realAllocCount,allocCount);
            __NK_ASSERT_DEBUG(0);
            }
        }
#endif

    if(oldCounts>1)
        return oldCounts; // return 'true' to indicate segment still exists

    // delete segment...
    aSegment = 0;
    return (TBool)Delete(s); // returns 'false'
    }
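
// Note on the Unlock() return path above: Delete() is reached only when the
// whole counts word was exactly 1 before the decrement, i.e. the final lock
// is being dropped while the alloc count is zero; any other value means the
// segment must survive this unlock.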

FORCE_INLINE void RPageArray::TSegment::AdjustAllocCount(TInt aDelta)
    {
    __NK_ASSERT_DEBUG((iCounts&KPageArraySegmentLockCountMask));
    __e32_atomic_add_ord32(&iCounts, TUint32(aDelta)<<KPageArraySegmentAllocCountShift);
    }


#ifdef _DEBUG
void RPageArray::TSegment::Dump()
    {
    TUint allocCount = iCounts>>KPageArraySegmentAllocCountShift;
    TUint lockCount = iCounts&KPageArraySegmentLockCountMask;
    Kern::Printf("RPageArray::TSegment[0x%08x]::Dump() allocCount=%d lockCount=%d",this,allocCount,lockCount);
    for(TUint i=0; i<KPageArraySegmentSize; i+=4)
        Kern::Printf(" %08x %08x %08x %08x",iPages[i+0],iPages[i+1],iPages[i+2],iPages[i+3]);
    }
#endif

//
// RPageArray::TIter
//

TUint RPageArray::TIter::Pages(TPhysAddr*& aStart, TUint aMaxCount)
    {
    // MmuLock *may* be needed, depending on whether the segments have been locked

    TUint index = iIndex;
    TUint size = iEndIndex-index;
    if(!size)
        return 0;

    TUint offset = index&KPageArraySegmentMask;
    aStart = iSegments[index>>KPageArraySegmentShift]->iPages+offset;

    TUint n = KPageArraySegmentSize-offset;
    if(n>aMaxCount)
        n = aMaxCount;
    if(n>size)
        n = size;
    return n;
    }
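
// Note: Pages() never returns a run that crosses a segment boundary, so a
// caller covering a larger range is expected to loop, consuming up to the
// returned count and skipping forward, until the whole range has been walked.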

TUint RPageArray::TIter::AddFind(TIter& aPageList)
    {
    TRACE2(("RPageArray::TIter::AddFind range 0x%x..0x%x",iIndex,iEndIndex));

    TUint index = iIndex;
    TUint endIndex = iEndIndex;
    if(index==endIndex)
        {
nothing_found:
        aPageList.iIndex = endIndex;
        aPageList.iEndIndex = endIndex;
        TRACE2(("RPageArray::TIter::AddFind returns 0x%x+0x%x",iEndIndex,0));
        return 0;
        }

    TSegment** pS = iSegments+(index>>KPageArraySegmentShift);
    TPhysAddr* p;
    TUint limit;

    MmuLock::Lock();

    // scan for empty entries...
    do
        {
        // get segment...
        p = (*pS++)->iPages+(index&KPageArraySegmentMask);
        TUint nextIndex = (index+KPageArraySegmentSize)&~KPageArraySegmentMask;
        limit = (nextIndex<endIndex) ? nextIndex : endIndex;
        // scan segment...
        do
            {
            TPhysAddr page = *p;
            if(!IsPresent(page))
                goto find_start;
#ifdef _DEBUG
            if(State(page)!=ECommitted)
                {
                Kern::Printf("RPageArray::TIter::AddFind found unexpected page: %x",page);
                __NK_ASSERT_DEBUG(0);
                // *p = (page&~(EStateMask|EVetoed))|ECommitted; // mark page as allocated again
                }
#endif
            ++p;
            }
        while(++index<limit);

        MmuLock::Flash();
        }
    while(index<endIndex);

    MmuLock::Unlock();
    goto nothing_found;

find_start:
    TUint startIndex = index;
    // scan for end of empty region...
    for(;;)
        {
        // scan segment...
        do
            {
            if(IsPresent(*p++))
                goto find_end;
            }
        while(++index<limit);
        // check for end...
        if(index>=endIndex)
            break;
        MmuLock::Flash();
        // get next segment...
        p = (*pS++)->iPages;
        TUint nextIndex = index+KPageArraySegmentSize;
        limit = (nextIndex<endIndex) ? nextIndex : endIndex;
        }

find_end:
    MmuLock::Unlock();

    aPageList.iSegments = iSegments;
    aPageList.iIndex = startIndex;
    aPageList.iEndIndex = index;

    iIndex = index;
    TUint n = index-startIndex;
    TRACE2(("RPageArray::TIter::AddFind returns 0x%x+0x%x",startIndex,n));
    return n;
    }
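
// A sketch of the intended calling pattern (hypothetical caller code, not
// part of this file): once AddStart() has pinned a range, a loop such as
//
//   RPageArray::TIter pageIter;
//   TUint n;
//   while((n=iter.AddFind(pageIter))!=0)
//       pageIter.Add(n,pages);   // or pageIter.AddContiguous(n,physAddr)
//
// fills each run of empty entries that AddFind() locates.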

void RPageArray::TIter::Add(TUint aCount, TPhysAddr* aPages)
    {
    // MmuLock NOT required because...
    // 1. AddStart has ensured all segments are allocated and locked (so they can't be deleted)
    // 2. AddFind returns an unallocated region. This can only be changed by Adding pages
    //    and we only allow one thread to do this at a time (i.e. the thread calling this function.)

    TRACE2(("RPageArray::TIter::Add 0x%x+0x%x",iIndex,aCount));
    __NK_ASSERT_DEBUG(aCount);

    TUint index = iIndex;
    TUint endIndex = index+aCount;
    TSegment** pS = iSegments+(index>>KPageArraySegmentShift);
    do
        {
        // get segment...
        TSegment* s = *pS++;
        TPhysAddr* p = s->iPages+(index&KPageArraySegmentMask);
        TUint nextIndex = (index+KPageArraySegmentSize)&~KPageArraySegmentMask;
        TUint limit = (nextIndex<endIndex) ? nextIndex : endIndex;

        // add pages to segment...
        s->AdjustAllocCount(limit-index);
        do
            {
            __NK_ASSERT_DEBUG((*aPages&KPageMask)==0);
            __NK_ASSERT_DEBUG(!IsPresent(*p)); // AddFind only found not-present entries
            *p++ = *aPages++|ECommitted;
            }
        while(++index<limit);
        }
    while(index<endIndex);

    iIndex = index;
    }

void RPageArray::TIter::AddContiguous(TUint aCount, TPhysAddr aPhysAddr)
    {
    // MmuLock NOT required because...
    // 1. AddStart has ensured all segments are allocated and locked (so they can't be deleted)
    // 2. AddFind returns an unallocated region. This can only be changed by Adding pages
    //    and we only allow one thread to do this at a time (i.e. the thread calling this function.)

    TRACE2(("RPageArray::TIter::AddContiguous 0x%x+0x%x",iIndex,aCount));
    __NK_ASSERT_DEBUG(aCount);
    __NK_ASSERT_DEBUG((aPhysAddr&KPageMask)==0);

    TUint index = iIndex;
    TUint endIndex = index+aCount;
    TSegment** pS = iSegments+(index>>KPageArraySegmentShift);

    do
        {
        // get segment...
        TSegment* s = *pS++;
        TPhysAddr* p = s->iPages+(index&KPageArraySegmentMask);
        TUint nextIndex = (index+KPageArraySegmentSize)&~KPageArraySegmentMask;
        TUint limit = (nextIndex<endIndex) ? nextIndex : endIndex;

        // add pages to segment...
        s->AdjustAllocCount(limit-index);
        do
            {
            __NK_ASSERT_DEBUG(!IsPresent(*p)); // AddFind only found not-present entries
            *p++ = aPhysAddr|ECommitted;
            aPhysAddr += KPageSize;
            }
        while(++index<limit);
        }
    while(index<endIndex);

    iIndex = index;
    }
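
// As the two Add variants above show, an array entry packs the page-aligned
// physical address together with state flags such as ECommitted in the low
// KPageMask bits; IsPresent() and State() decode the same packing when
// entries are read back.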

void RPageArray::TIter::Added(TUint aCount, TUint aChanged)
    {
    __NK_ASSERT_DEBUG(aCount);
    __NK_ASSERT_DEBUG(aChanged<=aCount);
    TUint index = iIndex;
    __NK_ASSERT_DEBUG((index>>KPageArraySegmentShift)==((index+aCount-1)>>KPageArraySegmentShift));
    TSegment* s = iSegments[index>>KPageArraySegmentShift];
    __NK_ASSERT_DEBUG(s);
    __NK_ASSERT_DEBUG(s->iCounts&KPageArraySegmentLockCountMask);
    s->AdjustAllocCount(aChanged);
    Skip(aCount);
    }

TUint RPageArray::TIter::Find(TIter& aPageList)
    {
    TRACE2(("RPageArray::TIter::Find range 0x%x..0x%x",iIndex,iEndIndex));

    MmuLock::Lock();
    TUint index = iIndex;
    TUint endIndex = iEndIndex;
    TSegment** pS = iSegments+(index>>KPageArraySegmentShift);
    TUint nextIndex = (index+KPageArraySegmentSize)&~KPageArraySegmentMask;

    // search for first page...
    while(index<endIndex)
        {
        TSegment* s = *pS;
        if(!s)
            index = nextIndex;
        else
            {
            // search segment...
            TPhysAddr* p = s->iPages+(index&KPageArraySegmentMask);
            TUint limit = (nextIndex<endIndex) ? nextIndex : endIndex;
            do
                {
                if(RPageArray::IsPresent(*p++))
                    goto start_done;
                }
            while(++index<limit);
            }
        // next segment...
        MmuLock::Flash();
        ++pS;
        nextIndex = index+KPageArraySegmentSize;
        }
start_done:
    // we can't flash or release the MmuLock until we've Locked the segment we found!
    iIndex = index;

    // search for range of allocated pages...
    while(index<endIndex)
        {
        // check first entry...
        TSegment* s = *pS;
        if(!s)
            break;
        TPhysAddr* p = s->iPages+(index&KPageArraySegmentMask);
        if(!RPageArray::IsPresent(*p++))
            break;

        // segment has pages, lock it...
        s->Lock();

        // scan rest of entries...
        TUint nextIndex = (index+KPageArraySegmentSize)&~KPageArraySegmentMask;
        TUint limit = (nextIndex<endIndex) ? nextIndex : endIndex;
        while(++index<limit)
            if(!RPageArray::IsPresent(*p++))
                goto done;

        // next segment...
        MmuLock::Flash();
        ++pS;
        }
done:
    MmuLock::Unlock();

    aPageList.iSegments = iSegments;
    aPageList.iIndex = iIndex;
    aPageList.iEndIndex = index;
    TInt n = index-iIndex;
    TRACE2(("RPageArray::TIter::Find returns 0x%x+0x%x",iIndex,n));
    return n;
    }
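
// Note: Find() leaves every segment in the run it returns with an extra lock
// (so the entries stay valid without the MmuLock); the matching FindRelease()
// below must be called to drop those locks once the caller has finished with
// the run.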

void RPageArray::TIter::FindRelease(TUint aCount)
    {
    TUint index = iIndex;
    Skip(aCount);
    RPageArray::Release(iSegments,index,aCount);
    }

TUint RPageArray::TIter::RemoveFind(TIter& aPageList)
    {
    TRACE2(("RPageArray::TIter::RemoveFind range 0x%x..0x%x",iIndex,iEndIndex));

    MmuLock::Lock();

    TUint index = iIndex;
    TUint endIndex = iEndIndex;
    TSegment** pS = iSegments+(index>>KPageArraySegmentShift);
    TUint nextIndex = (index+KPageArraySegmentSize)&~KPageArraySegmentMask;

    // search for first page...
    while(index<endIndex)
        {
        TSegment* s = *pS;
        if(!s)
            index = nextIndex;
        else
            {
            // search segment...
            TPhysAddr* p = s->iPages+(index&KPageArraySegmentMask);
            TUint limit = (nextIndex<endIndex) ? nextIndex : endIndex;
            do
                {
                if(State(*p++)>=EDecommitting)
                    goto start_done;
                }
            while(++index<limit);
            }

        // next segment...
        MmuLock::Flash();
        ++pS;
        nextIndex = index+KPageArraySegmentSize;
        }
start_done:
    // we can't flash or release the MmuLock until we've Locked the segment we found!
    iIndex = index;

    // search for range of allocated pages, marking them EDecommitting...
    while(index<endIndex)
        {
        // check first entry...
        TSegment* s = *pS;
        if(!s)
            break;
        TPhysAddr* p = s->iPages+(index&KPageArraySegmentMask);
        TPhysAddr page = *p++;
        if(State(page)<EDecommitting)
            break;

        p[-1] = (page&~EStateMask)|EDecommitting;

        // segment has pages, lock it...
        s->Lock();

        // scan rest of entries...
        TUint nextIndex = (index+KPageArraySegmentSize)&~KPageArraySegmentMask;
        TUint limit = (nextIndex<endIndex) ? nextIndex : endIndex;
        while(++index<limit)
            {
            TPhysAddr page = *p++;
            if(State(page)<EDecommitting)
                goto done;
            p[-1] = (page&~EStateMask)|EDecommitting;
            }

        // next segment...
        MmuLock::Flash();
        ++pS;
        }
done:
    MmuLock::Unlock();

    aPageList.iSegments = iSegments;
    aPageList.iIndex = iIndex;
    aPageList.iEndIndex = index;
    TInt n = index-iIndex;
    TRACE2(("RPageArray::TIter::RemoveFind returns 0x%x+0x%x",iIndex,n));
    return n;
    }

TUint RPageArray::TIter::Remove(TUint aMaxCount, TPhysAddr* aPages)
    {
    TRACE2(("RPageArray::TIter::Remove 0x%x..0x%x max=0x%x",iIndex,iEndIndex,aMaxCount));

    __NK_ASSERT_DEBUG(aMaxCount);

    TUint count = 0;
    TUint index = iIndex;
    TUint endIndex = iEndIndex;
    if(index==endIndex)
        return 0;

    TSegment** pS = iSegments+(index>>KPageArraySegmentShift);

    MmuLock::Lock();

    do
        {
        // get segment...
        TSegment* s = *pS++;
        TPhysAddr* p = s->iPages+(index&KPageArraySegmentMask);
        TUint nextIndex = (index+KPageArraySegmentSize)&~KPageArraySegmentMask;
        TUint limit = (nextIndex<endIndex) ? nextIndex : endIndex;

        // remove pages from segment...
        do
            {
            TPhysAddr page = *p++;
            __NK_ASSERT_DEBUG(State(page)!=EStealing); // can't be stealing as that only happens with the RamAllocLock held, which we should already hold if freeing demand paged pages
            if(State(page)==EDecommitting || State(page)==EDecommitted)
                {
                // remove a page...
                if(page&EUnmapVetoed)
                    {
                    p[-1] = (page&~(EUnmapVetoed|EStateMask))|EDecommitted; // change to EDecommitted state
                    }
                else
                    {
                    p[-1] = EEmptyEntry;
                    s->AdjustAllocCount(-1);
                    TPhysAddr pagePhys = page&~KPageMask;
                    aPages[count++] = pagePhys;
                    TRACE2(("RPageArray::TIter::Remove index=0x%x returns 0x%08x",index,pagePhys));
                    if(count>=aMaxCount)
                        {
                        ++index;
                        goto done;
                        }
                    }
                // check not removing managed pages without the RamAllocLock...
                __NK_ASSERT_DEBUG(RamAllocLock::IsHeld()
                                  || SPageInfo::FromPhysAddr(page)->Type()!=SPageInfo::EManaged);
                }
            }
        while(++index<limit);

        MmuLock::Flash();
        }
    while(index<endIndex);

done:
    MmuLock::Unlock();
    iIndex = index;
    return count;
    }
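
// Note the veto handling in Remove() above: an entry flagged EUnmapVetoed is
// not freed; it is left in the array in the EDecommitted state so that a
// later pass can retry the removal once the veto has been cleared.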

void RPageArray::TIter::VetoUnmap()
    {
    TUint index = iIndex;
    TUint endIndex = iEndIndex;
    if(index==endIndex)
        return;

    TSegment** pS = iSegments+(index>>KPageArraySegmentShift);

    MmuLock::Lock();

    do
        {
        // get segment...
        TSegment* s = *pS++;
        TPhysAddr* p = s->iPages+(index&KPageArraySegmentMask);
        TUint nextIndex = (index+KPageArraySegmentSize)&~KPageArraySegmentMask;
        TUint limit = (nextIndex<endIndex) ? nextIndex : endIndex;

        // veto pages in segment...
        do
            {
            TPhysAddr page = *p++;
            TRACE2(("RPageArray::TIter::VetoUnmap() yes/no=%d page=0x%08x",IsPresent(page) && TargetStateIsDecommitted(page),page));
            if(IsPresent(page) && TargetStateIsDecommitted(page))
                p[-1] = page|EUnmapVetoed;
            }
        while(++index<limit);

        MmuLock::Flash();
        }
    while(index<endIndex);

    MmuLock::Unlock();
    }

void RPageArray::TIter::VetoRestrict(TBool aPageMoving)
    {
    TUint index = iIndex;
    TUint endIndex = iEndIndex;
    if(index==endIndex)
        return;

    RPageArray::TState operation = aPageMoving ? RPageArray::EMoving : RPageArray::ERestrictingNA;

    TSegment** pS = iSegments+(index>>KPageArraySegmentShift);

    MmuLock::Lock();

    do
        {
        // get segment...
        TSegment* s = *pS++;
        TPhysAddr* p = s->iPages+(index&KPageArraySegmentMask);
        TUint nextIndex = (index+KPageArraySegmentSize)&~KPageArraySegmentMask;
        TUint limit = (nextIndex<endIndex) ? nextIndex : endIndex;

        // veto pages in segment...
        do
            {
            TPhysAddr page = *p++;
            TRACE2(("RPageArray::TIter::VetoRestrict() yes/no=%d page=0x%08x",State(page)==operation,page));
            if(State(page)==operation)
                {
                // to veto a 'restrict page' operation, we put the page back into the committed state...
                p[-1] = (page&~EStateMask)|ECommitted;
                }
            }
        while(++index<limit);

        MmuLock::Flash();
        }
    while(index<endIndex);

    MmuLock::Unlock();
    }

FORCE_INLINE void RPageArray::TIter::Set(RPageArray::TSegment** aSegments, TUint aIndex, TUint aEndIndex)
    {
    iSegments = aSegments;
    iIndex = aIndex;
    iEndIndex = aEndIndex;
    }

//
// RPageArray
//

void RPageArray::Init2A()
    {
    TInt r = PageSegmentAllocator.Construct();
    __NK_ASSERT_ALWAYS(r==KErrNone);
    }


void RPageArray::Init2B(DMutex* aLock)
    {
    // construct memory object for slabs...
    DMemoryObject* memory;
    TMappingCreateFlags mapFlags = (TMappingCreateFlags)(EMappingCreateFixedVirtual|EMappingCreateReserveAllResources);
    TMemoryAttributes memAttr = EMemoryAttributeStandard;
    TInt r = MM::InitFixedKernelMemory(memory, KPageArraySegmentBase, KPageArraySegmentEnd, KPageSize, EMemoryObjectUnpaged, EMemoryCreateNoWipe, memAttr, mapFlags);
    __NK_ASSERT_ALWAYS(r==KErrNone);
    MM::MemorySetLock(memory,aLock);
    PageSegmentAllocator.SetMemory(memory,1);
    }
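
// Note: initialisation is split into two phases; Init2A() only constructs the
// allocator's own bookkeeping, while Init2B() creates the fixed kernel memory
// object spanning KPageArraySegmentBase..KPageArraySegmentEnd and hands it to
// the allocator as the backing store for future segment slabs.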

RPageArray::RPageArray()
    {
    __NK_ASSERT_DEBUG(!iSegments);
    }


TInt RPageArray::Construct(TUint aMaxPages, TBool aPreallocateMemory)
    {
    iNumSegments = (aMaxPages+KPageArraySegmentMask)>>KPageArraySegmentShift;
    iSegments = (TSegment**)Kern::AllocZ(iNumSegments*sizeof(TSegment*));
    if(!iSegments)
        return KErrNoMemory;

    if(!aPreallocateMemory)
        return KErrNone;

    return PreallocateMemory();
    }
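
// A worked example of the rounding in Construct() above: assuming
// KPageArraySegmentSize==16 (so KPageArraySegmentShift==4 and
// KPageArraySegmentMask==15 - illustrative values only), aMaxPages==33 gives
// iNumSegments==(33+15)>>4==3: two full segments plus a third for the final
// page.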

TInt RPageArray::PreallocateMemory()
    {
    MmuLock::Lock();

    __NK_ASSERT_DEBUG(!iPreallocatedMemory);
    iPreallocatedMemory = true;

    TSegment** pS = iSegments;
    TSegment** pGEnd = pS+iNumSegments;
    do
        {
        if(!GetOrAllocateSegment(pS,1))
            {
            iNumSegments = pS-iSegments; // truncate to amount successfully allocated
            MmuLock::Unlock();
            return KErrNoMemory;
            }
        }
    while(++pS<pGEnd);

    MmuLock::Unlock();
    return KErrNone;
    }

RPageArray::~RPageArray()
    {
    TSegment** pS = iSegments;
    if(pS)
        {
        TSegment** pGEnd = pS+iNumSegments;
        if(!iPreallocatedMemory)
            {
            // check all segments have already been deleted...
            while(pS<pGEnd)
                {
#ifdef _DEBUG
                if(*pS)
                    (*pS)->Dump();
#endif
                __NK_ASSERT_DEBUG(!*pS);
                ++pS;
                }
            }
        else
            {
            MmuLock::Lock();
            while(pS<pGEnd)
                {
                __NK_ASSERT_DEBUG(*pS);
                TSegment::Unlock(*pS);
#ifdef _DEBUG
                if(*pS)
                    (*pS)->Dump();
#endif
                __NK_ASSERT_DEBUG(!*pS);
                TRACE2(("RPageArray::~RPageArray delete segment=%d",pS-iSegments));
                ++pS;
                if(pS<pGEnd)
                    MmuLock::Flash();
                }
            MmuLock::Unlock();
            }

        Kern::Free(iSegments);
        }
    }

RPageArray::TSegment* RPageArray::GetOrAllocateSegment(TSegment** aSegmentEntry, TUint aLockCount)
    {
    __NK_ASSERT_DEBUG(MmuLock::IsHeld());
    __NK_ASSERT_DEBUG(aLockCount);

    for(;;)
        {
        TSegment* s = *aSegmentEntry;
        if(s)
            {
            s->Lock(aLockCount);
            return s;
            }

        // no segment, so allocate one...
        MmuLock::Unlock();
        s = TSegment::New();
        MmuLock::Lock();
        if(!s)
            return s;

        // if someone else allocated one...
        if(*aSegmentEntry)
            {
            // free the one we created...
            TSegment::Unlock(s);
            // and retry...
            continue;
            }

        // use new segment...
        TRACE2(("RPageArray::GetOrAllocateSegment new segment=%d",aSegmentEntry-iSegments));
        *aSegmentEntry = s;
        if(--aLockCount)
            s->Lock(aLockCount);
        return s;
        }
    }
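
// Note the allocation race handled above: TSegment::New() must run without
// the MmuLock, so by the time the lock is re-taken another thread may already
// have installed a segment in this slot. In that case the fresh segment is
// simply unlocked - which deletes it, since its only reference is the initial
// lock count of one from New() - and the loop retries.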

TInt RPageArray::Alloc(TUint aIndex, TUint aCount)
    {
    TRACE2(("RPageArray::Alloc(0x%x,0x%x)",aIndex,aCount));
    __NK_ASSERT_DEBUG(aIndex+aCount<=iNumSegments*KPageArraySegmentSize);
    __NK_ASSERT_DEBUG(aIndex+aCount>=aIndex);

    MmuLock::Lock();

    TUint index = aIndex;
    TUint endIndex = aIndex+aCount;
    TSegment** pS = iSegments+(index>>KPageArraySegmentShift);
    while(index<endIndex)
        {
        TUint nextIndex = (index+KPageArraySegmentSize)&~KPageArraySegmentMask;
        TUint limit = (nextIndex<endIndex) ? nextIndex : endIndex;
        TUint lockCount = limit-index;
        index = limit;
        TSegment* s = GetOrAllocateSegment(pS++,lockCount);
        if(!s)
            goto no_memory;
        }

    MmuLock::Unlock();
    return KErrNone;

no_memory:
    MmuLock::Unlock();

    // free what we actually allocated...
    endIndex = index&~KPageArraySegmentMask;
    Free(aIndex,endIndex-aIndex);

    return KErrNoMemory;
    }

void RPageArray::Free(TUint aIndex, TUint aCount)
    {
    TRACE2(("RPageArray::Free(0x%x,0x%x)",aIndex,aCount));
    __NK_ASSERT_DEBUG(aIndex+aCount<=iNumSegments*KPageArraySegmentSize);
    __NK_ASSERT_DEBUG(aIndex+aCount>aIndex);

    MmuLock::Lock();

    TUint index = aIndex;
    TUint endIndex = aIndex+aCount;
    TSegment** pS = iSegments+(index>>KPageArraySegmentShift);
    while(index<endIndex)
        {
        __NK_ASSERT_DEBUG(*pS);
        TUint nextIndex = (index+KPageArraySegmentSize)&~KPageArraySegmentMask;
        TUint limit = (nextIndex<endIndex) ? nextIndex : endIndex;
        TSegment::Unlock(*pS,limit-index);
        index = limit;
        ++pS;
        }

    MmuLock::Unlock();
    }

TInt RPageArray::AddStart(TUint aIndex, TUint aCount, TIter& aIter, TBool aAllowExisting)
    {
    TRACE2(("RPageArray::AddStart(0x%x,0x%x,?,%d)",aIndex,aCount,aAllowExisting));
    __NK_ASSERT_DEBUG(aIndex+aCount<=iNumSegments*KPageArraySegmentSize);
    __NK_ASSERT_DEBUG(aIndex+aCount>aIndex);

    aIter.Set(iSegments,aIndex,aIndex+aCount);

    MmuLock::Lock();

    TInt r;
    TUint index = aIndex;
    TUint endIndex = aIndex+aCount;
    TSegment** pS = iSegments+(index>>KPageArraySegmentShift);
    while(index<endIndex)
        {
        TSegment* s = *pS;
        if(!s)
            {
            // no segment, so allocate one...
            MmuLock::Unlock();
            s = TSegment::New();
            MmuLock::Lock();
            if(!s)
                goto no_memory;

            // if someone else allocated one
            if(*pS)
                {
                // free the one we created...
                TSegment::Unlock(s);
                // and retry...
                continue;
                }

            // use new segment...
            TRACE2(("RPageArray::AddStart new segment=%d",pS-iSegments));
            *pS = s;

            // move on...
            index = (index+KPageArraySegmentSize)&~KPageArraySegmentMask;
            }
        else
            {
            TUint nextIndex = (index+KPageArraySegmentSize)&~KPageArraySegmentMask;
            if(aAllowExisting)
                {
                // just move on to next segment...
                index = (index+KPageArraySegmentSize)&~KPageArraySegmentMask;
                }
            else
                {
                // check page entries are empty...
                TPhysAddr* p = s->iPages+(index&KPageArraySegmentMask);
                TUint limit = (nextIndex<endIndex) ? nextIndex : endIndex;
                do
                    {
                    if(IsPresent(*p++))
                        goto already_exists;
                    }
                while(++index<limit);
                }
            // lock segment so that it doesn't go away...
            s->Lock();

            if(index<endIndex)
                MmuLock::Flash();
            }
        ++pS;
        }

    // done...
    MmuLock::Unlock();
    return KErrNone;

no_memory:
    r = KErrNoMemory;
    goto error;
already_exists:
    r = KErrAlreadyExists;
error:
    MmuLock::Unlock();

    // unlock any segments that we locked...
    endIndex = index&~KPageArraySegmentMask;
    if(endIndex>aIndex)
        Release(iSegments,aIndex,endIndex-aIndex);

    // return error...
    return r;
    }
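
// Note: every successful AddStart() must be balanced by an AddEnd() over the
// same range; AddEnd() below simply delegates to Release() to drop the
// per-segment locks that AddStart() took while pinning the range.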

void RPageArray::AddEnd(TUint aIndex, TUint aCount)
    {
    Release(iSegments,aIndex,aCount);
    }


void RPageArray::FindStart(TUint aIndex, TUint aCount, TIter& aIter)
    {
    TRACE2(("RPageArray::FindStart(0x%x,0x%x,?)",aIndex,aCount));
    __NK_ASSERT_DEBUG(aIndex+aCount<=iNumSegments*KPageArraySegmentSize);
    __NK_ASSERT_DEBUG(aIndex+aCount>aIndex);

    aIter.Set(iSegments,aIndex,aIndex+aCount);
    }

void RPageArray::Release(TSegment** aSegments, TUint aIndex, TUint aCount)
    {
    __NK_ASSERT_DEBUG(aIndex+aCount>aIndex);

    MmuLock::Lock();

    TSegment** pS = aSegments+(aIndex>>KPageArraySegmentShift);
    TSegment** pGLast = aSegments+((aIndex+aCount-1)>>KPageArraySegmentShift);
    __NK_ASSERT_DEBUG(pS<=pGLast);
    TUint flash = 0;
    do
        {
        MmuLock::Flash(flash,KMaxPagesInOneGo);
        if(TSegment::Unlock(*pS)==0)
            {
            TRACE2(("RPageArray::Release delete segment=%d",pS-aSegments));
            }
        ++pS;
        }
    while(pS<=pGLast);

    MmuLock::Unlock();
    }

TPhysAddr* RPageArray::AddPageStart(TUint aIndex, TIter& aPageList)
    {
    __NK_ASSERT_DEBUG(aIndex<=iNumSegments*KPageArraySegmentSize);

    MmuLock::Lock();
    TSegment** pS = iSegments+(aIndex>>KPageArraySegmentShift);
    TSegment* s = GetOrAllocateSegment(pS,1);
    MmuLock::Unlock();

    if(!s)
        return 0;

    aPageList.Set(iSegments,aIndex,aIndex+1);

    return s->iPages+(aIndex&KPageArraySegmentMask);
    }

TPhysAddr* RPageArray::RemovePageStart(TUint aIndex, TIter& aPageList)
    {
    __NK_ASSERT_DEBUG(aIndex<=iNumSegments*KPageArraySegmentSize);

    MmuLock::Lock();

    TSegment** pS = iSegments+(aIndex>>KPageArraySegmentShift);
    TSegment* s = *pS;
    if(!s)
        {
        MmuLock::Unlock();
        return 0;
        }

    TPhysAddr* p = s->iPages+(aIndex&KPageArraySegmentMask);
    TPhysAddr page = *p;
    if(State(page)<EDecommitting)
        {
        MmuLock::Unlock();
        return 0;
        }

    *p = (page&~EStateMask)|EDecommitting;

    s->Lock();

    MmuLock::Unlock();

    aPageList.Set(iSegments,aIndex,aIndex+1);

    return p;
    }

TPhysAddr RPageArray::RemovePage(TPhysAddr* aPageEntry)
    {
    __NK_ASSERT_DEBUG(MmuLock::IsHeld());
    TPhysAddr page = *aPageEntry;
    __NK_ASSERT_DEBUG(State(page)!=EStealing); // can't be stealing as that only happens with the RamAllocLock held, which we should already hold if freeing demand paged pages
    if(State(page)==EDecommitting || State(page)==EDecommitted)
        {
        // remove a page...
        if(page&EUnmapVetoed)
            {
            *aPageEntry = (page&~(EUnmapVetoed|EStateMask))|EDecommitted; // change to EDecommitted state
            }
        else
            {
            *aPageEntry = EEmptyEntry;
            return page&~KPageMask;
            }
        // check not removing managed pages without the RamAllocLock...
        __NK_ASSERT_DEBUG(RamAllocLock::IsHeld()
                          || SPageInfo::FromPhysAddr(page)->Type()!=SPageInfo::EManaged);
        }
    return KPhysAddrInvalid;
    }
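
// Note: the State(page) comparisons used by the ...PageStart() helpers in
// this file rely on the ordering of the TState enumeration (defined in
// mpagearray.h): comparing against a state such as EDecommitting or
// ERestrictingNA selects exactly the entries whose current state permits
// that operation.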

TPhysAddr* RPageArray::RestrictPageNAStart(TUint aIndex, TIter& aPageList)
    {
    __NK_ASSERT_DEBUG(MmuLock::IsHeld());
    __NK_ASSERT_DEBUG(aIndex<=iNumSegments*KPageArraySegmentSize);

    TSegment** pS = iSegments+(aIndex>>KPageArraySegmentShift);
    TSegment* s = *pS;
    if(!s)
        return 0;

    TPhysAddr* p = s->iPages+(aIndex&KPageArraySegmentMask);
    TPhysAddr page = *p;
    if(State(page) < RPageArray::ERestrictingNA)
        return 0;

    *p = (page&~EStateMask) | RPageArray::ERestrictingNA;

    s->Lock();

    aPageList.Set(iSegments,aIndex,aIndex+1);

    return p;
    }

TPhysAddr* RPageArray::StealPageStart(TUint aIndex, TIter& aPageList)
    {
    __NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
    __NK_ASSERT_DEBUG(MmuLock::IsHeld());
    __NK_ASSERT_DEBUG(aIndex<=iNumSegments*KPageArraySegmentSize);

    TSegment** pS = iSegments+(aIndex>>KPageArraySegmentShift);
    TSegment* s = *pS;
    __NK_ASSERT_DEBUG(s); // we only steal pages in the live list and these can not go away yet because we hold the RamAllocLock

    TPhysAddr* p = s->iPages+(aIndex&KPageArraySegmentMask);
    TPhysAddr page = *p;

    if(State(page)>EStealing)
        *p = (page&~EStateMask)|EStealing;

    s->Lock();

    aPageList.Set(iSegments,aIndex,aIndex+1);

    return p;
    }

TPhysAddr* RPageArray::MovePageStart(TUint aIndex, TIter& aPageList)
    {
    __NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
    __NK_ASSERT_DEBUG(MmuLock::IsHeld());
    __NK_ASSERT_DEBUG(aIndex <= iNumSegments*KPageArraySegmentSize);

    TSegment** pS = iSegments+(aIndex>>KPageArraySegmentShift);
    TSegment* s = *pS;
    // The segment should always exist for a page that is being moved.
    __NK_ASSERT_DEBUG(s);

    TPhysAddr* p = s->iPages+(aIndex&KPageArraySegmentMask);
    TPhysAddr page = *p;
    if(State(page) <= RPageArray::EMoving)
        return NULL;

    *p = (page & ~EStateMask) | EMoving;

    aPageList.Set(iSegments, aIndex, aIndex+1);

    return p;
    }

void RPageArray::ReleasePage(TUint aIndex, TInt aDelta)
    {
    __NK_ASSERT_DEBUG(MmuLock::IsHeld());
    __NK_ASSERT_DEBUG(aIndex<=iNumSegments*KPageArraySegmentSize);

    TSegment** pS = iSegments+(aIndex>>KPageArraySegmentShift);
    TSegment* s = *pS;
    __NK_ASSERT_DEBUG(s); // must exist because FindPageStart/AddPageStart locked it

    __NK_ASSERT_DEBUG(aDelta>=-1 && aDelta<=1);
    if(aDelta)
        s->AdjustAllocCount(aDelta);

    if(TSegment::Unlock(*pS)==0)
        {
        TRACE2(("RPageArray::ReleasePage delete segment=%d",pS-iSegments));
        }
    }

TPhysAddr RPageArray::Page(TUint aIndex)
    {
    __NK_ASSERT_DEBUG(MmuLock::IsHeld());
    __NK_ASSERT_DEBUG(aIndex<=iNumSegments*KPageArraySegmentSize);

    TSegment** pS = iSegments+(aIndex>>KPageArraySegmentShift);
    TSegment* s = *pS;
    if(!s)
        return ENotPresent;
    return s->iPages[aIndex&KPageArraySegmentMask];
    }


TPhysAddr* RPageArray::PageEntry(TUint aIndex)
    {
    __NK_ASSERT_DEBUG(MmuLock::IsHeld());
    __NK_ASSERT_DEBUG(aIndex<=iNumSegments*KPageArraySegmentSize);

    TSegment** pS = iSegments+(aIndex>>KPageArraySegmentShift);
    TSegment* s = *pS;
    if(!s)
        return NULL;
    return s->iPages + (aIndex & KPageArraySegmentMask);
    }

TUint RPageArray::PagingManagerData(TUint aIndex)
    {
    __NK_ASSERT_DEBUG(MmuLock::IsHeld());
    __NK_ASSERT_DEBUG(aIndex<=iNumSegments*KPageArraySegmentSize);
    TSegment** pS = iSegments+(aIndex>>KPageArraySegmentShift);
    TSegment* s = *pS;
    __NK_ASSERT_DEBUG(s);
    TPhysAddr* p = &s->iPages[aIndex&KPageArraySegmentMask];

    TPhysAddr entry = *p;
    if(IsPresent(entry))
        {
#ifdef _DEBUG
        SPageInfo* pi = SPageInfo::SafeFromPhysAddr(entry&~KPageMask);
        if(!pi)
            Kern::Printf("RPageArray::PagingManagerData bad entry 0x%08x",entry);
        __NK_ASSERT_DEBUG(pi);
#else
        SPageInfo* pi = SPageInfo::FromPhysAddr(entry);
#endif
        entry = pi->PagingManagerData();
        }
    __NK_ASSERT_DEBUG((entry&(EFlagsMask|EStateMask))==ENotPresent);

    return entry>>(EFlagsShift+EStateShift);
    }
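
// Note the storage trick shared by PagingManagerData() and
// SetPagingManagerData(): while no page is present the value lives in the
// array entry itself, shifted above the state and flag bits with ENotPresent
// in the low bits; once a page is present, the same shifted encoding is kept
// in that page's SPageInfo instead.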

void RPageArray::SetPagingManagerData(TUint aIndex, TUint aValue)
    {
    aValue = (aValue<<(EFlagsShift+EStateShift))|ENotPresent;

    __NK_ASSERT_DEBUG(MmuLock::IsHeld());
    __NK_ASSERT_DEBUG(aIndex<=iNumSegments*KPageArraySegmentSize);
    TSegment** pS = iSegments+(aIndex>>KPageArraySegmentShift);
    TSegment* s = *pS;
    __NK_ASSERT_DEBUG(s);
    TPhysAddr* p = &s->iPages[aIndex&KPageArraySegmentMask];

    TPhysAddr entry = *p;
    if(!IsPresent(entry))
        *p = aValue;
    else
        {
#ifdef _DEBUG
        SPageInfo* pi = SPageInfo::SafeFromPhysAddr(entry&~KPageMask);
        if(!pi)
            Kern::Printf("RPageArray::SetPagingManagerData bad entry 0x%08x",entry);
        __NK_ASSERT_DEBUG(pi);
#else
        SPageInfo* pi = SPageInfo::FromPhysAddr(entry);
#endif
        pi->SetPagingManagerData(aValue);
        }
    }

TPhysAddr RPageArray::PhysAddr(TUint aIndex)
    {
    __NK_ASSERT_DEBUG(MmuLock::IsHeld());
    __NK_ASSERT_DEBUG(aIndex<=iNumSegments*KPageArraySegmentSize);

    TSegment** pS = iSegments+(aIndex>>KPageArraySegmentShift);
    TSegment* s = *pS;
    if(s)
        {
        TPhysAddr page = s->iPages[aIndex&KPageArraySegmentMask];
        if(IsPresent(page))
            {
            return page&~KPageMask;
            }
        }
    return KPhysAddrInvalid;
    }

TInt RPageArray::PhysAddr(TUint aIndex, TUint aCount, TPhysAddr& aPhysicalAddress, TPhysAddr* aPhysicalPageList)
    {
    __NK_ASSERT_DEBUG(aCount);
    MmuLock::Lock();

    TUint32* pageList = aPhysicalPageList;

    // get first page...
    TPhysAddr physStart = PhysAddr(aIndex++);
    if(physStart==KPhysAddrInvalid)
        {
        MmuLock::Unlock();
        return KErrNotFound;
        }
    if(pageList)
        *pageList++ = physStart;

    TUint32 nextPhys = physStart+KPageSize;

    TUint flash = 0;
    while(--aCount)
        {
        MmuLock::Flash(flash,KMaxPagesInOneGo);

        // get next page...
        TPhysAddr phys = PhysAddr(aIndex++);
        if(phys==KPhysAddrInvalid)
            {
            MmuLock::Unlock();
            return KErrNotFound;
            }
        if(pageList)
            *pageList++ = phys;

        // check for contiguity...
        if(phys!=nextPhys)
            nextPhys = KPhysAddrInvalid;
        else
            nextPhys += KPageSize;
        }

    MmuLock::Unlock();

    if(nextPhys==KPhysAddrInvalid)
        {
        // memory is discontiguous...
        if(!aPhysicalPageList)
            return KErrNotFound;
        aPhysicalAddress = KPhysAddrInvalid;
        return 1;
        }
    else
        {
        // memory is contiguous...
        aPhysicalAddress = physStart;
        return KErrNone;
        }
    }
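
// Note the return convention of the multi-page PhysAddr() overload above:
// KErrNone means the whole range is physically contiguous and
// aPhysicalAddress holds its start; a return of 1 means the range exists but
// is discontiguous, with the individual addresses in aPhysicalPageList; and
// KErrNotFound means some page in the range is absent (or the range is
// discontiguous and no list was supplied).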