sl@0
|
1 |
// Copyright (c) 2005-2009 Nokia Corporation and/or its subsidiary(-ies).
|
sl@0
|
2 |
// All rights reserved.
|
sl@0
|
3 |
// This component and the accompanying materials are made available
|
sl@0
|
4 |
// under the terms of the License "Eclipse Public License v1.0"
|
sl@0
|
5 |
// which accompanies this distribution, and is available
|
sl@0
|
6 |
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
|
sl@0
|
7 |
//
|
sl@0
|
8 |
// Initial Contributors:
|
sl@0
|
9 |
// Nokia Corporation - initial contribution.
|
sl@0
|
10 |
//
|
sl@0
|
11 |
// Contributors:
|
sl@0
|
12 |
//
|
sl@0
|
13 |
// Description:
|
sl@0
|
14 |
// e32test\mmu\d_demandpaging.cpp
|
sl@0
|
15 |
//
|
sl@0
|
16 |
//
|
sl@0
|
17 |
|
sl@0
|
18 |
#include <kernel/kern_priv.h>
|
sl@0
|
19 |
#include <kernel/cache.h>
|
sl@0
|
20 |
#include "d_demandpaging.h"
|
sl@0
|
21 |
|
sl@0
|
22 |
/// Page attributes, cut-n-paste'd from mmubase.h
///
/// Only the demand-paging page types (10-14) are declared here; the entries
/// below 10 are left commented out to document the numbering used by
/// mmubase.h. Judging by the masking in CheckPagedOut() below, the page type
/// occupies the low bits of the word returned by the EVMPageState HAL call.
enum TType
	{
//	EInvalid=0,		// No physical RAM exists for this page
//	EFixed=1,		// RAM fixed at boot time
//	EUnused=2,		// Page is unused
//	EChunk=3,
//	ECodeSeg=4,
//	EHwChunk=5,
//	EPageTable=6,
//	EPageDir=7,
//	EPtInfo=8,
//	EShadow=9,

	EPagedROM=10,
	EPagedCode=11,
	EPagedData=12,
	EPagedCache=13,
	EPagedFree=14,
	};
|
sl@0
|
42 |
|
sl@0
|
43 |
/// Paging state of a page, mirroring the values used by the kernel's memory
/// model. PageState() returns this in bits 8-15 of the page-state word —
/// see the (state&0xff00) == (EState...<<8) comparisons in CheckPagedIn()
/// and CheckLocked() below.
enum TState
	{
	EStateNormal = 0,		// no special state
	EStatePagedYoung = 1,	// paged in (the state CheckPagedIn() tests for)
	EStatePagedOld = 2,
	EStatePagedDead = 3,
	EStatePagedLocked = 4	// paged in and locked (the state CheckLocked() tests for)
	};
|
sl@0
|
51 |
|
sl@0
|
52 |
//
|
sl@0
|
53 |
// Class definitions
|
sl@0
|
54 |
//
|
sl@0
|
55 |
|
sl@0
|
56 |
// Factory (logical device) object for the demand-paging test LDD.
// Installs the driver under KDemandPagingTestLddName and creates
// DDemandPagingTestChannel instances on demand.
class DDemandPagingTestFactory : public DLogicalDevice
	{
public:
	~DDemandPagingTestFactory();
	virtual TInt Install();						// sets the driver's published name
	virtual void GetCaps(TDes8& aDes) const;	// no capabilities published
	virtual TInt Create(DLogicalChannelBase*& aChannel);	// makes a new test channel
	};
|
sl@0
|
64 |
|
sl@0
|
65 |
class DDemandPagingTestChannel : public DLogicalChannelBase
|
sl@0
|
66 |
{
|
sl@0
|
67 |
public:
|
sl@0
|
68 |
DDemandPagingTestChannel();
|
sl@0
|
69 |
~DDemandPagingTestChannel();
|
sl@0
|
70 |
virtual TInt DoCreate(TInt aUnit, const TDesC8* anInfo, const TVersion& aVer);
|
sl@0
|
71 |
virtual TInt Request(TInt aFunction, TAny* a1, TAny* a2);
|
sl@0
|
72 |
TInt LockTest(const TAny* aBuffer, TInt aSize);
|
sl@0
|
73 |
TInt LockTest2();
|
sl@0
|
74 |
TInt DoConsumeContiguousRamTest(TInt aAlign, TInt aPages);
|
sl@0
|
75 |
TInt DoCreatePlatHwChunk(TInt aSize, TAny* aLinAddr);
|
sl@0
|
76 |
TInt DoDestroyPlatHwChunk();
|
sl@0
|
77 |
TInt ReadHoldingMutexTest(TAny* aDest);
|
sl@0
|
78 |
|
sl@0
|
79 |
TBool CheckPagedIn(TLinAddr aAddress);
|
sl@0
|
80 |
TBool CheckPagedOut(TLinAddr aAddress);
|
sl@0
|
81 |
TBool CheckLocked(TLinAddr aAddress);
|
sl@0
|
82 |
|
sl@0
|
83 |
TInt FreeRam();
|
sl@0
|
84 |
public:
|
sl@0
|
85 |
DDemandPagingTestFactory* iFactory;
|
sl@0
|
86 |
DDemandPagingLock iLock;
|
sl@0
|
87 |
|
sl@0
|
88 |
DPlatChunkHw* iHwChunk;
|
sl@0
|
89 |
TInt iChunkSize;
|
sl@0
|
90 |
TPhysAddr iPhysBase; // This will be base physical address of the chunk
|
sl@0
|
91 |
TLinAddr iLinearBase; // This will be base linear address of the chunk
|
sl@0
|
92 |
};
|
sl@0
|
93 |
|
sl@0
|
94 |
//
|
sl@0
|
95 |
// DDemandPagingTestFactory
|
sl@0
|
96 |
//
|
sl@0
|
97 |
|
sl@0
|
98 |
// Registers the LDD under its published name so user-side test code can
// open a channel to it.
TInt DDemandPagingTestFactory::Install()
	{
	const TInt err = SetName(&KDemandPagingTestLddName);
	return err;
	}
|
sl@0
|
102 |
|
sl@0
|
103 |
// The factory owns no resources of its own, so there is nothing to release.
DDemandPagingTestFactory::~DDemandPagingTestFactory()
	{
	}
|
sl@0
|
106 |
|
sl@0
|
107 |
// Deliberately empty: this test driver publishes no capabilities.
void DDemandPagingTestFactory::GetCaps(TDes8& /*aDes*/) const
	{
	// Not used but required as DLogicalDevice::GetCaps is pure virtual
	}
|
sl@0
|
111 |
|
sl@0
|
112 |
// Creates a new test channel and hands ownership back via aChannel.
// aChannel is NULLed first so that on allocation failure the caller never
// sees a stale pointer. Returns KErrNoMemory if the channel cannot be
// allocated, KErrNone otherwise.
TInt DDemandPagingTestFactory::Create(DLogicalChannelBase*& aChannel)
	{
	aChannel = NULL;
	DDemandPagingTestChannel* newChannel = new DDemandPagingTestChannel;
	if (newChannel == NULL)
		return KErrNoMemory;
	newChannel->iFactory = this;	// give the channel a back-pointer to us
	aChannel = newChannel;
	return KErrNone;
	}
|
sl@0
|
122 |
|
sl@0
|
123 |
// Standard LDD entry point: returns the factory object for this driver
// (NULL on out-of-memory).
DECLARE_STANDARD_LDD()
	{
	return new DDemandPagingTestFactory;
	}
|
sl@0
|
127 |
|
sl@0
|
128 |
//
|
sl@0
|
129 |
// DDemandPagingTestChannel
|
sl@0
|
130 |
//
|
sl@0
|
131 |
|
sl@0
|
132 |
// No channel-specific construction is needed; any unit/info/version
// combination is accepted.
TInt DDemandPagingTestChannel::DoCreate(TInt /*aUnit*/, const TDesC8* /*aInfo*/, const TVersion& /*aVer*/)
	{
	return KErrNone;
	}
|
sl@0
|
136 |
|
sl@0
|
137 |
// NOTE(review): relies on members starting zeroed (DoCreatePlatHwChunk
// tests iHwChunk for NULL) — presumably kernel object allocation
// zero-initialises; confirm against DBase allocation semantics.
DDemandPagingTestChannel::DDemandPagingTestChannel()
	{
	}
|
sl@0
|
140 |
|
sl@0
|
141 |
// Releases the hardware chunk and its physical RAM if a test left one
// allocated (DoDestroyPlatHwChunk is a no-op when iHwChunk is NULL).
DDemandPagingTestChannel::~DDemandPagingTestChannel()
	{
	DoDestroyPlatHwChunk();
	}
|
sl@0
|
145 |
|
sl@0
|
146 |
// Synchronous request dispatcher for the test channel.
//
// aFunction selects the test (RDemandPagingTestLdd enum); a1/a2 are
// function-specific arguments supplied by the user-side caller.
// Returns KErrNone on success, a system error code on failure, or
// KErrNotSupported for unknown function numbers.
TInt DDemandPagingTestChannel::Request(TInt aFunction, TAny* a1, TAny* a2)
	{
	switch(aFunction)
		{
		// Run both lock tests; LockTest2 only runs if LockTest passed.
		// a1 = buffer address, a2 = buffer size in bytes.
		case RDemandPagingTestLdd::ELockTest:
			{
			TInt r = LockTest(a1,(TInt)a2);
			if (r == KErrNone)
				r = LockTest2();
			return r;
			}

		// Set (a1 != 0) or clear (a1 == 0) the KREALTIME trace bit in the
		// super page's debug mask. Debug builds only; a no-op otherwise.
		case RDemandPagingTestLdd::ESetRealtimeTrace:
			{
#if defined(_DEBUG)
			TUint32 bit = TUint32(1<<(KREALTIME&31));
			__e32_atomic_axo_ord32(&Kern::SuperPage().iDebugMask[KREALTIME>>5], ~bit, a1?bit:0);
#if 0 // can enable this to help debugging
			bit = (1<<(KPAGING&31));
			__e32_atomic_axo_ord32(&Kern::SuperPage().iDebugMask[KPAGING>>5], ~bit, a1?bit:0);
#endif
#endif //_DEBUG
			}
			return KErrNone;

		// a1 = alignment (log2), a2 = block size in bytes.
		case RDemandPagingTestLdd::EDoConsumeContiguousRamTest:
			{
			return DDemandPagingTestChannel::DoConsumeContiguousRamTest((TInt)a1, (TInt)a2);
			}

		// a1 = requested size in bytes, a2 = user-side address to receive
		// the chunk's linear base address.
		case RDemandPagingTestLdd::ECreatePlatHwChunk:
			{
			return DDemandPagingTestChannel::DoCreatePlatHwChunk((TInt)a1, a2);
			}

		case RDemandPagingTestLdd::EDestroyPlatHwChunk:
			{
			return DDemandPagingTestChannel::DoDestroyPlatHwChunk();
			}

		// Reserve then lock [a1, a1+a2) using the channel's iLock.
		case RDemandPagingTestLdd::ELock:
			{
			TInt r=iLock.Alloc((TInt)a2);
			if(r!=KErrNone)
				return r;
			return iLock.Lock(&Kern::CurrentThread(),(TLinAddr)a1,(TInt)a2);
			}

		// Free() both unlocks and releases the reservation.
		case RDemandPagingTestLdd::EUnlock:
			{
			iLock.Free();
			return KErrNone;
			}

		// a1 = optional destination buffer (NULL means use a kernel-side
		// stack buffer).
		case RDemandPagingTestLdd::EReadHoldingMutexTest:
			return ReadHoldingMutexTest((TAny*)a1);

		default:
			return KErrNotSupported;
		}
	}
|
sl@0
|
207 |
|
sl@0
|
208 |
//
|
sl@0
|
209 |
// DDemandPagingTestChannel::DoCreatePlatHwChunk
|
sl@0
|
210 |
//
|
sl@0
|
211 |
// For some of the tests of IPC from demand-paged memory, we need a writable
|
sl@0
|
212 |
// globally-mapped buffer; so this function creates a suitable chunk and
|
sl@0
|
213 |
// returns its (global, virtual) address to the userland caller. The caller
|
sl@0
|
214 |
// should call DoDestroyPlatHwChunk() to release the memory when the tests
|
sl@0
|
215 |
// are finished.
|
sl@0
|
216 |
//
|
sl@0
|
217 |
// Allocates a page-rounded block of contiguous physical RAM, maps it as a
// user-read/write hardware chunk, and writes the chunk's linear base
// address back to the user-side address aLinAddr.
//
// aSize    - requested size in bytes (rounded up to a whole page).
// aLinAddr - user-side TLinAddr* to receive the chunk's linear address.
//
// Returns KErrAlreadyExists if a chunk already exists on this channel,
// or the error from the physical RAM allocation / chunk creation.
// On any failure after the RAM allocation, the RAM is freed again.
TInt DDemandPagingTestChannel::DoCreatePlatHwChunk(TInt aSize, TAny* aLinAddr)
	{
	TInt mapAttr = EMapAttrUserRw;	// Supervisor and user both have read/write permissions

	// Hold a critical section across allocation and chunk creation so the
	// thread can't be killed while kernel resources are half-constructed.
	NKern::ThreadEnterCS();
	if (iHwChunk)	// Only one chunk at a time
		{
		NKern::ThreadLeaveCS();
		return KErrAlreadyExists;
		}

	iChunkSize = Kern::RoundToPageSize(aSize);

	Kern::Printf("*** Attempting to allocate contiguous physical RAM ***");
	TInt free = Kern::FreeRamInBytes();
	Kern::Printf("  requested:  %08x", iChunkSize);
	Kern::Printf("  total free: %08x", free);

	TInt r = Epoc::AllocPhysicalRam(iChunkSize, iPhysBase, 0);	// Allocate RAM; result in iPhysBase
	if (r)
		{
		NKern::ThreadLeaveCS();
		Kern::Printf("  failed with error %d", r);
		return r;
		}
	else
		Kern::Printf("  success");

	r = DPlatChunkHw::New(iHwChunk, iPhysBase, iChunkSize, mapAttr);	// Create chunk
	if (r)
		{
		// Chunk creation failed: give the physical RAM back.
		Epoc::FreePhysicalRam(iPhysBase, iChunkSize);
		iHwChunk = 0;
		NKern::ThreadLeaveCS();
		return r;
		}
	NKern::ThreadLeaveCS();

	// Return the virtual address to userland
	iLinearBase = iHwChunk->LinearAddress();
	kumemput(aLinAddr, &iLinearBase, sizeof(iLinearBase));

	Kern::Printf("CreatePlatHwChunk@%08x: iLinearBase %08x, iPhysBase %08x, size %d",
				 iHwChunk, iLinearBase, iPhysBase, iChunkSize);

	return KErrNone;
	}
|
sl@0
|
264 |
|
sl@0
|
265 |
// Tears down the chunk created by DoCreatePlatHwChunk: closes the chunk,
// frees its physical RAM, and resets the bookkeeping members. Safe to call
// when no chunk exists (then it only logs). Always returns KErrNone.
TInt DDemandPagingTestChannel::DoDestroyPlatHwChunk()
	{
	Kern::Printf("DestroyPlatHwChunk@%08x: iLinearBase %08x, iPhysBase %08x, size %d",
				 iHwChunk, iLinearBase, iPhysBase, iChunkSize);
	// Critical section: don't allow the thread to die between closing the
	// chunk and freeing the physical RAM.
	NKern::ThreadEnterCS();
	if (iHwChunk)
		{
		iHwChunk->Close(NULL);
		Epoc::FreePhysicalRam(iPhysBase, iChunkSize);
		iPhysBase = 0;
		iChunkSize = 0;
		iHwChunk = 0;
		}
	NKern::ThreadLeaveCS();
	return KErrNone;
	}
|
sl@0
|
281 |
|
sl@0
|
282 |
//
|
sl@0
|
283 |
// DDemandPagingTestChannel::DoConsumeContiguousRamTest
|
sl@0
|
284 |
//
|
sl@0
|
285 |
// This test attempts to consume all available Contiguous Ram until we need to ask the
|
sl@0
|
286 |
// demand paging code to release memory for it.
|
sl@0
|
287 |
//
|
sl@0
|
288 |
// On completion free all the memory allocated.
|
sl@0
|
289 |
//
|
sl@0
|
290 |
#define CHECK(c) { if(!(c)) { Kern::Printf("Fail %d", __LINE__); ; retVal = __LINE__;} }
|
sl@0
|
291 |
|
sl@0
|
292 |
// Repeatedly allocates aSize-byte blocks of contiguous physical RAM at the
// given alignment until allocation fails, watching the paging cache's
// iMaxFreeSize to detect whether the allocator had to reclaim memory from
// the demand-paging cache. All blocks are freed before returning.
//
// aAlign - requested alignment as a power of two (log2 bytes).
// aSize  - size of each block in bytes.
//
// Returns KErrNone on success, or the __LINE__ of the last failed CHECK.
// NOTE(review): aSize == 0 would divide by zero below — presumably the
// user-side test always passes a non-zero size; confirm at the caller.
TInt DDemandPagingTestChannel::DoConsumeContiguousRamTest(TInt aAlign, TInt aSize)
	{
	TInt retVal = KErrNone;
	TInt initialFreeRam = FreeRam();
	TInt totalBlocks = initialFreeRam/aSize;	// upper bound on how many blocks can fit

	// Heap allocation requires a critical section in kernel code.
	NKern::ThreadEnterCS();
	TPhysAddr* pAddrArray = (TPhysAddr *)Kern::Alloc(sizeof(TPhysAddr) * totalBlocks);
	NKern::ThreadLeaveCS();
	CHECK(pAddrArray);
	if(!pAddrArray)
		return retVal;

	SVMCacheInfo tempPages;

	// get the initial free ram again as the heap may have grabbed a page during the alloc
	initialFreeRam = FreeRam();
	Kern::Printf("ConsumeContiguousRamTest: align %d size %d initialFreeRam %d", aAlign, aSize, initialFreeRam);

	CHECK(Kern::HalFunction(EHalGroupVM,EVMHalGetCacheSize,&tempPages,0) == KErrNone);
	Kern::Printf("Start cache info: iMinSize %d iMaxSize %d iCurrentSize %d iMaxFreeSize %d",
				 tempPages.iMinSize, tempPages.iMaxSize, tempPages.iCurrentSize ,tempPages.iMaxFreeSize);

	TInt initialFreePages = tempPages.iMaxFreeSize;
	CHECK(initialFreePages != 0);

	// allocate blocks to use up RAM until we fail to allocate any further...
	TBool freedPagesToAlloc = EFalse;
	TInt index;
	TUint32 alignMask = (1 << aAlign) - 1;
	for (index = 0; index < totalBlocks; )
		{
		// Sample the paging cache's free size before and after each
		// allocation; a drop means the alloc reclaimed paged memory.
		CHECK(Kern::HalFunction(EHalGroupVM,EVMHalGetCacheSize,&tempPages,0) == KErrNone);
		TInt beforePages = tempPages.iMaxFreeSize;

		NKern::ThreadEnterCS();
		TInt r = Epoc::AllocPhysicalRam(aSize, pAddrArray[index], aAlign);
		if(r==KErrNone)
			{
			// check the alignment of the returned pages
			CHECK((pAddrArray[index] & alignMask) == 0);
			++index;
			}
		NKern::ThreadLeaveCS();
		if(r!=KErrNone)
			{
			break;
			}
		CHECK(Kern::HalFunction(EHalGroupVM,EVMHalGetCacheSize,&tempPages,0) == KErrNone);
		TInt afterPages = tempPages.iMaxFreeSize;

		if (afterPages != beforePages)
			freedPagesToAlloc = ETrue; // the alloc reclaimed memory from the paging cache
		}

	if (!index)
		Kern::Printf("WARNING : DoConsumeContiguousRamTest no allocations were successful");
	// free the memory we allocated...
	while(--index>=0)
		{
		NKern::ThreadEnterCS();
		TInt r = Epoc::FreePhysicalRam(pAddrArray[index], aSize);
		NKern::ThreadLeaveCS();
		CHECK(r==KErrNone);
		}

	// Everything was freed, so free RAM should be back where it started.
	CHECK(FreeRam() == initialFreeRam);

	NKern::ThreadEnterCS();
	Kern::Free(pAddrArray);
	NKern::ThreadLeaveCS();

	CHECK(Kern::HalFunction(EHalGroupVM,EVMHalGetCacheSize,&tempPages,0) == KErrNone);
	Kern::Printf("End cache info: iMinSize %d iMaxSize %d iCurrentSize %d iMaxFreeSize %d",
				 tempPages.iMinSize, tempPages.iMaxSize, tempPages.iCurrentSize ,tempPages.iMaxFreeSize);

	// Warn (rather than fail) if the allocations never pushed into the
	// paging cache — the system may simply have had enough truly-free RAM.
	if (!freedPagesToAlloc)
		Kern::Printf("WARNING : DoConsumeContiguousRamTest freedPagesToAlloc was eFalse");
	//CHECK(freedPagesToAlloc);

	return retVal;
	}
|
sl@0
|
374 |
#undef CHECK
|
sl@0
|
375 |
|
sl@0
|
376 |
|
sl@0
|
377 |
// Performs a real read of *aPtr; the volatile qualifier stops the compiler
// optimising the access away. LockTest uses this (via the READ macro) to
// force pages of the test buffer to be paged in.
TUint8 ReadByte(volatile TUint8* aPtr)
	{
	const TUint8 value = *aPtr;
	return value;
	}
|
sl@0
|
381 |
|
sl@0
|
382 |
#define CHECK(c) { if(!(c)) return __LINE__; }
|
sl@0
|
383 |
|
sl@0
|
384 |
#define READ(a) ReadByte((volatile TUint8*)(a))
|
sl@0
|
385 |
|
sl@0
|
386 |
// Exercises DDemandPagingLock against a user-supplied demand-paged buffer:
// verifies that reads page the buffer in, that Alloc/Lock pin the pages
// (they survive a cache flush), that a second overlapping lock behaves,
// and that Unlock/Free restore the original free-RAM level.
//
// aBuffer/aSize - the user-side buffer; trimmed to whole pages below.
// Returns KErrNone on success, or the __LINE__ of the first failed CHECK.
TInt DDemandPagingTestChannel::LockTest(const TAny* aBuffer, TInt aSize)
	{
	// Get page size info
	TInt pageSize = 0;
	CHECK(Kern::HalFunction(EHalGroupKernel,EKernelHalPageSizeInBytes,&pageSize,0)==KErrNone);
	TInt pageMask = pageSize-1;

	// See if we're running on the Flexible Memory Model
	TUint32 memModelAttrib = (TUint32)Kern::HalFunction(EHalGroupKernel,EKernelHalMemModelInfo,0,0);
	TBool fmm = (memModelAttrib&EMemModelTypeMask)==EMemModelTypeFlexible;

	// Round buffer to page boundaries (start up, end down: only whole pages)
	TLinAddr start = ((TLinAddr)aBuffer+pageMask)&~pageMask;
	TLinAddr end = ((TLinAddr)aBuffer+aSize)&~pageMask;
	aSize = end-start;
	Kern::Printf("Test buffer is %08x, %x\n",start,aSize);
	CHECK(aSize>pageSize*2);	// need at least a few whole pages to test with

	// Flush all paged memory
	Kern::HalFunction(EHalGroupVM,EVMHalFlushCache,0,0);

	TInt initialFreeRam;
	TInt freeRam1;
	TInt freeRam2;
	TLinAddr addr;
	TUint lockBytesUsed = fmm ? 0 : 0; // free ram change on locking (zero or aSize depending on implementation)

	{ // this brace is essential for correctness
	DDemandPagingLock lock2; // construct a lock;

	Kern::Printf("Check reading from buffer pages it in\n");
	for(addr=start; addr<end; addr+=pageSize) READ(addr);
	for(addr=start; addr<end; addr+=pageSize) CHECK(CheckPagedIn(addr));
	initialFreeRam = FreeRam();

	Kern::Printf("Check Alloc reserves pages\n");
	CHECK(iLock.Alloc(aSize)==KErrNone);
	freeRam1 = FreeRam();

	Kern::Printf("Check flushing pages out the buffer\n");
	Kern::HalFunction(EHalGroupVM,EVMHalFlushCache,0,0);
	for(addr=start; addr<end; addr+=pageSize) CHECK(CheckPagedOut(addr));

	Kern::Printf("Check Lock\n");
	CHECK(iLock.Lock(&Kern::CurrentThread(),start,aSize));
	CHECK((TUint)FreeRam()==TUint(freeRam1-lockBytesUsed));
	for(addr=start; addr<end; addr+=pageSize) CHECK(CheckLocked(addr));

	// Locked pages must not be evicted by a cache flush.
	Kern::Printf("Check flushing doesn't page out the buffer\n");
	Kern::HalFunction(EHalGroupVM,EVMHalFlushCache,0,0);
	for(addr=start; addr<end; addr+=pageSize) CHECK(CheckLocked(addr));
	CHECK((TUint)FreeRam()==TUint(freeRam1-lockBytesUsed));

	Kern::Printf("Check second Alloc\n");
	CHECK(lock2.Alloc(aSize)==KErrNone);
	freeRam2 = FreeRam();

	Kern::Printf("Check second Lock\n");
	CHECK(lock2.Lock(&Kern::CurrentThread(),start,aSize));
	CHECK(FreeRam()==freeRam2);
	for(addr=start; addr<end; addr+=pageSize) CHECK(CheckLocked(addr));

	Kern::Printf("Check deleting second lock\n");
	// lock2 is deleted here because it goes out of scope...
	} // this brace is essential for correctness

	// iLock still holds the pages even after lock2's destruction.
	CHECK((TUint)FreeRam()==TUint(freeRam1-lockBytesUsed));
	for(addr=start; addr<end; addr+=pageSize) CHECK(CheckLocked(addr));

	Kern::Printf("Check Unlock\n");
	iLock.Unlock();
	CHECK(FreeRam()==freeRam1);
	for(addr=start; addr<end; addr+=pageSize) CHECK(CheckPagedIn(addr));
	iLock.Unlock();	// second Unlock must be a harmless no-op
	CHECK(FreeRam()==initialFreeRam);

	Kern::Printf("Check Free\n");
	iLock.Free();
	CHECK(FreeRam()==initialFreeRam);
	iLock.Free();	// second Free must be a harmless no-op
	CHECK(FreeRam()==initialFreeRam);

	return KErrNone;
	}
|
sl@0
|
469 |
|
sl@0
|
470 |
#undef CHECK
|
sl@0
|
471 |
#define CHECK(c) { if(!(c)) { r = __LINE__; goto cleanup; } }
|
sl@0
|
472 |
|
sl@0
|
473 |
// Keeps allocating one-byte DDemandPagingLock reservations until free RAM
// drops, which indicates the paging live list had to grow. Fails (via
// CHECK's goto cleanup) if KLockMax locks never cause a drop. All locks are
// deleted before returning.
//
// Returns KErrNone on success, or the __LINE__ of the first failed CHECK.
TInt DDemandPagingTestChannel::LockTest2()
	{
	Kern::Printf("Check allocating locks eventually increases size of live list\n");
	TInt r = KErrNone;

	DDemandPagingLock* lock = NULL;
	RPointerArray<DDemandPagingLock> lockArray;

	const TInt KLockMax = 1000; // make this a bit bigger than current min page count?
	TInt i;

	// Whole loop runs in a critical section: we hold raw heap objects that
	// must not leak if the thread is killed.
	NKern::ThreadEnterCS();
	for (i = 0 ; i < KLockMax ; ++i)
		{
		lock = new DDemandPagingLock;
		CHECK(lock);
		// Hand ownership to the array before Alloc so cleanup can free it.
		CHECK(lockArray.Append(lock) == KErrNone);
		lock = NULL;

		TInt initialFreeRam = FreeRam();
		CHECK(lockArray[i]->Alloc(1) == KErrNone);
		if (FreeRam() < initialFreeRam)
			{
			Kern::Printf("Live list size increased after %d locks allocated", i + 1);
			break;
			}
		}

	CHECK(i < KLockMax);	// fail if the live list never grew

cleanup:

	// 'lock' is only non-NULL if Append failed before ownership transferred.
	delete lock;
	lock = NULL;
	for (i = 0 ; i < lockArray.Count() ; ++i)
		{
		delete lockArray[i];
		lockArray[i] = NULL;
		}
	lockArray.Reset();

	NKern::ThreadLeaveCS();

	return r;
	}
|
sl@0
|
518 |
|
sl@0
|
519 |
// Samples the system's free RAM in bytes, first issuing a supervisor
// barrier so that pending supervisor-thread work has completed and the
// reading is stable. Logs and returns the value.
TInt DDemandPagingTestChannel::FreeRam()
	{
	Kern::HalFunction(EHalGroupKernel, EKernelHalSupervisorBarrier, 0, 0);
	const TInt bytesFree = Kern::FreeRamInBytes();
	Kern::Printf("...free RAM: %x\n", bytesFree);
	return bytesFree;
	}
|
sl@0
|
526 |
|
sl@0
|
527 |
|
sl@0
|
528 |
// Queries the kernel (EVMPageState HAL function) for the paging state of
// the page containing aAddress, logs it, and returns the raw state word.
// Bits 8-15 hold a TState value — see CheckPagedIn/CheckPagedOut/
// CheckLocked below for how the word is decoded.
TUint32 PageState(TLinAddr aAddress)
	{
	const TUint32 stateWord = Kern::HalFunction(EHalGroupVM, EVMPageState, (TAny*)aAddress, 0);
	Kern::Printf("PageState: %08x=%08x", aAddress, stateWord);
	return stateWord;
	}
|
sl@0
|
534 |
|
sl@0
|
535 |
|
sl@0
|
536 |
// True if the page containing aAddress is currently paged in, i.e. its
// state field (bits 8-15) equals EStatePagedYoung.
TBool DDemandPagingTestChannel::CheckPagedIn(TLinAddr aAddress)
	{
	const TUint32 stateField = PageState(aAddress) & 0xff00;
	return stateField == (TUint32)(EStatePagedYoung << 8);
	}
|
sl@0
|
541 |
|
sl@0
|
542 |
|
sl@0
|
543 |
// True if the page containing aAddress is paged out: both the type field
// (bits 0-7) and the state field (bits 8-15) are zero.
TBool DDemandPagingTestChannel::CheckPagedOut(TLinAddr aAddress)
	{
	const TUint32 typeAndState = PageState(aAddress) & 0xffff;
	return typeAndState == 0;
	}
|
sl@0
|
548 |
|
sl@0
|
549 |
|
sl@0
|
550 |
TInt DDemandPagingTestChannel::CheckLocked(TLinAddr aAddress)
|
sl@0
|
551 |
{
|
sl@0
|
552 |
TUint32 state = PageState(aAddress);
|
sl@0
|
553 |
return (state&0xff00) == (EStatePagedLocked<<8);
|
sl@0
|
554 |
}
|
sl@0
|
555 |
|
sl@0
|
556 |
|
sl@0
|
557 |
// Performs kernel-side reads from several memory kinds (unpaged ROM, kernel
// stack, kernel heap) into aDest while holding a mutex whose order is below
// the demand-paging mutex ("Mutex order < demand paging"). If any of those
// reads were to take a paging fault while the mutex is held, the kernel
// would fault — so per the comment on the return, a kernel fault indicates
// test failure.
//
// aDest - destination buffer for the writes; if NULL, a kernel-side stack
//         buffer is used instead.
// Returns KErrNone, or an error from mutex creation / heap allocation.
TInt DDemandPagingTestChannel::ReadHoldingMutexTest(TAny* aDest)
	{
	_LIT(KMutexName, "DPTestMutex");

	// Critical section: we hold a raw DMutex reference until Close below.
	NKern::ThreadEnterCS();

	DMutex* mutex;
	TInt r = Kern::MutexCreate(mutex, KMutexName, KMutexOrdDebug); // Mutex order < demand paging
	if (r != KErrNone)
		{
		NKern::ThreadLeaveCS();
		return r;
		}
	Kern::MutexWait(*mutex);

	// Compute the extent of unpaged ROM: from the ROM header up to either
	// the start of pageable ROM or, if none, the end of the image.
	const TRomHeader& romHeader = Epoc::RomHeader();
	TLinAddr unpagedRomStart = (TLinAddr)&romHeader;
	TLinAddr unpagedRomEnd;
	if (romHeader.iPageableRomStart)
		unpagedRomEnd = unpagedRomStart + romHeader.iPageableRomStart;
	else
		unpagedRomEnd = unpagedRomStart + romHeader.iUncompressedSize;

	const TInt length = 16;
	TUint8 localBuf[length];
	if(!aDest)
		aDest = localBuf;
	Kern::Printf("Local buffer at %08x", aDest);

	// First and last readable slices of the unpaged ROM region.
	TAny* src1 = (TAny*)unpagedRomStart;
	TAny* src2 = (TAny*)(unpagedRomEnd - length);

	DThread* thread = &Kern::CurrentThread();

	Kern::Printf("Attempting to access %08x", src1);
	Kern::ThreadRawWrite(thread, aDest, src1, length);
	Kern::Printf("Attempting to access %08x", src2);
	Kern::ThreadRawWrite(thread, aDest, src2, length);

	// Read from this thread's kernel stack...
	TUint8 stackData[length];
	Kern::Printf("Attempting to access %08x", stackData);
	Kern::ThreadRawWrite(thread, aDest, stackData, length);

	// ...and from the kernel heap.
	TAny* heapData = Kern::Alloc(length);
	if (heapData)
		{
		Kern::Printf("Attempting to access %08x", heapData);
		Kern::ThreadRawWrite(thread, aDest, heapData, length);
		Kern::Free(heapData);
		}
	else
		r = KErrNoMemory;

	Kern::MutexSignal(*mutex);
	mutex->Close(NULL);

	NKern::ThreadLeaveCS();

	return r; // a kernel fault indicates that the test failed
	}
|
sl@0
|
617 |
|