sl@0
|
1 |
// Copyright (c) 2009 Nokia Corporation and/or its subsidiary(-ies).
|
sl@0
|
2 |
// All rights reserved.
|
sl@0
|
3 |
// This component and the accompanying materials are made available
|
sl@0
|
4 |
// under the terms of the License "Eclipse Public License v1.0"
|
sl@0
|
5 |
// which accompanies this distribution, and is available
|
sl@0
|
6 |
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
|
sl@0
|
7 |
//
|
sl@0
|
8 |
// Initial Contributors:
|
sl@0
|
9 |
// Nokia Corporation - initial contribution.
|
sl@0
|
10 |
//
|
sl@0
|
11 |
// Contributors:
|
sl@0
|
12 |
//
|
sl@0
|
13 |
// Description:
|
sl@0
|
14 |
// e32test\demandpaging\t_pagetable_limit.cpp
|
sl@0
|
15 |
// Tests to expose the limit of page table virtual address space.
|
sl@0
|
16 |
//
|
sl@0
|
17 |
//
|
sl@0
|
18 |
|
sl@0
|
19 |
//! @SYMTestCaseID KBASE-T_PAGETABLE_LIMIT
|
sl@0
|
20 |
//! @SYMTestType UT
|
sl@0
|
21 |
//! @SYMPREQ PREQ1490
|
sl@0
|
22 |
//! @SYMTestCaseDesc Tests to expose the limit of page table virtual address space.
|
sl@0
|
23 |
//! @SYMTestActions Test that a paged page table can always be acquired.
|
sl@0
|
24 |
//! @SYMTestExpectedResults All tests should pass.
|
sl@0
|
25 |
//! @SYMTestPriority High
|
sl@0
|
26 |
//! @SYMTestStatus Implemented
|
sl@0
|
27 |
|
sl@0
|
28 |
#define __E32TEST_EXTENSION__
|
sl@0
|
29 |
#include <e32test.h>
|
sl@0
|
30 |
#include <dptest.h>
|
sl@0
|
31 |
#include <e32svr.h>
|
sl@0
|
32 |
#include <u32std.h>
|
sl@0
|
33 |
#include <hal.h>
|
sl@0
|
34 |
|
sl@0
|
35 |
#include "t_dpcmn.h"
|
sl@0
|
36 |
|
sl@0
|
37 |
// Test harness/console for this test program.
RTest test(_L("T_PAGETABLE_LIMIT"));

// Name of the server the main process creates to hand chunk handles
// to the spawned client processes.
_LIT(KClientPtServerName, "CClientPtServer");
// Executable name used when spawning client processes (this same binary,
// re-launched with a command line).
_LIT(KClientProcessName, "T_PAGETABLE_LIMIT");
|
sl@0
|
42 |
|
sl@0
|
43 |
// IPC function numbers exchanged between the client processes and the
// chunk-handing server (RServer2) owned by the main test process.
enum TClientMsgType
	{
	EClientConnect = -1,	// session connect (value seen from RMessage2::Function())
	EClientDisconnect = -2,	// session disconnect - client hit a resource limit and exited
	EClientGetChunk = 0,	// request a handle to the chunk whose index is in Int0()
	EClientReadChunks = 1,	// client reports it has touched all of its chunks
	};
|
sl@0
|
50 |
|
sl@0
|
51 |
class RDataPagingSession : public RSessionBase
|
sl@0
|
52 |
{
|
sl@0
|
53 |
public:
|
sl@0
|
54 |
TInt CreateSession(const TDesC& aServerName, TInt aMsgSlots)
|
sl@0
|
55 |
{
|
sl@0
|
56 |
return RSessionBase::CreateSession(aServerName,User::Version(),aMsgSlots);
|
sl@0
|
57 |
}
|
sl@0
|
58 |
TInt PublicSendReceive(TInt aFunction, const TIpcArgs &aPtr)
|
sl@0
|
59 |
{
|
sl@0
|
60 |
return (SendReceive(aFunction, aPtr));
|
sl@0
|
61 |
}
|
sl@0
|
62 |
TInt PublicSend(TInt aFunction, const TIpcArgs &aPtr)
|
sl@0
|
63 |
{
|
sl@0
|
64 |
return (Send(aFunction, aPtr));
|
sl@0
|
65 |
}
|
sl@0
|
66 |
};
|
sl@0
|
67 |
|
sl@0
|
68 |
|
sl@0
|
69 |
// Client-process side of the test.  Parses the command line
// ("<chunkCount> <accessData>"), connects to the parent's server, maps
// chunkCount chunks and then either touches each chunk once and idles
// (accessData == EFalse) or loops forever accessing the chunks' pages
// (accessData == ETrue).  Never returns except on chunk-mapping failure.
// @param aLen Length of this process's command line in characters.
// @return An error code if a chunk handle could not be obtained.
TInt ClientProcess(TInt aLen)
	{
	// Read the command line to get the number of chunks to map and whether or
	// not to access their data.
	HBufC* buf = HBufC::New(aLen);
	test(buf != NULL);
	TPtr ptr = buf->Des();
	User::CommandLine(ptr);

	TLex lex(ptr);
	TInt chunkCount;
	TInt r = lex.Val(chunkCount);
	test_KErrNone(r);
	lex.SkipSpace();

	TBool accessData;
	r = lex.Val(accessData);
	test_KErrNone(r);
	delete buf;	// command line fully parsed; free before looping forever (was leaked)

	RDataPagingSession session;
	test_KErrNone(session.CreateSession(KClientPtServerName, 1));

	RChunk* chunks = new RChunk[chunkCount];
	test(chunks != NULL);	// was unchecked - new[] can return NULL under OOM
	for (TInt i = 0; i < chunkCount; i++)
		{
		// Ask the parent for a handle to chunk i.
		TInt r = chunks[i].SetReturnedHandle(session.PublicSendReceive(EClientGetChunk, TIpcArgs(i)));
		if (r != KErrNone)
			{
			test.Printf(_L("Failed to create a handle to the server's chunk r=%d\n"), r);
			for (TInt j = 0; j < i; j++)
				chunks[j].Close();
			session.Close();
			return r;
			}
		test_Value(chunks[i].Size(), chunks[i].Size() >= gPageSize);
		}
	if (!accessData)
		{
		// Touch the 1st page of each of the chunks once only.
		for (TInt i = 0; i < chunkCount; i++)
			{
			// Write the chunk data from top to bottom of the chunk's first page.
			TUint8* base = chunks[i].Base();
			TUint8* end = base + gPageSize - 1;
			*base = *end;
			}
		// Tell parent we've touched each chunk.
		r = session.PublicSendReceive(EClientReadChunks, TIpcArgs());	// removed bogus (TThreadId) cast on an error code
		test_KErrNone(r);
		for (;;)
			{// Wake up every 100ms to be killed by the main process.
			User::After(100000);
			}
		}
	else
		{
		// Continuously access the chunks' pages, walking an offset through the
		// first half page so successive passes touch different bytes.
		// BUG FIX: offset was declared inside the infinite loop, so it was
		// reset to 0 on every pass and only offset 0 was ever accessed.
		TInt offset = 0;
		for (;;)
			{
			for (TInt i = 0; i < chunkCount; i++)
				{
				// Write the chunk data from top to bottom of the chunk's first page.
				TUint8* base = chunks[i].Base();
				TUint8* end = base + gPageSize - 1;
				*(base + offset) = *(end - offset);
				}
			if (++offset >= (gPageSize >> 1))
				offset = 0;
			}
		}
	}
|
sl@0
|
141 |
|
sl@0
|
142 |
|
sl@0
|
143 |
void TestMaxPt()
|
sl@0
|
144 |
{
|
sl@0
|
145 |
// Flexible memory model reserves 0xF800000-0xFFF00000 for page tables
|
sl@0
|
146 |
// this allows 130,048 pages tables. Therefore mapping 1000 one
|
sl@0
|
147 |
// page chunks into 256 processes would require 256,000 page tables, i.e.
|
sl@0
|
148 |
// more than enough to hit the limit. So that the limit is reached in the middle,
|
sl@0
|
149 |
// map 500 unpaged and 500 paged chunks in each process.
|
sl@0
|
150 |
const TUint KNumChunks = 1000;
|
sl@0
|
151 |
const TUint KPagedChunksStart = (KNumChunks >> 1);
|
sl@0
|
152 |
const TUint KNumProcesses = 256;
|
sl@0
|
153 |
const TInt KMinFreeRam = (1000 * gPageSize) + (130048 * (gPageSize>>2));
|
sl@0
|
154 |
TInt freeRam;
|
sl@0
|
155 |
HAL::Get(HALData::EMemoryRAMFree, freeRam);
|
sl@0
|
156 |
if (freeRam < KMinFreeRam)
|
sl@0
|
157 |
{
|
sl@0
|
158 |
test.Printf(_L("Only 0x%x bytes of free RAM not enough to perform the test. Skipping test.\n"), freeRam);
|
sl@0
|
159 |
return;
|
sl@0
|
160 |
}
|
sl@0
|
161 |
|
sl@0
|
162 |
// Remove the maximum limit on the cache size as the test requires that it can
|
sl@0
|
163 |
// allocate as many page tables as possible but without stealing any pages as
|
sl@0
|
164 |
// stealing pages may indirectly steal paged page table pages.
|
sl@0
|
165 |
TUint minCacheSize, maxCacheSize, currentCacheSize;
|
sl@0
|
166 |
DPTest::CacheSize(minCacheSize,maxCacheSize,currentCacheSize);
|
sl@0
|
167 |
test_KErrNone(DPTest::SetCacheSize(minCacheSize, KMaxTUint));
|
sl@0
|
168 |
|
sl@0
|
169 |
RServer2 ptServer;
|
sl@0
|
170 |
TInt r = ptServer.CreateGlobal(KClientPtServerName);
|
sl@0
|
171 |
test_KErrNone(r);
|
sl@0
|
172 |
|
sl@0
|
173 |
// Create the global unpaged chunks. They have one page committed
|
sl@0
|
174 |
// but have a maximum size large enough to prevent their page tables being
|
sl@0
|
175 |
// shared between the chunks. On arm with 4KB pages each page table maps 1MB
|
sl@0
|
176 |
// so make chunk 1MB+4KB so chunk requires 2 page tables and is not aligned on
|
sl@0
|
177 |
// a 1MB boundary so it is a fine memory object.
|
sl@0
|
178 |
const TUint KChunkSize = (1024 * 1024) + gPageSize;
|
sl@0
|
179 |
RChunk* chunks = new RChunk[KNumChunks];
|
sl@0
|
180 |
TChunkCreateInfo createInfo;
|
sl@0
|
181 |
createInfo.SetNormal(gPageSize, KChunkSize);
|
sl@0
|
182 |
createInfo.SetGlobal(KNullDesC);
|
sl@0
|
183 |
createInfo.SetPaging(TChunkCreateInfo::EUnpaged);
|
sl@0
|
184 |
TUint i = 0;
|
sl@0
|
185 |
for (; i < KPagedChunksStart; i++)
|
sl@0
|
186 |
{
|
sl@0
|
187 |
r = chunks[i].Create(createInfo);
|
sl@0
|
188 |
test_KErrNone(r);
|
sl@0
|
189 |
}
|
sl@0
|
190 |
// Create paged chunks.
|
sl@0
|
191 |
createInfo.SetPaging(TChunkCreateInfo::EPaged);
|
sl@0
|
192 |
for (; i< KNumChunks; i++)
|
sl@0
|
193 |
{
|
sl@0
|
194 |
r = chunks[i].Create(createInfo);
|
sl@0
|
195 |
test_KErrNone(r);
|
sl@0
|
196 |
}
|
sl@0
|
197 |
|
sl@0
|
198 |
// Start remote processes, giving each process handles to each chunk.
|
sl@0
|
199 |
RProcess* processes = new RProcess[KNumProcesses];
|
sl@0
|
200 |
RMessage2 ptMessage;
|
sl@0
|
201 |
TUint processIndex = 0;
|
sl@0
|
202 |
TUint processLimit = 0;
|
sl@0
|
203 |
for (; processIndex < KNumProcesses; processIndex++)
|
sl@0
|
204 |
{
|
sl@0
|
205 |
// Start the process.
|
sl@0
|
206 |
test.Printf(_L("Creating process %d\n"), processIndex);
|
sl@0
|
207 |
TBuf<80> args;
|
sl@0
|
208 |
args.AppendFormat(_L("%d %d"), KNumChunks, EFalse);
|
sl@0
|
209 |
r = processes[processIndex].Create(KClientProcessName, args);
|
sl@0
|
210 |
test_KErrNone(r);
|
sl@0
|
211 |
TRequestStatus s;
|
sl@0
|
212 |
processes[processIndex].Logon(s);
|
sl@0
|
213 |
test_Equal(KRequestPending, s.Int());
|
sl@0
|
214 |
processes[processIndex].Resume();
|
sl@0
|
215 |
|
sl@0
|
216 |
ptServer.Receive(ptMessage);
|
sl@0
|
217 |
test_Equal(EClientConnect, ptMessage.Function());
|
sl@0
|
218 |
ptMessage.Complete(KErrNone);
|
sl@0
|
219 |
TInt func = EClientGetChunk;
|
sl@0
|
220 |
TUint chunkIndex = 0;
|
sl@0
|
221 |
for (; chunkIndex < KNumChunks && func == EClientGetChunk; chunkIndex++)
|
sl@0
|
222 |
{// Pass handles to all the unpaged chunks to the new process.
|
sl@0
|
223 |
ptServer.Receive(ptMessage);
|
sl@0
|
224 |
func = ptMessage.Function();
|
sl@0
|
225 |
if (func == EClientGetChunk)
|
sl@0
|
226 |
{
|
sl@0
|
227 |
TUint index = ptMessage.Int0();
|
sl@0
|
228 |
ptMessage.Complete(chunks[index]);
|
sl@0
|
229 |
}
|
sl@0
|
230 |
}
|
sl@0
|
231 |
if (func != EClientGetChunk)
|
sl@0
|
232 |
{
|
sl@0
|
233 |
// Should hit the limit of page tables and this process instance should exit
|
sl@0
|
234 |
// sending a disconnect message in the process.
|
sl@0
|
235 |
test_Equal(EClientDisconnect, func);
|
sl@0
|
236 |
// Should only fail when mapping unpaged chunks.
|
sl@0
|
237 |
test_Value(chunkIndex, chunkIndex < (KNumChunks >> 1));
|
sl@0
|
238 |
break;
|
sl@0
|
239 |
}
|
sl@0
|
240 |
// Wait for the process to access all the chunks and therefore
|
sl@0
|
241 |
// allocate the paged page tables before moving onto the next process.
|
sl@0
|
242 |
ptServer.Receive(ptMessage);
|
sl@0
|
243 |
func = ptMessage.Function();
|
sl@0
|
244 |
test_Equal(EClientReadChunks, func);
|
sl@0
|
245 |
ptMessage.Complete(KErrNone);
|
sl@0
|
246 |
|
sl@0
|
247 |
// Should have mapped all the required chunks.
|
sl@0
|
248 |
test_Equal(KNumChunks, chunkIndex);
|
sl@0
|
249 |
}
|
sl@0
|
250 |
// Should hit page table limit before KNumProcesses have been created.
|
sl@0
|
251 |
test_Value(processIndex, processIndex < KNumProcesses - 1);
|
sl@0
|
252 |
processLimit = processIndex;
|
sl@0
|
253 |
|
sl@0
|
254 |
// Now create more processes to access paged data even though the page table
|
sl@0
|
255 |
// address space has been exhausted. Limit to 10 more processes as test takes
|
sl@0
|
256 |
// long enough already.
|
sl@0
|
257 |
processIndex++;
|
sl@0
|
258 |
TUint excessProcesses = KNumProcesses - processIndex;
|
sl@0
|
259 |
TUint pagedIndexEnd = (excessProcesses > 10)? processIndex + 10 : processIndex + excessProcesses;
|
sl@0
|
260 |
for (; processIndex < pagedIndexEnd; processIndex++)
|
sl@0
|
261 |
{
|
sl@0
|
262 |
// Start the process.
|
sl@0
|
263 |
test.Printf(_L("Creating process %d\n"), processIndex);
|
sl@0
|
264 |
TBuf<80> args;
|
sl@0
|
265 |
args.AppendFormat(_L("%d %d"), KNumChunks-KPagedChunksStart, ETrue);
|
sl@0
|
266 |
r = processes[processIndex].Create(KClientProcessName, args);
|
sl@0
|
267 |
if (r != KErrNone)
|
sl@0
|
268 |
{// Have hit the limit of processes.
|
sl@0
|
269 |
processIndex--;
|
sl@0
|
270 |
// Should have created at least one more process.
|
sl@0
|
271 |
test_Value(processIndex, processIndex > processLimit);
|
sl@0
|
272 |
break;
|
sl@0
|
273 |
}
|
sl@0
|
274 |
TRequestStatus s;
|
sl@0
|
275 |
processes[processIndex].Logon(s);
|
sl@0
|
276 |
test_Equal(KRequestPending, s.Int());
|
sl@0
|
277 |
processes[processIndex].Resume();
|
sl@0
|
278 |
|
sl@0
|
279 |
ptServer.Receive(ptMessage);
|
sl@0
|
280 |
test_Equal(EClientConnect, ptMessage.Function());
|
sl@0
|
281 |
ptMessage.Complete(KErrNone);
|
sl@0
|
282 |
|
sl@0
|
283 |
TInt func = EClientGetChunk;
|
sl@0
|
284 |
TUint chunkIndex = KPagedChunksStart;
|
sl@0
|
285 |
for (; chunkIndex < KNumChunks && func == EClientGetChunk; chunkIndex++)
|
sl@0
|
286 |
{// Pass handles to all the unpaged chunks to the new process.
|
sl@0
|
287 |
ptServer.Receive(ptMessage);
|
sl@0
|
288 |
func = ptMessage.Function();
|
sl@0
|
289 |
if (func == EClientGetChunk)
|
sl@0
|
290 |
{
|
sl@0
|
291 |
TUint index = ptMessage.Int0() + KPagedChunksStart;
|
sl@0
|
292 |
ptMessage.Complete(chunks[index]);
|
sl@0
|
293 |
}
|
sl@0
|
294 |
}
|
sl@0
|
295 |
if (func != EClientGetChunk)
|
sl@0
|
296 |
{// Reached memory limits so exit.
|
sl@0
|
297 |
test_Equal(EClientDisconnect, func);
|
sl@0
|
298 |
// Should have created at least one more process.
|
sl@0
|
299 |
test_Value(processIndex, processIndex > processLimit+1);
|
sl@0
|
300 |
break;
|
sl@0
|
301 |
}
|
sl@0
|
302 |
|
sl@0
|
303 |
// Should have mapped all the required chunks.
|
sl@0
|
304 |
test_Equal(KNumChunks, chunkIndex);
|
sl@0
|
305 |
}
|
sl@0
|
306 |
// If we reached the end of then ensure that we kill only the running processes.
|
sl@0
|
307 |
if (processIndex == pagedIndexEnd)
|
sl@0
|
308 |
processIndex--;
|
sl@0
|
309 |
// Kill all the remote processes
|
sl@0
|
310 |
for(TInt j = processIndex; j >= 0; j--)
|
sl@0
|
311 |
{
|
sl@0
|
312 |
test.Printf(_L("killing process %d\n"), j);
|
sl@0
|
313 |
TRequestStatus req;
|
sl@0
|
314 |
processes[j].Logon(req);
|
sl@0
|
315 |
if (req == KRequestPending)
|
sl@0
|
316 |
{
|
sl@0
|
317 |
processes[j].Kill(KErrNone);
|
sl@0
|
318 |
User::WaitForRequest(req);
|
sl@0
|
319 |
}
|
sl@0
|
320 |
processes[j].Close();
|
sl@0
|
321 |
}
|
sl@0
|
322 |
delete[] processes;
|
sl@0
|
323 |
// Close the chunks.
|
sl@0
|
324 |
for (TUint k = 0; k < KNumChunks; k++)
|
sl@0
|
325 |
chunks[k].Close();
|
sl@0
|
326 |
delete[] chunks;
|
sl@0
|
327 |
|
sl@0
|
328 |
test_KErrNone(DPTest::SetCacheSize(minCacheSize, maxCacheSize));
|
sl@0
|
329 |
}
|
sl@0
|
330 |
|
sl@0
|
331 |
|
sl@0
|
332 |
// Process entry point.  When launched with a non-empty command line this
// binary runs as one of the spawned client processes; otherwise it acts as
// the main test driver.
TInt E32Main()
	{
	test_KErrNone(UserHal::PageSizeInBytes(gPageSize));

	// Only the spawned client instances are ever given a command line.
	const TUint cmdLineLen = User::CommandLineLength();
	if (cmdLineLen > 0)
		return ClientProcess(cmdLineLen);

	test.Title();
	test_KErrNone(GetGlobalPolicies());

	if (!gDataPagingSupported)
		{
		test.Printf(_L("Data paging not enabled so skipping test...\n"));
		return KErrNone;
		}

	test.Start(_L("Test the system can always acquire a paged page table"));
	TestMaxPt();
	test.End();
	return KErrNone;
	}
|