Update contrib.
1 // Copyright (c) 2005-2009 Nokia Corporation and/or its subsidiary(-ies).
2 // All rights reserved.
3 // This component and the accompanying materials are made available
4 // under the terms of the License "Eclipse Public License v1.0"
5 // which accompanies this distribution, and is available
6 // at the URL "http://www.eclipse.org/legal/epl-v10.html".
8 // Initial Contributors:
9 // Nokia Corporation - initial contribution.
14 // e32test\mmu\d_demandpaging.cpp
18 #include <kernel/kern_priv.h>
19 #include <kernel/cache.h>
20 #include "d_demandpaging.h"
22 /// Page attributes, cut-n-paste'd from mmubase.h
25 // EInvalid=0, // No physical RAM exists for this page
26 // EFixed=1, // RAM fixed at boot time
27 // EUnused=2, // Page is unused
45 EStateNormal = 0, // no special state
// Factory ("logical device") object for the demand-paging test LDD.
// Created once at driver load (see DECLARE_STANDARD_LDD below); its
// Create() manufactures one DDemandPagingTestChannel per user handle.
// NOTE(review): this excerpt omits the class braces and access
// specifiers, so member visibility cannot be confirmed here.
56 class DDemandPagingTestFactory : public DLogicalDevice
59 	~DDemandPagingTestFactory();
60 	virtual TInt Install();
61 	virtual void GetCaps(TDes8& aDes) const;
62 	virtual TInt Create(DLogicalChannelBase*& aChannel);
// Per-client channel implementing the demand-paging tests.  Each test
// entry point below corresponds to an RDemandPagingTestLdd request
// dispatched from Request().
// NOTE(review): excerpt omits braces/access specifiers and possibly
// further members (e.g. LockTest2/FreeRam are defined later but not
// visible in this declaration fragment).
65 class DDemandPagingTestChannel : public DLogicalChannelBase
68 	DDemandPagingTestChannel();
69 	~DDemandPagingTestChannel();
70 	virtual TInt DoCreate(TInt aUnit, const TDesC8* anInfo, const TVersion& aVer);
71 	virtual TInt Request(TInt aFunction, TAny* a1, TAny* a2);
72 	TInt LockTest(const TAny* aBuffer, TInt aSize);
74 	TInt DoConsumeContiguousRamTest(TInt aAlign, TInt aPages);
75 	TInt DoCreatePlatHwChunk(TInt aSize, TAny* aLinAddr);
76 	TInt DoDestroyPlatHwChunk();
77 	TInt ReadHoldingMutexTest(TAny* aDest);
79 	TBool CheckPagedIn(TLinAddr aAddress);
80 	TBool CheckPagedOut(TLinAddr aAddress);
81 	TBool CheckLocked(TLinAddr aAddress);
85 	DDemandPagingTestFactory*	iFactory;
86 	DDemandPagingLock iLock;
// Hardware chunk state used by the PlatHwChunk create/destroy tests.
88 	DPlatChunkHw* iHwChunk;
90 	TPhysAddr iPhysBase;		// This will be base physical address of the chunk
91 	TLinAddr iLinearBase;	// This will be base linear address of the chunk
95 // DDemandPagingTestFactory
// Second-phase install: publish the device under its LDD name so
// user-side RDemandPagingTestLdd::Open() can find it.
98 TInt DDemandPagingTestFactory::Install()
100 	return SetName(&KDemandPagingTestLddName);
// Destructor — no factory-owned resources to release (body not
// visible in this excerpt; presumably empty — TODO confirm).
103 DDemandPagingTestFactory::~DDemandPagingTestFactory()
// Deliberately empty capability query.
107 void DDemandPagingTestFactory::GetCaps(TDes8& /*aDes*/) const
109 	// Not used but required as DLogicalDevice::GetCaps is pure virtual
// Create one channel for a user-side open; links the channel back to
// this factory.  NOTE(review): the null-check/return lines between
// these statements are omitted from this excerpt.
112 TInt DDemandPagingTestFactory::Create(DLogicalChannelBase*& aChannel)
115 	DDemandPagingTestChannel* channel=new DDemandPagingTestChannel;
118 	channel->iFactory = this;
// Standard LDD entry point: invoked by the kernel at driver load to
// create the factory object (kernel treats NULL as KErrNoMemory).
123 DECLARE_STANDARD_LDD()
125 	return new DDemandPagingTestFactory;
129 // DDemandPagingTestChannel
// Channel second-phase construction; all arguments are ignored
// (body not visible in this excerpt — presumably returns KErrNone;
// TODO confirm against the full source).
132 TInt DDemandPagingTestChannel::DoCreate(TInt /*aUnit*/, const TDesC8* /*aInfo*/, const TVersion& /*aVer*/)
// Constructor (body not visible in this excerpt).
137 DDemandPagingTestChannel::DDemandPagingTestChannel()
// Destructor: tear down any hardware chunk left over from the
// ECreatePlatHwChunk test so physical RAM is not leaked on close.
141 DDemandPagingTestChannel::~DDemandPagingTestChannel()
143 	DoDestroyPlatHwChunk();
// Synchronous request dispatcher for the test channel.  a1/a2 carry
// per-function arguments from the user side.
// NOTE(review): this excerpt omits braces, returns and several lines
// of each case; only the visible statements are documented.
146 TInt DDemandPagingTestChannel::Request(TInt aFunction, TAny* a1, TAny* a2)
// Run the paging-lock test over the user buffer a1 of size a2.
150 	case RDemandPagingTestLdd::ELockTest:
152 		TInt r = LockTest(a1,(TInt)a2);
// Toggle the KREALTIME trace bit in the superpage debug mask:
// clear the bit always (the ~bit AND mask), then OR it back in
// only when a1 is non-zero.
158 	case RDemandPagingTestLdd::ESetRealtimeTrace:
161 		TUint32 bit = TUint32(1<<(KREALTIME&31));
162 		__e32_atomic_axo_ord32(&Kern::SuperPage().iDebugMask[KREALTIME>>5], ~bit, a1?bit:0);
163 #if 0 // can enable this to help debugging
164 		bit = (1<<(KPAGING&31));
165 		__e32_atomic_axo_ord32(&Kern::SuperPage().iDebugMask[KPAGING>>5], ~bit, a1?bit:0);
// Exhaust contiguous RAM: a1 = alignment (log2), a2 = block size.
171 	case RDemandPagingTestLdd::EDoConsumeContiguousRamTest:
173 		return DDemandPagingTestChannel::DoConsumeContiguousRamTest((TInt)a1, (TInt)a2);
// Create a globally-mapped hardware chunk of a1 bytes; virtual
// address written back to user pointer a2.
176 	case RDemandPagingTestLdd::ECreatePlatHwChunk:
178 		return DDemandPagingTestChannel::DoCreatePlatHwChunk((TInt)a1, a2);
181 	case RDemandPagingTestLdd::EDestroyPlatHwChunk:
183 		return DDemandPagingTestChannel::DoDestroyPlatHwChunk();
// Reserve paging-lock resources for a2 bytes, then pin the user
// range [a1, a1+a2) for the current thread.
186 	case RDemandPagingTestLdd::ELock:
188 		TInt r=iLock.Alloc((TInt)a2);
191 		return iLock.Lock(&Kern::CurrentThread(),(TLinAddr)a1,(TInt)a2);
194 	case RDemandPagingTestLdd::EUnlock:
200 	case RDemandPagingTestLdd::EReadHoldingMutexTest:
201 		return ReadHoldingMutexTest((TAny*)a1);
204 	return KErrNotSupported;
209 // DDemandPagingTestChannel::DoCreatePlatHwChunk
211 // For some of the tests of IPC from demand-paged memory, we need a writable
212 // globally-mapped buffer; so this function creates a suitable chunk and
213 // returns its (global, virtual) address to the userland caller. The caller
214 // should call DoDestroyPlatHwChunk() to release the memory when the tests
// Create a user-writable, globally-mapped hardware chunk backed by
// contiguous physical RAM, for IPC-from-paged-memory tests.  Writes
// the chunk's virtual base address back to userland via aLinAddr.
// Pair with DoDestroyPlatHwChunk().
// NOTE(review): excerpt omits braces and some returns; cleanup
// ordering on the error paths cannot be fully confirmed here.
217 TInt DDemandPagingTestChannel::DoCreatePlatHwChunk(TInt aSize, TAny* aLinAddr)
219 	TInt mapAttr = EMapAttrUserRw;        // Supervisor and user both have read/write permissions
// Enter a critical section: we are about to allocate kernel-side
// resources that must not be orphaned by thread termination.
221 	NKern::ThreadEnterCS();
222 	if (iHwChunk)  // Only one chunk at a atime
224 		NKern::ThreadLeaveCS();
225 		return KErrAlreadyExists;
228 	iChunkSize = Kern::RoundToPageSize(aSize);
230 	Kern::Printf("*** Attempting to allocate contiguous physical RAM ***");
231 	TInt free = Kern::FreeRamInBytes();
232 	Kern::Printf("      requested: %08x", iChunkSize);
233 	Kern::Printf("     total free: %08x", free);
235 	TInt r = Epoc::AllocPhysicalRam(iChunkSize, iPhysBase, 0);	// Allocate RAM; result in iPhysBase
238 		NKern::ThreadLeaveCS();
239 		Kern::Printf("      failed with error %d", r);
243 	Kern::Printf("      success");
// Map the physical range just allocated; on failure, release the
// physical RAM again before leaving the CS.
245 	r = DPlatChunkHw::New(iHwChunk, iPhysBase, iChunkSize, mapAttr);	// Create chunk
248 		Epoc::FreePhysicalRam(iPhysBase, iChunkSize);
250 		NKern::ThreadLeaveCS();
253 	NKern::ThreadLeaveCS();
255 	// Return the virtual address to userland
256 	iLinearBase = iHwChunk->LinearAddress();
// kumemput performs a checked copy to user memory.
257 	kumemput(aLinAddr, &iLinearBase, sizeof(iLinearBase));
259 	Kern::Printf("CreatePlatHwChunk@%08x: iLinearBase %08x, iPhysBase %08x, size %d",
260 		iHwChunk, iLinearBase, iPhysBase, iChunkSize);
// Release the hardware chunk created by DoCreatePlatHwChunk: close
// the mapping, then free the contiguous physical RAM behind it.
// Safe to call when no chunk exists (called from the destructor);
// NOTE(review): the iHwChunk null-check line is omitted from this
// excerpt — confirm against the full source.
265 TInt DDemandPagingTestChannel::DoDestroyPlatHwChunk()
267 	Kern::Printf("DestroyPlatHwChunk@%08x: iLinearBase %08x, iPhysBase %08x, size %d",
268 		iHwChunk, iLinearBase, iPhysBase, iChunkSize);
269 	NKern::ThreadEnterCS();
272 		iHwChunk->Close(NULL);
273 		Epoc::FreePhysicalRam(iPhysBase, iChunkSize);
278 	NKern::ThreadLeaveCS();
283 // DDemandPagingTestChannel::DoConsumeContiguousRamTest
285 // This test attempts to consume all available Contiguous Ram until we need to ask the
286 // demand paging code to release memory for it.
288 // On completion free all the memory allocated.
290 #define CHECK(c) { if(!(c)) { Kern::Printf("Fail %d", __LINE__); ; retVal = __LINE__;} }
// Repeatedly allocate aSize-byte blocks of contiguous physical RAM
// (aligned to 2^aAlign) until allocation fails, verifying that the
// demand-paging cache shrinks (releases pages) to satisfy the
// allocations.  All blocks are freed before returning.
// NOTE(review): excerpt omits loop braces, the alloc-failure break
// and the free-loop header; flow is documented as far as visible.
292 TInt DDemandPagingTestChannel::DoConsumeContiguousRamTest(TInt aAlign, TInt aSize)
294 	TInt retVal = KErrNone;
295 	TInt initialFreeRam = FreeRam();
// Upper bound on how many blocks could ever be allocated.
296 	TInt totalBlocks = initialFreeRam/aSize;
298 	NKern::ThreadEnterCS();
299 	TPhysAddr* pAddrArray = (TPhysAddr *)Kern::Alloc(sizeof(TPhysAddr) * totalBlocks);
300 	NKern::ThreadLeaveCS();
305 	SVMCacheInfo tempPages;
307 	// get the initial free ram again as the heap may have grabbed a page during the alloc
308 	initialFreeRam = FreeRam();
309 	Kern::Printf("ConsumeContiguousRamTest: align %d size %d initialFreeRam %d", aAlign, aSize, initialFreeRam);
311 	CHECK(Kern::HalFunction(EHalGroupVM,EVMHalGetCacheSize,&tempPages,0) == KErrNone);
312 	Kern::Printf("Start cache info: iMinSize %d iMaxSize %d iCurrentSize %d iMaxFreeSize %d",
313 				tempPages.iMinSize, tempPages.iMaxSize, tempPages.iCurrentSize ,tempPages.iMaxFreeSize);
315 	TInt initialFreePages = tempPages.iMaxFreeSize;
316 	CHECK(initialFreePages != 0);
318 	// allocate blocks to use up RAM until we fail to allocate any further...
319 	TBool freedPagesToAlloc = EFalse;
// Mask for verifying the returned physical address alignment.
321 	TUint32 alignMask = (1 << aAlign) - 1;
322 	for (index = 0; index < totalBlocks; )
// Snapshot the paging cache's releasable page count before the
// allocation so we can tell whether the alloc reclaimed from it.
324 		CHECK(Kern::HalFunction(EHalGroupVM,EVMHalGetCacheSize,&tempPages,0) == KErrNone);
325 		TInt beforePages = tempPages.iMaxFreeSize;
327 		NKern::ThreadEnterCS();
328 		TInt r = Epoc::AllocPhysicalRam(aSize, pAddrArray[index], aAlign);
331 			// check the alignment of the returned pages
332 			CHECK((pAddrArray[index] & alignMask) == 0);
335 		NKern::ThreadLeaveCS();
340 		CHECK(Kern::HalFunction(EHalGroupVM,EVMHalGetCacheSize,&tempPages,0) == KErrNone);
341 		TInt afterPages = tempPages.iMaxFreeSize;
343 		if (afterPages != beforePages)
344 			freedPagesToAlloc = ETrue; // the alloc reclaimed memory from the paging cache
348 		Kern::Printf("WARNING : DoConsumeContiguousRamTest no allocations were successful");
349 	// free the memory we allocated...
352 		NKern::ThreadEnterCS();
353 		TInt r = Epoc::FreePhysicalRam(pAddrArray[index], aSize);
354 		NKern::ThreadLeaveCS();
// After freeing everything, free RAM should be back to its start value.
358 	CHECK(FreeRam() == initialFreeRam);
360 	NKern::ThreadEnterCS();
361 	Kern::Free(pAddrArray);
362 	NKern::ThreadLeaveCS();
364 	CHECK(Kern::HalFunction(EHalGroupVM,EVMHalGetCacheSize,&tempPages,0) == KErrNone);
365 	Kern::Printf("End cache info: iMinSize %d iMaxSize %d iCurrentSize %d iMaxFreeSize %d",
366 				tempPages.iMinSize, tempPages.iMaxSize, tempPages.iCurrentSize ,tempPages.iMaxFreeSize);
368 	if (!freedPagesToAlloc)
369 		Kern::Printf("WARNING : DoConsumeContiguousRamTest freedPagesToAlloc was eFalse");
370 	//CHECK(freedPagesToAlloc);
// Read one byte through a volatile pointer — used (via the READ
// macro below) to force a demand-paging fault on the target address
// without the compiler optimising the access away.
377 TUint8 ReadByte(volatile TUint8* aPtr)
382 #define CHECK(c) { if(!(c)) return __LINE__; }
384 #define READ(a) ReadByte((volatile TUint8*)(a))
// Exercise DDemandPagingLock against a user-supplied paged buffer:
// page it in, reserve lock resources, flush, lock, verify locked
// pages survive a cache flush, take a second overlapping lock,
// release it, then unlock/free and verify free RAM returns to its
// initial value at each stage.  Returns 0 on success or the failing
// __LINE__ via the local CHECK macro.
// NOTE(review): excerpt omits variable declarations (pageSize, addr,
// freeRam1/2, initialFreeRam), the Unlock/Free calls and braces.
386 TInt DDemandPagingTestChannel::LockTest(const TAny* aBuffer, TInt aSize)
388 	// Get page size info
390 	CHECK(Kern::HalFunction(EHalGroupKernel,EKernelHalPageSizeInBytes,&pageSize,0)==KErrNone);
391 	TInt pageMask = pageSize-1;
393 	// See if were running of the Flexible Memory Model
394 	TUint32 memModelAttrib = (TUint32)Kern::HalFunction(EHalGroupKernel,EKernelHalMemModelInfo,0,0);
395 	TBool fmm = (memModelAttrib&EMemModelTypeMask)==EMemModelTypeFlexible;
397 	// Round buffer to page boundaries
398 	TLinAddr start = ((TLinAddr)aBuffer+pageMask)&~pageMask;
399 	TLinAddr end = ((TLinAddr)aBuffer+aSize)&~pageMask;
401 	Kern::Printf("Test buffer is %08x, %x\n",start,aSize);
// Need at least two whole pages between the rounded bounds.
402 	CHECK(aSize>pageSize*2);
404 	// Flush all paged memory
405 	Kern::HalFunction(EHalGroupVM,EVMHalFlushCache,0,0);
411 	TUint lockBytesUsed = fmm ? 0 : 0; // free ram change on locking (zero or aSize depending on implementation)
413 	{ // this brace is essential for correctness
// lock2's lifetime is bounded by this scope: its destructor (run at
// the closing brace) is part of the test.
414 		DDemandPagingLock lock2; // construct a lock;
416 		Kern::Printf("Check reading from buffer pages it in\n");
417 		for(addr=start; addr<end; addr+=pageSize) READ(addr);
418 		for(addr=start; addr<end; addr+=pageSize) CHECK(CheckPagedIn(addr));
419 		initialFreeRam = FreeRam();
421 		Kern::Printf("Check Alloc reserves pages\n");
422 		CHECK(iLock.Alloc(aSize)==KErrNone);
423 		freeRam1 = FreeRam();
425 		Kern::Printf("Check flushing pages out the buffer\n");
426 		Kern::HalFunction(EHalGroupVM,EVMHalFlushCache,0,0);
427 		for(addr=start; addr<end; addr+=pageSize) CHECK(CheckPagedOut(addr));
429 		Kern::Printf("Check Lock\n");
// Lock pages the buffer back in and pins it; free RAM should drop by
// exactly lockBytesUsed relative to the post-Alloc level.
430 		CHECK(iLock.Lock(&Kern::CurrentThread(),start,aSize));
431 		CHECK((TUint)FreeRam()==TUint(freeRam1-lockBytesUsed));
432 		for(addr=start; addr<end; addr+=pageSize) CHECK(CheckLocked(addr));
434 		Kern::Printf("Check flushing doesn't page out the buffer\n");
435 		Kern::HalFunction(EHalGroupVM,EVMHalFlushCache,0,0);
436 		for(addr=start; addr<end; addr+=pageSize) CHECK(CheckLocked(addr));
437 		CHECK((TUint)FreeRam()==TUint(freeRam1-lockBytesUsed));
439 		Kern::Printf("Check second Alloc\n");
440 		CHECK(lock2.Alloc(aSize)==KErrNone);
441 		freeRam2 = FreeRam();
443 		Kern::Printf("Check second Lock\n");
// A second lock over an already-locked range must succeed without
// consuming additional RAM.
444 		CHECK(lock2.Lock(&Kern::CurrentThread(),start,aSize));
445 		CHECK(FreeRam()==freeRam2);
446 		for(addr=start; addr<end; addr+=pageSize) CHECK(CheckLocked(addr));
448 		Kern::Printf("Check deleting second lock\n");
449 		// lock2 is deleted here because it goes out of scope...
450 	} // this brace is essential for correctness
// After lock2's destruction the pages must stay locked by iLock.
451 	CHECK((TUint)FreeRam()==TUint(freeRam1-lockBytesUsed));
452 	for(addr=start; addr<end; addr+=pageSize) CHECK(CheckLocked(addr));
454 	Kern::Printf("Check Unlock\n");
456 	CHECK(FreeRam()==freeRam1);
457 	for(addr=start; addr<end; addr+=pageSize) CHECK(CheckPagedIn(addr));
459 	CHECK(FreeRam()==initialFreeRam);
461 	Kern::Printf("Check Free\n");
463 	CHECK(FreeRam()==initialFreeRam);
465 	CHECK(FreeRam()==initialFreeRam);
471 #define CHECK(c) { if(!(c)) { r = __LINE__; goto cleanup; } }
// Keep allocating one-byte DDemandPagingLocks until free RAM drops,
// proving the paging live list grows to accommodate lock reservations.
// Cleanup (freeing the lock array) happens at the "cleanup" label the
// local CHECK macro jumps to on failure.
// NOTE(review): excerpt omits the new-failure check, the loop-exit
// break, the cleanup label and the per-element delete; documented
// only as far as visible.
473 TInt DDemandPagingTestChannel::LockTest2()
475 	Kern::Printf("Check allocating locks eventually increases size of live list\n");
478 	DDemandPagingLock* lock = NULL;
479 	RPointerArray<DDemandPagingLock> lockArray;
481 	const TInt KLockMax = 1000; // make this a bit bigger than current min page count?
// Critical section: raw kernel-heap objects are live across the loop.
484 	NKern::ThreadEnterCS();
485 	for (i = 0 ; i < KLockMax ; ++i)
487 		lock = new DDemandPagingLock;
// Ownership of 'lock' transfers to the array once appended.
489 		CHECK(lockArray.Append(lock) == KErrNone);
492 		TInt initialFreeRam = FreeRam();
493 		CHECK(lockArray[i]->Alloc(1) == KErrNone);
// A drop in free RAM means this Alloc grew the live list.
494 		if (FreeRam() < initialFreeRam)
496 			Kern::Printf("Live list size increased after %d locks allocated", i + 1);
507 	for (i = 0 ; i < lockArray.Count() ; ++i)
514 	NKern::ThreadLeaveCS();
// Report free RAM in bytes, first draining pending supervisor-thread
// work (deferred page frees) so the figure is stable for the == checks
// used throughout these tests.
519 TInt DDemandPagingTestChannel::FreeRam()
521 	Kern::HalFunction(EHalGroupKernel, EKernelHalSupervisorBarrier, 0, 0);
522 	TInt freeRam = Kern::FreeRamInBytes();
523 	Kern::Printf("...free RAM: %x\n",freeRam);
// Query the VM HAL for the paging state of the page containing
// aAddress; the byte at bits 8-15 encodes the EState* value (see the
// Check* helpers below).
528 TUint32 PageState(TLinAddr aAddress)
530 	TUint32 state = Kern::HalFunction(EHalGroupVM, EVMPageState, (TAny*)aAddress, 0);
531 	Kern::Printf("PageState: %08x=%08x",aAddress,state);
// True when the page holding aAddress is paged in on the "young" list
// (state byte == EStatePagedYoung).
536 TBool DDemandPagingTestChannel::CheckPagedIn(TLinAddr aAddress)
538 	TUint32 state = PageState(aAddress);
539 	return (state&0xff00) == (EStatePagedYoung<<8);
// True when the page holding aAddress is not present at all (both the
// state byte and the low byte are zero).
543 TBool DDemandPagingTestChannel::CheckPagedOut(TLinAddr aAddress)
545 	TUint32 state = PageState(aAddress);
546 	return (state&0xffff) == 0;
// True when the page holding aAddress is pinned by a paging lock
// (state byte == EStatePagedLocked).
// NOTE(review): declared as returning TBool in the class declaration
// above but defined here as TInt — confirm against the full source
// (TBool is a typedef of TInt on Symbian, which may mask this).
550 TInt DDemandPagingTestChannel::CheckLocked(TLinAddr aAddress)
552 	TUint32 state = PageState(aAddress);
553 	return (state&0xff00) == (EStatePagedLocked<<8);
// Regression test: perform cross-thread reads (ThreadRawWrite into the
// user buffer aDest) while holding a mutex whose order is below the
// demand-paging mutex.  Every source used (unpaged ROM, kernel stack,
// kernel heap) is non-pageable, so no paging fault — and hence no
// mutex-order violation — should occur; a kernel fault means failure.
// NOTE(review): excerpt omits braces and some error/cleanup lines.
557 TInt DDemandPagingTestChannel::ReadHoldingMutexTest(TAny* aDest)
559 	_LIT(KMutexName, "DPTestMutex");
561 	NKern::ThreadEnterCS();
// Mutex order deliberately chosen below the demand-paging order so
// that taking a paging fault while held would be a detectable error.
564 	TInt r = Kern::MutexCreate(mutex, KMutexName, KMutexOrdDebug); // Mutex order < demand paging
567 		NKern::ThreadLeaveCS();
570 	Kern::MutexWait(*mutex);
// Compute the bounds of the unpaged portion of ROM: it runs from the
// ROM header up to the start of pageable ROM (or the whole image if
// none of it is pageable).
572 	const TRomHeader& romHeader = Epoc::RomHeader();
573 	TLinAddr unpagedRomStart = (TLinAddr)&romHeader;
574 	TLinAddr unpagedRomEnd;
575 	if (romHeader.iPageableRomStart)
576 		unpagedRomEnd = unpagedRomStart + romHeader.iPageableRomStart;
578 		unpagedRomEnd = unpagedRomStart + romHeader.iUncompressedSize;
580 	const TInt length = 16;
581 	TUint8 localBuf[length];
584 	Kern::Printf("Local buffer at %08x", aDest);
// Two probes at the extremes of the unpaged ROM region.
586 	TAny* src1 = (TAny*)unpagedRomStart;
587 	TAny* src2 = (TAny*)(unpagedRomEnd - length);
589 	DThread* thread = &Kern::CurrentThread();
591 	Kern::Printf("Attempting to access %08x", src1);
592 	Kern::ThreadRawWrite(thread, aDest, src1, length);
593 	Kern::Printf("Attempting to access %08x", src2);
594 	Kern::ThreadRawWrite(thread, aDest, src2, length);
// Kernel stack data is never paged.
596 	TUint8 stackData[length];
597 	Kern::Printf("Attempting to access %08x", stackData);
598 	Kern::ThreadRawWrite(thread, aDest, stackData, length);
// Kernel heap data is never paged either.
600 	TAny* heapData = Kern::Alloc(length);
603 		Kern::Printf("Attempting to access %08x", heapData);
604 		Kern::ThreadRawWrite(thread, aDest, heapData, length);
605 		Kern::Free(heapData);
610 	Kern::MutexSignal(*mutex);
613 	NKern::ThreadLeaveCS();
615 	return r; // a kernel fault indicates that the test failed