1.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
1.2 +++ b/os/kernelhwsrv/kerneltest/e32test/mmu/t_cachechunk.cpp Fri Jun 15 03:10:57 2012 +0200
1.3 @@ -0,0 +1,578 @@
1.4 +// Copyright (c) 1995-2009 Nokia Corporation and/or its subsidiary(-ies).
1.5 +// All rights reserved.
1.6 +// This component and the accompanying materials are made available
1.7 +// under the terms of the License "Eclipse Public License v1.0"
1.8 +// which accompanies this distribution, and is available
1.9 +// at the URL "http://www.eclipse.org/legal/epl-v10.html".
1.10 +//
1.11 +// Initial Contributors:
1.12 +// Nokia Corporation - initial contribution.
1.13 +//
1.14 +// Contributors:
1.15 +//
1.16 +// Description:
1.17 +// e32test\mmu\t_cachechunk.cpp
1.18 +// Tests on RChunk objects in connection with demand paging.
1.19 +// Tests exercise the locking, unlocking, committing and decommitting of
1.20 +// pages in chunk objects.
1.21 +// 1 Check Unlocked page gets counted as free memory
1.22 +// 2 Check Unlock/Lock preserves page contents
1.23 +// 3 Tests at chunk offset '0'
1.24 +// 4 Check Lock is idempotent
1.25 +// 5 Check non page aligned Unlock/Lock
1.26 +// 6 Check unlocked pages get reclaimed for new memory allocation
1.27 +// 7 Check reclaimed memory is unmapped from original location
1.28 +// 8 Check Lock fails when memory is reclaimed
1.29 +// 9 Check Lock failure Decommits memory
1.30 +// 10 Recommit memory to chunk
1.31 +// 11 Check Commit on unlocked pages
1.32 +// 12 Check Commit on unlocked and reclaimed pages
1.33 +// 13 Restore chunk
1.34 +// 14 Tests at chunk offset 'PageSize'
1.35 +// 15 Check Lock is idempotent
1.36 +// 16 Check non page aligned Unlock/Lock
1.37 +// 17 Check unlocked pages get reclaimed for new memory allocation
1.38 +// 18 Check reclaimed memory is unmapped from original location
1.39 +// 19 Check Lock fails when memory is reclaimed
1.40 +// 20 Check Lock failure Decommits memory
1.41 +// 21 Recommit memory to chunk
1.42 +// 22 Check Commit on unlocked pages
1.43 +// 23 Check Commit on unlocked and reclaimed pages
1.44 +// 24 Restore chunk
1.45 +// 25 Tests at chunk offset '0x100000-PageSize'
1.46 +// 26 Check Lock is idempotent
1.47 +// 27 Check non page aligned Unlock/Lock
1.48 +// 28 Check unlocked pages get reclaimed for new memory allocation
1.49 +// 29 Check reclaimed memory is unmapped from original location
1.50 +// 30 Check Lock fails when memory is reclaimed
1.51 +// 31 Check Lock failure Decommits memory
1.52 +// 32 Recommit memory to chunk
1.53 +// 33 Check Commit on unlocked pages
1.54 +// 34 Check Commit on unlocked and reclaimed pages
1.55 +// 35 Restore chunk
1.56 +// 36 Tests at chunk offset '0x400000-PageSize'
1.57 +// 37 Check Lock is idempotent
1.58 +// 38 Check non page aligned Unlock/Lock
1.59 +// 39 Check unlocked pages get reclaimed for new memory allocation
1.60 +// 40 Check reclaimed memory is unmapped from original location
1.61 +// 41 Check Lock fails when memory is reclaimed
1.62 +// 42 Check Lock failure Decommits memory
1.63 +// 43 Recommit memory to chunk
1.64 +// 44 Check Commit on unlocked pages
1.65 +// 45 Check Commit on unlocked and reclaimed pages
1.66 +// 46 Restore chunk
1.67 +// 47 Big Unlock/Lock
1.68 +// 48 Benchmarks...
1.69 +// 49 Close chunk with unlocked pages which have been flushed
1.70 +//
1.71 +//
1.72 +
1.73 +//! @SYMTestCaseID KBASE-T_CACHECHUNK-0336
1.74 +//! @SYMTestType UT
1.75 +//! @SYMPREQ PREQ1110
1.76 +//! @SYMTestCaseDesc Demand Paging Cache Chunk Lock/Unlock Tests
1.77 +//! @SYMTestActions 0 Commit all of memory
1.78 +//! @SYMTestExpectedResults All tests should pass.
1.79 +//! @SYMTestPriority High
1.80 +//! @SYMTestStatus Implemented
1.81 +
1.82 +#define __E32TEST_EXTENSION__
1.83 +
1.84 +#include <e32test.h>
1.85 +#include <e32panic.h>
1.86 +#include <e32svr.h>
1.87 +#include <hal.h>
1.88 +#include "mmudetect.h"
1.89 +#include "d_memorytest.h"
1.90 +#include "d_gobble.h"
1.91 +#include <dptest.h>
1.92 +#include "freeram.h"
1.93 +
1.94 +LOCAL_D RTest test(_L("T_CACHECHUNK"));
1.95 +
1.96 +RMemoryTestLdd MemoryTest;
1.97 +
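+// Test globals: CommitEnd marks the first uncommitted offset in TestChunk after
+// E32Main has committed all available RAM to it; NoFreeRam records the free RAM
+// remaining at that point.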
1.98 +RChunk TestChunk;
1.99 +TUint8* TestChunkBase;
1.100 +TInt CommitEnd;
1.101 +TInt PageSize;
1.102 +TInt NoFreeRam;
1.103 +RTimer Timer;
1.104 +
1.105 +
1.106 +
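+// Fill the page at aOffset in TestChunk with a pattern derived from the offset,
+// so CheckPage() can later verify the contents survived Unlock/Lock and paging.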
1.107 +void FillPage(TUint aOffset)
1.108 + {
1.109 + TUint8* ptr = TestChunkBase+aOffset;
1.110 + TUint8* ptrEnd = ptr+PageSize;
1.111 + do *((TUint32*&)ptr)++ = aOffset+=4;
1.112 + while(ptr<ptrEnd);
1.113 + }
1.114 +
1.115 +
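+// Return ETrue if the page at aOffset still contains the pattern written by FillPage().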
1.116 +TBool CheckPage(TUint aOffset)
1.117 + {
1.118 + TUint8* ptr = TestChunkBase+aOffset;
1.119 + TUint8* ptrEnd = ptr+PageSize;
1.120 + do if(*((TUint32*&)ptr)++ != (aOffset+=4)) break;
1.121 + while(ptr<ptrEnd);
1.122 + return ptr==ptrEnd;
1.123 + }
1.124 +
1.125 +
1.126 +TBool CheckPages(TUint aOffset, TInt aNumPages)
1.127 + {
1.128 + while(aNumPages--)
1.129 + if(!CheckPage(aOffset+=PageSize))
1.130 + return EFalse;
1.131 + return ETrue;
1.132 + }
1.133 +
1.134 +
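+// Probe the page at aOffset via the memory test driver; a successful read means
+// the page is still mapped at its original address.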
1.135 +TBool IsPageMapped(TUint aOffset)
1.136 + {
1.137 + TUint32 value;
1.138 + TInt r=MemoryTest.ReadMemory(TestChunkBase+aOffset,value);
1.139 + return r==KErrNone;
1.140 + }
1.141 +
1.142 +
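+// Exercise Unlock/Lock/Commit/Decommit on the four pages of TestChunk starting at
+// aOffset; the chunk is restored to its original committed state before returning.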
1.143 +void Tests(TInt aOffset)
1.144 + {
1.145 + if(aOffset+5*PageSize>=CommitEnd)
1.146 + {
1.147 + test.Start(_L("TEST NOT RUN - Not enough system RAM"));
1.148 + test.End();
1.149 + return;
1.150 + }
1.151 + TInt r;
1.152 + TInt freeRam;
1.153 +
1.154 + TUint origChunkSize = TestChunk.Size();
1.155 +
1.156 + test.Start(_L("Check Unlock is idempotent"));
1.157 + r = TestChunk.Unlock(aOffset+PageSize,PageSize);
1.158 + test_KErrNone(r);
1.159 + freeRam = FreeRam();
1.160 + test(freeRam==NoFreeRam+PageSize);
1.161 + r = TestChunk.Unlock(aOffset+PageSize,PageSize);
1.162 + test_KErrNone(r);
1.163 + test_Equal(FreeRam(), freeRam);
1.164 + // Ensure unlock on reclaimed pages is idempotent
1.165 + TInt flushSupported = UserSvr::HalFunction(EHalGroupVM,EVMHalFlushCache,0,0);
1.166 + r = TestChunk.Unlock(aOffset+PageSize,PageSize);
1.167 + test_KErrNone(r);
1.168 + test_Equal(FreeRam(), freeRam);
1.169 + test_Equal(origChunkSize, TestChunk.Size());
1.170 +
1.171 + if (flushSupported == KErrNotSupported)
1.172 +		{// Flush cache not supported, so Lock won't fail and there's no need to recommit the pages.
1.173 + test_KErrNone(TestChunk.Lock(aOffset+PageSize,PageSize));
1.174 + }
1.175 + else
1.176 + {// Recommit the reclaimed pages.
1.177 + test_KErrNone(flushSupported);
1.178 + test_Equal(KErrNotFound, TestChunk.Lock(aOffset+PageSize,PageSize));
1.179 + test_KErrNone(TestChunk.Commit(aOffset+PageSize,PageSize));
1.180 + }
1.181 +
1.182 + test.Next(_L("Check Lock is idempotent"));
1.183 + r = TestChunk.Lock(aOffset,3*PageSize);
1.184 + test_KErrNone(r);
1.185 + freeRam = FreeRam();
1.186 + test(freeRam==NoFreeRam);
1.187 + CheckPages(aOffset,3);
1.188 + r = TestChunk.Lock(aOffset,3*PageSize);
1.189 + test_KErrNone(r);
1.190 + CheckPages(aOffset,3);
1.191 + freeRam = FreeRam();
1.192 + test(freeRam==NoFreeRam);
1.193 + test_Equal(origChunkSize, TestChunk.Size());
1.194 +
1.195 + test.Next(_L("Check non page aligned Unlock/Lock"));
1.196 + r = TestChunk.Unlock(aOffset+PageSize-1,1);
1.197 + test_KErrNone(r);
1.198 + freeRam = FreeRam();
1.199 + test(freeRam==NoFreeRam+PageSize);
1.200 + r = TestChunk.Lock(aOffset+PageSize-1,1);
1.201 + test_KErrNone(r);
1.202 + freeRam = FreeRam();
1.203 + test(freeRam==NoFreeRam);
1.204 + r = TestChunk.Unlock(aOffset+PageSize-1,2);
1.205 + test_KErrNone(r);
1.206 + freeRam = FreeRam();
1.207 + test(freeRam==NoFreeRam+PageSize*2);
1.208 + r = TestChunk.Lock(aOffset+PageSize-1,2);
1.209 + test_KErrNone(r);
1.210 + freeRam = FreeRam();
1.211 + test(freeRam==NoFreeRam);
1.212 + test_Equal(origChunkSize, TestChunk.Size());
1.213 +
1.214 + test.Next(_L("Check unlocked pages get reclaimed for new memory allocation"));
1.215 + r=TestChunk.Commit(CommitEnd,PageSize);
1.216 + test(r==KErrNoMemory);
1.217 + r = TestChunk.Unlock(aOffset,4*PageSize);
1.218 + test_KErrNone(r);
1.219 + freeRam = FreeRam();
1.220 + test(freeRam==NoFreeRam+PageSize*4);
1.221 + r=TestChunk.Commit(CommitEnd,PageSize);
1.222 + test_KErrNone(r);
1.223 + freeRam = FreeRam();
1.224 + test(freeRam<NoFreeRam+PageSize*4);
1.225 + r=TestChunk.Decommit(CommitEnd,PageSize);
1.226 + test_KErrNone(r);
1.227 + freeRam = FreeRam();
1.228 + test(freeRam==NoFreeRam+PageSize*4);
1.229 + UserSvr::HalFunction(EHalGroupVM,EVMHalFlushCache,0,0); // make sure unlocked page is gone
1.230 + freeRam = FreeRam();
1.231 + test(freeRam==NoFreeRam+PageSize*4);
1.232 +
1.233 +#ifndef __WINS__ // the WINS emulator's paging emulation doesn't actually do this
1.234 + test.Next(_L("Check reclaimed memory is unmapped and decommitted from original location"));
1.235 + TInt mappedPages = IsPageMapped(aOffset+PageSize*0);
1.236 + mappedPages += IsPageMapped(aOffset+PageSize*1);
1.237 + mappedPages += IsPageMapped(aOffset+PageSize*2);
1.238 + mappedPages += IsPageMapped(aOffset+PageSize*3);
1.239 + test(mappedPages<4);
1.240 +#endif
1.241 + if(aOffset>PageSize)
1.242 + {
1.243 + test(CheckPage(aOffset+PageSize*-1)); // should be left mapped
1.244 + }
1.245 + test(CheckPage(aOffset+PageSize*4)); // should be left mapped
1.246 +
1.247 + test.Next(_L("Check Lock fails when memory is reclaimed"));
1.248 + r = TestChunk.Lock(aOffset,4*PageSize);
1.249 + test(r==KErrNotFound);
1.250 + freeRam = FreeRam();
1.251 + test(freeRam==NoFreeRam+PageSize*4);
1.252 +
1.253 + test.Next(_L("Check Lock failure Decommits memory"));
1.254 + test(!IsPageMapped(aOffset+PageSize*0));
1.255 + test(!IsPageMapped(aOffset+PageSize*1));
1.256 + test(!IsPageMapped(aOffset+PageSize*2));
1.257 + test(!IsPageMapped(aOffset+PageSize*3));
1.258 + test_Equal(origChunkSize-PageSize*4, TestChunk.Size());
1.259 +
1.260 + test.Next(_L("Recommit memory to chunk"));
1.261 + TInt offset;
1.262 + for(offset=aOffset; offset<aOffset+PageSize*4; offset+=PageSize)
1.263 + {
1.264 + r=TestChunk.Commit(offset,PageSize);
1.265 + test_KErrNone(r);
1.266 + FillPage(offset);
1.267 + }
1.268 + freeRam = FreeRam();
1.269 + test(freeRam==NoFreeRam);
1.270 + test_Equal(origChunkSize, TestChunk.Size());
1.271 +
1.272 + test.Next(_L("Check Commit on unlocked pages"));
1.273 + r = TestChunk.Unlock(aOffset,4*PageSize);
1.274 + test_KErrNone(r);
1.275 + freeRam = FreeRam();
1.276 + test(freeRam>=NoFreeRam+PageSize*4);
1.277 + r=TestChunk.Commit(aOffset,4*PageSize);
1.278 + test(r==KErrAlreadyExists);
1.279 + freeRam = FreeRam();
1.280 + test(freeRam>=NoFreeRam+PageSize*4);
1.281 + test_Equal(origChunkSize, TestChunk.Size());
1.282 +
1.283 + test.Next(_L("Check Commit on unlocked and reclaimed pages"));
1.284 + // unlock and force a page to be reclaimed...
1.285 + r=TestChunk.Commit(CommitEnd,PageSize);
1.286 + test_KErrNone(r);
1.287 + r=TestChunk.Decommit(CommitEnd,PageSize);
1.288 + test_KErrNone(r);
1.289 + UserSvr::HalFunction(EHalGroupVM,EVMHalFlushCache,0,0); // make sure unlocked page is gone
1.290 + freeRam = FreeRam();
1.291 + test(freeRam>=NoFreeRam+PageSize*4);
1.292 + // check can't commit any pages (because they are unlocked, not decommitted)...
1.293 + r=TestChunk.Commit(aOffset+PageSize*0,PageSize);
1.294 + test(r==KErrAlreadyExists);
1.295 + r=TestChunk.Commit(aOffset+PageSize*1,PageSize);
1.296 + test(r==KErrAlreadyExists);
1.297 + r=TestChunk.Commit(aOffset+PageSize*2,PageSize);
1.298 + test(r==KErrAlreadyExists);
1.299 + r=TestChunk.Commit(aOffset+PageSize*3,PageSize);
1.300 + test(r==KErrAlreadyExists);
1.301 + freeRam = FreeRam();
1.302 + test(freeRam>=NoFreeRam+PageSize*4);
1.303 + test_Equal(origChunkSize, TestChunk.Size());
1.304 + // Restore the chunk to original size.
1.305 + r = TestChunk.Lock(aOffset,4*PageSize);
1.306 + test_Equal(r, KErrNotFound);
1.307 + freeRam = FreeRam();
1.308 + test_Compare(freeRam, >=, NoFreeRam+PageSize*4);
1.309 + test_Equal(origChunkSize - PageSize*4, TestChunk.Size());
1.310 + r = TestChunk.Commit(aOffset, PageSize*4);
1.311 + test_KErrNone(r);
1.312 +
1.313 + test.Next(_L("Check Decommit on unlocked pages"));
1.314 + r = TestChunk.Unlock(aOffset,PageSize*4);
1.315 + test_KErrNone(r);
1.316 + test(FreeRam() >= NoFreeRam+PageSize*4);
1.317 + r=TestChunk.Decommit(aOffset, PageSize*4);
1.318 + test_KErrNone(r);
1.319 + freeRam = FreeRam();
1.320 + test_Compare(freeRam, >=, NoFreeRam+PageSize*4);
1.321 + test_Equal(origChunkSize - PageSize*4, TestChunk.Size());
1.322 + // Restore chunk back to original state
1.323 + r = TestChunk.Commit(aOffset, PageSize*4);
1.324 + test_KErrNone(r);
1.325 + test(FreeRam() == NoFreeRam);
1.326 +
1.327 + test.Next(_L("Check Decommit on unlocked and reclaimed pages"));
1.328 + r = TestChunk.Unlock(aOffset,PageSize*4);
1.329 + test_KErrNone(r);
1.330 + freeRam = FreeRam();
1.331 + test_Compare(freeRam, >=, NoFreeRam+PageSize*4);
1.332 + r=TestChunk.Commit(CommitEnd,PageSize);
1.333 + test_KErrNone(r);
1.334 + r=TestChunk.Decommit(CommitEnd,PageSize);
1.335 + test_KErrNone(r);
1.336 + UserSvr::HalFunction(EHalGroupVM,EVMHalFlushCache,0,0); // make sure unlocked page is gone
1.337 + freeRam = FreeRam();
1.338 + test_Compare(freeRam, >=, NoFreeRam+PageSize*4);
1.339 + r=TestChunk.Decommit(aOffset, PageSize*4);
1.340 + test_KErrNone(r);
1.341 + freeRam = FreeRam();
1.342 + test_Compare(freeRam, >=, NoFreeRam+PageSize*4);
1.343 + test_Equal(origChunkSize - PageSize*4, TestChunk.Size());
1.344 +
1.345 + test.Next(_L("Restore chunk"));
1.346 + test_Equal(origChunkSize-PageSize*4, TestChunk.Size());
1.347 + for(offset=aOffset; offset<aOffset+PageSize*4; offset+=PageSize)
1.348 + {
1.349 + r=TestChunk.Commit(offset,PageSize);
1.350 + test_KErrNone(r);
1.351 + FillPage(offset);
1.352 + }
1.353 + freeRam = FreeRam();
1.354 + test(freeRam==NoFreeRam);
1.355 + test_Equal(origChunkSize, TestChunk.Size());
1.356 +
1.357 + test.End();
1.358 + }
1.359 +
1.360 +
1.361 +
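+// For each test size, count how many Unlock/Lock round trips complete within a
+// one-second timer period and report the average cost per cycle.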
1.362 +void TestBenchmarks()
1.363 + {
1.364 +	TInt r = TestChunk.Unlock(0,CommitEnd); // start with everything unlocked
1.365 + test_KErrNone(r);
1.366 + TInt testSizes[] = { PageSize,1<<16,1<<20,0 };
1.367 + TInt* sizes = testSizes;
1.368 + TInt size;
1.369 + while((size=*sizes++)!=0)
1.370 + {
1.371 + TRequestStatus status;
1.372 + Timer.After(status,1);
1.373 + User::WaitForRequest(status);
1.374 + TInt KRunTime = 1*1000*1000;
1.375 + Timer.After(status,KRunTime);
1.376 + TInt count = 0;
1.377 + while(status==KRequestPending)
1.378 + {
1.379 + r = TestChunk.Lock(0,size);
1.380 + test_KErrNone(r);
1.381 + r = TestChunk.Unlock(0,size);
1.382 + test_KErrNone(r);
1.383 + ++count;
1.384 + }
1.385 + User::WaitForRequest(status);
1.386 + test.Printf(_L("Unlock/Lock of %d kB takes %d us\n"),size>>10,KRunTime/count);
1.387 + }
1.388 + }
1.389 +
1.390 +
1.391 +
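+// Manipulate the demand paging live list so that it contains no 'old' pages, then
+// force allocations from it and check nothing goes wrong (see the step-by-step
+// comments below).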
1.392 +void TestUnlockOld()
1.393 + {
1.394 +	// we start with TestChunk fully locked and little or no free RAM
1.395 +	// (hence the live list should be close to its minimum size).
1.396 +
1.397 + // get sizes...
1.398 + TUint min = 0;
1.399 + TUint max = 0;
1.400 + TUint cur = 0;
1.401 + TInt r = DPTest::CacheSize(min,max,cur);
1.402 +
1.403 + // manipulate demand paging live list so we end up with zero old pages...
1.404 +
1.405 + r = TestChunk.Unlock(0,min*2); // dump 2*min bytes at start of live list
1.406 + test_KErrNone(r);
1.407 + // live list now cur+2*min bytes
1.408 +
1.409 + r = TestChunk.Commit(CommitEnd,cur); // use up 'cur' bytes of RAM from end of live list
1.410 + test_KErrNone(r);
1.411 + // live list now 2*min bytes of pages which were unlocked from our test chunk
1.412 +
1.413 + // lock pages until free RAM is <= 2 pages.
1.414 + // this should remove all of the 'old' pages
1.415 + TUint i = 0;
1.416 + while(FreeRam()>2*PageSize)
1.417 + {
1.418 + TestChunk.Lock(i,PageSize);
1.419 + i += PageSize;
1.420 + test(i<=min);
1.421 + }
1.422 + // live list now min+2*PageSize bytes, with no old pages
1.423 +
1.424 + // now commit memory, which forces allocation from the demand paging live list
1.425 + // which doesn't have any old pages (the purpose of this test)...
1.426 + TUint extra = 0;
1.427 + for(;;)
1.428 + {
1.429 + r = TestChunk.Commit(CommitEnd+min+extra,PageSize);
1.430 + if(r==KErrNoMemory)
1.431 + break;
1.432 + extra += PageSize;
1.433 + }
1.434 + test(extra>0);
1.435 +
1.436 + // restore commit state...
1.437 + r = TestChunk.Decommit(CommitEnd,min+extra);
1.438 + test_KErrNone(r);
1.439 + r = TestChunk.Decommit(0,min*2);
1.440 + test_KErrNone(r);
1.441 + r = TestChunk.Commit(0,min*2);
1.442 + test_KErrNone(r);
1.443 + }
1.444 +
1.445 +
1.446 +
1.447 +TInt E32Main()
1.448 + {
1.449 + test.Title();
1.450 +
1.451 + if (!HaveVirtMem())
1.452 + {
1.453 + test.Printf(_L("This test requires an MMU\n"));
1.454 + return KErrNone;
1.455 + }
1.456 + test.Start(_L("Initialise test"));
1.457 + test.Next(_L("Load gobbler LDD"));
1.458 + TInt r = User::LoadLogicalDevice(KGobblerLddFileName);
1.459 + test(r==KErrNone || r==KErrAlreadyExists);
1.460 + RGobbler gobbler;
1.461 + r = gobbler.Open();
1.462 + test(r==KErrNone);
1.463 + TUint32 taken = gobbler.GobbleRAM(496*1024*1024);
1.464 + test.Printf(_L("Gobbled: %dK\n"), taken/1024);
1.465 + test.Printf(_L("Free RAM 0x%08X bytes\n"),FreeRam());
1.466 +
1.467 + test_KErrNone(HAL::Get(HAL::EMemoryPageSize,PageSize));
1.468 + TInt totalRAM;
1.469 + test_KErrNone(HAL::Get(HAL::EMemoryRAM, totalRAM));
1.470 + totalRAM -= taken;
1.471 + test.Printf(_L("totalRAM=%dK\n"), totalRAM/1024);
1.472 +
1.473 + test(KErrNone==MemoryTest.Open());
1.474 +	// Create the test chunk. It must not be paged, otherwise
1.475 +	// unlocking its pages will have no effect.
1.476 + TChunkCreateInfo createInfo;
1.477 + createInfo.SetCache(totalRAM);
1.478 + test_KErrNone(TestChunk.Create(createInfo));
1.479 + TestChunkBase = TestChunk.Base();
1.480 +
1.481 + test(KErrNone==Timer.CreateLocal());
1.482 + UserSvr::HalFunction(EHalGroupVM,EVMHalFlushCache,0,0);
1.483 +
1.484 + test.Next(_L("Commit all of memory"));
1.485 + CommitEnd = 0;
1.486 + while(KErrNone==(r=TestChunk.Commit(CommitEnd,PageSize)))
1.487 + {
1.488 + FillPage(CommitEnd);
1.489 + CommitEnd += PageSize;
1.490 + }
1.491 + test(r==KErrNoMemory);
1.492 + NoFreeRam = FreeRam();
1.493 + test(NoFreeRam<=PageSize);
1.494 +
1.495 + test.Next(_L("Check Unlocked page gets counted as free memory"));
1.496 + r = TestChunk.Unlock(0,PageSize);
1.497 + test_KErrNone(r);
1.498 + TInt freeRam = FreeRam();
1.499 + test(freeRam==NoFreeRam+PageSize);
1.500 + r = TestChunk.Lock(0,PageSize);
1.501 + test_KErrNone(r);
1.502 + freeRam = FreeRam();
1.503 + test(freeRam==NoFreeRam);
1.504 +
1.505 + test.Next(_L("Check Unlock/Lock preserves page contents"));
1.506 + TInt offset;
1.507 + for(offset=0; offset<CommitEnd; offset+=PageSize)
1.508 + {
1.509 + test(CheckPage(offset));
1.510 + r = TestChunk.Unlock(offset,PageSize);
1.511 + test_KErrNone(r);
1.512 + r = TestChunk.Lock(offset,PageSize);
1.513 + test_KErrNone(r);
1.514 + test(CheckPage(offset));
1.515 + freeRam = FreeRam();
1.516 + test(freeRam==NoFreeRam);
1.517 + }
1.518 +
1.519 + test.Next(_L("Tests at chunk offset '0'"));
1.520 + Tests(0);
1.521 + test.Next(_L("Tests at chunk offset 'PageSize'"));
1.522 + Tests(PageSize);
1.523 + test.Next(_L("Tests at chunk offset '0x100000-PageSize'"));
1.524 + Tests(0x100000-PageSize);
1.525 + test.Next(_L("Tests at chunk offset '0x400000-PageSize'"));
1.526 + Tests(0x400000-PageSize);
1.527 +
1.528 + // Remove limit on max size of live list
1.529 + TUint originalMin = 0;
1.530 + TUint originalMax = 0;
1.531 + TUint currentSize = 0;
1.532 + r = DPTest::CacheSize(originalMin, originalMax, currentSize);
1.533 + test(r == KErrNone || r == KErrNotSupported);
1.534 + TBool resizeCache = r == KErrNone;
1.535 + if (resizeCache)
1.536 + test_KErrNone(DPTest::SetCacheSize(originalMin, KMaxTUint));
1.537 +
1.538 + test.Next(_L("Big Unlock/Lock"));
1.539 + r = TestChunk.Unlock(0,CommitEnd);
1.540 + test_KErrNone(r);
1.541 + freeRam = FreeRam();
1.542 + test(freeRam>=NoFreeRam+CommitEnd);
1.543 + r = TestChunk.Lock(0,CommitEnd);
1.544 + test_KErrNone(r);
1.545 + freeRam = FreeRam();
1.546 + test_Equal(NoFreeRam, freeRam);
1.547 +
1.548 + if (resizeCache)
1.549 + {
1.550 + test.Next(_L("Check Unlock of old pages doesn't cause problems"));
1.551 + TestUnlockOld();
1.552 + }
1.553 +
1.554 + test.Next(_L("Benchmarks..."));
1.555 + TestBenchmarks();
1.556 +
1.557 + test.Next(_L("Close chunk with unlocked pages which have been flushed"));
1.558 + r = TestChunk.Unlock(0,CommitEnd);
1.559 + test_KErrNone(r);
1.560 + UserSvr::HalFunction(EHalGroupVM,EVMHalFlushCache,0,0);
1.561 + TestChunk.Close();
1.562 +
1.563 + test.Next(_L("Check can't lock/unlock non-cache chunks"));
1.564 + RChunk chunk;
1.565 + test_KErrNone(chunk.CreateDisconnectedLocal(0,PageSize,2*PageSize));
1.566 + test_Equal(KErrGeneral,chunk.Lock(PageSize,PageSize));
1.567 + test_Equal(KErrGeneral,chunk.Unlock(0,PageSize));
1.568 + chunk.Close();
1.569 +
1.570 + // Restore original settings for live list size
1.571 + if (resizeCache)
1.572 + test_KErrNone(DPTest::SetCacheSize(originalMin, originalMax));
1.573 +
1.574 + // end...
1.575 + test.End();
1.576 + MemoryTest.Close();
1.577 + gobbler.Close();
1.578 + test.Close();
1.579 +
1.580 + return KErrNone;
1.581 + }