1.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
1.2 +++ b/os/kernelhwsrv/kerneltest/e32test/mmu/t_shbuf.cpp Fri Jun 15 03:10:57 2012 +0200
1.3 @@ -0,0 +1,3455 @@
1.4 +// Copyright (c) 2009 Nokia Corporation and/or its subsidiary(-ies).
1.5 +// All rights reserved.
1.6 +// This component and the accompanying materials are made available
1.7 +// under the terms of the License "Eclipse Public License v1.0"
1.8 +// which accompanies this distribution, and is available
1.9 +// at the URL "http://www.eclipse.org/legal/epl-v10.html".
1.10 +//
1.11 +// Initial Contributors:
1.12 +// Nokia Corporation - initial contribution.
1.13 +//
1.14 +// Contributors:
1.15 +//
1.16 +// Description:
1.17 +// e32test/mmu/t_shbuf.cpp
1.18 +//
1.19 +
1.20 +#define __E32TEST_EXTENSION__
1.21 +
1.22 +#include <e32test.h>
1.23 +#include <hal.h>
1.24 +#include <e32svr.h>
1.25 +#include <u32hal.h>
1.26 +#include "d_shbuf.h"
1.27 +#include <e32shbuf.h>
1.28 +#include <e32def.h>
1.29 +#include <e32def_private.h>
1.30 +
1.31 +#ifdef TEST_CLIENT_THREAD
1.32 +RTest test(_L("T_SHBUF_CLIENT"));
1.33 +#else
1.34 +RTest test(_L("T_SHBUF_OWN"));
1.35 +#endif
1.36 +
1.37 +RShPool P1; // User-side pool
1.38 +RShPool P2; // Kernel-side pool
1.39 +
1.40 +const TInt KTestPoolSizeInBytes = 1 << 20; // 1MB
1.41 +const TInt BufferSize[] = {128, 853, 4096, 5051, 131072, 1, 0}; // Last element must be 0
1.42 +
1.43 +const TInt* PtrBufSize;
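+// PtrBufSize selects the buffer size for the current test pass; the main test
+// sequence later in this file is expected to walk it along BufferSize[] until
+// the 0 terminator is reached.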
1.44 +
1.45 +RShBufTestChannel Ldd;
1.46 +
1.47 +_LIT(KTestSlave, "SLAVE");
1.48 +_LIT(KTestLowSpaceSemaphore, "LowSpaceSemaphore");
1.49 +
1.50 +enum TTestSlave
1.51 + {
1.52 + ETestSlaveError,
1.53 + ETestSlaveNoDeallocation,
1.54 + };
1.55 +
1.56 +enum TTestPoolType
1.57 + {
1.58 + ETestNonPageAligned,
1.59 + ETestPageAligned,
1.60 + ETestPageAlignedGrowing,
1.61 + };
1.62 +
1.63 +TInt Log2(TInt aNum)
1.64 + {
1.65 + TInt res = -1;
1.66 + while(aNum)
1.67 + {
1.68 + res++;
1.69 + aNum >>= 1;
1.70 + }
1.71 + return res;
1.72 + }
1.73 +
1.74 +TInt RoundUp(TInt aNum, TInt aAlignmentLog2)
1.75 + {
1.76 + if (aNum % (1 << aAlignmentLog2) == 0)
1.77 + {
1.78 + return aNum;
1.79 + }
1.80 + return (aNum & ~((1 << aAlignmentLog2) - 1)) + (1 << aAlignmentLog2);
1.81 + }
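+// Worked examples for the helpers above (assuming a 4KB MMU page, so
+// Log2(pagesize) == 12):
+//   Log2(4096)        == 12
+//   Log2(5051)        == 12    (floor of log2)
+//   RoundUp(5051, 12) == 8192  (rounded up to the next 4KB boundary)
+//   RoundUp(4096, 12) == 4096  (already aligned, returned unchanged)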
1.82 +
1.83 +void LoadDeviceDrivers()
1.84 + {
1.85 + TInt r;
1.86 + #ifdef TEST_CLIENT_THREAD
1.87 +	r = User::LoadLogicalDevice(_L("D_SHBUF_CLIENT.LDD"));
1.88 + if (r != KErrAlreadyExists)
1.89 + {
1.90 + test_KErrNone(r);
1.91 + }
1.92 + #else
1.93 + r = User::LoadLogicalDevice(_L("D_SHBUF_OWN.LDD"));
1.94 + if (r != KErrAlreadyExists)
1.95 + {
1.96 + test_KErrNone(r);
1.97 + }
1.98 + #endif
1.99 + }
1.100 +
1.101 +void FreeDeviceDrivers()
1.102 + {
1.103 + TInt r = User::FreeLogicalDevice(KTestShBufClient);
1.104 + test_KErrNone(r);
1.105 + r = User::FreeLogicalDevice(KTestShBufOwn);
1.106 + test_KErrNone(r);
1.107 + }
1.108 +
1.109 +void FillShBuf(RShBuf& aBuffer, TUint8 aValue)
1.110 + {
1.111 + TUint size = aBuffer.Size();
1.112 + TUint8* base = aBuffer.Ptr();
1.113 + test(size!=0);
1.114 + test(base!=0);
1.115 + memset(base,aValue,size);
1.116 + }
1.117 +
1.118 +TBool CheckFillShBuf(RShBuf& aBuffer, TUint8 aValue)
1.119 + {
1.120 + TUint size = aBuffer.Size();
1.121 + TUint8* base = aBuffer.Ptr();
1.122 + test(size!=0);
1.123 + test(base!=0);
1.124 + TUint8* ptr = base;
1.125 + TUint8* end = ptr+size;
1.126 + while(ptr<end)
1.127 + {
1.128 + TUint8 b = *ptr++;
1.129 + if(b!=aValue)
1.130 + {
1.131 + RDebug::Printf("CheckFillShBuf failed at offset 0x%x, expected 0x%02x but got 0x%02x ",ptr-base-1,aValue,b);
1.132 + return EFalse;
1.133 + }
1.134 + }
1.135 + return ETrue;
1.136 + }
1.137 +
1.138 +TBool CheckNotFillShBuf(RShBuf& aBuffer, TUint8 aValue)
1.139 + {
1.140 + TUint size = aBuffer.Size();
1.141 + TUint8* base = aBuffer.Ptr();
1.142 + test(size!=0);
1.143 + test(base!=0);
1.144 + TUint8* ptr = base;
1.145 + TUint8* end = ptr+size;
1.146 + while(ptr<end)
1.147 + {
1.148 + TUint8 b = *ptr++;
1.149 + if(b==aValue)
1.150 + {
1.151 + RDebug::Printf("CheckNotFillShBuf failed at offset 0x%x, expected not 0x%02x",ptr-base-1,aValue);
1.152 + return EFalse;
1.153 + }
1.154 + }
1.155 + return ETrue;
1.156 + }
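+// CheckNotFillShBuf() is used by AllocateUserMax() below to verify that a
+// re-allocated buffer in a page-aligned pool no longer contains the previous
+// fill pattern, i.e. that its contents were wiped between allocations.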
1.157 +
1.158 +/*
1.159 +@SYMTestCaseID 1
1.160 +@SYMTestCaseDesc Create pool from user-side
1.161 +@SYMREQ REQ11423
1.162 +@SYMTestActions
1.163 + 1. Test Thread creates a pool (P1) and passes handle to device driver.
1.164 + 2. Device driver opens pool and checks its attributes.
1.165 +@SYMTestExpectedResults
1.166 + All OK.
1.167 +@SYMTestPriority Critical
1.168 +*/
1.169 +
1.170 +void CreateUserPool(TTestPoolType aPoolType)
1.171 + {
1.172 + test.Next(_L("Create user-side pool"));
1.173 + TInt r;
1.174 + TInt pagesize;
1.175 + r = HAL::Get(HAL::EMemoryPageSize, pagesize);
1.176 + test_KErrNone(r);
1.177 +
1.178 + switch (aPoolType)
1.179 + {
1.180 + case ETestNonPageAligned:
1.181 + // Non-page-aligned pool
1.182 + {
1.183 + test.Printf(_L("Non-page-aligned\n"));
1.184 + test_Equal(0, P1.Handle());
1.185 + TShPoolCreateInfo inf(TShPoolCreateInfo::ENonPageAlignedBuffer, *PtrBufSize, KTestPoolSizeInBufs, 8);
1.186 + r = P1.Create(inf,KDefaultPoolHandleFlags);
1.187 + test_KErrNone(r);
1.188 +
1.189 + r = P1.SetBufferWindow(-1, ETrue);
1.190 + test_Equal(KErrNotSupported, r);
1.191 +
1.192 + TShPoolInfo poolinfotokernel;
1.193 + poolinfotokernel.iBufSize = *PtrBufSize;
1.194 + poolinfotokernel.iInitialBufs = KTestPoolSizeInBufs;
1.195 + poolinfotokernel.iMaxBufs = KTestPoolSizeInBufs;
1.196 + poolinfotokernel.iGrowTriggerRatio = 0;
1.197 + poolinfotokernel.iGrowByRatio = 0;
1.198 + poolinfotokernel.iShrinkHysteresisRatio = 0;
1.199 + poolinfotokernel.iAlignment = 8;
1.200 + poolinfotokernel.iFlags = EShPoolNonPageAlignedBuffer;
1.201 + r = Ldd.OpenUserPool(P1.Handle(), poolinfotokernel);
1.202 + test_KErrNone(r);
1.203 +
1.204 + TShPoolInfo poolinfo;
1.205 + P1.GetInfo(poolinfo);
1.206 + test_Equal(*PtrBufSize, poolinfo.iBufSize);
1.207 + test_Equal(KTestPoolSizeInBufs, poolinfo.iInitialBufs);
1.208 + test_Equal(KTestPoolSizeInBufs, poolinfo.iMaxBufs);
1.209 + test_Equal(0, poolinfo.iGrowTriggerRatio);
1.210 + test_Equal(0, poolinfo.iGrowByRatio);
1.211 + test_Equal(0, poolinfo.iShrinkHysteresisRatio);
1.212 + test_Equal(8, poolinfo.iAlignment);
1.213 + test(poolinfo.iFlags & EShPoolNonPageAlignedBuffer);
1.214 + test(!(poolinfo.iFlags & EShPoolPageAlignedBuffer));
1.215 + break;
1.216 + }
1.217 + case ETestPageAligned:
1.218 + // Page-aligned pool
1.219 + {
1.220 + test.Printf(_L("Page-aligned\n"));
1.221 + test_Equal(0, P1.Handle());
1.222 +
1.223 + TShPoolCreateInfo inf(TShPoolCreateInfo::EPageAlignedBuffer, *PtrBufSize, KTestPoolSizeInBufs);
1.224 + r = P1.Create(inf,KDefaultPoolHandleFlags);
1.225 + test_KErrNone(r);
1.226 +
1.227 + r = P1.SetBufferWindow(-1, ETrue);
1.228 + test_KErrNone(r);
1.229 +
1.230 + TShPoolInfo poolinfo;
1.231 + P1.GetInfo(poolinfo);
1.232 + test_Equal(*PtrBufSize, poolinfo.iBufSize);
1.233 + test_Equal(KTestPoolSizeInBufs, poolinfo.iInitialBufs);
1.234 + test_Equal(KTestPoolSizeInBufs, poolinfo.iMaxBufs);
1.235 + test_Equal(0, poolinfo.iGrowTriggerRatio);
1.236 + test_Equal(0, poolinfo.iGrowByRatio);
1.237 + test_Equal(0, poolinfo.iShrinkHysteresisRatio);
1.238 + test_Equal(Log2(pagesize), poolinfo.iAlignment);
1.239 + test(poolinfo.iFlags & EShPoolPageAlignedBuffer);
1.240 + test(!(poolinfo.iFlags & EShPoolNonPageAlignedBuffer));
1.241 +
1.242 + r = Ldd.OpenUserPool(P1.Handle(), poolinfo);
1.243 + test_KErrNone(r);
1.244 + break;
1.245 + }
1.246 + case ETestPageAlignedGrowing:
1.247 + // Page-aligned growing pool
1.248 + {
1.249 + test.Printf(_L("Page-aligned growing\n"));
1.250 + test_Equal(0, P1.Handle());
1.251 +
1.252 + TShPoolCreateInfo inf(TShPoolCreateInfo::EPageAlignedBuffer, *PtrBufSize, KTestPoolSizeInBufs / 2);
1.253 + // Set shrink hysteresis high so pool can't shrink
1.254 + r = inf.SetSizingAttributes(KTestPoolSizeInBufs, 25, 26, 25600);
1.255 + test_KErrNone(r);
1.256 + r = P1.Create(inf,KDefaultPoolHandleFlags);
1.257 + test_KErrNone(r);
1.258 +
1.259 + r = P1.SetBufferWindow(-1, ETrue);
1.260 + test_KErrNone(r);
1.261 +
1.262 + TShPoolInfo poolinfo;
1.263 + P1.GetInfo(poolinfo);
1.264 + test_Equal(*PtrBufSize, poolinfo.iBufSize);
1.265 + test_Equal(KTestPoolSizeInBufs / 2, poolinfo.iInitialBufs);
1.266 + test_Equal(KTestPoolSizeInBufs, poolinfo.iMaxBufs);
1.267 + test_Equal(25, poolinfo.iGrowTriggerRatio);
1.268 + test_Equal(26, poolinfo.iGrowByRatio);
1.269 + test_Equal(25600, poolinfo.iShrinkHysteresisRatio);
1.270 + test_Equal(Log2(pagesize), poolinfo.iAlignment);
1.271 + test(poolinfo.iFlags & EShPoolPageAlignedBuffer);
1.272 + test(!(poolinfo.iFlags & EShPoolNonPageAlignedBuffer));
1.273 +
1.274 + r = Ldd.OpenUserPool(P1.Handle(), poolinfo);
1.275 + test_KErrNone(r);
1.276 + break;
1.277 + }
1.278 + default:
1.279 + test(EFalse);
1.280 + }
1.281 + }
1.282 +
1.283 +/*
1.284 +@SYMTestCaseID 2
1.285 +@SYMTestCaseDesc Create pool from kernel-side
1.286 +@SYMREQ REQ11423
1.287 +@SYMTestActions
1.288 + 1. Device Driver creates a pool (P2) and passes handle to this thread.
1.289 + 2. Test Thread opens pool and checks its attributes.
1.290 +@SYMTestExpectedResults
1.291 + 1. Ok.
1.292 + 2. Ok.
1.293 +@SYMTestPriority Critical
1.294 +*/
1.295 +
1.296 +void CreateKernelPool(TTestPoolType aPoolType)
1.297 + {
1.298 + test.Next(_L("Create kernel-side pool"));
1.299 + TInt r;
1.300 + TInt pagesize;
1.301 + r = HAL::Get(HAL::EMemoryPageSize, pagesize);
1.302 + test_KErrNone(r);
1.303 + TInt handle;
1.304 +
1.305 + switch (aPoolType)
1.306 + {
1.307 + case ETestNonPageAligned:
1.308 + // Non-page-aligned pool
1.309 + {
1.310 + test.Printf(_L("Non-page-aligned\n"));
1.311 + test_Equal(0, P2.Handle());
1.312 +
1.313 + TShPoolCreateInfo inf(TShPoolCreateInfo::ENonPageAlignedBuffer, *PtrBufSize, KTestPoolSizeInBufs, 8);
1.314 + r = Ldd.OpenKernelPool(inf, handle);
1.315 + test_KErrNone(r);
1.316 + P2.SetHandle(handle);
1.317 +
1.318 + TShPoolInfo poolinfo;
1.319 + P2.GetInfo(poolinfo);
1.320 + test_Equal(*PtrBufSize, poolinfo.iBufSize);
1.321 + test_Equal(KTestPoolSizeInBufs, poolinfo.iInitialBufs);
1.322 + test_Equal(KTestPoolSizeInBufs, poolinfo.iMaxBufs);
1.323 + test_Equal(0, poolinfo.iGrowTriggerRatio);
1.324 + test_Equal(0, poolinfo.iGrowByRatio);
1.325 + test_Equal(0, poolinfo.iShrinkHysteresisRatio);
1.326 + test_Equal(8, poolinfo.iAlignment);
1.327 + test(poolinfo.iFlags & EShPoolNonPageAlignedBuffer);
1.328 + test(!(poolinfo.iFlags & EShPoolPageAlignedBuffer));
1.329 + break;
1.330 + }
1.331 + case ETestPageAligned:
1.332 + // Page-aligned pool
1.333 + {
1.334 + test.Printf(_L("Page-aligned\n"));
1.335 + test_Equal(0, P2.Handle());
1.336 +
1.337 + TShPoolCreateInfo inf(TShPoolCreateInfo::EPageAlignedBuffer, *PtrBufSize, KTestPoolSizeInBufs);
1.338 + r = Ldd.OpenKernelPool(inf, handle);
1.339 + test_KErrNone(r);
1.340 + P2.SetHandle(handle);
1.341 +
1.342 + r = P2.SetBufferWindow(-1, ETrue);
1.343 + test_KErrNone(r);
1.344 +
1.345 + TShPoolInfo poolinfo;
1.346 + P2.GetInfo(poolinfo);
1.347 + test_Equal(*PtrBufSize, poolinfo.iBufSize);
1.348 + test_Equal(KTestPoolSizeInBufs, poolinfo.iInitialBufs);
1.349 + test_Equal(KTestPoolSizeInBufs, poolinfo.iMaxBufs);
1.350 + test_Equal(0, poolinfo.iGrowTriggerRatio);
1.351 + test_Equal(0, poolinfo.iGrowByRatio);
1.352 + test_Equal(0, poolinfo.iShrinkHysteresisRatio);
1.353 + test_Equal(Log2(pagesize), poolinfo.iAlignment);
1.354 + test(poolinfo.iFlags & EShPoolPageAlignedBuffer);
1.355 + test(!(poolinfo.iFlags & EShPoolNonPageAlignedBuffer));
1.356 + break;
1.357 + }
1.358 + case ETestPageAlignedGrowing:
1.359 +	// Page-aligned growing pool
1.360 + {
1.361 + test.Printf(_L("Page-aligned growing\n"));
1.362 + test_Equal(0, P2.Handle());
1.363 +
1.364 + TShPoolCreateInfo inf(TShPoolCreateInfo::EPageAlignedBuffer, *PtrBufSize, KTestPoolSizeInBufs / 2);
1.365 + // Set shrink hysteresis high so pool can't shrink
1.366 + r = inf.SetSizingAttributes(KTestPoolSizeInBufs, 25, 26, 25600);
1.367 + test_KErrNone(r);
1.368 + r = Ldd.OpenKernelPool(inf, handle);
1.369 + test_KErrNone(r);
1.370 + P2.SetHandle(handle);
1.371 +
1.372 + r = P2.SetBufferWindow(-1, ETrue);
1.373 + test_KErrNone(r);
1.374 +
1.375 + TShPoolInfo poolinfo;
1.376 + P2.GetInfo(poolinfo);
1.377 + test_Equal(*PtrBufSize, poolinfo.iBufSize);
1.378 + test_Equal(KTestPoolSizeInBufs / 2, poolinfo.iInitialBufs);
1.379 + test_Equal(KTestPoolSizeInBufs, poolinfo.iMaxBufs);
1.380 + test_Equal(25, poolinfo.iGrowTriggerRatio);
1.381 + test_Equal(26, poolinfo.iGrowByRatio);
1.382 + test_Equal(25600, poolinfo.iShrinkHysteresisRatio);
1.383 + test_Equal(Log2(pagesize), poolinfo.iAlignment);
1.384 + test(poolinfo.iFlags & EShPoolPageAlignedBuffer);
1.385 + test(!(poolinfo.iFlags & EShPoolNonPageAlignedBuffer));
1.386 + break;
1.387 + }
1.388 + default:
1.389 + test(EFalse);
1.390 + }
1.391 + }
1.392 +
1.393 +/*
1.394 +@SYMTestCaseID 20
1.395 +@SYMTestCaseDesc Close pool from kernel-side
1.396 +@SYMREQ REQ11423
1.397 +@SYMTestActions
1.398 + 1. Device Driver closes P2.
1.399 + 2. Test Thread closes P2.
1.400 +@SYMTestExpectedResults
1.401 + 1. OK and Access Count is now 1.
1.402 + 2. OK
1.403 +@SYMTestPriority Critical
1.404 +*/
1.405 +
1.406 +void CloseKernelPool()
1.407 + {
1.408 + test.Next(_L("Close kernel-side pool"));
1.409 + TInt r;
1.410 +
1.411 + r = Ldd.CloseKernelPool();
1.412 + test_KErrNone(r);
1.413 +
1.414 + P2.Close();
1.415 +
1.416 + // wait for memory to be freed
1.417 + r = UserSvr::HalFunction(EHalGroupKernel, EKernelHalSupervisorBarrier, (TAny*)5000, 0);
1.418 + test_KErrNone(r);
1.419 +
1.420 + }
1.421 +
1.422 +/*
1.423 +@SYMTestCaseID 21
1.424 +@SYMTestCaseDesc Close pool from user-side
1.425 +@SYMREQ REQ11423
1.426 +@SYMTestActions
1.427 + 1. Test Thread closes P1.
1.428 + 2. Device Driver closes P1.
1.429 +@SYMTestExpectedResults
1.430 + 1. OK and Access Count is now 1.
1.431 + 2. OK.
1.432 +@SYMTestPriority Critical
1.433 +*/
1.434 +
1.435 +void CloseUserPool()
1.436 + {
1.437 + test.Next(_L("Close user-side pool"));
1.438 + TInt r;
1.439 +
1.440 + P1.Close();
1.441 +
1.442 + r = Ldd.CloseUserPool();
1.443 + test_KErrNone(r);
1.444 +
1.445 + // wait for memory to be freed
1.446 + r = UserSvr::HalFunction(EHalGroupKernel, EKernelHalSupervisorBarrier, (TAny*)5000, 0);
1.447 + test_KErrNone(r);
1.448 + }
1.449 +
1.450 +/*
1.451 +@SYMTestCaseID 3
1.452 +@SYMTestCaseDesc Buffer allocation from user-side
1.453 +@SYMREQ REQ11423
1.454 +@SYMTestActions
1.455 + 1. Test Thread creates a shared buffer on P1.
1.456 + 2. Test Thread passes buffer to Device Driver.
1.457 + 3. Device Driver obtains buffer and manipulates its contents.
1.458 + 4. Device Driver releases buffer.
1.459 + 5. Test Thread releases buffer.
1.460 +@SYMTestExpectedResults
1.461 + 1. Ok.
1.462 + 2. Ok.
1.463 + 3. Ok.
1.464 + 4. Ok.
1.465 + 5. Ok. Buffer de-allocated.
1.466 +@SYMTestPriority Critical
1.467 +*/
1.468 +
1.469 +void AllocateUserBuffer()
1.470 + {
1.471 + test.Next(_L("Allocate user-side buffer"));
1.472 + TInt r;
1.473 + RShBuf buf;
1.474 +
1.475 + // Allocate buffer on POOL 1
1.476 + __KHEAP_MARK;
1.477 + r = buf.Alloc(P1);
1.478 + test_KErrNone(r);
1.479 + __KHEAP_CHECK(0);
1.480 +
1.481 + TInt i;
1.482 + TShPoolInfo poolinfo1;
1.483 + P1.GetInfo(poolinfo1);
1.484 + TInt blocks = poolinfo1.iBufSize / KTestData1().Length();
1.485 +
1.486 + for (i = 0; i < blocks; i++)
1.487 + {
1.488 + TPtr8(buf.Ptr() + (i * KTestData1().Length()), KTestData1().Length(),KTestData1().Length()).Copy(KTestData1());
1.489 + }
1.490 + r = Ldd.ManipulateUserBuffer(buf.Handle());
1.491 +
1.492 + test_KErrNone(r);
1.493 +
1.494 + TBuf8<64> tmp;
1.495 +
1.496 + P1.GetInfo(poolinfo1);
1.497 + blocks = poolinfo1.iBufSize / tmp.MaxSize();
1.498 +
1.499 + for (i = 0 ; i < blocks; i++)
1.500 + {
1.501 + tmp.Fill(i);
1.502 + TPtrC8 ptrc(buf.Ptr() + (i * tmp.Length()), tmp.Length());
1.503 + r = tmp.Compare(ptrc);
1.504 + test_Equal(0, r);
1.505 + }
1.506 + buf.Close();
1.507 + __KHEAP_MARKEND;
1.508 +
1.509 + // Allocate buffer on POOL 2
1.510 + __KHEAP_MARK;
1.511 + r = buf.Alloc(P2);
1.512 + test_KErrNone(r);
1.513 + __KHEAP_CHECK(0);
1.514 +
1.515 + TShPoolInfo poolinfo2;
1.516 + P2.GetInfo(poolinfo2);
1.517 +	blocks = poolinfo2.iBufSize / KTestData1().Length();
1.518 +
1.519 + for (i = 0; i < blocks; i++)
1.520 + {
1.521 + TPtr8(buf.Ptr() + (i * KTestData1().Length()), KTestData1().Length(),KTestData1().Length()).Copy(KTestData1());
1.522 + }
1.523 +
1.524 + r = Ldd.ManipulateUserBuffer(buf.Handle());
1.525 + test_KErrNone(r);
1.526 +
1.527 + P2.GetInfo(poolinfo2);
1.528 +	blocks = poolinfo2.iBufSize / tmp.MaxSize();
1.529 +
1.530 + for (i = 0 ; i < blocks; i++)
1.531 + {
1.532 + tmp.Fill(i);
1.533 + r = tmp.Compare(TPtr8(buf.Ptr() + (i * tmp.Length()), tmp.Length(), tmp.Length()));
1.534 + test_Equal(0, r);
1.535 + }
1.536 + buf.Close();
1.537 + __KHEAP_MARKEND;
1.538 + }
1.539 +
1.540 +/*
1.541 +@SYMTestCaseID 4
1.542 +@SYMTestCaseDesc Buffer allocation from kernel-side
1.543 +@SYMREQ REQ11423
1.544 +@SYMTestActions
1.545 + 1. Device Driver creates a buffer on P2.
1.546 + 2. Device Driver manipulates buffer and passes it to Test Thread.
1.547 +	3. Test Thread manipulates the buffer and sends it back to Device Driver.
1.548 +	4. Device Driver checks the buffer's contents and releases it.
1.549 +@SYMTestExpectedResults
1.550 + 1. Ok.
1.551 + 2. Ok.
1.552 + 3. Ok.
1.553 + 4. Ok. Buffer de-allocated.
1.554 +@SYMTestPriority Critical
1.555 +*/
1.556 +
1.557 +void AllocateKernelBuffer()
1.558 + {
1.559 + test.Next(_L("Allocate kernel-side buffer"));
1.560 + TInt r;
1.561 + TInt handle;
1.562 + RShBuf kbuf0, kbuf1;
1.563 +
1.564 + // Allocate buffer on POOL 1
1.565 + r = Ldd.AllocateKernelBuffer(0, handle);
1.566 + test_KErrNone(r);
1.567 + kbuf0.SetHandle(handle);
1.568 +
1.569 + TInt i;
1.570 + TShPoolInfo poolinfo1;
1.571 + P1.GetInfo(poolinfo1);
1.572 + TInt blocks = poolinfo1.iBufSize / KTestData2().Length();
1.573 + for (i = 0; i < blocks; i++)
1.574 + {
1.575 + r = KTestData2().Compare(TPtr8(kbuf0.Ptr() + (i * KTestData2().Length()), KTestData2().Length(), KTestData2().Length()));
1.576 +
1.577 + test_Equal(0, r);
1.578 + }
1.579 + kbuf0.Close();
1.580 +
1.581 + // Allocate buffer on POOL 2
1.582 + r = Ldd.AllocateKernelBuffer(1, handle);
1.583 + test_KErrNone(r);
1.584 + kbuf1.SetHandle(handle);
1.585 +
1.586 + TShPoolInfo poolinfo2;
1.587 + P2.GetInfo(poolinfo2);
1.588 + blocks = poolinfo2.iBufSize / KTestData2().Length();
1.589 +
1.590 + for (i = 0; i < blocks; i++)
1.591 + {
1.592 + r = KTestData2().Compare(TPtr8(kbuf1.Ptr() + (i * KTestData2().Length()), KTestData2().Length(), KTestData2().Length()));
1.593 +
1.594 + test_Equal(0, r);
1.595 + }
1.596 + kbuf1.Close();
1.597 + }
1.598 +
1.599 +
1.600 +/*
1.601 +@SYMTestCaseID X1
1.602 +@SYMTestCaseDesc Allocate maximum number of buffers in a pool (user/kernel)
1.603 +@SYMREQ REQ11423
1.604 +@SYMTestActions
1.605 +	Allocate as many buffers from the pool as possible.
1.606 +	Free them all, then allocate them all again.
1.607 +	Free them all.
1.608 +@SYMTestExpectedResults
1.609 + Ok.
1.610 +@SYMTestPriority High
1.611 +*/
1.612 +
1.613 +void AllocateUserMax(RShPool& aPool)
1.614 + {
1.615 + test.Next(_L("Exhaust pool memory from user-side"));
1.616 + TInt r;
1.617 +
1.618 + TShPoolInfo poolinfo;
1.619 + aPool.GetInfo(poolinfo);
1.620 + TBool aligned = (poolinfo.iFlags & EShPoolPageAlignedBuffer);
1.621 + RDebug::Printf("aligned=%d",aligned);
1.622 +
1.623 + RArray<RShBuf> bufarray;
1.624 + do
1.625 + {
1.626 + RShBuf buf;
1.627 + r = buf.Alloc(aPool);
1.628 + if (r==KErrNoMemory && KTestPoolSizeInBufs>bufarray.Count())
1.629 + {
1.630 + // try again after a delay, to allow for background resource allocation
1.631 +
1.632 + User::After(1000000);
1.633 + r = buf.Alloc(aPool);
1.634 + }
1.635 + if (!r)
1.636 + {
1.637 + r = bufarray.Append(buf);
1.638 + test_KErrNone(r);
1.639 + FillShBuf(buf,0x99);
1.640 + }
1.641 + }
1.642 + while (r == KErrNone);
1.643 + test_Equal(KErrNoMemory, r);
1.644 + test_Compare(KTestPoolSizeInBufs, <=, bufarray.Count());
1.645 +
1.646 + TInt n = bufarray.Count();
1.647 + while (n)
1.648 + {
1.649 + bufarray[--n].Close();
1.650 + }
1.651 +
1.652 + User::After(500000);
1.653 +
1.654 + // Do it once more
1.655 + n = 0;
1.656 + while (n<bufarray.Count())
1.657 + {
1.658 + r = bufarray[n].Alloc(aPool);
1.659 + if (r==KErrNoMemory)
1.660 + {
1.661 + // try again after a delay, to allow for background resource allocation
1.662 + User::After(1000000);
1.663 + r = bufarray[n].Alloc(aPool);
1.664 + }
1.665 + test_Assert(r == KErrNone, test.Printf(_L("n=%d r=%d\n"), n, r));
1.666 + if(aligned)
1.667 + test(CheckNotFillShBuf(bufarray[n],0x99));
1.668 + ++n;
1.669 + }
1.670 +
1.671 + RShBuf extrabuf;
1.672 + r = extrabuf.Alloc(aPool);
1.673 + test_Equal(KErrNoMemory, r);
1.674 +
1.675 + while (n)
1.676 + {
1.677 + bufarray[--n].Close();
1.678 + }
1.679 +
1.680 + bufarray.Close();
1.681 + }
1.682 +
1.683 +void AllocateKernelMax()
1.684 + {
1.685 + test.Next(_L("Exhaust pool memory from kernel-side"));
1.686 + TInt r;
1.687 + TInt allocated;
1.688 + r = Ldd.AllocateMax(0, allocated); // P1
1.689 + test_KErrNone(r);
1.690 + test_Equal(KTestPoolSizeInBufs, allocated);
1.691 + r = Ldd.AllocateMax(1, allocated); // P2
1.692 + test_KErrNone(r);
1.693 + test_Equal(KTestPoolSizeInBufs, allocated);
1.694 + }
1.695 +
1.696 +
1.697 +/*
1.698 +@SYMTestCaseID 11
1.699 +@SYMTestCaseDesc Buffer alignment (kernel/user)
1.700 +@SYMREQ REQ11423
1.701 +@SYMTestActions
1.702 + 1. Test Thread creates several pools with different buffer alignment
1.703 + requirements:
1.704 + 2. Test Thread allocates buffers on all pools.
1.705 + 3. Test Thread frees all buffers and close pools.
1.706 +@SYMTestExpectedResults
1.707 + 1. Ok.
1.708 + 2. Buffers are aligned to the desired boundary.
1.709 + 3. Ok.
1.710 +@SYMTestPriority High
1.711 +*/
1.712 +
1.713 +void BufferAlignmentUser()
1.714 + {
1.715 + test.Next(_L("Buffer alignment (User)"));
1.716 + TInt pagesize;
1.717 + TInt r;
1.718 + r = HAL::Get(HAL::EMemoryPageSize, pagesize);
1.719 + test_KErrNone(r);
1.720 +
1.721 + // Non page aligned buffers
1.722 + TInt i;
1.723 + for (i = 0; i <= Log2(pagesize); i++)
1.724 + {
1.725 + test.Printf(_L("."));
1.726 + TShPoolCreateInfo inf(TShPoolCreateInfo::ENonPageAlignedBuffer, *PtrBufSize, 20, i); // TODO: Change minbufs back to 8 when the pool growing code works
1.727 + RShPool pool;
1.728 + r = pool.Create(inf,KDefaultPoolHandleFlags);
1.729 + test_KErrNone(r);
1.730 +
1.731 + TInt j;
1.732 + RShBuf buf[20];
1.733 + for (j = 0; j < 20; j++)
1.734 + {
1.735 + r = buf[j].Alloc(pool);
1.736 + test_KErrNone(r);
1.737 + }
1.738 +
1.739 + TInt alignment = i;
1.740 + if (alignment < KTestMinimumAlignmentLog2)
1.741 + {
1.742 + alignment = KTestMinimumAlignmentLog2;
1.743 + }
1.744 + for (j = 0; j < 20; j++)
1.745 + {
1.746 + test_Assert(!((TUint32) buf[j].Ptr() & ((1 << alignment) - 1)),
1.747 + test.Printf(_L("Pool%d buf[%d].Base() == 0x%08x"), i, j, buf[j].Ptr()));
1.748 + }
1.749 +
1.750 + for (j = 0; j < 20; j++)
1.751 + {
1.752 + buf[j].Close();
1.753 + }
1.754 + pool.Close();
1.755 + // delay to allow the management dfc to run and close pool
1.756 + User::After(100000);
1.757 + }
1.758 + test.Printf(_L("\n"));
1.759 +
1.760 + // Page aligned buffers
1.761 + TShPoolCreateInfo inf(TShPoolCreateInfo::EPageAlignedBuffer, *PtrBufSize, 20); // TODO: Change minbufs back to 8 when the pool growing code works
1.762 + RShPool pool;
1.763 + r = pool.Create(inf,KDefaultPoolHandleFlags);
1.764 + test_KErrNone(r);
1.765 +
1.766 + r = pool.SetBufferWindow(-1, ETrue);
1.767 + test_KErrNone(r);
1.768 +
1.769 + TInt j;
1.770 + RShBuf buf[20];
1.771 + for (j = 0; j < 20; j++)
1.772 + {
1.773 + r = buf[j].Alloc(pool);
1.774 + test_KErrNone(r);
1.775 + }
1.776 +
1.777 + for (j = 0; j < 20; j++)
1.778 + {
1.779 + test_Assert(!((TUint32) buf[j].Ptr() & (pagesize - 1)),
1.780 + test.Printf(_L("buf[%d].Base() == 0x%08x"), j, buf[j].Ptr()));
1.781 + }
1.782 + for (j = 0; j < 20; j++)
1.783 + {
1.784 + buf[j].Close();
1.785 + }
1.786 + pool.Close();
1.787 + }
1.788 +
1.789 +void BufferAlignmentKernel()
1.790 + {
1.791 + test.Next(_L("Buffer alignment (Kernel)"));
1.792 + TInt r;
1.793 +
1.794 + TInt pagesize;
1.795 + r = HAL::Get(HAL::EMemoryPageSize, pagesize);
1.796 + test_KErrNone(r);
1.797 +
1.798 + for (TInt i = 0; i < Log2(pagesize); i++)
1.799 + {
1.800 + test.Printf(_L("."));
1.801 + r = Ldd.BufferAlignmentKernel(*PtrBufSize, i);
1.802 + test_KErrNone(r);
1.803 + // delay to allow the management dfc to run
1.804 + User::After(100000);
1.805 + }
1.806 + test.Printf(_L("\n"));
1.807 + }
1.808 +
1.809 +/*
1.810 +@SYMTestCaseID 6
1.811 +@SYMTestCaseDesc Create pool at specific physical address
1.812 +@SYMREQ REQ11423
1.813 +@SYMTestActions
1.814 + 1. Device Driver allocates memory chunk.
1.815 + 2. Device Driver requests physical address of this memory chunk.
1.816 + 3. Device Driver creates pool at physical address of the memory chunk.
1.817 +	4. Device Driver allocates buffers on the pool, frees them and closes the pool.
1.818 +@SYMTestExpectedResults
1.819 + 1. Ok.
1.820 + 2. Ok.
1.821 + 3. Ok.
1.822 +	4. Ok.
1.823 +@SYMTestPriority High
1.824 +*/
1.825 +
1.826 +void CreateKernelPoolPhysAddr()
1.827 + {
1.828 + test.Next(_L("Create pool at specific physical address"));
1.829 + TInt r;
1.830 + test.Start(_L("Contiguous physical memory"));
1.831 + r = Ldd.CreatePoolPhysAddrCont(*PtrBufSize);
1.832 + test_KErrNone(r);
1.833 + test.Next(_L("Discontiguous physical memory"));
1.834 + r = Ldd.CreatePoolPhysAddrNonCont(*PtrBufSize);
1.835 + test_KErrNone(r);
1.836 + test.End();
1.837 + }
1.838 +
1.839 +/*
1.840 +@SYMTestCaseID 14
1.841 +@SYMTestCaseDesc Buffer separation and overwrites
1.842 +@SYMREQ REQ11423
1.843 +@SYMTestActions
1.844 + 1. Test Thread creates two pools:
1.845 + - A pool with no guard pages.
1.846 + - A pool with guard pages.
1.847 + 2. Allocate two buffers on each pool.
1.848 + 3. Test Thread creates Secondary Thread.
1.849 +	4. Secondary Thread starts reading contents of the first buffer and keeps
1.850 +	   reading beyond its limits (using a pointer, not a descriptor).
1.851 +	5. Secondary Thread starts writing on the first buffer and keeps writing beyond
1.852 +	   its limits (using a pointer, not a descriptor).
1.853 + 6. Free buffers and close pools.
1.854 +@SYMTestExpectedResults
1.855 + 1. Ok.
1.856 + 2. Ok.
1.857 + 3. Ok.
1.858 + 4. Secondary Thread panics when it attempts to read the guard page, if there
1.859 + is one. Otherwise, it moves on to the second buffer. (Secondary Thread will
1.860 + have to be restarted).
1.861 + 5. Secondary Thread panics when it attempts to write on the guard page if
1.862 + there is one. Otherwise, it carries on writing on to the second buffer.
1.863 + 6. Ok.
1.864 +@SYMTestPriority High
1.865 +*/
1.866 +
1.867 +TInt ThreadGuardPagesRead(TAny* aArg)
1.868 + {
1.869 + TUint8* ptr = (TUint8*) aArg;
1.870 + if (ptr == NULL)
1.871 + {
1.872 + return KErrArgument;
1.873 + }
1.874 + TInt bufsize = *PtrBufSize;
1.875 + TInt i;
1.876 + TUint8 val = '$';
1.877 + TBool isok = ETrue;
1.878 + for (i = 0; i < bufsize; i++)
1.879 + {
1.880 + if (*(ptr + i) != val)
1.881 + {
1.882 + isok = EFalse;
1.883 + }
1.884 + }
1.885 + if (!isok)
1.886 + {
1.887 + return KErrUnknown;
1.888 + }
1.889 + return KErrNone;
1.890 + }
1.891 +
1.892 +TInt ThreadGuardPagesWrite(TAny* aArg)
1.893 + {
1.894 + TUint8* ptr = (TUint8*) aArg;
1.895 + if (ptr == NULL)
1.896 + {
1.897 + return KErrArgument;
1.898 + }
1.899 + TInt bufsize = *PtrBufSize;
1.900 + TInt i;
1.901 + for (i = 0; i < bufsize; i++)
1.902 + {
1.903 + *(ptr + i) = '#';
1.904 + }
1.905 + return KErrNone;
1.906 + }
1.907 +
1.908 +void GuardPages()
1.909 + {
1.910 + test.Next(_L("Guard pages"));
1.911 + TInt pagesize;
1.912 + TInt r;
1.913 + r = HAL::Get(HAL::EMemoryPageSize, pagesize);
1.914 + test_KErrNone(r);
1.915 +
1.916 + // Create pools
1.917 + RShPool pool1;
1.918 + RShPool pool2;
1.919 + TShPoolCreateInfo inf(TShPoolCreateInfo::EPageAlignedBuffer, *PtrBufSize, KTestPoolSizeInBufs);
1.920 + r = pool1.Create(inf,KDefaultPoolHandleFlags);
1.921 + test_KErrNone(r);
1.922 +
1.923 + r = pool1.SetBufferWindow(-1, ETrue);
1.924 + test_KErrNone(r);
1.925 +
1.926 + r = inf.SetGuardPages();
1.927 + test_KErrNone(r);
1.928 + r = pool2.Create(inf,KDefaultPoolHandleFlags);
1.929 + test_KErrNone(r);
1.930 +
1.931 + r = pool2.SetBufferWindow(-1, ETrue);
1.932 + test_KErrNone(r);
1.933 +
1.934 + // Allocate buffers
1.935 + RShBuf bufs1[KTestPoolSizeInBufs];
1.936 + RShBuf bufs2[KTestPoolSizeInBufs];
1.937 + TInt i;
1.938 + for (i = 0; i < KTestPoolSizeInBufs; i++)
1.939 + {
1.940 + r = bufs1[i].Alloc(pool1);
1.941 + test_Assert(r == KErrNone, test.Printf(_L("Pool1: i=%d r=%d\n"), i, r));
1.942 + TPtr8 ptr(bufs1[i].Ptr(), bufs1[i].Size(),bufs1[i].Size());
1.943 + ptr.Fill('$');
1.944 + }
1.945 + for (i = 0; i < KTestPoolSizeInBufs; i++)
1.946 + {
1.947 + r = bufs2[i].Alloc(pool2);
1.948 + test_Assert(r == KErrNone, test.Printf(_L("Pool2: i=%d r=%d\n"), i, r));
1.949 +		TPtr8 ptr(bufs2[i].Ptr(), bufs2[i].Size(), bufs2[i].Size());
1.950 + ptr.Fill('$');
1.951 + }
1.952 +
1.953 + _LIT(KTestThreadRead, "GuardPagesReadTS%dP%dB%d");
1.954 + for (i = 0; i < KTestPoolSizeInBufs - 1; i++)
1.955 + {
1.956 + TBuf<40> threadname;
1.957 + RThread thread;
1.958 + TRequestStatus rs;
1.959 +
1.960 + // 1. Simple read within buffer
1.961 + // Pool 1
1.962 + threadname.Format(KTestThreadRead, 1, 1, i);
1.963 + r = thread.Create(threadname, ThreadGuardPagesRead, KDefaultStackSize, KMinHeapSize, KMinHeapSize,
1.964 + (TAny*) bufs1[i].Ptr());
1.965 + test_KErrNone(r);
1.966 + thread.Logon(rs);
1.967 + thread.Resume();
1.968 + User::WaitForRequest(rs);
1.969 + test_KErrNone(rs.Int());
1.970 + test_Equal(EExitKill, thread.ExitType());
1.971 + test_KErrNone(thread.ExitReason());
1.972 + thread.Close();
1.973 + // Pool 2
1.974 + threadname.Format(KTestThreadRead, 1, 2, i);
1.975 + r = thread.Create(threadname, ThreadGuardPagesRead, KDefaultStackSize, KMinHeapSize, KMinHeapSize,
1.976 + (TAny*) bufs2[i].Ptr());
1.977 + test_KErrNone(r);
1.978 + thread.Logon(rs);
1.979 + thread.Resume();
1.980 + User::WaitForRequest(rs);
1.981 + test_KErrNone(rs.Int());
1.982 + test_Equal(EExitKill, thread.ExitType());
1.983 + test_KErrNone(thread.ExitReason());
1.984 + thread.Close();
1.985 +
1.986 + // 2. If the buffer size is not a multiple of the MMU page size, it should be
1.987 + // possible to read after the buffer end until the page boundary
1.988 + if (*PtrBufSize % pagesize)
1.989 + {
1.990 + // Pool 1
1.991 + threadname.Format(KTestThreadRead, 2, 1, i);
1.992 + r = thread.Create(threadname, ThreadGuardPagesRead, KDefaultStackSize, KMinHeapSize, KMinHeapSize,
1.993 + (TAny*) (bufs1[i].Ptr() + pagesize - *PtrBufSize % pagesize));
1.994 + test_KErrNone(r);
1.995 + thread.Logon(rs);
1.996 + thread.Resume();
1.997 + User::WaitForRequest(rs);
1.998 + if (rs.Int() != KErrNone)
1.999 + {
1.1000 + test_Equal(KErrUnknown, rs.Int());
1.1001 + test_Equal(KErrUnknown, thread.ExitReason());
1.1002 + }
1.1003 + test_Equal(EExitKill, thread.ExitType());
1.1004 + thread.Close();
1.1005 + // Pool 2
1.1006 + threadname.Format(KTestThreadRead, 2, 2, i);
1.1007 + r = thread.Create(threadname, ThreadGuardPagesRead, KDefaultStackSize, KMinHeapSize, KMinHeapSize,
1.1008 + (TAny*) (bufs2[i].Ptr() + pagesize - *PtrBufSize % pagesize));
1.1009 + test_KErrNone(r);
1.1010 + thread.Logon(rs);
1.1011 + thread.Resume();
1.1012 + User::WaitForRequest(rs);
1.1013 + if (rs.Int() != KErrNone)
1.1014 + {
1.1015 + test_Equal(KErrUnknown, rs.Int());
1.1016 + test_Equal(KErrUnknown, thread.ExitReason());
1.1017 + }
1.1018 + test_Equal(EExitKill, thread.ExitType());
1.1019 + thread.Close();
1.1020 + }
1.1021 +
1.1022 + // 3. Now we attempt to read the first byte on the next page after the end of
1.1023 + // our buffer.
1.1024 + TInt offset;
1.1025 + if (*PtrBufSize % pagesize)
1.1026 + {
1.1027 + offset = pagesize - *PtrBufSize % pagesize + 1;
1.1028 + }
1.1029 + else
1.1030 + {
1.1031 + offset = 1;
1.1032 + }
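+		// For example, with a 4096-byte page and an 853-byte buffer, offset is
+		// 4096 - 853 + 1 = 3244, so an 853-byte read starting at Ptr() + offset
+		// ends on the first byte of the page following the buffer.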
1.1033 + // Pool 1
1.1034 + if (bufs1[i + 1].Ptr() == bufs1[i].Ptr() + RoundUp(*PtrBufSize, Log2(pagesize)))
1.1035 + {
1.1036 +			// Only perform this test if the next buffer immediately follows this one.
1.1037 +			// This is not necessarily the case on the Flexible Memory Model.
1.1038 + threadname.Format(KTestThreadRead, 3, 1, i);
1.1039 + r = thread.Create(threadname, ThreadGuardPagesRead, KDefaultStackSize, KMinHeapSize, KMinHeapSize,
1.1040 + (TAny*) (bufs1[i].Ptr() + offset));
1.1041 + test_KErrNone(r);
1.1042 + thread.Logon(rs);
1.1043 + thread.Resume();
1.1044 + User::WaitForRequest(rs);
1.1045 + if (rs.Int() != KErrNone) // No guard page, so it should be fine
1.1046 + {
1.1047 + test_Equal(KErrUnknown, rs.Int());
1.1048 + test_Equal(KErrUnknown, thread.ExitReason());
1.1049 + }
1.1050 + test_Equal(EExitKill, thread.ExitType());
1.1051 + thread.Close();
1.1052 + }
1.1053 + // Pool 2
1.1054 + TBool jit = User::JustInTime();
1.1055 + User::SetJustInTime(EFalse);
1.1056 + threadname.Format(KTestThreadRead, 3, 2, i);
1.1057 + r = thread.Create(threadname, ThreadGuardPagesRead, KDefaultStackSize, KMinHeapSize, KMinHeapSize,
1.1058 + (TAny*) (bufs2[i].Ptr() + offset));
1.1059 + test_KErrNone(r);
1.1060 + thread.Logon(rs);
1.1061 + thread.Resume();
1.1062 + User::WaitForRequest(rs);
1.1063 + test_Equal(3, rs.Int());
1.1064 + test_Equal(EExitPanic, thread.ExitType());
1.1065 + test_Equal(3, thread.ExitReason()); // KERN-EXEC 3
1.1066 + thread.Close();
1.1067 + User::SetJustInTime(jit);
1.1068 + }
1.1069 +
1.1070 + _LIT(KTestThreadWrite, "GuardPagesWriteTS%dP%dB%d");
1.1071 + for (i = 0; i < KTestPoolSizeInBufs - 1; i++)
1.1072 + {
1.1073 + TBuf<40> threadname;
1.1074 + RThread thread;
1.1075 + TRequestStatus rs;
1.1076 +
1.1077 + // 1. Simple write within buffer
1.1078 + // Pool 1
1.1079 + threadname.Format(KTestThreadWrite, 1, 1, i);
1.1080 + r = thread.Create(threadname, ThreadGuardPagesWrite, KDefaultStackSize, KMinHeapSize, KMinHeapSize,
1.1081 + (TAny*) bufs1[i].Ptr());
1.1082 + test_KErrNone(r);
1.1083 + thread.Logon(rs);
1.1084 + thread.Resume();
1.1085 + User::WaitForRequest(rs);
1.1086 + test_KErrNone(rs.Int());
1.1087 + test_Equal(EExitKill, thread.ExitType());
1.1088 + test_KErrNone(thread.ExitReason());
1.1089 + thread.Close();
1.1090 + // Pool 2
1.1091 + threadname.Format(KTestThreadWrite, 1, 2, i);
1.1092 + r = thread.Create(threadname, ThreadGuardPagesWrite, KDefaultStackSize, KMinHeapSize, KMinHeapSize,
1.1093 + (TAny*) bufs2[i].Ptr());
1.1094 + test_KErrNone(r);
1.1095 + thread.Logon(rs);
1.1096 + thread.Resume();
1.1097 + User::WaitForRequest(rs);
1.1098 + test_KErrNone(rs.Int());
1.1099 + test_Equal(EExitKill, thread.ExitType());
1.1100 + test_KErrNone(thread.ExitReason());
1.1101 + thread.Close();
1.1102 +
1.1103 + // 2. If the buffer size is not a multiple of the MMU page size, it should be
1.1104 + // possible to write after the buffer end until the page boundary
1.1105 + if (*PtrBufSize % pagesize)
1.1106 + {
1.1107 + // Pool 1
1.1108 + threadname.Format(KTestThreadWrite, 2, 1, i);
1.1109 + r = thread.Create(threadname, ThreadGuardPagesWrite, KDefaultStackSize, KMinHeapSize, KMinHeapSize,
1.1110 + (TAny*) (bufs1[i].Ptr() + pagesize - *PtrBufSize % pagesize));
1.1111 + test_KErrNone(r);
1.1112 + thread.Logon(rs);
1.1113 + thread.Resume();
1.1114 + User::WaitForRequest(rs);
1.1115 + test_KErrNone(rs.Int());
1.1116 + test_Equal(EExitKill, thread.ExitType());
1.1117 + test_KErrNone(thread.ExitReason());
1.1118 + thread.Close();
1.1119 + // Pool 2
1.1120 + threadname.Format(KTestThreadWrite, 2, 2, i);
1.1121 + r = thread.Create(threadname, ThreadGuardPagesWrite, KDefaultStackSize, KMinHeapSize, KMinHeapSize,
1.1122 + (TAny*) (bufs2[i].Ptr() + pagesize - *PtrBufSize % pagesize));
1.1123 + test_KErrNone(r);
1.1124 + thread.Logon(rs);
1.1125 + thread.Resume();
1.1126 + User::WaitForRequest(rs);
1.1127 + test_KErrNone(rs.Int());
1.1128 + test_Equal(EExitKill, thread.ExitType());
1.1129 + test_KErrNone(thread.ExitReason());
1.1130 + thread.Close();
1.1131 + }
1.1132 +
1.1133 + // 3. Now we attempt to write on the first byte on the next page after the
1.1134 + // end of our buffer.
1.1135 + TInt offset;
1.1136 + if (*PtrBufSize % pagesize)
1.1137 + {
1.1138 + offset = pagesize - *PtrBufSize % pagesize + 1;
1.1139 + }
1.1140 + else
1.1141 + {
1.1142 + offset = 1;
1.1143 + }
1.1144 + // Pool 1
1.1145 + if (bufs1[i + 1].Ptr() == bufs1[i].Ptr() + RoundUp(*PtrBufSize, Log2(pagesize)))
1.1146 + {
1.1147 +			// Only perform this test if the next buffer immediately follows this one.
1.1148 +			// This is not necessarily the case on the Flexible Memory Model.
1.1149 + threadname.Format(KTestThreadWrite, 3, 1, i);
1.1150 + r = thread.Create(threadname, ThreadGuardPagesWrite, KDefaultStackSize, KMinHeapSize, KMinHeapSize,
1.1151 + (TAny*) (bufs1[i].Ptr() + offset));
1.1152 + test_KErrNone(r);
1.1153 + thread.Logon(rs);
1.1154 + thread.Resume();
1.1155 + User::WaitForRequest(rs);
1.1156 + test_KErrNone(rs.Int());
1.1157 + test_Equal(EExitKill, thread.ExitType());
1.1158 + test_KErrNone(thread.ExitReason());
1.1159 + thread.Close();
1.1160 + }
1.1161 +
1.1162 + // Pool 2
1.1163 + TBool jit = User::JustInTime();
1.1164 + User::SetJustInTime(EFalse);
1.1165 + threadname.Format(KTestThreadWrite, 3, 2, i);
1.1166 + r = thread.Create(threadname, ThreadGuardPagesWrite, KDefaultStackSize, KMinHeapSize, KMinHeapSize,
1.1167 + (TAny*) (bufs2[i].Ptr() + offset));
1.1168 + test_KErrNone(r);
1.1169 + thread.Logon(rs);
1.1170 + thread.Resume();
1.1171 + User::WaitForRequest(rs);
1.1172 + test_Equal(3, rs.Int());
1.1173 + test_Equal(EExitPanic, thread.ExitType());
1.1174 + test_Equal(3, thread.ExitReason()); // KERN-EXEC 3
1.1175 + thread.Close();
1.1176 + User::SetJustInTime(jit);
1.1177 + }
1.1178 +
1.1179 + // Free buffers
1.1180 + for (i = 0; i < KTestPoolSizeInBufs; i++)
1.1181 + {
1.1182 + bufs1[i].Close();
1.1183 + bufs2[i].Close();
1.1184 + }
1.1185 + pool1.Close();
1.1186 + pool2.Close();
1.1187 + }
1.1188 +
1.1189 +/*
1.1190 +@SYMTestCaseID 12
1.1191 +@SYMTestCaseDesc Buffer mapping
1.1192 +@SYMREQ REQ11423
1.1193 +@SYMTestActions
1.1194 + 1. Test Thread allocates buffer on a mappable pool.
1.1195 + 2. Test Thread spawns Slave Process.
1.1196 + 3. Test Thread passes buffer handle to Slave Process.
1.1197 + 4. Slave Process attempts to read buffer then write to buffer.
1.1198 + 5. Slave Process maps buffer.
1.1199 + 6. Slave Process attempts to read buffer then write to buffer.
1.1200 + 7. Slave Process unmaps buffer.
1.1201 + 8. Slave Process attempts to read buffer then write to buffer.
1.1202 + 9. Test Thread kills Slave Process and frees buffer.
1.1203 +@SYMTestExpectedResults
1.1204 + 1. Ok.
1.1205 + 2. Ok.
1.1206 + 3. Ok.
1.1207 + 4. Slave Process panics. (and will have to be restarted)
1.1208 + 5. Ok.
1.1209 + 6. Ok.
1.1210 + 7. Ok.
1.1211 + 8. Slave Process panics.
1.1212 + 9. Ok.
1.1213 +@SYMTestPriority High
1.1214 +*/
1.1215 +
1.1216 +TInt ThreadBufferMappingRead(TAny* aArg)
1.1217 + {
1.1218 + if (!aArg)
1.1219 + {
1.1220 + return KErrArgument;
1.1221 + }
1.1222 + RShBuf* buf = (RShBuf*) aArg;
1.1223 + TUint x = 0;
1.1224 + TUint i;
1.1225 + volatile TUint8* ptr = buf->Ptr();
1.1226 +
1.1227 + for (i = 0; i < buf->Size(); i++)
1.1228 + {
1.1229 + x += *(ptr + i);
1.1230 + }
1.1231 + return KErrNone;
1.1232 + }
1.1233 +
1.1234 +TInt ThreadBufferMappingWrite(TAny* aArg)
1.1235 + {
1.1236 + if (!aArg)
1.1237 + {
1.1238 + return KErrArgument;
1.1239 + }
1.1240 + RShBuf* buf = (RShBuf*) aArg;
1.1241 + TPtr8 ptr(buf->Ptr(), buf->Size(),buf->Size());
1.1242 + ptr.Fill('Q');
1.1243 + return KErrNone;
1.1244 + }
1.1245 +
1.1246 +const TInt KTestBufferMappingPoolTypes = 8;
1.1247 +const TInt KTestBufferMappingTypes = 8;
1.1248 +
1.1249 +void BufferMapping()
1.1250 + {
1.1251 + test.Next(_L("Buffer Mapping"));
1.1252 +#ifdef __WINS__
1.1253 + test.Printf(_L("Does not run on the emulator. Skipped\n"));
1.1254 +#else
1.1255 + TInt r;
1.1256 + RShPool pool[KTestBufferMappingPoolTypes];
1.1257 + RShBuf buf[KTestBufferMappingTypes][KTestBufferMappingPoolTypes];
1.1258 + TUint poolflags[KTestBufferMappingPoolTypes];
1.1259 + TInt bufferwindow[KTestBufferMappingPoolTypes];
1.1260 + TShPoolCreateInfo inf(TShPoolCreateInfo::EPageAlignedBuffer, *PtrBufSize, KTestBufferMappingTypes);
1.1261 +
1.1262 + // POOL TYPES
1.1263 + // ------------------------------------------
1.1264 + // Pool no. AutoMap Writeable BufWindow
1.1265 +	// 0			0			0			0
1.1266 +	// 1			1			0			0
1.1267 +	// 2			0			0			-1
1.1268 +	// 3			1			0			-1
1.1269 +	// 4			0			1			0
1.1270 +	// 5			1			1			0
1.1271 +	// 6			0			1			-1
1.1272 +	// 7			1			1			-1
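+	//
+	// For example, pool 6 below ends up writeable, not auto-mapped, and with a
+	// whole-pool buffer window: poolflags[6] == (EShPoolAllocate | EShPoolWriteable)
+	// and bufferwindow[6] == -1.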
1.1273 +
1.1274 + TInt i;
1.1275 + test.Printf(_L("Create pools:"));
1.1276 + for (i = 0; i < KTestBufferMappingPoolTypes; i++)
1.1277 + {
1.1278 + poolflags[i] = EShPoolAllocate;
1.1279 + bufferwindow[i] = 0;
1.1280 + if (i % 2)
1.1281 + {
1.1282 + poolflags[i] |= EShPoolAutoMapBuf;
1.1283 + }
1.1284 + if (i > 3)
1.1285 + {
1.1286 + poolflags[i] |= EShPoolWriteable;
1.1287 + }
1.1288 + if (i % 4 > 1)
1.1289 + {
1.1290 + bufferwindow[i] = -1;
1.1291 + }
1.1292 + r = pool[i].Create(inf, poolflags[i] & ~EShPoolAutoMapBuf);
1.1293 + test_KErrNone(r);
1.1294 + r = pool[i].SetBufferWindow(bufferwindow[i], poolflags[i] & EShPoolAutoMapBuf);
1.1295 + test_KErrNone(r);
1.1296 + test.Printf(_L("."));
1.1297 + }
1.1298 + test.Printf(_L("\n"));
1.1299 +
1.1300 + // BUFFER TYPES
1.1301 + // Buffer no. Actions
1.1302 + // 0 Alloc unmapped.
1.1303 + // 1 Alloc unmapped then unmap again.
1.1304 +	// 2			Default Alloc. Unmap if it is an AutoMap pool.
1.1305 + // 3 Alloc unmapped. Map Read-Only.
1.1306 +	// 4			Default Alloc. Unmap if it is an R/W pool and re-map Read-Only.
1.1307 + // 5 Alloc unmapped. Map R/W
1.1308 + // 6 Default Alloc. Unmap and re-map.
1.1309 + // 7 Default Alloc R/W. Map again with Read-Only setting.
1.1310 + // Depending on the pool type, the actions above might not always be possible.
1.1311 +
1.1312 + // Buffer allocation
1.1313 + TInt j;
1.1314 + test.Printf(_L("Allocate buffers\n"));
1.1315 + for (j = 0; j < KTestBufferMappingPoolTypes; j++)
1.1316 + {
1.1317 + test.Printf(_L("\nPool %d:"), j);
1.1318 + for (i = 0; i < KTestBufferMappingTypes; i++)
1.1319 + {
1.1320 + switch (i % KTestBufferMappingTypes)
1.1321 + {
1.1322 + // Unmapped buffers
1.1323 + case 0:
1.1324 + case 1:
1.1325 + // This should always result in an unmapped buffer
1.1326 + r = buf[i][j].Alloc(pool[j], EShPoolAllocNoMap);
1.1327 + test_KErrNone(r);
1.1328 +
1.1329 + if((i % KTestBufferMappingTypes) == 1)
1.1330 + {
1.1331 + // Alloc unmapped then unmap again.
1.1332 + r = buf[i][j].UnMap();
1.1333 + test_Equal(KErrNotFound, r);
1.1334 + }
1.1335 + break;
1.1336 + case 2:
1.1337 + r = buf[i][j].Alloc(pool[j]);
1.1338 + if (poolflags[j] & EShPoolAutoMapBuf)
1.1339 + {
1.1340 + if (bufferwindow[j] == 0)
1.1341 + {
1.1342 + // Can't ask for a mapped buffer when buffer window is not set
1.1343 + test_Equal(KErrNoMemory, r);
1.1344 + }
1.1345 + else
1.1346 + {
1.1347 + // Alloc'd buffer was mapped - unmap it
1.1348 + test_KErrNone(r);
1.1349 + r = buf[i][j].UnMap();
1.1350 + test_KErrNone(r);
1.1351 + }
1.1352 + }
1.1353 + else
1.1354 + {
1.1355 + // Buffer not mapped
1.1356 + test_KErrNone(r);
1.1357 + }
1.1358 + break;
1.1359 +
1.1360 + // Read-Only buffers
1.1361 + case 3:
1.1362 + r = buf[i][j].Alloc(pool[j], EShPoolAllocNoMap);
1.1363 + test_KErrNone(r);
1.1364 + r = buf[i][j].Map(ETrue);
1.1365 + if (bufferwindow[j])
1.1366 + {
1.1367 + test_KErrNone(r);
1.1368 + }
1.1369 + else
1.1370 + {
1.1371 + test_Equal(KErrNoMemory, r);
1.1372 + }
1.1373 + break;
1.1374 + case 4:
1.1375 + r = buf[i][j].Alloc(pool[j]);
1.1376 + if (poolflags[j] & EShPoolAutoMapBuf)
1.1377 + {
1.1378 + if (bufferwindow[j] == 0)
1.1379 + {
1.1380 + // Can't ask for a mapped buffer when buffer window is not set
1.1381 + test_Equal(KErrNoMemory, r);
1.1382 + }
1.1383 + else if (poolflags[j] & EShPoolWriteable)
1.1384 + {
1.1385 + // Alloc'd buffer was mapped R/W - re-map it R/O
1.1386 + test_KErrNone(r);
1.1387 + r = buf[i][j].UnMap();
1.1388 + test_KErrNone(r);
1.1389 + r = buf[i][j].Map(ETrue);
1.1390 + test_KErrNone(r);
1.1391 + }
1.1392 + else
1.1393 + {
1.1394 + // Nothing to do
1.1395 + test_KErrNone(r);
1.1396 + }
1.1397 + }
1.1398 + else
1.1399 + {
1.1400 + // Buffer not mapped
1.1401 + test_KErrNone(r);
1.1402 + if (bufferwindow[j])
1.1403 + {
1.1404 + if (poolflags[j] & EShPoolWriteable)
1.1405 + {
1.1406 + // Explicitly map Read-Only
1.1407 + r = buf[i][j].Map(ETrue);
1.1408 + test_KErrNone(r);
1.1409 + }
1.1410 + else
1.1411 + {
1.1412 + // If Pool is RO, map default
1.1413 + r = buf[i][j].Map();
1.1414 + test_KErrNone(r);
1.1415 + }
1.1416 + }
1.1417 + else
1.1418 + {
1.1419 + // Can't map buffer
1.1420 + r = buf[i][j].Map(ETrue);
1.1421 + test_Equal(KErrNoMemory, r);
1.1422 + }
1.1423 + }
1.1424 + break;
1.1425 +
1.1426 + // Mapped for Read-Write
1.1427 + case 5:
1.1428 + r = buf[i][j].Alloc(pool[j], EShPoolAllocNoMap);
1.1429 + test_KErrNone(r);
1.1430 + r = buf[i][j].Map();
1.1431 + if (bufferwindow[j] == 0)
1.1432 + {
1.1433 + test_Equal(KErrNoMemory, r);
1.1434 + }
+				else
+					{
+					// mapping succeeds whether or not the pool is writeable
+					test_KErrNone(r);
+					}
1.1443 + break;
1.1444 + case 6:
1.1445 + case 7:
1.1446 + r = buf[i][j].Alloc(pool[j]);
1.1447 + if (poolflags[j] & EShPoolAutoMapBuf)
1.1448 + {
1.1449 + if (bufferwindow[j] == 0)
1.1450 + {
1.1451 + // Can't ask for a mapped buffer when buffer window is not set
1.1452 + test_Equal(KErrNoMemory, r);
1.1453 + }
1.1454 + else if (poolflags[j] & EShPoolWriteable)
1.1455 + {
1.1456 + // Alloc'd buffer was mapped R/W
1.1457 + test_KErrNone(r);
1.1458 +
1.1459 + if((i % KTestBufferMappingTypes) == 7)
1.1460 + {
1.1461 + // Mapped for Read-Write then remapped as Read-Only
1.1462 +							r = buf[i][j].Map(ETrue);
1.1463 + test_Equal(KErrAlreadyExists, r);
1.1464 + }
1.1465 + }
1.1466 + }
1.1467 + else
1.1468 + {
1.1469 + // Buffer not mapped
1.1470 + test_KErrNone(r);
1.1471 + if (bufferwindow[j])
1.1472 + {
1.1473 + if (poolflags[j] & EShPoolWriteable)
1.1474 + {
1.1475 + // Default mapping
1.1476 + r = buf[i][j].Map();
1.1477 + test_KErrNone(r);
1.1478 +
1.1479 + if((i % KTestBufferMappingTypes) == 7)
1.1480 + {
1.1481 + // Mapped for Read-Write then remapped as Read-Only
1.1482 +							r = buf[i][j].Map(ETrue);
1.1483 + test_Equal(KErrAlreadyExists, r);
1.1484 + }
1.1485 + }
1.1486 + }
1.1487 + else
1.1488 + {
1.1489 + // Can't map buffer
1.1490 + r = buf[i][j].Map(ETrue);
1.1491 + test_Equal(KErrNoMemory, r);
1.1492 + }
1.1493 + }
1.1494 + break;
1.1495 +
1.1496 + default: test(EFalse);
1.1497 + }
1.1498 + test.Printf(_L("."));
1.1499 + }
1.1500 + }
1.1501 + test.Printf(_L("\n"));
1.1502 +
1.1503 + // Read and write tests
1.1504 + _LIT(KTestThreadName, "BufferMappingBuf%d(Test%d)");
1.1505 + test.Printf(_L("Read & Write tests\n"));
1.1506 + for (j = 0; j < KTestBufferMappingPoolTypes; j++)
1.1507 + {
1.1508 + for (i = 0; i < KTestBufferMappingTypes; i++)
1.1509 + {
1.1510 + if (buf[i][j].Handle())
1.1511 + {
1.1512 + switch (i % KTestBufferMappingTypes)
1.1513 + {
1.1514 + case 1:
1.1515 + case 2:
1.1516 + // Buffer not mapped - Read should fail
1.1517 + if (buf[i][j].Ptr() == NULL)
1.1518 + {
1.1519 + RThread thread;
1.1520 + TRequestStatus threadrs;
1.1521 + TBuf<40> threadname;
1.1522 + threadname.Format(KTestThreadName, i, (i % KTestBufferMappingTypes) + 1);
1.1523 + r = thread.Create(threadname, ThreadBufferMappingRead, KDefaultStackSize, KMinHeapSize, KMinHeapSize, (TAny*) &buf[i][j]);
1.1524 + test_KErrNone(r);
1.1525 + thread.Logon(threadrs);
1.1526 + thread.Resume();
1.1527 + User::WaitForRequest(threadrs);
1.1528 + test_Equal(3, threadrs.Int());
1.1529 + test_Equal(EExitPanic, thread.ExitType());
1.1530 + test_Equal(3, thread.ExitReason()); // KERN-EXEC 3
1.1531 + CLOSE_AND_WAIT(thread);
1.1532 + // Map buffer read-only for next test
1.1533 + r = buf[i][j].Map(ETrue);
1.1534 + if (bufferwindow[j])
1.1535 + {
1.1536 + test_KErrNone(r);
1.1537 + }
1.1538 + else
1.1539 + {
1.1540 + test_Equal(KErrNoMemory, r);
1.1541 + }
1.1542 + }
1.1543 + case 3:
1.1544 + case 4:
1.1545 + // Buffer mapped for R/O access - Read should not fail
1.1546 + if (bufferwindow[j] == 0)
1.1547 + {
1.1548 + break;
1.1549 + }
1.1550 + else
1.1551 + {
1.1552 + RThread thread;
1.1553 + TRequestStatus threadrs;
1.1554 + TBuf<40> threadname;
1.1555 + threadname.Format(KTestThreadName, i, (i % KTestBufferMappingTypes) + 1);
1.1556 + r = thread.Create(threadname, ThreadBufferMappingRead, KDefaultStackSize, KMinHeapSize, KMinHeapSize, (TAny*) &buf[i][j]);
1.1557 + test_KErrNone(r);
1.1558 + thread.Logon(threadrs);
1.1559 + thread.Resume();
1.1560 + User::WaitForRequest(threadrs);
1.1561 + test_KErrNone(threadrs.Int());
1.1562 + test_Equal(EExitKill, thread.ExitType());
1.1563 + test_KErrNone(thread.ExitReason());
1.1564 + CLOSE_AND_WAIT(thread);
1.1565 + }
1.1566 + // Write should fail
1.1567 + if (buf[i][j].Ptr())
1.1568 + {
1.1569 + RThread thread;
1.1570 + TRequestStatus threadrs;
1.1571 + TBuf<40> threadname;
1.1572 + threadname.Format(KTestThreadName, i, (i % KTestBufferMappingTypes) + 2);
1.1573 + r = thread.Create(threadname, ThreadBufferMappingWrite, KDefaultStackSize, KMinHeapSize, KMinHeapSize,(TAny*) &buf[i][j]);
1.1574 + test_KErrNone(r);
1.1575 + thread.Logon(threadrs);
1.1576 + thread.Resume();
1.1577 + User::WaitForRequest(threadrs);
1.1578 + test_Equal(3, threadrs.Int());
1.1579 + test_Equal(EExitPanic, thread.ExitType());
1.1580 + test_Equal(3, thread.ExitReason()); // KERN-EXEC 3
1.1581 + CLOSE_AND_WAIT(thread);
1.1582 + // Map buffer read-write for next test
1.1583 + r = buf[i][j].UnMap();
1.1584 + if(r != KErrNotFound)
1.1585 + {
1.1586 + test_KErrNone(r);
1.1587 + }
1.1588 + r = buf[i][j].Map();
1.1589 + test_KErrNone(r);
1.1590 + }
1.1591 + case 5:
1.1592 + case 6:
1.1593 + // Buffer mapped for R/W access - Write should not fail
1.1594 + if (bufferwindow[j] == 0 || !(poolflags[j] & EShPoolWriteable))
1.1595 + {
1.1596 + break;
1.1597 + }
1.1598 + else
1.1599 + {
1.1600 + RThread thread;
1.1601 + TRequestStatus threadrs;
1.1602 + TBuf<40> threadname;
1.1603 + threadname.Format(KTestThreadName, i, (i % KTestBufferMappingTypes) + 1);
1.1604 + r = thread.Create(threadname, ThreadBufferMappingWrite, KDefaultStackSize, KMinHeapSize, KMinHeapSize,(TAny*) &buf[i][j]);
1.1605 + test_KErrNone(r);
1.1606 + thread.Logon(threadrs);
1.1607 + thread.Resume();
1.1608 + User::WaitForRequest(threadrs);
1.1609 + test_KErrNone(threadrs.Int());
1.1610 + test_Equal(EExitKill, thread.ExitType());
1.1611 + test_KErrNone(thread.ExitReason());
1.1612 + CLOSE_AND_WAIT(thread);
1.1613 + // Unmap buffer for next test
1.1614 + r = buf[i][j].UnMap();
1.1615 + test_KErrNone(r);
1.1616 + }
1.1617 + // Buffer not mapped - Read should fail
1.1618 + if (buf[i][j].Ptr())
1.1619 + {
1.1620 + RThread thread;
1.1621 + TRequestStatus threadrs;
1.1622 + TBuf<40> threadname;
1.1623 + threadname.Format(KTestThreadName, i, (i % KTestBufferMappingTypes) + 2);
1.1624 + r = thread.Create(threadname, ThreadBufferMappingRead, KDefaultStackSize, KMinHeapSize, KMinHeapSize,(TAny*) &buf[i][j]);
1.1625 + test_KErrNone(r);
1.1626 + thread.Logon(threadrs);
1.1627 + thread.Resume();
1.1628 + User::WaitForRequest(threadrs);
1.1629 + test_Equal(3, threadrs.Int());
1.1630 + test_Equal(EExitPanic, thread.ExitType());
1.1631 + test_Equal(3, thread.ExitReason()); // KERN-EXEC 3
1.1632 + CLOSE_AND_WAIT(thread);
1.1633 + }
1.1634 + }
1.1635 + }
1.1636 + buf[i][j].Close();
1.1637 + test.Printf(_L("."));
1.1638 + }
1.1639 + pool[j].Close();
1.1640 + test.Printf(_L("\n"));
1.1641 + }
1.1642 +#endif
1.1643 + }
1.1644 +
1.1645 +void BufferWindow()
1.1646 + {
1.1647 + test.Next(_L("Buffer Window tests"));
1.1648 +#ifdef __WINS__
1.1649 + test.Printf(_L("Does not run on the emulator. Skipped\n"));
1.1650 +#else
1.1651 + TInt r;
1.1652 + RShPool pool;
1.1653 + RShBuf buf[KTestPoolSizeInBufs * 2 + 1];
1.1654 + TShPoolCreateInfo inf(TShPoolCreateInfo::EPageAlignedBuffer, *PtrBufSize, KTestPoolSizeInBufs * 2);
1.1655 + r = pool.Create(inf, KDefaultPoolHandleFlags);
1.1656 + test_KErrNone(r);
1.1657 +
1.1658 +	// Allocate buffers but don't map them into this process's address space
1.1659 + TInt i;
1.1660 + for (i = 0; i < KTestPoolSizeInBufs * 2; i++)
1.1661 + {
1.1662 + r = buf[i].Alloc(pool, EShPoolAllocNoMap);
1.1663 + test_KErrNone(r);
1.1664 + }
1.1665 +
1.1666 + // Pool is full
1.1667 + r = buf[KTestPoolSizeInBufs * 2].Alloc(pool, EShPoolAllocNoMap);
1.1668 + test_Equal(KErrNoMemory, r);
1.1669 + r = buf[0].Map();
1.1670 + test_Equal(KErrNoMemory, r);
1.1671 +
1.1672 + // Open a one-buffer window
1.1673 + r = pool.SetBufferWindow(1, ETrue);
1.1674 + test_KErrNone(r);
1.1675 + r = buf[0].Map();
1.1676 + test_KErrNone(r);
1.1677 + TPtr8 ptr0(buf[0].Ptr(), buf[0].Size(),buf[0].Size());
1.1678 + ptr0.Fill('>');
1.1679 + r = buf[1].Map();
1.1680 + test_Equal(KErrNoMemory, r);
1.1681 + r = buf[0].UnMap();
1.1682 + test_KErrNone(r);
1.1683 + r = buf[1].Map();
1.1684 + test_KErrNone(r);
1.1685 +	TPtr8 ptr1(buf[1].Ptr(), buf[1].Size(), buf[1].Size());
1.1686 + ptr1.Fill('<');
1.1687 + r = buf[2].Map();
1.1688 + test_Equal(KErrNoMemory, r);
1.1689 +
1.1690 + // Enlarge window by one buffer
1.1691 + r = pool.SetBufferWindow(2, ETrue);
1.1692 + test_Equal(KErrAlreadyExists, r);
1.1693 +
1.1694 +	// Close all buffers
1.1695 + for (i = 0; i < KTestPoolSizeInBufs * 2; i++)
1.1696 + {
1.1697 + buf[i].Close();
1.1698 + }
1.1699 +
1.1700 + pool.Close();
1.1701 + r = pool.Create(inf, KDefaultPoolHandleFlags);
1.1702 + test_KErrNone(r);
1.1703 +
1.1704 + r = pool.SetBufferWindow(KTestPoolSizeInBufs, ETrue); // Half the pool size
1.1705 + test_KErrNone(r);
1.1706 + for (i = 0; i < KTestPoolSizeInBufs * 2 - 1; i++)
1.1707 + {
1.1708 + if (i < KTestPoolSizeInBufs)
1.1709 + {
1.1710 + r = buf[i].Alloc(pool, 0);
1.1711 + test_KErrNone(r);
1.1712 +			TPtr8 ptr(buf[i].Ptr(), buf[i].Size(), buf[i].Size());
1.1713 + ptr.Fill('?');
1.1714 + }
1.1715 + else
1.1716 + {
1.1717 + r = buf[i].Alloc(pool, EShPoolAllocNoMap);
1.1718 + test_KErrNone(r);
1.1719 + }
1.1720 + }
1.1721 + r = buf[KTestPoolSizeInBufs * 2].Alloc(pool, 0);
1.1722 + test_Equal(KErrNoMemory, r);
1.1723 + r = buf[KTestPoolSizeInBufs].Map();
1.1724 + test_Equal(KErrNoMemory, r);
1.1725 + r = buf[KTestPoolSizeInBufs * 2].Alloc(pool, EShPoolAllocNoMap);
1.1726 + test_KErrNone(r);
1.1727 +
1.1728 + // That's it
1.1729 + for (i = 0; i < (KTestPoolSizeInBufs * 2) + 1; i++)
1.1730 + {
1.1731 + buf[i].Close();
1.1732 + }
1.1733 + pool.Close();
1.1734 +
1.1735 + // Try again with automap set to false
1.1736 + RShPool pool2;
1.1737 + r = pool2.Create(inf, KDefaultPoolHandleFlags);
1.1738 + test_KErrNone(r);
1.1739 + for (i = 0; i < KTestPoolSizeInBufs * 2; i++)
1.1740 + {
1.1741 + r = buf[i].Alloc(pool2, 0);
1.1742 + test_KErrNone(r);
1.1743 + }
1.1744 + r = pool2.SetBufferWindow(-1, EFalse);
1.1745 + test_KErrNone(r);
1.1746 + for (i = 0; i < KTestPoolSizeInBufs * 2; i++)
1.1747 + {
1.1748 + r = buf[i].Map(ETrue);
1.1749 + test_KErrNone(r);
1.1750 + }
1.1751 + for (i = 0; i < KTestPoolSizeInBufs * 2; i++)
1.1752 + {
1.1753 + buf[i].Close();
1.1754 + }
1.1755 + pool2.Close();
1.1756 +#endif
1.1757 + }
1.1758 +
1.1759 +/*
1.1760 +@SYMTestCaseID 7
1.1761 +@SYMTestCaseDesc Trigger notifications
1.1762 +@SYMREQ REQ11423
1.1763 +@SYMTestActions
1.1764 + Set Low Space Notifications on various thresholds.
1.1765 + In a separate thread, keep allocating buffers.
1.1766 +@SYMTestExpectedResults
1.1767 + Notifications are completed when their respective levels are reached.
1.1768 +@SYMTestPriority Medium
1.1769 +*/
1.1770 +
1.1771 +TInt ThreadNotifications(TAny* aArg)
1.1772 + {
1.1773 + if (!aArg)
1.1774 + {
1.1775 + return KErrArgument;
1.1776 + }
1.1777 + RShPool* pool = (RShPool*) aArg;
1.1778 + RArray<RShBuf> bufarray;
1.1779 + TInt r;
1.1780 + RSemaphore sem;
1.1781 + r = sem.OpenGlobal(KTestLowSpaceSemaphore);
1.1782 + if (r)
1.1783 + {
1.1784 + RDebug::Printf("Line %d: r=%d", __LINE__, r);
1.1785 + return r;
1.1786 + }
1.1787 + // Start allocating buffers
1.1788 + while (pool->FreeCount() > 1)
1.1789 + {
1.1790 + RShBuf buf;
1.1791 + r = buf.Alloc(*pool);
1.1792 + if (r)
1.1793 + {
1.1794 + RDebug::Printf("Line %d: count=%d r=%d", __LINE__, bufarray.Count(), r);
1.1795 + return r;
1.1796 + }
1.1797 + bufarray.Append(buf);
1.1798 + if ((bufarray.Count() == 1) // wait for low3
1.1799 + || (bufarray.Count() == KTestPoolSizeInBufs - 2) // wait for low2
1.1800 + || (bufarray.Count() == KTestPoolSizeInBufs - 1)) // wait for low1/low4
1.1801 + {
1.1802 + r = sem.Wait(5000000); // 5 second timeout
1.1803 + if (r)
1.1804 + {
1.1805 + RDebug::Printf("Line %d: count=%d r=%d", __LINE__, bufarray.Count(), r);
1.1806 + return r;
1.1807 + }
1.1808 + }
1.1809 + }
1.1810 +
1.1811 + // Free all buffers
1.1812 + while (bufarray.Count())
1.1813 + {
1.1814 + bufarray[0].Close();
1.1815 + bufarray.Remove(0);
1.1816 + if ((bufarray.Count() == KTestPoolSizeInBufs - 2) // wait for free3
1.1817 + || (bufarray.Count() == 1) // wait for free2
1.1818 + || (bufarray.Count() == 0)) // wait for free1/free4
1.1819 + {
1.1820 + r = sem.Wait(5000000); // 5 second timeout
1.1821 + if (r)
1.1822 + {
1.1823 + RDebug::Printf("Line %d: count=%d r=%d", __LINE__, bufarray.Count(), r);
1.1824 + return r;
1.1825 + }
1.1826 + }
1.1827 + }
1.1828 + bufarray.Close();
1.1829 + sem.Close();
1.1830 + return KErrNone;
1.1831 + }
1.1832 +
1.1833 +enum TTestLowSpaceType
1.1834 + {
1.1835 + ETestCancelNonExistent,
1.1836 + ETestCancelTwice
1.1837 + };
1.1838 +
1.1839 +struct TTestThreadLowSpacePanicArgs
1.1840 + {
1.1841 + RShPool* iPool;
1.1842 + TUint iThreshold1;
1.1843 + TUint iThreshold2;
1.1844 + TTestLowSpaceType iType;
1.1845 + };
1.1846 +
1.1847 +TInt ThreadLowSpacePanic(TAny* aArg)
1.1848 + {
1.1849 + if (!aArg)
1.1850 + {
1.1851 + return KErrArgument;
1.1852 + }
1.1853 + TTestThreadLowSpacePanicArgs& targs = *(TTestThreadLowSpacePanicArgs*) aArg;
1.1854 + TRequestStatus rs;
1.1855 + if (targs.iType == ETestCancelNonExistent)
1.1856 + {
1.1857 + targs.iPool->CancelLowSpaceNotification(rs); // should panic
1.1858 + }
1.1859 + else if (targs.iType == ETestCancelTwice)
1.1860 + {
1.1861 + targs.iPool->RequestLowSpaceNotification(targs.iThreshold1, rs);
1.1862 + targs.iPool->CancelLowSpaceNotification(rs);
1.1863 + targs.iPool->CancelLowSpaceNotification(rs); // should panic
1.1864 + }
1.1865 + else
1.1866 + {
1.1867 + return KErrArgument;
1.1868 + }
1.1869 + return KErrNone;
1.1870 + }
1.1871 +
1.1872 +/*
1.1873 + * CancelLowSpaceNotification() no longer panic()s if it can't find the
1.1874 + * notification, so this routine is not currently called.
1.1875 + */
1.1876 +void RequestLowSpacePanic(RShPool& aPool, TUint aThreshold1, TUint aThreshold2, TTestLowSpaceType aType, TInt aLine)
1.1877 + {
1.1878 + static TInt count = 0;
1.1879 + count++;
1.1880 + test.Printf(_L("RequestLowSpacePanic@%d(%d)\n"), aLine, count);
1.1881 + TBool jit = User::JustInTime();
1.1882 + User::SetJustInTime(EFalse);
1.1883 + TInt expectedpaniccode = KErrNone; // Initialised to silence compiler warnings
1.1884 + switch (aType)
1.1885 + {
1.1886 + case ETestCancelNonExistent:
1.1887 + case ETestCancelTwice:
1.1888 + expectedpaniccode = KErrNotFound;
1.1889 + break;
1.1890 + default:
1.1891 + test(EFalse);
1.1892 + }
1.1893 + //
1.1894 + TTestThreadLowSpacePanicArgs targs;
1.1895 + targs.iPool = &aPool;
1.1896 + targs.iThreshold1 = aThreshold1;
1.1897 + targs.iThreshold2 = aThreshold2;
1.1898 + targs.iType = aType;
1.1899 + //
1.1900 + RThread threadpanic;
1.1901 + TRequestStatus threadpanicrs;
1.1902 + TInt r;
1.1903 + TBuf<30> threadname;
1.1904 + threadname.Format(_L("ThreadLowSpacePanic%d"), count);
1.1905 + r = threadpanic.Create(threadname, ThreadLowSpacePanic, KDefaultStackSize, KMinHeapSize, 1 << 20, (TAny*) &targs);
1.1906 + test_KErrNone(r);
1.1907 + threadpanic.Logon(threadpanicrs);
1.1908 + threadpanic.Resume();
1.1909 + User::WaitForRequest(threadpanicrs);
1.1910 + //
1.1911 + test_Equal(expectedpaniccode, threadpanicrs.Int());
1.1912 + test_Equal(EExitPanic, threadpanic.ExitType());
1.1913 + test_Equal(expectedpaniccode, threadpanic.ExitReason());
1.1914 + threadpanic.Close();
1.1915 + User::SetJustInTime(jit);
1.1916 + }
1.1917 +
1.1918 +void NotificationRequests(RShPool& aPool)
1.1919 + {
1.1920 + test.Next(_L("Notifications"));
1.1921 + TInt r;
1.1922 +
1.1923 + RSemaphore sem;
1.1924 + r = sem.CreateGlobal(KTestLowSpaceSemaphore, 0);
1.1925 + test_KErrNone(r);
1.1926 + RTimer timer;
1.1927 + r = timer.CreateLocal();
1.1928 + test_KErrNone(r);
1.1929 + RThread thread;
1.1930 + TRequestStatus threadrs;
1.1931 + r = thread.Create(_L("ThreadNotifications"), ThreadNotifications, KDefaultStackSize, KMinHeapSize, 1 << 20, (TAny*) &aPool);
1.1932 + test_KErrNone(r);
1.1933 + thread.SetPriority(EPriorityMore);
1.1934 + thread.Logon(threadrs);
1.1935 +
1.1936 + test.Printf(_L("Low space notification\n"));
1.1937 + TRequestStatus low1;
1.1938 + TRequestStatus low2;
1.1939 + TRequestStatus low3;
1.1940 + TRequestStatus low4;
1.1941 + TRequestStatus low5;
1.1942 + TRequestStatus low6;
1.1943 + aPool.RequestLowSpaceNotification(1, low1);
1.1944 + test_Equal(KRequestPending, low1.Int());
1.1945 + aPool.RequestLowSpaceNotification(2, low2);
1.1946 + test_Equal(KRequestPending, low2.Int());
1.1947 + aPool.RequestLowSpaceNotification(aPool.FreeCount() - 1, low3);
1.1948 + test_Equal(KRequestPending, low3.Int());
1.1949 + aPool.RequestLowSpaceNotification(1, low4);
1.1950 + test_Equal(KRequestPending, low4.Int());
1.1951 + aPool.RequestLowSpaceNotification(0, low5); // Never completes
1.1952 + test_Equal(KRequestPending, low5.Int());
1.1953 + aPool.RequestLowSpaceNotification(KMaxTUint, low6); // Completes instantly
1.1954 + TRequestStatus timeoutlow;
1.1955 +	timer.After(timeoutlow, 5000000); // 5-second timeout
1.1956 + User::WaitForRequest(low6, timeoutlow);
1.1957 + test_KErrNone(low6.Int());
1.1958 + test_Equal(KRequestPending, low1.Int());
1.1959 + test_Equal(KRequestPending, low2.Int());
1.1960 + test_Equal(KRequestPending, low3.Int());
1.1961 + test_Equal(KRequestPending, low4.Int());
1.1962 + test_Equal(KRequestPending, low5.Int());
1.1963 + timer.Cancel();
1.1964 + User::WaitForRequest(timeoutlow);
1.1965 + thread.Resume();
1.1966 + User::WaitForRequest(low3, threadrs);
1.1967 + test_KErrNone(low3.Int());
1.1968 + test_Equal(KRequestPending, low1.Int());
1.1969 + test_Equal(KRequestPending, low2.Int());
1.1970 + test_Equal(KRequestPending, low4.Int());
1.1971 + test_Equal(KRequestPending, low5.Int());
1.1972 + sem.Signal();
1.1973 + User::WaitForRequest(low2, threadrs);
1.1974 +	test_KErrNone(low2.Int());
1.1975 + test_Equal(KRequestPending, low1.Int());
1.1976 + test_Equal(KRequestPending, low4.Int());
1.1977 + test_Equal(KRequestPending, low5.Int());
1.1978 + sem.Signal();
1.1979 + User::WaitForRequest(low1, threadrs);
1.1980 + test_KErrNone(low1.Int());
1.1981 + User::WaitForRequest(low4, threadrs);
1.1982 + test_KErrNone(low4.Int());
1.1983 + test_Equal(KRequestPending, low5.Int());
1.1984 + test_Equal(EExitPending, thread.ExitType()); // Thread is still running
1.1985 + test_Compare(aPool.FreeCount(), <=, 1);
1.1986 +
1.1987 + test.Printf(_L("Free space notification\n"));
1.1988 + TRequestStatus free1;
1.1989 + TRequestStatus free2;
1.1990 + TRequestStatus free3;
1.1991 + TRequestStatus free4;
1.1992 + TRequestStatus free5;
1.1993 + TRequestStatus free6;
1.1994 + aPool.RequestFreeSpaceNotification(KTestPoolSizeInBufs, free1);
1.1995 + test_Equal(KRequestPending, free1.Int());
1.1996 + aPool.RequestFreeSpaceNotification(KTestPoolSizeInBufs - 1, free2);
1.1997 + test_Equal(KRequestPending, free2.Int());
1.1998 + aPool.RequestFreeSpaceNotification(aPool.FreeCount() + 1, free3);
1.1999 + test_Equal(KRequestPending, free3.Int());
1.2000 + aPool.RequestFreeSpaceNotification(KTestPoolSizeInBufs, free4);
1.2001 + test_Equal(KRequestPending, free4.Int());
1.2002 + aPool.RequestFreeSpaceNotification(KTestPoolSizeInBufs + 1, free5); // Never completes
1.2003 + test_Equal(KRequestPending, free5.Int());
1.2004 + aPool.RequestFreeSpaceNotification(0, free6); // Completes instantly
1.2005 +
1.2006 + TRequestStatus timeoutfree;
1.2007 +	timer.After(timeoutfree, 5000000); // 5-second timeout
1.2008 + User::WaitForRequest(free6, timeoutfree);
1.2009 + test_KErrNone(free6.Int());
1.2010 +
1.2011 + test_Equal(KRequestPending, free1.Int());
1.2012 + test_Equal(KRequestPending, free2.Int());
1.2013 + test_Equal(KRequestPending, free3.Int());
1.2014 + test_Equal(KRequestPending, free4.Int());
1.2015 + test_Equal(KRequestPending, free5.Int());
1.2016 +
1.2017 + timer.Cancel();
1.2018 + User::WaitForRequest(timeoutfree);
1.2019 +
1.2020 + sem.Signal(); // resume thread execution
1.2021 + User::WaitForRequest(free3, threadrs);
1.2022 + test_KErrNone(free3.Int());
1.2023 + test_Equal(KRequestPending, free1.Int());
1.2024 + test_Equal(KRequestPending, free2.Int());
1.2025 + test_Equal(KRequestPending, free4.Int());
1.2026 + test_Equal(KRequestPending, free5.Int());
1.2027 +
1.2028 + sem.Signal();
1.2029 + User::WaitForRequest(free2, threadrs);
1.2030 +	test_KErrNone(free2.Int());
1.2031 +
1.2032 + test_Equal(KRequestPending, free1.Int());
1.2033 + test_Equal(KRequestPending, free4.Int());
1.2034 + test_Equal(KRequestPending, free5.Int());
1.2035 + sem.Signal();
1.2036 +
1.2037 + User::WaitForRequest(free1, threadrs);
1.2038 + test_KErrNone(free1.Int());
1.2039 + test_KErrNone(free4.Int());
1.2040 +
1.2041 + test_Equal(KRequestPending, free5.Int());
1.2042 + test_Equal(EExitPending, thread.ExitType()); // Thread is still running
1.2043 +
1.2044 + test_Compare(aPool.FreeCount(), >=, KTestPoolSizeInBufs);
1.2045 +
1.2046 + // Complete the requests still pending...
1.2047 + aPool.CancelLowSpaceNotification(low5);
1.2048 + User::WaitForRequest(low5);
1.2049 +
1.2050 + aPool.CancelFreeSpaceNotification(free5);
1.2051 + User::WaitForRequest(free5);
1.2052 +
1.2053 + // Let thread complete
1.2054 + sem.Signal();
1.2055 + User::WaitForRequest(threadrs);
1.2056 + test_Equal(EExitKill, thread.ExitType());
1.2057 + test_KErrNone(thread.ExitReason());
1.2058 + thread.Close();
1.2059 + sem.Close();
1.2060 + timer.Close();
1.2061 + }
1.2062 +
1.2063 +/*
1.2064 +@SYMTestCaseID 9
1.2065 +@SYMTestCaseDesc Cancel low- and free-space notifications
1.2066 +@SYMREQ REQ11423
1.2067 +@SYMTestActions
1.2068 +	Set low-space and free-space notifications.
1.2069 + Cancel them.
1.2070 +@SYMTestExpectedResults
1.2071 + All OK.
1.2072 +@SYMTestPriority Medium
1.2073 +*/
1.2074 +
1.2075 +void CancelNotificationRequests(RShPool& aPool)
1.2076 + {
1.2077 + test.Next(_L("Cancel notifications"));
1.2078 + TInt r;
1.2079 +
1.2080 + RSemaphore sem;
1.2081 + r = sem.CreateGlobal(KTestLowSpaceSemaphore, 0);
1.2082 + test_KErrNone(r);
1.2083 + RThread thread;
1.2084 + TRequestStatus threadrs;
1.2085 + r = thread.Create(_L("ThreadCancelNotifications"), ThreadNotifications, KDefaultStackSize, KMinHeapSize, 1 << 20, (TAny*) &aPool);
1.2086 + test_KErrNone(r);
1.2087 + thread.SetPriority(EPriorityLess);
1.2088 + thread.Logon(threadrs);
1.2089 +
1.2090 + test.Printf(_L("Cancel low space notifications\n"));
1.2091 + // Low space notification cancel
1.2092 + TRequestStatus low;
1.2093 + aPool.RequestLowSpaceNotification(1, low);
1.2094 + aPool.CancelLowSpaceNotification(low);
1.2095 + test_Equal(KErrCancel, low.Int());
1.2096 + // We should be able to cancel again without panic()ing
1.2097 + // (no guarantees on return code; maybe Cancel() should have void return type?)
1.2098 + aPool.CancelLowSpaceNotification(low);
1.2099 + test.Printf(_L("Second cancel returned %d\n"), low.Int());
1.2100 + TRequestStatus low2;
1.2101 + aPool.RequestLowSpaceNotification(1, low2); // For thread sync
1.2102 + thread.Resume();
1.2103 + sem.Signal(2);
1.2104 + User::WaitForRequest(low2, threadrs);
1.2105 + test_KErrNone(low2.Int());
1.2106 + test_Equal(EExitPending, thread.ExitType()); // Thread is still running
1.2107 + test_Compare(aPool.FreeCount(), <=, 1);
1.2108 +
1.2109 + test.Printf(_L("Cancel free space notifications\n"));
1.2110 + TRequestStatus free;
1.2111 +	aPool.CancelFreeSpaceNotification(free); // Cancel non-existent notification
1.2112 + aPool.RequestFreeSpaceNotification(KTestPoolSizeInBufs, free);
1.2113 +	aPool.CancelLowSpaceNotification(free);	// Use wrong method (has no effect)
1.2114 +	aPool.CancelFreeSpaceNotification(free);	// Cancel with the correct method
1.2115 + test_Equal(KErrCancel, free.Int());
1.2116 + aPool.CancelFreeSpaceNotification(free); // Already cancelled
1.2117 +
1.2118 + // Complete the requests still pending...
1.2119 + User::WaitForRequest(low);
1.2120 +
1.2121 + sem.Signal(4); // Resume thread execution and let it complete
1.2122 + User::WaitForRequest(threadrs);
1.2123 + test_KErrNone(threadrs.Int());
1.2124 + test_Equal(EExitKill, thread.ExitType());
1.2125 + test_KErrNone(thread.ExitReason());
1.2126 + test_Compare(aPool.FreeCount(), >=, KTestPoolSizeInBufs);
1.2127 + thread.Close();
1.2128 + sem.Close();
1.2129 + }
1.2130 +
1.2131 +
1.2132 +/*
1.2133 +@SYMTestCaseID 10
1.2134 +@SYMTestCaseDesc Grow and shrink pool
1.2135 +@SYMREQ REQ11423
1.2136 +@SYMTestActions
1.2137 +	1. Test thread creates pools with various sizing attributes.
1.2138 +	2. Test thread keeps allocating buffers from the pool.
1.2139 +	3. Test thread keeps freeing buffers back to the pool.
1.2140 +	4. Test thread frees all buffers and closes the pool.
1.2141 +@SYMTestExpectedResults
1.2142 +	Pools grow and shrink as expected.
1.2143 +@SYMTestPriority High
1.2144 +*/
1.2145 +
1.2146 +const TInt KTestFreeCountTimeOut = 20000000; // 20 seconds (of thread inactivity)
1.2147 +const TInt KTestWaitBeforeRetry = 2000; // 0.002 second
1.2148 +
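+// Multiply n by f, where f is treated as an unsigned 24.8 fixed-point
+// ratio: the result is n * f / 256, truncated, and saturated to
+// KMaxTUint32. For example MultFx248(100, 64) == 25, and
+// MultFx248(25, 288) == 28 (28.125 truncated).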
1.2149 +TUint MultFx248(TUint n, TUint f)
1.2150 + {
1.2151 + TUint64 r = (TUint64) n * f;
1.2152 + I64LSR(r, 8);
1.2153 + return r > KMaxTUint32 ? KMaxTUint32 : I64LOW(r);
1.2154 + }
1.2155 +
1.2156 +class TTestPoolModel
1.2157 + {
1.2158 +public:
1.2159 + TTestPoolModel(TShPoolInfo& aInfo);
1.2160 + void Alloc();
1.2161 + void Free();
1.2162 + TUint FreeCount();
1.2163 + void DisplayCounters();
1.2164 +private:
1.2165 + void CalcGSP();
1.2166 + void CheckGrowShrink();
1.2167 + void Grow();
1.2168 + void Shrink();
1.2169 +private:
1.2170 + TUint iAllocated;
1.2171 + TUint iFree;
1.2172 + //
1.2173 + TUint iInitial;
1.2174 + TUint iMax;
1.2175 + TUint iGrowTriggerRatio;
1.2176 + TUint iGrowByRatio;
1.2177 + TUint iShrinkByRatio;
1.2178 + TUint iShrinkHysteresisRatio;
1.2179 + TUint iPoolFlags;
1.2180 + //
1.2181 + TUint iGrowTrigger;
1.2182 + TUint iShrinkTrigger;
1.2183 + //
1.2184 + TBool iDebug;
1.2185 + };
1.2186 +
1.2187 +TTestPoolModel::TTestPoolModel(TShPoolInfo& aInfo)
1.2188 + {
1.2189 + iInitial = aInfo.iInitialBufs;
1.2190 + iMax = aInfo.iMaxBufs;
1.2191 + iGrowTriggerRatio = aInfo.iGrowTriggerRatio;
1.2192 + iGrowByRatio = aInfo.iGrowByRatio;
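+	// The shrink-by ratio is chosen so that one shrink roughly undoes one
+	// grow: growing by g/256 and then shrinking by s/256 restores the
+	// original size when (1 + g/256) * (1 - s/256) == 1, i.e.
+	// s == 256 - 65536 / (256 + g).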
1.2193 + iShrinkByRatio = 256 - 65536 / (256 + iGrowByRatio);
1.2194 + iShrinkHysteresisRatio = aInfo.iShrinkHysteresisRatio;
1.2195 + iPoolFlags = aInfo.iFlags;
1.2196 + iAllocated = 0;
1.2197 + iFree = iInitial;
1.2198 + iDebug = EFalse; // Set this to ETrue to display detailed information
1.2199 +
1.2200 + CalcGSP();
1.2201 + if (iDebug)
1.2202 + {
1.2203 + test.Printf(_L("A F A+F GT ST \n"));
1.2204 + test.Printf(_L("==============================\n"));
1.2205 + DisplayCounters();
1.2206 + }
1.2207 + }
1.2208 +
1.2209 +void TTestPoolModel::Alloc()
1.2210 + {
1.2211 + iAllocated++;
1.2212 + iFree--;
1.2213 + CheckGrowShrink();
1.2214 + }
1.2215 +
1.2216 +void TTestPoolModel::Free()
1.2217 + {
1.2218 + iAllocated--;
1.2219 + iFree++;
1.2220 + CheckGrowShrink();
1.2221 + }
1.2222 +
1.2223 +TUint TTestPoolModel::FreeCount()
1.2224 + {
1.2225 + return iFree;
1.2226 + }
1.2227 +
1.2228 +void TTestPoolModel::CalcGSP()
1.2229 + {
1.2230 + TUint n = iAllocated + iFree;
1.2231 +
1.2232 + // If the pool is at its maximum size, we can't grow
1.2233 + if (n >= iMax || iGrowTriggerRatio == 0 /*|| iCommittedPages >= iMaxPages*/)
1.2234 + {
1.2235 + iGrowTrigger = 0;
1.2236 + }
1.2237 + else
1.2238 + {
1.2239 + iGrowTrigger = MultFx248(n, iGrowTriggerRatio);
1.2240 +
1.2241 + // Deal with rounding towards zero
1.2242 + if (iGrowTrigger == 0)
1.2243 + iGrowTrigger = 1;
1.2244 + }
1.2245 +
1.2246 + // If no growing has happened, we can't shrink
1.2247 + if (n <= iInitial || iGrowTriggerRatio == 0 || (iPoolFlags & EShPoolSuppressShrink) != 0)
1.2248 + {
1.2249 + iShrinkTrigger = iMax;
1.2250 + }
1.2251 + else
1.2252 + {
1.2253 + // To ensure that shrinking doesn't immediately happen after growing, the trigger
1.2254 + // amount is the grow trigger + the grow amount (which is the number of free buffers
1.2255 + // just after a grow) times the shrink hysteresis value.
1.2256 + iShrinkTrigger = MultFx248(n, iGrowTriggerRatio + iGrowByRatio);
1.2257 + iShrinkTrigger = MultFx248(iShrinkTrigger, iShrinkHysteresisRatio);
1.2258 +
1.2259 + // Deal with rounding towards zero
1.2260 + if (iShrinkTrigger == 0)
1.2261 + iShrinkTrigger = 1;
1.2262 +
1.2263 + // If the shrink trigger ends up > the number of buffers currently in
1.2264 + // the pool, set it to that number (less 1, since the test is "> trigger").
1.2265 + // This means the pool will only shrink when all the buffers have been freed.
1.2266 + if (iShrinkTrigger >= n)
1.2267 + iShrinkTrigger = n - 1;
1.2268 + }
1.2269 + if (iDebug)
1.2270 + {
1.2271 + DisplayCounters();
1.2272 + }
1.2273 + }
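+// Worked example, using Pool A's parameters from PoolGrowingUser(): with
+// n == 100 buffers in the pool, iGrowTriggerRatio == 32 and
+// iGrowByRatio == 32, the pool grows when fewer than
+// MultFx248(100, 32) == 12 buffers are free; with
+// iShrinkHysteresisRatio == 288 it shrinks when more than
+// MultFx248(MultFx248(100, 64), 288) == 28 buffers are free.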
1.2274 +
1.2275 +void TTestPoolModel::CheckGrowShrink()
1.2276 + {
1.2277 + if (iFree < iGrowTrigger)
1.2278 + {
1.2279 + Grow();
1.2280 + CheckGrowShrink();
1.2281 + }
1.2282 + if (iFree > iShrinkTrigger)
1.2283 + {
1.2284 + Shrink();
1.2285 + CheckGrowShrink();
1.2286 + }
1.2287 + }
1.2288 +
1.2289 +void TTestPoolModel::Grow()
1.2290 + {
1.2291 + TUint headroom = iMax - (iAllocated + iFree);
1.2292 + TUint growby = MultFx248(iAllocated + iFree, iGrowByRatio);
1.2293 + if (growby == 0) // Handle round-to-zero
1.2294 + growby = 1;
1.2295 + if (growby > headroom)
1.2296 + growby = headroom;
1.2297 + iFree += growby;
1.2298 + if (iDebug)
1.2299 + {
1.2300 + test.Printf(_L("GROW by %d!\n"), growby);
1.2301 + }
1.2302 + CalcGSP();
1.2303 + }
1.2304 +
1.2305 +void TTestPoolModel::Shrink()
1.2306 + {
1.2307 + TUint grownBy = iAllocated + iFree - iInitial;
1.2308 + TUint shrinkby = MultFx248(iAllocated + iFree, iShrinkByRatio);
1.2309 + if (shrinkby == 0) // Handle round-to-zero
1.2310 + shrinkby = 1;
1.2311 + if (shrinkby > grownBy)
1.2312 + shrinkby = grownBy;
1.2313 + if (shrinkby > iFree)
1.2314 + shrinkby = iFree;
1.2315 + iFree -= shrinkby;
1.2316 + if (iDebug)
1.2317 + {
1.2318 + test.Printf(_L("SHRINK by %d!\n"), shrinkby);
1.2319 + }
1.2320 + CalcGSP();
1.2321 + }
1.2322 +
1.2323 +void TTestPoolModel::DisplayCounters()
1.2324 + {
1.2325 + test.Printf(_L("%-6u%-6u%-6u%-6u%-6u\n"), iAllocated, iFree, iAllocated + iFree, iGrowTrigger, iShrinkTrigger);
1.2326 + }
1.2327 +
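+// PoolGrowingTestRoutine() exercises growing and shrinking: it allocates
+// buffers until the pool is full (growing it to iMaxBufs), frees roughly a
+// third of them (letting it shrink), re-allocates back to full, and finally
+// frees everything (shrinking back to iInitialBufs). After each step the
+// model's predicted free count is polled against the pool's FreeCount().
+// That polling loop appears four times below; an equivalent helper (an
+// illustrative sketch only, not used by the test) would look like:
+//
+//	void WaitForFreeCount(RShPool& aPool, TTestPoolModel& aModel)
+//		{
+//		TInt timeout = KTestFreeCountTimeOut / KTestWaitBeforeRetry;
+//		while (aModel.FreeCount() != aPool.FreeCount())
+//			{
+//			User::After(KTestWaitBeforeRetry);
+//			test_Assert(--timeout,
+//				test.Printf(_L("Timeout: Free==%u (expected %u)\n"), aPool.FreeCount(), aModel.FreeCount());
+//				aModel.DisplayCounters();
+//				);
+//			}
+//		}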
1.2328 +void PoolGrowingTestRoutine(const TShPoolCreateInfo& aInfo, TUint aBufferFlags = 0)
1.2329 + {
1.2330 + TInt r;
1.2331 + TInt timeout;
1.2332 + RShPool pool;
1.2333 + r = pool.Create(aInfo, KDefaultPoolHandleFlags);
1.2334 + test_KErrNone(r);
1.2335 +
1.2336 + TShPoolInfo info;
1.2337 + pool.GetInfo(info);
1.2338 +
1.2339 + // Only set the buffer window if we're going to map the buffers
1.2340 + if (!(aBufferFlags & EShPoolAllocNoMap) && (info.iFlags & EShPoolPageAlignedBuffer))
1.2341 + {
1.2342 + r = pool.SetBufferWindow(-1, ETrue);
1.2343 +		test_KErrNone(r);
1.2344 + }
1.2345 +
1.2346 + TTestPoolModel model(info);
1.2347 + RArray<RShBuf> bufarray;
1.2348 + test_Equal(info.iInitialBufs, pool.FreeCount());
1.2349 +
1.2350 + // Buffer allocation
1.2351 + do
1.2352 + {
1.2353 + timeout = KTestFreeCountTimeOut / KTestWaitBeforeRetry;
1.2354 + while (model.FreeCount() != pool.FreeCount())
1.2355 + {
1.2356 + User::After(KTestWaitBeforeRetry);
1.2357 + test_Assert(--timeout,
1.2358 + test.Printf(_L("Timeout: Free==%u (expected %u)\n"), pool.FreeCount(), model.FreeCount());
1.2359 + model.DisplayCounters();
1.2360 + );
1.2361 + if ((timeout * KTestWaitBeforeRetry) % 1000000 == 0)
1.2362 + {
1.2363 + test.Printf(_L("Time out in %d seconds! (line %d)\n"), timeout * KTestWaitBeforeRetry / 1000000, __LINE__);
1.2364 + }
1.2365 + }
1.2366 + RShBuf buf;
1.2367 + r = buf.Alloc(pool, aBufferFlags);
1.2368 + if (r == KErrNoMemory)
1.2369 + {
1.2370 + // We expect to get a failure when all buffers are allocated
1.2371 + if ((TUint) bufarray.Count() == info.iMaxBufs)
1.2372 + break;
1.2373 + if (!(aBufferFlags & EShPoolAllocCanWait))
1.2374 + {
1.2375 + // Give the Management DFC some time to run, then try allocating again
1.2376 + User::After(1000000); // 1 second
1.2377 + r = buf.Alloc(pool);
1.2378 + if (r)
1.2379 + {
1.2380 + test.Printf(_L("Alloc fail after %d of %d; Free==%u (expected %u)\n"),
1.2381 + bufarray.Count(), info.iMaxBufs, pool.FreeCount(), model.FreeCount());
1.2382 + break;
1.2383 + }
1.2384 + }
1.2385 + }
1.2386 +
1.2387 + if (r == KErrNone)
1.2388 + {
1.2389 + model.Alloc();
1.2390 + if (!(aBufferFlags & EShPoolAllocNoMap))
1.2391 + {
1.2392 +			TPtr8 ptr(buf.Ptr(), buf.Size(), buf.Size());
1.2393 + ptr.Fill(bufarray.Count() % 256);
1.2394 + }
1.2395 + bufarray.Append(buf);
1.2396 + }
1.2397 + }
1.2398 + while (r == KErrNone);
1.2399 +
1.2400 + test_Equal(KErrNoMemory, r);
1.2401 + test_Equal(info.iMaxBufs, bufarray.Count());
1.2402 + test_Equal(0, pool.FreeCount());
1.2403 +
1.2404 + // Now free no more than 1/3 of these buffers...
1.2405 + while ((TUint) bufarray.Count() > 2 * info.iMaxBufs / 3)
1.2406 + {
1.2407 + // remove buffers from the back of the array
1.2408 + if (!(aBufferFlags & EShPoolAllocNoMap))
1.2409 + {
1.2410 +			TPtr8 ptr(bufarray[bufarray.Count() - 1].Ptr(), bufarray[bufarray.Count() - 1].Size(), bufarray[bufarray.Count() - 1].Size());
1.2411 + ptr.Fill((bufarray.Count() + 1) % 256);
1.2412 + }
1.2413 + bufarray[bufarray.Count() - 1].Close();
1.2414 + bufarray.Remove(bufarray.Count() - 1);
1.2415 + model.Free();
1.2416 +
1.2417 + timeout = KTestFreeCountTimeOut / KTestWaitBeforeRetry;
1.2418 + while (model.FreeCount() != pool.FreeCount())
1.2419 + {
1.2420 + User::After(KTestWaitBeforeRetry);
1.2421 + test_Assert(--timeout,
1.2422 + test.Printf(_L("Timeout: Free==%u (expected %u)\n"), pool.FreeCount(), model.FreeCount());
1.2423 + model.DisplayCounters();
1.2424 + );
1.2425 + if ((timeout * KTestWaitBeforeRetry) % 1000000 == 0)
1.2426 + {
1.2427 + test.Printf(_L("Time out in %d seconds! (line %d)\n"), timeout * KTestWaitBeforeRetry / 1000000, __LINE__);
1.2428 + }
1.2429 + }
1.2430 + }
1.2431 +
1.2432 + // ... and re-allocate them
1.2433 + do
1.2434 + {
1.2435 + timeout = KTestFreeCountTimeOut / KTestWaitBeforeRetry;
1.2436 + while (model.FreeCount() != pool.FreeCount())
1.2437 + {
1.2438 + User::After(KTestWaitBeforeRetry);
1.2439 + test_Assert(--timeout,
1.2440 + test.Printf(_L("Timeout: Free==%u (expected %u)\n"), pool.FreeCount(), model.FreeCount());
1.2441 + model.DisplayCounters();
1.2442 + );
1.2443 + if ((timeout * KTestWaitBeforeRetry) % 1000000 == 0)
1.2444 + {
1.2445 + test.Printf(_L("Time out in %d seconds! (line %d)\n"), timeout * KTestWaitBeforeRetry / 1000000, __LINE__);
1.2446 + }
1.2447 + }
1.2448 + RShBuf buf;
1.2449 + r = buf.Alloc(pool, aBufferFlags);
1.2450 + if (r == KErrNoMemory)
1.2451 + {
1.2452 + // We expect to get a failure when all buffers are allocated
1.2453 + if ((TUint) bufarray.Count() == info.iMaxBufs)
1.2454 + break;
1.2455 + if (!(aBufferFlags & EShPoolAllocCanWait))
1.2456 + {
1.2457 + // Give the Management DFC some time to run, then try allocating again
1.2458 + User::After(1000000); // 1 second
1.2459 + r = buf.Alloc(pool);
1.2460 + if (r)
1.2461 + {
1.2462 + test.Printf(_L("Alloc fail after %d of %d; Free==%u (expected %u)\n"),
1.2463 + bufarray.Count(), info.iMaxBufs, pool.FreeCount(), model.FreeCount());
1.2464 + break;
1.2465 + }
1.2466 + }
1.2467 + }
1.2468 +
1.2469 + if (r == KErrNone)
1.2470 + {
1.2471 + model.Alloc();
1.2472 + if (!(aBufferFlags & EShPoolAllocNoMap))
1.2473 + {
1.2474 +			TPtr8 ptr(buf.Ptr(), buf.Size(), buf.Size());
1.2475 + ptr.Fill(bufarray.Count() % 256);
1.2476 + }
1.2477 + bufarray.Append(buf);
1.2478 + }
1.2479 + }
1.2480 + while (r == KErrNone);
1.2481 +
1.2482 + test_Equal(KErrNoMemory, r);
1.2483 + test_Equal(info.iMaxBufs, bufarray.Count());
1.2484 + test_Equal(0, pool.FreeCount());
1.2485 +
1.2486 + // Free all buffers
1.2487 + while (bufarray.Count())
1.2488 + {
1.2489 + // remove buffers from the back of the array
1.2490 + if (!(aBufferFlags & EShPoolAllocNoMap))
1.2491 + {
1.2492 +			TPtr8 ptr(bufarray[bufarray.Count() - 1].Ptr(), bufarray[bufarray.Count() - 1].Size(), bufarray[bufarray.Count() - 1].Size());
1.2493 + ptr.Fill((bufarray.Count() + 1) % 256);
1.2494 + }
1.2495 + bufarray[bufarray.Count() - 1].Close();
1.2496 + bufarray.Remove(bufarray.Count() - 1);
1.2497 + model.Free();
1.2498 +
1.2499 + timeout = KTestFreeCountTimeOut / KTestWaitBeforeRetry;
1.2500 + while (model.FreeCount() != pool.FreeCount())
1.2501 + {
1.2502 + User::After(KTestWaitBeforeRetry);
1.2503 + test_Assert(--timeout,
1.2504 + test.Printf(_L("Timeout: Free==%u (expected %u)\n"), pool.FreeCount(), model.FreeCount());
1.2505 + model.DisplayCounters();
1.2506 + );
1.2507 + if ((timeout * KTestWaitBeforeRetry) % 1000000 == 0)
1.2508 + {
1.2509 + test.Printf(_L("Time out in %d seconds! (line %d)\n"), timeout * KTestWaitBeforeRetry / 1000000, __LINE__);
1.2510 + }
1.2511 + }
1.2512 + }
1.2513 +
1.2514 + // Pool should have shrunk back to its initial size
1.2515 + test_Equal(info.iInitialBufs, pool.FreeCount());
1.2516 + bufarray.Close();
1.2517 + pool.Close();
1.2518 + }
1.2519 +
1.2520 +void PoolGrowingUser()
1.2521 + {
1.2522 + test.Next(_L("Pool Growing/Shrinking (User)"));
1.2523 + TInt r;
1.2524 + TInt pagesize;
1.2525 + r = HAL::Get(HAL::EMemoryPageSize, pagesize);
1.2526 + test_KErrNone(r);
1.2527 + // Pool A: Non-page aligned pool (64-byte alignment)
1.2528 + {
1.2529 + TInt alignment = 6;
1.2530 + TInt maxbufs = KTestPoolSizeInBytes / RoundUp(*PtrBufSize, alignment);
1.2531 + if (maxbufs > 32000)
1.2532 + {
1.2533 + maxbufs = 32000;
1.2534 + }
1.2535 + TInt initialbufs = maxbufs / 2;
1.2536 + TInt growtrigger = 32;
1.2537 + TInt growby = 32;
1.2538 + TInt shrinkhys = 288;
1.2539 + test.Printf(_L("POOL A: BufSize=%d InitialBufs=%d MaxBufs=%d GrowTrigger=%d GrowBy=%d ShrinkHys=%d Alignment=%d\n"),
1.2540 + *PtrBufSize, initialbufs, maxbufs, growtrigger, growby, shrinkhys, alignment);
1.2541 + TShPoolCreateInfo inf(TShPoolCreateInfo::ENonPageAlignedBuffer, *PtrBufSize, initialbufs, alignment);
1.2542 + r = inf.SetSizingAttributes(maxbufs, growtrigger, growby, shrinkhys);
1.2543 + test_KErrNone(r);
1.2544 + PoolGrowingTestRoutine(inf);
1.2545 + }
1.2546 +
1.2547 + // Pool B: Non-page aligned pool (maximum alignment)
1.2548 + {
1.2549 + TInt alignment = Log2(pagesize);
1.2550 + TInt maxbufs = KTestPoolSizeInBytes / RoundUp(*PtrBufSize, alignment);
1.2551 + if (maxbufs > 32000)
1.2552 + {
1.2553 + maxbufs = 32000;
1.2554 + }
1.2555 + TInt initialbufs = maxbufs / 4;
1.2556 + TInt growtrigger = 32;
1.2557 + TInt growby = 32;
1.2558 + TInt shrinkhys = 288;
1.2559 + test.Printf(_L("POOL B: BufSize=%d InitialBufs=%d MaxBufs=%d GrowTrigger=%d GrowBy=%d ShrinkHys=%d Alignment=%d\n"),
1.2560 + *PtrBufSize, initialbufs, maxbufs, growtrigger, growby, shrinkhys, alignment);
1.2561 + TShPoolCreateInfo inf(TShPoolCreateInfo::ENonPageAlignedBuffer, *PtrBufSize, initialbufs, alignment);
1.2562 + r = inf.SetSizingAttributes(maxbufs, growtrigger, growby, shrinkhys);
1.2563 + test_KErrNone(r);
1.2564 + PoolGrowingTestRoutine(inf);
1.2565 + }
1.2566 +
1.2567 + // Pool C: Page aligned pool without guard pages
1.2568 + {
1.2569 + TInt maxbufs = KTestPoolSizeInBytes / RoundUp(*PtrBufSize, Log2(pagesize));
1.2570 + if (maxbufs > 32000)
1.2571 + {
1.2572 + maxbufs = 32000;
1.2573 + }
1.2574 + TInt initialbufs = maxbufs * 3 / 8;
1.2575 + TInt growtrigger = 32;
1.2576 + TInt growby = 32;
1.2577 + TInt shrinkhys = 288;
1.2578 + test.Printf(_L("POOL C: BufSize=%d InitialBufs=%d MaxBufs=%d GrowTrigger=%d GrowBy=%d ShrinkHys=%d Page-Aligned\n"),
1.2579 + *PtrBufSize, initialbufs, maxbufs, growtrigger, growby, shrinkhys);
1.2580 + TShPoolCreateInfo inf(TShPoolCreateInfo::EPageAlignedBuffer, *PtrBufSize, initialbufs);
1.2581 + r = inf.SetSizingAttributes(maxbufs, growtrigger, growby, shrinkhys);
1.2582 + test_KErrNone(r);
1.2583 + PoolGrowingTestRoutine(inf);
1.2584 + }
1.2585 +
1.2586 +	// Pool D: Page aligned pool with guard pages
1.2587 + {
1.2588 + TInt maxbufs = KTestPoolSizeInBytes / RoundUp(*PtrBufSize, Log2(pagesize));
1.2589 + if (maxbufs > 32000)
1.2590 + {
1.2591 + maxbufs = 32000;
1.2592 + }
1.2593 + TInt initialbufs = maxbufs / 2;
1.2594 + TInt growtrigger = 32;
1.2595 + TInt growby = 32;
1.2596 + TInt shrinkhys = 288;
1.2597 + test.Printf(_L("POOL D: BufSize=%d InitialBufs=%d MaxBufs=%d GrowTrigger=%d GrowBy=%d ShrinkHys=%d Page-Aligned+Guard\n"),
1.2598 + *PtrBufSize, initialbufs, maxbufs, growtrigger, growby, shrinkhys);
1.2599 + TShPoolCreateInfo inf(TShPoolCreateInfo::EPageAlignedBuffer, *PtrBufSize, initialbufs);
1.2600 + r = inf.SetSizingAttributes(maxbufs, growtrigger, growby, shrinkhys);
1.2601 + test_KErrNone(r);
1.2602 + r = inf.SetGuardPages();
1.2603 + test_KErrNone(r);
1.2604 + PoolGrowingTestRoutine(inf);
1.2605 + }
1.2606 +
1.2607 + // Pool A': Non-page aligned pool (64-byte alignment)
1.2608 + {
1.2609 + TInt alignment = 6;
1.2610 + TInt maxbufs = KTestPoolSizeInBytes / RoundUp(*PtrBufSize, alignment);
1.2611 + if (maxbufs > 32000)
1.2612 + {
1.2613 + maxbufs = 32000;
1.2614 + }
1.2615 + TInt initialbufs = 1;
1.2616 + TInt growtrigger = 32;
1.2617 + TInt growby = 256;
1.2618 + TInt shrinkhys = 512;
1.2619 + test.Printf(_L("POOL A': BufSize=%d InitialBufs=%d MaxBufs=%d GrowTrigger=%d GrowBy=%d ShrinkHys=%d Alignment=%d\n"),
1.2620 + *PtrBufSize, initialbufs, maxbufs, growtrigger, growby, shrinkhys, alignment);
1.2621 + TShPoolCreateInfo inf(TShPoolCreateInfo::ENonPageAlignedBuffer, *PtrBufSize, initialbufs, alignment);
1.2622 + r = inf.SetSizingAttributes(maxbufs, growtrigger, growby, shrinkhys);
1.2623 + test_KErrNone(r);
1.2624 + PoolGrowingTestRoutine(inf);
1.2625 + }
1.2626 +
1.2627 + // Pool A'': Non-page aligned pool (64-byte alignment) - AllocCanWait
1.2628 + {
1.2629 + TInt alignment = 6;
1.2630 + TInt maxbufs = KTestPoolSizeInBytes / RoundUp(*PtrBufSize, alignment);
1.2631 + if (maxbufs > 32000)
1.2632 + {
1.2633 + maxbufs = 32000;
1.2634 + }
1.2635 + TInt initialbufs = 1;
1.2636 + TInt growtrigger = 1;
1.2637 + TInt growby = 1;
1.2638 + TInt shrinkhys = 257;
1.2639 + test.Printf(_L("POOL A'': BufSize=%d InitialBufs=%d MaxBufs=%d GrowTrigger=%d GrowBy=%d ShrinkHys=%d Alignment=%d\n"),
1.2640 + *PtrBufSize, initialbufs, maxbufs, growtrigger, growby, shrinkhys, alignment);
1.2641 + TShPoolCreateInfo inf(TShPoolCreateInfo::ENonPageAlignedBuffer, *PtrBufSize, initialbufs, alignment);
1.2642 + r = inf.SetSizingAttributes(maxbufs, growtrigger, growby, shrinkhys);
1.2643 + test_KErrNone(r);
1.2644 + PoolGrowingTestRoutine(inf, EShPoolAllocCanWait);
1.2645 + }
1.2646 +
1.2647 +	// Pool D': Page aligned pool with guard pages
1.2648 + {
1.2649 + TInt maxbufs = KTestPoolSizeInBytes / RoundUp(*PtrBufSize, Log2(pagesize));
1.2650 + if (maxbufs > 32000)
1.2651 + {
1.2652 + maxbufs = 32000;
1.2653 + }
1.2654 + TInt initialbufs = 1;
1.2655 + TInt growtrigger = 1;
1.2656 + TInt growby = 1024;
1.2657 + TInt shrinkhys = 2048;
1.2658 + test.Printf(_L("POOL D': BufSize=%d InitialBufs=%d MaxBufs=%d GrowTrigger=%d GrowBy=%d ShrinkHys=%d Page-Aligned+Guard\n"),
1.2659 + *PtrBufSize, initialbufs, maxbufs, growtrigger, growby, shrinkhys);
1.2660 + TShPoolCreateInfo inf(TShPoolCreateInfo::EPageAlignedBuffer, *PtrBufSize, initialbufs);
1.2661 + r = inf.SetSizingAttributes(maxbufs, growtrigger, growby, shrinkhys);
1.2662 + test_KErrNone(r);
1.2663 + r = inf.SetGuardPages();
1.2664 + test_KErrNone(r);
1.2665 + PoolGrowingTestRoutine(inf);
1.2666 + }
1.2667 +	// Pool D'': Page aligned pool with guard pages - NoBufferMap
1.2668 + {
1.2669 + TInt maxbufs = KTestPoolSizeInBytes / RoundUp(*PtrBufSize, Log2(pagesize));
1.2670 + if (maxbufs > 32000)
1.2671 + {
1.2672 + maxbufs = 32000;
1.2673 + }
1.2674 + TInt initialbufs = maxbufs / 2;
1.2675 + TInt growtrigger = 32;
1.2676 + TInt growby = 32;
1.2677 + TInt shrinkhys = 288;
1.2678 + test.Printf(_L("POOL D'': BufSize=%d InitialBufs=%d MaxBufs=%d GrowTrigger=%d GrowBy=%d ShrinkHys=%d Page-Aligned+Guard\n"),
1.2679 + *PtrBufSize, initialbufs, maxbufs, growtrigger, growby, shrinkhys);
1.2680 + TShPoolCreateInfo inf(TShPoolCreateInfo::EPageAlignedBuffer, *PtrBufSize, initialbufs);
1.2681 + r = inf.SetSizingAttributes(maxbufs, growtrigger, growby, shrinkhys);
1.2682 + test_KErrNone(r);
1.2683 + r = inf.SetGuardPages();
1.2684 + test_KErrNone(r);
1.2685 + PoolGrowingTestRoutine(inf, EShPoolAllocNoMap);
1.2686 + }
1.2687 + }
1.2688 +
1.2689 +/*
1.2690 +@SYMTestCaseID X3
1.2691 +@SYMTestCaseDesc Contiguous buffer allocation
1.2692 +@SYMREQ REQ11423
1.2693 +@SYMTestActions
1.2694 + Create a pool with the Contiguous attribute and allocate buffers.
1.2695 +@SYMTestExpectedResults
1.2696 +	Buffer memory is physically contiguous.
1.2697 +@SYMTestPriority High
1.2698 +*/
1.2699 +
1.2700 +void ContiguousPoolKernel()
1.2701 + {
1.2702 + test.Next(_L("Contiguous Pool (Kernel)"));
1.2703 +#ifdef __WINS__
1.2704 + test.Printf(_L("Does not run on the emulator. Skipped\n"));
1.2705 +#else
1.2706 + TInt r;
1.2707 + TInt pagesize;
1.2708 + r = HAL::Get(HAL::EMemoryPageSize, pagesize);
1.2709 + test_KErrNone(r);
1.2710 + if (*PtrBufSize <= pagesize)
1.2711 + {
1.2712 + test.Printf(_L("Buffer size <= page size. Skipped.\n"));
1.2713 + return;
1.2714 + }
1.2715 +
1.2716 + TShPoolCreateInfo inf(TShPoolCreateInfo::EPageAlignedBuffer, *PtrBufSize, KTestPoolSizeInBufs);
1.2717 +// r = inf.SetSizingAttributes(KTestPoolSizeInBufs, 25, 25, 25600);
1.2718 +// test_KErrNone(r);
1.2719 +
1.2720 + r = Ldd.ContiguousPoolKernel(inf);
1.2721 + test_KErrNone(r);
1.2722 +
1.2723 +#endif // __WINS__
1.2724 + }
1.2725 +
1.2726 +void ShBufPin()
1.2727 + {
1.2728 + test.Next(_L("Buffer pinning"));
1.2729 +#ifdef __WINS__
1.2730 + test.Printf(_L("Does not run on the emulator. Skipped\n"));
1.2731 +#else
1.2732 + TInt r;
1.2733 + RShPool pool1;
1.2734 + RShBuf buf1;
1.2735 + TShPoolCreateInfo inf1(TShPoolCreateInfo::ENonPageAlignedBuffer, *PtrBufSize * KTestPoolSizeInBufs, 1, KTestMinimumAlignmentLog2);
1.2736 + r = pool1.Create(inf1, KDefaultPoolHandleFlags);
1.2737 + test_KErrNone(r);
1.2738 + r = buf1.Alloc(pool1);
1.2739 + test_KErrNone(r);
1.2740 + r = Ldd.PinBuffer(pool1.Handle(), buf1.Handle());
1.2741 + test_KErrNone(r);
1.2742 + buf1.Close();
1.2743 + pool1.Close();
1.2744 +
1.2745 + RShPool pool2;
1.2746 + RShBuf buf2;
1.2747 + TShPoolCreateInfo inf2(TShPoolCreateInfo::ENonPageAlignedBuffer, *PtrBufSize * KTestPoolSizeInBufs, 1, KTestMinimumAlignmentLog2);
1.2748 + r = pool2.Create(inf2, KDefaultPoolHandleFlags);
1.2749 + test_KErrNone(r);
1.2750 + r = buf2.Alloc(pool2);
1.2751 + test_KErrNone(r);
1.2752 + r = Ldd.PinBuffer(pool2.Handle(), buf2.Handle());
1.2753 + test_KErrNone(r);
1.2754 + buf2.Close();
1.2755 + pool2.Close();
1.2756 +#endif // __WINS__
1.2757 + }
1.2758 +
1.2759 +/*
1.2760 +@SYMTestCaseID
1.2761 +@SYMTestCaseDesc
1.2762 +@SYMREQ
1.2763 +@SYMTestActions
1.2764 +@SYMTestExpectedResults
1.2765 +@SYMTestPriority
1.2766 +*/
1.2767 +
1.2768 +void SingleBufferPool()
1.2769 + {
1.2770 + test.Next(_L("Single Buffer Pool"));
1.2771 + TInt r;
1.2772 +
1.2773 + RShPool pool;
1.2774 + RShBuf buf;
1.2775 + RShBuf buf2;
1.2776 +
1.2777 + TShPoolCreateInfo infpa(TShPoolCreateInfo::EPageAlignedBuffer, *PtrBufSize * KTestPoolSizeInBufs, 1);
1.2778 + r = infpa.SetGuardPages();
1.2779 + test_KErrNone(r);
1.2780 + r = pool.Create(infpa, KDefaultPoolHandleFlags);
1.2781 + test_KErrNone(r);
1.2782 + r = pool.SetBufferWindow(-1, ETrue);
1.2783 + test_KErrNone(r);
1.2784 + r = buf.Alloc(pool);
1.2785 + test_KErrNone(r);
1.2786 + r = buf2.Alloc(pool);
1.2787 + test_Equal(KErrNoMemory, r);
1.2788 + TPtr8(buf.Ptr(), buf.Size(), buf.Size()).Fill('!');
1.2789 + buf.Close();
1.2790 + pool.Close();
1.2791 +
1.2792 + TShPoolCreateInfo infnpa(TShPoolCreateInfo::ENonPageAlignedBuffer, *PtrBufSize * KTestPoolSizeInBufs, 1, KTestMinimumAlignmentLog2);
1.2793 + r = pool.Create(infnpa, KDefaultPoolHandleFlags);
1.2794 + test_KErrNone(r);
1.2795 + r = buf.Alloc(pool);
1.2796 + test_KErrNone(r);
1.2797 + r = buf2.Alloc(pool);
1.2798 + test_Equal(KErrNoMemory, r);
1.2799 +	TPtr8(buf.Ptr(), buf.Size(), buf.Size()).Fill('?');
1.2800 + buf.Close();
1.2801 + pool.Close();
1.2802 + }
1.2803 +
1.2804 +/*
1.2805 +@SYMTestCaseID X4
1.2806 +@SYMTestCaseDesc Negative tests (user/kernel)
1.2807 +@SYMREQ REQ11423
1.2808 +@SYMTestActions
1.2809 + API calls with invalid arguments.
1.2810 +@SYMTestExpectedResults
1.2811 + Appropriate error code returned.
1.2812 +@SYMTestPriority High
1.2813 +*/
1.2814 +
1.2815 +void NegativeTestsUser()
1.2816 + {
1.2817 + test.Next(_L("Negative tests (User)"));
1.2818 + TInt r;
1.2819 + TInt pagesize;
1.2820 + TInt ram;
1.2821 + r = HAL::Get(HAL::EMemoryPageSize, pagesize);
1.2822 + test_KErrNone(r);
1.2823 + r = HAL::Get(HAL::EMemoryRAM, ram);
1.2824 + test_KErrNone(r);
1.2825 +
1.2826 + RShPool pool;
1.2827 + { TShPoolCreateInfo inf(TShPoolCreateInfo::EPageAlignedBuffer, 0, 0); r = pool.Create(inf,KDefaultPoolHandleFlags); test_Equal(KErrArgument, r); }
1.2828 + { TShPoolCreateInfo inf(TShPoolCreateInfo::EPageAlignedBuffer, 100, 0); r = pool.Create(inf,KDefaultPoolHandleFlags); test_Equal(KErrArgument, r); }
1.2829 + { TShPoolCreateInfo inf(TShPoolCreateInfo::EPageAlignedBuffer, 0, 100); r = pool.Create(inf,KDefaultPoolHandleFlags); test_Equal(KErrArgument, r); }
1.2830 + { TShPoolCreateInfo inf(TShPoolCreateInfo::EPageAlignedBuffer, KMaxTUint, 10); r = pool.Create(inf,KDefaultPoolHandleFlags); test_Equal(KErrArgument, r); }
1.2831 + { TShPoolCreateInfo inf(TShPoolCreateInfo::EPageAlignedBuffer, 10, KMaxTUint); r = pool.Create(inf,KDefaultPoolHandleFlags); test_Equal(KErrArgument, r); }
1.2832 + { TShPoolCreateInfo inf(TShPoolCreateInfo::EPageAlignedBuffer, KMaxTUint, KMaxTUint); r = pool.Create(inf,KDefaultPoolHandleFlags); test_Equal(KErrArgument, r); }
1.2833 + { TShPoolCreateInfo inf(TShPoolCreateInfo::EPageAlignedBuffer, 65537, 65536); r = pool.Create(inf,KDefaultPoolHandleFlags); test_Equal(KErrArgument, r); }
1.2834 + { TShPoolCreateInfo inf(TShPoolCreateInfo::EPageAlignedBuffer, 10, 1 + (1 << (32 - Log2(pagesize)))); r = pool.Create(inf,KDefaultPoolHandleFlags); test_Equal(KErrArgument, r); }
1.2835 + { TShPoolCreateInfo inf(TShPoolCreateInfo::EPageAlignedBuffer, 4096, 10); r = pool.Create(inf, KDefaultPoolHandleFlags); test_Equal(KErrNone, r); pool.Close(); }
1.2836 + // XXX The following test will need updating in Phase 2, when exclusive access will be supported
1.2837 + // (page-aligned-buffer pools only)
1.2838 + { TShPoolCreateInfo inf(TShPoolCreateInfo::EPageAlignedBuffer, 4096, 10); inf.SetExclusive(); r = pool.Create(inf, KDefaultPoolHandleFlags); test_Equal(KErrNotSupported, r); pool.Close(); }
1.2839 + { TShPoolCreateInfo inf(TShPoolCreateInfo::ENonPageAlignedBuffer, 4096, 10, 12); r = pool.Create(inf, KDefaultPoolHandleFlags); test_Equal(KErrNone, r); pool.Close(); }
1.2840 + { TShPoolCreateInfo inf(TShPoolCreateInfo::ENonPageAlignedBuffer, 4096, 10, 12); inf.SetExclusive(); r = pool.Create(inf, KDefaultPoolHandleFlags); test_Equal(KErrArgument, r); pool.Close(); }
1.2841 +#ifndef __WINS__
1.2842 + { TShPoolCreateInfo inf(TShPoolCreateInfo::EPageAlignedBuffer, 128 * pagesize, (ram / (128 * pagesize)) + 1); r = pool.Create(inf,KDefaultPoolHandleFlags); test_Equal(KErrNoMemory, r); }
1.2843 +#endif
1.2844 + { TShPoolCreateInfo inf(TShPoolCreateInfo::ENonPageAlignedBuffer, 0, 0, 0); r = pool.Create(inf,KDefaultPoolHandleFlags); test_Equal(KErrArgument, r); }
1.2845 + { TShPoolCreateInfo inf(TShPoolCreateInfo::ENonPageAlignedBuffer, 100, 0, 0); r = pool.Create(inf,KDefaultPoolHandleFlags); test_Equal(KErrArgument, r); }
1.2846 + { TShPoolCreateInfo inf(TShPoolCreateInfo::ENonPageAlignedBuffer, 0, 100, 0); r = pool.Create(inf,KDefaultPoolHandleFlags); test_Equal(KErrArgument, r); }
1.2847 + { TShPoolCreateInfo inf(TShPoolCreateInfo::ENonPageAlignedBuffer, KMaxTUint, 10, 0); r = pool.Create(inf,KDefaultPoolHandleFlags); test_Equal(KErrArgument, r); }
1.2848 + { TShPoolCreateInfo inf(TShPoolCreateInfo::ENonPageAlignedBuffer, 10, KMaxTUint, 0); r = pool.Create(inf,KDefaultPoolHandleFlags); test_Equal(KErrArgument, r); }
1.2849 + { TShPoolCreateInfo inf(TShPoolCreateInfo::ENonPageAlignedBuffer, KMaxTUint, KMaxTUint, 0); r = pool.Create(inf,KDefaultPoolHandleFlags); test_Equal(KErrArgument, r); }
1.2850 + { TShPoolCreateInfo inf(TShPoolCreateInfo::ENonPageAlignedBuffer, 65537, 65536, 0); r = pool.Create(inf,KDefaultPoolHandleFlags); test_Equal(KErrArgument, r); }
1.2851 + { TShPoolCreateInfo inf(TShPoolCreateInfo::ENonPageAlignedBuffer, 10, 10, KMaxTUint); r = pool.Create(inf,KDefaultPoolHandleFlags); test_Equal(KErrArgument, r); }
1.2852 + { TShPoolCreateInfo inf(TShPoolCreateInfo::ENonPageAlignedBuffer, 10, 10, 33); r = pool.Create(inf,KDefaultPoolHandleFlags); test_Equal(KErrArgument, r); }
1.2853 + { TShPoolCreateInfo inf(TShPoolCreateInfo::ENonPageAlignedBuffer, 10, 300, 24); r = pool.Create(inf,KDefaultPoolHandleFlags); test_Equal(KErrArgument, r); }
1.2854 + { TShPoolCreateInfo inf(TShPoolCreateInfo::ENonPageAlignedBuffer, 10, 65537, 16); r = pool.Create(inf,KDefaultPoolHandleFlags); test_Equal(KErrArgument, r); }
1.2855 + { TShPoolCreateInfo inf(TShPoolCreateInfo::ENonPageAlignedBuffer, 10, 10, Log2(pagesize) + 1); r = pool.Create(inf,KDefaultPoolHandleFlags); test_Equal(KErrArgument, r); }
1.2856 +
1.2857 + {
1.2858 + TShPoolCreateInfo inf(TShPoolCreateInfo::ENonPageAlignedBuffer, *BufferSize, KTestPoolSizeInBufs, 0);
1.2859 + inf.SetGuardPages();
1.2860 + r = pool.Create(inf, KDefaultPoolHandleFlags); test_Equal(KErrArgument, r);
1.2861 + r = inf.SetSizingAttributes(KTestPoolSizeInBufs - 1, 25, 25, 280); test_KErrNone(r); r = pool.Create(inf,KDefaultPoolHandleFlags); test_Equal(KErrArgument, r);
1.2862 + // Either grow trigger ratio or grow by ratio == 0 => non-growable pool
1.2863 + // Such pools must have initial buffers == max buffers
1.2864 + r = inf.SetSizingAttributes(KTestPoolSizeInBufs * 2, 1, 0, 1); test_Equal(KErrArgument, r); r = pool.Create(inf,KDefaultPoolHandleFlags); test_Equal(KErrArgument, r);
1.2865 + r = inf.SetSizingAttributes(KTestPoolSizeInBufs * 2, 1, 0, 0); test_Equal(KErrArgument, r); r = pool.Create(inf,KDefaultPoolHandleFlags); test_Equal(KErrArgument, r);
1.2866 + // shrink hysteresis ratio must be > 256
1.2867 + r = inf.SetSizingAttributes(KTestPoolSizeInBufs - 1, 25, 25, 256); test_Equal(KErrArgument, r); r = pool.Create(inf,KDefaultPoolHandleFlags); test_Equal(KErrArgument, r);
1.2868 + // grow ratio must be < 256
1.2869 + r = inf.SetSizingAttributes(KTestPoolSizeInBufs * 2, 256, 25, 260); test_Equal(KErrArgument, r); r = pool.Create(inf,KDefaultPoolHandleFlags); test_Equal(KErrArgument, r);
1.2870 + }
1.2871 +
1.2872 + // Can't have a non-aligned, contiguous pool that grows
1.2873 + TShPoolCreateInfo inf(TShPoolCreateInfo::ENonPageAlignedBuffer, 200, 10, 0);
1.2874 + r = inf.SetSizingAttributes(KTestPoolSizeInBufs * 2, 25, 25, 280);
1.2875 + test_KErrNone(r);
1.2876 + }
1.2877 +
1.2878 +void NegativeTestsKernel()
1.2879 + {
1.2880 + test.Next(_L("Negative tests (Kernel)"));
1.2881 + TInt r;
1.2882 + r = Ldd.NegativeTestsKernel();
1.2883 + test_KErrNone(r);
1.2884 + }
1.2885 +
1.2886 +/*
1.2887 +@SYMTestCaseID 23
1.2888 +@SYMTestCaseDesc Out of memory testing
1.2889 +@SYMREQ
1.2890 +@SYMTestActions
1.2891 + TBD
1.2892 +@SYMTestExpectedResults
1.2893 +@SYMTestPriority High
1.2894 +*/
1.2895 +
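+// The OOM loops below make the i-th kernel heap allocation fail
+// (__KHEAP_FAILNEXT(i)) and retry the operation with increasing i until it
+// succeeds; every earlier attempt must fail cleanly with KErrNoMemory and
+// leak nothing.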
1.2896 +void OutOfMemory()
1.2897 + {
1.2898 + test.Next(_L("Out of memory"));
1.2899 +#ifdef _DEBUG
1.2900 +
1.2901 +
1.2902 + const TInt KMaxKernelAllocations = 1024;
1.2903 + TInt i, r;
1.2904 + RShPool pool;
1.2905 + TShPoolCreateInfo inf0(TShPoolCreateInfo::EPageAlignedBuffer, *PtrBufSize, 1);
1.2906 + TShPoolCreateInfo inf1(TShPoolCreateInfo::ENonPageAlignedBuffer, *PtrBufSize, 1, 0);
1.2907 + r = inf0.SetSizingAttributes(4, 100, 1024, 300);
1.2908 + test_KErrNone(r);
1.2909 + r = inf1.SetSizingAttributes(4, 100, 1024, 300);
1.2910 + test_KErrNone(r);
1.2911 +
1.2912 + for(TInt j = 0; j <= 1; j++)
1.2913 + {
1.2914 +
1.2915 + if(j == 0)
1.2916 + test.Printf(_L("OOM testing for page-aligned pool\n"));
1.2917 + else
1.2918 + test.Printf(_L("OOM testing for non-page-aligned pool\n"));
1.2919 +
1.2920 + r = KErrNoMemory;
1.2921 +
1.2922 + __KHEAP_RESET;
1.2923 +
1.2924 + //Create the pool
1.2925 + for (i = 0; i < KMaxKernelAllocations && r == KErrNoMemory; i++)
1.2926 + {
1.2927 + __KHEAP_FAILNEXT(i);
1.2928 + if(j == 0)
1.2929 + r = pool.Create(inf0,KDefaultPoolHandleFlags);
1.2930 + else
1.2931 + r = pool.Create(inf1,KDefaultPoolHandleFlags);
1.2932 + __KHEAP_RESET;
1.2933 + }
1.2934 + test.Printf(_L("Create pool took %d tries\n"),i);
1.2935 + test_KErrNone(r);
1.2936 +
1.2937 + //Allocate buffers with automatic pool growing enabled
1.2938 + r = KErrNoMemory;
1.2939 + RShBuf buf1;
1.2940 + for (i = 0; i < KMaxKernelAllocations && r == KErrNoMemory; i++)
1.2941 + {
1.2942 + __KHEAP_FAILNEXT(i);
1.2943 + if(j == 0)
1.2944 + r = buf1.Alloc(pool, EShPoolAllocNoMap);
1.2945 + else
1.2946 + r = buf1.Alloc(pool);
1.2947 + __KHEAP_RESET;
1.2948 + }
1.2949 + test.Printf(_L("Allocate shared buffer 1 took %d tries\n"),i);
1.2950 + test_KErrNone(r);
1.2951 +
1.2952 + // delay to allow the pool to grow
1.2953 + User::After(20000);
1.2954 +
1.2955 + r = KErrNoMemory;
1.2956 + RShBuf buf2;
1.2957 + for (i = 0; i < KMaxKernelAllocations && r == KErrNoMemory; i++)
1.2958 + {
1.2959 + __KHEAP_FAILNEXT(i);
1.2960 + if(j == 0)
1.2961 + r = buf2.Alloc(pool, EShPoolAllocNoMap);
1.2962 + else
1.2963 + r = buf2.Alloc(pool);
1.2964 + __KHEAP_RESET;
1.2965 + User::After(20000);
1.2966 + }
1.2967 + test.Printf(_L("Allocate shared buffer 2 took %d tries\n"),i);
1.2968 + test_KErrNone(r);
1.2969 +
1.2970 + // delay to allow the pool to grow again
1.2971 + User::After(20000);
1.2972 +
1.2973 + r = KErrNoMemory;
1.2974 + RShBuf buf3;
1.2975 + for (i = 0; i < KMaxKernelAllocations && r == KErrNoMemory; i++)
1.2976 + {
1.2977 + __KHEAP_FAILNEXT(i);
1.2978 + if(j == 0)
1.2979 + r = buf3.Alloc(pool, EShPoolAllocNoMap);
1.2980 + else
1.2981 + r = buf3.Alloc(pool);
1.2982 + __KHEAP_RESET;
1.2983 + }
1.2984 + test.Printf(_L("Allocate shared buffer 3 took %d tries\n"),i);
1.2985 + test_KErrNone(r);
1.2986 +
1.2987 + //Map a buffer in page-aligned-pool case
1.2988 + if(j == 0)
1.2989 + {
1.2990 + //Open a one-buffer window
1.2991 + r = pool.SetBufferWindow(1, ETrue);
1.2992 + test_KErrNone(r);
1.2993 +
1.2994 + //Map a buffer
1.2995 + r = KErrNoMemory;
1.2996 + for (i = 0; i < KMaxKernelAllocations && r == KErrNoMemory; i++)
1.2997 + {
1.2998 + buf1.UnMap();
1.2999 + __KHEAP_FAILNEXT(i);
1.3000 + r = buf1.Map();
1.3001 + __KHEAP_RESET;
1.3002 + }
1.3003 + test.Printf(_L("Mapping buffer 1 took %d tries\n"),i);
1.3004 + test_KErrNone(r);
1.3005 + }
1.3006 +
1.3007 + //Setup low-space notification
1.3008 + TRequestStatus low;
1.3009 + low = KErrNoMemory;
1.3010 + for (i = 0; i < KMaxKernelAllocations && low != KRequestPending; i++)
1.3011 + {
1.3012 + __KHEAP_FAILNEXT(i);
1.3013 + pool.RequestLowSpaceNotification(1, low);
1.3014 + __KHEAP_RESET;
1.3015 + }
1.3016 + test.Printf(_L("Setting up low-space notification took %d tries\n"),i);
1.3017 +		test_Equal(KRequestPending, low.Int());
1.3018 +
1.3019 + //Setup free-space notification
1.3020 + TRequestStatus free;
1.3021 + free = KErrNoMemory;
1.3022 + for (i = 0; i < KMaxKernelAllocations && free != KRequestPending; i++)
1.3023 + {
1.3024 + __KHEAP_FAILNEXT(i);
1.3025 + pool.RequestFreeSpaceNotification(4, free);
1.3026 + __KHEAP_RESET;
1.3027 + }
1.3028 + test.Printf(_L("Setting up free-space notification took %d tries\n"),i);
1.3029 +		test_Equal(KRequestPending, free.Int());
1.3030 +
1.3031 + //No allocations should occur here
1.3032 + __KHEAP_FAILNEXT(1);
1.3033 + if(j == 0)
1.3034 + {
1.3035 + //Unmap the buffer
1.3036 + r = buf1.UnMap();
1.3037 + }
1.3038 +
1.3039 + //Cancel the notifications
1.3040 + pool.CancelLowSpaceNotification(low);
1.3041 + pool.CancelFreeSpaceNotification(free);
1.3042 +
1.3043 + //Close the buffers and the pool
1.3044 + buf1.Close();
1.3045 + buf2.Close();
1.3046 + buf3.Close();
1.3047 + pool.Close();
1.3048 + __KHEAP_RESET;
1.3049 +
1.3050 + }
1.3051 +
1.3052 + // Allocate kernel-side buffer on Pool 2
1.3053 + TInt handle = 0;
1.3054 + RShBuf kbuf;
1.3055 + r = KErrNoMemory;
1.3056 + for (i = 0; i < KMaxKernelAllocations && r == KErrNoMemory; i++)
1.3057 + {
1.3058 + __KHEAP_FAILNEXT(i);
1.3059 + r = Ldd.AllocateKernelBuffer(1, handle);
1.3060 + __KHEAP_RESET;
1.3061 + }
1.3062 + test.Printf(_L("Allocate kernel buffer took %d tries\n"),i);
1.3063 + test_KErrNone(r);
1.3064 +
1.3065 + __KHEAP_FAILNEXT(1);
1.3066 + kbuf.SetHandle(handle);
1.3067 + __KHEAP_RESET;
1.3068 +
1.3069 + r = KErrNoMemory;
1.3070 + for (i = 0; i < KMaxKernelAllocations && r == KErrNoMemory; i++)
1.3071 + {
1.3072 + r = kbuf.UnMap();
1.3073 + __KHEAP_FAILNEXT(i);
1.3074 + r = kbuf.Map();
1.3075 + __KHEAP_RESET;
1.3076 + }
1.3077 + test.Printf(_L("Mapping kernel buffer took %d tries\n"),i);
1.3078 + test_KErrNone(r);
1.3079 +
1.3080 + __KHEAP_FAILNEXT(1);
1.3081 + r = kbuf.UnMap();
1.3082 + kbuf.Close();
1.3083 + __KHEAP_RESET;
1.3084 +
1.3085 +
1.3086 +#else // _DEBUG
1.3087 +	test.Printf(_L("Debug builds only. Test skipped.\n"));
1.3088 +#endif // _DEBUG
1.3089 + }
1.3090 +
1.3091 +/*
1.3092 +@SYMTestCaseID 22
1.3093 +@SYMTestCaseDesc Stress testing
1.3094 +@SYMREQ
1.3095 +@SYMTestActions
1.3096 + TBD
1.3097 +@SYMTestExpectedResults
1.3098 +@SYMTestPriority Medium
1.3099 +*/
1.3100 +
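+// StressThread1 repeatedly creates and closes pools, alternating between
+// page-aligned and non-page-aligned attributes; StressThread2 repeatedly
+// allocates every buffer in the shared pools P1 and P2, fills them and
+// frees them again. StressTesting() runs both concurrently for aSecs
+// seconds, then kills them.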
1.3101 +TInt StressThread1(TAny*)
1.3102 + {
1.3103 + TInt r;
1.3104 + TInt pagesize;
1.3105 + r = HAL::Get(HAL::EMemoryPageSize, pagesize);
1.3106 + test_KErrNone(r);
1.3107 +
1.3108 + TInt i = 0;
1.3109 + FOREVER
1.3110 + {
1.3111 + RShPool pool;
1.3112 + if (i % 2)
1.3113 + {
1.3114 + TShPoolCreateInfo inf(TShPoolCreateInfo::EPageAlignedBuffer, 1000, 512);
1.3115 + r = pool.Create(inf,KDefaultPoolHandleFlags);
1.3116 + if (r)
1.3117 + {
1.3118 + RDebug::Printf("Error %d line %d", r, __LINE__);
1.3119 + break;
1.3120 + }
1.3121 +
1.3122 + r = pool.SetBufferWindow(-1, ETrue);
1.3123 + test_KErrNone(r);
1.3124 +
1.3125 + }
1.3126 + else
1.3127 + {
1.3128 + TShPoolCreateInfo inf(TShPoolCreateInfo::ENonPageAlignedBuffer, 10000, 200, 0);
1.3129 + r = pool.Create(inf,KDefaultPoolHandleFlags);
1.3130 + if (r)
1.3131 + {
1.3132 + RDebug::Printf("Error %d line %d", r, __LINE__);
1.3133 + break;
1.3134 + }
1.3135 + }
1.3136 + pool.Close();
1.3137 + i++;
1.3138 + if (i % 100 == 0)
1.3139 + {
1.3140 + RDebug::Printf("ST1 %d iterations", i);
1.3141 + }
1.3142 + }
1.3143 + return r;
1.3144 + }
1.3145 +
1.3146 +TInt StressThread2(TAny*)
1.3147 + {
1.3148 + TInt r = KErrUnknown;
1.3149 + TShPoolInfo inf1;
1.3150 + TShPoolInfo inf2;
1.3151 + P1.GetInfo(inf1);
1.3152 + P2.GetInfo(inf2);
1.3153 + TInt j = 0;
1.3154 + FOREVER
1.3155 + {
1.3156 + TUint i;
1.3157 + RArray<RShBuf> bufarray1;
1.3158 + RArray<RShBuf> bufarray2;
1.3159 + for (i = 0; i < inf1.iMaxBufs; i++)
1.3160 + {
1.3161 + RShBuf buf;
1.3162 + r = buf.Alloc(P1);
1.3163 + if (r)
1.3164 + {
1.3165 + RDebug::Printf("Error %d line %d i=%d", r, __LINE__, i);
1.3166 + break;
1.3167 + }
1.3168 +			TPtr8(buf.Ptr(), buf.Size(), buf.Size()).Fill('1');
1.3169 + r = bufarray1.Append(buf);
1.3170 + if (r)
1.3171 + {
1.3172 + buf.Close();
1.3173 + RDebug::Printf("Error %d line %d i=%d", r, __LINE__, i);
1.3174 + break;
1.3175 + }
1.3176 + }
1.3177 + for (i = 0; i < inf2.iMaxBufs; i++)
1.3178 + {
1.3179 + RShBuf buf;
1.3180 + r = buf.Alloc(P2);
1.3181 + if (r)
1.3182 + {
1.3183 + RDebug::Printf("Error %d line %d i=%d", r, __LINE__, i);
1.3184 + break;
1.3185 + }
1.3186 +			TPtr8(buf.Ptr(), buf.Size(), buf.Size()).Fill('2');
1.3187 + bufarray2.Append(buf);
1.3188 + }
1.3189 + i = 0;
1.3190 + while (bufarray1.Count())
1.3191 + {
1.3192 + bufarray1[0].Close();
1.3193 + bufarray1.Remove(0);
1.3194 + i++;
1.3195 + }
1.3196 +
1.3197 + while (bufarray2.Count())
1.3198 + {
1.3199 + bufarray2[0].Close();
1.3200 + bufarray2.Remove(0);
1.3201 + }
1.3202 + bufarray1.Close();
1.3203 + bufarray2.Close();
1.3204 + if (r)
1.3205 + {
1.3206 + break;
1.3207 + }
1.3208 + j++;
1.3209 + if (j % 10 == 0)
1.3210 + {
1.3211 + RDebug::Printf("ST2 %d iterations", j);
1.3212 + }
1.3213 + }
1.3214 + return r;
1.3215 + }
1.3216 +
1.3217 +void StressTesting(TInt aSecs)
1.3218 + {
1.3219 + test.Next(_L("Stress testing"));
1.3220 + TInt r;
1.3221 +
1.3222 + test.Start(_L("Create pools"));
1.3223 + TShPoolCreateInfo inf1(TShPoolCreateInfo::ENonPageAlignedBuffer, 2000, 500, 11);
1.3224 + r = P1.Create(inf1,KDefaultPoolHandleFlags);
1.3225 + test_KErrNone(r);
1.3226 + TInt handle;
1.3227 + TShPoolCreateInfo inf2(TShPoolCreateInfo::EPageAlignedBuffer, 5000, 150);
1.3228 + r = Ldd.OpenKernelPool(inf2, handle);
1.3229 + test_KErrNone(r);
1.3230 + P2.SetHandle(handle);
1.3231 +
1.3232 + r = P2.SetBufferWindow(-1, ETrue);
1.3233 + test_KErrNone(r);
1.3234 +
1.3235 + test.Next(_L("Create threads"));
1.3236 + RThread t1;
1.3237 + r = t1.Create(_L("THREAD1"), StressThread1, KDefaultStackSize, KMinHeapSize, KMinHeapSize, NULL);
1.3238 + test_KErrNone(r);
1.3239 + RThread t2;
1.3240 + r = t2.Create(_L("THREAD2"), StressThread2, KDefaultStackSize*2, KMinHeapSize, 1 << 20, NULL);
1.3241 + test_KErrNone(r);
1.3242 + test.Next(_L("Start threads"));
1.3243 + test.Printf(_L("Wait for %d seconds\n"), aSecs);
1.3244 + RThread().SetPriority(EPriorityMore);
1.3245 + TRequestStatus t1rs;
1.3246 + TRequestStatus t2rs;
1.3247 + t1.Logon(t1rs);
1.3248 + t2.Logon(t2rs);
1.3249 + t1.Resume();
1.3250 + t2.Resume();
1.3251 + User::After(aSecs * 1000000);
1.3252 +
1.3253 + test.Next(_L("Kill threads"));
1.3254 + t1.Kill(KErrNone);
1.3255 + t2.Kill(KErrNone);
1.3256 +
1.3257 + // wait for threads to actually die
1.3258 + User::WaitForRequest(t1rs);
1.3259 + User::WaitForRequest(t2rs);
1.3260 +
1.3261 + t1.Close();
1.3262 + t2.Close();
1.3263 + RThread().SetPriority(EPriorityNormal);
1.3264 +
1.3265 + test.Next(_L("Close pools"));
1.3266 + P1.Close();
1.3267 + r = Ldd.CloseKernelPool();
1.3268 + test_KErrNone(r);
1.3269 + P2.Close();
1.3270 + test.End();
1.3271 + }
1.3272 +
1.3273 +/*
1.3274 +@SYMTestCaseID
1.3275 +@SYMTestCaseDesc
1.3276 +@SYMREQ
1.3277 +@SYMTestActions
1.3278 +@SYMTestExpectedResults
1.3279 +@SYMTestPriority
1.3280 +*/
1.3281 +
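+// NoDeallocation() re-launches this executable in slave mode; the slave
+// (SlaveNoDeallocation) creates a pool and allocates a buffer but exits
+// without closing either. After a supervisor barrier gives the kernel time
+// to clean up, the kernel heap mark set by the slave is closed with
+// __KHEAP_MARKEND to confirm nothing leaked kernel-side.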
1.3282 +void NoDeallocation()
1.3283 + {
1.3284 + test.Next(_L("No deallocation"));
1.3285 + TInt r;
1.3286 + TBuf<10> command;
1.3287 + command.Format(_L("%S %d"), &KTestSlave, ETestSlaveNoDeallocation);
1.3288 + RProcess p;
1.3289 + r = p.Create(RProcess().FileName(), command);
1.3290 + test_KErrNone(r);
1.3291 + TRequestStatus rs;
1.3292 + p.Logon(rs);
1.3293 + p.Resume();
1.3294 + User::WaitForRequest(rs);
1.3295 +
1.3296 + // wait for memory to be freed
1.3297 + r = UserSvr::HalFunction(EHalGroupKernel, EKernelHalSupervisorBarrier, (TAny*)5000, 0);
1.3298 + test_KErrNone(r);
1.3299 +
1.3300 + __KHEAP_MARKEND;
1.3301 + test_KErrNone(rs.Int());
1.3302 + test_Equal(EExitKill, p.ExitType());
1.3303 + test_KErrNone(p.ExitReason());
1.3304 + p.Close();
1.3305 + }
1.3306 +
1.3307 +TInt SlaveNoDeallocation()
1.3308 + {
1.3309 + __KHEAP_MARK;
1.3310 + TInt r;
1.3311 + RShPool pool;
1.3312 + TShPoolCreateInfo inf(TShPoolCreateInfo::EPageAlignedBuffer, *BufferSize, KTestPoolSizeInBufs);
1.3313 + r = pool.Create(inf,KDefaultPoolHandleFlags);
1.3314 + test_KErrNone(r);
1.3315 +
1.3316 +	r = pool.SetBufferWindow(-1, ETrue);
1.3317 + test_KErrNone(r);
1.3318 +
1.3319 + if (!r)
1.3320 + {
1.3321 + RShBuf buf;
1.3322 + r = buf.Alloc(pool);
1.3323 + }
1.3324 + return r;
1.3325 + }
1.3326 +
1.3327 +TInt E32Main()
1.3328 + {
1.3329 + __UHEAP_MARK;
1.3330 +
1.3331 + // Parse command line for slave processes
1.3332 + TInt r = KErrArgument;
1.3333 + TBuf<KMaxFullName> cmd;
1.3334 + User::CommandLine(cmd);
1.3335 + TLex lex(cmd);
1.3336 + if (lex.NextToken() == KTestSlave)
1.3337 + {
1.3338 + TInt function;
1.3339 + TLex functionlex(lex.NextToken());
1.3340 + functionlex.Val(function);
1.3341 + switch (function)
1.3342 + {
1.3343 + case ETestSlaveNoDeallocation:
1.3344 + r = SlaveNoDeallocation();
1.3345 + break;
1.3346 + }
1.3347 + __UHEAP_MARKEND;
1.3348 + return r;
1.3349 + }
1.3350 + // Test starts here
1.3351 + test.Title();
1.3352 +
1.3353 + test.Start(_L("Check for Shared Buffers availability"));
1.3354 + RShPool pool;
1.3355 + TShPoolCreateInfo inf(TShPoolCreateInfo::EPageAlignedBuffer, *BufferSize, KTestPoolSizeInBufs);
1.3356 + r = pool.Create(inf,KDefaultPoolHandleFlags);
1.3357 + if (r == KErrNotSupported)
1.3358 + {
1.3359 + test.Printf(_L("Not supported by this memory model.\n"));
1.3360 + }
1.3361 + else
1.3362 + {
1.3363 + test_KErrNone(r);
1.3364 + pool.Close();
1.3365 +
1.3366 + test.Next(_L("No device driver"));
1.3367 + test.Start(_L("Start test loop"));
1.3368 + for (PtrBufSize = BufferSize; *PtrBufSize != 0; PtrBufSize++)
1.3369 + {
1.3370 + TBuf<30> title;
1.3371 + title.Format(_L("Buffer size = %d bytes"), *PtrBufSize);
1.3372 + test.Next(title);
1.3373 + test.Start(_L("New test iteration"));
1.3374 + BufferAlignmentUser();
1.3375 + BufferMapping();
1.3376 + BufferWindow();
1.3377 + GuardPages();
1.3378 + PoolGrowingUser();
1.3379 + SingleBufferPool();
1.3380 + test.End();
1.3381 + }
1.3382 + test.End();
1.3383 + test.Next(_L("Load Device Driver"));
1.3384 + LoadDeviceDrivers();
1.3385 +
1.3386 + #ifdef TEST_CLIENT_THREAD
1.3387 + test.Next(_L("Device driver in client thread"));
1.3388 + r = Ldd.Open(0);
1.3389 + #else
1.3390 + test.Next(_L("Device driver in own thread"));
1.3391 + r = Ldd.Open(1);
1.3392 + #endif
1.3393 +
1.3394 + test_KErrNone(r);
1.3395 +
1.3396 + test.Start(_L("Start test loop"));
1.3397 + for (PtrBufSize = BufferSize; *PtrBufSize != 0; PtrBufSize++)
1.3398 + {
1.3399 + TBuf<30> title;
1.3400 + title.Format(_L("Buffer size = %d bytes"), *PtrBufSize);
1.3401 + test.Next(title);
1.3402 + test.Start(_L("New test iteration"));
1.3403 + CreateUserPool(ETestNonPageAligned);
1.3404 + CreateKernelPool(ETestNonPageAligned);
1.3405 + AllocateUserBuffer();
1.3406 + AllocateKernelBuffer();
1.3407 + AllocateUserMax(P1);
1.3408 + AllocateUserMax(P2);
1.3409 + AllocateKernelMax();
1.3410 + BufferAlignmentKernel();
1.3411 + CreateKernelPoolPhysAddr();
1.3412 + NotificationRequests(P1);
1.3413 + NotificationRequests(P2);
1.3414 + CancelNotificationRequests(P1);
1.3415 + CancelNotificationRequests(P2);
1.3416 + ShBufPin();
1.3417 + CloseKernelPool();
1.3418 + CloseUserPool();
1.3419 + ContiguousPoolKernel();
1.3420 + CreateUserPool(ETestPageAligned);
1.3421 + CreateKernelPool(ETestPageAligned);
1.3422 + OutOfMemory();
1.3423 + AllocateUserBuffer();
1.3424 + AllocateKernelBuffer();
1.3425 + AllocateUserMax(P1);
1.3426 + AllocateUserMax(P2);
1.3427 + AllocateKernelMax();
1.3428 + NotificationRequests(P1);
1.3429 + NotificationRequests(P2);
1.3430 + CloseUserPool();
1.3431 + CloseKernelPool();
1.3432 + CreateUserPool(ETestPageAlignedGrowing);
1.3433 + CreateKernelPool(ETestPageAlignedGrowing);
1.3434 + OutOfMemory();
1.3435 + AllocateKernelMax();
1.3436 + AllocateUserMax(P1);
1.3437 + AllocateUserMax(P2);
1.3438 + CloseUserPool();
1.3439 + CloseKernelPool();
1.3440 + test.End();
1.3441 + }
1.3442 + NegativeTestsKernel();
1.3443 + StressTesting(5);
1.3444 + test.End();
1.3445 + Ldd.Close();
1.3446 +
1.3447 + NegativeTestsUser();
1.3448 + NoDeallocation();
1.3449 +
1.3450 + test.Next(_L("Unload Device Drivers"));
1.3451 + FreeDeviceDrivers();
1.3452 + }
1.3453 + test.End();
1.3454 + test.Close();
1.3455 +
1.3456 + __UHEAP_MARKEND;
1.3457 + return KErrNone;
1.3458 + }