Update contributed test code.
1 // Copyright (c) 2009 Nokia Corporation and/or its subsidiary(-ies).
2 // All rights reserved.
3 // This component and the accompanying materials are made available
4 // under the terms of the License "Eclipse Public License v1.0"
5 // which accompanies this distribution, and is available
6 // at the URL "http://www.eclipse.org/legal/epl-v10.html".
8 // Initial Contributors:
9 // Nokia Corporation - initial contribution.
14 // e32test/mmu/t_shbuf.cpp
17 #define __E32TEST_EXTENSION__
26 #include <e32def_private.h>
28 #ifdef TEST_CLIENT_THREAD
29 RTest test(_L("T_SHBUF_CLIENT"));
31 RTest test(_L("T_SHBUF_OWN"));
34 RShPool P1; // User-side pool
35 RShPool P2; // Kernel-side pool
37 const TInt KTestPoolSizeInBytes = 1 << 20; // 1MB
38 const TInt BufferSize[] = {128, 853, 4096, 5051, 131072, 1, 0}; // Last element must be 0
40 const TInt* PtrBufSize;
42 RShBufTestChannel Ldd;
44 _LIT(KTestSlave, "SLAVE");
45 _LIT(KTestLowSpaceSemaphore, "LowSpaceSemaphore");
50 ETestSlaveNoDeallocation,
57 ETestPageAlignedGrowing,
// Round aNum up to the next multiple of (1 << aAlignmentLog2).
// NOTE(review): this extract is sampled - the opening brace and the
// "already aligned" branch body (presumably returning aNum unchanged)
// are not visible here; code left byte-identical.
71 TInt RoundUp(TInt aNum, TInt aAlignmentLog2)
73 if (aNum % (1 << aAlignmentLog2) == 0)
// Not aligned: mask off the low bits, then advance by one alignment unit.
77 return (aNum & ~((1 << aAlignmentLog2) - 1)) + (1 << aAlignmentLog2);
// Load the test logical device driver: the CLIENT variant when built with
// TEST_CLIENT_THREAD, otherwise the OWN variant. KErrAlreadyExists is
// tolerated (driver may already be loaded from a previous run).
// NOTE(review): sampled extract - error-check lines between the visible
// statements are missing; code left byte-identical.
80 void LoadDeviceDrivers()
83 #ifdef TEST_CLIENT_THREAD
84 r= User::LoadLogicalDevice(_L("D_SHBUF_CLIENT.LDD"));
85 if (r != KErrAlreadyExists)
90 r = User::LoadLogicalDevice(_L("D_SHBUF_OWN.LDD"));
91 if (r != KErrAlreadyExists)
// Unload both variants of the test LDD at the end of the test run.
98 void FreeDeviceDrivers()
100 TInt r = User::FreeLogicalDevice(KTestShBufClient);
102 r = User::FreeLogicalDevice(KTestShBufOwn);
// Fill the whole of aBuffer's mapped memory with the byte aValue.
106 void FillShBuf(RShBuf& aBuffer, TUint8 aValue)
108 TUint size = aBuffer.Size();
109 TUint8* base = aBuffer.Ptr();
// memset over the full buffer extent.
112 memset(base,aValue,size);
// Verify that every byte of aBuffer equals aValue. On the first mismatch
// the offset and the offending byte are logged via RDebug::Printf.
// NOTE(review): sampled extract - the scan loop and return statements are
// not visible here; code left byte-identical.
115 TBool CheckFillShBuf(RShBuf& aBuffer, TUint8 aValue)
117 TUint size = aBuffer.Size();
118 TUint8* base = aBuffer.Ptr();
122 TUint8* end = ptr+size;
128 RDebug::Printf("CheckFillShBuf failed at offset 0x%x, expected 0x%02x but got 0x%02x ",ptr-base-1,aValue,b);
// Complement of CheckFillShBuf: verify aBuffer does NOT carry the fill
// byte aValue (logs the offset where aValue was unexpectedly found).
// NOTE(review): sampled extract - loop body and returns not visible;
// code left byte-identical.
135 TBool CheckNotFillShBuf(RShBuf& aBuffer, TUint8 aValue)
137 TUint size = aBuffer.Size();
138 TUint8* base = aBuffer.Ptr();
142 TUint8* end = ptr+size;
148 RDebug::Printf("CheckNotFillShBuf failed at offset 0x%x, expected not 0x%02x",ptr-base-1,aValue);
157 @SYMTestCaseDesc Create pool from user-side
160 1. Test Thread creates a pool (P1) and passes handle to device driver.
161 2. Device driver opens pool and checks its attributes.
162 @SYMTestExpectedResults
164 @SYMTestPriority Critical
// Create the user-side pool P1 with attributes selected by aPoolType,
// then hand its handle to the driver (Ldd.OpenUserPool) so the kernel
// side can open it and cross-check the attributes (see @SYMTest block
// above). Each case asserts the TShPoolInfo reported by GetInfo().
// NOTE(review): sampled extract - the switch statement, braces and many
// error checks between the visible lines are missing; code left
// byte-identical.
167 void CreateUserPool(TTestPoolType aPoolType)
169 test.Next(_L("Create user-side pool"));
172 r = HAL::Get(HAL::EMemoryPageSize, pagesize);
// --- Case 1: non-page-aligned pool, alignment log2 = 8 -----------------
177 case ETestNonPageAligned:
178 // Non-page-aligned pool
180 test.Printf(_L("Non-page-aligned\n"));
181 test_Equal(0, P1.Handle());
182 TShPoolCreateInfo inf(TShPoolCreateInfo::ENonPageAlignedBuffer, *PtrBufSize, KTestPoolSizeInBufs, 8);
183 r = P1.Create(inf,KDefaultPoolHandleFlags);
// A buffer window is only meaningful for page-aligned pools, so this
// must fail with KErrNotSupported.
186 r = P1.SetBufferWindow(-1, ETrue);
187 test_Equal(KErrNotSupported, r);
// Build the expected attribute set and let the driver verify it against
// the pool it opens from our handle.
189 TShPoolInfo poolinfotokernel;
190 poolinfotokernel.iBufSize = *PtrBufSize;
191 poolinfotokernel.iInitialBufs = KTestPoolSizeInBufs;
192 poolinfotokernel.iMaxBufs = KTestPoolSizeInBufs;
193 poolinfotokernel.iGrowTriggerRatio = 0;
194 poolinfotokernel.iGrowByRatio = 0;
195 poolinfotokernel.iShrinkHysteresisRatio = 0;
196 poolinfotokernel.iAlignment = 8;
197 poolinfotokernel.iFlags = EShPoolNonPageAlignedBuffer;
198 r = Ldd.OpenUserPool(P1.Handle(), poolinfotokernel);
// User-side verification of the same attributes.
201 TShPoolInfo poolinfo;
202 P1.GetInfo(poolinfo);
203 test_Equal(*PtrBufSize, poolinfo.iBufSize);
204 test_Equal(KTestPoolSizeInBufs, poolinfo.iInitialBufs);
205 test_Equal(KTestPoolSizeInBufs, poolinfo.iMaxBufs);
206 test_Equal(0, poolinfo.iGrowTriggerRatio);
207 test_Equal(0, poolinfo.iGrowByRatio);
208 test_Equal(0, poolinfo.iShrinkHysteresisRatio);
209 test_Equal(8, poolinfo.iAlignment);
210 test(poolinfo.iFlags & EShPoolNonPageAlignedBuffer);
211 test(!(poolinfo.iFlags & EShPoolPageAlignedBuffer));
// --- Case 2: page-aligned, fixed-size pool ----------------------------
214 case ETestPageAligned:
217 test.Printf(_L("Page-aligned\n"));
218 test_Equal(0, P1.Handle());
220 TShPoolCreateInfo inf(TShPoolCreateInfo::EPageAlignedBuffer, *PtrBufSize, KTestPoolSizeInBufs);
221 r = P1.Create(inf,KDefaultPoolHandleFlags);
224 r = P1.SetBufferWindow(-1, ETrue);
227 TShPoolInfo poolinfo;
228 P1.GetInfo(poolinfo);
229 test_Equal(*PtrBufSize, poolinfo.iBufSize);
230 test_Equal(KTestPoolSizeInBufs, poolinfo.iInitialBufs);
231 test_Equal(KTestPoolSizeInBufs, poolinfo.iMaxBufs);
232 test_Equal(0, poolinfo.iGrowTriggerRatio);
233 test_Equal(0, poolinfo.iGrowByRatio);
234 test_Equal(0, poolinfo.iShrinkHysteresisRatio);
// Page-aligned pools report alignment = log2(MMU page size).
235 test_Equal(Log2(pagesize), poolinfo.iAlignment);
236 test(poolinfo.iFlags & EShPoolPageAlignedBuffer);
237 test(!(poolinfo.iFlags & EShPoolNonPageAlignedBuffer));
239 r = Ldd.OpenUserPool(P1.Handle(), poolinfo);
// --- Case 3: page-aligned pool that can grow (starts half-full) -------
243 case ETestPageAlignedGrowing:
244 // Page-aligned growing pool
246 test.Printf(_L("Page-aligned growing\n"));
247 test_Equal(0, P1.Handle());
249 TShPoolCreateInfo inf(TShPoolCreateInfo::EPageAlignedBuffer, *PtrBufSize, KTestPoolSizeInBufs / 2);
250 // Set shrink hysteresis high so pool can't shrink
251 r = inf.SetSizingAttributes(KTestPoolSizeInBufs, 25, 26, 25600);
253 r = P1.Create(inf,KDefaultPoolHandleFlags);
256 r = P1.SetBufferWindow(-1, ETrue);
259 TShPoolInfo poolinfo;
260 P1.GetInfo(poolinfo);
261 test_Equal(*PtrBufSize, poolinfo.iBufSize);
262 test_Equal(KTestPoolSizeInBufs / 2, poolinfo.iInitialBufs);
263 test_Equal(KTestPoolSizeInBufs, poolinfo.iMaxBufs);
264 test_Equal(25, poolinfo.iGrowTriggerRatio);
265 test_Equal(26, poolinfo.iGrowByRatio);
266 test_Equal(25600, poolinfo.iShrinkHysteresisRatio);
267 test_Equal(Log2(pagesize), poolinfo.iAlignment);
268 test(poolinfo.iFlags & EShPoolPageAlignedBuffer);
269 test(!(poolinfo.iFlags & EShPoolNonPageAlignedBuffer));
271 r = Ldd.OpenUserPool(P1.Handle(), poolinfo);
282 @SYMTestCaseDesc Create pool from kernel-side
285 1. Device Driver creates a pool (P2) and passes handle to this thread.
286 2. Test Thread opens pool and checks its attributes.
287 @SYMTestExpectedResults
290 @SYMTestPriority Critical
// Mirror of CreateUserPool for the kernel side: the driver creates pool
// P2 (Ldd.OpenKernelPool) and returns a handle which this thread adopts
// via P2.SetHandle(), then verifies the attributes (see @SYMTest block
// above). Cases parallel CreateUserPool's aPoolType cases.
// NOTE(review): sampled extract - switch statement, braces and error
// checks between the visible lines are missing; code left byte-identical.
293 void CreateKernelPool(TTestPoolType aPoolType)
295 test.Next(_L("Create kernel-side pool"));
298 r = HAL::Get(HAL::EMemoryPageSize, pagesize);
// --- Case 1: non-page-aligned pool, alignment log2 = 8 -----------------
304 case ETestNonPageAligned:
305 // Non-page-aligned pool
307 test.Printf(_L("Non-page-aligned\n"));
308 test_Equal(0, P2.Handle());
310 TShPoolCreateInfo inf(TShPoolCreateInfo::ENonPageAlignedBuffer, *PtrBufSize, KTestPoolSizeInBufs, 8);
311 r = Ldd.OpenKernelPool(inf, handle);
// Adopt the handle the driver created for us.
313 P2.SetHandle(handle);
315 TShPoolInfo poolinfo;
316 P2.GetInfo(poolinfo);
317 test_Equal(*PtrBufSize, poolinfo.iBufSize);
318 test_Equal(KTestPoolSizeInBufs, poolinfo.iInitialBufs);
319 test_Equal(KTestPoolSizeInBufs, poolinfo.iMaxBufs);
320 test_Equal(0, poolinfo.iGrowTriggerRatio);
321 test_Equal(0, poolinfo.iGrowByRatio);
322 test_Equal(0, poolinfo.iShrinkHysteresisRatio);
323 test_Equal(8, poolinfo.iAlignment);
324 test(poolinfo.iFlags & EShPoolNonPageAlignedBuffer);
325 test(!(poolinfo.iFlags & EShPoolPageAlignedBuffer));
// --- Case 2: page-aligned, fixed-size pool ----------------------------
328 case ETestPageAligned:
331 test.Printf(_L("Page-aligned\n"));
332 test_Equal(0, P2.Handle());
334 TShPoolCreateInfo inf(TShPoolCreateInfo::EPageAlignedBuffer, *PtrBufSize, KTestPoolSizeInBufs);
335 r = Ldd.OpenKernelPool(inf, handle);
337 P2.SetHandle(handle);
// Page-aligned pool: map buffers into this process's buffer window.
339 r = P2.SetBufferWindow(-1, ETrue);
342 TShPoolInfo poolinfo;
343 P2.GetInfo(poolinfo);
344 test_Equal(*PtrBufSize, poolinfo.iBufSize);
345 test_Equal(KTestPoolSizeInBufs, poolinfo.iInitialBufs);
346 test_Equal(KTestPoolSizeInBufs, poolinfo.iMaxBufs);
347 test_Equal(0, poolinfo.iGrowTriggerRatio);
348 test_Equal(0, poolinfo.iGrowByRatio);
349 test_Equal(0, poolinfo.iShrinkHysteresisRatio);
350 test_Equal(Log2(pagesize), poolinfo.iAlignment);
351 test(poolinfo.iFlags & EShPoolPageAlignedBuffer);
352 test(!(poolinfo.iFlags & EShPoolNonPageAlignedBuffer));
// --- Case 3: page-aligned pool that can grow (starts half-full) -------
355 case ETestPageAlignedGrowing:
356 // Page-aligned pool growing
358 test.Printf(_L("Page-aligned growing\n"));
359 test_Equal(0, P2.Handle());
361 TShPoolCreateInfo inf(TShPoolCreateInfo::EPageAlignedBuffer, *PtrBufSize, KTestPoolSizeInBufs / 2);
362 // Set shrink hysteresis high so pool can't shrink
363 r = inf.SetSizingAttributes(KTestPoolSizeInBufs, 25, 26, 25600);
365 r = Ldd.OpenKernelPool(inf, handle);
367 P2.SetHandle(handle);
369 r = P2.SetBufferWindow(-1, ETrue);
372 TShPoolInfo poolinfo;
373 P2.GetInfo(poolinfo);
374 test_Equal(*PtrBufSize, poolinfo.iBufSize);
375 test_Equal(KTestPoolSizeInBufs / 2, poolinfo.iInitialBufs);
376 test_Equal(KTestPoolSizeInBufs, poolinfo.iMaxBufs);
377 test_Equal(25, poolinfo.iGrowTriggerRatio);
378 test_Equal(26, poolinfo.iGrowByRatio);
379 test_Equal(25600, poolinfo.iShrinkHysteresisRatio);
380 test_Equal(Log2(pagesize), poolinfo.iAlignment);
381 test(poolinfo.iFlags & EShPoolPageAlignedBuffer);
382 test(!(poolinfo.iFlags & EShPoolNonPageAlignedBuffer));
392 @SYMTestCaseDesc Close pool from kernel-side
395 1. Device Driver closes P2.
396 2. Test Thread closes P2.
397 @SYMTestExpectedResults
398 1. OK and Access Count is now 1.
400 @SYMTestPriority Critical
// Close pool P2: the driver drops its reference first (per the @SYMTest
// steps above, the user-side close is expected to follow), then a
// supervisor barrier HAL call waits for the kernel to actually free the
// pool memory before the test continues.
403 void CloseKernelPool()
405 test.Next(_L("Close kernel-side pool"));
408 r = Ldd.CloseKernelPool();
413 // wait for memory to be freed
414 r = UserSvr::HalFunction(EHalGroupKernel, EKernelHalSupervisorBarrier, (TAny*)5000, 0);
421 @SYMTestCaseDesc Close pool from user-side
424 1. Test Thread closes P1.
425 2. Device Driver closes P1.
426 @SYMTestExpectedResults
427 1. OK and Access Count is now 1.
429 @SYMTestPriority Critical
// Body of the user-side pool close routine (per the @SYMTest description
// above: this thread closes P1, then the driver drops its reference).
// NOTE(review): the function's signature line is not visible in this
// sampled extract - presumably `void CloseUserPool()`; confirm against
// the full source. Code left byte-identical.
434 test.Next(_L("Close user-side pool"));
439 r = Ldd.CloseUserPool();
442 // wait for memory to be freed
443 r = UserSvr::HalFunction(EHalGroupKernel, EKernelHalSupervisorBarrier, (TAny*)5000, 0);
449 @SYMTestCaseDesc Buffer allocation from user-side
452 1. Test Thread creates a shared buffer on P1.
453 2. Test Thread passes buffer to Device Driver.
454 3. Device Driver obtains buffer and manipulates its contents.
455 4. Device Driver releases buffer.
456 5. Test Thread releases buffer.
457 @SYMTestExpectedResults
462 5. Ok. Buffer de-allocated.
463 @SYMTestPriority Critical
// Allocate a buffer on each pool (P1 then P2) from the user side, fill it
// with repeated KTestData1 blocks, hand it to the driver to manipulate
// (Ldd.ManipulateUserBuffer), then compare the contents block-by-block
// against `tmp` (set up on lines not visible in this sampled extract).
// NOTE(review): allocation calls, error checks and braces between the
// visible lines are missing; code left byte-identical.
466 void AllocateUserBuffer()
468 test.Next(_L("Allocate user-side buffer"));
// --- Pool 1 ------------------------------------------------------------
472 // Allocate buffer on POOL 1
479 TShPoolInfo poolinfo1;
480 P1.GetInfo(poolinfo1);
// Number of whole KTestData1-sized blocks that fit in one buffer.
481 TInt blocks = poolinfo1.iBufSize / KTestData1().Length();
483 for (i = 0; i < blocks; i++)
485 TPtr8(buf.Ptr() + (i * KTestData1().Length()), KTestData1().Length(),KTestData1().Length()).Copy(KTestData1());
487 r = Ldd.ManipulateUserBuffer(buf.Handle());
493 P1.GetInfo(poolinfo1);
494 blocks = poolinfo1.iBufSize / tmp.MaxSize();
496 for (i = 0 ; i < blocks; i++)
499 TPtrC8 ptrc(buf.Ptr() + (i * tmp.Length()), tmp.Length());
500 r = tmp.Compare(ptrc);
// --- Pool 2 (same sequence against the kernel-created pool) ------------
506 // Allocate buffer on POOL 2
512 TShPoolInfo poolinfo2;
513 P2.GetInfo(poolinfo2);
514 blocks = poolinfo2.iBufSize / KTestData1().Length(); // PC REMOVE
516 for (i = 0; i < blocks; i++)
518 TPtr8(buf.Ptr() + (i * KTestData1().Length()), KTestData1().Length(),KTestData1().Length()).Copy(KTestData1());
521 r = Ldd.ManipulateUserBuffer(buf.Handle());
524 P2.GetInfo(poolinfo2);
525 blocks = poolinfo2.iBufSize / tmp.MaxSize(); // PC REMOVE
527 for (i = 0 ; i < blocks; i++)
530 r = tmp.Compare(TPtr8(buf.Ptr() + (i * tmp.Length()), tmp.Length(), tmp.Length()));
539 @SYMTestCaseDesc Buffer allocation from kernel-side
542 1. Device Driver creates a buffer on P2.
543 2. Device Driver manipulates buffer and passes it to Test Thread.
544 3. Test Thread manipulates buffer and send it back to Device Driver.
545 4. Device Driver check buffer's contents and releases it.
546 @SYMTestExpectedResults
550 4. Ok. Buffer de-allocated.
551 @SYMTestPriority Critical
// Have the driver allocate a buffer on each pool (argument 0 selects P1,
// 1 selects P2 - see the "// P1"/"// P2" usage in AllocateKernelMax),
// adopt the returned handles, and verify the driver wrote repeated
// KTestData2 blocks into each buffer.
// NOTE(review): sampled extract - braces, error checks and buffer
// declarations between the visible lines are missing; code left
// byte-identical.
554 void AllocateKernelBuffer()
556 test.Next(_L("Allocate kernel-side buffer"));
// --- Buffer from pool P1 ----------------------------------------------
561 // Allocate buffer on POOL 1
562 r = Ldd.AllocateKernelBuffer(0, handle);
564 kbuf0.SetHandle(handle);
567 TShPoolInfo poolinfo1;
568 P1.GetInfo(poolinfo1);
569 TInt blocks = poolinfo1.iBufSize / KTestData2().Length();
570 for (i = 0; i < blocks; i++)
572 r = KTestData2().Compare(TPtr8(kbuf0.Ptr() + (i * KTestData2().Length()), KTestData2().Length(), KTestData2().Length()));
// --- Buffer from pool P2 ----------------------------------------------
578 // Allocate buffer on POOL 2
579 r = Ldd.AllocateKernelBuffer(1, handle);
581 kbuf1.SetHandle(handle);
583 TShPoolInfo poolinfo2;
584 P2.GetInfo(poolinfo2);
585 blocks = poolinfo2.iBufSize / KTestData2().Length();
587 for (i = 0; i < blocks; i++)
589 r = KTestData2().Compare(TPtr8(kbuf1.Ptr() + (i * KTestData2().Length()), KTestData2().Length(), KTestData2().Length()));
599 @SYMTestCaseDesc Allocate maximum number of buffers in a pool (user/kernel)
602 Allocate as many buffers on a pool as possible.
603 Free them all and re-allocate them again.
605 @SYMTestExpectedResults
607 @SYMTestPriority High
// Exhaust aPool from the user side: allocate buffers until KErrNoMemory,
// retrying once after a 1s delay to allow background (grow) resource
// allocation; then free them all and allocate again, finally proving one
// extra allocation still fails. See the @SYMTest description above.
// NOTE(review): sampled extract - loop braces, Close() loops and several
// declarations (buf, extrabuf) are missing between the visible lines;
// code left byte-identical.
610 void AllocateUserMax(RShPool& aPool)
612 test.Next(_L("Exhaust pool memory from user-side"));
615 TShPoolInfo poolinfo;
616 aPool.GetInfo(poolinfo);
617 TBool aligned = (poolinfo.iFlags & EShPoolPageAlignedBuffer);
618 RDebug::Printf("aligned=%d",aligned);
620 RArray<RShBuf> bufarray;
// Allocate until the pool refuses.
624 r = buf.Alloc(aPool);
625 if (r==KErrNoMemory && KTestPoolSizeInBufs>bufarray.Count())
627 // try again after a delay, to allow for background resource allocation
629 User::After(1000000);
630 r = buf.Alloc(aPool);
634 r = bufarray.Append(buf);
639 while (r == KErrNone);
// Allocation must ultimately fail with KErrNoMemory, and we must have
// obtained at least the nominal pool capacity.
640 test_Equal(KErrNoMemory, r);
641 test_Compare(KTestPoolSizeInBufs, <=, bufarray.Count());
// Free everything...
643 TInt n = bufarray.Count();
646 bufarray[--n].Close();
// ...then re-allocate the same number of buffers.
653 while (n<bufarray.Count())
655 r = bufarray[n].Alloc(aPool);
658 // try again after a delay, to allow for background resource allocation
659 User::After(1000000);
660 r = bufarray[n].Alloc(aPool);
662 test_Assert(r == KErrNone, test.Printf(_L("n=%d r=%d\n"), n, r));
// Freshly re-allocated buffers must not still contain the 0x99 fill.
664 test(CheckNotFillShBuf(bufarray[n],0x99));
// One more allocation beyond capacity must fail.
669 r = extrabuf.Alloc(aPool);
670 test_Equal(KErrNoMemory, r);
674 bufarray[--n].Close();
// Kernel-side counterpart of AllocateUserMax: the driver exhausts each
// pool (0 = P1, 1 = P2) and reports how many buffers it obtained, which
// must equal the pool capacity.
680 void AllocateKernelMax()
682 test.Next(_L("Exhaust pool memory from kernel-side"));
685 r = Ldd.AllocateMax(0, allocated); // P1
687 test_Equal(KTestPoolSizeInBufs, allocated);
688 r = Ldd.AllocateMax(1, allocated); // P2
690 test_Equal(KTestPoolSizeInBufs, allocated);
696 @SYMTestCaseDesc Buffer alignment (kernel/user)
699 1. Test Thread creates several pools with different buffer alignment
701 2. Test Thread allocates buffers on all pools.
702 3. Test Thread frees all buffers and close pools.
703 @SYMTestExpectedResults
705 2. Buffers are aligned to the desired boundary.
707 @SYMTestPriority High
// Create non-page-aligned pools for every alignment log2 from 0 up to
// log2(pagesize), allocate 20 buffers from each, and assert every buffer
// base is aligned to max(requested, KTestMinimumAlignmentLog2). Then
// repeat once with a page-aligned pool, asserting page alignment.
// NOTE(review): sampled extract - pool/buf declarations, Close() calls
// and the delay statements are missing between the visible lines; code
// left byte-identical.
710 void BufferAlignmentUser()
712 test.Next(_L("Buffer alignment (User)"));
715 r = HAL::Get(HAL::EMemoryPageSize, pagesize);
718 // Non page aligned buffers
720 for (i = 0; i <= Log2(pagesize); i++)
722 test.Printf(_L("."));
723 TShPoolCreateInfo inf(TShPoolCreateInfo::ENonPageAlignedBuffer, *PtrBufSize, 20, i); // TODO: Change minbufs back to 8 when the pool growing code works
725 r = pool.Create(inf,KDefaultPoolHandleFlags);
730 for (j = 0; j < 20; j++)
732 r = buf[j].Alloc(pool);
// The pool enforces a minimum alignment even when a smaller one is asked for.
737 if (alignment < KTestMinimumAlignmentLog2)
739 alignment = KTestMinimumAlignmentLog2;
741 for (j = 0; j < 20; j++)
// Base address must have no bits set below the alignment boundary.
743 test_Assert(!((TUint32) buf[j].Ptr() & ((1 << alignment) - 1)),
744 test.Printf(_L("Pool%d buf[%d].Base() == 0x%08x"), i, j, buf[j].Ptr()));
747 for (j = 0; j < 20; j++)
752 // delay to allow the management dfc to run and close pool
755 test.Printf(_L("\n"));
757 // Page aligned buffers
758 TShPoolCreateInfo inf(TShPoolCreateInfo::EPageAlignedBuffer, *PtrBufSize, 20); // TODO: Change minbufs back to 8 when the pool growing code works
760 r = pool.Create(inf,KDefaultPoolHandleFlags);
763 r = pool.SetBufferWindow(-1, ETrue);
768 for (j = 0; j < 20; j++)
770 r = buf[j].Alloc(pool);
774 for (j = 0; j < 20; j++)
// Page-aligned pool: base must sit on an MMU page boundary.
776 test_Assert(!((TUint32) buf[j].Ptr() & (pagesize - 1)),
777 test.Printf(_L("buf[%d].Base() == 0x%08x"), j, buf[j].Ptr()));
779 for (j = 0; j < 20; j++)
// Kernel-side alignment check: the driver performs the per-alignment
// buffer test (Ldd.BufferAlignmentKernel) for each alignment log2 below
// log2(pagesize).
786 void BufferAlignmentKernel()
788 test.Next(_L("Buffer alignment (Kernel)"));
792 r = HAL::Get(HAL::EMemoryPageSize, pagesize);
795 for (TInt i = 0; i < Log2(pagesize); i++)
797 test.Printf(_L("."));
798 r = Ldd.BufferAlignmentKernel(*PtrBufSize, i);
800 // delay to allow the management dfc to run
803 test.Printf(_L("\n"));
808 @SYMTestCaseDesc Create pool at specific physical address
811 1. Device Driver allocates memory chunk.
812 2. Device Driver requests physical address of this memory chunk.
813 3. Device Driver creates pool at physical address of the memory chunk.
814 3. Device Driver allocate buffers on pool, free them and close pool.
815 @SYMTestExpectedResults
820 @SYMTestPriority High
// Drive the physical-address pool tests in the LDD: first with a
// physically contiguous chunk, then with a discontiguous one (see the
// @SYMTest steps above - the driver creates, exercises and closes the
// pools kernel-side).
823 void CreateKernelPoolPhysAddr()
825 test.Next(_L("Create pool at specific physical address"));
827 test.Start(_L("Contiguous physical memory"));
828 r = Ldd.CreatePoolPhysAddrCont(*PtrBufSize);
830 test.Next(_L("Discontiguous physical memory"));
831 r = Ldd.CreatePoolPhysAddrNonCont(*PtrBufSize);
838 @SYMTestCaseDesc Buffer separation and overwrites
841 1. Test Thread creates two pools:
842 - A pool with no guard pages.
843 - A pool with guard pages.
844 2. Allocate two buffers on each pool.
845 3. Test Thread creates Secondary Thread.
846 4. Secondary Thread starts reading contents of the first buffer and keep
847 reading beyond its limits (using a pointer, not a descriptor).
848 5. Secondary Thread starts writing on the first buffer and keep writing beyond
849 its limits (using a pointer, not a descriptor).
850 6. Free buffers and close pools.
851 @SYMTestExpectedResults
855 4. Secondary Thread panics when it attempts to read the guard page, if there
856 is one. Otherwise, it moves on to the second buffer. (Secondary Thread will
857 have to be restarted).
858 5. Secondary Thread panics when it attempts to write on the guard page if
859 there is one. Otherwise, it carries on writing on to the second buffer.
861 @SYMTestPriority High
// Secondary-thread entry point for the guard-page tests: walks *PtrBufSize
// bytes starting at aArg with a raw pointer, checking each byte against
// `val` (set on lines not visible in this sampled extract). Reading into a
// guard page is expected to panic this thread (KERN-EXEC 3), which the
// parent test observes via Logon/ExitType.
864 TInt ThreadGuardPagesRead(TAny* aArg)
866 TUint8* ptr = (TUint8*) aArg;
871 TInt bufsize = *PtrBufSize;
875 for (i = 0; i < bufsize; i++)
877 if (*(ptr + i) != val)
// Secondary-thread entry point: writes *PtrBufSize bytes starting at aArg
// through a raw pointer. Writing into a guard page is expected to panic
// this thread (KERN-EXEC 3), observed by the parent test.
// NOTE(review): the loop body (the actual store) is on lines not visible
// in this sampled extract; code left byte-identical.
889 TInt ThreadGuardPagesWrite(TAny* aArg)
891 TUint8* ptr = (TUint8*) aArg;
896 TInt bufsize = *PtrBufSize;
898 for (i = 0; i < bufsize; i++)
// Guard-page test body (per the @SYMTest description above): pool1 has no
// guard pages, pool2 (same create-info plus SetGuardPages()) does. For
// each buffer, spawn ThreadGuardPagesRead/Write threads at increasingly
// aggressive offsets: (1) within the buffer, (2) up to the page boundary
// when the buffer size is not page-multiple, (3) onto the first byte of
// the following page - which must succeed on pool1 (adjacent buffer) but
// panic with KERN-EXEC 3 on pool2 (guard page).
// NOTE(review): the function's signature line and many statements
// (declarations of thread/rs/offset, Resume() calls, braces) are not
// visible in this sampled extract; code left byte-identical.
907 test.Next(_L("Guard pages"));
910 r = HAL::Get(HAL::EMemoryPageSize, pagesize);
// pool1: page-aligned, no guard pages; pool2: same but with guard pages.
916 TShPoolCreateInfo inf(TShPoolCreateInfo::EPageAlignedBuffer, *PtrBufSize, KTestPoolSizeInBufs);
917 r = pool1.Create(inf,KDefaultPoolHandleFlags);
920 r = pool1.SetBufferWindow(-1, ETrue);
923 r = inf.SetGuardPages();
925 r = pool2.Create(inf,KDefaultPoolHandleFlags);
928 r = pool2.SetBufferWindow(-1, ETrue);
// Allocate every buffer in both pools up front.
932 RShBuf bufs1[KTestPoolSizeInBufs];
933 RShBuf bufs2[KTestPoolSizeInBufs];
935 for (i = 0; i < KTestPoolSizeInBufs; i++)
937 r = bufs1[i].Alloc(pool1);
938 test_Assert(r == KErrNone, test.Printf(_L("Pool1: i=%d r=%d\n"), i, r));
939 TPtr8 ptr(bufs1[i].Ptr(), bufs1[i].Size(),bufs1[i].Size());
942 for (i = 0; i < KTestPoolSizeInBufs; i++)
944 r = bufs2[i].Alloc(pool2);
945 test_Assert(r == KErrNone, test.Printf(_L("Pool2: i=%d r=%d\n"), i, r));
946 TPtr8 ptr(bufs2[i].Ptr(), bufs1[i].Size(),bufs1[i].Size());
// ---------------- Read tests -------------------------------------------
950 _LIT(KTestThreadRead, "GuardPagesReadTS%dP%dB%d");
951 for (i = 0; i < KTestPoolSizeInBufs - 1; i++)
957 // 1. Simple read within buffer
959 threadname.Format(KTestThreadRead, 1, 1, i);
960 r = thread.Create(threadname, ThreadGuardPagesRead, KDefaultStackSize, KMinHeapSize, KMinHeapSize,
961 (TAny*) bufs1[i].Ptr());
965 User::WaitForRequest(rs);
966 test_KErrNone(rs.Int());
967 test_Equal(EExitKill, thread.ExitType());
968 test_KErrNone(thread.ExitReason());
971 threadname.Format(KTestThreadRead, 1, 2, i);
972 r = thread.Create(threadname, ThreadGuardPagesRead, KDefaultStackSize, KMinHeapSize, KMinHeapSize,
973 (TAny*) bufs2[i].Ptr());
977 User::WaitForRequest(rs);
978 test_KErrNone(rs.Int());
979 test_Equal(EExitKill, thread.ExitType());
980 test_KErrNone(thread.ExitReason());
983 // 2. If the buffer size is not a multiple of the MMU page size, it should be
984 // possible to read after the buffer end until the page boundary
985 if (*PtrBufSize % pagesize)
988 threadname.Format(KTestThreadRead, 2, 1, i);
989 r = thread.Create(threadname, ThreadGuardPagesRead, KDefaultStackSize, KMinHeapSize, KMinHeapSize,
990 (TAny*) (bufs1[i].Ptr() + pagesize - *PtrBufSize % pagesize));
994 User::WaitForRequest(rs);
// The content past the buffer end is unspecified, so the thread may report
// a mismatch (KErrUnknown) - but it must not crash.
995 if (rs.Int() != KErrNone)
997 test_Equal(KErrUnknown, rs.Int());
998 test_Equal(KErrUnknown, thread.ExitReason());
1000 test_Equal(EExitKill, thread.ExitType());
1003 threadname.Format(KTestThreadRead, 2, 2, i);
1004 r = thread.Create(threadname, ThreadGuardPagesRead, KDefaultStackSize, KMinHeapSize, KMinHeapSize,
1005 (TAny*) (bufs2[i].Ptr() + pagesize - *PtrBufSize % pagesize));
1009 User::WaitForRequest(rs);
1010 if (rs.Int() != KErrNone)
1012 test_Equal(KErrUnknown, rs.Int());
1013 test_Equal(KErrUnknown, thread.ExitReason());
1015 test_Equal(EExitKill, thread.ExitType());
1019 // 3. Now we attempt to read the first byte on the next page after the end of
1022 if (*PtrBufSize % pagesize)
1024 offset = pagesize - *PtrBufSize % pagesize + 1;
1031 if (bufs1[i + 1].Ptr() == bufs1[i].Ptr() + RoundUp(*PtrBufSize, Log2(pagesize)))
1033 // Only perform this test if the next buffer comes immediately next to this
1034 // one. This is not necessarily the case on the Flexible Memory Model.
1035 threadname.Format(KTestThreadRead, 3, 1, i);
1036 r = thread.Create(threadname, ThreadGuardPagesRead, KDefaultStackSize, KMinHeapSize, KMinHeapSize,
1037 (TAny*) (bufs1[i].Ptr() + offset));
1041 User::WaitForRequest(rs);
1042 if (rs.Int() != KErrNone) // No guard page, so it should be fine
1044 test_Equal(KErrUnknown, rs.Int());
1045 test_Equal(KErrUnknown, thread.ExitReason());
1047 test_Equal(EExitKill, thread.ExitType());
// Guarded pool: the read must panic; disable JIT debugging so the panic
// kills only the worker thread.
1051 TBool jit = User::JustInTime();
1052 User::SetJustInTime(EFalse);
1053 threadname.Format(KTestThreadRead, 3, 2, i);
1054 r = thread.Create(threadname, ThreadGuardPagesRead, KDefaultStackSize, KMinHeapSize, KMinHeapSize,
1055 (TAny*) (bufs2[i].Ptr() + offset));
1059 User::WaitForRequest(rs);
1060 test_Equal(3, rs.Int());
1061 test_Equal(EExitPanic, thread.ExitType());
1062 test_Equal(3, thread.ExitReason()); // KERN-EXEC 3
1064 User::SetJustInTime(jit);
// ---------------- Write tests (mirror of the read tests) ---------------
1067 _LIT(KTestThreadWrite, "GuardPagesWriteTS%dP%dB%d");
1068 for (i = 0; i < KTestPoolSizeInBufs - 1; i++)
1070 TBuf<40> threadname;
1074 // 1. Simple write within buffer
1076 threadname.Format(KTestThreadWrite, 1, 1, i);
1077 r = thread.Create(threadname, ThreadGuardPagesWrite, KDefaultStackSize, KMinHeapSize, KMinHeapSize,
1078 (TAny*) bufs1[i].Ptr());
1082 User::WaitForRequest(rs);
1083 test_KErrNone(rs.Int());
1084 test_Equal(EExitKill, thread.ExitType());
1085 test_KErrNone(thread.ExitReason());
1088 threadname.Format(KTestThreadWrite, 1, 2, i);
1089 r = thread.Create(threadname, ThreadGuardPagesWrite, KDefaultStackSize, KMinHeapSize, KMinHeapSize,
1090 (TAny*) bufs2[i].Ptr());
1094 User::WaitForRequest(rs);
1095 test_KErrNone(rs.Int());
1096 test_Equal(EExitKill, thread.ExitType());
1097 test_KErrNone(thread.ExitReason());
1100 // 2. If the buffer size is not a multiple of the MMU page size, it should be
1101 // possible to write after the buffer end until the page boundary
1102 if (*PtrBufSize % pagesize)
1105 threadname.Format(KTestThreadWrite, 2, 1, i);
1106 r = thread.Create(threadname, ThreadGuardPagesWrite, KDefaultStackSize, KMinHeapSize, KMinHeapSize,
1107 (TAny*) (bufs1[i].Ptr() + pagesize - *PtrBufSize % pagesize));
1111 User::WaitForRequest(rs);
1112 test_KErrNone(rs.Int());
1113 test_Equal(EExitKill, thread.ExitType());
1114 test_KErrNone(thread.ExitReason());
1117 threadname.Format(KTestThreadWrite, 2, 2, i);
1118 r = thread.Create(threadname, ThreadGuardPagesWrite, KDefaultStackSize, KMinHeapSize, KMinHeapSize,
1119 (TAny*) (bufs2[i].Ptr() + pagesize - *PtrBufSize % pagesize));
1123 User::WaitForRequest(rs);
1124 test_KErrNone(rs.Int());
1125 test_Equal(EExitKill, thread.ExitType());
1126 test_KErrNone(thread.ExitReason());
1130 // 3. Now we attempt to write on the first byte on the next page after the
1131 // end of our buffer.
1133 if (*PtrBufSize % pagesize)
1135 offset = pagesize - *PtrBufSize % pagesize + 1;
1142 if (bufs1[i + 1].Ptr() == bufs1[i].Ptr() + RoundUp(*PtrBufSize, Log2(pagesize)))
1144 // Only perform this test if the next buffer comes immediately next to this
1145 // one. This is not necessarily the case on the Flexible Memory Model.
1146 threadname.Format(KTestThreadWrite, 3, 1, i);
1147 r = thread.Create(threadname, ThreadGuardPagesWrite, KDefaultStackSize, KMinHeapSize, KMinHeapSize,
1148 (TAny*) (bufs1[i].Ptr() + offset));
1152 User::WaitForRequest(rs);
1153 test_KErrNone(rs.Int());
1154 test_Equal(EExitKill, thread.ExitType());
1155 test_KErrNone(thread.ExitReason());
1160 TBool jit = User::JustInTime();
1161 User::SetJustInTime(EFalse);
1162 threadname.Format(KTestThreadWrite, 3, 2, i);
1163 r = thread.Create(threadname, ThreadGuardPagesWrite, KDefaultStackSize, KMinHeapSize, KMinHeapSize,
1164 (TAny*) (bufs2[i].Ptr() + offset));
1168 User::WaitForRequest(rs);
1169 test_Equal(3, rs.Int());
1170 test_Equal(EExitPanic, thread.ExitType());
1171 test_Equal(3, thread.ExitReason()); // KERN-EXEC 3
1173 User::SetJustInTime(jit);
// Cleanup: release every buffer (Close() calls on lines not visible here).
1177 for (i = 0; i < KTestPoolSizeInBufs; i++)
1188 @SYMTestCaseDesc Buffer mapping
1191 1. Test Thread allocates buffer on a mappable pool.
1192 2. Test Thread spawns Slave Process.
1193 3. Test Thread passes buffer handle to Slave Process.
1194 4. Slave Process attempts to read buffer then write to buffer.
1195 5. Slave Process maps buffer.
1196 6. Slave Process attempts to read buffer then write to buffer.
1197 7. Slave Process unmaps buffer.
1198 8. Slave Process attempts to read buffer then write to buffer.
1199 9. Test Thread kills Slave Process and frees buffer.
1200 @SYMTestExpectedResults
1204 4. Slave Process panics. (and will have to be restarted)
1208 8. Slave Process panics.
1210 @SYMTestPriority High
// Worker-thread entry point for the buffer-mapping tests: reads every byte
// of the RShBuf passed via aArg through a volatile pointer. Returns
// KErrArgument on a bad argument (the guard condition itself is on a line
// not visible in this sampled extract). Reading an unmapped buffer is
// expected to panic (KERN-EXEC 3), as asserted by the caller.
1213 TInt ThreadBufferMappingRead(TAny* aArg)
1217 return KErrArgument;
1219 RShBuf* buf = (RShBuf*) aArg;
// volatile so the compiler cannot optimise the reads away.
1222 volatile TUint8* ptr = buf->Ptr();
1224 for (i = 0; i < buf->Size(); i++)
// Worker-thread entry point: writes to the RShBuf passed via aArg through
// a TPtr8 over its whole extent. Writing an unmapped or read-only buffer
// is expected to panic (KERN-EXEC 3), as asserted by the caller.
// NOTE(review): the actual write (e.g. a Fill/Copy on ptr) is on lines not
// visible in this sampled extract; code left byte-identical.
1231 TInt ThreadBufferMappingWrite(TAny* aArg)
1235 return KErrArgument;
1237 RShBuf* buf = (RShBuf*) aArg;
1238 TPtr8 ptr(buf->Ptr(), buf->Size(),buf->Size());
1243 const TInt KTestBufferMappingPoolTypes = 8;
1244 const TInt KTestBufferMappingTypes = 8;
1246 void BufferMapping()
1248 test.Next(_L("Buffer Mapping"));
1250 test.Printf(_L("Does not run on the emulator. Skipped\n"));
1253 RShPool pool[KTestBufferMappingPoolTypes];
1254 RShBuf buf[KTestBufferMappingTypes][KTestBufferMappingPoolTypes];
1255 TUint poolflags[KTestBufferMappingPoolTypes];
1256 TInt bufferwindow[KTestBufferMappingPoolTypes];
1257 TShPoolCreateInfo inf(TShPoolCreateInfo::EPageAlignedBuffer, *PtrBufSize, KTestBufferMappingTypes);
1260 // ------------------------------------------
1261 // Pool no. AutoMap Writeable BufWindow
1272 test.Printf(_L("Create pools:"));
1273 for (i = 0; i < KTestBufferMappingPoolTypes; i++)
1275 poolflags[i] = EShPoolAllocate;
1276 bufferwindow[i] = 0;
1279 poolflags[i] |= EShPoolAutoMapBuf;
1283 poolflags[i] |= EShPoolWriteable;
1287 bufferwindow[i] = -1;
1289 r = pool[i].Create(inf, poolflags[i] & ~EShPoolAutoMapBuf);
1291 r = pool[i].SetBufferWindow(bufferwindow[i], poolflags[i] & EShPoolAutoMapBuf);
1293 test.Printf(_L("."));
1295 test.Printf(_L("\n"));
1298 // Buffer no. Actions
1299 // 0 Alloc unmapped.
1300 // 1 Alloc unmapped then unmap again.
1301 // 2 Default Alloc. Unmap if it is a AutoMap pool.
1302 // 3 Alloc unmapped. Map Read-Only.
1303 // 4 Default Alloc. Unmap if it is a R/W pool and re-map Read-Only.
1304 // 5 Alloc unmapped. Map R/W
1305 // 6 Default Alloc. Unmap and re-map.
1306 // 7 Default Alloc R/W. Map again with Read-Only setting.
1307 // Depending on the pool type, the actions above might not always be possible.
1309 // Buffer allocation
1311 test.Printf(_L("Allocate buffers\n"));
1312 for (j = 0; j < KTestBufferMappingPoolTypes; j++)
1314 test.Printf(_L("\nPool %d:"), j);
1315 for (i = 0; i < KTestBufferMappingTypes; i++)
1317 switch (i % KTestBufferMappingTypes)
1322 // This should always result in an unmapped buffer
1323 r = buf[i][j].Alloc(pool[j], EShPoolAllocNoMap);
1326 if((i % KTestBufferMappingTypes) == 1)
1328 // Alloc unmapped then unmap again.
1329 r = buf[i][j].UnMap();
1330 test_Equal(KErrNotFound, r);
1334 r = buf[i][j].Alloc(pool[j]);
1335 if (poolflags[j] & EShPoolAutoMapBuf)
1337 if (bufferwindow[j] == 0)
1339 // Can't ask for a mapped buffer when buffer window is not set
1340 test_Equal(KErrNoMemory, r);
1344 // Alloc'd buffer was mapped - unmap it
1346 r = buf[i][j].UnMap();
1352 // Buffer not mapped
1357 // Read-Only buffers
1359 r = buf[i][j].Alloc(pool[j], EShPoolAllocNoMap);
1361 r = buf[i][j].Map(ETrue);
1362 if (bufferwindow[j])
1368 test_Equal(KErrNoMemory, r);
1372 r = buf[i][j].Alloc(pool[j]);
1373 if (poolflags[j] & EShPoolAutoMapBuf)
1375 if (bufferwindow[j] == 0)
1377 // Can't ask for a mapped buffer when buffer window is not set
1378 test_Equal(KErrNoMemory, r);
1380 else if (poolflags[j] & EShPoolWriteable)
1382 // Alloc'd buffer was mapped R/W - re-map it R/O
1384 r = buf[i][j].UnMap();
1386 r = buf[i][j].Map(ETrue);
1397 // Buffer not mapped
1399 if (bufferwindow[j])
1401 if (poolflags[j] & EShPoolWriteable)
1403 // Explicitly map Read-Only
1404 r = buf[i][j].Map(ETrue);
1409 // If Pool is RO, map default
1410 r = buf[i][j].Map();
1417 r = buf[i][j].Map(ETrue);
1418 test_Equal(KErrNoMemory, r);
1423 // Mapped for Read-Write
1425 r = buf[i][j].Alloc(pool[j], EShPoolAllocNoMap);
1427 r = buf[i][j].Map();
1428 if (bufferwindow[j] == 0)
1430 test_Equal(KErrNoMemory, r);
1432 else if (!(poolflags[j] & EShPoolWriteable))
1443 r = buf[i][j].Alloc(pool[j]);
1444 if (poolflags[j] & EShPoolAutoMapBuf)
1446 if (bufferwindow[j] == 0)
1448 // Can't ask for a mapped buffer when buffer window is not set
1449 test_Equal(KErrNoMemory, r);
1451 else if (poolflags[j] & EShPoolWriteable)
1453 // Alloc'd buffer was mapped R/W
1456 if((i % KTestBufferMappingTypes) == 7)
1458 // Mapped for Read-Write then remapped as Read-Only
1459 r = buf[i][j].Map(true);
1460 test_Equal(KErrAlreadyExists, r);
1466 // Buffer not mapped
1468 if (bufferwindow[j])
1470 if (poolflags[j] & EShPoolWriteable)
1473 r = buf[i][j].Map();
1476 if((i % KTestBufferMappingTypes) == 7)
1478 // Mapped for Read-Write then remapped as Read-Only
1479 r = buf[i][j].Map(true);
1480 test_Equal(KErrAlreadyExists, r);
1487 r = buf[i][j].Map(ETrue);
1488 test_Equal(KErrNoMemory, r);
1493 default: test(EFalse);
1495 test.Printf(_L("."));
1498 test.Printf(_L("\n"));
1500 // Read and write tests
1501 _LIT(KTestThreadName, "BufferMappingBuf%d(Test%d)");
1502 test.Printf(_L("Read & Write tests\n"));
1503 for (j = 0; j < KTestBufferMappingPoolTypes; j++)
1505 for (i = 0; i < KTestBufferMappingTypes; i++)
1507 if (buf[i][j].Handle())
1509 switch (i % KTestBufferMappingTypes)
1513 // Buffer not mapped - Read should fail
1514 if (buf[i][j].Ptr() == NULL)
1517 TRequestStatus threadrs;
1518 TBuf<40> threadname;
1519 threadname.Format(KTestThreadName, i, (i % KTestBufferMappingTypes) + 1);
1520 r = thread.Create(threadname, ThreadBufferMappingRead, KDefaultStackSize, KMinHeapSize, KMinHeapSize, (TAny*) &buf[i][j]);
1522 thread.Logon(threadrs);
1524 User::WaitForRequest(threadrs);
1525 test_Equal(3, threadrs.Int());
1526 test_Equal(EExitPanic, thread.ExitType());
1527 test_Equal(3, thread.ExitReason()); // KERN-EXEC 3
1528 CLOSE_AND_WAIT(thread);
1529 // Map buffer read-only for next test
1530 r = buf[i][j].Map(ETrue);
1531 if (bufferwindow[j])
1537 test_Equal(KErrNoMemory, r);
1542 // Buffer mapped for R/O access - Read should not fail
1543 if (bufferwindow[j] == 0)
1550 TRequestStatus threadrs;
1551 TBuf<40> threadname;
1552 threadname.Format(KTestThreadName, i, (i % KTestBufferMappingTypes) + 1);
1553 r = thread.Create(threadname, ThreadBufferMappingRead, KDefaultStackSize, KMinHeapSize, KMinHeapSize, (TAny*) &buf[i][j]);
1555 thread.Logon(threadrs);
1557 User::WaitForRequest(threadrs);
1558 test_KErrNone(threadrs.Int());
1559 test_Equal(EExitKill, thread.ExitType());
1560 test_KErrNone(thread.ExitReason());
1561 CLOSE_AND_WAIT(thread);
1563 // Write should fail
1564 if (buf[i][j].Ptr())
1567 TRequestStatus threadrs;
1568 TBuf<40> threadname;
1569 threadname.Format(KTestThreadName, i, (i % KTestBufferMappingTypes) + 2);
1570 r = thread.Create(threadname, ThreadBufferMappingWrite, KDefaultStackSize, KMinHeapSize, KMinHeapSize,(TAny*) &buf[i][j]);
1572 thread.Logon(threadrs);
1574 User::WaitForRequest(threadrs);
1575 test_Equal(3, threadrs.Int());
1576 test_Equal(EExitPanic, thread.ExitType());
1577 test_Equal(3, thread.ExitReason()); // KERN-EXEC 3
1578 CLOSE_AND_WAIT(thread);
1579 // Map buffer read-write for next test
1580 r = buf[i][j].UnMap();
1581 if(r != KErrNotFound)
1585 r = buf[i][j].Map();
1590 // Buffer mapped for R/W access - Write should not fail
1591 if (bufferwindow[j] == 0 || !(poolflags[j] & EShPoolWriteable))
1598 TRequestStatus threadrs;
1599 TBuf<40> threadname;
1600 threadname.Format(KTestThreadName, i, (i % KTestBufferMappingTypes) + 1);
1601 r = thread.Create(threadname, ThreadBufferMappingWrite, KDefaultStackSize, KMinHeapSize, KMinHeapSize,(TAny*) &buf[i][j]);
1603 thread.Logon(threadrs);
1605 User::WaitForRequest(threadrs);
1606 test_KErrNone(threadrs.Int());
1607 test_Equal(EExitKill, thread.ExitType());
1608 test_KErrNone(thread.ExitReason());
1609 CLOSE_AND_WAIT(thread);
1610 // Unmap buffer for next test
1611 r = buf[i][j].UnMap();
1614 // Buffer not mapped - Read should fail
1615 if (buf[i][j].Ptr())
1618 TRequestStatus threadrs;
1619 TBuf<40> threadname;
1620 threadname.Format(KTestThreadName, i, (i % KTestBufferMappingTypes) + 2);
1621 r = thread.Create(threadname, ThreadBufferMappingRead, KDefaultStackSize, KMinHeapSize, KMinHeapSize,(TAny*) &buf[i][j]);
1623 thread.Logon(threadrs);
1625 User::WaitForRequest(threadrs);
1626 test_Equal(3, threadrs.Int());
1627 test_Equal(EExitPanic, thread.ExitType());
1628 test_Equal(3, thread.ExitReason()); // KERN-EXEC 3
1629 CLOSE_AND_WAIT(thread);
1634 test.Printf(_L("."));
1637 test.Printf(_L("\n"));
// Buffer-window test (tail; the function signature is above this point).
// Exercises RShPool::SetBufferWindow(): allocates unmapped buffers from a
// page-aligned pool, then checks mapping behaviour against windows of
// various sizes. NOTE(review): assumes `pool`, `r` and `i` are declared
// earlier in this function — confirm against full file.
1644 test.Next(_L("Buffer Window tests"));
1646 test.Printf(_L("Does not run on the emulator. Skipped\n"));
// One spare slot beyond 2x pool size, used to probe allocation failure.
1650 RShBuf buf[KTestPoolSizeInBufs * 2 + 1];
1651 TShPoolCreateInfo inf(TShPoolCreateInfo::EPageAlignedBuffer, *PtrBufSize, KTestPoolSizeInBufs * 2);
1652 r = pool.Create(inf, KDefaultPoolHandleFlags);
1655 // Allocate buffer but don't map them to this process memory
1657 for (i = 0; i < KTestPoolSizeInBufs * 2; i++)
1659 r = buf[i].Alloc(pool, EShPoolAllocNoMap);
// Pool exhausted: one extra alloc must fail with KErrNoMemory.
1664 r = buf[KTestPoolSizeInBufs * 2].Alloc(pool, EShPoolAllocNoMap);
1665 test_Equal(KErrNoMemory, r);
1667 test_Equal(KErrNoMemory, r);
1669 // Open a one-buffer window
1670 r = pool.SetBufferWindow(1, ETrue);
1674 TPtr8 ptr0(buf[0].Ptr(), buf[0].Size(),buf[0].Size());
// Window already full: mapping a second buffer must fail.
1677 test_Equal(KErrNoMemory, r);
1682 TPtr8 ptr1(buf[0].Ptr(), buf[0].Size(),buf[0].Size());
1685 test_Equal(KErrNoMemory, r);
1687 // Enlarge window by one buffer
// A window may only be set once per pool handle, hence KErrAlreadyExists.
1688 r = pool.SetBufferWindow(2, ETrue);
1689 test_Equal(KErrAlreadyExists, r);
1691 // Close All buffers
1692 for (i = 0; i < KTestPoolSizeInBufs * 2; i++)
// Second pass: recreate the pool with a window covering half of it.
1698 r = pool.Create(inf, KDefaultPoolHandleFlags);
1701 r = pool.SetBufferWindow(KTestPoolSizeInBufs, ETrue); // Half the pool size
1703 for (i = 0; i < KTestPoolSizeInBufs * 2 - 1; i++)
1705 if (i < KTestPoolSizeInBufs)
// First half fits in the window: auto-mapping alloc is allowed.
1707 r = buf[i].Alloc(pool, 0);
1709 TPtr8 ptr(buf[0].Ptr(), buf[0].Size(),buf[0].Size());
// Second half: window is full, so allocate without mapping.
1714 r = buf[i].Alloc(pool, EShPoolAllocNoMap);
1718 r = buf[KTestPoolSizeInBufs * 2].Alloc(pool, 0);
1719 test_Equal(KErrNoMemory, r);
1720 r = buf[KTestPoolSizeInBufs].Map();
1721 test_Equal(KErrNoMemory, r);
1722 r = buf[KTestPoolSizeInBufs * 2].Alloc(pool, EShPoolAllocNoMap);
1726 for (i = 0; i < (KTestPoolSizeInBufs * 2) + 1; i++)
1732 // Try again with automap set to false
1734 r = pool2.Create(inf, KDefaultPoolHandleFlags);
1736 for (i = 0; i < KTestPoolSizeInBufs * 2; i++)
1738 r = buf[i].Alloc(pool2, 0);
// -1 window size maps the whole pool; EFalse => buffers not auto-mapped.
1741 r = pool2.SetBufferWindow(-1, EFalse);
1743 for (i = 0; i < KTestPoolSizeInBufs * 2; i++)
// Explicitly map each buffer read-only.
1745 r = buf[i].Map(ETrue);
1748 for (i = 0; i < KTestPoolSizeInBufs * 2; i++)
1758 @SYMTestCaseDesc Trigger notifications
1761 Set Low Space Notifications on various thresholds.
1762 In a separate thread, keep allocating buffers.
1763 @SYMTestExpectedResults
1764 Notifications are completed when their respective levels are reached.
1765 @SYMTestPriority Medium
// Worker thread for the notification tests (NotificationRequests /
// CancelNotificationRequests). It drains the pool by allocating buffers,
// then refills it by closing them, pausing on the global semaphore at the
// buffer counts where the main thread expects low-space / free-space
// notifications to complete. aArg is the RShPool* under test.
1768 TInt ThreadNotifications(TAny* aArg)
// Guard against a missing argument.
1772 return KErrArgument;
1774 RShPool* pool = (RShPool*) aArg;
1775 RArray<RShBuf> bufarray;
// Sync with the main thread via the named global semaphore.
1778 r = sem.OpenGlobal(KTestLowSpaceSemaphore);
1781 RDebug::Printf("Line %d: r=%d", __LINE__, r);
1784 // Start allocating buffers
1785 while (pool->FreeCount() > 1)
1788 r = buf.Alloc(*pool);
1791 RDebug::Printf("Line %d: count=%d r=%d", __LINE__, bufarray.Count(), r);
1794 bufarray.Append(buf);
// Pause at the counts that trigger the main thread's low-space requests.
1795 if ((bufarray.Count() == 1) // wait for low3
1796 || (bufarray.Count() == KTestPoolSizeInBufs - 2) // wait for low2
1797 || (bufarray.Count() == KTestPoolSizeInBufs - 1)) // wait for low1/low4
1799 r = sem.Wait(5000000); // 5 second timeout
1802 RDebug::Printf("Line %d: count=%d r=%d", __LINE__, bufarray.Count(), r);
// Free the buffers again, pausing where free-space requests should fire.
1809 while (bufarray.Count())
1811 bufarray[0].Close();
1813 if ((bufarray.Count() == KTestPoolSizeInBufs - 2) // wait for free3
1814 || (bufarray.Count() == 1) // wait for free2
1815 || (bufarray.Count() == 0)) // wait for free1/free4
1817 r = sem.Wait(5000000); // 5 second timeout
1820 RDebug::Printf("Line %d: count=%d r=%d", __LINE__, bufarray.Count(), r);
// Ways in which cancelling a low-space notification may be mis-used;
// consumed by ThreadLowSpacePanic()/RequestLowSpacePanic().
1830 enum TTestLowSpaceType
// Cancel a notification that was never requested.
1832 ETestCancelNonExistent,
// Argument bundle passed to ThreadLowSpacePanic(): the pool under test,
// the notification threshold(s), and which misuse scenario to perform.
1836 struct TTestThreadLowSpacePanicArgs
// Scenario selector (see TTestLowSpaceType above).
1841 TTestLowSpaceType iType;
// Thread function that deliberately misuses CancelLowSpaceNotification()
// in the way selected by the TTestThreadLowSpacePanicArgs it receives.
// Historically each scenario ended in a panic; returns KErrArgument if it
// survives (i.e. no panic occurred) or if aArg is missing/unknown.
1844 TInt ThreadLowSpacePanic(TAny* aArg)
1848 return KErrArgument;
1850 TTestThreadLowSpacePanicArgs& targs = *(TTestThreadLowSpacePanicArgs*) aArg;
1852 if (targs.iType == ETestCancelNonExistent)
// Cancel with no outstanding request.
1854 targs.iPool->CancelLowSpaceNotification(rs); // should panic
1856 else if (targs.iType == ETestCancelTwice)
// Request once, then cancel twice; the second cancel is the misuse.
1858 targs.iPool->RequestLowSpaceNotification(targs.iThreshold1, rs);
1859 targs.iPool->CancelLowSpaceNotification(rs);
1860 targs.iPool->CancelLowSpaceNotification(rs); // should panic
// Reached only if no panic happened (or iType was unrecognised).
1864 return KErrArgument;
1870 * CancelLowSpaceNotification() no longer panic()s if it can't find the
1871 * notification, so this routine not currently called.
// Runs ThreadLowSpacePanic() in a child thread with JIT debugging off and
// verifies the thread exits with the expected panic code for the given
// misuse scenario. Currently unused: CancelLowSpaceNotification() no
// longer panics on a missing notification (see comment above).
1873 void RequestLowSpacePanic(RShPool& aPool, TUint aThreshold1, TUint aThreshold2, TTestLowSpaceType aType, TInt aLine)
// Distinguishes successive invocations in thread names / output.
1875 static TInt count = 0;
1877 test.Printf(_L("RequestLowSpacePanic@%d(%d)\n"), aLine, count);
// Disable just-in-time debugging so the expected panic doesn't stop the test.
1878 TBool jit = User::JustInTime();
1879 User::SetJustInTime(EFalse);
1880 TInt expectedpaniccode = KErrNone; // Initialised to silence compiler warnings
1883 case ETestCancelNonExistent:
1884 case ETestCancelTwice:
1885 expectedpaniccode = KErrNotFound;
// Package the scenario for the child thread.
1891 TTestThreadLowSpacePanicArgs targs;
1892 targs.iPool = &aPool;
1893 targs.iThreshold1 = aThreshold1;
1894 targs.iThreshold2 = aThreshold2;
1895 targs.iType = aType;
1897 RThread threadpanic;
1898 TRequestStatus threadpanicrs;
1900 TBuf<30> threadname;
1901 threadname.Format(_L("ThreadLowSpacePanic%d"), count);
1902 r = threadpanic.Create(threadname, ThreadLowSpacePanic, KDefaultStackSize, KMinHeapSize, 1 << 20, (TAny*) &targs);
1904 threadpanic.Logon(threadpanicrs);
1905 threadpanic.Resume();
1906 User::WaitForRequest(threadpanicrs);
// The child must have panicked with the scenario's expected reason.
1908 test_Equal(expectedpaniccode, threadpanicrs.Int());
1909 test_Equal(EExitPanic, threadpanic.ExitType());
1910 test_Equal(expectedpaniccode, threadpanic.ExitReason());
1911 threadpanic.Close();
// Restore the caller's JIT setting.
1912 User::SetJustInTime(jit);
// Low-space and free-space notification test. Starts ThreadNotifications()
// (which drains then refills aPool, gated on the global semaphore) and
// verifies that the six low-space and six free-space requests complete in
// threshold order, with edge thresholds (0 / KMaxTUint and beyond-pool)
// behaving as documented in the comments on each request.
1915 void NotificationRequests(RShPool& aPool)
1917 test.Next(_L("Notifications"));
1921 r = sem.CreateGlobal(KTestLowSpaceSemaphore, 0);
1924 r = timer.CreateLocal();
1927 TRequestStatus threadrs;
1928 r = thread.Create(_L("ThreadNotifications"), ThreadNotifications, KDefaultStackSize, KMinHeapSize, 1 << 20, (TAny*) &aPool);
// Higher priority so the worker runs up to each semaphore wait promptly.
1930 thread.SetPriority(EPriorityMore);
1931 thread.Logon(threadrs);
1933 test.Printf(_L("Low space notification\n"));
1934 TRequestStatus low1;
1935 TRequestStatus low2;
1936 TRequestStatus low3;
1937 TRequestStatus low4;
1938 TRequestStatus low5;
1939 TRequestStatus low6;
// Thresholds 1, 2, FreeCount()-1 and a duplicate of 1, plus the two edges.
1940 aPool.RequestLowSpaceNotification(1, low1);
1941 test_Equal(KRequestPending, low1.Int());
1942 aPool.RequestLowSpaceNotification(2, low2);
1943 test_Equal(KRequestPending, low2.Int());
1944 aPool.RequestLowSpaceNotification(aPool.FreeCount() - 1, low3);
1945 test_Equal(KRequestPending, low3.Int());
1946 aPool.RequestLowSpaceNotification(1, low4);
1947 test_Equal(KRequestPending, low4.Int());
1948 aPool.RequestLowSpaceNotification(0, low5); // Never completes
1949 test_Equal(KRequestPending, low5.Int());
1950 aPool.RequestLowSpaceNotification(KMaxTUint, low6); // Completes instantly
1951 TRequestStatus timeoutlow;
1952 timer.After(timeoutlow, 5000000); // 5 seconds time out
1953 User::WaitForRequest(low6, timeoutlow);
// Only the KMaxTUint request may have completed so far.
1954 test_KErrNone(low6.Int());
1955 test_Equal(KRequestPending, low1.Int());
1956 test_Equal(KRequestPending, low2.Int());
1957 test_Equal(KRequestPending, low3.Int());
1958 test_Equal(KRequestPending, low4.Int());
1959 test_Equal(KRequestPending, low5.Int());
1961 User::WaitForRequest(timeoutlow);
// As the worker drains the pool, requests complete highest threshold first.
1963 User::WaitForRequest(low3, threadrs);
1964 test_KErrNone(low3.Int());
1965 test_Equal(KRequestPending, low1.Int());
1966 test_Equal(KRequestPending, low2.Int());
1967 test_Equal(KRequestPending, low4.Int());
1968 test_Equal(KRequestPending, low5.Int());
1970 User::WaitForRequest(low2, threadrs);
1971 test_KErrNone(low2.Int())
1972 test_Equal(KRequestPending, low1.Int());
1973 test_Equal(KRequestPending, low4.Int());
1974 test_Equal(KRequestPending, low5.Int());
1976 User::WaitForRequest(low1, threadrs);
1977 test_KErrNone(low1.Int());
1978 User::WaitForRequest(low4, threadrs);
1979 test_KErrNone(low4.Int());
// Threshold 0 can never be reached; the worker should still be alive.
1980 test_Equal(KRequestPending, low5.Int());
1981 test_Equal(EExitPending, thread.ExitType()); // Thread is still running
1982 test_Compare(aPool.FreeCount(), <=, 1);
// Second phase: free-space notifications as the worker refills the pool.
1984 test.Printf(_L("Free space notification\n"));
1985 TRequestStatus free1;
1986 TRequestStatus free2;
1987 TRequestStatus free3;
1988 TRequestStatus free4;
1989 TRequestStatus free5;
1990 TRequestStatus free6;
1991 aPool.RequestFreeSpaceNotification(KTestPoolSizeInBufs, free1);
1992 test_Equal(KRequestPending, free1.Int());
1993 aPool.RequestFreeSpaceNotification(KTestPoolSizeInBufs - 1, free2);
1994 test_Equal(KRequestPending, free2.Int());
1995 aPool.RequestFreeSpaceNotification(aPool.FreeCount() + 1, free3);
1996 test_Equal(KRequestPending, free3.Int());
1997 aPool.RequestFreeSpaceNotification(KTestPoolSizeInBufs, free4);
1998 test_Equal(KRequestPending, free4.Int());
1999 aPool.RequestFreeSpaceNotification(KTestPoolSizeInBufs + 1, free5); // Never completes
2000 test_Equal(KRequestPending, free5.Int());
2001 aPool.RequestFreeSpaceNotification(0, free6); // Completes instantly
2003 TRequestStatus timeoutfree;
2004 timer.After(timeoutfree, 5000000); // 5 seconds time out
2005 User::WaitForRequest(free6, timeoutfree);
2006 test_KErrNone(free6.Int());
2008 test_Equal(KRequestPending, free1.Int());
2009 test_Equal(KRequestPending, free2.Int());
2010 test_Equal(KRequestPending, free3.Int());
2011 test_Equal(KRequestPending, free4.Int());
2012 test_Equal(KRequestPending, free5.Int());
2015 User::WaitForRequest(timeoutfree);
// Requests complete lowest threshold first as buffers are freed.
2017 sem.Signal(); // resume thread execution
2018 User::WaitForRequest(free3, threadrs);
2019 test_KErrNone(free3.Int());
2020 test_Equal(KRequestPending, free1.Int());
2021 test_Equal(KRequestPending, free2.Int());
2022 test_Equal(KRequestPending, free4.Int());
2023 test_Equal(KRequestPending, free5.Int());
2026 User::WaitForRequest(free2, threadrs);
2027 test_KErrNone(free2.Int())
2029 test_Equal(KRequestPending, free1.Int());
2030 test_Equal(KRequestPending, free4.Int());
2031 test_Equal(KRequestPending, free5.Int());
2034 User::WaitForRequest(free1, threadrs);
2035 test_KErrNone(free1.Int());
2036 test_KErrNone(free4.Int());
// KTestPoolSizeInBufs+1 exceeds the pool, so free5 can never complete.
2038 test_Equal(KRequestPending, free5.Int());
2039 test_Equal(EExitPending, thread.ExitType()); // Thread is still running
2041 test_Compare(aPool.FreeCount(), >=, KTestPoolSizeInBufs);
2043 // Complete the requests still pending...
2044 aPool.CancelLowSpaceNotification(low5);
2045 User::WaitForRequest(low5);
2047 aPool.CancelFreeSpaceNotification(free5);
2048 User::WaitForRequest(free5);
2050 // Let thread complete
2052 User::WaitForRequest(threadrs);
2053 test_Equal(EExitKill, thread.ExitType());
2054 test_KErrNone(thread.ExitReason());
2062 @SYMTestCaseDesc Cancel low- and free-space notifications
2065 Set Low/High LowSpace Notifications.
2067 @SYMTestExpectedResults
2069 @SYMTestPriority Medium
// Cancellation test: verifies that low-space and free-space notifications
// can be cancelled (completing with KErrCancel), that redundant or
// mismatched cancels are harmless, and that the worker thread still runs
// to completion afterwards.
2072 void CancelNotificationRequests(RShPool& aPool)
2074 test.Next(_L("Cancel notifications"));
2078 r = sem.CreateGlobal(KTestLowSpaceSemaphore, 0);
2081 TRequestStatus threadrs;
2082 r = thread.Create(_L("ThreadCancelNotifications"), ThreadNotifications, KDefaultStackSize, KMinHeapSize, 1 << 20, (TAny*) &aPool);
// Lower priority: the worker must not outrun the cancellations below.
2084 thread.SetPriority(EPriorityLess);
2085 thread.Logon(threadrs);
2087 test.Printf(_L("Cancel low space notifications\n"));
2088 // Low space notification cancel
2090 aPool.RequestLowSpaceNotification(1, low);
2091 aPool.CancelLowSpaceNotification(low);
2092 test_Equal(KErrCancel, low.Int());
2093 // We should be able to cancel again without panic()ing
2094 // (no guarantees on return code; maybe Cancel() should have void return type?)
2095 aPool.CancelLowSpaceNotification(low);
2096 test.Printf(_L("Second cancel returned %d\n"), low.Int());
2097 TRequestStatus low2;
2098 aPool.RequestLowSpaceNotification(1, low2); // For thread sync
2101 User::WaitForRequest(low2, threadrs);
2102 test_KErrNone(low2.Int());
2103 test_Equal(EExitPending, thread.ExitType()); // Thread is still running
2104 test_Compare(aPool.FreeCount(), <=, 1);
2106 test.Printf(_L("Cancel free space notifications\n"));
2107 TRequestStatus free;
2108 aPool.CancelFreeSpaceNotification(free); // Cancel non-existent notification
2109 aPool.RequestFreeSpaceNotification(KTestPoolSizeInBufs, free);
2110 aPool.CancelLowSpaceNotification(free); // Use wrong method (should have no effect)
2111 aPool.CancelFreeSpaceNotification(free); // Now use the right method
2112 test_Equal(KErrCancel, free.Int());
2113 aPool.CancelFreeSpaceNotification(free); // Already cancelled
2115 // Complete the requests still pending...
2116 User::WaitForRequest(low);
// Four signals release all of the worker's remaining semaphore waits.
2118 sem.Signal(4); // Resume thread execution and let it complete
2119 User::WaitForRequest(threadrs);
2120 test_KErrNone(threadrs.Int());
2121 test_Equal(EExitKill, thread.ExitType());
2122 test_KErrNone(thread.ExitReason());
2123 test_Compare(aPool.FreeCount(), >=, KTestPoolSizeInBufs);
2131 @SYMTestCaseDesc Grow and shrink pool
2134 1. Test Thread creates pools with various size attributes
2135 2. Test Thread keeps allocating buffers on pool.
2136 3. Test Thread keeps freeing buffers on pool
2137 4. Test Thread frees all buffers and close pool.
2138 @SYMTestExpectedResults
2139 Pools grow and shrink as expected.
2140 @SYMTestPriority High
// Polling parameters used when waiting for the pool's free count to
// converge with the model's prediction after grow/shrink activity.
2143 const TInt KTestFreeCountTimeOut = 20000000; // 20 seconds (of thread inactivity)
2144 const TInt KTestWaitBeforeRetry = 2000; // 0.002 second
// Multiplies n by f and saturates the result at KMaxTUint32. The name
// suggests f is a fixed-point 24.8 ratio; presumably the 64-bit product
// is scaled back (>>8) on a line between these — TODO confirm against the
// full source.
2146 TUint MultFx248(TUint n, TUint f)
2148 TUint64 r = (TUint64) n * f;
2150 return r > KMaxTUint32 ? KMaxTUint32 : I64LOW(r);
// User-side model of the kernel's pool grow/shrink algorithm. The test
// mirrors every Alloc/Free against this model and then polls the real
// pool until its free count matches the model's prediction.
2153 class TTestPoolModel
2156 TTestPoolModel(TShPoolInfo& aInfo);
2160 void DisplayCounters();
2163 void CheckGrowShrink();
// Fixed-point (24.8) ratios copied or derived from the pool's attributes.
2172 TUint iGrowTriggerRatio;
2174 TUint iShrinkByRatio;
2175 TUint iShrinkHysteresisRatio;
// Free-count level above which the model shrinks the pool.
2179 TUint iShrinkTrigger;
// Initialises the model from the pool's creation attributes and derives
// the shrink ratio from the grow ratio so that one shrink undoes one grow.
2184 TTestPoolModel::TTestPoolModel(TShPoolInfo& aInfo)
2186 iInitial = aInfo.iInitialBufs;
2187 iMax = aInfo.iMaxBufs;
2188 iGrowTriggerRatio = aInfo.iGrowTriggerRatio;
2189 iGrowByRatio = aInfo.iGrowByRatio;
// Inverse of the grow ratio in fx24.8: growing by g then shrinking by
// this amount returns the pool to its previous size.
2190 iShrinkByRatio = 256 - 65536 / (256 + iGrowByRatio);
2191 iShrinkHysteresisRatio = aInfo.iShrinkHysteresisRatio;
2192 iPoolFlags = aInfo.iFlags;
2195 iDebug = EFalse; // Set this to ETrue to display detailed information
// Column headers for DisplayCounters() debug output.
2200 test.Printf(_L("A F A+F GT ST \n"));
2201 test.Printf(_L("==============================\n"));
// Records one allocation in the model (body elided here; presumably
// updates iAllocated/iFree and re-evaluates grow/shrink — TODO confirm).
2206 void TTestPoolModel::Alloc()
// Records one free in the model (body elided here).
2213 void TTestPoolModel::Free()
// Returns the model's predicted free-buffer count (body elided here).
2220 TUint TTestPoolModel::FreeCount()
// Recomputes the grow and shrink trigger levels from the current pool
// size (allocated + free), mirroring the kernel's algorithm.
2225 void TTestPoolModel::CalcGSP()
2227 TUint n = iAllocated + iFree;
2229 // If the pool is at its maximum size, we can't grow
2230 if (n >= iMax || iGrowTriggerRatio == 0 /*|| iCommittedPages >= iMaxPages*/)
// Grow when the free count falls below this fraction of the pool.
2236 iGrowTrigger = MultFx248(n, iGrowTriggerRatio);
2238 // Deal with rounding towards zero
2239 if (iGrowTrigger == 0)
2243 // If no growing has happened, we can't shrink
2244 if (n <= iInitial || iGrowTriggerRatio == 0 || (iPoolFlags & EShPoolSuppressShrink) != 0)
// Effectively disable shrinking.
2246 iShrinkTrigger = iMax;
2250 // To ensure that shrinking doesn't immediately happen after growing, the trigger
2251 // amount is the grow trigger + the grow amount (which is the number of free buffers
2252 // just after a grow) times the shrink hysteresis value.
2253 iShrinkTrigger = MultFx248(n, iGrowTriggerRatio + iGrowByRatio);
2254 iShrinkTrigger = MultFx248(iShrinkTrigger, iShrinkHysteresisRatio);
2256 // Deal with rounding towards zero
2257 if (iShrinkTrigger == 0)
2260 // If the shrink trigger ends up > the number of buffers currently in
2261 // the pool, set it to that number (less 1, since the test is "> trigger").
2262 // This means the pool will only shrink when all the buffers have been freed.
2263 if (iShrinkTrigger >= n)
2264 iShrinkTrigger = n - 1;
// Applies the grow/shrink rules: grow when free buffers drop below the
// grow trigger, shrink when they rise above the shrink trigger.
2272 void TTestPoolModel::CheckGrowShrink()
2274 if (iFree < iGrowTrigger)
2279 if (iFree > iShrinkTrigger)
// Grows the model pool by iGrowByRatio (fx24.8) of its current size,
// clamped to the remaining headroom below iMax.
2286 void TTestPoolModel::Grow()
2288 TUint headroom = iMax - (iAllocated + iFree);
2289 TUint growby = MultFx248(iAllocated + iFree, iGrowByRatio);
2290 if (growby == 0) // Handle round-to-zero
2292 if (growby > headroom)
2297 test.Printf(_L("GROW by %d!\n"), growby);
// Shrinks the model pool by iShrinkByRatio (fx24.8) of its current size,
// never below the initial size and never by more than the free count.
2302 void TTestPoolModel::Shrink()
2304 TUint grownBy = iAllocated + iFree - iInitial;
2305 TUint shrinkby = MultFx248(iAllocated + iFree, iShrinkByRatio);
2306 if (shrinkby == 0) // Handle round-to-zero
2308 if (shrinkby > grownBy)
2310 if (shrinkby > iFree)
2315 test.Printf(_L("SHRINK by %d!\n"), shrinkby);
// Debug dump: allocated, free, total, grow trigger, shrink trigger
// (matches the column headers printed by the constructor).
2320 void TTestPoolModel::DisplayCounters()
2322 test.Printf(_L("%-6u%-6u%-6u%-6u%-6u\n"), iAllocated, iFree, iAllocated + iFree, iGrowTrigger, iShrinkTrigger);
// Core grow/shrink test. Creates a pool from aInfo, mirrors it with a
// TTestPoolModel, then: (1) allocates until exhaustion, (2) frees about a
// third of the buffers, (3) re-allocates to exhaustion, (4) frees all.
// After each step it polls (KTestWaitBeforeRetry intervals, up to
// KTestFreeCountTimeOut) until the real pool's free count matches the
// model, proving the kernel grew/shrank as predicted.
// aBufferFlags is passed to every RShBuf::Alloc (e.g. EShPoolAllocNoMap,
// EShPoolAllocCanWait).
2325 void PoolGrowingTestRoutine(const TShPoolCreateInfo& aInfo, TUint aBufferFlags = 0)
2330 r = pool.Create(aInfo, KDefaultPoolHandleFlags)
2336 // Only set the buffer window if we're going to map the buffers
2337 if (!(aBufferFlags & EShPoolAllocNoMap) && (info.iFlags & EShPoolPageAlignedBuffer))
2339 r = pool.SetBufferWindow(-1, ETrue);
2343 TTestPoolModel model(info);
2344 RArray<RShBuf> bufarray;
2345 test_Equal(info.iInitialBufs, pool.FreeCount());
2347 // Buffer allocation
// Poll until the real pool catches up with the model (or time out).
2350 timeout = KTestFreeCountTimeOut / KTestWaitBeforeRetry;
2351 while (model.FreeCount() != pool.FreeCount())
2353 User::After(KTestWaitBeforeRetry);
2354 test_Assert(--timeout,
2355 test.Printf(_L("Timeout: Free==%u (expected %u)\n"), pool.FreeCount(), model.FreeCount());
2356 model.DisplayCounters();
2358 if ((timeout * KTestWaitBeforeRetry) % 1000000 == 0)
2360 test.Printf(_L("Time out in %d seconds! (line %d)\n"), timeout * KTestWaitBeforeRetry / 1000000, __LINE__);
2364 r = buf.Alloc(pool, aBufferFlags);
2365 if (r == KErrNoMemory)
2367 // We expect to get a failure when all buffers are allocated
2368 if ((TUint) bufarray.Count() == info.iMaxBufs)
2370 if (!(aBufferFlags & EShPoolAllocCanWait))
2372 // Give the Management DFC some time to run, then try allocating again
2373 User::After(1000000); // 1 second
2374 r = buf.Alloc(pool);
2377 test.Printf(_L("Alloc fail after %d of %d; Free==%u (expected %u)\n"),
2378 bufarray.Count(), info.iMaxBufs, pool.FreeCount(), model.FreeCount());
// Stamp each mapped buffer with a recognisable fill pattern.
2387 if (!(aBufferFlags & EShPoolAllocNoMap))
2389 TPtr8 ptr(buf.Ptr(), buf.Size(),buf.Size());
2390 ptr.Fill(bufarray.Count() % 256);
2392 bufarray.Append(buf);
2395 while (r == KErrNone);
// Pool must now be exhausted.
2397 test_Equal(KErrNoMemory, r);
2398 test_Equal(info.iMaxBufs, bufarray.Count());
2399 test_Equal(0, pool.FreeCount());
2401 // Now free no more than 1/3 of these buffers...
2402 while ((TUint) bufarray.Count() > 2 * info.iMaxBufs / 3)
2404 // remove buffers from the back of the array
2405 if (!(aBufferFlags & EShPoolAllocNoMap))
2407 TPtr8 ptr(bufarray[bufarray.Count() - 1].Ptr(), bufarray[bufarray.Count() - 1].Size(),bufarray[bufarray.Count() - 1].Size());
2408 ptr.Fill((bufarray.Count() + 1) % 256);
2410 bufarray[bufarray.Count() - 1].Close();
2411 bufarray.Remove(bufarray.Count() - 1);
// Wait for any shrink triggered by the frees to be reflected.
2414 timeout = KTestFreeCountTimeOut / KTestWaitBeforeRetry;
2415 while (model.FreeCount() != pool.FreeCount())
2417 User::After(KTestWaitBeforeRetry);
2418 test_Assert(--timeout,
2419 test.Printf(_L("Timeout: Free==%u (expected %u)\n"), pool.FreeCount(), model.FreeCount());
2420 model.DisplayCounters();
2422 if ((timeout * KTestWaitBeforeRetry) % 1000000 == 0)
2424 test.Printf(_L("Time out in %d seconds! (line %d)\n"), timeout * KTestWaitBeforeRetry / 1000000, __LINE__);
2429 // ... and re-allocate them
2432 timeout = KTestFreeCountTimeOut / KTestWaitBeforeRetry;
2433 while (model.FreeCount() != pool.FreeCount())
2435 User::After(KTestWaitBeforeRetry);
2436 test_Assert(--timeout,
2437 test.Printf(_L("Timeout: Free==%u (expected %u)\n"), pool.FreeCount(), model.FreeCount());
2438 model.DisplayCounters();
2440 if ((timeout * KTestWaitBeforeRetry) % 1000000 == 0)
2442 test.Printf(_L("Time out in %d seconds! (line %d)\n"), timeout * KTestWaitBeforeRetry / 1000000, __LINE__);
2446 r = buf.Alloc(pool, aBufferFlags);
2447 if (r == KErrNoMemory)
2449 // We expect to get a failure when all buffers are allocated
2450 if ((TUint) bufarray.Count() == info.iMaxBufs)
2452 if (!(aBufferFlags & EShPoolAllocCanWait))
2454 // Give the Management DFC some time to run, then try allocating again
2455 User::After(1000000); // 1 second
2456 r = buf.Alloc(pool);
2459 test.Printf(_L("Alloc fail after %d of %d; Free==%u (expected %u)\n"),
2460 bufarray.Count(), info.iMaxBufs, pool.FreeCount(), model.FreeCount());
2469 if (!(aBufferFlags & EShPoolAllocNoMap))
2471 TPtr8 ptr(buf.Ptr(), buf.Size(),buf.Size());
2472 ptr.Fill(bufarray.Count() % 256);
2474 bufarray.Append(buf);
2477 while (r == KErrNone);
2479 test_Equal(KErrNoMemory, r);
2480 test_Equal(info.iMaxBufs, bufarray.Count());
2481 test_Equal(0, pool.FreeCount());
// Final phase: release everything and expect the pool to shrink back.
2484 while (bufarray.Count())
2486 // remove buffers from the back of the array
2487 if (!(aBufferFlags & EShPoolAllocNoMap))
2489 TPtr8 ptr(bufarray[bufarray.Count() - 1].Ptr(), bufarray[bufarray.Count() - 1].Size(),bufarray[bufarray.Count() - 1].Size());
2490 ptr.Fill((bufarray.Count() + 1) % 256);
2492 bufarray[bufarray.Count() - 1].Close();
2493 bufarray.Remove(bufarray.Count() - 1);
2496 timeout = KTestFreeCountTimeOut / KTestWaitBeforeRetry;
2497 while (model.FreeCount() != pool.FreeCount())
2499 User::After(KTestWaitBeforeRetry);
2500 test_Assert(--timeout,
2501 test.Printf(_L("Timeout: Free==%u (expected %u)\n"), pool.FreeCount(), model.FreeCount());
2502 model.DisplayCounters();
2504 if ((timeout * KTestWaitBeforeRetry) % 1000000 == 0)
2506 test.Printf(_L("Time out in %d seconds! (line %d)\n"), timeout * KTestWaitBeforeRetry / 1000000, __LINE__);
2511 // Pool should have shrunk back to its initial size
2512 test_Equal(info.iInitialBufs, pool.FreeCount());
// Drives PoolGrowingTestRoutine() over seven pool configurations (A, B,
// C, D, A', A'', D', D'') covering non-page-aligned and page-aligned
// pools, guard pages, AllocCanWait and NoBufferMap variants.
2517 void PoolGrowingUser()
2519 test.Next(_L("Pool Growing/Shrinking (User)"));
2522 r = HAL::Get(HAL::EMemoryPageSize, pagesize);
2524 // Pool A: Non-page aligned pool (64-byte alignment)
2527 TInt maxbufs = KTestPoolSizeInBytes / RoundUp(*PtrBufSize, alignment);
// Cap the buffer count to keep the test run time reasonable.
2528 if (maxbufs > 32000)
2532 TInt initialbufs = maxbufs / 2;
2533 TInt growtrigger = 32;
2535 TInt shrinkhys = 288;
2536 test.Printf(_L("POOL A: BufSize=%d InitialBufs=%d MaxBufs=%d GrowTrigger=%d GrowBy=%d ShrinkHys=%d Alignment=%d\n"),
2537 *PtrBufSize, initialbufs, maxbufs, growtrigger, growby, shrinkhys, alignment);
2538 TShPoolCreateInfo inf(TShPoolCreateInfo::ENonPageAlignedBuffer, *PtrBufSize, initialbufs, alignment);
2539 r = inf.SetSizingAttributes(maxbufs, growtrigger, growby, shrinkhys);
2541 PoolGrowingTestRoutine(inf);
2544 // Pool B: Non-page aligned pool (maximum alignment)
2546 TInt alignment = Log2(pagesize);
2547 TInt maxbufs = KTestPoolSizeInBytes / RoundUp(*PtrBufSize, alignment);
2548 if (maxbufs > 32000)
2552 TInt initialbufs = maxbufs / 4;
2553 TInt growtrigger = 32;
2555 TInt shrinkhys = 288;
2556 test.Printf(_L("POOL B: BufSize=%d InitialBufs=%d MaxBufs=%d GrowTrigger=%d GrowBy=%d ShrinkHys=%d Alignment=%d\n"),
2557 *PtrBufSize, initialbufs, maxbufs, growtrigger, growby, shrinkhys, alignment);
2558 TShPoolCreateInfo inf(TShPoolCreateInfo::ENonPageAlignedBuffer, *PtrBufSize, initialbufs, alignment);
2559 r = inf.SetSizingAttributes(maxbufs, growtrigger, growby, shrinkhys);
2561 PoolGrowingTestRoutine(inf);
2564 // Pool C: Page aligned pool without guard pages
2566 TInt maxbufs = KTestPoolSizeInBytes / RoundUp(*PtrBufSize, Log2(pagesize));
2567 if (maxbufs > 32000)
2571 TInt initialbufs = maxbufs * 3 / 8;
2572 TInt growtrigger = 32;
2574 TInt shrinkhys = 288;
2575 test.Printf(_L("POOL C: BufSize=%d InitialBufs=%d MaxBufs=%d GrowTrigger=%d GrowBy=%d ShrinkHys=%d Page-Aligned\n"),
2576 *PtrBufSize, initialbufs, maxbufs, growtrigger, growby, shrinkhys);
2577 TShPoolCreateInfo inf(TShPoolCreateInfo::EPageAlignedBuffer, *PtrBufSize, initialbufs);
2578 r = inf.SetSizingAttributes(maxbufs, growtrigger, growby, shrinkhys);
2580 PoolGrowingTestRoutine(inf);
2583 // Pool D: Page aligned pool with guard pages
2585 TInt maxbufs = KTestPoolSizeInBytes / RoundUp(*PtrBufSize, Log2(pagesize));
2586 if (maxbufs > 32000)
2590 TInt initialbufs = maxbufs / 2;
2591 TInt growtrigger = 32;
2593 TInt shrinkhys = 288;
2594 test.Printf(_L("POOL D: BufSize=%d InitialBufs=%d MaxBufs=%d GrowTrigger=%d GrowBy=%d ShrinkHys=%d Page-Aligned+Guard\n"),
2595 *PtrBufSize, initialbufs, maxbufs, growtrigger, growby, shrinkhys);
2596 TShPoolCreateInfo inf(TShPoolCreateInfo::EPageAlignedBuffer, *PtrBufSize, initialbufs);
2597 r = inf.SetSizingAttributes(maxbufs, growtrigger, growby, shrinkhys);
2599 r = inf.SetGuardPages();
2601 PoolGrowingTestRoutine(inf);
2604 // Pool A': Non-page aligned pool (64-byte alignment)
// Starts with a single buffer to force maximum growth.
2607 TInt maxbufs = KTestPoolSizeInBytes / RoundUp(*PtrBufSize, alignment);
2608 if (maxbufs > 32000)
2612 TInt initialbufs = 1;
2613 TInt growtrigger = 32;
2615 TInt shrinkhys = 512;
2616 test.Printf(_L("POOL A': BufSize=%d InitialBufs=%d MaxBufs=%d GrowTrigger=%d GrowBy=%d ShrinkHys=%d Alignment=%d\n"),
2617 *PtrBufSize, initialbufs, maxbufs, growtrigger, growby, shrinkhys, alignment);
2618 TShPoolCreateInfo inf(TShPoolCreateInfo::ENonPageAlignedBuffer, *PtrBufSize, initialbufs, alignment);
2619 r = inf.SetSizingAttributes(maxbufs, growtrigger, growby, shrinkhys);
2621 PoolGrowingTestRoutine(inf);
2624 // Pool A'': Non-page aligned pool (64-byte alignment) - AllocCanWait
2627 TInt maxbufs = KTestPoolSizeInBytes / RoundUp(*PtrBufSize, alignment);
2628 if (maxbufs > 32000)
2632 TInt initialbufs = 1;
2633 TInt growtrigger = 1;
2635 TInt shrinkhys = 257;
2636 test.Printf(_L("POOL A'': BufSize=%d InitialBufs=%d MaxBufs=%d GrowTrigger=%d GrowBy=%d ShrinkHys=%d Alignment=%d\n"),
2637 *PtrBufSize, initialbufs, maxbufs, growtrigger, growby, shrinkhys, alignment);
2638 TShPoolCreateInfo inf(TShPoolCreateInfo::ENonPageAlignedBuffer, *PtrBufSize, initialbufs, alignment);
2639 r = inf.SetSizingAttributes(maxbufs, growtrigger, growby, shrinkhys);
2641 PoolGrowingTestRoutine(inf, EShPoolAllocCanWait);
2644 // Pool D': Page aligned pool with guard pages
2646 TInt maxbufs = KTestPoolSizeInBytes / RoundUp(*PtrBufSize, Log2(pagesize));
2647 if (maxbufs > 32000)
2651 TInt initialbufs = 1;
2652 TInt growtrigger = 1;
2654 TInt shrinkhys = 2048;
2655 test.Printf(_L("POOL D': BufSize=%d InitialBufs=%d MaxBufs=%d GrowTrigger=%d GrowBy=%d ShrinkHys=%d Page-Aligned+Guard\n"),
2656 *PtrBufSize, initialbufs, maxbufs, growtrigger, growby, shrinkhys);
2657 TShPoolCreateInfo inf(TShPoolCreateInfo::EPageAlignedBuffer, *PtrBufSize, initialbufs);
2658 r = inf.SetSizingAttributes(maxbufs, growtrigger, growby, shrinkhys);
2660 r = inf.SetGuardPages();
2662 PoolGrowingTestRoutine(inf);
2664 // Pool D'': Page aligned pool with guard pages - NoBufferMap
2666 TInt maxbufs = KTestPoolSizeInBytes / RoundUp(*PtrBufSize, Log2(pagesize));
2667 if (maxbufs > 32000)
2671 TInt initialbufs = maxbufs / 2;
2672 TInt growtrigger = 32;
2674 TInt shrinkhys = 288;
2675 test.Printf(_L("POOL D'': BufSize=%d InitialBufs=%d MaxBufs=%d GrowTrigger=%d GrowBy=%d ShrinkHys=%d Page-Aligned+Guard\n"),
2676 *PtrBufSize, initialbufs, maxbufs, growtrigger, growby, shrinkhys);
2677 TShPoolCreateInfo inf(TShPoolCreateInfo::EPageAlignedBuffer, *PtrBufSize, initialbufs);
2678 r = inf.SetSizingAttributes(maxbufs, growtrigger, growby, shrinkhys);
2680 r = inf.SetGuardPages();
2682 PoolGrowingTestRoutine(inf, EShPoolAllocNoMap);
2688 @SYMTestCaseDesc Contiguous buffer allocation
2691 Create a pool with the Contiguous attribute and allocate buffers.
2692 @SYMTestExpectedResults
2693 Buffers memory is physically contiguous.
2694 @SYMTestPriority High
// Kernel-side contiguous-buffer test: delegates to the test LDD, which
// creates the pool with the contiguous attribute and verifies physical
// contiguity. Skipped on the emulator and when the buffer fits in one
// page (contiguity is then trivially satisfied).
2697 void ContiguousPoolKernel()
2699 test.Next(_L("Contiguous Pool (Kernel)"));
2701 test.Printf(_L("Does not run on the emulator. Skipped\n"));
2705 r = HAL::Get(HAL::EMemoryPageSize, pagesize);
2707 if (*PtrBufSize <= pagesize)
2709 test.Printf(_L("Buffer size <= page size. Skipped.\n"));
2713 TShPoolCreateInfo inf(TShPoolCreateInfo::EPageAlignedBuffer, *PtrBufSize, KTestPoolSizeInBufs);
2714 // r = inf.SetSizingAttributes(KTestPoolSizeInBufs, 25, 25, 25600);
2715 // test_KErrNone(r);
// The device driver performs the actual kernel-side checks.
2717 r = Ldd.ContiguousPoolKernel(inf);
// Buffer-pinning test (tail; the function signature is above this point).
// Allocates one buffer from each of two single-buffer pools and asks the
// test LDD to pin them kernel-side. Skipped on the emulator.
2725 test.Next(_L("Buffer pinning"));
2727 test.Printf(_L("Does not run on the emulator. Skipped\n"));
2732 TShPoolCreateInfo inf1(TShPoolCreateInfo::ENonPageAlignedBuffer, *PtrBufSize * KTestPoolSizeInBufs, 1, KTestMinimumAlignmentLog2);
2733 r = pool1.Create(inf1, KDefaultPoolHandleFlags);
2735 r = buf1.Alloc(pool1);
2737 r = Ldd.PinBuffer(pool1.Handle(), buf1.Handle());
// Second pool/buffer pair, same shape as the first.
2744 TShPoolCreateInfo inf2(TShPoolCreateInfo::ENonPageAlignedBuffer, *PtrBufSize * KTestPoolSizeInBufs, 1, KTestMinimumAlignmentLog2);
2745 r = pool2.Create(inf2, KDefaultPoolHandleFlags);
2747 r = buf2.Alloc(pool2);
2749 r = Ldd.PinBuffer(pool2.Handle(), buf2.Handle());
2761 @SYMTestExpectedResults
// Single-buffer pool test: for both a page-aligned pool (with guard
// pages and a full window) and a non-page-aligned pool, checks that one
// buffer can be allocated and written while a second allocation fails
// with KErrNoMemory.
2765 void SingleBufferPool()
2767 test.Next(_L("Single Buffer Pool"));
// Page-aligned variant, guard pages enabled.
2774 TShPoolCreateInfo infpa(TShPoolCreateInfo::EPageAlignedBuffer, *PtrBufSize * KTestPoolSizeInBufs, 1);
2775 r = infpa.SetGuardPages();
2777 r = pool.Create(infpa, KDefaultPoolHandleFlags);
2779 r = pool.SetBufferWindow(-1, ETrue);
2781 r = buf.Alloc(pool);
// Only one buffer exists, so a second alloc must fail.
2783 r = buf2.Alloc(pool);
2784 test_Equal(KErrNoMemory, r);
2785 TPtr8(buf.Ptr(), buf.Size(), buf.Size()).Fill('!');
// Non-page-aligned variant.
2789 TShPoolCreateInfo infnpa(TShPoolCreateInfo::ENonPageAlignedBuffer, *PtrBufSize * KTestPoolSizeInBufs, 1, KTestMinimumAlignmentLog2);
2790 r = pool.Create(infnpa, KDefaultPoolHandleFlags);
2792 r = buf.Alloc(pool);
2794 r = buf2.Alloc(pool);
2795 test_Equal(KErrNoMemory, r);
2796 TPtr8(buf.Ptr(), buf.Size(),buf.Size()).Fill('?');
2803 @SYMTestCaseDesc Negative tests (user/kernel)
2806 API calls with invalid arguments.
2807 @SYMTestExpectedResults
2808 Appropriate error code returned.
2809 @SYMTestPriority High
// Negative tests, user side: feed RShPool::Create() and
// TShPoolCreateInfo::SetSizingAttributes() invalid argument combinations
// and verify the expected error codes (mostly KErrArgument; KErrNoMemory
// for an impossibly large pool; KErrNone for the valid control cases).
// NOTE(review): interior source lines are elided in this view; comments
// annotate visible lines only.
2812 void NegativeTestsUser()
2814 test.Next(_L("Negative tests (User)"));
// Fetch platform parameters used to build boundary-condition arguments.
2818 r = HAL::Get(HAL::EMemoryPageSize, pagesize);
2820 r = HAL::Get(HAL::EMemoryRAM, ram);
// --- Page-aligned pools: zero/overflowing size or count must be rejected ---
2824 { TShPoolCreateInfo inf(TShPoolCreateInfo::EPageAlignedBuffer, 0, 0); r = pool.Create(inf,KDefaultPoolHandleFlags); test_Equal(KErrArgument, r); }
2825 { TShPoolCreateInfo inf(TShPoolCreateInfo::EPageAlignedBuffer, 100, 0); r = pool.Create(inf,KDefaultPoolHandleFlags); test_Equal(KErrArgument, r); }
2826 { TShPoolCreateInfo inf(TShPoolCreateInfo::EPageAlignedBuffer, 0, 100); r = pool.Create(inf,KDefaultPoolHandleFlags); test_Equal(KErrArgument, r); }
2827 { TShPoolCreateInfo inf(TShPoolCreateInfo::EPageAlignedBuffer, KMaxTUint, 10); r = pool.Create(inf,KDefaultPoolHandleFlags); test_Equal(KErrArgument, r); }
2828 { TShPoolCreateInfo inf(TShPoolCreateInfo::EPageAlignedBuffer, 10, KMaxTUint); r = pool.Create(inf,KDefaultPoolHandleFlags); test_Equal(KErrArgument, r); }
2829 { TShPoolCreateInfo inf(TShPoolCreateInfo::EPageAlignedBuffer, KMaxTUint, KMaxTUint); r = pool.Create(inf,KDefaultPoolHandleFlags); test_Equal(KErrArgument, r); }
2830 { TShPoolCreateInfo inf(TShPoolCreateInfo::EPageAlignedBuffer, 65537, 65536); r = pool.Create(inf,KDefaultPoolHandleFlags); test_Equal(KErrArgument, r); }
// Buffer count just past the theoretical address-space limit for this page size.
2831 { TShPoolCreateInfo inf(TShPoolCreateInfo::EPageAlignedBuffer, 10, 1 + (1 << (32 - Log2(pagesize)))); r = pool.Create(inf,KDefaultPoolHandleFlags); test_Equal(KErrArgument, r); }
// Control case: sensible arguments must succeed.
2832 { TShPoolCreateInfo inf(TShPoolCreateInfo::EPageAlignedBuffer, 4096, 10); r = pool.Create(inf, KDefaultPoolHandleFlags); test_Equal(KErrNone, r); pool.Close(); }
2833 // XXX The following test will need updating in Phase 2, when exclusive access will be supported
2834 // (page-aligned-buffer pools only)
2835 { TShPoolCreateInfo inf(TShPoolCreateInfo::EPageAlignedBuffer, 4096, 10); inf.SetExclusive(); r = pool.Create(inf, KDefaultPoolHandleFlags); test_Equal(KErrNotSupported, r); pool.Close(); }
2836 { TShPoolCreateInfo inf(TShPoolCreateInfo::ENonPageAlignedBuffer, 4096, 10, 12); r = pool.Create(inf, KDefaultPoolHandleFlags); test_Equal(KErrNone, r); pool.Close(); }
// Exclusive access is never valid for non-page-aligned pools.
2837 { TShPoolCreateInfo inf(TShPoolCreateInfo::ENonPageAlignedBuffer, 4096, 10, 12); inf.SetExclusive(); r = pool.Create(inf, KDefaultPoolHandleFlags); test_Equal(KErrArgument, r); pool.Close(); }
// Pool larger than physical RAM: valid arguments, but allocation must fail.
2839 { TShPoolCreateInfo inf(TShPoolCreateInfo::EPageAlignedBuffer, 128 * pagesize, (ram / (128 * pagesize)) + 1); r = pool.Create(inf,KDefaultPoolHandleFlags); test_Equal(KErrNoMemory, r); }
// --- Non-page-aligned pools: same zero/overflow cases plus bad alignment values ---
2841 { TShPoolCreateInfo inf(TShPoolCreateInfo::ENonPageAlignedBuffer, 0, 0, 0); r = pool.Create(inf,KDefaultPoolHandleFlags); test_Equal(KErrArgument, r); }
2842 { TShPoolCreateInfo inf(TShPoolCreateInfo::ENonPageAlignedBuffer, 100, 0, 0); r = pool.Create(inf,KDefaultPoolHandleFlags); test_Equal(KErrArgument, r); }
2843 { TShPoolCreateInfo inf(TShPoolCreateInfo::ENonPageAlignedBuffer, 0, 100, 0); r = pool.Create(inf,KDefaultPoolHandleFlags); test_Equal(KErrArgument, r); }
2844 { TShPoolCreateInfo inf(TShPoolCreateInfo::ENonPageAlignedBuffer, KMaxTUint, 10, 0); r = pool.Create(inf,KDefaultPoolHandleFlags); test_Equal(KErrArgument, r); }
2845 { TShPoolCreateInfo inf(TShPoolCreateInfo::ENonPageAlignedBuffer, 10, KMaxTUint, 0); r = pool.Create(inf,KDefaultPoolHandleFlags); test_Equal(KErrArgument, r); }
2846 { TShPoolCreateInfo inf(TShPoolCreateInfo::ENonPageAlignedBuffer, KMaxTUint, KMaxTUint, 0); r = pool.Create(inf,KDefaultPoolHandleFlags); test_Equal(KErrArgument, r); }
2847 { TShPoolCreateInfo inf(TShPoolCreateInfo::ENonPageAlignedBuffer, 65537, 65536, 0); r = pool.Create(inf,KDefaultPoolHandleFlags); test_Equal(KErrArgument, r); }
// Alignment log2 values that are out of range (max meaningful is Log2(pagesize)).
2848 { TShPoolCreateInfo inf(TShPoolCreateInfo::ENonPageAlignedBuffer, 10, 10, KMaxTUint); r = pool.Create(inf,KDefaultPoolHandleFlags); test_Equal(KErrArgument, r); }
2849 { TShPoolCreateInfo inf(TShPoolCreateInfo::ENonPageAlignedBuffer, 10, 10, 33); r = pool.Create(inf,KDefaultPoolHandleFlags); test_Equal(KErrArgument, r); }
2850 { TShPoolCreateInfo inf(TShPoolCreateInfo::ENonPageAlignedBuffer, 10, 300, 24); r = pool.Create(inf,KDefaultPoolHandleFlags); test_Equal(KErrArgument, r); }
2851 { TShPoolCreateInfo inf(TShPoolCreateInfo::ENonPageAlignedBuffer, 10, 65537, 16); r = pool.Create(inf,KDefaultPoolHandleFlags); test_Equal(KErrArgument, r); }
2852 { TShPoolCreateInfo inf(TShPoolCreateInfo::ENonPageAlignedBuffer, 10, 10, Log2(pagesize) + 1); r = pool.Create(inf,KDefaultPoolHandleFlags); test_Equal(KErrArgument, r); }
// --- Invalid sizing (growth) attribute combinations ---
2855 TShPoolCreateInfo inf(TShPoolCreateInfo::ENonPageAlignedBuffer, *BufferSize, KTestPoolSizeInBufs, 0);
// Guard pages are a page-aligned-pool feature; invalid here.
2856 inf.SetGuardPages();
2857 r = pool.Create(inf, KDefaultPoolHandleFlags); test_Equal(KErrArgument, r);
2858 r = inf.SetSizingAttributes(KTestPoolSizeInBufs - 1, 25, 25, 280); test_KErrNone(r); r = pool.Create(inf,KDefaultPoolHandleFlags); test_Equal(KErrArgument, r);
2859 // Either grow trigger ratio or grow by ratio == 0 => non-growable pool
2860 // Such pools must have initial buffers == max buffers
2861 r = inf.SetSizingAttributes(KTestPoolSizeInBufs * 2, 1, 0, 1); test_Equal(KErrArgument, r); r = pool.Create(inf,KDefaultPoolHandleFlags); test_Equal(KErrArgument, r);
2862 r = inf.SetSizingAttributes(KTestPoolSizeInBufs * 2, 1, 0, 0); test_Equal(KErrArgument, r); r = pool.Create(inf,KDefaultPoolHandleFlags); test_Equal(KErrArgument, r);
2863 // shrink hysteresis ratio must be > 256
2864 r = inf.SetSizingAttributes(KTestPoolSizeInBufs - 1, 25, 25, 256); test_Equal(KErrArgument, r); r = pool.Create(inf,KDefaultPoolHandleFlags); test_Equal(KErrArgument, r);
2865 // grow ratio must be < 256
2866 r = inf.SetSizingAttributes(KTestPoolSizeInBufs * 2, 256, 25, 260); test_Equal(KErrArgument, r); r = pool.Create(inf,KDefaultPoolHandleFlags); test_Equal(KErrArgument, r);
2869 // Can't have a non-aligned, contiguous pool that grows
2870 TShPoolCreateInfo inf(TShPoolCreateInfo::ENonPageAlignedBuffer, 200, 10, 0);
2871 r = inf.SetSizingAttributes(KTestPoolSizeInBufs * 2, 25, 25, 280);
// Negative tests, kernel side: all invalid-argument checks are performed
// inside the test LDD; this wrapper just invokes them via the channel.
// NOTE(review): interior lines (braces, result check) are elided in this view.
2875 void NegativeTestsKernel()
2877 test.Next(_L("Negative tests (Kernel)"));
2879 r = Ldd.NegativeTestsKernel();
2885 @SYMTestCaseDesc Out of memory testing
2889 @SYMTestExpectedResults
2890 @SYMTestPriority High
// Out-of-memory test body (function header elided in this view; presumably
// the OOM test routine — TODO confirm). Uses __KHEAP_FAILNEXT to inject a
// kernel-heap failure at allocation #i, retrying each operation with i
// increasing until it stops failing with KErrNoMemory. This proves every
// code path recovers cleanly from kernel-heap exhaustion. Debug builds only
// (the heap-failure macros are no-ops in release, see the skip message at the
// end).
// NOTE(review): many interior source lines are elided in this view; comments
// annotate visible lines only.
2895 test.Next(_L("Out of memory"));
// Upper bound on retries so a persistent failure terminates the loop.
2899 const TInt KMaxKernelAllocations = 1024;
// Two pool flavours under test: page-aligned and non-page-aligned, both
// growable (4 initial buffers, aggressive grow, hysteresis 300/256).
2902 TShPoolCreateInfo inf0(TShPoolCreateInfo::EPageAlignedBuffer, *PtrBufSize, 1);
2903 TShPoolCreateInfo inf1(TShPoolCreateInfo::ENonPageAlignedBuffer, *PtrBufSize, 1, 0);
2904 r = inf0.SetSizingAttributes(4, 100, 1024, 300);
2906 r = inf1.SetSizingAttributes(4, 100, 1024, 300);
// j == 0: page-aligned pool; j == 1: non-page-aligned pool.
2909 for(TInt j = 0; j <= 1; j++)
2913 test.Printf(_L("OOM testing for page-aligned pool\n"));
2915 test.Printf(_L("OOM testing for non-page-aligned pool\n"));
// Retry pool creation under injected heap failure until it succeeds.
2922 for (i = 0; i < KMaxKernelAllocations && r == KErrNoMemory; i++)
2924 __KHEAP_FAILNEXT(i);
2926 r = pool.Create(inf0,KDefaultPoolHandleFlags);
2928 r = pool.Create(inf1,KDefaultPoolHandleFlags);
2931 test.Printf(_L("Create pool took %d tries\n"),i);
2934 //Allocate buffers with automatic pool growing enabled
// Page-aligned buffers are allocated unmapped (EShPoolAllocNoMap) and
// mapped explicitly later; non-page-aligned buffers are always mapped.
2937 for (i = 0; i < KMaxKernelAllocations && r == KErrNoMemory; i++)
2939 __KHEAP_FAILNEXT(i);
2941 r = buf1.Alloc(pool, EShPoolAllocNoMap);
2943 r = buf1.Alloc(pool);
2946 test.Printf(_L("Allocate shared buffer 1 took %d tries\n"),i);
2949 // delay to allow the pool to grow
2954 for (i = 0; i < KMaxKernelAllocations && r == KErrNoMemory; i++)
2956 __KHEAP_FAILNEXT(i);
2958 r = buf2.Alloc(pool, EShPoolAllocNoMap);
2960 r = buf2.Alloc(pool);
2964 test.Printf(_L("Allocate shared buffer 2 took %d tries\n"),i);
2967 // delay to allow the pool to grow again
2972 for (i = 0; i < KMaxKernelAllocations && r == KErrNoMemory; i++)
2974 __KHEAP_FAILNEXT(i);
2976 r = buf3.Alloc(pool, EShPoolAllocNoMap);
2978 r = buf3.Alloc(pool);
2981 test.Printf(_L("Allocate shared buffer 3 took %d tries\n"),i);
2984 //Map a buffer in page-aligned-pool case
2987 //Open a one-buffer window
2988 r = pool.SetBufferWindow(1, ETrue);
2993 for (i = 0; i < KMaxKernelAllocations && r == KErrNoMemory; i++)
2996 __KHEAP_FAILNEXT(i);
3000 test.Printf(_L("Mapping buffer 1 took %d tries\n"),i);
3004 //Setup low-space notification
// Retry until the request is accepted (status left pending rather than
// completed immediately with KErrNoMemory).
3007 for (i = 0; i < KMaxKernelAllocations && low != KRequestPending; i++)
3009 __KHEAP_FAILNEXT(i);
3010 pool.RequestLowSpaceNotification(1, low);
3013 test.Printf(_L("Setting up low-space notification took %d tries\n"),i);
// NOTE(review): argument order is (actual, expected) here, inverted vs. the
// file's usual test_Equal(expected, actual) convention; harmless for equality.
3014 test_Equal(low.Int(), KRequestPending);
3016 //Setup free-space notification
3017 TRequestStatus free;
3018 free = KErrNoMemory;
3019 for (i = 0; i < KMaxKernelAllocations && free != KRequestPending; i++)
3021 __KHEAP_FAILNEXT(i);
3022 pool.RequestFreeSpaceNotification(4, free);
3025 test.Printf(_L("Setting up free-space notification took %d tries\n"),i);
3026 test_Equal(free.Int(), KRequestPending);
3028 //No allocations should occur here
// FAILNEXT(1) arms failure on the very next kernel-heap allocation; the
// following operations must not allocate, so they must still succeed.
3029 __KHEAP_FAILNEXT(1);
3036 //Cancel the notifications
3037 pool.CancelLowSpaceNotification(low);
3038 pool.CancelFreeSpaceNotification(free);
3040 //Close the buffers and the pool
3049 // Allocate kernel-side buffer on Pool 2
3053 for (i = 0; i < KMaxKernelAllocations && r == KErrNoMemory; i++)
3055 __KHEAP_FAILNEXT(i);
3056 r = Ldd.AllocateKernelBuffer(1, handle);
3059 test.Printf(_L("Allocate kernel buffer took %d tries\n"),i);
// Adopting an existing handle must not allocate.
3062 __KHEAP_FAILNEXT(1);
3063 kbuf.SetHandle(handle);
3067 for (i = 0; i < KMaxKernelAllocations && r == KErrNoMemory; i++)
3070 __KHEAP_FAILNEXT(i);
3074 test.Printf(_L("Mapping kernel buffer took %d tries\n"),i);
3077 __KHEAP_FAILNEXT(1);
// Release build: heap-failure injection unavailable, so the test is skipped.
3084 test.Printf(_L("Debug builds only. Test skipped."));
3090 @SYMTestCaseDesc Stress testing
3094 @SYMTestExpectedResults
3095 @SYMTestPriority Medium
// Stress thread 1: repeatedly creates and destroys pools (alternating a
// page-aligned and a non-page-aligned variant) in a tight loop until the
// thread is killed by StressTesting(). Errors are reported via RDebug
// rather than RTest because this runs on a worker thread.
// NOTE(review): interior source lines (loop structure, pool.Close(), braces)
// are elided in this view; comments annotate visible lines only.
3098 TInt StressThread1(TAny*)
3102 r = HAL::Get(HAL::EMemoryPageSize, pagesize);
// Page-aligned pool: 512 buffers of 1000 bytes.
3111 TShPoolCreateInfo inf(TShPoolCreateInfo::EPageAlignedBuffer, 1000, 512);
3112 r = pool.Create(inf,KDefaultPoolHandleFlags);
3115 RDebug::Printf("Error %d line %d", r, __LINE__);
3119 r = pool.SetBufferWindow(-1, ETrue);
// Non-page-aligned pool: 200 buffers of 10000 bytes, no extra alignment.
3125 TShPoolCreateInfo inf(TShPoolCreateInfo::ENonPageAlignedBuffer, 10000, 200, 0);
3126 r = pool.Create(inf,KDefaultPoolHandleFlags);
3129 RDebug::Printf("Error %d line %d", r, __LINE__);
// Iteration count reported when the thread exits.
3137 RDebug::Printf("ST1 %d iterations", i);
// Stress thread 2: repeatedly drains and refills the two shared pools
// (P1, non-page-aligned; P2, page-aligned/kernel-created) while
// StressThread1 churns pools of its own. Allocates every buffer up to each
// pool's iMaxBufs, writes to each buffer, then closes and removes them all.
// NOTE(review): interior source lines (GetInfo calls, Alloc calls, braces)
// are elided in this view; comments annotate visible lines only.
3143 TInt StressThread2(TAny*)
3145 TInt r = KErrUnknown;
3154 RArray<RShBuf> bufarray1;
3155 RArray<RShBuf> bufarray2;
// Fill pool 1 to capacity, touching each buffer to verify the mapping.
3156 for (i = 0; i < inf1.iMaxBufs; i++)
3162 RDebug::Printf("Error %d line %d i=%d", r, __LINE__, i);
3165 TPtr8(buf.Ptr(), buf.Size(),buf.Size()).Fill('1');
3166 r = bufarray1.Append(buf);
3170 RDebug::Printf("Error %d line %d i=%d", r, __LINE__, i);
// Fill pool 2 the same way.
3174 for (i = 0; i < inf2.iMaxBufs; i++)
3180 RDebug::Printf("Error %d line %d i=%d", r, __LINE__, i);
3183 TPtr8(buf.Ptr(), buf.Size(),buf.Size()).Fill('2');
// NOTE(review): unlike bufarray1 above, this Append's return value is
// ignored — inconsistent error handling worth confirming/fixing upstream.
3184 bufarray2.Append(buf);
// Release every buffer back to its pool.
3187 while (bufarray1.Count())
3189 bufarray1[0].Close();
3190 bufarray1.Remove(0);
3194 while (bufarray2.Count())
3196 bufarray2[0].Close();
3197 bufarray2.Remove(0);
// Iteration count reported when the thread exits.
3208 RDebug::Printf("ST2 %d iterations", j);
// Stress-test driver: creates the two global pools (P1 user-side,
// P2 kernel-side via the LDD), starts StressThread1/StressThread2, lets
// them run for aSecs seconds, kills them, waits for their death
// notifications, then closes the pools.
// @param aSecs  How long (in seconds) to let the stress threads run.
// NOTE(review): interior source lines (test_KErrNone checks, Logon/Kill
// calls, braces) are elided in this view; comments annotate visible lines only.
3214 void StressTesting(TInt aSecs)
3216 test.Next(_L("Stress testing"));
3219 test.Start(_L("Create pools"));
// P1: non-page-aligned, 500 buffers of 2000 bytes, 2KB (2^11) alignment.
3220 TShPoolCreateInfo inf1(TShPoolCreateInfo::ENonPageAlignedBuffer, 2000, 500, 11);
3221 r = P1.Create(inf1,KDefaultPoolHandleFlags);
// P2: page-aligned, created kernel-side; adopt the returned handle here.
3224 TShPoolCreateInfo inf2(TShPoolCreateInfo::EPageAlignedBuffer, 5000, 150);
3225 r = Ldd.OpenKernelPool(inf2, handle);
3227 P2.SetHandle(handle);
3229 r = P2.SetBufferWindow(-1, ETrue);
3232 test.Next(_L("Create threads"));
3234 r = t1.Create(_L("THREAD1"), StressThread1, KDefaultStackSize, KMinHeapSize, KMinHeapSize, NULL);
// Thread 2 gets a bigger stack and up to 1MB of heap for its buffer arrays.
3237 r = t2.Create(_L("THREAD2"), StressThread2, KDefaultStackSize*2, KMinHeapSize, 1 << 20, NULL);
3239 test.Next(_L("Start threads"));
3240 test.Printf(_L("Wait for %d seconds\n"), aSecs);
// Boost this thread so it can reliably kill the workers after the timeout.
3241 RThread().SetPriority(EPriorityMore);
3242 TRequestStatus t1rs;
3243 TRequestStatus t2rs;
3248 User::After(aSecs * 1000000);
3250 test.Next(_L("Kill threads"));
3254 // wait for threads to actually die
3255 User::WaitForRequest(t1rs);
3256 User::WaitForRequest(t2rs);
3260 RThread().SetPriority(EPriorityNormal);
3262 test.Next(_L("Close pools"));
3264 r = Ldd.CloseKernelPool();
3275 @SYMTestExpectedResults
// Launches a slave copy of this test executable (with the SLAVE
// ETestSlaveNoDeallocation argument) that allocates a shared buffer and
// exits WITHOUT freeing it, then verifies the slave exited cleanly —
// i.e. the kernel reclaimed the leaked pool/buffer on process death.
// NOTE(review): interior source lines (RProcess/TRequestStatus declarations,
// Logon/Resume, braces) are elided in this view.
3279 void NoDeallocation()
3281 test.Next(_L("No deallocation"));
// Build the slave command line: "SLAVE <function-id>".
3284 command.Format(_L("%S %d"), &KTestSlave, ETestSlaveNoDeallocation);
3286 r = p.Create(RProcess().FileName(), command);
3291 User::WaitForRequest(rs);
3293 // wait for memory to be freed
// Supervisor barrier (5000ms budget) lets kernel-side cleanup complete
// before we assert on the slave's exit status.
3294 r = UserSvr::HalFunction(EHalGroupKernel, EKernelHalSupervisorBarrier, (TAny*)5000, 0);
// Slave must have terminated normally: EExitKill with reason KErrNone.
3298 test_KErrNone(rs.Int());
3299 test_Equal(EExitKill, p.ExitType());
3300 test_KErrNone(p.ExitReason());
// Slave-process body for the NoDeallocation test: creates a pool, maps it,
// allocates a buffer, and deliberately returns without closing anything.
// The parent verifies the kernel cleans these resources up on process exit.
// NOTE(review): interior source lines (declarations, result checks, return)
// are elided in this view.
3304 TInt SlaveNoDeallocation()
3309 TShPoolCreateInfo inf(TShPoolCreateInfo::EPageAlignedBuffer, *BufferSize, KTestPoolSizeInBufs);
3310 r = pool.Create(inf,KDefaultPoolHandleFlags);
// Map the whole pool (window -1); result deliberately unchecked here —
// presumably acceptable for a leak test, but worth confirming upstream.
3313 pool.SetBufferWindow(-1, ETrue);
// Intentionally leaked: neither buf nor pool is closed before exit.
3319 r = buf.Alloc(pool);
3328 // Parse command line for slave processes
3329 TInt r = KErrArgument;
3330 TBuf<KMaxFullName> cmd;
3331 User::CommandLine(cmd);
3333 if (lex.NextToken() == KTestSlave)
3336 TLex functionlex(lex.NextToken());
3337 functionlex.Val(function);
3340 case ETestSlaveNoDeallocation:
3341 r = SlaveNoDeallocation();
3350 test.Start(_L("Check for Shared Buffers availability"));
3352 TShPoolCreateInfo inf(TShPoolCreateInfo::EPageAlignedBuffer, *BufferSize, KTestPoolSizeInBufs);
3353 r = pool.Create(inf,KDefaultPoolHandleFlags);
3354 if (r == KErrNotSupported)
3356 test.Printf(_L("Not supported by this memory model.\n"));
3363 test.Next(_L("No device driver"));
3364 test.Start(_L("Start test loop"));
3365 for (PtrBufSize = BufferSize; *PtrBufSize != 0; PtrBufSize++)
3368 title.Format(_L("Buffer size = %d bytes"), *PtrBufSize);
3370 test.Start(_L("New test iteration"));
3371 BufferAlignmentUser();
3380 test.Next(_L("Load Device Driver"));
3381 LoadDeviceDrivers();
3383 #ifdef TEST_CLIENT_THREAD
3384 test.Next(_L("Device driver in client thread"));
3387 test.Next(_L("Device driver in own thread"));
3393 test.Start(_L("Start test loop"));
3394 for (PtrBufSize = BufferSize; *PtrBufSize != 0; PtrBufSize++)
3397 title.Format(_L("Buffer size = %d bytes"), *PtrBufSize);
3399 test.Start(_L("New test iteration"));
3400 CreateUserPool(ETestNonPageAligned);
3401 CreateKernelPool(ETestNonPageAligned);
3402 AllocateUserBuffer();
3403 AllocateKernelBuffer();
3404 AllocateUserMax(P1);
3405 AllocateUserMax(P2);
3406 AllocateKernelMax();
3407 BufferAlignmentKernel();
3408 CreateKernelPoolPhysAddr();
3409 NotificationRequests(P1);
3410 NotificationRequests(P2);
3411 CancelNotificationRequests(P1);
3412 CancelNotificationRequests(P2);
3416 ContiguousPoolKernel();
3417 CreateUserPool(ETestPageAligned);
3418 CreateKernelPool(ETestPageAligned);
3420 AllocateUserBuffer();
3421 AllocateKernelBuffer();
3422 AllocateUserMax(P1);
3423 AllocateUserMax(P2);
3424 AllocateKernelMax();
3425 NotificationRequests(P1);
3426 NotificationRequests(P2);
3429 CreateUserPool(ETestPageAlignedGrowing);
3430 CreateKernelPool(ETestPageAlignedGrowing);
3432 AllocateKernelMax();
3433 AllocateUserMax(P1);
3434 AllocateUserMax(P2);
3439 NegativeTestsKernel();
3444 NegativeTestsUser();
3447 test.Next(_L("Unload Device Drivers"));
3448 FreeDeviceDrivers();