// Copyright (c) 2002-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32test\heap\t_heap2.cpp
// Overview:
// Tests RHeap class, including a stress test and a "grow in place"
// ReAlloc test.
// API Information:
// RHeap
// Details:
// - Test allocation on fixed length heaps in local, disconnected chunks for
// different heap sizes and alignments. Assumes knowledge of heap
// implementation.
// - Test allocation, free, reallocation and compression on chunk heaps with
// different maximum and minimum lengths and alignments. Assumes knowledge
// of heap implementation.
// - Stress test heap implementation with a single thread that allocates, frees
// and reallocates cells, and checks the heap.
// - Stress test heap implementation with two threads that run concurrently.
// - Create a chunk heap, test growing in place by allocating a cell and
// then reallocating additional space until failure, verify that the cell
// did not move and the size was increased.
// - The heap is checked to verify that no cells remain allocated after the
// tests are complete.
sl@0: // Platforms/Drives/Compatibility: sl@0: // All sl@0: // Assumptions/Requirement/Pre-requisites: sl@0: // Failures and causes: sl@0: // Base Port information: sl@0: // sl@0: // sl@0: sl@0: #include sl@0: #include sl@0: #include sl@0: #include sl@0: sl@0: // Needed for KHeapShrinkHysRatio which is now ROM 'patchdata' sl@0: #include "TestRHeapShrink.h" sl@0: sl@0: #define DECL_GET(T,x) inline T x() const {return i##x;} sl@0: #define DECL_GET2(T,x,y) inline T y() const {return i##x;} sl@0: sl@0: sl@0: #ifdef __EABI__ sl@0: IMPORT_D extern const TInt KHeapMinCellSize; sl@0: #else sl@0: const TInt KHeapMinCellSize = 0; sl@0: #endif sl@0: sl@0: RTest test(_L("T_HEAP2")); sl@0: sl@0: #define TEST_ALIGN(p,a) test((TLinAddr(p)&((a)-1))==0) sl@0: sl@0: struct STestCell sl@0: { sl@0: enum {EMagic = 0xb8aa3b29}; sl@0: sl@0: TUint32 iLength; sl@0: TUint32 iData[1]; sl@0: sl@0: void Set(TInt aLength); sl@0: void Verify(TInt aLength); sl@0: void Verify(const TAny* aInitPtr, TInt aInitLength, TInt aLength); sl@0: }; sl@0: sl@0: void STestCell::Set(TInt aLength) sl@0: { sl@0: TInt i; sl@0: TUint32 x = (TUint32)this ^ (TUint32)aLength ^ (TUint32)EMagic; sl@0: aLength -= RHeap::EAllocCellSize; sl@0: if (aLength==0) sl@0: return; sl@0: iLength = x; sl@0: aLength /= sizeof(TUint32); sl@0: for (i=0; ilen; sl@0: TUint8* pEnd = (TUint8*)pC + len; sl@0: TEST_ALIGN(aCell, iAlign); sl@0: TEST_ALIGN(len, iAlign); sl@0: test(len >= iMinCell); sl@0: test((TUint8*)pC>=iBase && pEnd<=iTop); sl@0: return len; sl@0: } sl@0: sl@0: void RTestHeap::FullCheckAllocatedCell(const TAny* aCell) const sl@0: { sl@0: ((STestCell*)aCell)->Verify(CheckAllocatedCell(aCell)); sl@0: } sl@0: sl@0: TAny* RTestHeap::TestAlloc(TInt aSize) sl@0: { sl@0: TAny* p = Alloc(aSize); sl@0: if (p) sl@0: { sl@0: TInt len = CheckAllocatedCell(p); sl@0: test((len-RHeap::EAllocCellSize)>=aSize); sl@0: ((STestCell*)p)->Set(len); sl@0: } sl@0: return p; sl@0: } sl@0: sl@0: void RTestHeap::TestFree(TAny* aPtr) sl@0: { sl@0: if 
(aPtr) sl@0: FullCheckAllocatedCell(aPtr); sl@0: Free(aPtr); sl@0: } sl@0: sl@0: TAny* RTestHeap::TestReAlloc(TAny* aPtr, TInt aSize, TInt aMode) sl@0: { sl@0: TInt old_len = aPtr ? CheckAllocatedCell(aPtr) : 0; sl@0: if (aPtr) sl@0: ((STestCell*)aPtr)->Verify(old_len); sl@0: TAny* p = ReAlloc(aPtr, aSize, aMode); sl@0: if (!p) sl@0: { sl@0: ((STestCell*)aPtr)->Verify(old_len); sl@0: return p; sl@0: } sl@0: TInt new_len = CheckAllocatedCell(p); sl@0: test((new_len-RHeap::EAllocCellSize)>=aSize); sl@0: if (p == aPtr) sl@0: { sl@0: ((STestCell*)p)->Verify(p, old_len, Min(old_len, new_len)); sl@0: if (new_len != old_len) sl@0: ((STestCell*)p)->Set(new_len); sl@0: return p; sl@0: } sl@0: test(!(aMode & ENeverMove)); sl@0: test((new_len > old_len) || (aMode & EAllowMoveOnShrink)); sl@0: if (old_len) sl@0: ((STestCell*)p)->Verify(aPtr, old_len, Min(old_len, new_len)); sl@0: if (new_len != old_len) sl@0: ((STestCell*)p)->Set(new_len); sl@0: return p; sl@0: } sl@0: sl@0: struct SHeapCellInfo sl@0: { sl@0: RTestHeap* iHeap; sl@0: TInt iTotalAlloc; sl@0: TInt iTotalAllocSize; sl@0: TInt iTotalFree; sl@0: TUint8* iNextCell; sl@0: }; sl@0: sl@0: void RTestHeap::WalkFullCheckCell(TAny* aPtr, TCellType aType, TAny* aCell, TInt aLen) sl@0: { sl@0: (void)aCell; sl@0: ::SHeapCellInfo& info = *(::SHeapCellInfo*)aPtr; sl@0: switch(aType) sl@0: { sl@0: case EGoodAllocatedCell: sl@0: { sl@0: test(aCell == info.iNextCell); sl@0: TInt len = ((SCell*)aCell)->len; sl@0: test(len == aLen); sl@0: info.iNextCell += len; sl@0: ++info.iTotalAlloc; sl@0: info.iTotalAllocSize += (aLen-EAllocCellSize); sl@0: STestCell* pT = (STestCell*)((TUint8*)aCell + EAllocCellSize); sl@0: pT->Verify(len); sl@0: break; sl@0: } sl@0: case EGoodFreeCell: sl@0: { sl@0: test(aCell == info.iNextCell); sl@0: TInt len = ((SCell*)aCell)->len; sl@0: test(len == aLen); sl@0: info.iNextCell += len; sl@0: ++info.iTotalFree; sl@0: break; sl@0: } sl@0: default: sl@0: test.Printf(_L("TYPE=%d ??\n"),aType); sl@0: test(0); 
sl@0: break; sl@0: } sl@0: } sl@0: sl@0: void RTestHeap::FullCheck() sl@0: { sl@0: ::SHeapCellInfo info; sl@0: Mem::FillZ(&info, sizeof(info)); sl@0: info.iHeap = this; sl@0: info.iNextCell = iBase; sl@0: DebugFunction(EWalk, (TAny*)&WalkFullCheckCell, &info); sl@0: test(info.iNextCell == iTop); sl@0: test(info.iTotalAlloc == iCellCount); sl@0: test(info.iTotalAllocSize == iTotalAllocSize); sl@0: } sl@0: sl@0: TInt RTestHeap::FreeCellLen(const TAny* aPtr) const sl@0: { sl@0: SCell* p = iFree.next; sl@0: SCell* q = (SCell*)((TUint8*)aPtr - EAllocCellSize); sl@0: for (; p && p!=q; p = p->next) {} sl@0: if (p == q) sl@0: return p->len - EAllocCellSize; sl@0: return -1; sl@0: } sl@0: sl@0: TInt RTestHeap::LastFreeCellLen(void) const sl@0: { sl@0: SCell* p = iFree.next; sl@0: if (p==NULL) sl@0: return -1; sl@0: for (; p->next; p=p->next){} sl@0: return p->len; sl@0: } sl@0: sl@0: sl@0: /** Checks whether a call to Compress() will actually perform a reduction sl@0: of the heap. sl@0: Relies on the free last cell on the heap being cell that has just been freed sl@0: plus any extra. sl@0: Intended for use by t_heap2.cpp - DoTest4(). sl@0: @param aFreedSize The size in bytes of the cell that was freed sl@0: */ sl@0: TInt RTestHeap::CalcComp(TInt aFreedSize) sl@0: { sl@0: TInt largestCell=0; sl@0: largestCell = LastFreeCellLen(); sl@0: // if the largest cell is too small or it would have been compressed by the sl@0: // free operation then return 0. sl@0: if (largestCell < iPageSize || aFreedSize >= KHeapShrinkHysRatio*(iGrowBy>>8)) sl@0: { sl@0: return 0; sl@0: } sl@0: else sl@0: { sl@0: return _ALIGN_DOWN(aFreedSize,iPageSize); sl@0: } sl@0: } sl@0: sl@0: /** compress the heap if the KHeapShrinkRatio is too large for what we are sl@0: expecting in DoTest4(). 
sl@0: */ sl@0: void RTestHeap::ForceCompress(TInt aFreed) sl@0: { sl@0: if (aFreed < KHeapShrinkHysRatio*(iGrowBy>>8)) sl@0: { sl@0: Compress(); sl@0: } sl@0: } sl@0: RTestHeap* RTestHeap::FixedHeap(TInt aMaxLength, TInt aAlign, TBool aSingleThread) sl@0: { sl@0: RChunk c; sl@0: TInt bottom = 0x40000; sl@0: TInt top = bottom + aMaxLength; sl@0: TInt r = c.CreateDisconnectedLocal(bottom, top, top + bottom, EOwnerThread); sl@0: if (r!=KErrNone) sl@0: return NULL; sl@0: TUint8* base = c.Base() + bottom; sl@0: RTestHeap* h = (RTestHeap*)UserHeap::FixedHeap(base, aMaxLength, aAlign, aSingleThread); sl@0: if (!aAlign) sl@0: aAlign = RHeap::ECellAlignment; sl@0: test((TUint8*)h == base); sl@0: test(h->AccessCount() == 1); sl@0: test(h->HandleCount() == (aSingleThread ? 0 : 1)); sl@0: test(h->Handles() == (aSingleThread ? NULL : (TInt*)&h->LockRef())); sl@0: test(h->Flags() == TUint32(RAllocator::EFixedSize | (aSingleThread ? RAllocator::ESingleThreaded : 0))); sl@0: test(h->CellCount() == 0); sl@0: test(h->TotalAllocSize() == 0); sl@0: test(h->MaxLength() == aMaxLength); sl@0: test(h->MinLength() == h->Top() - (TUint8*)h); sl@0: test(h->Offset() == 0); sl@0: test(h->GrowBy() == 0); sl@0: test(h->ChunkHandle() == 0); sl@0: test(h->Align() == aAlign); sl@0: TInt min_cell = _ALIGN_UP((KHeapMinCellSize + Max((TInt)RHeap::EAllocCellSize, (TInt)RHeap::EFreeCellSize)), aAlign); sl@0: TInt hdr_len = _ALIGN_UP(sizeof(RHeap) + RHeap::EAllocCellSize, aAlign) - RHeap::EAllocCellSize; sl@0: TInt user_len = _ALIGN_DOWN(aMaxLength - hdr_len, aAlign); sl@0: test(h->Base() == base + hdr_len); sl@0: test(h->MinCell() == min_cell); sl@0: test(h->Top() - h->Base() == user_len); sl@0: test(h->FreeRef().next == (RHeap::SCell*)h->Base()); sl@0: h->TakeChunkOwnership(c); sl@0: return h; sl@0: } sl@0: sl@0: void RTestHeap::TakeChunkOwnership(RChunk aChunk) sl@0: { sl@0: iChunkHandle = aChunk.Handle(); sl@0: ++iHandleCount; sl@0: iHandles = &iChunkHandle; sl@0: } sl@0: sl@0: sl@0: #define 
ACCESS_COUNT(h) (((RTestHeap*)h)->AccessCount()) sl@0: #define HANDLE_COUNT(h) (((RTestHeap*)h)->HandleCount()) sl@0: #define HANDLES(h) (((RTestHeap*)h)->Handles()) sl@0: #define FLAGS(h) (((RTestHeap*)h)->Flags()) sl@0: #define CELL_COUNT(h) (((RTestHeap*)h)->CellCount()) sl@0: #define TOTAL_ALLOC_SIZE(h) (((RTestHeap*)h)->TotalAllocSize()) sl@0: #define MIN_LENGTH(h) (((RTestHeap*)h)->MinLength()) sl@0: #define OFFSET(h) (((RTestHeap*)h)->Offset()) sl@0: #define GROW_BY(h) (((RTestHeap*)h)->GrowBy()) sl@0: #define CHUNK_HANDLE(h) (((RTestHeap*)h)->ChunkHandle()) sl@0: #define LOCK_REF(h) (((RTestHeap*)h)->LockRef()) sl@0: #define TOP(h) (((RTestHeap*)h)->Top()) sl@0: #define ALIGN(h) (((RTestHeap*)h)->Align()) sl@0: #define MIN_CELL(h) (((RTestHeap*)h)->MinCell()) sl@0: #define PAGE_SIZE(h) (((RTestHeap*)h)->PageSize()) sl@0: #define FREE_REF(h) (((RTestHeap*)h)->FreeRef()) sl@0: sl@0: void DoTest1(RHeap* aH) sl@0: { sl@0: RTestHeap* h = (RTestHeap*)aH; sl@0: test.Printf(_L("Test Alloc: min=%x max=%x align=%d growby=%d\n"), sl@0: h->MinLength(), h->MaxLength(), h->Align(), h->GrowBy()); sl@0: TInt l; sl@0: TAny* p = NULL; sl@0: TUint8* next = h->Base(); sl@0: TUint8* top = h->Top(); sl@0: TUint8* limit = (TUint8*)h + h->MaxLength(); sl@0: TBool fixed = h->Flags() & RAllocator::EFixedSize; sl@0: for (l=1; l<=1024; ++l) sl@0: { sl@0: TInt remain1 = top - next; sl@0: TInt xl1 = _ALIGN_UP(Max((l+RHeap::EAllocCellSize), h->MinCell()), h->Align()); sl@0: p = h->TestAlloc(l); sl@0: if ( (fixed && remain1 < xl1) || (next + xl1 > limit) ) sl@0: { sl@0: test(p == NULL); sl@0: test(top == h->Top()); sl@0: test.Printf(_L("Alloc failed at l=%d next=%08x\n"), l, next); sl@0: break; sl@0: } sl@0: test(p == next + RHeap::EAllocCellSize); sl@0: if (xl1 > remain1) sl@0: { sl@0: // no room for this cell sl@0: TInt g = h->GrowBy(); sl@0: while (xl1 > remain1) sl@0: { sl@0: top += g; sl@0: remain1 += g; sl@0: } sl@0: } sl@0: test(top == h->Top()); sl@0: if (xl1 + h->MinCell() > 
remain1) sl@0: { sl@0: // this cell fits but remainder is too small or nonexistent sl@0: xl1 = top - next; sl@0: next = top; sl@0: test(h->FreeRef().next == NULL); sl@0: } sl@0: else sl@0: { sl@0: // this cell fits and remainder can be reused sl@0: next += xl1; sl@0: } sl@0: test(aH->AllocLen(p) == xl1 - RHeap::EAllocCellSize); sl@0: } sl@0: h->FullCheck(); sl@0: } sl@0: sl@0: void DoTest2(RHeap* aH) sl@0: { sl@0: RTestHeap* h = (RTestHeap*)aH; sl@0: test.Printf(_L("Test Free: min=%x max=%x align=%d growby=%d\n"), sl@0: h->MinLength(), h->MaxLength(), h->Align(), h->GrowBy()); sl@0: TInt al; sl@0: TInt min = h->MinCell(); sl@0: TBool pad = EFalse; sl@0: for (al=1; al<256; (void)((pad=!pad)!=0 || (al+=al+1)) ) sl@0: { sl@0: TAny* p[32]; sl@0: TInt last_len = 0; sl@0: TAny* last = NULL; sl@0: TInt i; sl@0: test.Printf(_L("al=%d pad=%d\n"), al, pad); sl@0: TUint8* top=0; sl@0: TAny* spare=0; sl@0: TBool heapReduced = EFalse; sl@0: for (i=0; i<32; ++i) sl@0: { sl@0: // Check whether the cell created for the allocation of al would end up sl@0: // including extra bytes from the last free cell that aren't enough sl@0: // to create a new free cell. sl@0: top = h->Top(); sl@0: TInt freeLen=h->LastFreeCellLen(); sl@0: TInt actualAllocBytes = Max(_ALIGN_UP(al + RHeap::EAllocCellSize, h->Align()), min); sl@0: TInt remainingBytes = freeLen - actualAllocBytes; sl@0: if (remainingBytes < min) sl@0: { sl@0: // Force the heap to grow so that once this allocation is freed sl@0: // the free cell left will be large enough to include the al allocation sl@0: // and to create a new free cell if necessary. 
sl@0: actualAllocBytes = _ALIGN_UP(actualAllocBytes + min, h->Align()); sl@0: TAny* q = h->TestAlloc(actualAllocBytes); sl@0: // Check heap has grown sl@0: test(top < h->Top()); sl@0: top = h->Top(); sl@0: test(q!=NULL); sl@0: // Have grown the heap so allocate a cell as a place holder to stop sl@0: // the heap being shrunk and the actual cell we want to allocate from being the sl@0: // wrong size sl@0: spare=h->TestAlloc(8); sl@0: h->TestFree(q); sl@0: // Ensure heap wasn't shrunk after free sl@0: test(top == h->Top()); sl@0: } sl@0: top = h->Top(); sl@0: // Allocate the new sl@0: p[i] = h->TestAlloc(al); sl@0: test(p[i]!=NULL); sl@0: if (remainingBytes < min) sl@0: {// now safe to free any padding as p[i] now allocated and its size can't change sl@0: h->TestFree(spare); sl@0: } sl@0: TInt tmp1=h->AllocLen(p[i]); sl@0: TInt tmp2=Max(_ALIGN_UP(al+RHeap::EAllocCellSize,h->Align()), min)-RHeap::EAllocCellSize; sl@0: test(tmp1 == tmp2); sl@0: } sl@0: last = (TUint8*)p[31] + _ALIGN_UP(Max((al + RHeap::EAllocCellSize), min), h->Align()); sl@0: last_len = h->FreeCellLen(last); sl@0: test(last_len > 0); sl@0: if (pad) sl@0: { sl@0: test(h->TestAlloc(last_len) == last); sl@0: test(h->FreeRef().next == NULL); sl@0: } sl@0: else sl@0: last = NULL; sl@0: top = h->Top(); sl@0: for (i=0,heapReduced=EFalse; i<32; ++i) sl@0: { sl@0: h->TestFree(p[i]); sl@0: TInt fl = h->FreeCellLen(p[i]); sl@0: TInt xfl = _ALIGN_UP(Max((al + RHeap::EAllocCellSize), h->MinCell()), h->Align()) - RHeap::EAllocCellSize; sl@0: if (h->Top() < top) // heap was reduced due to small KHeapShrinkHysRatio and big KHeapMinCellSize sl@0: { sl@0: top = h->Top(); sl@0: heapReduced = ETrue; sl@0: } sl@0: sl@0: if (i < 31 || pad) sl@0: test(fl == xfl); sl@0: else sl@0: { sl@0: if (!heapReduced) sl@0: test(fl == xfl + RHeap::EAllocCellSize + last_len); sl@0: else sl@0: { sl@0: heapReduced = EFalse; sl@0: } sl@0: } sl@0: test(h->TestAlloc(al)==p[i]); sl@0: } sl@0: for (i=0,heapReduced=EFalse; i<31; ++i) sl@0: { 
sl@0: TInt j = i+1; sl@0: TUint8* q; sl@0: // Free to adjacent cells and check that the free cell left is the combined sl@0: // size of the 2 adjacent cells just freed sl@0: h->TestFree(p[i]); sl@0: h->TestFree(p[j]); sl@0: TInt fl = h->FreeCellLen(p[i]); sl@0: if (h->Top() < top) // heap was reduced due to small KHeapShrinkHysRatio and big KHeapMinCellSize sl@0: { sl@0: top = h->Top(); sl@0: heapReduced = ETrue; sl@0: } sl@0: TInt xfl = 2 * _ALIGN_UP(Max((al + RHeap::EAllocCellSize), h->MinCell()), h->Align()) - RHeap::EAllocCellSize; sl@0: if (j < 31 || pad) sl@0: test(fl == xfl); sl@0: else sl@0: { sl@0: if (!heapReduced) sl@0: test(fl == xfl + RHeap::EAllocCellSize + last_len); sl@0: else sl@0: { sl@0: heapReduced = EFalse; sl@0: } sl@0: } sl@0: test(h->FreeCellLen(p[j]) < 0); sl@0: test(h->TestAlloc(fl)==p[i]); sl@0: test(h->Top() == top); sl@0: h->TestFree(p[i]); sl@0: test(h->FreeCellLen(p[i]) == fl); sl@0: // test when you alloc a cell that is larger than cells just freed sl@0: // that its position is not the same as the freed cells sl@0: // will hold for all cells except top/last one sl@0: if (j < 31 && !pad && fl < last_len) sl@0: { sl@0: q = (TUint8*)h->TestAlloc(fl+1); sl@0: if (h->Top() > top) sl@0: top = h->Top(); sl@0: test(h->Top() == top); sl@0: test(q > p[i]); sl@0: h->TestFree(q); sl@0: if (h->Top() < top) // heap was reduced due to small KHeapShrinkHysRatio and big KHeapMinCellSize sl@0: { sl@0: top = h->Top(); sl@0: heapReduced = ETrue; sl@0: } sl@0: } sl@0: // check cell that is just smaller than space but not small enough sl@0: // for a new free cell to be created, is the size of whole free cell sl@0: test(h->TestAlloc(fl-min+1)==p[i]); sl@0: test(h->Top() == top); sl@0: test(h->AllocLen(p[i])==fl); sl@0: h->TestFree(p[i]); sl@0: // Check cell that is small enough for new free cell and alloc'd cell to be sl@0: // created at p[i] cell is created at p[i] sl@0: test(h->TestAlloc(fl-min)==p[i]); sl@0: test(h->Top() == top); sl@0: // check free 
cell is at expected position sl@0: q = (TUint8*)p[i] + fl - min + RHeap::EAllocCellSize; sl@0: test(h->FreeCellLen(q) == min - RHeap::EAllocCellSize); sl@0: // alloc 0 length cell at q, will work as new cell of min length will be created sl@0: test(h->TestAlloc(0) == q); sl@0: test(h->Top() == top); sl@0: h->TestFree(p[i]); sl@0: test(h->FreeCellLen(p[i]) == fl - min); sl@0: h->TestFree(q); sl@0: // again check free cells are combined sl@0: test(h->FreeCellLen(q) < 0); sl@0: test(h->FreeCellLen(p[i]) == fl); sl@0: // check reallocating the cells places them back to same positions sl@0: test(h->TestAlloc(al)==p[i]); sl@0: test(h->Top() == top); sl@0: test(h->TestAlloc(al)==p[j]); sl@0: test(h->Top() == top); sl@0: if (pad) sl@0: test(h->FreeRef().next == NULL); sl@0: } sl@0: for (i=0,heapReduced=EFalse; i<30; ++i) sl@0: { sl@0: TInt j = i+1; sl@0: TInt k = i+2; sl@0: TUint8* q; sl@0: // Free 3 adjacent cells and check free cell created is combined size sl@0: h->TestFree(p[i]); sl@0: h->TestFree(p[k]); sl@0: h->TestFree(p[j]); sl@0: h->FullCheck(); sl@0: if (h->Top() < top) // heap was reduced due to small KHeapShrinkHysRatio and big KHeapMinCellSize sl@0: { sl@0: top = h->Top(); sl@0: heapReduced = ETrue; sl@0: } sl@0: TInt fl = h->FreeCellLen(p[i]); sl@0: TInt xfl = 3 * _ALIGN_UP(Max((al + RHeap::EAllocCellSize), h->MinCell()), h->Align()) - RHeap::EAllocCellSize; sl@0: if (k < 31 || pad) sl@0: test(fl == xfl); sl@0: else sl@0: { sl@0: if (!heapReduced) sl@0: test(fl == xfl + RHeap::EAllocCellSize + last_len); sl@0: else sl@0: { sl@0: heapReduced = EFalse; sl@0: } sl@0: } sl@0: test(h->FreeCellLen(p[j]) < 0); sl@0: test(h->FreeCellLen(p[k]) < 0); sl@0: //ensure created free cell is allocated to new cell of free cell size sl@0: test(h->TestAlloc(fl)==p[i]); sl@0: test(h->Top() == top); sl@0: h->TestFree(p[i]); sl@0: test(h->FreeCellLen(p[i]) == fl); sl@0: if (h->Top() < top) // heap was reduced due to small KHeapShrinkHysRatio and big KHeapMinCellSize sl@0: top = 
h->Top(); sl@0: if (k < 31 && !pad && fl < last_len) sl@0: { sl@0: // Test new cell one larger than free cell size is allocated somewhere else sl@0: q = (TUint8*)h->TestAlloc(fl+1); sl@0: if (h->Top() > top) sl@0: top = h->Top(); sl@0: test(h->Top() == top); sl@0: test(q > p[i]); sl@0: h->TestFree(q); sl@0: if (h->Top() < top) // heap was reduced due to small KHeapShrinkHysRatio and big KHeapMinCellSize sl@0: { sl@0: top = h->Top(); sl@0: heapReduced = ETrue; sl@0: } sl@0: } sl@0: // check allocating cell just smaller than free cell size but sl@0: // too large for neew free cell to be created, is size of whole free cell sl@0: test(h->TestAlloc(fl-min+1)==p[i]); sl@0: test(h->Top() == top); sl@0: test(h->AllocLen(p[i])==fl); sl@0: h->TestFree(p[i]); sl@0: // ensure free cell is created this time as well as alloc'd cell sl@0: test(h->TestAlloc(fl-min)==p[i]); sl@0: test(h->Top() == top); sl@0: q = (TUint8*)p[i] + fl - min + RHeap::EAllocCellSize; sl@0: test(h->FreeCellLen(q) == min - RHeap::EAllocCellSize); sl@0: test(h->TestAlloc(0) == q); sl@0: test(h->Top() == top); sl@0: h->TestFree(p[i]); sl@0: test(h->FreeCellLen(p[i]) == fl - min); sl@0: h->TestFree(q); sl@0: test(h->FreeCellLen(q) < 0); sl@0: test(h->FreeCellLen(p[i]) == fl); sl@0: // realloc all cells and check heap not expanded sl@0: test(h->TestAlloc(al)==p[i]); sl@0: test(h->Top() == top); sl@0: test(h->TestAlloc(al)==p[j]); sl@0: test(h->Top() == top); sl@0: test(h->TestAlloc(al)==p[k]); sl@0: test(h->Top() == top); sl@0: // If padding than no space should left on heap sl@0: if (pad) sl@0: test(h->FreeRef().next == NULL); sl@0: } sl@0: // when padding this will free padding from top of heap sl@0: h->TestFree(last); sl@0: } sl@0: h->FullCheck(); sl@0: } sl@0: sl@0: void DoTest3(RHeap* aH) sl@0: { sl@0: RTestHeap* h = (RTestHeap*)aH; sl@0: test.Printf(_L("Test ReAlloc: min=%x max=%x align=%d growby=%d\n"), sl@0: h->MinLength(), h->MaxLength(), h->Align(), h->GrowBy()); sl@0: // allocate continuous heap 
cell, then free them and reallocate again sl@0: TInt al; sl@0: for (al=1; al<256; al+=al+1) sl@0: { sl@0: TAny* p0 = h->TestAlloc(al); sl@0: TInt al0 = h->AllocLen(p0); sl@0: h->TestFree(p0); sl@0: TAny* p1 = h->TestReAlloc(NULL, al, 0); sl@0: TInt al1 = h->AllocLen(p1); sl@0: test(p1 == p0); sl@0: test(al1 == al0); sl@0: h->TestFree(p1); sl@0: TAny* p2 = h->TestAlloc(1); sl@0: TAny* p3 = h->TestReAlloc(p2, al, 0); sl@0: test(p3 == p0); sl@0: TInt al3 = h->AllocLen(p3); sl@0: test(al3 == al0); sl@0: h->TestFree(p3); sl@0: TAny* p4 = h->TestAlloc(1024); sl@0: TAny* p5 = h->TestReAlloc(p4, al, 0); sl@0: test(p5 == p0); sl@0: TInt al5 = h->AllocLen(p5); sl@0: test(al5 == al0); sl@0: h->TestFree(p5); sl@0: } sl@0: TInt i; sl@0: TInt j; sl@0: for (j=0; j<30; j+=3) sl@0: { sl@0: TAny* p[30]; sl@0: TInt ala[30]; sl@0: TInt fla[30]; sl@0: h->Reset(); sl@0: for (i=0; i<30; ++i) sl@0: { sl@0: p[i] = h->TestAlloc(8*i*i); sl@0: ala[i] = h->AllocLen(p[i]); sl@0: fla[i] = 0; sl@0: } sl@0: for (i=1; i<30; i+=3) sl@0: { sl@0: h->TestFree(p[i]); sl@0: fla[i] = h->FreeCellLen(p[i]); sl@0: test(fla[i] == ala[i]); sl@0: test(h->FreeCellLen(p[i-1]) < 0); sl@0: test(h->FreeCellLen(p[i+1]) < 0); sl@0: } sl@0: h->FullCheck(); sl@0: TInt al1 = _ALIGN_UP(Max((RHeap::EAllocCellSize + 1), h->MinCell()), h->Align()); sl@0: // adjust al1 for some case when reallocated heap cell will not be shrinked because remainder will not big enough sl@0: // to form a new free cell due to a big KHeapMinCellSize value sl@0: TInt alaj = ala[j] + RHeap::EAllocCellSize; sl@0: if (al1 < alaj && alaj - al1 < h->MinCell()) sl@0: al1 = alaj; sl@0: TAny* p1 = h->TestReAlloc(p[j], 1, RHeap::ENeverMove); sl@0: test(p1 == p[j]); sl@0: test(h->AllocLen(p1) == al1 - RHeap::EAllocCellSize); sl@0: TAny* p1b = (TUint8*)p1 + al1; sl@0: test(h->FreeCellLen(p1b) == fla[j+1] + RHeap::EAllocCellSize + ala[j] - al1); sl@0: TInt l2 = ala[j] + fla[j+1] + RHeap::EAllocCellSize; // max without moving sl@0: TInt l3 = l2 - h->MinCell(); 
sl@0: TAny* p3 = h->TestReAlloc(p[j], l3, RHeap::ENeverMove); sl@0: test(p3 == p[j]); sl@0: TAny* p3b = (TUint8*)p3 + h->AllocLen(p3) + RHeap::EAllocCellSize; sl@0: test(h->FreeCellLen(p3b) == h->MinCell() - RHeap::EAllocCellSize); sl@0: TAny* p2 = h->TestReAlloc(p[j], l2, RHeap::ENeverMove); sl@0: test(p2 == p[j]); sl@0: test(h->AllocLen(p2) == l2); sl@0: TAny* p4 = h->TestReAlloc(p[j], l2+1, RHeap::ENeverMove); sl@0: test(p4 == NULL); sl@0: test(h->AllocLen(p2) == l2); sl@0: TAny* p5 = h->TestReAlloc(p[j], l2+1, 0); sl@0: TInt k = 0; sl@0: for (; k<30 && fla[k] <= l2; ++k) {} sl@0: if (k < 30) sl@0: test(p5 == p[k]); sl@0: else sl@0: test(p5 >= (TUint8*)p[29] + ala[29]); sl@0: test(h->FreeCellLen(p2) == ala[j] + ala[j+1] + RHeap::EAllocCellSize); sl@0: TInt ali = _ALIGN_UP(RHeap::EAllocCellSize,h->Align()); sl@0: TAny* p6b = (TUint8*)p[j+2] + ala[j+2] - ali + RHeap::EAllocCellSize; sl@0: test(h->FreeCellLen(p6b) < 0); sl@0: TAny* p6 = h->TestReAlloc(p[j+2], ala[j+2] - ali , 0); sl@0: test(p6 == p[j+2]); sl@0: if (h->AllocLen(p6) != ala[j+2]) // allocated heap cell size changed sl@0: test(h->FreeCellLen(p6b) == h->MinCell() - RHeap::EAllocCellSize); sl@0: TInt g = h->GrowBy(); sl@0: TAny* p7 = h->TestReAlloc(p5, 8*g, 0); sl@0: test(p7 >= p5); sl@0: TUint8* p8 = (TUint8*)p7 - RHeap::EAllocCellSize + al1; sl@0: TUint8* p9 = (TUint8*)_ALIGN_UP(TLinAddr(p8), h->PageSize()); sl@0: if (p9-p8 < h->MinCell()) sl@0: p9 += h->PageSize(); sl@0: TAny* p7b = h->TestReAlloc(p7, 1, 0); sl@0: test(p7b == p7); sl@0: test(h->Top() + (RHeap::EAllocCellSize & (h->Align()-1)) == p9); sl@0: sl@0: h->FullCheck(); sl@0: } sl@0: } sl@0: sl@0: // Test compression sl@0: // {1 free cell, >1 free cell} x {reduce cell, eliminate cell, reduce cell but too small} sl@0: // sl@0: void DoTest4(RHeap* aH) sl@0: { sl@0: RTestHeap* h = (RTestHeap*)aH; sl@0: test.Printf(_L("Test Compress: min=%x max=%x align=%d growby=%d\n"), sl@0: h->MinLength(), h->MaxLength(), h->Align(), h->GrowBy()); sl@0: TInt 
page_size; sl@0: UserHal::PageSizeInBytes(page_size); sl@0: test(page_size == h->PageSize()); sl@0: TInt g = h->GrowBy(); sl@0: TEST_ALIGN(g, page_size); sl@0: test(g >= page_size); sl@0: RChunk c; sl@0: c.SetHandle(h->ChunkHandle()); sl@0: TInt align = h->Align(); sl@0: TInt minc = h->MinCell(); sl@0: sl@0: TInt orig_size = c.Size(); sl@0: TUint8* orig_top = h->Top(); sl@0: sl@0: // size in bytes that last free cell on the top of the heap must be sl@0: // before the heap will be shrunk, size must include the no of bytes to sl@0: // store the cell data/header i.e RHeap::EAllocCellSize sl@0: TInt shrinkThres = KHeapShrinkHysRatio*(g>>8); sl@0: sl@0: TInt pass; sl@0: for (pass=0; pass<2; ++pass) sl@0: { sl@0: TUint8* p0 = (TUint8*)h->TestAlloc(4); sl@0: test(p0 == h->Base() + RHeap::EAllocCellSize); sl@0: TInt l1 = h->Top() - (TUint8*)h->FreeRef().next; sl@0: TEST_ALIGN(l1, align); sl@0: l1 -= RHeap::EAllocCellSize; sl@0: TUint8* p1; sl@0: // Grow heap by 2*iGrowBy bytes sl@0: p1 = (TUint8*)h->TestAlloc(l1 + 2*g); sl@0: test(p1 == p0 + h->AllocLen(p0) + RHeap::EAllocCellSize); sl@0: test(h->Top() - orig_top == 2*g); sl@0: test(c.Size() - orig_size == 2*g); sl@0: // May compress heap, may not sl@0: h->TestFree(p1); sl@0: h->ForceCompress(2*g); sl@0: test(h->Top() == orig_top); sl@0: test(c.Size() == orig_size); sl@0: test((TUint8*)h->FreeRef().next == p1 - RHeap::EAllocCellSize); sl@0: h->FullCheck(); sl@0: //if KHeapShrinkHysRatio is > 2.0 then heap compression will occur here sl@0: test(h->Compress() == 0); sl@0: test(h->TestAlloc(l1) == p1); sl@0: test(h->FreeRef().next == NULL); sl@0: if (pass) sl@0: h->TestFree(p0); // leave another free cell on second pass sl@0: TInt l2 = g - RHeap::EAllocCellSize; sl@0: // Will grow heap by iGrowBy bytes sl@0: TUint8* p2 = (TUint8*)h->TestAlloc(l2); sl@0: test(p2 == orig_top + RHeap::EAllocCellSize); sl@0: test(h->Top() - orig_top == g); sl@0: test(c.Size() - orig_size == g); sl@0: // may or may not compress heap sl@0: 
h->TestFree(p2); sl@0: if (l2+RHeap::EAllocCellSize >= shrinkThres) sl@0: { sl@0: // When KHeapShrinkRatio small enough heap will have been compressed sl@0: test(h->Top() == orig_top); sl@0: if (pass) sl@0: { sl@0: test((TUint8*)h->FreeRef().next == p0 - RHeap::EAllocCellSize); sl@0: test((TUint8*)h->FreeRef().next->next == NULL); sl@0: } sl@0: else sl@0: test((TUint8*)h->FreeRef().next == NULL); sl@0: } sl@0: else sl@0: { sl@0: test(h->Top() - orig_top == g); sl@0: if (pass) sl@0: { sl@0: test((TUint8*)h->FreeRef().next == p0 - RHeap::EAllocCellSize); sl@0: test((TUint8*)h->FreeRef().next->next == orig_top); sl@0: } sl@0: else sl@0: test((TUint8*)h->FreeRef().next == orig_top); sl@0: } sl@0: // this compress will only do anything if the KHeapShrinkRatio is large sl@0: // enough to introduce hysteresis otherwise the heap would have been compressed sl@0: // by the free operation itself sl@0: TInt tmp1,tmp2; sl@0: tmp2=h->CalcComp(g); sl@0: tmp1=h->Compress(); sl@0: test(tmp1 == tmp2); sl@0: test(h->Top() == orig_top); sl@0: test(c.Size() == orig_size); sl@0: h->FullCheck(); sl@0: // shouldn't compress heap as already compressed sl@0: test(h->Compress() == 0); sl@0: //grow heap by iGrowBy bytes sl@0: test(h->TestAlloc(l2) == p2); sl@0: //grow heap by iGrowBy bytes sl@0: TUint8* p3 = (TUint8*)h->TestAlloc(l2); sl@0: test(p3 == p2 + g); sl@0: test(h->Top() - orig_top == 2*g); sl@0: test(c.Size() - orig_size == 2*g); sl@0: // may or may not reduce heap sl@0: h->TestFree(p2); sl@0: // may or may not reduce heap sl@0: h->TestFree(p3); sl@0: h->ForceCompress(2*g); sl@0: test(h->Top() == orig_top); sl@0: test(c.Size() == orig_size); sl@0: h->FullCheck(); sl@0: if (pass) sl@0: { sl@0: test((TUint8*)h->FreeRef().next == p0 - RHeap::EAllocCellSize); sl@0: test((TUint8*)h->FreeRef().next->next == NULL); sl@0: } sl@0: else sl@0: test((TUint8*)h->FreeRef().next == NULL); sl@0: //grow heap by iGrowBy bytes sl@0: test(h->TestAlloc(l2) == p2); sl@0: //grow heap by iGrowBy*2 + page 
size bytes sl@0: test(h->TestAlloc(l2 + g + page_size) == p3); sl@0: test(h->Top() - orig_top == 4*g); sl@0: test(c.Size() - orig_size == 4*g); sl@0: // will compress heap if KHeapShrinkHysRatio <= KHeapShrinkRatioDflt sl@0: test(h->TestReAlloc(p3, page_size - RHeap::EAllocCellSize, 0) == p3); sl@0: h->ForceCompress(g+page_size); sl@0: test(h->Top() - orig_top == g + page_size); sl@0: test(c.Size() - orig_size == g + page_size); sl@0: h->FullCheck(); sl@0: // will compress heap if KHeapShrinkHysRatio <= KHeapShrinkRatio1 sl@0: h->TestFree(p2); sl@0: // will compress heap if KHeapShrinkHysRatio <= KHeapShrinkRatio1 && g<=page_size sl@0: // or KHeapShrinkHysRatio >= 2.0 and g==page_size sl@0: h->TestFree(p3); sl@0: // may or may not perform further compression sl@0: tmp1=h->CalcComp(g+page_size); sl@0: tmp2=h->Compress(); sl@0: test(tmp1 == tmp2); sl@0: test(h->Top() == orig_top); sl@0: test(c.Size() == orig_size); sl@0: h->FullCheck(); sl@0: test(h->TestAlloc(l2 - minc) == p2); sl@0: test(h->TestAlloc(l2 + g + page_size + minc) == p3 - minc); sl@0: test(h->Top() - orig_top == 4*g); sl@0: test(c.Size() - orig_size == 4*g); sl@0: h->TestFree(p3 - minc); sl@0: h->ForceCompress(l2 + g + page_size + minc); sl@0: test(h->Top() - orig_top == g); sl@0: test(c.Size() - orig_size == g); sl@0: h->FullCheck(); sl@0: if (pass) sl@0: { sl@0: test((TUint8*)h->FreeRef().next == p0 - RHeap::EAllocCellSize); sl@0: test((TUint8*)h->FreeRef().next->next == p3 - minc - RHeap::EAllocCellSize); sl@0: } sl@0: else sl@0: test((TUint8*)h->FreeRef().next == p3 - minc - RHeap::EAllocCellSize); sl@0: h->TestFree(p2); sl@0: if (l2+RHeap::EAllocCellSize >= shrinkThres) sl@0: { sl@0: // When KHeapShrinkRatio small enough heap will have been compressed sl@0: test(h->Top() == orig_top); sl@0: test(c.Size() - orig_size == 0); sl@0: } sl@0: else sl@0: { sl@0: test(h->Top() - orig_top == g); sl@0: test(c.Size() - orig_size == g); sl@0: } sl@0: h->FullCheck(); sl@0: if ( ((TLinAddr)orig_top & (align-1)) 
== 0) sl@0: { sl@0: TAny* free; sl@0: TEST_ALIGN(p2 - RHeap::EAllocCellSize, page_size); sl@0: // will have free space of g-minc sl@0: test(h->TestAlloc(l2 + minc) == p2); sl@0: test(h->Top() - orig_top == 2*g); sl@0: test(c.Size() - orig_size == 2*g); sl@0: free = pass ? h->FreeRef().next->next : h->FreeRef().next; sl@0: test(free != NULL); sl@0: test(h->TestReAlloc(p2, l2 - 4, 0) == p2); sl@0: TInt freeSp = g-minc + (l2+minc - (l2-4)); sl@0: TInt adjust = 0; sl@0: if (freeSp >= shrinkThres && freeSp-page_size >= minc) sl@0: { sl@0: // if page_size is less than growBy (g) then heap will be shrunk sl@0: // by less than a whole g. sl@0: adjust = g-((page_sizeTop() - orig_top == 2*g - adjust); sl@0: test(c.Size() - orig_size == 2*g - adjust); sl@0: free = pass ? h->FreeRef().next->next : h->FreeRef().next; sl@0: test(free != NULL); sl@0: TEST_ALIGN(TLinAddr(free)+4, page_size); sl@0: test(h->TestAlloc(l2 + g + page_size + 4) == p3 - 4); sl@0: test(h->Top() - orig_top == 4*g - adjust); sl@0: test(c.Size() - orig_size == 4*g - adjust); sl@0: h->TestFree(p3 - 4); sl@0: h->ForceCompress(l2 + g + page_size + 4); sl@0: test(h->Top() - orig_top == g + page_size); sl@0: test(c.Size() - orig_size == g + page_size); sl@0: h->FullCheck(); sl@0: h->TestFree(p2); sl@0: h->ForceCompress(l2-4); sl@0: test(h->Compress() == 0); sl@0: // check heap is grown, will have free space of g-minc sl@0: test(h->TestAlloc(l2 + minc) == p2); sl@0: test(h->Top() - orig_top == 2*g); sl@0: test(c.Size() - orig_size == 2*g); sl@0: free = pass ? h->FreeRef().next->next : h->FreeRef().next; sl@0: test(free != NULL); sl@0: // may shrink heap as will now have g+minc free bytes sl@0: test(h->TestReAlloc(p2, l2 - minc, 0) == p2); sl@0: if (g+minc >= shrinkThres) sl@0: { sl@0: test(h->Top() - orig_top == g); sl@0: test(c.Size() - orig_size == g); sl@0: } sl@0: else sl@0: { sl@0: test(h->Top() - orig_top == 2*g); sl@0: test(c.Size() - orig_size == 2*g); sl@0: } sl@0: free = pass ? 
h->FreeRef().next->next : h->FreeRef().next; sl@0: test(free != NULL); sl@0: TEST_ALIGN(TLinAddr(free)+minc, page_size); sl@0: test(h->TestAlloc(l2 + g + page_size + minc) == p3 - minc); sl@0: test(h->Top() - orig_top == 4*g); sl@0: test(c.Size() - orig_size == 4*g); sl@0: h->TestFree(p3 - minc); sl@0: h->ForceCompress(l2 + g + page_size + minc); sl@0: test(h->Top() - orig_top == g); sl@0: test(c.Size() - orig_size == g); sl@0: h->FullCheck(); sl@0: h->TestFree(p2); sl@0: } sl@0: sl@0: h->TestFree(p1); sl@0: if (pass == 0) sl@0: h->TestFree(p0); sl@0: h->Compress(); sl@0: } sl@0: h->FullCheck(); sl@0: } sl@0: sl@0: void Test1() sl@0: { sl@0: RHeap* h; sl@0: h = RTestHeap::FixedHeap(0x1000, 0); sl@0: test(h != NULL); sl@0: DoTest1(h); sl@0: h->Close(); sl@0: h = RTestHeap::FixedHeap(0x1000, 0, EFalse); sl@0: test(h != NULL); sl@0: DoTest1(h); sl@0: h->Close(); sl@0: h = RTestHeap::FixedHeap(0x10000, 64); sl@0: test(h != NULL); sl@0: DoTest1(h); sl@0: h->Close(); sl@0: h = RTestHeap::FixedHeap(0x100000, 4096); sl@0: test(h != NULL); sl@0: DoTest1(h); sl@0: h->Close(); sl@0: h = RTestHeap::FixedHeap(0x100000, 8192); sl@0: test(h != NULL); sl@0: DoTest1(h); sl@0: h->Close(); sl@0: h = UserHeap::ChunkHeap(&KNullDesC(), 0x1000, 0x1000, 0x1000, 4); sl@0: test(h != NULL); sl@0: DoTest1(h); sl@0: h->Close(); sl@0: h = UserHeap::ChunkHeap(&KNullDesC(), 0x1000, 0x10000, 0x1000, 4); sl@0: test(h != NULL); sl@0: DoTest1(h); sl@0: h->Close(); sl@0: h = UserHeap::ChunkHeap(&KNullDesC(), 0x1000, 0x100000, 0x1000, 4096); sl@0: test(h != NULL); sl@0: DoTest1(h); sl@0: h->Close(); sl@0: h = UserHeap::ChunkHeap(&KNullDesC(), 0x1000, 0x100000, 0x1000, 4); sl@0: test(h != NULL); sl@0: DoTest1(h); sl@0: h->Reset(); sl@0: DoTest2(h); sl@0: h->Reset(); sl@0: DoTest3(h); sl@0: h->Reset(); sl@0: DoTest4(h); sl@0: h->Close(); sl@0: h = UserHeap::ChunkHeap(&KNullDesC(), 0x1000, 0x100000, 0x1000, 8); sl@0: test(h != NULL); sl@0: DoTest1(h); sl@0: h->Reset(); sl@0: DoTest2(h); sl@0: h->Reset(); 
sl@0: DoTest3(h); sl@0: h->Reset(); sl@0: DoTest4(h); sl@0: h->Close(); sl@0: h = UserHeap::ChunkHeap(&KNullDesC(), 0x1000, 0x100000, 0x1000, 16); sl@0: test(h != NULL); sl@0: DoTest1(h); sl@0: h->Reset(); sl@0: DoTest2(h); sl@0: h->Reset(); sl@0: DoTest3(h); sl@0: h->Reset(); sl@0: DoTest4(h); sl@0: h->Close(); sl@0: h = UserHeap::ChunkHeap(&KNullDesC(), 0x1000, 0x100000, 0x1000, 32); sl@0: test(h != NULL); sl@0: DoTest1(h); sl@0: h->Reset(); sl@0: DoTest2(h); sl@0: h->Reset(); sl@0: DoTest3(h); sl@0: h->Reset(); sl@0: DoTest4(h); sl@0: h->Close(); sl@0: h = UserHeap::ChunkHeap(&KNullDesC(), 0x3000, 0x100000, 0x3000, 4); sl@0: test(h != NULL); sl@0: DoTest1(h); sl@0: h->Reset(); sl@0: DoTest2(h); sl@0: h->Reset(); sl@0: DoTest3(h); sl@0: h->Reset(); sl@0: DoTest4(h); sl@0: h->Close(); sl@0: } sl@0: sl@0: struct SHeapStress sl@0: { sl@0: RThread iThread; sl@0: volatile TBool iStop; sl@0: TInt iAllocs; sl@0: TInt iFailedAllocs; sl@0: TInt iFrees; sl@0: TInt iReAllocs; sl@0: TInt iFailedReAllocs; sl@0: TInt iChecks; sl@0: TUint32 iSeed; sl@0: RAllocator* iAllocator; sl@0: sl@0: TUint32 Random(); sl@0: }; sl@0: sl@0: TUint32 SHeapStress::Random() sl@0: { sl@0: iSeed *= 69069; sl@0: iSeed += 41; sl@0: return iSeed; sl@0: } sl@0: sl@0: TInt RandomLength(TUint32 aRandom) sl@0: { sl@0: TUint8 x = (TUint8)aRandom; sl@0: if (x & 0x80) sl@0: return (x & 0x7f) << 7; sl@0: return x & 0x7f; sl@0: } sl@0: sl@0: TInt HeapStress(TAny* aPtr) sl@0: { sl@0: SHeapStress& hs = *(SHeapStress*)aPtr; sl@0: RTestHeap* h = (RTestHeap*)&User::Allocator(); sl@0: TUint8* cell[256]; sl@0: TInt len[256]; sl@0: sl@0: Mem::FillZ(cell, sizeof(cell)); sl@0: Mem::FillZ(len, sizeof(len)); sl@0: sl@0: RThread::Rendezvous(KErrNone); sl@0: while (!hs.iStop) sl@0: { sl@0: // allocate all cells sl@0: TInt i; sl@0: for (i=0; i<256; ++i) sl@0: { sl@0: if (!cell[i]) sl@0: { sl@0: ++hs.iAllocs; sl@0: cell[i] = (TUint8*)h->TestAlloc(RandomLength(hs.Random())); sl@0: if (cell[i]) sl@0: len[i] = 
h->AllocLen(cell[i]); sl@0: else sl@0: ++hs.iFailedAllocs; sl@0: } sl@0: } sl@0: sl@0: // free some cells sl@0: TInt n = 64 + (hs.Random() & 127); sl@0: while (--n) sl@0: { sl@0: i = hs.Random() & 0xff; sl@0: if (cell[i]) sl@0: { sl@0: test(h->AllocLen(cell[i]) == len[i]); sl@0: h->TestFree(cell[i]); sl@0: cell[i] = NULL; sl@0: len[i] = 0; sl@0: ++hs.iFrees; sl@0: } sl@0: } sl@0: sl@0: // realloc some cells sl@0: n = 64 + (hs.Random() & 127); sl@0: while (--n) sl@0: { sl@0: TUint32 rn = hs.Random(); sl@0: i = (rn >> 8) & 0xff; sl@0: TInt new_len = RandomLength(rn); sl@0: if (cell[i]) sl@0: { sl@0: test(h->AllocLen(cell[i]) == len[i]); sl@0: ++hs.iReAllocs; sl@0: TUint8* p = (TUint8*)h->TestReAlloc(cell[i], new_len, rn >> 16); sl@0: if (p) sl@0: { sl@0: cell[i] = p; sl@0: len[i] = h->AllocLen(p); sl@0: } sl@0: else sl@0: ++hs.iFailedReAllocs; sl@0: } sl@0: } sl@0: sl@0: // check the heap sl@0: h->Check(); sl@0: ++hs.iChecks; sl@0: } sl@0: return 0; sl@0: } sl@0: sl@0: void CreateStressThread(SHeapStress& aInfo) sl@0: { sl@0: Mem::FillZ(&aInfo, _FOFF(SHeapStress, iSeed)); sl@0: RThread& t = aInfo.iThread; sl@0: TInt r = t.Create(KNullDesC(), &HeapStress, 0x2000, aInfo.iAllocator, &aInfo); sl@0: test(r==KErrNone); sl@0: t.SetPriority(EPriorityLess); sl@0: TRequestStatus s; sl@0: t.Rendezvous(s); sl@0: test(s == KRequestPending); sl@0: t.Resume(); sl@0: User::WaitForRequest(s); sl@0: test(s == KErrNone); sl@0: test(t.ExitType() == EExitPending); sl@0: t.SetPriority(EPriorityMuchLess); sl@0: } sl@0: sl@0: void StopStressThread(SHeapStress& aInfo) sl@0: { sl@0: RThread& t = aInfo.iThread; sl@0: TRequestStatus s; sl@0: t.Logon(s); sl@0: aInfo.iStop = ETrue; sl@0: User::WaitForRequest(s); sl@0: const TDesC& exitCat = t.ExitCategory(); sl@0: TInt exitReason = t.ExitReason(); sl@0: TInt exitType = t.ExitType(); sl@0: test.Printf(_L("Exit type %d,%d,%S\n"), exitType, exitReason, &exitCat); sl@0: test(exitType == EExitKill); sl@0: test(exitReason == KErrNone); sl@0: test(s == 
KErrNone); sl@0: test.Printf(_L("Total Allocs : %d\n"), aInfo.iAllocs); sl@0: test.Printf(_L("Failed Allocs : %d\n"), aInfo.iFailedAllocs); sl@0: test.Printf(_L("Total Frees : %d\n"), aInfo.iFrees); sl@0: test.Printf(_L("Total ReAllocs : %d\n"), aInfo.iReAllocs); sl@0: test.Printf(_L("Failed ReAllocs : %d\n"), aInfo.iFailedReAllocs); sl@0: test.Printf(_L("Heap checks : %d\n"), aInfo.iChecks); sl@0: } sl@0: sl@0: void DoStressTest1(RAllocator* aAllocator) sl@0: { sl@0: RTestHeap* h = (RTestHeap*)aAllocator; sl@0: test.Printf(_L("Test Stress 1: min=%x max=%x align=%d growby=%d\n"), sl@0: h->MinLength(), h->MaxLength(), h->Align(), h->GrowBy()); sl@0: SHeapStress hs; sl@0: hs.iSeed = 0xb504f334; sl@0: hs.iAllocator = aAllocator; sl@0: CreateStressThread(hs); sl@0: User::After(10*1000000); sl@0: StopStressThread(hs); sl@0: CLOSE_AND_WAIT(hs.iThread); sl@0: h->FullCheck(); sl@0: } sl@0: sl@0: void DoStressTest2(RAllocator* aAllocator) sl@0: { sl@0: RTestHeap* h = (RTestHeap*)aAllocator; sl@0: test.Printf(_L("Test Stress 2: min=%x max=%x align=%d growby=%d\n"), sl@0: h->MinLength(), h->MaxLength(), h->Align(), h->GrowBy()); sl@0: SHeapStress hs1; sl@0: SHeapStress hs2; sl@0: hs1.iSeed = 0xb504f334; sl@0: hs1.iAllocator = aAllocator; sl@0: hs2.iSeed = 0xddb3d743; sl@0: hs2.iAllocator = aAllocator; sl@0: CreateStressThread(hs1); sl@0: CreateStressThread(hs2); sl@0: User::After(20*1000000); sl@0: StopStressThread(hs1); sl@0: StopStressThread(hs2); sl@0: CLOSE_AND_WAIT(hs1.iThread); sl@0: CLOSE_AND_WAIT(hs2.iThread); sl@0: h->FullCheck(); sl@0: } sl@0: sl@0: void StressTests() sl@0: { sl@0: RHeap* h; sl@0: h = UserHeap::ChunkHeap(&KNullDesC(), 0x1000, 0x100000, 0x1000, 4); sl@0: test(h != NULL); sl@0: DoStressTest1(h); sl@0: h->Reset(); sl@0: DoStressTest2(h); sl@0: h->Close(); sl@0: h = UserHeap::ChunkHeap(&KNullDesC(), 0x1000, 0x100000, 0x1000, 8); sl@0: test(h != NULL); sl@0: DoStressTest1(h); sl@0: h->Reset(); sl@0: DoStressTest2(h); sl@0: h->Close(); sl@0: } sl@0: sl@0: 
TInt TestHeapGrowInPlace(TInt aMode) sl@0: { sl@0: TBool reAllocs=EFalse; sl@0: TBool heapGrew=EFalse; sl@0: sl@0: RHeap* myHeap; sl@0: sl@0: myHeap = UserHeap::ChunkHeap(NULL,0x1000,0x4000,0x1000); sl@0: sl@0: TAny *testBuffer,*testBuffer2; sl@0: // Start size chosen so that 1st realloc will use up exactly all the heap. sl@0: // Later iterations wont, and there will be a free cell at the end of the heap. sl@0: TInt currentSize = ((0x800) - sizeof(RHeap)) - RHeap::EAllocCellSize; sl@0: TInt growBy = 0x800; sl@0: TInt newSpace, space; sl@0: sl@0: testBuffer2 = myHeap->Alloc(currentSize); sl@0: sl@0: newSpace = myHeap->Size(); sl@0: do sl@0: { sl@0: space = newSpace; sl@0: testBuffer = testBuffer2; sl@0: currentSize+=growBy; sl@0: testBuffer2 = myHeap->ReAlloc(testBuffer,currentSize,aMode); sl@0: sl@0: newSpace = myHeap->Size(); sl@0: sl@0: if (testBuffer2) sl@0: { sl@0: sl@0: if (testBuffer!=testBuffer2) sl@0: reAllocs = ETrue; sl@0: sl@0: if (newSpace>space) sl@0: heapGrew = ETrue; sl@0: } sl@0: growBy-=16; sl@0: } while (testBuffer2); sl@0: currentSize-=growBy; sl@0: sl@0: myHeap->Free(testBuffer); sl@0: myHeap->Close(); sl@0: sl@0: // How did we do? sl@0: if (reAllocs) sl@0: { sl@0: test.Printf(_L("Failure - Memory was moved!\n")); sl@0: return -100; sl@0: } sl@0: if (!heapGrew) sl@0: { sl@0: test.Printf(_L("Failure - Heap Never Grew!\n")); sl@0: return -200; sl@0: } sl@0: if (currentSize<= 0x3000) sl@0: { sl@0: test.Printf(_L("Failed to grow by a reasonable amount!\n")); sl@0: return -300; sl@0: } sl@0: sl@0: return KErrNone; sl@0: } sl@0: sl@0: void ReAllocTests() sl@0: { sl@0: test.Next(_L("Testing Grow In Place")); sl@0: test(TestHeapGrowInPlace(0)==KErrNone); sl@0: test(TestHeapGrowInPlace(RHeap::ENeverMove)==KErrNone); sl@0: } sl@0: sl@0: RHeap* TestDEF078391Heap = 0; sl@0: sl@0: TInt TestDEF078391ThreadFunction(TAny*) sl@0: { sl@0: TestDEF078391Heap = UserHeap::ChunkHeap(NULL,0x1000,0x100000,KMinHeapGrowBy,0,EFalse); sl@0: return TestDEF078391Heap ? 
KErrNone : KErrGeneral; sl@0: } sl@0: sl@0: void TestDEF078391() sl@0: { sl@0: // Test that creating a multithreaded heap with UserHeap::ChunkHeap sl@0: // doesn't create any reference counts on the creating thread. sl@0: // This is done by creating a heap in a named thread, then exiting sl@0: // the thread and re-creating it with the same name. sl@0: // This will fail with KErrAlreadyExists if the orinal thread has sl@0: // not died because of an unclosed reference count. sl@0: test.Next(_L("Test that creating a multithreaded heap doesn't open references of creator")); sl@0: _LIT(KThreadName,"ThreadName"); sl@0: RThread t; sl@0: TInt r=t.Create(KThreadName,TestDEF078391ThreadFunction,0x1000,0x1000,0x100000,NULL); sl@0: test(r==KErrNone); sl@0: TRequestStatus status; sl@0: t.Logon(status); sl@0: t.Resume(); sl@0: User::WaitForRequest(status); sl@0: test(status==KErrNone); sl@0: test(t.ExitType()==EExitKill); sl@0: test(t.ExitReason()==KErrNone); sl@0: CLOSE_AND_WAIT(t); sl@0: test(TestDEF078391Heap!=0); sl@0: User::After(1000000); // give more opportunity for thread cleanup to happen sl@0: sl@0: // create thread a second time sl@0: r=t.Create(KThreadName,TestDEF078391ThreadFunction,0x1000,0x1000,0x100000,NULL); sl@0: test(r==KErrNone); sl@0: t.Kill(0); sl@0: CLOSE_AND_WAIT(t); sl@0: sl@0: // close the heap that got created earlier sl@0: TestDEF078391Heap->Close(); sl@0: } sl@0: sl@0: TInt E32Main() sl@0: { sl@0: test.Title(); sl@0: __KHEAP_MARK; sl@0: test.Start(_L("Testing heaps")); sl@0: TestDEF078391(); sl@0: Test1(); sl@0: StressTests(); sl@0: ReAllocTests(); sl@0: test.End(); sl@0: __KHEAP_MARKEND; sl@0: return 0; sl@0: }