Update contrib.
// Copyright (c) 2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32/memmodel/emul/win32/mshbuf.cpp
// Shareable Data Buffers
//

#include "memmodel.h"
#include <kernel/smap.h>
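
// Emulator (Win32) implementation of shareable data buffers.
// A pool reserves one contiguous stretch of address space up front with
// VirtualAlloc(MEM_RESERVE) in DoCreate(), then commits pages into it as
// buffers are created and decommits them as the pool shrinks. Because the
// emulator runs all Symbian processes inside a single Win32 address space,
// a buffer has the same base address in every process, which keeps Base(),
// Map() and UnMap() trivial compared with the hardware memory models.
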
_LIT(KLitDWin32ShPool,"DWin32ShPool");
_LIT(KLitDWin32AlignedShPool,"DWin32AlignedShPool");
_LIT(KLitDWin32NonAlignedShPool,"DWin32NonAlignedShPool");

DWin32ShBuf::DWin32ShBuf(DShPool* aPool, TLinAddr aRelAddr) : DShBuf(aPool, aRelAddr)
    {
    __KTRACE_OPT(KMMU, Kern::Printf(">DWin32ShBuf::DWin32ShBuf()"));
    }

DWin32ShBuf::~DWin32ShBuf()
    {
    __KTRACE_OPT(KMMU, Kern::Printf(">DWin32ShBuf::~DWin32ShBuf()"));
    }

TUint8* DWin32ShBuf::Base(DProcess* aProcess)
    {
    __KTRACE_OPT(KMMU, Kern::Printf(">DWin32ShBuf::Base(0x%x)", aProcess));

    TUint8* base = reinterpret_cast<DWin32ShPool*>(iPool)->Base(aProcess) + (TUint)iRelAddress;

    return base;
    }

TUint8* DWin32ShBuf::Base()
    {
    __KTRACE_OPT(KMMU, Kern::Printf(">DWin32ShBuf::Base()"));

    TUint8* base = reinterpret_cast<DWin32ShPool*>(iPool)->Base() + (TUint)iRelAddress;

    return base;
    }

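// On the emulator a buffer is visible at the same address in every process,
// so Map() only has to hand back pool base + buffer offset; there is no
// per-process mapping to create. Mapping is only meaningful for
// page-aligned pools, hence the EShPoolPageAlignedBuffer check below.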
TInt DWin32ShBuf::Map(TUint /* aMapAttr */, DProcess* /* aProcess */, TLinAddr& aBase)
    {
    __KTRACE_OPT(KMMU, Kern::Printf(">DWin32ShBuf::Map()"));

    TInt r = KErrNotSupported;

    if (iPool->iPoolFlags & EShPoolPageAlignedBuffer)
        {
        if (iMapped)
            {
            r = KErrAlreadyExists;
            }
        else
            {
            aBase = reinterpret_cast<TUint>(reinterpret_cast<DWin32ShPool*>(iPool)->Base() + (TUint)iRelAddress);
            iMapped = ETrue;
            r = KErrNone;
            }
        }

    return r;
    }

TInt DWin32ShBuf::UnMap(DProcess* /* aProcess */)
    {
    __KTRACE_OPT(KMMU, Kern::Printf(">DWin32ShBuf::UnMap()"));

    TInt r = KErrNotSupported;

    if (iPool->iPoolFlags & EShPoolPageAlignedBuffer)
        {
        if (iMapped)
            {
            iMapped = EFalse;
            r = KErrNone;
            }
        else
            {
            r = KErrNotFound;
            }
        }

    return r;
    }

TInt DWin32ShBuf::AddToProcess(DProcess* aProcess, TUint /* aAttr */)
    {
    __KTRACE_OPT(KMMU, Kern::Printf("Adding DWin32ShBuf %O to process %O", this, aProcess));

    TUint flags;
    TInt r = KErrNone;

    if (aProcess != K::TheKernelProcess)
        r = iPool->OpenClient(aProcess, flags);

    return r;
    }

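// Close() is called with the owning process as context when a user-side
// handle is closed, or with NULL for a kernel-side reference; only
// user-side closes update the pool's per-process client accounting.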
TInt DWin32ShBuf::Close(TAny* aPtr)
    {
    __KTRACE_OPT(KMMU, Kern::Printf(">DWin32ShBuf::Close(0x%08x)", aPtr));

    if (aPtr)
        {
        DProcess* pP = reinterpret_cast<DProcess*>(aPtr);

        if (pP != K::TheKernelProcess)
            iPool->CloseClient(pP);
        }

    return DShBuf::Close(aPtr);
    }

DWin32ShPool::DWin32ShPool()
    {
    __KTRACE_OPT(KMMU, Kern::Printf(">DWin32ShPool::DWin32ShPool"));
    }

DWin32ShPool::~DWin32ShPool()
    {
    __KTRACE_OPT(KMMU, Kern::Printf(">DWin32ShPool::~DWin32ShPool"));

    if (iWin32MemoryBase)
        {
        TUint64 maxSize = static_cast<TUint64>(iMaxBuffers) * static_cast<TUint64>(iBufGap);

        // We know that maxSize is less than KMaxTInt as we tested for this in DoCreate().
        VirtualFree(LPVOID(iWin32MemoryBase), (SIZE_T)maxSize, MEM_DECOMMIT);
        VirtualFree(LPVOID(iWin32MemoryBase), 0, MEM_RELEASE);
        MM::FreeMemory += iWin32MemorySize;
        }
    }

void DWin32ShPool::DestroyClientResources(DProcess* aProcess)
    {
    __KTRACE_OPT(KMMU, Kern::Printf(">DWin32ShPool::DestroyClientResources"));

    TInt r = DestroyHandles(aProcess);
    __NK_ASSERT_DEBUG((r == KErrNone) || (r == KErrDied));
    (void)r; // Silence warnings
    }

TInt DWin32ShPool::DeleteInitialBuffers()
    {
    __KTRACE_OPT(KMMU, Kern::Printf(">DWin32ShPool::DeleteInitialBuffers"));

    if (iInitialBuffersArray != NULL)
        {
        for (TUint i = 0; i < iInitialBuffers; i++)
            {
            iInitialBuffersArray[i].iObjLink.Deque(); // remove from free list
            iInitialBuffersArray[i].Dec();
            iInitialBuffersArray[i].~DWin32ShBuf();
            }
        }

    Kern::Free(iInitialBuffersArray);
    iInitialBuffersArray = NULL;

    return KErrNone;
    }

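// Handle slots for every buffer in the pool are reserved in each client
// process up front (see AddToProcess below); DestroyHandles returns that
// reservation with a negative Reserve() when the process's last reference
// goes away, so creating a handle for an allocated buffer cannot later
// fail for lack of handle slots.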
TInt DWin32ShPool::DestroyHandles(DProcess* aProcess)
    {
    __KTRACE_OPT(KMMU, Kern::Printf(">DWin32ShPool::DestroyHandles(0x%08x)", aProcess));

    TInt r = KErrNone;
    Kern::MutexWait(*iProcessLock);
    DShPoolClient* client = reinterpret_cast<DShPoolClient*>(iClientMap->Remove(reinterpret_cast<TUint>(aProcess)));

    __NK_ASSERT_DEBUG(client);
    __NK_ASSERT_DEBUG(client->iAccessCount == 0);

    delete client;

    if (aProcess != K::TheKernelProcess)
        {
        // Remove reserved handles
        r = aProcess->iHandles.Reserve(-TInt(iTotalBuffers));
        }

    Kern::MutexSignal(*iProcessLock);

    return r;
    }

TInt DWin32ShPool::Close(TAny* aPtr)
    {
    __KTRACE_OPT(KMMU, Kern::Printf(">DWin32ShPool::Close(0x%08x)", aPtr));

    if (aPtr) // not NULL, so this close comes from the user side
        {
        DProcess* pP = reinterpret_cast<DProcess*>(aPtr);

        CloseClient(pP);
        }

    return DShPool::Close(aPtr);
    }

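// The initial buffers are carved out of one Kern::Alloc'd array and
// constructed in place with placement new; DeleteInitialBuffers above
// mirrors this by running the destructors explicitly before freeing the
// whole array with a single Kern::Free.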
TInt DWin32ShPool::CreateInitialBuffers()
    {
    __KTRACE_OPT(KMMU,Kern::Printf(">DWin32ShPool::CreateInitialBuffers"));

    iInitialBuffersArray = reinterpret_cast<DWin32ShBuf*>(Kern::Alloc(iInitialBuffers * sizeof(DWin32ShBuf)));

    if (iInitialBuffersArray == NULL)
        return KErrNoMemory;

    TLinAddr offset = 0;
    for (TUint i = 0; i < iInitialBuffers; i++)
        {
        DWin32ShBuf *buf = new (&iInitialBuffersArray[i]) DWin32ShBuf(this, offset);
        TInt r = buf->Construct();

        if (r == KErrNone)
            {
            iFreeList.Add(&buf->iObjLink);
            }
        else
            {
            // only the buffers constructed so far must be destroyed later
            iInitialBuffers = i;
            return KErrNoMemory;
            }

        offset += iBufGap;
        }

    iFreeBuffers = iInitialBuffers;
    iTotalBuffers = iInitialBuffers;

    iBufMap->Alloc(0, iInitialBuffers);

    return KErrNone;
    }

TUint8* DWin32ShPool::Base()
    {
    return iWin32MemoryBase;
    }


TUint8* DWin32ShPool::Base(DProcess* /*aProcess*/)
    {
    // same address in every process on the emulator
    return iWin32MemoryBase;
    }

TInt DWin32ShPool::AddToProcess(DProcess* aProcess, TUint aAttr)
    {
    __KTRACE_OPT(KEXEC, Kern::Printf("Adding DWin32ShPool %O to process %O", this, aProcess));

    TInt r = KErrNone;
    Kern::MutexWait(*iProcessLock);
    DShPoolClient* client = reinterpret_cast<DShPoolClient*>(iClientMap->Find(reinterpret_cast<TUint>(aProcess)));

    if (!client)
        {
        client = new DShPoolClient;
        if (!client)
            {
            r = KErrNoMemory;
            }
        else
            {
            client->iFlags = aAttr;
            r = iClientMap->Add(reinterpret_cast<TUint>(aProcess), client);

            if (r == KErrNone && aProcess != K::TheKernelProcess)
                {
                r = aProcess->iHandles.Reserve(iTotalBuffers);
                if (r != KErrNone)
                    iClientMap->Remove(reinterpret_cast<TUint>(aProcess));
                }

            if (r != KErrNone)
                delete client;
            }
        }
    else
        {
        client->iAccessCount++;
        }

    Kern::MutexSignal(*iProcessLock);

    return r;
    }

TInt DWin32ShPool::DoCreate(TShPoolCreateInfo& aInfo)
    {
    TUint64 maxSize = static_cast<TUint64>(aInfo.iInfo.iMaxBufs) * static_cast<TUint64>(iBufGap);

    if (maxSize > static_cast<TUint64>(KMaxTInt))
        return KErrArgument;

    __KTRACE_OPT(KMMU,Kern::Printf("DWin32ShPool::DoCreate (maxSize = 0x%08x, iBufGap = 0x%08x)",
        static_cast<TInt>(maxSize), iBufGap));

    // Reserve (but do not yet commit) the whole pool's address range.
    iWin32MemoryBase = (TUint8*) VirtualAlloc(NULL, (SIZE_T)maxSize, MEM_RESERVE, PAGE_READWRITE);
    if (iWin32MemoryBase == NULL)
        return KErrNoMemory;

    __KTRACE_OPT(KMMU,Kern::Printf("DWin32ShPool::DoCreate (iWin32MemoryBase = 0x%08x)", iWin32MemoryBase));

    iBufMap = TBitMapAllocator::New(aInfo.iInfo.iMaxBufs, (TBool)ETrue);
    if (iBufMap == NULL)
        return KErrNoMemory;

    return KErrNone;
    }

TBool DWin32ShPool::IsOpen(DProcess* /*aProcess*/)
    {
    // could we do some kind of check here?
    return (TBool)ETrue;
    }

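// UpdateFreeList() drains iAltFreeList (where Free() parks grown buffers)
// into iFreeList, keeping iFreeList sorted by buffer address. It works in
// batches of eight: each batch is insertion-sorted into a temporary queue,
// then merged into the free list from the back, flashing the pool's fast
// mutex between steps (NKern::FMFlash) so the lock is never held for long.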
TInt DWin32ShPool::UpdateFreeList()
    {
    __KTRACE_OPT(KMMU, Kern::Printf(">DWin32ShPool::UpdateFreeList"));

    SDblQue temp;
    SDblQueLink* anchor = reinterpret_cast<SDblQueLink*>(&iFreeList);

    LockPool();
    while (!iAltFreeList.IsEmpty())
        {
        // sort a temporary list of 'n' objects with the lowest index first
        for (TInt n = 0; n < 8 && !iAltFreeList.IsEmpty(); ++n)
            {
            // bit of an assumption: let's assume that the lower indexes will be
            // allocated and freed first, and therefore will be nearer the front of the list
            DShBuf* buf = _LOFF(iAltFreeList.GetFirst(), DShBuf, iObjLink);

            SDblQueLink* anchor = reinterpret_cast<SDblQueLink*>(&temp);
            SDblQueLink* pLink = temp.Last();

            for (;;)
                {
                // traverse the list starting at the back
                if ((pLink != anchor) && (_LOFF(pLink, DShBuf, iObjLink)->iRelAddress > buf->iRelAddress))
                    {
                    pLink = pLink->iPrev;
                    }
                else
                    {
                    buf->iObjLink.InsertAfter(pLink);
                    break;
                    }
                }
            }

        // now merge with the free list
        while (!temp.IsEmpty())
            {
            if (iFreeList.IsEmpty())
                {
                iFreeList.MoveFrom(&temp);
                break;
                }

            // working backwards with the highest index
            DShBuf* buf = _LOFF(temp.Last(), DShBuf, iObjLink);
            SDblQueLink* pLink = iFreeList.Last();

            while (!NKern::FMFlash(&iLock))
                {
                if ((pLink != anchor) && (_LOFF(pLink, DShBuf, iObjLink)->iRelAddress > buf->iRelAddress))
                    {
                    pLink = pLink->iPrev;
                    }
                else
                    {
                    buf->iObjLink.Deque();
                    buf->iObjLink.InsertAfter(pLink);
                    if (temp.IsEmpty())
                        break;
                    buf = _LOFF(temp.Last(), DShBuf, iObjLink);
                    }
                }
            }
        NKern::FMFlash(&iLock);
        }
    UnlockPool();

    __KTRACE_OPT(KMMU, Kern::Printf("<DWin32ShPool::UpdateFreeList"));
    return KErrNone;
    }

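// Free() poisons the buffer contents with 0xde in debug builds (to make
// use-after-free visible) and simply zeroes them in release builds before
// returning the buffer to a free list.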
void DWin32ShPool::Free(DShBuf* aBuf)
    {
    __KTRACE_OPT(KMMU, Kern::Printf(">DWin32ShPool::Free (aBuf = 0x%08x, aBuf->Base() 0x%08x)", aBuf, aBuf->Base()));

    TLinAddr newAddr = (TLinAddr)aBuf->Base();
#ifdef _DEBUG
    memset((TAny*)newAddr,0xde,aBuf->Size());
#else
    memclr((TAny*)newAddr,aBuf->Size());
#endif

    LockPool();
#ifdef _DEBUG
    // Remove from allocated list
    aBuf->iObjLink.Deque();
#endif
    // we want to put the initial buffers at the head of the free list
    // and the grown buffers at the tail as this makes shrinking more efficient
    if (aBuf >= iInitialBuffersArray && aBuf < (iInitialBuffersArray + iInitialBuffers))
        {
        iFreeList.AddHead(&aBuf->iObjLink);
        }
    else
        {
        iAltFreeList.Add(&aBuf->iObjLink);
        }

    ++iFreeBuffers;
    iPoolFlags &= ~EShPoolSuppressShrink; // Allow shrinking again, if it was blocked
    UnlockPool();

    // queue ManagementDfc which completes notifications as appropriate
    if (HaveWorkToDo())
        KickManagementDfc();

    Close(NULL); // decrement pool reference count
    }

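// Alloc() takes from the sorted free list first and falls back to the
// unsorted alternative list, so freeing a grown buffer never pays the
// sorting cost up front; the sort is deferred to UpdateFreeList().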
TInt DWin32ShPool::Alloc(DShBuf*& aShBuf)
    {
    __KTRACE_OPT(KMMU, Kern::Printf(">DWin32ShPool::Alloc (DShBuf)"));

    TInt r = KErrNoMemory;
    aShBuf = NULL;

    LockPool();

    if (!iFreeList.IsEmpty())
        {
        aShBuf = _LOFF(iFreeList.GetFirst(), DShBuf, iObjLink);
#ifdef _DEBUG
        iAllocated.Add(&aShBuf->iObjLink);
#endif
        --iFreeBuffers;
        Open(); // increment pool reference count
        r = KErrNone;
        }
    else if (!iAltFreeList.IsEmpty())
        {
        // try alternative free list
        aShBuf = _LOFF(iAltFreeList.GetFirst(), DShBuf, iObjLink);
#ifdef _DEBUG
        iAllocated.Add(&aShBuf->iObjLink);
#endif
        --iFreeBuffers;
        Open(); // increment pool reference count
        r = KErrNone;
        }

    UnlockPool();

    if (HaveWorkToDo())
        KickManagementDfc();

    __KTRACE_OPT(KMMU, Kern::Printf("<DWin32ShPool::Alloc return buf = 0x%08x", aShBuf));
    return r;
    }

DWin32AlignedShPool::DWin32AlignedShPool()
    {
    __KTRACE_OPT(KMMU, Kern::Printf(">DWin32AlignedShPool::DWin32AlignedShPool"));
    }


DWin32AlignedShPool::~DWin32AlignedShPool()
    {
    __KTRACE_OPT(KMMU, Kern::Printf(">DWin32AlignedShPool::~DWin32AlignedShPool"));
    }

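// With EShPoolGuardPages set, each buffer slot commits iBufGap minus one
// page, leaving the top page of every slot uncommitted so that an overrun
// off the end of a buffer faults instead of silently corrupting its
// neighbour. Without guard pages the initial buffers are committed as one
// contiguous run.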
TInt DWin32AlignedShPool::DoCreate(TShPoolCreateInfo& aInfo)
    {
    TInt r;
    // Reserve the pool's address range first
    r = DWin32ShPool::DoCreate(aInfo);
    if (r != KErrNone)
        return r;

    if (iPoolFlags & EShPoolGuardPages)
        {
        TUint numOfBytes = iBufGap - MM::RamPageSize;
        iCommittedPages = MM::RoundToPageSize(iInitialBuffers * numOfBytes) >> MM::RamPageShift;

        for (TUint i = 0; i < iInitialBuffers; ++i)
            {
            TUint offset = iBufGap * i;

            MM::Wait();
            if (MM::Commit(reinterpret_cast<TLinAddr>(iWin32MemoryBase+offset), numOfBytes, 0xFF, EFalse) != KErrNone)
                {
                MM::Signal();
                return KErrNoMemory;
                }
            iWin32MemorySize += numOfBytes;
            MM::Signal();
            }

        iMaxPages = MM::RoundToPageSize(aInfo.iInfo.iMaxBufs * numOfBytes) >> MM::RamPageShift;
        }
    else
        {
        // Make sure we give the caller the number of buffers they were expecting
        iCommittedPages = MM::RoundToPageSize(iInitialBuffers * iBufGap) >> MM::RamPageShift;

        MM::Wait();
        if (MM::Commit(reinterpret_cast<TLinAddr>(iWin32MemoryBase), iCommittedPages << MM::RamPageShift, 0xFF, EFalse) != KErrNone)
            {
            MM::Signal();
            return KErrNoMemory;
            }
        iWin32MemorySize = iCommittedPages << MM::RamPageShift;
        MM::Signal();

        iMaxPages = MM::RoundToPageSize(aInfo.iInfo.iMaxBufs * iBufGap) >> MM::RamPageShift;
        }

    return r;
    }

TInt DWin32AlignedShPool::SetBufferWindow(DProcess* /*aProcess*/, TInt /*aWindowSize*/ )
    {
    // no mapping window is needed; every process sees the whole pool
    return KErrNone;
    }

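// mult_fx248() is not defined in this file. Judging from its use with the
// iGrowByRatio/iShrinkByRatio fields and the explicit round-to-zero
// handling at each call site, it is presumably a fixed-point multiply with
// an 8-bit fraction (24.8 format), along the lines of this sketch (an
// illustration, not the actual implementation):
//
//     TUint32 mult_fx248(TUint32 n, TUint32 f)
//         { return (TUint32)((TUint64(n) * f) >> 8); }
//
// On that reading, a ratio of 26 (26/256, about 10%) grows a 100-buffer
// pool by 10 buffers per call.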
TInt DWin32AlignedShPool::GrowPool()
    {
    __KTRACE_OPT(KMMU, Kern::Printf(">DWin32AlignedShPool::GrowPool()"));

    Kern::MutexWait(*iProcessLock);

    // How many bytes to commit for each new buffer (must be whole number of pages)
    TUint bytes = (iPoolFlags & EShPoolGuardPages) ? iBufGap - MM::RamPageSize : iBufGap;

    __ASSERT_DEBUG(!(bytes % MM::RamPageSize), Kern::PanicCurrentThread(KLitDWin32AlignedShPool, __LINE__));

    TInt pages = bytes >> MM::RamPageShift;

    TUint32 headroom = iMaxBuffers - iTotalBuffers;

    // How many buffers to grow by?
    TUint32 grow = mult_fx248(iTotalBuffers, iGrowByRatio);
    if (grow == 0) // Handle round-to-zero
        grow = 1;
    if (grow > headroom)
        grow = headroom;

    TUint i;
    SDblQue temp;
    for (i = 0; i < grow; ++i)
        {
        TInt offset = iBufMap->Alloc();
        if (offset < 0)
            break;
        offset *= iBufGap;

        MM::Wait();
        if (MM::Commit(reinterpret_cast<TLinAddr>(iWin32MemoryBase+offset), bytes, 0xFF, EFalse) != KErrNone)
            {
            MM::Signal();
            iBufMap->Free(offset / iBufGap);
            break;
            }
        iWin32MemorySize += bytes;
        MM::Signal();

        DWin32ShBuf *buf = new DWin32ShBuf(this, offset);
        if (buf == NULL)
            {
            MM::Wait();
            MM::Decommit(reinterpret_cast<TLinAddr>(iWin32MemoryBase+offset), bytes);
            iWin32MemorySize -= bytes;
            MM::Signal();
            iBufMap->Free(offset / iBufGap);
            break;
            }

        TInt r = buf->Construct();
        if (r != KErrNone)
            {
            MM::Wait();
            MM::Decommit(reinterpret_cast<TLinAddr>(iWin32MemoryBase+offset), bytes);
            iWin32MemorySize -= bytes;
            MM::Signal();
            iBufMap->Free(offset / iBufGap);
            buf->DObject::Close(NULL);
            break;
            }

        iCommittedPages += pages;
        temp.Add(&buf->iObjLink);
        }

    TInt r = UpdateReservedHandles(i);

    if (r == KErrNone)
        {
        LockPool();
        iFreeList.MoveFrom(&temp);
        iFreeBuffers += i;
        iTotalBuffers += i;
        UnlockPool();
        }
    else
        {
        // else delete buffers
        SDblQueLink* pLink;
        while ((pLink = temp.GetFirst()) != NULL)
            {
            DShBuf* buf = _LOFF(pLink, DShBuf, iObjLink);
            TLinAddr offset = buf->iRelAddress;
            iBufMap->Free(offset / iBufGap);
            MM::Wait();
            MM::Decommit(reinterpret_cast<TLinAddr>(iWin32MemoryBase+offset), bytes);
            iWin32MemorySize -= bytes;
            MM::Signal();
            iCommittedPages -= pages;
            buf->DObject::Close(NULL);
            }
        }

    CalculateGrowShrinkTriggers();

    Kern::MutexSignal(*iProcessLock);

    __KTRACE_OPT(KMMU, Kern::Printf("<DWin32AlignedShPool::GrowPool()"));
    return r;
    } // DWin32AlignedShPool::GrowPool

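// ShrinkPool() is clamped three ways: always by at least one buffer, never
// below the initial buffer count (grownBy), and never by more buffers than
// are currently free. Initial buffers are never released because they live
// in the contiguous iInitialBuffersArray rather than individually on the heap.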
TInt DWin32AlignedShPool::ShrinkPool()
    {
    __KTRACE_OPT(KMMU, Kern::Printf(">DWin32AlignedShPool::ShrinkPool()"));

    Kern::MutexWait(*iProcessLock);

    // How many bytes to decommit for each buffer (must be whole number of pages)
    TUint bytes = (iPoolFlags & EShPoolGuardPages) ? iBufGap - MM::RamPageSize : iBufGap;

    __ASSERT_DEBUG(!(bytes % MM::RamPageSize), Kern::PanicCurrentThread(KLitDWin32AlignedShPool, __LINE__));

    TInt pages = bytes >> MM::RamPageShift;

    // Only the grown buffers may be freed
    TUint32 grownBy = iTotalBuffers - iInitialBuffers;

    // How many buffers to shrink by?
    TUint32 shrink = mult_fx248(iTotalBuffers, iShrinkByRatio);
    if (shrink == 0) // Handle round-to-zero
        shrink = 1;
    if (shrink > grownBy)
        shrink = grownBy;
    if (shrink > iFreeBuffers)
        shrink = iFreeBuffers;

    TUint i;
    for (i = 0; i < shrink; ++i)
        {
        LockPool();
        if (iFreeList.IsEmpty())
            {
            UnlockPool();
            break;
            }

        // work from the back of the queue
        SDblQueLink *pLink = iFreeList.Last();
        DShBuf* pBuf = _LOFF(pLink, DShBuf, iObjLink);

        if (pBuf >= iInitialBuffersArray && pBuf < (iInitialBuffersArray + iInitialBuffers))
            {
            // reached the initial buffers; they are never released
            UnlockPool();
            break;
            }

        --iFreeBuffers;
        --iTotalBuffers;
        pLink->Deque();
        iCommittedPages -= pages;
        UnlockPool();

        TLinAddr offset = pBuf->iRelAddress;

        iBufMap->Free(offset / iBufGap);

        MM::Wait();
        MM::Decommit(reinterpret_cast<TLinAddr>(iWin32MemoryBase+offset), iBufSize);
        iWin32MemorySize -= iBufSize;
        MM::Signal();

        pBuf->DObject::Close(NULL);
        }

    TInt r = UpdateReservedHandles(-(TInt)i);

    // If we couldn't shrink the pool by this many buffers, wait until we Free() another
    // buffer before trying to shrink again.
    if (i < shrink)
        iPoolFlags |= EShPoolSuppressShrink;

    CalculateGrowShrinkTriggers();

    Kern::MutexSignal(*iProcessLock);

    __KTRACE_OPT(KMMU, Kern::Printf("<DWin32AlignedShPool::ShrinkPool()"));
    return r;
    } // DWin32AlignedShPool::ShrinkPool

DWin32NonAlignedShPool::DWin32NonAlignedShPool()
    {
    __KTRACE_OPT(KMMU, Kern::Printf(">DWin32NonAlignedShPool::DWin32NonAlignedShPool"));
    }


DWin32NonAlignedShPool::~DWin32NonAlignedShPool()
    {
    __KTRACE_OPT(KMMU, Kern::Printf(">DWin32NonAlignedShPool::~DWin32NonAlignedShPool"));

    delete iPagesMap;
    }

TInt DWin32NonAlignedShPool::DoCreate(TShPoolCreateInfo& aInfo)
    {
    TInt r;

    r = DWin32ShPool::DoCreate(aInfo);
    if (r != KErrNone)
        return r;

    if (iPoolFlags & EShPoolPhysicalMemoryPool)
        {
        return KErrNotSupported;
        }
    else
        {
        // Make sure we give the caller the number of buffers they were expecting
        iCommittedPages = MM::RoundToPageSize(iInitialBuffers * iBufGap) >> MM::RamPageShift;

        MM::Wait();
        if (MM::Commit(reinterpret_cast<TLinAddr>(iWin32MemoryBase), iCommittedPages << MM::RamPageShift, 0xFF, EFalse) != KErrNone)
            {
            MM::Signal();
            return KErrNoMemory;
            }
        iWin32MemorySize = iCommittedPages << MM::RamPageShift;
        MM::Signal();

        iMaxPages = MM::RoundToPageSize(aInfo.iInfo.iMaxBufs * iBufGap) >> MM::RamPageShift;
        }

    iPagesMap = TBitMapAllocator::New(iMaxPages, (TBool)ETrue);
    if (iPagesMap == NULL)
        return KErrNoMemory;

    iPagesMap->Alloc(0, iCommittedPages);

    return KErrNone;
    }

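// In a non-aligned pool, buffers are packed at iBufGap intervals with no
// page alignment, so one page can hold parts of several buffers and one
// buffer can straddle several pages. FreeBufferPages() may therefore only
// decommit a page once no live buffer overlaps it. A worked example with
// hypothetical numbers: with iBufGap = 3000 and 4096-byte pages, buffer 2
// spans offsets 6000..8999, i.e. pages 1..2; page 1 also carries the tail
// of buffer 1 and page 2 the heads of buffers 3 and 4, so freeing buffer 2
// decommits a page only if those neighbours are free as well. Otherwise
// the page range shrinks, possibly to nothing, and nothing is released.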
void DWin32NonAlignedShPool::FreeBufferPages(TUint aOffset)
    {
    TLinAddr firstByte = aOffset;              // offset of first byte in buffer
    TLinAddr lastByte = firstByte+iBufGap-1;   // offset of last byte in buffer
    TUint firstPage = firstByte>>MM::RamPageShift; // index of first page containing part of the buffer
    TUint lastPage = lastByte>>MM::RamPageShift;   // index of last page containing part of the buffer

    TUint firstBuffer = (firstByte&~(MM::RamPageSize - 1))/iBufGap; // index of first buffer which lies in firstPage
    TUint lastBuffer = (lastByte|(MM::RamPageSize - 1))/iBufGap;    // index of last buffer which lies in lastPage
    TUint thisBuffer = firstByte/iBufGap;                           // index of the buffer to be freed

    // Ensure lastBuffer is within bounds (there may be room in the last
    // page for more buffers than we have allocated).
    if (lastBuffer >= iMaxBuffers)
        lastBuffer = iMaxBuffers-1;

    if (firstBuffer!=thisBuffer && iBufMap->NotFree(firstBuffer,thisBuffer-firstBuffer))
        {
        // first page has other allocated buffers in it,
        // so we can't free it and must move on to next one...
        if (firstPage >= lastPage)
            return;
        ++firstPage;
        }

    if (lastBuffer!=thisBuffer && iBufMap->NotFree(thisBuffer+1,lastBuffer-thisBuffer))
        {
        // last page has other allocated buffers in it,
        // so we can't free it and must step back to previous one...
        if (lastPage <= firstPage)
            return;
        --lastPage;
        }

    if (firstPage<=lastPage)
        {
        // we can free pages firstPage through to lastPage...
        TUint numPages = lastPage-firstPage+1;
        iPagesMap->SelectiveFree(firstPage,numPages);

        MM::Wait();
        MM::Decommit(reinterpret_cast<TLinAddr>(iWin32MemoryBase+(firstPage << MM::RamPageShift)), (numPages << MM::RamPageShift));
        iWin32MemorySize -= (numPages << MM::RamPageShift);
        MM::Signal();

        iCommittedPages -= numPages;
        }
    }

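// GrowPool() below is the mirror image: because a new buffer may overlap
// pages already committed for its neighbours, it walks the buffer's page
// range and commits only the pages that iPagesMap says are missing.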
TInt DWin32NonAlignedShPool::GrowPool()
    {
    __KTRACE_OPT(KMMU, Kern::Printf(">DWin32NonAlignedShPool::GrowPool()"));

    Kern::MutexWait(*iProcessLock);

    TUint32 headroom = iMaxBuffers - iTotalBuffers;

    // How many buffers to grow by?
    TUint32 grow = mult_fx248(iTotalBuffers, iGrowByRatio);
    if (grow == 0) // Handle round-to-zero
        grow = 1;
    if (grow > headroom)
        grow = headroom;

    TInt r = KErrNone;
    TUint i;
    SDblQue temp;
    for (i = 0; i < grow; ++i)
        {
        TInt offset = iBufMap->Alloc();
        if (offset < 0)
            {
            r = KErrNoMemory;
            break;
            }
        offset *= iBufGap;

        TInt lastPage = (offset + iBufSize - 1) >> MM::RamPageShift;

        // Allocate one page at a time.
        for (TInt page = offset >> MM::RamPageShift; page <= lastPage; ++page)
            {
            // Is the page allocated?
            if (iPagesMap->NotAllocated(page, 1))
                {
                MM::Wait();
                if (MM::Commit(reinterpret_cast<TLinAddr>(iWin32MemoryBase+(page << MM::RamPageShift)), MM::RamPageSize, 0xFF, EFalse) != KErrNone)
                    {
                    MM::Signal();
                    r = KErrNoMemory;
                    break;
                    }
                iWin32MemorySize += MM::RamPageSize;
                MM::Signal();

                ++iCommittedPages;
                iPagesMap->Alloc(page, 1);
                }
            }

        if (r != KErrNone)
            {
            iBufMap->Free(offset / iBufGap);
            FreeBufferPages(offset);
            break;
            }

        DWin32ShBuf *buf = new DWin32ShBuf(this, offset);
        if (buf == NULL)
            {
            iBufMap->Free(offset / iBufGap);
            FreeBufferPages(offset);
            r = KErrNoMemory;
            break;
            }

        r = buf->Construct();
        if (r != KErrNone)
            {
            iBufMap->Free(offset / iBufGap);
            FreeBufferPages(offset);
            buf->DObject::Close(NULL);
            break;
            }

        temp.Add(&buf->iObjLink);
        }

    r = UpdateReservedHandles(i);

    if (r == KErrNone)
        {
        LockPool();
        iFreeList.MoveFrom(&temp);
        iFreeBuffers += i;
        iTotalBuffers += i;
        UnlockPool();
        }
    else
        {
        // couldn't reserve handles so have no choice but to
        // delete the buffers
        __KTRACE_OPT(KMMU, Kern::Printf("GrowPool failed with %d, deleting buffers", r));

        SDblQueLink* pLink;
        while ((pLink = temp.GetFirst()) != NULL)
            {
            DShBuf* buf = _LOFF(pLink, DShBuf, iObjLink);
            TLinAddr offset = buf->iRelAddress;
            iBufMap->Free(offset / iBufGap);
            FreeBufferPages(offset);
            buf->DObject::Close(NULL);
            }
        __KTRACE_OPT(KMMU, Kern::Printf("Buffers deleted"));
        }

    CalculateGrowShrinkTriggers();

    Kern::MutexSignal(*iProcessLock);

    __KTRACE_OPT(KMMU, Kern::Printf("<DWin32NonAlignedShPool::GrowPool()"));
    return r;
    } // DWin32NonAlignedShPool::GrowPool

TInt DWin32NonAlignedShPool::ShrinkPool()
    {
    __KTRACE_OPT(KMMU, Kern::Printf(">DWin32NonAlignedShPool::ShrinkPool()"));

    Kern::MutexWait(*iProcessLock);

    // Only the grown buffers may be freed
    TUint32 grownBy = iTotalBuffers - iInitialBuffers;

    // How many buffers to shrink by?
    TUint32 shrink = mult_fx248(iTotalBuffers, iShrinkByRatio);
    if (shrink == 0) // Handle round-to-zero
        shrink = 1;
    if (shrink > grownBy)
        shrink = grownBy;
    if (shrink > iFreeBuffers)
        shrink = iFreeBuffers;

    TUint i;
    for (i = 0; i < shrink; ++i)
        {
        LockPool();
        if (iFreeList.IsEmpty())
            {
            UnlockPool();
            break;
            }

        // work from the back of the queue
        SDblQueLink *pLink = iFreeList.Last();
        DShBuf* pBuf = _LOFF(pLink, DShBuf, iObjLink);

        if (pBuf >= iInitialBuffersArray && pBuf < (iInitialBuffersArray + iInitialBuffers))
            {
            // reached the initial buffers; they are never released
            UnlockPool();
            break;
            }

        --iFreeBuffers;
        --iTotalBuffers;
        pLink->Deque();
        UnlockPool();

        TLinAddr offset = pBuf->iRelAddress;

        iBufMap->Free(offset / iBufGap);
        FreeBufferPages(offset);
        pBuf->DObject::Close(NULL);
        }

    UpdateReservedHandles(-(TInt)i);

    // If we couldn't shrink the pool by this many buffers, wait until we Free() another
    // buffer before trying to shrink again.
    if (i < shrink)
        iPoolFlags |= EShPoolSuppressShrink;

    CalculateGrowShrinkTriggers();

    Kern::MutexSignal(*iProcessLock);

    __KTRACE_OPT(KMMU, Kern::Printf("<DWin32NonAlignedShPool::ShrinkPool()"));

    return KErrNone;
    } // DWin32NonAlignedShPool::ShrinkPool