Update contrib.
// Copyright (c) 2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
// Initial Contributors:
// Nokia Corporation - initial contribution.
// e32/memmodel/epoc/flexible/mshbuf.cpp
// Shareable Data Buffers
#include <kernel/smap.h>
_LIT(KLitDMemModelAlignedShPool,"DMMAlignedShPool"); // Must be no more than 16 characters!
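// TWait chains a fast semaphore per waiting thread into a singly-linked
// list: Link() adds the caller to the list, and SignalAll() walks the list
// signalling each semaphore in turn to wake every waiter.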
void Link(TWait*& aList)
static void SignalAll(TWait* aList)
TWait* next = aList->iNext;
NKern::FSSignal(&aList->iSem);
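// Describes one process-local mapping of an aligned buffer, including the
// list of threads waiting for an in-progress map/unmap transition on it.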
class DShBufMapping : public DBase
DMemoryMapping* iMapping;
TWait* iTransitions; // Mapping and Unmapping operations
DMemModelShPool::DMemModelShPool() : DShPool()
__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelShPool::DMemModelShPool"));
DMemModelShPool::~DMemModelShPool()
__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelShPool::~DMemModelShPool"));
void DMemModelShPool::DestroyClientResources(DProcess* aProcess)
__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelShPool::DestroyClientResources"));
TInt r = DestroyAllMappingsAndReservedHandles(aProcess);
__NK_ASSERT_DEBUG((r == KErrNone) || (r == KErrDied));
(void)r; // Silence warnings
DMemModelAlignedShBuf::DMemModelAlignedShBuf(DShPool* aPool) : DShBuf(aPool)
__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelAlignedShBuf::DMemModelAlignedShBuf()"));
TInt DMemModelAlignedShBuf::Construct()
__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelAlignedShBuf::Construct()"));
r = DShBuf::Construct();
TInt DMemModelAlignedShBuf::Close(TAny* aPtr)
__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelAlignedShBuf::Close(0x%08x)", aPtr));
DProcess* pP = reinterpret_cast<DProcess*>(aPtr);
iPool->CloseClient(pP);
return DShBuf::Close(aPtr);
TInt DMemModelAlignedShBuf::AddToProcess(DProcess* aProcess, TUint aAttr)
__KTRACE_OPT(KMMU,Kern::Printf("Adding DMemModelShBuf %O to process %O",this,aProcess));
r = iPool->OpenClient(aProcess, flags);
if ((flags & EShPoolAutoMapBuf) && ((aAttr & EShPoolNoMapBuf) == 0))
// note we use the client's pool flags and not the buffer attributes
r = Map(flags, aProcess, base);
if (aProcess == K::TheKernelProcess)
iRelAddress = static_cast<TLinAddr>(base);
TInt DMemModelAlignedShBuf::Create()
__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelAlignedShBuf::Create()"));
// calculate memory type...
TMemoryObjectType memoryType = EMemoryObjectUnpaged;
TMemoryAttributes attr = EMemoryAttributeStandard;
// calculate memory flags...
TMemoryCreateFlags flags = static_cast<TMemoryCreateFlags>((EMemoryCreateDefault|EMemoryCreateUseCustomWipeByte|(0xAA<<EMemoryCreateWipeByteShift)));
// note that any guard pages will be included in iBufGap; however, the amount of memory
// committed will be iBufSize rounded up to a whole page
r = MM::MemoryNew(iMemoryObject, memoryType, MM::RoundToPageCount(iPool->iBufGap), flags, attr);
if (iPool->iPoolFlags & EShPoolContiguous)
r = MM::MemoryAllocContiguous(iMemoryObject, 0, MM::RoundToPageCount(iPool->iBufSize), 0, paddr);
r = MM::MemoryAlloc(iMemoryObject, 0, MM::RoundToPageCount(iPool->iBufSize));
DMemModelAlignedShBuf::~DMemModelAlignedShBuf()
__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelAlignedShBuf::~DMemModelAlignedShBuf()"));
__NK_ASSERT_DEBUG(iMappings.IsEmpty());
MM::MemoryDestroy(iMemoryObject);
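// Map() serialises concurrent map/unmap of the same buffer within a process:
// if an existing mapping is mid-transition, the caller links itself onto the
// mapping's iTransitions wait list; otherwise it takes a free pre-constructed
// mapping from the client, maps the memory object into it (dropping the pool
// fast lock around MappingMap) and finally wakes any waiters.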
TInt DMemModelAlignedShBuf::Map(TUint aMapAttr, DProcess* aProcess, TLinAddr& aBase)
__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelAlignedShBuf::Map()"));
DShBufMapping* m = NULL;
DMemoryMapping* mapping = NULL;
DMemModelProcess* pP = reinterpret_cast<DMemModelProcess*>(aProcess);
TBool write = (TBool)EFalse;
// User = ETrue, ReadOnlyWrite = ETrue, Execute = EFalse
if (aMapAttr & EShPoolWriteable)
write = (TBool)ETrue;
TMappingPermissions perm = MM::MappingPermissions(pP!=K::TheKernelProcess, write, (TBool)EFalse);
r = FindMapping(m, pP);
if (m->iTransitioning)
wait.Link(m->iTransitions);
return KErrAlreadyExists;
DMemModelAlignedShPoolClient* client = reinterpret_cast<DMemModelAlignedShPoolClient*>(iPool->iClientMap->Find(reinterpret_cast<TUint>(aProcess)));
__NK_ASSERT_DEBUG(client);
DMemModelAlignedShPool* pool = reinterpret_cast<DMemModelAlignedShPool*>(iPool);
__NK_ASSERT_DEBUG(m == NULL);
r = pool->GetFreeMapping(m, client);
iMappings.AddHead(&m->iObjLink);
m->iTransitioning = ETrue;
mapping = m->iMapping;
iPool->UnlockPool(); // have to release fast lock for MappingMap
r = MM::MappingMap(mapping, perm, iMemoryObject, 0, MM::RoundToPageCount(pool->iBufSize));
TWait* list = m->iTransitions;
m->iTransitions = NULL;
pool->ReleaseMapping(m, client);
aBase = MM::MappingBase(mapping);
m->iTransitioning = EFalse;
TWait::SignalAll(list);
TInt DMemModelAlignedShBuf::FindMapping(DShBufMapping*& aMapping, DMemModelProcess* aProcess)
// Must be in critical section so we don't leak os asid references.
__NK_ASSERT_DEBUG(iPool->iLock.HeldByCurrentThread());
TInt r = KErrNotFound;
// Open a reference on aProcess's os asid so that it can't be freed and
// reused while searching.
TInt osAsid = aProcess->TryOpenOsAsid();
{// aProcess has died and freed its os asid.
SDblQueLink* pLink = iMappings.First();
SDblQueLink* end = reinterpret_cast<SDblQueLink*>(&iMappings);
DShBufMapping* m = NULL;
m = _LOFF(pLink, DShBufMapping, iObjLink);
if (m->iOsAsid == osAsid)
pLink = pLink->iNext;
// Close the reference on the os asid; if we found a mapping, its lifetime will
// determine whether the process still owns an os asid.
aProcess->CloseOsAsid();
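// UnMap() mirrors Map(): it waits out any in-progress transition, unmaps the
// memory object and returns the mapping to the client's free list before
// signalling waiters.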
TInt DMemModelAlignedShBuf::UnMap(DProcess* aProcess)
__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelAlignedShBuf::UnMap()"));
DMemModelProcess* pP = reinterpret_cast<DMemModelProcess*>(aProcess);
DShBufMapping* m = NULL;
r = FindMapping(m, pP);
if (m->iTransitioning)
wait.Link(m->iTransitions);
m->iTransitioning = ETrue;
MM::MappingUnmap(m->iMapping);
DMemModelAlignedShPoolClient* client = reinterpret_cast<DMemModelAlignedShPoolClient*>(iPool->iClientMap->Find(reinterpret_cast<TUint>(aProcess)));
__NK_ASSERT_DEBUG(client);
TWait* list = m->iTransitions;
m->iTransitions = NULL;
m->iTransitioning = EFalse;
DMemModelAlignedShPool* pool = reinterpret_cast<DMemModelAlignedShPool*>(iPool);
pool->ReleaseMapping(m, client);
if (aProcess == K::TheKernelProcess)
wait.SignalAll(list);
TUint8* DMemModelAlignedShBuf::Base(DProcess* aProcess)
__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelAlignedShBuf::Base()"));
DMemModelProcess* pP = reinterpret_cast<DMemModelProcess*>(aProcess);
DShBufMapping* mapping = NULL;
TInt r = FindMapping(mapping, pP);
base = reinterpret_cast<TUint8*>(MM::MappingBase(mapping->iMapping));
TUint8* DMemModelAlignedShBuf::Base()
return reinterpret_cast<TUint8*>(iRelAddress);
TInt DMemModelAlignedShBuf::Pin(TPhysicalPinObject* aPinObject, TBool aReadOnly, TPhysAddr& aAddress, TPhysAddr* aPages, TUint32& aMapAttr, TUint& aColour)
CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"DMemModelAlignedShBuf::Pin");
TInt r = MM::PinPhysicalMemory(iMemoryObject, (DPhysicalPinMapping*)aPinObject, 0,
MM::RoundToPageCount(Size()),
aReadOnly, aAddress, aPages, aMapAttr, aColour);
TInt DMemModelAlignedShPool::GetFreeMapping(DShBufMapping*& aMapping, DMemModelAlignedShPoolClient* aClient)
__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelAlignedShPool::GetFreeMapping()"));
__NK_ASSERT_DEBUG(iLock.HeldByCurrentThread());
TInt r = KErrNotFound;
if (!aClient->iMappingFreeList.IsEmpty())
aMapping = _LOFF(aClient->iMappingFreeList.GetFirst(), DShBufMapping, iObjLink);
__KTRACE_OPT(KMMU2, Kern::Printf("DMemModelAlignedShPool::GetFreeMapping(0x%08x, 0x%08x) returns %d", aMapping, aClient, r));
TInt DMemModelAlignedShPool::ReleaseMapping(DShBufMapping*& aMapping, DMemModelAlignedShPoolClient* aClient)
__KTRACE_OPT(KMMU2, Kern::Printf("DMemModelAlignedShPool::ReleaseMapping(0x%08x,0x%08x)",aMapping,aClient));
__NK_ASSERT_DEBUG(iLock.HeldByCurrentThread());
aClient->iMappingFreeList.AddHead(&aMapping->iObjLink);
// pool has probably been closed; delete the mapping
__KTRACE_OPT(KMMU2, Kern::Printf("DMemModelAlignedShPool::ReleaseMapping delete 0x%08x",aMapping));
UnlockPool(); // have to release fast lock for MappingDestroy
MM::MappingDestroy(aMapping->iMapping);
TInt DMemModelAlignedShPool::SetBufferWindow(DProcess* aProcess, TInt aWindowSize)
__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelAlignedShPool::SetBufferWindow()"));
// Create and construct mappings but do not map them;
// also allocate reserved handles
TUint noOfBuffers = aWindowSize;
if (aWindowSize > static_cast<TInt>(iMaxBuffers))
Kern::MutexWait(*iProcessLock);
DMemModelAlignedShPoolClient* client = reinterpret_cast<DMemModelAlignedShPoolClient*>(iClientMap->Find(reinterpret_cast<TUint>(aProcess)));
if (client->iWindowSize != 0)
Kern::MutexSignal(*iProcessLock);
return KErrAlreadyExists;
noOfBuffers = iTotalBuffers;
DMemModelProcess* pP = reinterpret_cast<DMemModelProcess*>(aProcess);
r = CreateMappings(client, noOfBuffers, pP);
client->iWindowSize = aWindowSize;
DestroyMappings(client, noOfBuffers);
Kern::MutexSignal(*iProcessLock);
TInt DMemModelAlignedShPool::MappingNew(DShBufMapping*& aMapping, DMemModelProcess* aProcess)
// Must be in critical section so we don't leak os asid references.
TMappingCreateFlags flags=EMappingCreateDefault;
FlagSet(flags, EMappingCreateReserveAllResources);
// Open a reference to aProcess's os asid so it isn't freed and reused while
// we're creating this mapping.
TInt osAsid = aProcess->TryOpenOsAsid();
{// The process has freed its os asid so we can't create a new mapping.
DMemoryMapping* mapping = NULL;
DShBufMapping* m = NULL;
TInt r = MM::MappingNew(mapping, MM::RoundToPageCount(iBufGap), osAsid, flags);
m = new DShBufMapping;
m->iMapping = mapping;
MM::MappingDestroy(mapping);
// Close the reference on the os asid; while aMapping is valid the
// os asid must be valid too.
aProcess->CloseOsAsid();
__KTRACE_OPT(KMMU2, Kern::Printf("DMemModelAlignedShPool::MappingNew returns 0x%08x,%d",aMapping,r));
TInt DMemModelAlignedShPool::AddToProcess(DProcess* aProcess, TUint aAttr)
__KTRACE_OPT(KMMU,Kern::Printf("Adding DMemModelAlignedShPool %O to process %O",this,aProcess));
Kern::MutexWait(*iProcessLock);
DShPoolClient* client = reinterpret_cast<DShPoolClient*>(iClientMap->Find(reinterpret_cast<TUint>(aProcess)));
client = new DMemModelAlignedShPoolClient;
client->iFlags = aAttr;
r = iClientMap->Add(reinterpret_cast<TUint>(aProcess), client);
if (aProcess != K::TheKernelProcess)
r = aProcess->iHandles.Reserve(iTotalBuffers);
iClientMap->Remove(reinterpret_cast<TUint>(aProcess));
client->iAccessCount++;
Kern::MutexSignal(*iProcessLock);
DMemModelAlignedShPool::DMemModelAlignedShPool() : DMemModelShPool()
__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelAlignedShPool::DMemModelAlignedShPool"));
void DMemModelAlignedShPool::Free(DShBuf* aBuf)
__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelAlignedShPool::Free (aBuf = 0x%08x)", aBuf));
// Remove from allocated list
aBuf->iObjLink.Deque();
DMemModelAlignedShBuf* buf = reinterpret_cast<DMemModelAlignedShBuf*>(aBuf);
if (MM::MemoryIsNotMapped(buf->iMemoryObject))
UnlockPool(); // have to release fast mutex
MM::MemoryWipe(buf->iMemoryObject);
// we want to put the initial buffers at the head of the free list
// and the grown buffers at the tail as this makes shrinking more efficient
if (aBuf >= iInitialBuffersArray && aBuf < (iInitialBuffersArray + iInitialBuffers))
iFreeList.AddHead(&aBuf->iObjLink);
iFreeList.Add(&aBuf->iObjLink);
iPendingList.Add(&aBuf->iObjLink);
iPoolFlags &= ~EShPoolSuppressShrink; // Allow shrinking again, if it was blocked
// queue ManagementDfc which completes notifications as appropriate
DShPool::Close(NULL); // decrement pool reference count
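// UpdateFreeList() drains the pending list: any buffer whose memory object is
// now fully unmapped is wiped and moved to the free list (initial buffers at
// the head, grown buffers at the tail).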
TInt DMemModelAlignedShPool::UpdateFreeList()
__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelAlignedShPool::UpdateFreeList"));
SDblQueLink* pLink = iPendingList.First();
SDblQueLink* anchor = &iPendingList.iA;
while (pLink != anchor)
DMemModelAlignedShBuf* buf = _LOFF(pLink, DMemModelAlignedShBuf, iObjLink);
pLink = pLink->iNext;
if (MM::MemoryIsNotMapped(buf->iMemoryObject))
buf->iObjLink.Deque();
MM::MemoryWipe(buf->iMemoryObject);
if (buf >= iInitialBuffersArray && buf < (iInitialBuffersArray + iInitialBuffers))
iFreeList.AddHead(&buf->iObjLink);
iFreeList.Add(&buf->iObjLink);
__KTRACE_OPT(KMMU, Kern::Printf("<DMemModelAlignedShPool::UpdateFreeList"));
DMemModelAlignedShPool::~DMemModelAlignedShPool()
__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelAlignedShPool::~DMemModelAlignedShPool"));
TInt DMemModelAlignedShPool::DoCreate(TShPoolCreateInfo& aInfo)
TUint64 maxSize64 = static_cast<TUint64>(aInfo.iInfo.iMaxBufs) * static_cast<TUint64>(iBufGap);
if (maxSize64 > static_cast<TUint64>(KMaxTInt) || maxSize64 <= static_cast<TUint64>(0))
iMaxPages = MM::RoundToPageCount(static_cast<TInt>(maxSize64));
TInt DMemModelAlignedShPool::DestroyAllMappingsAndReservedHandles(DProcess* aProcess)
__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelAlignedShPool::DestroyAllMappingsAndReservedHandles(0x%08x)", aProcess));
Kern::MutexWait(*iProcessLock);
DMemModelAlignedShPoolClient* client = reinterpret_cast<DMemModelAlignedShPoolClient*>(iClientMap->Remove(reinterpret_cast<TUint>(aProcess)));
__NK_ASSERT_DEBUG(client);
__NK_ASSERT_DEBUG(client->iAccessCount == 0);
DestroyMappings(client, KMaxTInt);
if (aProcess != K::TheKernelProcess)
// Remove reserved handles
r = aProcess->iHandles.Reserve(-iTotalBuffers);
Kern::MutexSignal(*iProcessLock);
__KTRACE_OPT(KMMU, Kern::Printf("<DMemModelAlignedShPool::DestroyAllMappingsAndReservedHandles(0x%08x)", aProcess));
TInt DMemModelAlignedShPool::DestroyMappings(DMemModelAlignedShPoolClient* aClient, TInt aNoOfMappings)
__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelAlignedShPool::DestroyMappings(0x%08x)", aClient));
DShBufMapping* m = NULL;
SDblQueLink* pLink = NULL;
while (i < aNoOfMappings && !aClient->iMappingFreeList.IsEmpty())
pLink = aClient->iMappingFreeList.GetFirst();
m = _LOFF(pLink, DShBufMapping, iObjLink);
__KTRACE_OPT(KMMU2, Kern::Printf("DMemModelAlignedShPool::DestroyMappings delete 0x%08x",m));
MM::MappingClose(m->iMapping);
__KTRACE_OPT(KMMU, Kern::Printf("<DMemModelAlignedShPool::DestroyMappings"));
TInt DMemModelAlignedShPool::CreateMappings(DMemModelAlignedShPoolClient* aClient, TInt aNoOfMappings, DMemModelProcess* aProcess)
__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelAlignedShPool::CreateMappings"));
__ASSERT_MUTEX(iProcessLock);
for (TInt i = 0; i < aNoOfMappings; ++i)
DShBufMapping* mapping;
r = MappingNew(mapping, aProcess);
aClient->iMappingFreeList.AddHead(&mapping->iObjLink);
TInt DMemModelAlignedShPool::UpdateMappingsAndReservedHandles(TInt aNoOfBuffers)
__KTRACE_OPT(KMMU2, Kern::Printf(">DMemModelAlignedShPool::UpdateMappingsAndReservedHandles(0x%08x)", aNoOfBuffers));
SMap::TIterator iter(*iClientMap);
SMap::TEntry* lastEntry = NULL;
DMemModelProcess* pP;
DMemModelAlignedShPoolClient* client;
TInt result = KErrNone;
Kern::MutexWait(*iProcessLock);
// First handle the case of increasing allocation
if (aNoOfBuffers > 0)
while ((entry = iter.Next()) != lastEntry)
// Try to update handle reservation; skip if process is null or has gone away
client = (DMemModelAlignedShPoolClient*)(entry->iObj);
pP = (DMemModelProcess*)(entry->iKey);
TInt r = pP->iHandles.Reserve(aNoOfBuffers);
__KTRACE_OPT(KMMU2, Kern::Printf("?DMemModelAlignedShPool::UpdateMappingsAndReservedHandles(0x%08x) Reserve failed %d", aNoOfBuffers, r));
if (r == KErrNone && client->iWindowSize <= 0)
// A positive window size means the number of mappings is fixed, so we don't need to reserve more.
// But here zero or negative means a variable number, so we need to create extra mappings now.
r = CreateMappings(client, aNoOfBuffers, pP);
__KTRACE_OPT(KMMU2, Kern::Printf("?DMemModelAlignedShPool::UpdateMappingsAndReservedHandles(0x%08x) CreateMappings failed %d", aNoOfBuffers, r));
pP->iHandles.Reserve(-aNoOfBuffers); // Creation failed, so release the handles reserved above
// Some problem; clean up as best we can by falling into the loop below to undo what we've done
aNoOfBuffers = -aNoOfBuffers;
// Now handle the case of decreasing allocation; also used for recovery from errors, in which case
// this loop iterates only over the elements that were *successfully* processed by the loop above
if (aNoOfBuffers < 0)
while ((entry = iter.Next()) != lastEntry)
// Try to update handle reservation; skip if process is null or has gone away
client = (DMemModelAlignedShPoolClient*)(entry->iObj);
pP = (DMemModelProcess*)(entry->iKey);
TInt r = pP->iHandles.Reserve(aNoOfBuffers);
if (r == KErrNone && client->iWindowSize <= 0)
r = DestroyMappings(client, -aNoOfBuffers);
// De-allocation by Reserve(-n) and/or DestroyMappings() should never fail
Kern::PanicCurrentThread(KLitDMemModelAlignedShPool, r);
Kern::MutexSignal(*iProcessLock);
__KTRACE_OPT(KMMU2, Kern::Printf("<DMemModelAlignedShPool::UpdateMappingsAndReservedHandles(0x%08x) returning %d", aNoOfBuffers, result));
TInt DMemModelAlignedShPool::DeleteInitialBuffers()
__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelAlignedShPool::DeleteInitialBuffers"));
if (iInitialBuffersArray != NULL)
for (TUint i = 0; i < iInitialBuffers; i++)
iInitialBuffersArray[i].iObjLink.Deque(); // remove from free list
iInitialBuffersArray[i].Dec();
iInitialBuffersArray[i].~DMemModelAlignedShBuf();
Kern::Free(iInitialBuffersArray);
iInitialBuffersArray = NULL;
TInt DMemModelAlignedShPool::Close(TAny* aPtr)
__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelAlignedShPool::Close(0x%08x)", aPtr));
DProcess* pP = reinterpret_cast<DProcess*>(aPtr);
__KTRACE_OPT(KMMU, Kern::Printf("<DMemModelAlignedShPool::Close(0x%08x)", aPtr));
return DShPool::Close(aPtr);
TInt DMemModelAlignedShPool::CreateInitialBuffers()
__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelAlignedShPool::CreateInitialBuffers"));
iInitialBuffersArray = reinterpret_cast<DMemModelAlignedShBuf*>(Kern::Alloc(iInitialBuffers * sizeof(DMemModelAlignedShBuf)));
if (iInitialBuffersArray == NULL)
for (TUint i = 0; i < iInitialBuffers; i++)
// always use kernel linear address in DShBuf
DMemModelAlignedShBuf *buf = new (&iInitialBuffersArray[i]) DMemModelAlignedShBuf(this);
TInt r = buf->Construct();
iFreeList.Add(&buf->iObjLink);
iFreeBuffers = iInitialBuffers;
iTotalBuffers = iInitialBuffers;
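// GrowPool() enlarges the pool by a fraction of its current size:
// iGrowByRatio is a fixed-point fraction (a 24.8 format, judging by the
// mult_fx248 name), so the grow count is iTotalBuffers scaled by the ratio,
// clamped to the remaining headroom.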
TInt DMemModelAlignedShPool::GrowPool()
__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelAlignedShPool::GrowPool()"));
Kern::MutexWait(*iProcessLock);
TUint32 headroom = iMaxBuffers - iTotalBuffers;
// How many buffers to grow by?
TUint32 grow = mult_fx248(iTotalBuffers, iGrowByRatio);
if (grow == 0) // Handle round-to-zero
for (i = 0; i < grow; ++i)
DMemModelAlignedShBuf *buf = new DMemModelAlignedShBuf(this);
TInt r = buf->Construct();
buf->DObject::Close(NULL);
temp.Add(&buf->iObjLink);
r = UpdateMappingsAndReservedHandles(i);
iFreeList.MoveFrom(&temp);
// couldn't create the mappings or reserve the handles, so we have no
// choice but to delete the buffers
while ((pLink = temp.GetFirst()) != NULL)
DShBuf* buf = _LOFF(pLink, DShBuf, iObjLink);
buf->DObject::Close(NULL);
CalculateGrowShrinkTriggers();
Kern::MutexSignal(*iProcessLock);
__KTRACE_OPT(KMMU, Kern::Printf("<DMemModelAlignedShPool::GrowPool()"));
TInt DMemModelAlignedShPool::ShrinkPool()
__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelAlignedShPool::ShrinkPool()"));
Kern::MutexWait(*iProcessLock);
TUint32 grownBy = iTotalBuffers - iInitialBuffers;
// How many buffers to shrink by?
TUint32 shrink = mult_fx248(iTotalBuffers, iShrinkByRatio);
if (shrink == 0) // Handle round-to-zero
if (shrink > grownBy)
if (shrink > iFreeBuffers)
shrink = iFreeBuffers;
// work backwards as the grown buffers should be at the back
for (i = 0; i < shrink; i++)
if (iFreeList.IsEmpty())
DShBuf* buf = _LOFF(iFreeList.Last(), DShBuf, iObjLink);
// can't delete initial buffers
if (buf >= iInitialBuffersArray && buf < (iInitialBuffersArray + iInitialBuffers))
buf->iObjLink.Deque();
buf->DObject::Close(NULL);
TInt r = UpdateMappingsAndReservedHandles(-(TInt)i);
// If we couldn't shrink the pool by this many buffers, wait until we Free() another
// buffer before trying to shrink again.
iPoolFlags |= EShPoolSuppressShrink;
CalculateGrowShrinkTriggers();
Kern::MutexSignal(*iProcessLock);
__KTRACE_OPT(KMMU, Kern::Printf("<DMemModelAlignedShPool::ShrinkPool()"));
TInt DMemModelAlignedShPool::Alloc(DShBuf*& aShBuf)
__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelAlignedShPool::Alloc (DShBuf)"));
TInt r = KErrNoMemory;
if (!iFreeList.IsEmpty())
aShBuf = _LOFF(iFreeList.GetFirst(), DShBuf, iObjLink);
iAllocated.Add(&aShBuf->iObjLink);
iAllocatedBuffers++;
Open(); // increment pool reference count
KickManagementDfc();
__KTRACE_OPT(KMMU, Kern::Printf("<DMemModelAlignedShPool::Alloc return buf = 0x%08x", aShBuf));
DMemModelNonAlignedShBuf::DMemModelNonAlignedShBuf(DShPool* aPool, TLinAddr aRelAddr) : DShBuf(aPool, aRelAddr)
__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelNonAlignedShBuf::DMemModelNonAlignedShBuf()"));
DMemModelNonAlignedShBuf::~DMemModelNonAlignedShBuf()
__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelNonAlignedShBuf::~DMemModelNonAlignedShBuf()"));
TInt DMemModelNonAlignedShBuf::Close(TAny* aPtr)
__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelNonAlignedShBuf::Close(0x%08x)", aPtr));
DProcess* pP = reinterpret_cast<DProcess*>(aPtr);
// there are no per-buffer resources for kernel clients of non-aligned buffers
if (pP != K::TheKernelProcess)
iPool->CloseClient(pP);
return DShBuf::Close(aPtr);
TInt DMemModelNonAlignedShBuf::AddToProcess(DProcess* aProcess, TUint /* aAttr */)
__KTRACE_OPT(KMMU, Kern::Printf("Adding DMemModelShBuf %O to process %O", this, aProcess));
return iPool->OpenClient(aProcess, flags);
TUint8* DMemModelNonAlignedShBuf::Base(DProcess* aProcess)
__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelNonAlignedShBuf::Base(0x%x)", aProcess));
TUint8* base = reinterpret_cast<DMemModelNonAlignedShPool*>(iPool)->Base(aProcess) + (TUint)iRelAddress;
TUint8* DMemModelNonAlignedShBuf::Base()
__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelNonAlignedShBuf::Base()"));
TUint8* base = reinterpret_cast<DMemModelNonAlignedShPool*>(iPool)->Base();
return base ? base + iRelAddress : NULL;
TInt DMemModelNonAlignedShBuf::Map(TUint /* aMapAttr */, DProcess* /* aProcess */, TLinAddr& /* aBase */)
__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelNonAlignedShBuf::Map()"));
return KErrNotSupported;
TInt DMemModelNonAlignedShBuf::UnMap(DProcess* /* aProcess */)
__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelNonAlignedShBuf::UnMap()"));
return KErrNotSupported;
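// Non-aligned buffers share the pool's single memory object, so Pin() pins
// the page range spanned by the buffer and then adjusts the returned physical
// address to the buffer's offset within its first page.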
TInt DMemModelNonAlignedShBuf::Pin(TPhysicalPinObject* aPinObject, TBool aReadOnly, TPhysAddr& aAddress, TPhysAddr* aPages, TUint32& aMapAttr, TUint& aColour)
CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"DMemModelNonAlignedShBuf::Pin");
DMemModelNonAlignedShPool* pool = reinterpret_cast<DMemModelNonAlignedShPool*>(iPool);
NKern::ThreadEnterCS();
TInt startPage = iRelAddress >> KPageShift;
TInt lastPage = MM::RoundToPageCount(iRelAddress + Size());
TInt pages = lastPage - startPage;
if (!pages) pages++;
TInt r = MM::PinPhysicalMemory(pool->iMemoryObject, (DPhysicalPinMapping*)aPinObject,
startPage, pages, aReadOnly, aAddress, aPages, aMapAttr, aColour);
// adjust physical address to start of the buffer
aAddress += (iRelAddress - (startPage << KPageShift));
NKern::ThreadLeaveCS();
DMemModelNonAlignedShPool::DMemModelNonAlignedShPool() : DMemModelShPool()
__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelNonAlignedShPool::DMemModelNonAlignedShPool"));
DMemModelNonAlignedShPool::~DMemModelNonAlignedShPool()
__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelNonAlignedShPool::~DMemModelNonAlignedShPool"));
MM::MemoryDestroy(iMemoryObject);
TInt DMemModelNonAlignedShPool::DoCreate(TShPoolCreateInfo& aInfo)
__KTRACE_OPT(KMMU, Kern::Printf("DMemModelNonAlignedShPool::DoCreate(%d, %d, %d)", aInfo.iInfo.iMaxBufs, iBufGap, iBufSize));
TUint64 maxSize64 = static_cast<TUint64>(aInfo.iInfo.iMaxBufs) * static_cast<TUint64>(iBufGap);
if (maxSize64 > static_cast<TUint64>(KMaxTInt) || maxSize64 <= static_cast<TUint64>(0))
return KErrArgument;
TInt maxPages = MM::RoundToPageCount(static_cast<TInt>(maxSize64));
iBufMap = TBitMapAllocator::New(aInfo.iInfo.iMaxBufs, (TBool)ETrue);
if (iBufMap == NULL)
return KErrNoMemory;
iPagesMap = TBitMapAllocator::New(maxPages, (TBool)ETrue);
if (iPagesMap == NULL)
return KErrNoMemory;
// Memory attributes
TMemoryAttributes attr = EMemoryAttributeStandard;
TMemoryObjectType memoryType = (iPoolFlags & EShPoolPhysicalMemoryPool) ? EMemoryObjectHardware : EMemoryObjectUnpaged;
TMemoryCreateFlags memoryFlags = EMemoryCreateDefault; // Don't leave previous contents of memory
// Now create the memory object
r = MM::MemoryNew(iMemoryObject, memoryType, maxPages, memoryFlags, attr);
// Make sure we give the caller the number of buffers they were expecting
iCommittedPages = MM::RoundToPageCount(iInitialBuffers * iBufGap);
if (iPoolFlags & EShPoolPhysicalMemoryPool)
__KTRACE_OPT(KMMU, Kern::Printf("DMemModelNonAlignedShPool::DoCreate(iCommittedPages = 0x%08x, aInfo.iPhysAddr.iPhysAddrList = 0x%08x )", iCommittedPages, aInfo.iPhysAddr.iPhysAddrList));
if (iPoolFlags & EShPoolContiguous)
r = MM::MemoryAddContiguous(iMemoryObject, 0, iCommittedPages, aInfo.iPhysAddr.iPhysAddr);
r = MM::MemoryAddPages(iMemoryObject, 0, iCommittedPages, aInfo.iPhysAddr.iPhysAddrList);
iMaxPages = iCommittedPages;
__KTRACE_OPT(KMMU, Kern::Printf("DMemModelNonAlignedShPool::DoCreate(iCommittedPages = %d, contig = %d)", iCommittedPages, iPoolFlags & EShPoolContiguous));
if (iPoolFlags & EShPoolContiguous)
r = MM::MemoryAllocContiguous(iMemoryObject, 0, iCommittedPages, 0, paddr);
r = MM::MemoryAlloc(iMemoryObject, 0, iCommittedPages);
iMaxPages = maxPages;
iPagesMap->Alloc(0, iCommittedPages);
TUint8* DMemModelNonAlignedShPool::Base(DProcess* aProcess)
DMemModelNonAlignedShPoolClient* client = reinterpret_cast<DMemModelNonAlignedShPoolClient*>(iClientMap->Find(reinterpret_cast<TUint>(aProcess)));
__NK_ASSERT_DEBUG(client); // ASSERT because pool must be already opened in the client's address space
__NK_ASSERT_DEBUG(client->iMapping); // ASSERT because non-aligned buffers are mapped by default in user space
base = reinterpret_cast<TUint8*>(MM::MappingBase(client->iMapping));
TInt DMemModelNonAlignedShPool::CreateInitialBuffers()
__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelNonAlignedShPool::CreateInitialBuffers"));
iInitialBuffersArray = reinterpret_cast<DMemModelNonAlignedShBuf*>(Kern::Alloc(iInitialBuffers * sizeof(DMemModelNonAlignedShBuf)));
if (iInitialBuffersArray == NULL)
return KErrNoMemory;
TLinAddr offset = 0;
for (TUint i = 0; i < iInitialBuffers; i++)
DMemModelNonAlignedShBuf *buf = new (&iInitialBuffersArray[i]) DMemModelNonAlignedShBuf(this, offset);
TInt r = buf->Construct();
iFreeList.Add(&buf->iObjLink);
iInitialBuffers = i;
return KErrNoMemory;
iFreeBuffers = iInitialBuffers;
iTotalBuffers = iInitialBuffers;
iBufMap->Alloc(0, iInitialBuffers);
TInt DMemModelNonAlignedShPool::AddToProcess(DProcess* aProcess, TUint aAttr)
// Must be in critical section so we don't leak os asid references.
__KTRACE_OPT(KMMU, Kern::Printf("Adding DMemModelShPool %O to process %O", this, aProcess));
DMemoryMapping* mapping = NULL;
TBool write = (TBool)EFalse;
// User = ETrue, ReadOnlyWrite = ETrue, Execute = EFalse
if (aAttr & EShPoolWriteable)
write = (TBool)ETrue;
TMappingPermissions perm = MM::MappingPermissions(ETrue, // user
TMappingCreateFlags mappingFlags = EMappingCreateDefault;
DMemModelProcess* pP = reinterpret_cast<DMemModelProcess*>(aProcess);
Kern::MutexWait(*iProcessLock);
DMemModelNonAlignedShPoolClient* client = reinterpret_cast<DMemModelNonAlignedShPoolClient*>(iClientMap->Find(reinterpret_cast<TUint>(aProcess)));
client = new DMemModelNonAlignedShPoolClient;
// map non-aligned pools into user-side processes by default
if (aAttr & EShPoolAutoMapBuf || pP != K::TheKernelProcess)
// Open a reference on the os asid so it doesn't get freed and reused.
TInt osAsid = pP->TryOpenOsAsid();
{// The process freed its os asid so we can't create a new mapping.
r = MM::MappingNew(mapping, iMemoryObject, perm, osAsid, mappingFlags);
// Close the reference as the mapping will be destroyed if the process dies.
if ((r == KErrNone) && (pP == K::TheKernelProcess))
iBaseAddress = MM::MappingBase(mapping);
client->iMapping = mapping;
client->iFlags = aAttr;
r = iClientMap->Add(reinterpret_cast<TUint>(aProcess), client);
if (pP != K::TheKernelProcess)
r = aProcess->iHandles.Reserve(iTotalBuffers);
iClientMap->Remove(reinterpret_cast<TUint>(aProcess));
MM::MappingDestroy(mapping);
client->iAccessCount++;
Kern::MutexSignal(*iProcessLock);
TInt DMemModelNonAlignedShPool::DeleteInitialBuffers()
__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelNonAlignedShPool::DeleteInitialBuffers"));
if (iInitialBuffersArray != NULL)
for (TUint i = 0; i < iInitialBuffers; i++)
iInitialBuffersArray[i].iObjLink.Deque(); // remove from free list
iInitialBuffersArray[i].Dec();
iInitialBuffersArray[i].~DMemModelNonAlignedShBuf();
Kern::Free(iInitialBuffersArray);
iInitialBuffersArray = NULL;
TInt DMemModelNonAlignedShPool::DestroyAllMappingsAndReservedHandles(DProcess* aProcess)
__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelNonAlignedShPool::DestroyAllMappingsAndReservedHandles(0x%08x)", aProcess));
Kern::MutexWait(*iProcessLock);
DMemModelNonAlignedShPoolClient* client = reinterpret_cast<DMemModelNonAlignedShPoolClient*>(iClientMap->Remove(reinterpret_cast<TUint>(aProcess)));
__NK_ASSERT_DEBUG(client);
__NK_ASSERT_DEBUG(client->iAccessCount == 0);
if (client->iMapping)
MM::MappingDestroy(client->iMapping);
if (aProcess != K::TheKernelProcess)
// Remove reserved handles
r = aProcess->iHandles.Reserve(-(iTotalBuffers));
Kern::MutexSignal(*iProcessLock);
__KTRACE_OPT(KMMU, Kern::Printf("<DMemModelNonAlignedShPool::DestroyAllMappingsAndReservedHandles(0x%08x)", aProcess));
TInt DMemModelNonAlignedShPool::Close(TAny* aPtr)
__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelNonAlignedShPool::Close(0x%08x)", aPtr));
DProcess* pP = reinterpret_cast<DProcess*>(aPtr);
return DShPool::Close(aPtr);
void DMemModelNonAlignedShPool::FreeBufferPages(TUint aOffset)
TLinAddr firstByte = aOffset; // offset of first byte in buffer
TLinAddr lastByte = firstByte+iBufGap-1; // offset of last byte in buffer
TUint firstPage = firstByte>>KPageShift; // index of first page containing part of the buffer
TUint lastPage = lastByte>>KPageShift; // index of last page containing part of the buffer
TUint firstBuffer = (firstByte&~KPageMask)/iBufGap; // index of first buffer which lies in firstPage
TUint lastBuffer = (lastByte|KPageMask)/iBufGap; // index of last buffer which lies in lastPage
TUint thisBuffer = firstByte/iBufGap; // index of the buffer to be freed
// Ensure lastBuffer is within bounds (there may be room in the last
// page for more buffers than we have allocated).
if (lastBuffer >= iMaxBuffers)
lastBuffer = iMaxBuffers-1;
if(firstBuffer!=thisBuffer && iBufMap->NotFree(firstBuffer,thisBuffer-firstBuffer))
// first page has other allocated buffers in it,
// so we can't free it and must move on to next one...
if (firstPage >= lastPage)
if(lastBuffer!=thisBuffer && iBufMap->NotFree(thisBuffer+1,lastBuffer-thisBuffer))
// last page has other allocated buffers in it,
// so we can't free it and must step back to previous one...
if (lastPage <= firstPage)
if(firstPage<=lastPage)
// we can free pages firstPage through to lastPage...
TUint numPages = lastPage-firstPage+1;
iPagesMap->SelectiveFree(firstPage,numPages);
MM::MemoryLock(iMemoryObject);
MM::MemoryFree(iMemoryObject, firstPage, numPages);
MM::MemoryUnlock(iMemoryObject);
iCommittedPages -= numPages;
TInt DMemModelNonAlignedShPool::GrowPool()
__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelNonAlignedShPool::GrowPool()"));
// Don't do anything with physical memory pools
if (iPoolFlags & EShPoolPhysicalMemoryPool)
Kern::MutexWait(*iProcessLock);
TUint32 headroom = iMaxBuffers - iTotalBuffers;
// How many buffers to grow by?
TUint32 grow = mult_fx248(iTotalBuffers, iGrowByRatio);
if (grow == 0) // Handle round-to-zero
if (grow > headroom)
for (i = 0; i < grow; ++i)
TInt offset = iBufMap->Alloc();
TInt lastPage = (offset + iBufSize - 1) >> KPageShift;
// Allocate one page at a time.
for (TInt page = offset >> KPageShift; page <= lastPage; ++page)
// Is the page allocated?
if (iPagesMap->NotAllocated(page, 1))
MM::MemoryLock(iMemoryObject);
r = MM::MemoryAlloc(iMemoryObject, page, 1);
MM::MemoryUnlock(iMemoryObject);
iPagesMap->Alloc(page, 1);
iBufMap->Free(offset / iBufGap);
FreeBufferPages(offset);
DMemModelNonAlignedShBuf *buf = new DMemModelNonAlignedShBuf(this, offset);
iBufMap->Free(offset / iBufGap);
FreeBufferPages(offset);
r = buf->Construct();
iBufMap->Free(offset / iBufGap);
FreeBufferPages(offset);
buf->DObject::Close(NULL);
temp.Add(&buf->iObjLink);
r = UpdateReservedHandles(i);
iFreeList.MoveFrom(&temp);
// couldn't reserve the handles, so we have no choice but to
// delete the buffers
__KTRACE_OPT(KMMU, Kern::Printf("GrowPool failed with %d, deleting buffers", r));
while ((pLink = temp.GetFirst()) != NULL)
DShBuf* buf = _LOFF(pLink, DShBuf, iObjLink);
TLinAddr offset = buf->iRelAddress;
iBufMap->Free(offset / iBufGap);
FreeBufferPages(offset);
buf->DObject::Close(NULL);
__KTRACE_OPT(KMMU, Kern::Printf("Buffers deleted"));
CalculateGrowShrinkTriggers();
Kern::MutexSignal(*iProcessLock);
__KTRACE_OPT(KMMU, Kern::Printf("<DMemModelNonAlignedShPool::GrowPool()"));
TInt DMemModelNonAlignedShPool::ShrinkPool()
__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelNonAlignedShPool::ShrinkPool()"));
// Don't do anything with physical memory pools
if (iPoolFlags & EShPoolPhysicalMemoryPool)
Kern::MutexWait(*iProcessLock);
TUint32 grownBy = iTotalBuffers - iInitialBuffers;
// How many buffers to shrink by?
TUint32 shrink = mult_fx248(iTotalBuffers, iShrinkByRatio);
if (shrink == 0) // Handle round-to-zero
if (shrink > grownBy)
if (shrink > iFreeBuffers)
shrink = iFreeBuffers;
for (i = 0; i < shrink; ++i)
if (iFreeList.IsEmpty())
// work from the back of the queue
SDblQueLink *pLink = iFreeList.Last();
DShBuf* pBuf = _LOFF(pLink, DShBuf, iObjLink);
if (pBuf >= iInitialBuffersArray && pBuf < (iInitialBuffersArray + iInitialBuffers))
TLinAddr offset = pBuf->iRelAddress;
iBufMap->Free(offset / iBufGap);
FreeBufferPages(offset);
pBuf->DObject::Close(NULL);
UpdateReservedHandles(-(TInt)i);
// If we couldn't shrink the pool by this many buffers, wait until we Free() another
// buffer before trying to shrink again.
iPoolFlags |= EShPoolSuppressShrink;
CalculateGrowShrinkTriggers();
Kern::MutexSignal(*iProcessLock);
__KTRACE_OPT(KMMU, Kern::Printf("<DMemModelNonAlignedShPool::ShrinkPool()"));
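// Free() (below) adds released grown buffers to iAltFreeList under the pool's
// fast lock; UpdateFreeList() then merges them into iFreeList in ascending
// iRelAddress order, flashing the fast lock between steps, so that
// ShrinkPool() can find grown buffers at the tail of the sorted list.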
TInt DMemModelNonAlignedShPool::UpdateFreeList()
__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelNonAlignedShPool::UpdateFreeList"));
while(!iAltFreeList.IsEmpty())
// sort a temporary list of 'n' objects with the lowest index first
for (TInt n = 0; n < 8 && !iAltFreeList.IsEmpty(); ++n)
// this is a bit of an assumption: let's assume that the lower indexes will be
// allocated and freed first, and will therefore be nearer the front of the list
DShBuf* buf = _LOFF(iAltFreeList.GetFirst(), DShBuf, iObjLink);
SDblQueLink* anchor = reinterpret_cast<SDblQueLink*>(&temp);
SDblQueLink* pLink = temp.Last();
// traverse the list starting at the back
if ((pLink != anchor) && (_LOFF(pLink, DShBuf, iObjLink)->iRelAddress > buf->iRelAddress))
pLink = pLink->iPrev;
buf->iObjLink.InsertAfter(pLink);
// now merge with the free list
while(!temp.IsEmpty())
if (iFreeList.IsEmpty())
iFreeList.MoveFrom(&temp);
// working backwards with the highest index
DShBuf* buf = _LOFF(temp.Last(), DShBuf, iObjLink);
SDblQueLink* anchor = reinterpret_cast<SDblQueLink*>(&iFreeList);
SDblQueLink* pLink = iFreeList.Last();
while (!NKern::FMFlash(&iLock))
if ((pLink != anchor) && (_LOFF(pLink, DShBuf, iObjLink)->iRelAddress > buf->iRelAddress))
pLink = pLink->iPrev;
buf->iObjLink.Deque();
buf->iObjLink.InsertAfter(pLink);
buf = _LOFF(temp.Last(), DShBuf, iObjLink);
NKern::FMFlash(&iLock);
__KTRACE_OPT(KMMU, Kern::Printf("<DMemModelNonAlignedShPool::UpdateFreeList"));
void DMemModelNonAlignedShPool::Free(DShBuf* aBuf)
__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelNonAlignedShPool::Free (aBuf = 0x%08x, aBuf->Base() 0x%08x)", aBuf, aBuf->iRelAddress));
// Remove from allocated list
aBuf->iObjLink.Deque();
// we want to put the initial buffers at the head of the free list
// and the grown buffers at the tail as this makes shrinking more efficient
if (aBuf >= iInitialBuffersArray && aBuf < (iInitialBuffersArray + iInitialBuffers))
iFreeList.AddHead(&aBuf->iObjLink);
iAltFreeList.Add(&aBuf->iObjLink);
--iAllocatedBuffers;
iPoolFlags &= ~EShPoolSuppressShrink; // Allow shrinking again, if it was blocked
// queue ManagementDfc which completes notifications as appropriate
KickManagementDfc();
DShPool::Close(NULL); // decrement pool reference count
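// Alloc() takes from the sorted free list first and falls back to the
// unsorted alternative free list before giving up and kicking the management
// DFC to try to grow the pool.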
TInt DMemModelNonAlignedShPool::Alloc(DShBuf*& aShBuf)
__KTRACE_OPT(KMMU, Kern::Printf(">DMemModelNonAlignedShPool::Alloc (DShBuf)"));
if (!iFreeList.IsEmpty())
aShBuf = _LOFF(iFreeList.GetFirst(), DShBuf, iObjLink);
iAllocated.Add(&aShBuf->iObjLink);
iAllocatedBuffers++;
// try alternative free list
if (!iAltFreeList.IsEmpty())
aShBuf = _LOFF(iAltFreeList.GetFirst(), DShBuf, iObjLink);
iAllocated.Add(&aShBuf->iObjLink);
iAllocatedBuffers++;
KickManagementDfc(); // Try to grow
return KErrNoMemory;
Open(); // increment pool reference count
KickManagementDfc();
__KTRACE_OPT(KMMU, Kern::Printf("<DMemModelNonAlignedShPool::Alloc return buf = 0x%08x", aShBuf));