Update contrib.
1 // Copyright (c) 1994-2009 Nokia Corporation and/or its subsidiary(-ies).
2 // All rights reserved.
3 // This component and the accompanying materials are made available
4 // under the terms of the License "Eclipse Public License v1.0"
5 // which accompanies this distribution, and is available
6 // at the URL "http://www.eclipse.org/legal/epl-v10.html".
8 // Initial Contributors:
9 // Nokia Corporation - initial contribution.
14 // e32\memmodel\emul\win32\mchunk.cpp
// Destructor: releases the Win32 virtual-address reservation backing this
// chunk and returns its committed memory to the emulator's MM accounting.
// NOTE(review): this excerpt is sampled — intervening lines (braces, an else
// arm after the CacheMemory test, DFC enqueue after the atomic swap) are
// elided and cannot be verified here.
21 DWin32Chunk::~DWin32Chunk()
23 __KTRACE_OPT(KTHREAD,Kern::Printf("DWin32Chunk destruct %O",this));
// Decommit every page in the reserved range, then release the reservation
// itself (MEM_RELEASE requires size 0 and the original base address).
27 VirtualFree(LPVOID(iBase), iMaxSize, MEM_DECOMMIT);
28 VirtualFree(LPVOID(iBase), 0, MEM_RELEASE);
// All of this chunk's committed bytes become free again.
30 MM::FreeMemory += iSize;
31 if(iUnlockedPageBitMap)
// Pages marked in iUnlockedPageBitMap were donated to the paging cache:
// allocated bits = iSize - iAvail, scaled to bytes by the RAM page size.
33 TInt unlockedMemory = MM::RamPageSize*(iUnlockedPageBitMap->iSize-iUnlockedPageBitMap->iAvail);
34 if(unlockedMemory<=MM::CacheMemory)
35 MM::CacheMemory-=unlockedMemory;
// Otherwise the excess over CacheMemory must already have been reclaimed;
// (the else/brace lines around this statement are elided in this excerpt).
38 MM::ReclaimedCacheMemory -= unlockedMemory-MM::CacheMemory;
// Sanity-check the global memory counters after the adjustment.
41 MM::CheckMemoryCounters();
45 __KTRACE_OPT(KMEMTRACE, {MM::Wait();Kern::Printf("MT:D %d %x %O",NTickCount(),this,this);MM::Signal();});
47 BTraceContext4(BTrace::EChunks,BTrace::EChunkDestroyed,this);
50 delete iUnlockedPageBitMap;
51 delete iPermanentPageBitMap;
// Atomically claim the destruction-notification DFC (if any) so it fires
// exactly once; presumably enqueued on the elided following lines — verify.
53 TDfc* dfc = (TDfc*)__e32_atomic_swp_ord_ptr(&iDestroyedDfc, 0);
// Returns the chunk's base address; the process argument is unused because
// the emulator maps every chunk at the same address in the single Win32
// address space. (Body elided in this excerpt — presumably returns iBase.)
59 TUint8* DWin32Chunk::Base(DProcess* /*aProcess*/)
// Second-phase construction: sizes the chunk, allocates its page bitmaps
// according to chunk type, and reserves (but does not commit) the backing
// Win32 address range. Returns a system-wide error code.
// NOTE(review): sampled excerpt — the switch header, braces, error paths and
// final return are elided.
65 TInt DWin32Chunk::DoCreate(SChunkCreateInfo& aInfo)
// Chunk-attribute bit spaces for the MM layer and the generic layer must not
// overlap — checked at compile time.
67 __ASSERT_COMPILE(!(EMMChunkAttributesMask & EChunkAttributesMask));
// The emulator cannot model memory-not-owned chunks.
69 if(iAttributes&EMemoryNotOwned)
70 return KErrNotSupported;
71 if (aInfo.iMaxSize<=0)
// Reserve whole MM chunks; derive the page count for the bitmaps.
73 iMaxSize=MM::RoundToChunkSize(aInfo.iMaxSize);
74 TInt maxpages=iMaxSize>>MM::RamPageShift;
75 if (iAttributes & EDisconnected)
// Disconnected chunks track committed pages and unlocked (cache) pages
// with one bit per page, all initially free (ETrue).
77 TBitMapAllocator* pM=TBitMapAllocator::New(maxpages,ETrue);
80 TBitMapAllocator* pUM=TBitMapAllocator::New(maxpages,ETrue);
87 iUnlockedPageBitMap=pUM;
88 __KTRACE_OPT(KMMU,Kern::Printf("PageBitMap at %08x, MaxPages %d",pM,maxpages));
// Shared-kernel chunks additionally record permanently-mapped pages so
// Address()/PhysicalAddress() can validate kernel-side access.
92 case ESharedKernelSingle:
93 case ESharedKernelMultiple:
95 TBitMapAllocator* pM=TBitMapAllocator::New(maxpages,ETrue);
98 iPermanentPageBitMap = pM;
100 // fall through to next case...
103 case EUserSelfModCode:
// Self-modifying-code chunks need execute permission on their pages.
106 DWORD protect = (iChunkType == EUserSelfModCode) ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
// Reserve the full address range up front; pages are committed later by
// DoCommit() as the chunk is adjusted/committed.
107 LPVOID base = VirtualAlloc(NULL, iMaxSize, MEM_RESERVE, protect);
110 iBase = (TUint8*) base;
111 __KTRACE_OPT(KMMU,Kern::Printf("Reserved: Base=%08x, Size=%08x",iBase,iMaxSize));
117 __KTRACE_OPT(KMEMTRACE, {MM::Wait();Kern::Printf("MT:C %d %x %O",NTickCount(),this,this);MM::Signal();});
// Emit creation traces (name, owner, type/attributes) for BTrace tooling.
121 BTraceContextN(BTrace::EChunks,BTrace::EChunkCreated,this,iMaxSize,nameBuf.Ptr(),nameBuf.Size());
123 BTrace8(BTrace::EChunks,BTrace::EChunkOwner,this,iOwningProcess);
124 BTraceContext12(BTrace::EChunks,BTrace::EChunkInfo,this,iChunkType,iAttributes);
// Grow or shrink a standard (single-ended) chunk to aNewSize bytes.
// Returns KErrNone or a system-wide error; not valid for double-ended or
// disconnected chunks. (Sampled excerpt — guard returns and braces elided.)
129 TInt DWin32Chunk::Adjust(TInt aNewSize)
131 // Adjust a standard chunk.
135 __KTRACE_OPT(KMMU,Kern::Printf("DWin32Chunk::Adjust %08x",aNewSize));
// Standard-chunk API only: reject double-ended/disconnected chunks.
136 if (iAttributes & (EDoubleEnded|EDisconnected))
138 if (aNewSize<0 || aNewSize>iMaxSize)
// Work in whole pages.
142 TInt newSize=MM::RoundToPageSize(aNewSize);
148 __KTRACE_OPT(KMMU,Kern::Printf("DWin32Chunk::Adjust growing"));
// Growing: commit the new tail region [iSize, newSize).
149 r=DoCommit(iSize,newSize-iSize);
151 else if (newSize<iSize)
153 __KTRACE_OPT(KMMU,Kern::Printf("DWin32Chunk::Adjust shrinking"));
// Shrinking: decommit the surplus tail region [newSize, iSize).
154 DoDecommit(newSize,iSize-newSize);
159 __COND_DEBUG_EVENT(r==KErrNone, EEventUpdateChunk, this);
160 __KTRACE_OPT(KMMU,Kern::Printf("DWin32Chunk %O adjusted to %x",this,iSize));
// Adjust a double-ended chunk so its committed region becomes
// [aBottom, aTop). Commits/decommits only the difference between the old
// and new regions. (Sampled excerpt — braces, intermediate error handling
// and the rollback paths between the visible lines are elided.)
164 TInt DWin32Chunk::AdjustDoubleEnded(TInt aBottom, TInt aTop)
166 // Adjust a double-ended chunk.
169 __KTRACE_OPT(KMMU,Kern::Printf("DWin32Chunk::AdjustDoubleEnded %x-%x",aBottom,aTop));
// Only plain double-ended chunks are valid here.
170 if ((iAttributes & (EDoubleEnded|EDisconnected))!=EDoubleEnded)
172 if (0>aBottom || aBottom>aTop || aTop>iMaxSize)
// Round the requested region outward to page boundaries: bottom down, top up.
174 aBottom &= ~(MM::RamPageSize-1);
175 aTop = MM::RoundToPageSize(aTop);
176 TInt newSize=aTop-aBottom;
// Current committed region is [iStartPos, iStartPos+iSize); compute its
// intersection with the requested region.
179 TInt initBottom=iStartPos;
180 TInt initTop=iStartPos+iSize;
181 TInt nBottom=Max(aBottom,initBottom); // intersection bottom
182 TInt nTop=Min(aTop,initTop); // intersection top
186 __KTRACE_OPT(KMMU,Kern::Printf("Initial and final regions intersect"));
// Overlapping case: trim what sticks out of the new region, then commit
// what the new region adds at each end.
187 if (initBottom<nBottom)
190 DoDecommit(initBottom,nBottom-initBottom);
193 DoDecommit(nTop,initTop-nTop); // this changes iSize
196 r=DoCommit(aBottom,nBottom-aBottom);
200 r=DoCommit(nTop,aTop-nTop);
// On failure of the top commit, roll back the bottom extension.
204 DoDecommit(aBottom,nBottom-aBottom);
208 r=DoCommit(nTop,aTop-nTop);
// Disjoint case: throw away the whole old region and commit the new one.
212 __KTRACE_OPT(KMMU,Kern::Printf("Initial and final regions disjoint"));
214 DoDecommit(initBottom,iSize);
217 r=DoCommit(iStartPos,newSize);
220 __COND_DEBUG_EVENT(r==KErrNone, EEventUpdateChunk, this);
221 __KTRACE_OPT(KMMU,Kern::Printf("DWin32Chunk %O adjusted to %x+%x",this,iStartPos,iSize));
// Commit a page-aligned region of a disconnected chunk.
// aCommitType selects contiguous/discontiguous and physical variants;
// aExtraArg receives a fake "physical" address for the contiguous case.
// (Sampled excerpt — switch header, braces and some returns are elided.)
226 TInt DWin32Chunk::Commit(TInt aOffset, TInt aSize, TCommitType aCommitType, TUint32* aExtraArg)
228 // Commit to a disconnected chunk.
231 __KTRACE_OPT(KMMU,Kern::Printf("DWin32Chunk::Commit %x+%x type=%d extra=%08x",aOffset,aSize,aCommitType,aExtraArg));
232 if ((iAttributes & (EDoubleEnded|EDisconnected))!=EDisconnected)
234 if (aOffset<0 || aSize<0 || (aOffset+aSize)>iMaxSize)
// A physical-commit type is only meaningful for memory-not-owned chunks,
// and vice versa: the two flags must agree.
236 if(LOGICAL_XOR((TInt)aCommitType&DChunk::ECommitPhysicalMask, iAttributes&DChunk::EMemoryNotOwned))
237 return KErrNotSupported; // Commit type doesn't match 'memory owned' type
// Round the region outward to whole pages (top up, offset down).
239 TInt top = MM::RoundToPageSize(aOffset + aSize);
240 aOffset &= ~(MM::RamPageSize - 1);
241 aSize = top - aOffset;
244 TInt i=aOffset>>MM::RamPageShift;
245 TInt n=aSize>>MM::RamPageShift;
// Refuse to commit over pages that are already committed.
247 if (iPageBitMap->NotFree(i,n))
253 case DChunk::ECommitDiscontiguous:
255 r=DoCommit(aOffset,aSize);
260 case DChunk::ECommitContiguous:
261 r=DoCommit(aOffset,aSize);
262 // Return a fake physical address which is == linear address
264 *aExtraArg = (TUint)(iBase+aOffset);
267 case DChunk::ECommitDiscontiguousPhysical:
268 case DChunk::ECommitContiguousPhysical:
269 // The emulator doesn't do physical address allocation
// Record the newly committed pages in the bitmap (on success — the
// surrounding success check is elided in this excerpt).
278 iPageBitMap->Alloc(i,n);
280 MM::CheckMemoryCounters();
282 __COND_DEBUG_EVENT(r==KErrNone, EEventUpdateChunk, this);
// Find a free run in a disconnected chunk, commit aSize bytes after an
// uncommitted guard region of aGuard bytes, and return the allocated offset
// (>=0) or a negative error code. (Sampled excerpt — aAlign handling and
// some braces/returns are elided.)
286 TInt DWin32Chunk::Allocate(TInt aSize, TInt aGuard, TInt aAlign)
288 // Allocate offset and commit to a disconnected chunk.
292 __KTRACE_OPT(KMMU,Kern::Printf("DWin32Chunk::Allocate %x %x %d",aSize,aGuard,aAlign));
293 if ((iAttributes & (EDoubleEnded|EDisconnected))!=EDisconnected)
295 if (aSize<=0 || aGuard<0 || aSize+aGuard>iMaxSize)
// Work in whole pages for both the data and guard regions.
298 aSize = MM::RoundToPageSize(aSize);
299 aGuard = MM::RoundToPageSize(aGuard);
302 TInt n=(aSize+aGuard)>>MM::RamPageShift;
// Find n consecutive free pages (guard + data) without marking them yet.
304 TInt i=iPageBitMap->AllocConsecutive(n,EFalse); // allocate the offset
306 r=KErrNoMemory; // run out of reserved space for this chunk
309 TInt offset=i<<MM::RamPageShift;
310 __KTRACE_OPT(KMMU,Kern::Printf("Offset %x allocated",offset));
// Commit only the data region; the guard pages stay uncommitted.
311 r=DoCommit(offset+aGuard,aSize);
// Mark guard + data pages as allocated only once the commit succeeded.
314 iPageBitMap->Alloc(i,n);
315 r=offset; // if operation successful, return allocated offset
319 __KTRACE_OPT(KMMU,Kern::Printf("DWin32Chunk::Allocate returns %x",r));
320 __COND_DEBUG_EVENT(r==KErrNone, EEventUpdateChunk, this);
// Decommit a page-aligned region from a disconnected chunk, fixing up the
// cache-memory accounting for any pages that were unlocked at the time.
// (Sampled excerpt — braces, an else arm in the accounting, and the guard
// returns are elided.)
324 TInt DWin32Chunk::Decommit(TInt anOffset, TInt aSize)
326 // Decommit from a disconnected chunk.
329 __KTRACE_OPT(KMMU,Kern::Printf("DWin32Chunk::Decommit %x+%x",anOffset,aSize));
330 if ((iAttributes & (EDoubleEnded|EDisconnected))!=EDisconnected)
332 if (anOffset<0 || aSize<0 || (anOffset+aSize)>iMaxSize)
// Round the region outward to whole pages.
335 TInt top = MM::RoundToPageSize(anOffset + aSize);
336 anOffset &= ~(MM::RamPageSize - 1);
337 aSize = top - anOffset;
341 // limit the range to the home region range
342 __KTRACE_OPT(KMMU,Kern::Printf("Rounded and Clipped range %x+%x",anOffset,aSize));
344 TInt i=anOffset>>MM::RamPageShift;
345 TInt n=aSize>>MM::RamPageShift;
346 // check for decommiting unlocked pages...
347 for(TInt j=i; j<i+n; j++)
349 if(iUnlockedPageBitMap->NotFree(j,1))
// This page was donated to the cache: clear its unlocked bit and take
// it back out of the cache counters.
351 iUnlockedPageBitMap->Free(j);
352 if(MM::ReclaimedCacheMemory)
// If the page had already been reclaimed its memory was counted as
// free, so compensate FreeMemory before DoDecommit adds it again.
354 MM::ReclaimedCacheMemory -= MM::RamPageSize;
355 MM::FreeMemory -= MM::RamPageSize; // reclaimed memory already counted, so adjust
// (Elided else arm:) otherwise it still counted as live cache memory.
358 MM::CacheMemory -= MM::RamPageSize;
361 __KTRACE_OPT(KMMU,Kern::Printf("Calling SelectiveFree(%d,%d)",i,n));
362 iPageBitMap->SelectiveFree(i,n); // free those positions which are actually allocated
363 DoDecommit(anOffset,aSize);
364 MM::CheckMemoryCounters();
366 __DEBUG_EVENT(EEventUpdateChunk, this);
// Donate committed pages of a cache chunk to the paging cache ("unlock"
// them) so the system may reclaim them under memory pressure.
// Only valid on disconnected chunks created with the ECache attribute.
// (Sampled excerpt — guard returns and braces are elided.)
370 TInt DWin32Chunk::Unlock(TInt aOffset, TInt aSize)
372 __KTRACE_OPT(KMMU,Kern::Printf("DWin32Chunk::Unlock %x+%x",aOffset,aSize));
// Unlock/Lock are only supported on cache chunks.
373 if (!(iAttributes&ECache))
375 if ((iAttributes & (EDoubleEnded|EDisconnected))!=EDisconnected)
377 if (aOffset<0 || aSize<0 || (aOffset+aSize)>iMaxSize)
// Round the region outward to whole pages.
380 TInt top = MM::RoundToPageSize(aOffset + aSize);
381 aOffset &= ~(MM::RamPageSize - 1);
382 aSize = top - aOffset;
386 TInt i=aOffset>>MM::RamPageShift;
387 TInt n=aSize>>MM::RamPageShift;
// Every page in the range must be committed before it can be unlocked.
389 if (iPageBitMap->NotAllocated(i,n))
390 r=KErrNotFound; // some pages aren't committed
393 for(TInt j=i; j<i+n; j++)
// Skip pages that are already unlocked; mark the rest and grow the
// cache-memory counter by one page each.
395 if(iUnlockedPageBitMap->NotAllocated(j,1))
397 // unlock this page...
398 iUnlockedPageBitMap->Alloc(j,1);
399 MM::CacheMemory += MM::RamPageSize;
405 MM::CheckMemoryCounters();
// Re-lock previously unlocked (cache-donated) pages of a cache chunk.
// If any page was already reclaimed, the lock fails and the whole range is
// decommitted so the caller sees consistent state.
// (Sampled excerpt — braces, the failure branch header and returns between
// the visible lines are elided.)
410 TInt DWin32Chunk::Lock(TInt aOffset, TInt aSize)
412 __KTRACE_OPT(KMMU,Kern::Printf("DWin32Chunk::Lock %x+%x",aOffset,aSize));
// Unlock/Lock are only supported on cache chunks.
413 if (!(iAttributes&ECache))
415 if ((iAttributes & (EDoubleEnded|EDisconnected))!=EDisconnected)
417 if (aOffset<0 || aSize<0 || (aOffset+aSize)>iMaxSize)
// Round the region outward to whole pages.
420 TInt top = MM::RoundToPageSize(aOffset + aSize);
421 aOffset &= ~(MM::RamPageSize - 1);
422 aSize = top - aOffset;
426 TInt i=aOffset>>MM::RamPageShift;
427 TInt n=aSize>>MM::RamPageShift;
// Every page in the range must still be committed.
429 if (iPageBitMap->NotAllocated(i,n))
430 r=KErrNotFound; // some pages aren't committed
434 for(TInt j=i; j<i+n; j++)
436 if(iUnlockedPageBitMap->NotFree(j,1))
// Reclaimed cache memory present means the page's contents may be gone
// — the lock cannot succeed (failure handling elided here).
439 if(MM::ReclaimedCacheMemory)
// Page successfully re-locked: clear its unlocked bit and shrink the
// cache counter.
444 iUnlockedPageBitMap->Free(j);
445 MM::CacheMemory -= MM::RamPageSize;
451 // decommit memory on error...
// Error path: undo the cache accounting for every still-unlocked page in
// the range (same fix-up as Decommit), then decommit the whole range.
452 for(TInt j=i; j<i+n; j++)
454 if(iUnlockedPageBitMap->NotFree(j,1))
456 iUnlockedPageBitMap->Free(j);
457 if(MM::ReclaimedCacheMemory)
459 MM::ReclaimedCacheMemory -= MM::RamPageSize;
460 MM::FreeMemory -= MM::RamPageSize; // reclaimed memory already counted, so adjust
463 MM::CacheMemory -= MM::RamPageSize;
466 iPageBitMap->SelectiveFree(i,n);
467 DoDecommit(aOffset,aSize);
469 MM::CheckMemoryCounters();
// Verify the current thread's process may access this chunk: private chunks
// are restricted to the owning process (and the kernel process).
// (Sampled excerpt — the success return is elided.)
474 TInt DWin32Chunk::CheckAccess()
476 DProcess* pP=TheCurrentThread->iOwningProcess;
477 if (iAttributes&EPrivate)
// Private chunk: deny any process other than the owner or the kernel.
479 if (iOwningProcess && iOwningProcess!=pP && pP!=K::TheKernelProcess)
480 return KErrAccessDenied;
// Translate a chunk offset to a kernel-usable linear address, but only if
// the whole [aOffset, aOffset+aSize) range lies within permanently mapped
// pages of a shared-kernel chunk. (Sampled excerpt — error returns between
// the visible guards are elided.)
485 TInt DWin32Chunk::Address(TInt aOffset, TInt aSize, TLinAddr& aKernelAddress)
// Only shared-kernel chunks have a permanent-page bitmap (see DoCreate).
487 if(!iPermanentPageBitMap)
488 return KErrAccessDenied;
// Unsigned comparisons also reject negative aOffset/aSize.
489 if(TUint(aOffset)>=TUint(iMaxSize))
491 if(TUint(aOffset+aSize)>TUint(iMaxSize))
// Compute the page span covering [aOffset, aOffset+aSize).
495 TInt pageShift = MM::RamPageShift;
496 TInt start = aOffset>>pageShift;
497 TInt size = ((aOffset+aSize-1)>>pageShift)-start+1;
// Every page in the span must be permanently mapped.
498 if(iPermanentPageBitMap->NotAllocated(start,size))
500 aKernelAddress = (TLinAddr)iBase+aOffset;
// Physical-page substitution has no meaning on the emulator (no real
// physical addresses) — always a fault.
504 void DWin32Chunk::Substitute(TInt /*aOffset*/, TPhysAddr /*aOldAddr*/, TPhysAddr /*aNewAddr*/)
506 MM::Panic(MM::ENotSupportedOnEmulator);
// Emulator stand-in for physical address lookup: validates the range via
// Address(), then reports "physical" addresses identical to the linear
// ones, optionally filling a per-page address list.
// (Sampled excerpt — the early-error return and list-store line are elided.)
509 TInt DWin32Chunk::PhysicalAddress(TInt aOffset, TInt aSize, TLinAddr& aKernelAddress, TUint32& aPhysicalAddress, TUint32* aPhysicalPageList)
// Reuse Address() for all range/permanence validation.
511 TInt r=Address(aOffset,aSize,aKernelAddress);
515 // return fake physical addresses which are the same as the linear address
516 aPhysicalAddress = aKernelAddress;
518 TInt pageShift = MM::RamPageShift;
// First and last page base addresses covering the range (shift down/up
// truncates to a page boundary).
519 TUint32 page = aKernelAddress>>pageShift<<pageShift;
520 TUint32 lastPage = (aKernelAddress+aSize-1)>>pageShift<<pageShift;
521 TUint32* pageList = aPhysicalPageList;
522 TUint32 pageSize = 1<<pageShift;
// One entry per page; the store through pageList is on an elided line.
524 for(; page<=lastPage; page += pageSize)
// Ask the MM layer (and thus Win32) to commit [aOffset, aOffset+aSize) in
// this chunk. Caller guarantees the pages are not already committed, so
// counters/bitmaps can be updated unconditionally on success.
// (Sampled excerpt — iSize update and return are elided.)
529 TInt DWin32Chunk::DoCommit(TInt aOffset, TInt aSize)
531 // Get win32 to commit the pages.
532 // We know they are not already committed - this is guaranteed by the caller so we can update the memory info easily
// Self-modifying-code chunks need executable pages.
538 TBool execute = (iChunkType == EUserSelfModCode) ? ETrue : EFalse;
540 TInt r = MM::Commit(reinterpret_cast<TLinAddr>(iBase + aOffset), aSize, iClearByte, execute);
// Shared-kernel chunks: committed pages become permanently mapped, so
// record them for later Address()/PhysicalAddress() validation.
546 if(iPermanentPageBitMap)
547 iPermanentPageBitMap->Alloc(aOffset>>MM::RamPageShift,aSize>>MM::RamPageShift);
549 __KTRACE_OPT(KMEMTRACE, {Kern::Printf("MT:A %d %x %x %O",NTickCount(),this,iSize,this);});
551 BTraceContext12(BTrace::EChunks,BTrace::EChunkMemoryAllocated,this,aOffset,aSize);
// Ask the MM layer to decommit [anOffset, anOffset+aSize). Pages may or may
// not actually be committed; MM::Decommit reports how much was really freed
// so the counters stay correct. (Sampled excerpt — the iSize/counter update
// using 'freed' is on elided lines.)
559 void DWin32Chunk::DoDecommit(TInt anOffset, TInt aSize)
561 // Get win32 to decommit the pages.
562 // The pages may or may not be committed: we need to find out which ones are so that the memory info is updated correctly
565 TInt freed = MM::Decommit(reinterpret_cast<TLinAddr>(iBase+anOffset), aSize);
568 __KTRACE_OPT(KMEMTRACE, {Kern::Printf("MT:A %d %x %x %O",NTickCount(),this,iSize,this);});
// Round aSize up to the next multiple of the emulator's RAM chunk size.
// (Return statement elided in this excerpt — presumably (aSize+m)&~m, the
// standard mask idiom the visible line sets up; verify against full source.)
571 TUint32 MM::RoundToChunkSize(TUint32 aSize)
573 TUint32 m=MM::RamChunkSize-1;
577 void DWin32Chunk::BTracePrime(TInt aCategory)
579 DChunk::BTracePrime(aCategory);
582 if (aCategory == BTrace::EChunks || aCategory == -1)
585 // it is essential that the following code is in braces because __LOCK_HOST
586 // creates an object which must be destroyed before the MM::Signal() at the end.
589 // output traces for all memory which has been committed to the chunk...
591 while(offset<iMaxSize)
593 MEMORY_BASIC_INFORMATION info;
594 VirtualQuery(LPVOID(iBase + offset), &info, sizeof(info));
595 TUint size = Min(iMaxSize-offset, info.RegionSize);
596 if(info.State == MEM_COMMIT)
597 BTrace12(BTrace::EChunks, BTrace::EChunkMemoryAllocated,this,offset,size);