os/kernelhwsrv/kernel/eka/memmodel/emul/win32/mchunk.cpp
changeset 0 bde4ae8d615e
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/os/kernelhwsrv/kernel/eka/memmodel/emul/win32/mchunk.cpp	Fri Jun 15 03:10:57 2012 +0200
     1.3 @@ -0,0 +1,604 @@
     1.4 +// Copyright (c) 1994-2009 Nokia Corporation and/or its subsidiary(-ies).
     1.5 +// All rights reserved.
     1.6 +// This component and the accompanying materials are made available
     1.7 +// under the terms of the License "Eclipse Public License v1.0"
     1.8 +// which accompanies this distribution, and is available
     1.9 +// at the URL "http://www.eclipse.org/legal/epl-v10.html".
    1.10 +//
    1.11 +// Initial Contributors:
    1.12 +// Nokia Corporation - initial contribution.
    1.13 +//
    1.14 +// Contributors:
    1.15 +//
    1.16 +// Description:
    1.17 +// e32\memmodel\emul\win32\mchunk.cpp
    1.18 +// 
    1.19 +//
    1.20 +
    1.21 +#include "memmodel.h"
    1.22 +#include <emulator.h>
    1.23 +
DWin32Chunk::~DWin32Chunk()
//
// Destructor: return the chunk's Windows address range to the OS, fix up the
// kernel's memory accounting, free the page bitmaps and queue any
// destruction-notification DFC.
//
	{
	__KTRACE_OPT(KTHREAD,Kern::Printf("DWin32Chunk destruct %O",this));

	if (iBase)
		{
		// Decommit any committed pages, then release the whole reservation.
		VirtualFree(LPVOID(iBase), iMaxSize, MEM_DECOMMIT);
		VirtualFree(LPVOID(iBase), 0, MEM_RELEASE);
		MM::Wait();
		// All of this chunk's committed memory is now free again.
		MM::FreeMemory += iSize;
		if(iUnlockedPageBitMap)
			{
			// Pages marked unlocked were counted as cache memory (or as
			// reclaimed cache memory once the system paged them out);
			// subtract them from those counters, draining CacheMemory first.
			TInt unlockedMemory = MM::RamPageSize*(iUnlockedPageBitMap->iSize-iUnlockedPageBitMap->iAvail);
			if(unlockedMemory<=MM::CacheMemory)
				MM::CacheMemory-=unlockedMemory;
			else
				{
				MM::ReclaimedCacheMemory -= unlockedMemory-MM::CacheMemory;
				MM::CacheMemory = 0;
				}
			MM::CheckMemoryCounters();
			}
		MM::Signal();
		}
	__KTRACE_OPT(KMEMTRACE, {MM::Wait();Kern::Printf("MT:D %d %x %O",NTickCount(),this,this);MM::Signal();});
#ifdef BTRACE_CHUNKS
	BTraceContext4(BTrace::EChunks,BTrace::EChunkDestroyed,this);
#endif
	delete iPageBitMap;
	delete iUnlockedPageBitMap;
	delete iPermanentPageBitMap;

	// Atomically claim the notification DFC (so it is queued at most once)
	// and enqueue it if one was set.
	TDfc* dfc = (TDfc*)__e32_atomic_swp_ord_ptr(&iDestroyedDfc, 0);
	if (dfc)
		dfc->Enque();
	}
    1.60 +
    1.61 +
    1.62 +TUint8* DWin32Chunk::Base(DProcess* /*aProcess*/)
    1.63 +	{
    1.64 +	return iBase;
    1.65 +	}
    1.66 +
    1.67 +
TInt DWin32Chunk::DoCreate(SChunkCreateInfo& aInfo)
//
// Second-phase construction: allocate the page bitmaps needed for this chunk
// type and reserve (but do not commit) the chunk's address range from Windows.
// Returns KErrNone, or a system-wide error code on failure.
//
	{
	__ASSERT_COMPILE(!(EMMChunkAttributesMask & EChunkAttributesMask));

	// 'Memory not owned' chunks are not supported by the emulator model.
	if(iAttributes&EMemoryNotOwned)
		return KErrNotSupported;
	if (aInfo.iMaxSize<=0)
		return KErrArgument;
	iMaxSize=MM::RoundToChunkSize(aInfo.iMaxSize);
	TInt maxpages=iMaxSize>>MM::RamPageShift;
	if (iAttributes & EDisconnected)
		{
		// Disconnected chunks track committed and unlocked pages per-page.
		TBitMapAllocator* pM=TBitMapAllocator::New(maxpages,ETrue);
		if (!pM)
			return KErrNoMemory;
		TBitMapAllocator* pUM=TBitMapAllocator::New(maxpages,ETrue);
		if (!pUM)
			{
			delete pM;
			return KErrNoMemory;
			}
		iPageBitMap=pM;
		iUnlockedPageBitMap=pUM;
		__KTRACE_OPT(KMMU,Kern::Printf("PageBitMap at %08x, MaxPages %d",pM,maxpages));
		}
	switch (iChunkType)
		{
	case ESharedKernelSingle:
	case ESharedKernelMultiple:
		{
		// Shared kernel chunks additionally record pages that have ever been
		// committed, so Address()/PhysicalAddress() can validate lookups.
		TBitMapAllocator* pM=TBitMapAllocator::New(maxpages,ETrue);
		if (!pM)
			return KErrNoMemory;
		iPermanentPageBitMap = pM;
		}
		// fall through to next case...
	case ESharedIo:
	case EKernelMessage:
	case EUserSelfModCode:
	case EUserData:
		{
		// Reserve the whole address range now; pages are committed later.
		// Self-modifying-code chunks need execute permission.
		DWORD protect = (iChunkType == EUserSelfModCode) ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
		LPVOID base = VirtualAlloc(NULL, iMaxSize, MEM_RESERVE, protect);
		if (!base)
			return KErrNoMemory;
		iBase  = (TUint8*) base;
		__KTRACE_OPT(KMMU,Kern::Printf("Reserved: Base=%08x, Size=%08x",iBase,iMaxSize));
		}
		break;
	default:
		break;
		}
	__KTRACE_OPT(KMEMTRACE, {MM::Wait();Kern::Printf("MT:C %d %x %O",NTickCount(),this,this);MM::Signal();});
#ifdef BTRACE_CHUNKS
	TKName nameBuf;
	Name(nameBuf);
	BTraceContextN(BTrace::EChunks,BTrace::EChunkCreated,this,iMaxSize,nameBuf.Ptr(),nameBuf.Size());
	if(iOwningProcess)
		BTrace8(BTrace::EChunks,BTrace::EChunkOwner,this,iOwningProcess);
	BTraceContext12(BTrace::EChunks,BTrace::EChunkInfo,this,iChunkType,iAttributes);
#endif
	return KErrNone;
	}
   1.131 +
   1.132 +TInt DWin32Chunk::Adjust(TInt aNewSize)
   1.133 +//
   1.134 +// Adjust a standard chunk.
   1.135 +//
   1.136 +	{
   1.137 +
   1.138 +	__KTRACE_OPT(KMMU,Kern::Printf("DWin32Chunk::Adjust %08x",aNewSize));
   1.139 +	if (iAttributes & (EDoubleEnded|EDisconnected))
   1.140 +		return KErrGeneral;
   1.141 +	if (aNewSize<0 || aNewSize>iMaxSize)
   1.142 +		return KErrArgument;
   1.143 +
   1.144 +	TInt r=KErrNone;
   1.145 +	TInt newSize=MM::RoundToPageSize(aNewSize);
   1.146 +	if (newSize!=iSize)
   1.147 +		{
   1.148 +		MM::Wait();
   1.149 +		if (newSize>iSize)
   1.150 +			{
   1.151 +			__KTRACE_OPT(KMMU,Kern::Printf("DWin32Chunk::Adjust growing"));
   1.152 +			r=DoCommit(iSize,newSize-iSize);
   1.153 +			}
   1.154 +		else if (newSize<iSize)
   1.155 +			{
   1.156 +			__KTRACE_OPT(KMMU,Kern::Printf("DWin32Chunk::Adjust shrinking"));
   1.157 +			DoDecommit(newSize,iSize-newSize);
   1.158 +			}
   1.159 +		MM::Signal();
   1.160 +		}
   1.161 +
   1.162 +	__COND_DEBUG_EVENT(r==KErrNone, EEventUpdateChunk, this);
   1.163 +	__KTRACE_OPT(KMMU,Kern::Printf("DWin32Chunk %O adjusted to %x",this,iSize));
   1.164 +	return r;
   1.165 +	}
   1.166 +
TInt DWin32Chunk::AdjustDoubleEnded(TInt aBottom, TInt aTop)
//
// Adjust a double-ended chunk so its committed region becomes [aBottom,aTop).
// Pages in the intersection of the old and new regions are kept; the rest are
// decommitted/committed as required. aBottom is rounded down and aTop rounded
// up to page boundaries.
//
	{
	__KTRACE_OPT(KMMU,Kern::Printf("DWin32Chunk::AdjustDoubleEnded %x-%x",aBottom,aTop));
	if ((iAttributes & (EDoubleEnded|EDisconnected))!=EDoubleEnded)
		return KErrGeneral;
	if (0>aBottom || aBottom>aTop || aTop>iMaxSize)
		return KErrArgument;
	aBottom &= ~(MM::RamPageSize-1);
	aTop = MM::RoundToPageSize(aTop);
	TInt newSize=aTop-aBottom;

	MM::Wait();
	TInt initBottom=iStartPos;
	TInt initTop=iStartPos+iSize;
	TInt nBottom=Max(aBottom,initBottom);	// intersection bottom
	TInt nTop=Min(aTop,initTop);	// intersection top
	TInt r=KErrNone;
	if (nBottom<nTop)
		{
		__KTRACE_OPT(KMMU,Kern::Printf("Initial and final regions intersect"));
		// Shrink away the parts of the old region outside the intersection...
		if (initBottom<nBottom)
			{
			iStartPos=aBottom;
			DoDecommit(initBottom,nBottom-initBottom);
			}
		if (initTop>nTop)
			DoDecommit(nTop,initTop-nTop);	// this changes iSize
		// ...then grow the parts of the new region outside the intersection,
		// undoing the bottom extension if the top extension fails.
		if (aBottom<nBottom)
			{
			r=DoCommit(aBottom,nBottom-aBottom);
			if (r==KErrNone)
				{
				if (aTop>nTop)
					r=DoCommit(nTop,aTop-nTop);
				if (r==KErrNone)
					iStartPos=aBottom;
				else
					DoDecommit(aBottom,nBottom-aBottom);
				}
			}
		else if (aTop>nTop)
			r=DoCommit(nTop,aTop-nTop);
		}
	else
		{
		// No overlap: drop the whole old region and commit the new one.
		__KTRACE_OPT(KMMU,Kern::Printf("Initial and final regions disjoint"));
		if (iSize)
			DoDecommit(initBottom,iSize);
		iStartPos=aBottom;
		if (newSize)
			r=DoCommit(iStartPos,newSize);
		}
	MM::Signal();
	__COND_DEBUG_EVENT(r==KErrNone, EEventUpdateChunk, this);
	__KTRACE_OPT(KMMU,Kern::Printf("DWin32Chunk %O adjusted to %x+%x",this,iStartPos,iSize));
	return r;
	}
   1.227 +
   1.228 +
TInt DWin32Chunk::Commit(TInt aOffset, TInt aSize, TCommitType aCommitType, TUint32* aExtraArg)
//
// Commit pages to a disconnected chunk. The range is widened to page
// boundaries; it must not intersect any already-committed page.
// Returns KErrAlreadyExists if any page in the range is committed.
//
	{
	__KTRACE_OPT(KMMU,Kern::Printf("DWin32Chunk::Commit %x+%x type=%d extra=%08x",aOffset,aSize,aCommitType,aExtraArg));
	if ((iAttributes & (EDoubleEnded|EDisconnected))!=EDisconnected)
		return KErrGeneral;
	if (aOffset<0 || aSize<0 || (aOffset+aSize)>iMaxSize)
		return KErrArgument;
	if(LOGICAL_XOR((TInt)aCommitType&DChunk::ECommitPhysicalMask, iAttributes&DChunk::EMemoryNotOwned))
		return KErrNotSupported;  // Commit type doesn't match 'memory owned' type

	// Round the range out to whole pages.
	TInt top = MM::RoundToPageSize(aOffset + aSize);
	aOffset &= ~(MM::RamPageSize - 1);
	aSize = top - aOffset;

	TInt r=KErrNone;
	TInt i=aOffset>>MM::RamPageShift;
	TInt n=aSize>>MM::RamPageShift;
	MM::Wait();
	if (iPageBitMap->NotFree(i,n))
		r=KErrAlreadyExists;
	else
		{
		switch(aCommitType)
			{
		case DChunk::ECommitDiscontiguous:
			// No extra argument is defined for this commit type.
			if(aExtraArg==0)
				r=DoCommit(aOffset,aSize);
			else
				r = KErrArgument;
			break;

		case DChunk::ECommitContiguous:
			r=DoCommit(aOffset,aSize);
			 // Return a fake physical address which is == linear address
			// NOTE(review): aExtraArg is dereferenced without a null check
			// here - presumably callers always supply it for this commit
			// type; confirm against the other memory models.
			if(r==KErrNone)
				*aExtraArg = (TUint)(iBase+aOffset);
			break;

		case DChunk::ECommitDiscontiguousPhysical:
		case DChunk::ECommitContiguousPhysical:
			// The emulator doesn't do physical address allocation
			r=KErrNotSupported;
			break;

		default:
			r = KErrArgument;
			break;
			};
		// Only mark the pages allocated once the commit has succeeded.
		if (r==KErrNone)
			iPageBitMap->Alloc(i,n);
		}
	MM::CheckMemoryCounters();
	MM::Signal();
	__COND_DEBUG_EVENT(r==KErrNone, EEventUpdateChunk, this);
	return r;
	}
   1.288 +
   1.289 +TInt DWin32Chunk::Allocate(TInt aSize, TInt aGuard, TInt aAlign)
   1.290 +//
   1.291 +// Allocate offset and commit to a disconnected chunk.
   1.292 +//
   1.293 +	{
   1.294 +	(void)aAlign;
   1.295 +	__KTRACE_OPT(KMMU,Kern::Printf("DWin32Chunk::Allocate %x %x %d",aSize,aGuard,aAlign));
   1.296 +	if ((iAttributes & (EDoubleEnded|EDisconnected))!=EDisconnected)
   1.297 +		return KErrGeneral;
   1.298 +	if (aSize<=0 || aGuard<0 || aSize+aGuard>iMaxSize)
   1.299 +		return KErrArgument;
   1.300 +
   1.301 +	aSize = MM::RoundToPageSize(aSize);
   1.302 +	aGuard = MM::RoundToPageSize(aGuard);
   1.303 +
   1.304 +	TInt r=KErrNone;
   1.305 +	TInt n=(aSize+aGuard)>>MM::RamPageShift;
   1.306 +	MM::Wait();
   1.307 +	TInt i=iPageBitMap->AllocConsecutive(n,EFalse);		// allocate the offset
   1.308 +	if (i<0)
   1.309 +		r=KErrNoMemory;		// run out of reserved space for this chunk
   1.310 +	else
   1.311 +		{
   1.312 +		TInt offset=i<<MM::RamPageShift;
   1.313 +		__KTRACE_OPT(KMMU,Kern::Printf("Offset %x allocated",offset));
   1.314 +		r=DoCommit(offset+aGuard,aSize);
   1.315 +		if (r==KErrNone)
   1.316 +			{
   1.317 +			iPageBitMap->Alloc(i,n);
   1.318 +			r=offset;		// if operation successful, return allocated offset
   1.319 +			}
   1.320 +		}
   1.321 +	MM::Signal();
   1.322 +	__KTRACE_OPT(KMMU,Kern::Printf("DWin32Chunk::Allocate returns %x",r));
   1.323 +	__COND_DEBUG_EVENT(r==KErrNone, EEventUpdateChunk, this);
   1.324 +	return r;
   1.325 +	}
   1.326 +
TInt DWin32Chunk::Decommit(TInt anOffset, TInt aSize)
//
// Decommit a (page-rounded) range from a disconnected chunk. Pages in the
// range that are currently 'unlocked' cache pages have their cache accounting
// undone first. Pages that are not committed are silently skipped.
//
	{
	__KTRACE_OPT(KMMU,Kern::Printf("DWin32Chunk::Decommit %x+%x",anOffset,aSize));
	if ((iAttributes & (EDoubleEnded|EDisconnected))!=EDisconnected)
		return KErrGeneral;
	if (anOffset<0 || aSize<0 || (anOffset+aSize)>iMaxSize)
		return KErrArgument;
	
	// Round the range out to whole pages.
	TInt top = MM::RoundToPageSize(anOffset + aSize);
	anOffset &= ~(MM::RamPageSize - 1);
	aSize = top - anOffset;

	MM::Wait();

	// limit the range to the home region range
	__KTRACE_OPT(KMMU,Kern::Printf("Rounded and Clipped range %x+%x",anOffset,aSize));

	TInt i=anOffset>>MM::RamPageShift;
	TInt n=aSize>>MM::RamPageShift;
	// check for decommiting unlocked pages...
	for(TInt j=i; j<i+n; j++)
		{
		if(iUnlockedPageBitMap->NotFree(j,1))
			{
			// This page was unlocked: remove it from the cache counters,
			// preferring ReclaimedCacheMemory (whose pages were already
			// counted as free, hence the FreeMemory adjustment).
			iUnlockedPageBitMap->Free(j);
			if(MM::ReclaimedCacheMemory)
				{
				MM::ReclaimedCacheMemory -= MM::RamPageSize;
				MM::FreeMemory -= MM::RamPageSize; // reclaimed memory already counted, so adjust
				}
			else
				MM::CacheMemory -= MM::RamPageSize;
			}
		}
	__KTRACE_OPT(KMMU,Kern::Printf("Calling SelectiveFree(%d,%d)",i,n));
	iPageBitMap->SelectiveFree(i,n);	// free those positions which are actually allocated
	DoDecommit(anOffset,aSize);
	MM::CheckMemoryCounters();
	MM::Signal();
	__DEBUG_EVENT(EEventUpdateChunk, this);
	return KErrNone;
	}
   1.372 +
TInt DWin32Chunk::Unlock(TInt aOffset, TInt aSize)
//
// Mark committed pages of a disconnected cache chunk as 'unlocked', making
// them candidates for reclamation. Every page in the (rounded) range must be
// committed; already-unlocked pages are left as they are.
//
	{
	__KTRACE_OPT(KMMU,Kern::Printf("DWin32Chunk::Unlock %x+%x",aOffset,aSize));
	// Only cache chunks support lock/unlock.
	if (!(iAttributes&ECache))
		return KErrGeneral;
	if ((iAttributes & (EDoubleEnded|EDisconnected))!=EDisconnected)
		return KErrGeneral;
	if (aOffset<0 || aSize<0 || (aOffset+aSize)>iMaxSize)
		return KErrArgument;
	
	// Round the range out to whole pages.
	TInt top = MM::RoundToPageSize(aOffset + aSize);
	aOffset &= ~(MM::RamPageSize - 1);
	aSize = top - aOffset;

	MM::Wait();

	TInt i=aOffset>>MM::RamPageShift;
	TInt n=aSize>>MM::RamPageShift;
	TInt r;
	if (iPageBitMap->NotAllocated(i,n))
		r=KErrNotFound; // some pages aren't committed
	else
		{
		for(TInt j=i; j<i+n; j++)
			{
			if(iUnlockedPageBitMap->NotAllocated(j,1))
				{
				// unlock this page...
				iUnlockedPageBitMap->Alloc(j,1);
				MM::CacheMemory += MM::RamPageSize;
				}
			}
		r = KErrNone;
		}

	MM::CheckMemoryCounters();
	MM::Signal();
	return r;
	}
   1.412 +
TInt DWin32Chunk::Lock(TInt aOffset, TInt aSize)
//
// Re-lock previously unlocked pages of a disconnected cache chunk. Fails with
// KErrNotFound if any page isn't committed or if unlocked pages have already
// been reclaimed; on failure the whole (rounded) range is decommitted.
//
	{
	__KTRACE_OPT(KMMU,Kern::Printf("DWin32Chunk::Lock %x+%x",aOffset,aSize));
	// Only cache chunks support lock/unlock.
	if (!(iAttributes&ECache))
		return KErrGeneral;
	if ((iAttributes & (EDoubleEnded|EDisconnected))!=EDisconnected)
		return KErrGeneral;
	if (aOffset<0 || aSize<0 || (aOffset+aSize)>iMaxSize)
		return KErrArgument;
	
	// Round the range out to whole pages.
	TInt top = MM::RoundToPageSize(aOffset + aSize);
	aOffset &= ~(MM::RamPageSize - 1);
	aSize = top - aOffset;

	MM::Wait();

	TInt i=aOffset>>MM::RamPageShift;
	TInt n=aSize>>MM::RamPageShift;
	TInt r;
	if (iPageBitMap->NotAllocated(i,n))
		r=KErrNotFound; // some pages aren't committed
	else
		{
		r = KErrNone;
		for(TInt j=i; j<i+n; j++)
			{
			if(iUnlockedPageBitMap->NotFree(j,1))
				{
				// lock this page...
				// Any outstanding reclaimed cache memory means unlocked page
				// content may already have been discarded, so locking cannot
				// succeed.
				if(MM::ReclaimedCacheMemory)
					{
					r = KErrNotFound;
					break;
					}
				iUnlockedPageBitMap->Free(j);
				MM::CacheMemory -= MM::RamPageSize;
				}
			}
		}
	if(r!=KErrNone)
		{
		// decommit memory on error...
		// Undo cache accounting for any pages still marked unlocked, then
		// free the bitmap positions and decommit the whole range.
		for(TInt j=i; j<i+n; j++)
			{
			if(iUnlockedPageBitMap->NotFree(j,1))
				{
				iUnlockedPageBitMap->Free(j);
				if(MM::ReclaimedCacheMemory)
					{
					MM::ReclaimedCacheMemory -= MM::RamPageSize;
					MM::FreeMemory -= MM::RamPageSize; // reclaimed memory already counted, so adjust
					}
				else
					MM::CacheMemory -= MM::RamPageSize;
				}
			}
		iPageBitMap->SelectiveFree(i,n);
		DoDecommit(aOffset,aSize);
		}
	MM::CheckMemoryCounters();
	MM::Signal();
	return r;
	}
   1.476 +
   1.477 +TInt DWin32Chunk::CheckAccess()
   1.478 +	{
   1.479 +	DProcess* pP=TheCurrentThread->iOwningProcess;
   1.480 +	if (iAttributes&EPrivate)
   1.481 +		{
   1.482 +		if (iOwningProcess && iOwningProcess!=pP && pP!=K::TheKernelProcess)
   1.483 +			return KErrAccessDenied;
   1.484 +		}
   1.485 +	return KErrNone;
   1.486 +	}
   1.487 +
   1.488 +TInt DWin32Chunk::Address(TInt aOffset, TInt aSize, TLinAddr& aKernelAddress)
   1.489 +	{
   1.490 +	if(!iPermanentPageBitMap)
   1.491 +		return KErrAccessDenied;
   1.492 +	if(TUint(aOffset)>=TUint(iMaxSize))
   1.493 +		return KErrArgument;
   1.494 +	if(TUint(aOffset+aSize)>TUint(iMaxSize))
   1.495 +		return KErrArgument;
   1.496 +	if(aSize<=0)
   1.497 +		return KErrArgument;
   1.498 +	TInt pageShift = MM::RamPageShift;
   1.499 +	TInt start = aOffset>>pageShift;
   1.500 +	TInt size = ((aOffset+aSize-1)>>pageShift)-start+1;
   1.501 +	if(iPermanentPageBitMap->NotAllocated(start,size))
   1.502 +		return KErrNotFound;
   1.503 +	aKernelAddress = (TLinAddr)iBase+aOffset;
   1.504 +	return KErrNone;
   1.505 +	}
   1.506 +
void DWin32Chunk::Substitute(TInt /*aOffset*/, TPhysAddr /*aOldAddr*/, TPhysAddr /*aNewAddr*/)
	{
	// Physical page substitution has no meaning on the emulator.
	MM::Panic(MM::ENotSupportedOnEmulator);
	}
   1.511 +
   1.512 +TInt DWin32Chunk::PhysicalAddress(TInt aOffset, TInt aSize, TLinAddr& aKernelAddress, TUint32& aPhysicalAddress, TUint32* aPhysicalPageList)
   1.513 +	{
   1.514 +	TInt r=Address(aOffset,aSize,aKernelAddress);
   1.515 +	if(r!=KErrNone)
   1.516 +		return r;
   1.517 +
   1.518 +	// return fake physical addresses which are the same as the linear address
   1.519 +	aPhysicalAddress = 	aKernelAddress;
   1.520 +
   1.521 +	TInt pageShift = MM::RamPageShift;
   1.522 +	TUint32 page = aKernelAddress>>pageShift<<pageShift;
   1.523 +	TUint32 lastPage = (aKernelAddress+aSize-1)>>pageShift<<pageShift;
   1.524 +	TUint32* pageList = aPhysicalPageList;
   1.525 +	TUint32 pageSize = 1<<pageShift;
   1.526 +	if(pageList)
   1.527 +		for(; page<=lastPage; page += pageSize)
   1.528 +			*pageList++ = page;
   1.529 +	return KErrNone;
   1.530 +	}
   1.531 +
TInt DWin32Chunk::DoCommit(TInt aOffset, TInt aSize)
//
// Get win32 to commit the pages.
// We know they are not already committed - this is guaranteed by the caller so we can update the memory info easily
//
// Must be called with the MM lock held. Returns KErrNone or KErrNoMemory.
//
	{
	if (aSize==0)
		return KErrNone;

	// Self-modifying-code chunks need executable pages.
	TBool execute = (iChunkType == EUserSelfModCode) ? ETrue : EFalse;

	TInt r = MM::Commit(reinterpret_cast<TLinAddr>(iBase + aOffset), aSize, iClearByte, execute);

	if (r == KErrNone)
		{
		iSize += aSize;

		// Shared kernel chunks permanently remember every committed page so
		// Address()/PhysicalAddress() can validate later lookups.
		if(iPermanentPageBitMap)
	        iPermanentPageBitMap->Alloc(aOffset>>MM::RamPageShift,aSize>>MM::RamPageShift);

		__KTRACE_OPT(KMEMTRACE, {Kern::Printf("MT:A %d %x %x %O",NTickCount(),this,iSize,this);});
#ifdef BTRACE_CHUNKS
		BTraceContext12(BTrace::EChunks,BTrace::EChunkMemoryAllocated,this,aOffset,aSize);
#endif
		return KErrNone;
		}

	return KErrNoMemory;
	}
   1.561 +
   1.562 +void DWin32Chunk::DoDecommit(TInt anOffset, TInt aSize)
   1.563 +//
   1.564 +// Get win32 to decommit the pages.
   1.565 +// The pages may or may not be committed: we need to find out which ones are so that the memory info is updated correctly
   1.566 +//
   1.567 +	{
   1.568 +	TInt freed = MM::Decommit(reinterpret_cast<TLinAddr>(iBase+anOffset), aSize);
   1.569 +
   1.570 +	iSize -= freed;
   1.571 +	__KTRACE_OPT(KMEMTRACE, {Kern::Printf("MT:A %d %x %x %O",NTickCount(),this,iSize,this);});
   1.572 +	}
   1.573 +
   1.574 +TUint32 MM::RoundToChunkSize(TUint32 aSize)
   1.575 +	{
   1.576 +	TUint32 m=MM::RamChunkSize-1;
   1.577 +	return (aSize+m)&~m;
   1.578 +	}
   1.579 +
void DWin32Chunk::BTracePrime(TInt aCategory)
//
// Emit the BTrace records describing this chunk's current state, including
// one EChunkMemoryAllocated record per committed region (discovered by
// walking the reservation with VirtualQuery).
//
	{
	DChunk::BTracePrime(aCategory);
	
#ifdef BTRACE_CHUNKS
	if (aCategory == BTrace::EChunks || aCategory == -1)
		{
		MM::Wait();
		// it is essential that the following code is in braces because __LOCK_HOST
		// creates an object which must be destroyed before the MM::Signal() at the end.
			{
			__LOCK_HOST;
			// output traces for all memory which has been committed to the chunk...
			TInt offset=0;
			while(offset<iMaxSize)
				{
				// Query Windows for the state of the region starting at
				// iBase+offset, clipping its size to the chunk's range.
				MEMORY_BASIC_INFORMATION info;
				VirtualQuery(LPVOID(iBase + offset), &info, sizeof(info));
				TUint size = Min(iMaxSize-offset, info.RegionSize);
				if(info.State == MEM_COMMIT)
					BTrace12(BTrace::EChunks, BTrace::EChunkMemoryAllocated,this,offset,size);
				offset += size;
				}
			}
			MM::Signal();
		}
#endif
	}