// Copyright (c) 1994-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\memmodel\epoc\moving\mchunk.cpp
//
//

#include "memmodel.h"
#include "cache_maintenance.h"
#include <mmubase.inl>
#include <ramalloc.h>

DMemModelChunk::DMemModelChunk()
	{
	}

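/**
Final clean-up for a chunk on the moving memory model.
Decommits any remaining committed memory, releases the chunk's home region
where one was allocated in the kernel section, frees the PDE and page bitmaps
and queues iDestroyedDfc if one was supplied.
*/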
void DMemModelChunk::Destruct()
	{
	__KTRACE_OPT(KTHREAD,Kern::Printf("DMemModelChunk destruct %O",this));
	Mmu& m = Mmu::Get();
	TInt nPdes=iMaxSize>>m.iChunkShift;
	if (nPdes<=32 || iPdeBitMap!=NULL)
		{
		if ((iAttributes & EDisconnected) && iPageBitMap!=NULL)
			Decommit(0,iMaxSize);
		else if (iAttributes & EDoubleEnded)
			AdjustDoubleEnded(0,0);
		else
			Adjust(0);
		}

	if ((iAttributes&EFixedAddress) && iHomeRegionBase>=m.iKernelSection->iBase)
		{
		Mmu::Wait();
		__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::~DMemModelChunk remove region"));
		if (TLinAddr(iBase)==iHomeBase)
			iBase=NULL;
		DeallocateHomeAddress();	// unlink from home section queue
		iHomeRegionBase=0;
		iHomeBase=0;
		Mmu::Signal();
		}
	if ((iMaxSize>>m.iChunkShift) > 32)
		{
		TAny* pM = __e32_atomic_swp_ord_ptr(&iPdeBitMap, 0);
		Kern::Free(pM);
		}
	TBitMapAllocator* pM = (TBitMapAllocator*)__e32_atomic_swp_ord_ptr(&iPageBitMap, 0);
	delete pM;
	pM = (TBitMapAllocator*)__e32_atomic_swp_ord_ptr(&iPermanentPageBitMap, 0);
	delete pM;

	TDfc* dfc = (TDfc*)__e32_atomic_swp_ord_ptr(&iDestroyedDfc, 0);
	if(dfc)
		dfc->Enque();

	__KTRACE_OPT(KMEMTRACE, {Mmu::Wait(); Kern::Printf("MT:D %d %x %O",NTickCount(),this,this);Mmu::Signal();});
#ifdef BTRACE_CHUNKS
	BTraceContext4(BTrace::EChunks,BTrace::EChunkDestroyed,this);
#endif
	}

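/**
Close a reference on the chunk. If aPtr is non-NULL it identifies the
DMemModelProcess from whose chunk list this chunk should be removed.
Deletes the chunk when the last reference is closed and returns EObjectDeleted.
*/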
TInt DMemModelChunk::Close(TAny* aPtr)
	{
	if (aPtr)
		{
		DMemModelProcess* pP=(DMemModelProcess*)aPtr;
		pP->RemoveChunk(this);
		}
	TInt r=Dec();
	__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Close %d %O",r,this));
	__NK_ASSERT_DEBUG(r > 0); // Should never be negative.
	if (r==1)
		{
		K::ObjDelete(this);
		return EObjectDeleted;
		}
	return 0;
	}


TUint8* DMemModelChunk::Base(DProcess* aProcess)
	{
	return iBase;
	}


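/**
Second-phase construction. Rounds the requested maximum size up to a whole
number of PDEs, allocates the PDE bitmap for chunks spanning more than 32 PDEs
and the page bitmaps needed by disconnected and shared-kernel chunks.
*/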
TInt DMemModelChunk::DoCreate(SChunkCreateInfo& aInfo)
	{
	__ASSERT_COMPILE(!(EMMChunkAttributesMask & EChunkAttributesMask));

	if (aInfo.iMaxSize<=0)
		return KErrArgument;
	Mmu& m=Mmu::Get();
	TInt nPdes=(aInfo.iMaxSize+m.iChunkMask)>>m.iChunkShift;
	iMaxSize=nPdes<<m.iChunkShift;
	iMapAttr = aInfo.iMapAttr;
	SetupPermissions();
	if (nPdes>32)
		{
		TInt words=(nPdes+31)>>5;
		iPdeBitMap=(TUint32*)Kern::Alloc(words*sizeof(TUint32));
		if (!iPdeBitMap)
			return KErrNoMemory;
		memclr(iPdeBitMap, words*sizeof(TUint32));
		}
	else
		iPdeBitMap=NULL;

	TInt maxpages=iMaxSize>>m.iPageShift;
	if (iAttributes & EDisconnected)
		{
		TBitMapAllocator* pM=TBitMapAllocator::New(maxpages,ETrue);
		if (!pM)
			return KErrNoMemory;
		iPageBitMap=pM;
		__KTRACE_OPT(KMMU,Kern::Printf("PageBitMap at %08x, MaxPages %d",pM,maxpages));
		}
	if(iChunkType==ESharedKernelSingle || iChunkType==ESharedKernelMultiple)
		{
		TBitMapAllocator* pM=TBitMapAllocator::New(maxpages,ETrue);
		if (!pM)
			return KErrNoMemory;
		iPermanentPageBitMap = pM;
		}
	__KTRACE_OPT(KMEMTRACE, {Mmu::Wait();Kern::Printf("MT:C %d %x %O",NTickCount(),this,this);Mmu::Signal();});
#ifdef BTRACE_CHUNKS
	TKName nameBuf;
	Name(nameBuf);
	BTraceContextN(BTrace::EChunks,BTrace::EChunkCreated,this,iMaxSize,nameBuf.Ptr(),nameBuf.Size());
	if(iOwningProcess)
		BTrace8(BTrace::EChunks,BTrace::EChunkOwner,this,iOwningProcess);
	BTraceContext12(BTrace::EChunks,BTrace::EChunkInfo,this,iChunkType,iAttributes);
#endif
	return KErrNone;
	}

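/**
Walk the page tables covering the chunk's initial committed region (iSize) and
take ownership of the pages found there: each page table is marked as belonging
to this chunk and each RAM page's SPageInfo is updated. Assumes the page tables
for that region are already assigned, and panics otherwise.
*/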
void DMemModelChunk::ClaimInitialPages()
	{
	__KTRACE_OPT(KMMU,Kern::Printf("Chunk %O ClaimInitialPages()",this));
	Mmu& m=Mmu::Get();
	TInt offset=0;
	TUint32 ccp=K::CompressKHeapPtr(this);
	NKern::LockSystem();
	while(offset<iSize)
		{
		TInt ptid=m.GetPageTableId(TLinAddr(iBase)+offset);
		__ASSERT_ALWAYS(ptid>=0,MM::Panic(MM::EClaimInitialPagesBadPageTable));
		__KTRACE_OPT(KMMU,Kern::Printf("Offset %x PTID=%d",offset,ptid));
		AddPde(offset);
		SPageTableInfo& ptinfo = m.PtInfo(ptid);
		ptinfo.SetChunk(ccp,offset>>m.iChunkShift);
		TPte* pPte=(TPte*)m.PageTableLinAddr(ptid);
		TInt i;
		TInt np = 0;
		TInt flashCount = MM::MaxPagesInOneGo;
		for (i=0; i<m.iChunkSize>>m.iPageShift; ++i, offset+=m.iPageSize)
			{
			if(--flashCount<=0)
				{
				flashCount = MM::MaxPagesInOneGo;
				NKern::FlashSystem();
				}
			TPte pte=pPte[i];
			if (m.PteIsPresent(pte))
				{
				++np;
				TPhysAddr phys=m.PtePhysAddr(pte, i);
				__KTRACE_OPT(KMMU,Kern::Printf("Offset %x phys %08x",offset,phys));
				SPageInfo* pi = SPageInfo::SafeFromPhysAddr(phys);
				if (pi)
					{
					pi->SetChunk(this,offset>>m.iPageShift);
#ifdef BTRACE_KERNEL_MEMORY
					--Epoc::KernelMiscPages; // page now owned by chunk, and is not 'miscellaneous'
#endif
					}
				}
			}
		ptinfo.iCount = np;
		__KTRACE_OPT(KMMU,Kern::Printf("Offset %x PTID %d NP %d", offset, ptid, np));
		}
	NKern::UnlockSystem();
	__KTRACE_OPT(KMMU,Kern::Printf("nPdes=%d, Pdes=%08x, HomePdes=%08x",iNumPdes,iPdes,iHomePdes));
	}

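/**
Place the chunk at a fixed linear address. The chunk's home region and run
address are both set to aAddr, the EFixedAddress attribute is set and any
pages already mapped in the initial region are claimed.
*/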
void DMemModelChunk::SetFixedAddress(TLinAddr aAddr, TInt aInitialSize)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk %O SetFixedAddress %08X size %08X",this,aAddr,aInitialSize));
	iHomeRegionOffset=0;
	iHomeRegionBase=aAddr;
	iHomeBase=aAddr;
	iBase=(TUint8*)aAddr;
	iHomeRegionSize=iMaxSize;
	iAttributes|=EFixedAddress;
	iSize=Mmu::RoundToPageSize(aInitialSize);
	ClaimInitialPages();
	}

TInt DMemModelChunk::Reserve(TInt aInitialSize)
//
// Reserve home section address space for a chunk
//
	{
	__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk %O Reserve() size %08x",this,aInitialSize));
	iHomeRegionOffset=0;
	if (!K::Initialising)
		Mmu::Wait();
	iHomeRegionBase=AllocateHomeAddress(iMaxSize);
	if (!K::Initialising)
		Mmu::Signal();
	iHomeBase=iHomeRegionBase;
	iBase=(TUint8*)iHomeRegionBase;
	if (iHomeRegionBase==0)
		return KErrNoMemory;
	iSize=Mmu::RoundToPageSize(aInitialSize);
	__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk %O address %08x",this,iHomeRegionBase));
	ClaimInitialPages();
	return KErrNone;
	}

TInt DMemModelChunk::Adjust(TInt aNewSize)
//
// Adjust a standard chunk.
//
	{

	__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Adjust %08x",aNewSize));
	if (iAttributes & (EDoubleEnded|EDisconnected))
		return KErrGeneral;
	if (aNewSize<0 || aNewSize>iMaxSize)
		return KErrArgument;

	TInt r=KErrNone;
	TInt newSize=Mmu::RoundToPageSize(aNewSize);
	if (newSize!=iSize)
		{
		Mmu::Wait();
		if (newSize>iSize)
			{
			__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Adjust growing"));
			r=DoCommit(iSize,newSize-iSize);
			}
		else if (newSize<iSize)
			{
			__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Adjust shrinking"));
			DoDecommit(newSize,iSize-newSize);
			}
		Mmu::Signal();
		}
	__COND_DEBUG_EVENT(r==KErrNone, EEventUpdateChunk, this);
	__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk %O adjusted to %x base %08x home %08x",this,iSize,iBase,iHomeRegionBase));
	return r;
	}

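/**
Ensure the chunk's home region covers [aOffset,aOffset+aSize). If it does not,
a larger home region is allocated (or the existing one reallocated) and, with
the system locked, any existing mappings are moved to the new home region.
*/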
TInt DMemModelChunk::ExpandHomeRegion(TInt aOffset, TInt aSize)
	{
	// Ensure that the chunk's home region is big enough to accommodate extra RAM being committed
	__KTRACE_OPT(KMMU,Kern::Printf("Chunk %O ExpandHomeRegion(%x,%x)",this,aOffset,aSize));
	Mmu& m = Mmu::Get();
	TBool lowerLimitOk=(aOffset>=iHomeRegionOffset && aOffset<=iHomeRegionOffset+iHomeRegionSize);
	TBool upperLimitOk=(aOffset+aSize>=iHomeRegionOffset && aOffset+aSize<=iHomeRegionOffset+iHomeRegionSize);
	if (lowerLimitOk && upperLimitOk)
		return KErrNone;	// no change required
	TInt newLowerLimit;
	TInt newUpperLimit;
	if (iHomeRegionSize)
		{
		newLowerLimit=Min(iHomeRegionOffset,aOffset);
		newUpperLimit=Max(iHomeRegionOffset+iHomeRegionSize,aOffset+aSize);
		}
	else
		{
		newLowerLimit=aOffset;
		newUpperLimit=aOffset+aSize;
		}
	newLowerLimit &= ~m.iChunkMask;
	newUpperLimit = (newUpperLimit+m.iChunkMask)&~m.iChunkMask;
	TInt newHomeRegionSize=newUpperLimit-newLowerLimit;
	__KTRACE_OPT(KMMU,Kern::Printf("newLowerLimit=%x, newUpperLimit=%x",newLowerLimit,newUpperLimit));
	if (newHomeRegionSize>iMaxSize)
		return KErrArgument;
	TLinAddr newHomeRegionBase;
	if (iHomeRegionSize==0)
		newHomeRegionBase=AllocateHomeAddress(newHomeRegionSize);
	else
		newHomeRegionBase=ReallocateHomeAddress(newHomeRegionSize);
	__KTRACE_OPT(KMMU,Kern::Printf("newHomeRegionBase=%08x",newHomeRegionBase));
	if (newHomeRegionBase==0)
		return KErrNoMemory;
	TInt deltaOffset=iHomeRegionOffset-newLowerLimit;
	TLinAddr newHomeBase=newHomeRegionBase-newLowerLimit;
	TLinAddr translatedHomeBase=newHomeRegionBase+deltaOffset;

	// lock the kernel while we change the chunk's home region
	// Note: The new home region always contains the original home region, so
	// if we reach here, it must be strictly larger.
	NKern::LockSystem();
	if (iNumPdes && iHomeRegionBase!=translatedHomeBase)
		{
		TLinAddr oldBase=TLinAddr(iBase);
		if (oldBase==iHomeBase)
			{
			// chunk is currently at home, so must move it
			// Note: this operation must cope with overlapping initial and final regions
			m.GenericFlush(Mmu::EFlushDMove);		// preemption could occur here...
			if (TLinAddr(iBase)==iHomeBase)	// ...so need to check chunk is still at home address
				{
				m.MoveChunk(iHomeRegionBase,translatedHomeBase,iNumPdes);
				iBase=(TUint8*)newHomeBase;
				MoveCurrentPdes(iHomeRegionBase,translatedHomeBase);
				MoveHomePdes(iHomeRegionBase,translatedHomeBase);
				}
			}
		else
			{
			MoveHomePdes(iHomeRegionBase,translatedHomeBase);
			}
		__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::ExpandHomeRegion moved home base from %08x to %08x",
							iHomeRegionBase,newHomeRegionBase));
		}
	if (!iBase)
		iBase=(TUint8*)newHomeBase;
	iHomeRegionBase=newHomeRegionBase;
	iHomeRegionOffset=newLowerLimit;
	iHomeBase=newHomeBase;
	__KTRACE_OPT(KMMU,Kern::Printf("Final iHomeRegionBase=%08x, iHomeRegionOffset=%08x",iHomeRegionBase,iHomeRegionOffset));
	__KTRACE_OPT(KMMU,Kern::Printf("Final iHomeRegionSize=%08x, iBase=%08x, iHomeBase=%08x",iHomeRegionSize,iBase,iHomeBase));
	__KTRACE_OPT(KMMU,Kern::Printf("nPdes=%d, Pdes=%08x, HomePdes=%08x",iNumPdes,iPdes,iHomePdes));
	NKern::UnlockSystem();
	return KErrNone;
	}

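/**
Return the kernel address of a range of a shared kernel chunk.
The range [aOffset,aOffset+aSize) must lie within the chunk and be covered by
pages recorded in iPermanentPageBitMap, otherwise an error is returned.
*/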
TInt DMemModelChunk::Address(TInt aOffset, TInt aSize, TLinAddr& aKernelAddress)
	{
	if(!iPermanentPageBitMap)
		return KErrAccessDenied;
	if(TUint(aOffset)>=TUint(iMaxSize))
		return KErrArgument;
	if(TUint(aOffset+aSize)>TUint(iMaxSize))
		return KErrArgument;
	if(aSize<=0)
		return KErrArgument;
	TInt pageShift = Mmu::Get().iPageShift;
	TInt start = aOffset>>pageShift;
	TInt size = ((aOffset+aSize-1)>>pageShift)-start+1;
	if(iPermanentPageBitMap->NotAllocated(start,size))
		return KErrNotFound;
	aKernelAddress = (TLinAddr)iBase+aOffset;
	return KErrNone;
	}

TInt DMemModelChunk::PhysicalAddress(TInt aOffset, TInt aSize, TLinAddr& aKernelAddress, TUint32& aPhysicalAddress, TUint32* aPhysicalPageList)
	{
	TInt r=Address(aOffset,aSize,aKernelAddress);
	if(r!=KErrNone)
		return r;

	return Mmu::Get().LinearToPhysical(aKernelAddress,aSize,aPhysicalAddress,aPhysicalPageList);
	}

void DMemModelChunk::Substitute(TInt aOffset, TPhysAddr aOldAddr, TPhysAddr aNewAddr)
	{
	// Substitute the page mapping at aOffset with aNewAddr.
	// Called with the system lock held.
	__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Substitute %x %08x %08x",aOffset,aOldAddr,aNewAddr));
	Mmu& m = Mmu::Get();

	TLinAddr addr=(TLinAddr)iBase+aOffset;
	TInt ptid=m.GetPageTableId(addr);
	if(ptid<0)
		MM::Panic(MM::EChunkRemapNoPageTable);

	m.RemapPage(ptid, addr, aOldAddr, aNewAddr, iPtePermissions, iOwningProcess);
	if(iChunkType==EKernelCode || iChunkType==EDll || iChunkType==EUserSelfModCode)
		m.SyncCodeMappings();
	}

/**
Get the movability type of the chunk's pages
@return How movable the chunk's pages are
*/
TZonePageType DMemModelChunk::GetPageType()
	{
	// Shared chunks have their physical addresses available
	if (iChunkType == ESharedKernelSingle ||
		iChunkType == ESharedKernelMultiple ||
		iChunkType == ESharedIo ||
		iChunkType == ESharedKernelMirror ||
		iChunkType == EKernelMessage ||
		iChunkType == EKernelData)	// Don't move kernel heap pages as DMA may be accessing them.
		{
		return EPageFixed;
		}
	// All other types of chunk are movable
	return EPageMovable;
	}

TInt DMemModelChunk::DoCommit(TInt aOffset, TInt aSize, TCommitType aCommitType, TUint32* aExtraArg)
	{
	// Commit more RAM to a chunk at a specified offset
	// enter and leave with system unlocked
	// must hold RamAlloc mutex before calling this function
	__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::DoCommit %x+%x type=%d extra=%08x",aOffset,aSize,aCommitType,aExtraArg));
	TInt offset=aOffset;
	TInt endOffset=offset+aSize;
	TInt newPtId=-1;
	Mmu& m = Mmu::Get();
	DRamAllocator& a = *m.iRamPageAllocator;
	TInt r=KErrNone;
	TPhysAddr pageList[KMaxPages];
	TPhysAddr* pPageList=0;
	TPhysAddr nextPage=0;
	TUint32 ccp=K::CompressKHeapPtr(this);
	SPageInfo::TType type = SPageInfo::EChunk;

	if (iHomeRegionSize==0 || (iAttributes&EFixedAddress)==0)
		{
		r=ExpandHomeRegion(aOffset,aSize);
		if (r!=KErrNone)
			return r;
		}

	// Set flag to indicate if RAM should be cleared before being committed.
	// Note, EDll, EUserCode are covered in the code segment, in order not to clear
	// the region overwritten by the loader
	TBool clearRam =	iChunkType==EUserData
					 || iChunkType==EDllData
					 || iChunkType==EUserSelfModCode
					 || iChunkType==ESharedKernelSingle
					 || iChunkType==ESharedKernelMultiple
					 || iChunkType==ESharedIo
					 || iChunkType==ERamDrive;


	TBool ownsMemory = !(iAttributes&EMemoryNotOwned);
	TBool physicalCommit = aCommitType&DChunk::ECommitPhysicalMask;
	if(ownsMemory)
		{
		if(physicalCommit)
			return KErrNotSupported;
		}
	else
		{
		if(!physicalCommit)
			return KErrNotSupported;
		type = SPageInfo::EInvalid;	// to indicate page info not to be updated
		}

	switch(aCommitType)
		{
	case DChunk::ECommitDiscontiguous:
		// No setup to do
		break;

	case DChunk::ECommitContiguous:
		{
		// Allocate a block of contiguous RAM from the free pool
		TInt numPages=(endOffset-offset)>>m.iPageShift;
		r=m.AllocContiguousRam(numPages<<m.iPageShift, nextPage, GetPageType(), 0);
		if (r!=KErrNone)
			return r;
		if(clearRam)
			m.ClearPages(numPages, (TPhysAddr*)(nextPage|1), iClearByte);  // clear RAM if required
		*aExtraArg = nextPage;	// store physical address of RAM as return argument
		}
		break;

	case DChunk::ECommitDiscontiguousPhysical:
		{
		pPageList = aExtraArg;				// use the pages given to us

		// Check the addresses of the pages are multiples of the page size...
		TInt numPages=(endOffset-offset)>>m.iPageShift;
		TUint32* ptr = aExtraArg;
		TUint32* endPtr = aExtraArg+numPages;
		if(ptr>=endPtr)
			return KErrNone;				// Zero size commit is OK
		TPhysAddr pageBits = 0;
		do
			pageBits |= *ptr++;
		while(ptr<endPtr);
		if(pageBits&(m.iPageSize-1))
			return KErrArgument;			// all addresses must be multiple of page size
		}
		break;

	case DChunk::ECommitContiguousPhysical:
		nextPage = (TPhysAddr)aExtraArg;	// we have been given the physical address to use
		if(nextPage&(m.iPageSize-1))
			return KErrArgument;			// address must be multiple of page size
		break;

#ifdef __MARM__
	case DChunk::ECommitVirtual:
		break;
#endif

	default:
		return KErrNotSupported;
		}

	// Commit memory a bit at a time (so the system lock only needs to be held for a limited time)
	while(offset<endOffset)
		{
		TInt np=(endOffset-offset)>>m.iPageShift;	// pages remaining to satisfy request
		TInt npEnd=(m.iChunkSize-(offset&m.iChunkMask))>>m.iPageShift;	// number of pages to end of page table
		if (np>npEnd)
			np=npEnd;								// limit to single page table
		if (np>MM::MaxPagesInOneGo)
			np=MM::MaxPagesInOneGo;					// limit
		NKern::LockSystem();						// lock the system while we look at the page directory
		TLinAddr addr=(TLinAddr)iBase+offset;		// current address
		TInt ptid=m.GetPageTableId(addr);			// get page table ID if a page table is already assigned here
		NKern::UnlockSystem();						// we can now unlock the system
		newPtId=-1;
		if (ptid<0)
			{
			// need to allocate a new page table
			newPtId=m.AllocPageTable();
			if (newPtId<0)
				{
				// out of memory, so break out and revert
				r=KErrNoMemory;
				break;
				}
			ptid=newPtId;
			}

		if(aCommitType==DChunk::ECommitDiscontiguous)
			{
			pPageList = pageList;
			r=m.AllocRamPages(pPageList,np, GetPageType());	// try to allocate pages
			if (r!=KErrNone)
				break;							// if we fail, break out and revert
			if(clearRam)
				m.ClearPages(np, pPageList, iClearByte);	// clear RAM if required
			}

		// lock the system while we change the MMU mappings
		NKern::LockSystem();
		TInt commitSize = np<<m.iPageShift;
		iSize += commitSize;					// update committed size
		if (aCommitType==DChunk::ECommitVirtual)
			m.MapVirtual(ptid, np);
		else if(pPageList)
			{
			m.MapRamPages(ptid, type, this, offset, pPageList, np, iPtePermissions);
			pPageList += np;
			}
		else
			{
			m.MapPhysicalPages(ptid, type, this, offset, nextPage, np, iPtePermissions);
			nextPage += commitSize;
			}
		NKern::UnlockSystem();

		NKern::LockSystem();
		if (newPtId>=0)
			{
			// We have allocated a new page table, now we must assign it and update PDE info
			SPageTableInfo& pti=m.PtInfo(ptid);
			pti.SetChunk(ccp, offset>>m.iChunkShift);
			TLinAddr addr=(TLinAddr)iBase+offset;	// current address
			m.DoAssignPageTable(ptid, addr, iPdePermissions[iChunkState]);
			AddPde(offset);						// update PDE info
			}
		__KTRACE_OPT(KMMU,Kern::Printf("nPdes=%d, Pdes=%08x, HomePdes=%08x",iNumPdes,iPdes,iHomePdes));
		NKern::UnlockSystem();
		__KTRACE_OPT(KMEMTRACE,Kern::Printf("MT:A %d %x %x %O",NTickCount(),this,iSize,this));
#ifdef BTRACE_CHUNKS
		BTraceContext12(BTrace::EChunks,ownsMemory?BTrace::EChunkMemoryAllocated:BTrace::EChunkMemoryAdded,this,offset,commitSize);
#endif

		offset += commitSize;				// update offset
		}

	if (r==KErrNone)
		{
		if(iPermanentPageBitMap)
			iPermanentPageBitMap->Alloc(aOffset>>m.iPageShift,aSize>>m.iPageShift);
		}
	else
		{
		// we ran out of memory somewhere
		// first check if we have an unassigned page table
		if (newPtId>=0)
			m.FreePageTable(newPtId);			// free the unassigned page table

		// now free any memory we succeeded in allocating and return the chunk to its initial state
		DChunk::TDecommitType decommitType = aCommitType==DChunk::ECommitVirtual ?
			DChunk::EDecommitVirtual : DChunk::EDecommitNormal;
		DoDecommit(aOffset,offset-aOffset,decommitType);

		if(aCommitType==DChunk::ECommitContiguous)
			{
			// Free the pages we allocated but didn't get around to committing
			TPhysAddr last = nextPage + ((endOffset-offset)>>m.iPageShift<<m.iPageShift);
			while(nextPage<last)
				{
				a.FreeRamPage(nextPage, GetPageType());
				nextPage += m.iPageSize;
				}
			*aExtraArg = KPhysAddrInvalid;	// return invalid physical address
			}

		m.iAllocFailed=ETrue;
		}
	return r;
	}

void DMemModelChunk::DoDecommit(TInt aOffset, TInt aSize, TDecommitType aDecommitType)
	{
	// Decommit RAM from a chunk at a specified offset
	// enter and leave with kernel unlocked
	// must hold RamAlloc mutex before calling this function
	__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::DoDecommit %x+%x",aOffset,aSize));
	if (iHomeRegionBase==0)
		return;

	TBool ownsMemory = !(iAttributes&EMemoryNotOwned);
	if (!ownsMemory)
		{
		// Physical memory not owned by the chunk also has to be evicted from cache(s).
		// We cannot just purge, as it can still be in use by the driver. Therefore, we'll flush it.
		// Purging physical memory from cache(s) that is owned by the chunk is done below.
		CacheMaintenance::MemoryToPreserveAndReuse((TLinAddr)(iBase+aOffset), aSize, iMapAttr);
		}

	TInt offset=aOffset;
	TInt endOffset=offset+aSize;
	Mmu& m = Mmu::Get();
	DRamAllocator& a = *m.iRamPageAllocator;
	TPhysAddr pageList[KMaxPages];
#ifdef __CPU_WRITE_BACK_CACHE
	TInt size_reduction = Min(aSize,iSize);
	TBool selectiveFlush=((TUint)size_reduction<=(CacheMaintenance::SyncAllPerformanceThresholdPages()<<KPageShift));
#endif
	while(offset<endOffset)
		{
		TInt np=(endOffset-offset)>>m.iPageShift;		// number of pages remaining to decommit
		TInt pdeEnd=(offset+m.iChunkSize)&~m.iChunkMask;
		TInt npEnd=(pdeEnd-offset)>>m.iPageShift;		// number of pages to end of page table
		if (np>npEnd)
			np=npEnd;									// limit to single page table
		if (np>MM::MaxPagesInOneGo)
			np=MM::MaxPagesInOneGo;						// limit
		NKern::LockSystem();							// lock the system while we look at the page directory
		TUint8* base=iBase;								// save base address
		TLinAddr addr=(TLinAddr)base+offset;			// current address
		TInt ptid=m.GetPageTableId(addr);				// get page table ID if a page table is already assigned here
		if (ptid>=0)
			{
			TInt nPtes=0;
			TInt nFree=0;

			// Unmap the pages, clear the PTEs and place the physical addresses of the now-free RAM pages in
			// pageList. Return nPtes=number of pages placed in list, remain=number of PTEs remaining in page table
			// This also invalidates any TLB entries for the unmapped pages.
			// NB for WriteBack cache, we must also invalidate any cached entries for these pages - this might be done
			// by invalidating entry-by-entry or by a complete cache flush at the end.
			// NB For split TLB, ITLB may not be invalidated. In that case it will be invalidated by
			// Mmu::SyncCodeMappings() at the end of the function.
			TInt remain;
			if (aDecommitType == EDecommitVirtual)
				remain=m.UnmapVirtual(ptid,addr,np,pageList,ownsMemory,nPtes,nFree,iOwningProcess);
			else
				remain=m.UnmapPages(ptid,addr,np,pageList,ownsMemory,nPtes,nFree,iOwningProcess);
			TInt decommitSize=nPtes<<m.iPageShift;
			iSize-=decommitSize;				// reduce the committed size

			// if page table is now completely empty, unassign it and update chunk PDE info
			remain &= KUnmapPagesCountMask;
			if (remain==0)
				{
				m.DoUnassignPageTable(addr);
				RemovePde(offset);
				NKern::UnlockSystem();
				m.FreePageTable(ptid);
				NKern::LockSystem();
				}
			__KTRACE_OPT(KMMU,Kern::Printf("nPdes=%d, Pdes=%08x, HomePdes=%08x",iNumPdes,iPdes,iHomePdes));
#ifdef __CPU_WRITE_BACK_CACHE
			if (selectiveFlush)
				{
				TInt n=np;
				while(n && iBase==base)	// reschedule may move base, but then cache will have been flushed so we can stop purging L1
					{
					CacheMaintenance::PageToReuseVirtualCache(addr);
					addr+=m.iPageSize;
					--n;
					NKern::FlashSystem();
					}
				Mmu::Get().CacheMaintenanceOnDecommit(pageList, nFree);	//On ARMv5, this deals with L2 cache only
				}
#endif
			NKern::UnlockSystem();				// we can now unlock the system
			__KTRACE_OPT(KMEMTRACE,Kern::Printf("MT:A %d %x %x %O",NTickCount(),this,iSize,this));
#ifdef BTRACE_CHUNKS
			if(nFree)
				BTraceContext12(BTrace::EChunks,ownsMemory?BTrace::EChunkMemoryDeallocated:BTrace::EChunkMemoryRemoved,this,offset,nFree<<m.iPageShift);
#endif

			// We can now return the decommitted pages to the free page list
			if (nFree)
				a.FreeRamPages(pageList,nFree, GetPageType());

			offset+=(np<<m.iPageShift);
			}
		else
			{
			NKern::UnlockSystem();
			__KTRACE_OPT(KMMU,Kern::Printf("No page table at %08x",addr));
			if ((iAttributes&EDisconnected)==0)
				MM::Panic(MM::EChunkDecommitNoPageTable);
			offset=pdeEnd;	// disconnected chunk - step on to next PDE
			}
		}
	if (iSize==0 && (iAttributes&EFixedAddress)==0)
		{
		__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::DoDecommit remove region"));
		NKern::LockSystem();
		if (TLinAddr(iBase)==iHomeBase)
			iBase=NULL;
		DeallocateHomeAddress();
		NKern::UnlockSystem();
		}
#ifdef __CPU_WRITE_BACK_CACHE
	if (!selectiveFlush)
		{
		NKern::LockSystem();
		m.GenericFlush((TUint)Mmu::EFlushDDecommit); 	//Flush virtual DCache
		CacheMaintenance::SyncPhysicalCache_All();
		NKern::UnlockSystem();
		}
#endif
	if (iAttributes & ECode)
		m.SyncCodeMappings();		// flush ITLB if necessary
	}


TInt DMemModelChunk::AdjustDoubleEnded(TInt aBottom, TInt aTop)
//
// Adjust a double-ended chunk.
//
	{

	__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::AdjustDoubleEnded %x-%x",aBottom,aTop));
	if ((iAttributes & (EDoubleEnded|EDisconnected))!=EDoubleEnded)
		return KErrGeneral;
	if (aTop<0 || aBottom<0 || aTop<aBottom || aTop>iMaxSize)
		return KErrArgument;
	Mmu& m = Mmu::Get();
	aBottom &= ~m.iPageMask;
	aTop=(aTop+m.iPageMask)&~m.iPageMask;
	TInt newSize=aTop-aBottom;
	if (newSize>iMaxSize)
		return KErrArgument;

	Mmu::Wait();
	TInt initBottom=iStartPos;
	TInt initTop=iStartPos+iSize;
	TInt nBottom=Max(aBottom,iStartPos);	// intersection bottom
	TInt nTop=Min(aTop,iStartPos+iSize);	// intersection top
	TInt r=KErrNone;
	if (nBottom<nTop)
		{
		__KTRACE_OPT(KMMU,Kern::Printf("Initial and final regions intersect"));
		if (initBottom<nBottom)
			{
			iStartPos=aBottom;
			DoDecommit(initBottom,nBottom-initBottom);
			}
		if (initTop>nTop)
			DoDecommit(nTop,initTop-nTop);	// this changes iSize
		if (aBottom<nBottom)
			{
			r=DoCommit(aBottom,nBottom-aBottom);
			if (r==KErrNone)
				{
				if (aTop>nTop)
					r=DoCommit(nTop,aTop-nTop);
				if (r==KErrNone)
					iStartPos=aBottom;
				else
					DoDecommit(aBottom,nBottom-aBottom);
				}
			}
		else if (aTop>nTop)
			r=DoCommit(nTop,aTop-nTop);
		}
	else
		{
		__KTRACE_OPT(KMMU,Kern::Printf("Initial and final regions disjoint"));
		if (iSize)
			DoDecommit(initBottom,iSize);
		iStartPos=aBottom;
		if (newSize)
			r=DoCommit(iStartPos,newSize);
		}
	Mmu::Signal();
	__COND_DEBUG_EVENT(r==KErrNone, EEventUpdateChunk, this);
	__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk %O adjusted to %x+%x base %08x home %08x",this,iStartPos,iSize,iBase,iHomeRegionBase));
	return r;
	}

TInt DMemModelChunk::Commit(TInt aOffset, TInt aSize, TCommitType aCommitType, TUint32* aExtraArg)
//
// Commit to a disconnected chunk.
//
	{
	__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Commit %x+%x type=%d extra=%08x",aOffset,aSize,aCommitType,aExtraArg));
	if ((iAttributes & (EDoubleEnded|EDisconnected))!=EDisconnected)
		return KErrGeneral;
	if (aOffset<0 || aSize<0)
		return KErrArgument;
	if (aSize==0)
		return KErrNone;
	Mmu& m = Mmu::Get();
	aSize+=(aOffset & m.iPageMask);
	aOffset &= ~m.iPageMask;
	aSize=(aSize+m.iPageMask)&~m.iPageMask;
	if ((aOffset+aSize)>iMaxSize)
		return KErrArgument;

	Mmu::Wait();
	TInt r=KErrNone;
	TInt i=aOffset>>m.iPageShift;
	TInt n=aSize>>m.iPageShift;
	if (iPageBitMap->NotFree(i,n))
		r=KErrAlreadyExists;
	else
		{
		r=DoCommit(aOffset,aSize,aCommitType,aExtraArg);
		if (r==KErrNone)
			iPageBitMap->Alloc(i,n);
		}
	Mmu::Signal();
	__COND_DEBUG_EVENT(r==KErrNone, EEventUpdateChunk, this);
	return r;
	}

TInt DMemModelChunk::Allocate(TInt aSize, TInt aGuard, TInt aAlign)
//
// Allocate offset and commit to a disconnected chunk.
//
	{
	TInt r = DoAllocate(aSize, aGuard, aAlign, ETrue);
	__COND_DEBUG_EVENT(r==KErrNone, EEventUpdateChunk, this);
	return r;
	}

TInt DMemModelChunk::FindFree(TInt aSize, TInt aGuard, TInt aAlign)
//
// Find free offset but don't commit any memory.
//
	{
	return DoAllocate(aSize, aGuard, aAlign, EFalse);
	}

TInt DMemModelChunk::DoAllocate(TInt aSize, TInt aGuard, TInt aAlign, TBool aCommit)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::DoAllocate %x %x %d",aSize,aGuard,aAlign));

	// Only allow this to be called on disconnected chunks and not disconnected
	// cache chunks as when guard pages exist the bit map can't be used to determine
	// the size of disconnected cache chunks as is required by Decommit().
	if ((iAttributes & (EDoubleEnded|EDisconnected|ECache))!=EDisconnected)
		return KErrGeneral;

	if (aSize<=0 || aGuard<0)
		return KErrArgument;
	Mmu& m = Mmu::Get();
	aAlign=Max(aAlign-m.iPageShift,0);
	aSize=(aSize+m.iPageMask)&~m.iPageMask;
	aGuard=(aGuard+m.iPageMask)&~m.iPageMask;
	if ((aSize+aGuard)>iMaxSize)
		return KErrArgument;

	Mmu::Wait();
	TInt r=KErrNone;
	TInt n=(aSize+aGuard)>>m.iPageShift;
	TInt i=iPageBitMap->AllocAligned(n,aAlign,0,EFalse);	// allocate the offset
	if (i<0)
		r=KErrNoMemory;		// run out of reserved space for this chunk
	else
		{
		TInt offset=i<<m.iPageShift;
		__KTRACE_OPT(KMMU,Kern::Printf("Offset %x allocated",offset));
		if (aCommit)
			{
			r=DoCommit(offset+aGuard,aSize,ECommitDiscontiguous);
			if (r==KErrNone)
				iPageBitMap->Alloc(i,n);
			}
		if (r==KErrNone)
			r=offset;		// if operation successful, return allocated offset
		}
	Mmu::Signal();
	__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::DoAllocate returns %x",r));
	return r;
	}

TInt DMemModelChunk::Decommit(TInt aOffset, TInt aSize)
//
// Decommit from a disconnected chunk.
//
	{
	return Decommit(aOffset, aSize, EDecommitNormal);
	}

TInt DMemModelChunk::Decommit(TInt aOffset, TInt aSize, TDecommitType aDecommitType)
//
// Decommit from a disconnected chunk.
//
	{
	__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Decommit %x+%x",aOffset,aSize));
	if ((iAttributes & (EDoubleEnded|EDisconnected))!=EDisconnected)
		return KErrGeneral;
	if (aOffset<0 || aSize<0)
		return KErrArgument;
	if (aSize==0)
		return KErrNone;
	Mmu& m = Mmu::Get();
	aSize+=(aOffset & m.iPageMask);
	aOffset &= ~m.iPageMask;
	aSize=(aSize+m.iPageMask)&~m.iPageMask;
	if ((aOffset+aSize)>iMaxSize)
		return KErrArgument;

	Mmu::Wait();

	// limit the range to the home region range
	TInt end = aOffset+aSize;
	if (aOffset<iHomeRegionOffset)
		aOffset=iHomeRegionOffset;
	if (end>iHomeRegionOffset+iHomeRegionSize)
		end=iHomeRegionOffset+iHomeRegionSize;
	aSize = end-aOffset;
	if(aSize<0)
		aSize=0;
	__KTRACE_OPT(KMMU,Kern::Printf("Rounded and Clipped range %x+%x",aOffset,aSize));

	if (aSize)
		{
		TInt i=aOffset>>m.iPageShift;
		TInt n=aSize>>m.iPageShift;
		__KTRACE_OPT(KMMU,Kern::Printf("Calling SelectiveFree(%d,%d)",i,n));
		TUint oldAvail = iPageBitMap->iAvail;
		TUint oldSize = iSize;

		// Free those positions which are still committed and also any guard pages,
		// i.e. pages that are reserved in this chunk but which are not committed.
		iPageBitMap->SelectiveFree(i,n);
		DoDecommit(aOffset,aSize,aDecommitType);

		if (iAttributes & ECache)
			{// If this is the file server cache chunk then adjust the size based
			// on the bit map size because:-
			//	- 	Unlocked and reclaimed pages will be unmapped without updating
			// 		iSize or the bit map.
			//	-	DoDecommit() only decommits the mapped pages.
			// For all other chunks what is mapped is what is committed to the
			// chunk so iSize is accurate.
			TUint actualFreedPages = iPageBitMap->iAvail - oldAvail;
			iSize = oldSize - (actualFreedPages << KPageShift);
			}
		}

	Mmu::Signal();
	__DEBUG_EVENT(EEventUpdateChunk, this);
	return KErrNone;
	}

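/**
Unlock a committed range of the file server cache chunk so that the unlocked
pages become reclaimable by the system when RAM is short. The range must
already be allocated in the chunk's page bit map.
*/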
TInt DMemModelChunk::Unlock(TInt aOffset, TInt aSize)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Unlock %x+%x",aOffset,aSize));
	if (!(iAttributes&ECache))
		return KErrGeneral;
	if ((iAttributes & (EDoubleEnded|EDisconnected))!=EDisconnected)
		return KErrGeneral;

	// Mark this as the file server cache chunk.  This is safe as it is only the
	// file server that can invoke this function.
	iAttributes |= ECache;

	if (aOffset<0 || aSize<0)
		return KErrArgument;
	if (aSize==0)
		return KErrNone;
	Mmu& m = Mmu::Get();
	aSize+=(aOffset & m.iPageMask);
	aOffset &= ~m.iPageMask;
	aSize=(aSize+m.iPageMask)&~m.iPageMask;
	if ((aOffset+aSize)>iMaxSize)
		return KErrArgument;

	Mmu::Wait();
	TInt r=KErrNone;
	TInt i=aOffset>>m.iPageShift;
	TInt n=aSize>>m.iPageShift;
	if (iPageBitMap->NotAllocated(i,n))
		r=KErrNotFound;
	else
		{
#ifdef BTRACE_CHUNKS
		TUint oldFree = m.FreeRamInBytes();
#endif
		r=Mmu::Get().UnlockRamCachePages(iBase,i,n);
#ifdef BTRACE_CHUNKS
		if(r==KErrNone)
			{
			TUint unlocked = m.FreeRamInBytes()-oldFree; // size of memory unlocked
			if(unlocked)
				BTraceContext12(BTrace::EChunks,BTrace::EChunkMemoryDeallocated,this,aOffset,unlocked);
			}
#endif
		}
	Mmu::Signal();
	__COND_DEBUG_EVENT(r==KErrNone, EEventUpdateChunk, this);
	return r;
	}

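/**
Re-lock a previously unlocked range of the file server cache chunk. If any of
the pages have been reclaimed in the meantime the whole range is decommitted
and an error is returned.
*/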
TInt DMemModelChunk::Lock(TInt aOffset, TInt aSize)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Lock %x+%x",aOffset,aSize));
	if (!(iAttributes&ECache))
		return KErrGeneral;
	if ((iAttributes & (EDoubleEnded|EDisconnected))!=EDisconnected)
		return KErrGeneral;
	if (aOffset<0 || aSize<0)
		return KErrArgument;
	if (aSize==0)
		return KErrNone;
	Mmu& m = Mmu::Get();
	aSize+=(aOffset & m.iPageMask);
	aOffset &= ~m.iPageMask;
	aSize=(aSize+m.iPageMask)&~m.iPageMask;
	if ((aOffset+aSize)>iMaxSize)
		return KErrArgument;

	Mmu::Wait();
	TInt r=KErrNone;
	TInt i=aOffset>>m.iPageShift;
	TInt n=aSize>>m.iPageShift;
	if (iPageBitMap->NotAllocated(i,n))
		r=KErrNotFound;
	else
		{
#ifdef BTRACE_CHUNKS
		TUint oldFree = m.FreeRamInBytes();
#endif
		r=Mmu::Get().LockRamCachePages(iBase,i,n);
#ifdef BTRACE_CHUNKS
		if(r==KErrNone)
			{
			TUint locked = oldFree-m.FreeRamInBytes();
			if(locked)
				BTraceContext12(BTrace::EChunks,BTrace::EChunkMemoryAllocated,this,aOffset,locked);
			}
#endif
		}
	if(r!=KErrNone)
		{
		// decommit memory on error...
		__KTRACE_OPT(KMMU,Kern::Printf("Calling SelectiveFree(%d,%d)",i,n));
		TUint oldAvail = iPageBitMap->iAvail;
		iPageBitMap->SelectiveFree(i,n);	// free those positions which are actually allocated
		TUint oldSize = iSize;

		DoDecommit(aOffset,aSize);

		// Use the bit map to adjust the size of the chunk as unlocked and reclaimed pages
		// will have been unmapped but not removed from the bit map as DoDecommit() only
		// decommits the mapped pages.
		TUint actualFreedPages = iPageBitMap->iAvail - oldAvail;
		iSize = oldSize - (actualFreedPages << KPageShift);
		}

	Mmu::Signal();
	__COND_DEBUG_EVENT(r==KErrNone, EEventUpdateChunk, this);
	return r;
	}

#ifndef __SCHEDULER_MACHINE_CODED__
// System locked in this function for a time proportional to chunk size.
// This is unavoidable since the chunk state must always be well defined
// whenever the system is unlocked.
TUint32 DMemModelChunk::ApplyTopLevelPermissions(TChunkState aChunkState)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("ApplyTopLevelPermissions ChunkState=%d",aChunkState));
	if (!(iAttributes&EFixedAccess))
		{
		iChunkState=aChunkState;
		if (iSize)
			{
			Mmu& m = Mmu::Get();
			TLinAddr base=(TLinAddr)iBase;
			TInt size=iSize;
			TUint32 mask=m.iChunkMask;
			if (iAttributes & EDoubleEnded)
				{
				base+=(iStartPos & ~mask);
				size=((iStartPos&mask)+size+mask)&~mask;
				}
			m.ApplyTopLevelPermissions(base,size,iPdePermissions[aChunkState]);
			}
		return (iAttributes&ECode)?Mmu::EFlushDPermChg|Mmu::EFlushIPermChg:Mmu::EFlushDPermChg;
		}
	return 0;
	}

// System locked in this function for a time proportional to chunk size.
// This is unavoidable since the chunk state must always be well defined
// whenever the system is unlocked.
TUint32 DMemModelChunk::MoveToRunAddress(TLinAddr aLinearAddr, TChunkState aChunkState)
	{
	iChunkState=aChunkState;
	if (iSize)
		{
		TLinAddr base=(TLinAddr)iBase;
		TLinAddr dest=aLinearAddr;
		TInt size=iSize;
		if (iAttributes & EDoubleEnded)
			{
			Mmu& m = Mmu::Get();
			TUint32 mask=m.iChunkMask;
			base+=(iStartPos & ~mask);
			dest+=(iStartPos & ~mask);
			size=((iStartPos&mask)+size+mask)&~mask;
			}
		m.MoveChunk(base,size,dest,iPdePermissions[aChunkState]);
		}
	MoveCurrentPdes((TLinAddr)iBase,aLinearAddr);
	iBase=(TUint8 *)aLinearAddr;
	return Mmu::EFlushDMove;	// chunk can't contain code
	}

// System locked in this function for a time proportional to chunk size.
// This is unavoidable since the chunk state must always be well defined
// whenever the system is unlocked.
TUint32 DMemModelChunk::MoveToHomeSection()
	{
	iChunkState=ENotRunning;
	if (iSize)
		{
		TLinAddr base=TLinAddr(iBase);
		TLinAddr home=iHomeRegionBase;
		TInt size=iSize;
		if (iAttributes & EDoubleEnded)
			{
			Mmu& m = Mmu::Get();
			TUint32 mask=m.iChunkMask;
			base+=(iStartPos & ~mask);
			home+=(iStartPos & ~mask);
			size=((iStartPos&mask)+size+mask)&~mask;
			}
		m.MoveChunk(base,size,home,iPdePermissions[0]);
		}
	iBase=(TUint8 *)iHomeRegionBase;
	iHomePdes=iPdes;
	return Mmu::EFlushDMove;	// chunk can't contain code
	}
#endif

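/**
Allocate a chunk-aligned home region for this chunk from the kernel section's
linear allocator. Returns the base address of the region, or 0 if no suitably
sized run of chunks is free.
*/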
TLinAddr DMemModelChunk::AllocateHomeAddress(TInt aSize)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::AllocateHomeAddress size %08x",aSize));
	Mmu& m = Mmu::Get();
	TLinearSection* s = m.iKernelSection;
	TUint required;
	if (iAttributes&EFixedAddress)
		required=Mmu::RoundToChunkSize(iMaxSize);
	else
		required=Mmu::RoundToChunkSize(aSize);
	required >>= m.iChunkShift;
	TInt r = s->iAllocator.AllocConsecutive(required, EFalse);
	if (r<0)
		return 0;
	s->iAllocator.Alloc(r, required);
	TLinAddr addr = s->iBase + (r<<m.iChunkShift);
	__KTRACE_OPT(KMMU,Kern::Printf("Address %08x allocated",addr));
	iHomeRegionSize = required << m.iChunkShift;
	return addr;
	}

void DMemModelChunk::DeallocateHomeAddress()
	{
	__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::DeallocateHomeAddress %08x+%x", iHomeRegionBase, iHomeRegionSize));
	if (iHomeRegionSize)
		{
		Mmu& m = Mmu::Get();
		TLinearSection* s = m.iKernelSection;
		TInt first = (TInt)((iHomeRegionBase - s->iBase)>>m.iChunkShift);
		TInt count = (TInt)(iHomeRegionSize >> m.iChunkShift);
		s->iAllocator.Free(first, count);
		iHomeRegionBase=0;
		iHomeRegionSize=0;
		}
	}

TLinAddr DMemModelChunk::ReallocateHomeAddress(TInt aNewSize)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::ReallocateHomeAddress(%08x) for chunk %O",aNewSize,this));

	// can never be called for a fixed address chunk
	__ASSERT_ALWAYS((iAttributes&(EFixedAddress))==0,MM::Panic(MM::EFixedChunkMoving));

	Mmu& m = Mmu::Get();
	TLinearSection* s = m.iKernelSection;
	TUint required=Mmu::RoundToChunkSize(aNewSize);
	TInt next = (TInt)((iHomeRegionBase + iHomeRegionSize - s->iBase)>>m.iChunkShift);
	TInt count = (TInt)((required - iHomeRegionSize) >> m.iChunkShift);
	if (!s->iAllocator.NotFree(next, count))
		{
		// we can expand in place
		s->iAllocator.Alloc(next, count);
		iHomeRegionSize = required;
		return iHomeRegionBase;
		}
	TUint oldHomeSize = iHomeRegionSize;
	TLinAddr addr = AllocateHomeAddress(required);	// try to get a new home address
	if (addr && oldHomeSize)
		{
		// succeeded - free old region
		next = (TInt)((iHomeRegionBase - s->iBase)>>m.iChunkShift);
		count = (TInt)(oldHomeSize >> m.iChunkShift);
		s->iAllocator.Free(next, count);
		}
	// if it fails, keep our current home region
	return addr;
	}

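/**
Check whether the current process may use this chunk. Private chunks may only
be accessed by their owning process or by the kernel process.
*/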
TInt DMemModelChunk::CheckAccess()
	{
	DProcess* pP=TheCurrentThread->iOwningProcess;
	if (iAttributes&EPrivate)
		{
		if (iOwningProcess && iOwningProcess!=pP && pP!=K::TheKernelProcess)
			return KErrAccessDenied;
		}
	return KErrNone;
	}

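/**
Close a reference on a hardware chunk. When the last reference goes, the
mapping is flushed from the cache(s), unmapped, its linear address released
and the object deleted.
*/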
TInt DMemModelChunkHw::Close(TAny*)
	{
	__KTRACE_OPT(KOBJECT,Kern::Printf("DMemModelChunkHw::Close %d %O",AccessCount(),this));
	TInt r=Dec();
	if (r==1)
		{
		if (iLinAddr)
			{
			// Physical memory has to be evicted from cache(s).
			// Must be preserved as well, as it can still be in use by the driver.
			CacheMaintenance::MemoryToPreserveAndReuse(iLinAddr, iSize, iAttribs);

			MmuBase& m=*MmuBase::TheMmu;
			MmuBase::Wait();
			m.Unmap(iLinAddr,iSize);
			MmuBase::Signal();
			DeallocateLinearAddress();
			}
		K::ObjDelete(this);
		}
	return r;
	}

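/**
Output BTrace records describing the chunk's currently committed regions.
Walks each page table in the chunk and emits one EChunkMemoryAllocated (or
EChunkMemoryAdded for memory the chunk does not own) record per contiguous
committed run.
*/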
void DMemModelChunk::BTracePrime(TInt aCategory)
	{
	DChunk::BTracePrime(aCategory);

#ifdef BTRACE_CHUNKS
	if (aCategory == BTrace::EChunks || aCategory == -1)
		{
		MmuBase::Wait();

		TBool memoryOwned = !(iAttributes&EMemoryNotOwned);
		Mmu& m=Mmu::Get();
		TInt committedBase = -1;

		// look at each page table in this chunk...
		TUint chunkEndIndex = iMaxSize>>KChunkShift;
		NKern::LockSystem();
		for(TUint chunkIndex=0; chunkIndex<chunkEndIndex; ++chunkIndex)
			{
			TLinAddr addr=(TLinAddr)iBase+chunkIndex*KChunkSize;		// current address
			TInt ptid = m.GetPageTableId(addr);
			if(ptid<0)
				{
				// no page table...
				if(committedBase!=-1)
					{
					NKern::FlashSystem();
					TUint committedEnd = chunkIndex*KChunkSize;
					BTrace12(BTrace::EChunks, memoryOwned?BTrace::EChunkMemoryAllocated:BTrace::EChunkMemoryAdded,this,committedBase,committedEnd-committedBase);
					committedBase = -1;
					}
				continue;
				}
			TPte* pPte=(TPte*)m.PageTableLinAddr(ptid);

			// look at each page in page table...
			for(TUint pageIndex=0; pageIndex<KChunkSize/KPageSize; ++pageIndex)
				{
				TBool committed = false;
				TPhysAddr phys = m.PtePhysAddr(pPte[pageIndex], pageIndex);
				if(phys!=KPhysAddrInvalid)
					{
					// we have a page...
					if(!memoryOwned)
						committed = true;
					else
						{
						// make sure we own the page...
						SPageInfo* pi = SPageInfo::SafeFromPhysAddr(phys);
						if(pi && pi->Type()==SPageInfo::EChunk && pi->Owner()==this)
							committed = true;
						}
					}

				if(committed)
					{
					if(committedBase==-1)
						committedBase = chunkIndex*KChunkSize+pageIndex*KPageSize; // start of new region
					}
				else
					{
					if(committedBase!=-1)
						{
						// generate trace for region...
						NKern::FlashSystem();
						TUint committedEnd = chunkIndex*KChunkSize+pageIndex*KPageSize;
						BTrace12(BTrace::EChunks, memoryOwned?BTrace::EChunkMemoryAllocated:BTrace::EChunkMemoryAdded,this,committedBase,committedEnd-committedBase);
						committedBase = -1;
						}
					}

				if((pageIndex&15)==0)
					NKern::FlashSystem();
				}
			}
		NKern::UnlockSystem();

		if(committedBase!=-1)
			{
			TUint committedEnd = chunkEndIndex*KChunkSize;
			BTrace12(BTrace::EChunks, memoryOwned?BTrace::EChunkMemoryAllocated:BTrace::EChunkMemoryAdded,this,committedBase,committedEnd-committedBase);
			}

		MmuBase::Signal();
		}
#endif
	}