os/kernelhwsrv/kernel/eka/memmodel/epoc/direct/mchunk.cpp
author sl@SLION-WIN7.fritz.box
Fri, 15 Jun 2012 03:10:57 +0200
changeset 0 bde4ae8d615e
permissions -rw-r--r--
First public contribution.
sl@0
     1
// Copyright (c) 1994-2009 Nokia Corporation and/or its subsidiary(-ies).
sl@0
     2
// All rights reserved.
sl@0
     3
// This component and the accompanying materials are made available
sl@0
     4
// under the terms of the License "Eclipse Public License v1.0"
sl@0
     5
// which accompanies this distribution, and is available
sl@0
     6
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
sl@0
     7
//
sl@0
     8
// Initial Contributors:
sl@0
     9
// Nokia Corporation - initial contribution.
sl@0
    10
//
sl@0
    11
// Contributors:
sl@0
    12
//
sl@0
    13
// Description:
sl@0
    14
// e32\memmodel\epoc\direct\mchunk.cpp
sl@0
    15
// 
sl@0
    16
//
sl@0
    17
sl@0
    18
#include <memmodel.h>
sl@0
    19
sl@0
    20
DMemModelChunk::~DMemModelChunk()
	{
	__KTRACE_OPT(KTHREAD,Kern::Printf("DMemModelChunk destruct %O",this));
	// Return the chunk's RAM region to the allocator, but only if one was
	// ever successfully allocated (iRegionSize is non-zero only after a
	// successful DoCreate).
	if (iRegionSize)
		{
		// Region free and the matching memory trace are serialised by the
		// RAM allocator mutex.
		MM::WaitRamAlloc();
		MM::FreeRegion(iRegionBase,iRegionSize);
		__KTRACE_OPT(KMEMTRACE, Kern::Printf("MT:D %d %x %O",NTickCount(),this,this););
		MM::SignalRamAlloc();
#ifdef BTRACE_CHUNKS
		BTraceContext4(BTrace::EChunks,BTrace::EChunkDestroyed,this);
#endif
		}
	iRegionSize=0;

	// Atomically claim any destruction-notification DFC; the swap-to-zero
	// guarantees the DFC is queued exactly once even if racing with another
	// writer of iDestroyedDfc.
	TDfc* dfc = (TDfc*)__e32_atomic_swp_ord_ptr(&iDestroyedDfc, 0);
	if(dfc)
		dfc->Enque();
	}
sl@0
    39
sl@0
    40
sl@0
    41
// Return the base address of this chunk.
// aProcess is unused here: in this (direct) memory model the chunk sits at
// one address for all processes, so there is no per-process mapping.
TUint8* DMemModelChunk::Base(DProcess* aProcess)
	{
	return iBase;
	}
sl@0
    45
sl@0
    46
sl@0
    47
// Second-phase construction of a chunk.
// For the chunk types listed in the switch, a region of block-rounded size
// iMaxSize is allocated immediately and recorded as both the region and the
// committed size - i.e. the whole chunk is committed up front in this model.
// Returns KErrNone on success, KErrNotSupported for not-owned memory,
// KErrArgument for a non-positive maximum size, or the allocator's error.
TInt DMemModelChunk::DoCreate(SChunkCreateInfo& anInfo)
	{
	__ASSERT_COMPILE(!(EMMChunkAttributesMask & EChunkAttributesMask));

	// Chunks over memory the chunk does not own are not supported here.
	if(iAttributes&EMemoryNotOwned)
		return KErrNotSupported;
	if (anInfo.iMaxSize<=0)
		return KErrArgument;
	TInt r=KErrNone;
	// All sizes are managed in whole RAM allocator blocks.
	iMaxSize=MM::RoundToBlockSize(anInfo.iMaxSize);
	switch (anInfo.iType)
		{
		case EDll:
		case EUserCode:
		case EUserSelfModCode:
		case EUserData:
		case EDllData:
		case ESharedKernelSingle:
		case ESharedKernelMultiple:
		case ESharedIo:
		case EKernelMessage:
			// Allocate the full region now, under the RAM allocator mutex.
			MM::WaitRamAlloc();
			r=MM::AllocRegion(iRegionBase, iMaxSize);
			if (r==KErrNone)
				iRegionSize=iMaxSize;
			else
				MM::AllocFailed=ETrue;	// record the failure for diagnostics
			MM::SignalRamAlloc();
			iBase=(TUint8*)iRegionBase;
			iSize=iMaxSize;
			if(r==KErrNone)
				{
				iMapAttr = EMapAttrCachedMax;
				__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::DoCreate clear %x+%x",iRegionBase,iRegionSize));

				// Clear memory to value determined by chunk member
				memset((TAny*)iRegionBase, iClearByte, MM::RoundToBlockSize(iRegionSize));
				}
			break;
		default:
			// Other chunk types get no memory here; r stays KErrNone.
			break;
		}

	__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::DoCreate %O ret %d",this,r));
	__KTRACE_OPT(KMMU,Kern::Printf("RegionBase=%08x, RegionSize=%08x",iRegionBase,iRegionSize));
	__KTRACE_OPT(KMEMTRACE, {MM::WaitRamAlloc();Kern::Printf("MT:C %d %x %O",NTickCount(),this,this);MM::SignalRamAlloc();});
#ifdef BTRACE_CHUNKS
	TKName nameBuf;
	Name(nameBuf);
	BTraceContextN(BTrace::EChunks,BTrace::EChunkCreated,this,iMaxSize,nameBuf.Ptr(),nameBuf.Size());
	if(iOwningProcess)
		BTrace8(BTrace::EChunks,BTrace::EChunkOwner,this,iOwningProcess);
	BTraceContext12(BTrace::EChunks,BTrace::EChunkInfo,this,iChunkType,iAttributes);
#endif
	return r;
	}
sl@0
   103
sl@0
   104
// Place the chunk at a fixed, caller-supplied address instead of
// allocator-provided memory. The size is rounded up to the RAM block size,
// and iMaxSize is grown if needed so the invariant iSize<=iMaxSize holds.
void DMemModelChunk::SetFixedAddress(TLinAddr anAddr, TInt aSize)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk %O SetFixedAddress %08X size %08X",this,anAddr,aSize));
	iSize=MM::RoundToBlockSize(aSize);
	if (iSize>iMaxSize)
		iMaxSize=iSize;
	iBase=(TUint8*)anAddr;
	}
sl@0
   112
sl@0
   113
TInt DMemModelChunk::Adjust(TInt aNewSize)
//
// Adjust a standard chunk.
//
	{

	__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Adjust %08x",aNewSize));
	// Double-ended and disconnected chunks have their own adjust entry points.
	const TBool wrongType = (iAttributes & (EDoubleEnded|EDisconnected))!=0;
	if (wrongType)
		return KErrGeneral;
	// The requested size must lie in [0, iMaxSize].
	const TBool badSize = aNewSize<0 || aNewSize>iMaxSize;
	if (badSize)
		return KErrArgument;

	// Memory is fully committed at creation in this model, so there is no
	// commit/decommit work to do - just report success.
	__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk %O adjusted to %x",this,iSize));
	__KTRACE_OPT(KMEMTRACE, {MM::WaitRamAlloc();Kern::Printf("MT:A %d %x %x %O",NTickCount(),this,iSize,this);MM::SignalRamAlloc();});
	return KErrNone;
	}
sl@0
   129
sl@0
   130
TInt DMemModelChunk::AdjustDoubleEnded(TInt aBottom, TInt aTop)
//
// Adjust a double-ended chunk.
//
	{

	__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::AdjustDoubleEnded %x-%x",aBottom,aTop));
	// This entry point is valid only for double-ended chunks.
	if ((iAttributes & (EDoubleEnded|EDisconnected))!=EDoubleEnded)
		return KErrGeneral;
	// Both ends must be non-negative, correctly ordered, and within the
	// chunk's maximum size.
	if (aBottom<0 || aTop<0 || aTop<aBottom || aTop>iMaxSize)
		return KErrArgument;
	const TInt len=aTop-aBottom;
	if (len>iMaxSize)
		return KErrArgument;
	// Record the new bottom; memory itself needs no commit work here.
	iStartPos=aBottom;

	__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk %O adjusted to %x+%x",this,iStartPos,iSize));
	__KTRACE_OPT(KMEMTRACE, {MM::WaitRamAlloc();Kern::Printf("MT:A %d %x %x %O",NTickCount(),this,iSize,this);MM::SignalRamAlloc();});
	return KErrNone;
	}
sl@0
   150
sl@0
   151
TInt DMemModelChunk::Address(TInt aOffset, TInt aSize, TLinAddr& aKernelAddress)
	{
	// Reject a non-positive length and any range not wholly inside the
	// chunk. The unsigned comparisons also reject negative offsets.
	if(aSize<=0)
		return KErrArgument;
	if(TUint(aOffset)>=TUint(iMaxSize))
		return KErrArgument;
	if(TUint(aOffset+aSize)>TUint(iMaxSize))
		return KErrArgument;
	// All memory is committed, so the kernel address is simply base+offset.
	aKernelAddress = (TLinAddr)iBase+aOffset;
	return KErrNone;
	}
sl@0
   162
sl@0
   163
// Resolve aOffset/aSize within the chunk to a kernel linear address and the
// physical address of the start of the range. If aPhysicalPageList is
// non-null it receives one physical address per page covered.
// Returns KErrNone if the range is physically contiguous, a positive value
// (1) if it is discontiguous (aPhysicalAddress then = KPhysAddrInvalid), or
// an Address() error code for an invalid range.
TInt DMemModelChunk::PhysicalAddress(TInt aOffset, TInt aSize, TLinAddr& aKernelAddress, TUint32& aPhysicalAddress, TUint32* aPhysicalPageList)
	{
	TInt r=Address(aOffset,aSize,aKernelAddress);
	if(r!=KErrNone)
		return r;

	TPhysAddr physStart = Epoc::LinearToPhysical(aKernelAddress);

	TInt pageShift = 12;	// 4K pages
	// Round the range out to page boundaries: first and last page addresses.
	TUint32 page = aKernelAddress>>pageShift<<pageShift;
	TUint32 lastPage = (aKernelAddress+aSize-1)>>pageShift<<pageShift;
	TUint32* pageList = aPhysicalPageList;
	// nextPhys is the physical address the next page must have for the
	// region to remain contiguous; it is poisoned to KPhysAddrInvalid on
	// the first mismatch and stays that way.
	TUint32 nextPhys = Epoc::LinearToPhysical(page);
	TUint32 pageSize = 1<<pageShift;
	while(page<=lastPage)
		{
		TPhysAddr phys = Epoc::LinearToPhysical(page);
		if(pageList)
			*pageList++ = phys;
		if(phys!=nextPhys)
			nextPhys = KPhysAddrInvalid;
		else
			nextPhys += pageSize;
		page += pageSize;
		}
	if(nextPhys==KPhysAddrInvalid)
		{
		// Memory is discontiguous...
		aPhysicalAddress = KPhysAddrInvalid;
		return 1;
		}
	else
		{
		// Memory is contiguous...
		aPhysicalAddress = physStart;
		return KErrNone;
		}
	}
sl@0
   201
sl@0
   202
TInt DMemModelChunk::Commit(TInt aOffset, TInt aSize, TCommitType aCommitType, TUint32* aExtraArg)
//
// Commit to a disconnected chunk.
// In this model all memory is committed at creation, so this mostly
// validates the request; for ECommitContiguous it additionally verifies
// that the existing memory really is physically contiguous.
//
	{
	__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Commit %x+%x type=%d extra=%08x",aOffset,aSize,aCommitType,aExtraArg));
	// Only disconnected chunks accept Commit.
	if ((iAttributes & (EDoubleEnded|EDisconnected))!=EDisconnected)
		return KErrGeneral;
	if (aOffset<0 || aSize<0 || (aOffset+aSize)>iMaxSize)
		return KErrArgument;
	// A 'physical' commit type is only valid on a not-owned-memory chunk,
	// and vice versa - the two flags must agree.
	if(LOGICAL_XOR((TInt)aCommitType&DChunk::ECommitPhysicalMask, iAttributes&DChunk::EMemoryNotOwned))
		return KErrNotSupported;  // Commit type doesn't match 'memory owned' type

	// Physical commit is never supported here (and not-owned chunks are
	// rejected in DoCreate anyway).
	if((TInt)aCommitType&DChunk::ECommitPhysicalMask)
		return KErrNotSupported;
	if(aCommitType==DChunk::ECommitContiguous)
		{
		// We can't commit contiguous memory, we just have to take what's already there.
		// So check to see if memory is contiguous, and if not, return KErrNoMemory -
		// which is what other Memory Models do if they can't find enough contiguous RAM.
		TLinAddr kernAddr;
		if(PhysicalAddress(aOffset,aSize,kernAddr,*aExtraArg)!=KErrNone)
			return KErrNoMemory;
		}
	else if(aCommitType!=DChunk::ECommitDiscontiguous)
		return KErrArgument;

	return KErrNone;
	}
sl@0
   231
sl@0
   232
TInt DMemModelChunk::Allocate(TInt aSize, TInt aGuard, TInt aAlign)
//
// Allocate offset and commit to a disconnected chunk.
//
	{
	__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Allocate %x %x %d",aSize,aGuard,aAlign));
	// Only disconnected chunks have an Allocate operation.
	if ((iAttributes & (EDoubleEnded|EDisconnected))!=EDisconnected)
		return KErrGeneral;
	// The request must be a positive size no larger than the chunk maximum.
	if (aSize<=0 || aSize>iMaxSize)
		return KErrArgument;
	// Per-offset allocation is not implemented in this memory model; after
	// validating the arguments the call always reports "not supported".
	const TInt r=KErrNotSupported;
	__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Allocate returns %x",r));
	return r;
	}
sl@0
   246
sl@0
   247
TInt DMemModelChunk::Decommit(TInt anOffset, TInt aSize)
//
// Decommit from a disconnected chunk.
//
	{
	__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Decommit %x+%x",anOffset,aSize));
	// Only disconnected chunks support decommit.
	if ((iAttributes & (EDoubleEnded|EDisconnected))!=EDisconnected)
		return KErrGeneral;
	// The range must be non-negative and lie wholly within the chunk.
	const TBool badRange = anOffset<0 || aSize<0 || (anOffset+aSize)>iMaxSize;
	if (badRange)
		return KErrArgument;
	// Memory stays permanently committed in this model - nothing to release.
	return KErrNone;
	}
sl@0
   259
sl@0
   260
// Substituting one physical page for another is not supported by this
// memory model; any call is a fatal kernel error.
void DMemModelChunk::Substitute(TInt /*aOffset*/, TPhysAddr /*aOldAddr*/, TPhysAddr /*aNewAddr*/)
	{
	MM::Panic(MM::EUnsupportedOperation);
	}
sl@0
   264
sl@0
   265
// Unlock (make discardable) a range of a cache chunk.
// In this model the call only validates its arguments; memory is never
// actually unmapped or discarded.
// Returns KErrGeneral for a non-cache or non-disconnected chunk,
// KErrArgument for a bad range, KErrNone otherwise.
TInt DMemModelChunk::Unlock(TInt anOffset, TInt aSize)
	{
	// Fixed trace text: it previously said "DMemModelChunk::Decommit"
	// (copy-paste from Decommit), mislabelling this operation in KMMU logs.
	__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Unlock %x+%x",anOffset,aSize));
	// Unlock applies only to cache chunks...
	if (!(iAttributes&ECache))
		return KErrGeneral;
	// ...which must also be disconnected chunks.
	if ((iAttributes & (EDoubleEnded|EDisconnected))!=EDisconnected)
		return KErrGeneral;
	if (anOffset<0 || aSize<0 || (anOffset+aSize)>iMaxSize)
		return KErrArgument;
	return KErrNone;
	}
sl@0
   276
sl@0
   277
// Lock (re-commit) a range of a cache chunk.
// In this model the call only validates its arguments; memory is always
// present, so there is nothing to reclaim.
// Returns KErrGeneral for a non-cache or non-disconnected chunk,
// KErrArgument for a bad range, KErrNone otherwise.
TInt DMemModelChunk::Lock(TInt anOffset, TInt aSize)
	{
	// Fixed trace text: it previously said "DMemModelChunk::Decommit"
	// (copy-paste from Decommit), mislabelling this operation in KMMU logs.
	__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Lock %x+%x",anOffset,aSize));
	// Lock applies only to cache chunks...
	if (!(iAttributes&ECache))
		return KErrGeneral;
	// ...which must also be disconnected chunks.
	if ((iAttributes & (EDoubleEnded|EDisconnected))!=EDisconnected)
		return KErrGeneral;
	if (anOffset<0 || aSize<0 || (anOffset+aSize)>iMaxSize)
		return KErrArgument;
	return KErrNone;
	}
sl@0
   288
sl@0
   289
TInt DMemModelChunk::CheckAccess()
	{
	DProcess* pP=TheCurrentThread->iOwningProcess;
	// A private chunk may be touched only by its owning process, or by the
	// kernel process itself. Everything else is freely accessible.
	const TBool isPrivate = (iAttributes&EPrivate)!=0;
	if (isPrivate && iOwningProcess && iOwningProcess!=pP && pP!=K::TheKernelProcess)
		return KErrAccessDenied;
	return KErrNone;
	}
sl@0
   299
sl@0
   300
// Round aSize up to the next multiple of the RAM allocator block size
// (MM::RamBlockSize, which this arithmetic assumes is a power of two).
TUint32 MM::RoundToBlockSize(TUint32 aSize)
	{
	const TUint32 mask=MM::RamBlockSize-1;
	return (aSize+mask)&~mask;
	}
sl@0
   305
sl@0
   306
// Return a previously allocated region to the RAM allocator.
// The size is rounded up to whole blocks; the region must lie entirely
// inside the user data section or the kernel panics (EFreeInvalidRegion).
// Callers in this file hold the RAM allocator mutex around this call.
void MM::FreeRegion(TLinAddr aBase, TInt aSize)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("MM::FreeRegion base %08x size %08x",aBase,aSize));
	aSize=MM::RoundToBlockSize(aSize);
	__ASSERT_ALWAYS(aBase>=MM::UserDataSectionBase && aBase+aSize<=MM::UserDataSectionEnd, MM::Panic(MM::EFreeInvalidRegion));
	// Convert the byte range to the allocator's block numbering.
	TInt block=(aBase-MM::UserDataSectionBase)>>MM::RamBlockShift;
	TInt nBlocks=aSize>>MM::RamBlockShift;
	MM::RamAllocator->Free(block, nBlocks);
	}
sl@0
   315
sl@0
   316
// Allocate a region of RAM of (block-rounded) aSize bytes aligned to at
// least 2^aAlign bytes, returning its linear base address in aBase.
// Callers in this file hold the RAM allocator mutex around this call.
// Returns KErrNone or KErrNoMemory.
TInt MM::AllocRegion(TLinAddr& aBase, TInt aSize, TInt aAlign)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("MM::AllocRegion size 0x%x align %d",aSize,aAlign));
	// Convert byte alignment to block alignment (blocks are the unit the
	// bitmap allocator works in).
	TInt align=Max(aAlign-MM::RamBlockShift, 0);
	TInt nBlocks=MM::RoundToBlockSize(aSize)>>MM::RamBlockShift;
	TInt base=(TInt)(MM::UserDataSectionBase>>MM::RamBlockShift);
	TInt block=MM::RamAllocator->AllocAligned(nBlocks, align, base, ETrue);	// returns first block number or -1
	if (block<0)
		return KErrNoMemory;
	// AllocAligned (with ETrue) only finds the run; mark it allocated now.
	MM::RamAllocator->Alloc(block,nBlocks);
	aBase=MM::UserDataSectionBase+(block<<MM::RamBlockShift);
	__KTRACE_OPT(KMMU,Kern::Printf("MM::AllocRegion address %08x",aBase));
	return KErrNone;
	}
sl@0
   330
sl@0
   331
// Claim a specific region [aBase, aBase+aSize), extended outward to whole
// block boundaries. Returns KErrArgument if the extended region falls
// outside the user data section, KErrInUse if any block in it is already
// allocated, otherwise marks it allocated and returns KErrNone.
TInt MM::ClaimRegion(TLinAddr aBase, TInt aSize)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("MM::ClaimRegion base %08x size %08x",aBase,aSize));
	TUint32 m=MM::RamBlockSize-1;
	// Round the base down and grow the size to cover the same byte range
	// with whole blocks.
	aSize=MM::RoundToBlockSize(aSize+(aBase&m));
	aBase&=~m;
	if (aBase<MM::UserDataSectionBase || TUint32(aSize)>MM::UserDataSectionEnd-aBase)
		return KErrArgument;
	TInt block=(aBase-MM::UserDataSectionBase)>>MM::RamBlockShift;
	TInt nBlocks=aSize>>MM::RamBlockShift;
	// All-or-nothing: fail without side effects if any block is taken.
	if (MM::RamAllocator->NotFree(block, nBlocks))
		return KErrInUse;
	MM::RamAllocator->Alloc(block, nBlocks);
	return KErrNone;
	}
sl@0
   346
sl@0
   347
// Allocate a physically contiguous region
// A contiguous run cannot span a physical RAM bank boundary, so each bank
// is tried in turn using MM::SecondaryAllocator as a scratch copy of the
// main allocator's state for that bank.
TInt MM::AllocContiguousRegion(TLinAddr& aBase, TInt aSize, TInt aAlign)
	{
#ifndef __CPU_HAS_MMU
	// Without an MMU linear==physical, so an ordinary allocation suffices.
	return MM::AllocRegion(aBase, aSize, aAlign);
#else
	__KTRACE_OPT(KMMU,Kern::Printf("MM::AllocContiguousRegion size 0x%x align %d",aSize,aAlign));
	TBitMapAllocator* sa = MM::SecondaryAllocator;
	if (!sa)
		return MM::AllocRegion(aBase, aSize, aAlign);	// only one physical bank

	TBitMapAllocator* ra = MM::RamAllocator;
	// Convert byte alignment to block alignment.
	TInt align=Max(aAlign-MM::RamBlockShift, 0);
	TUint32 alignmask = (1u<<align)-1;
	TInt nBlocks=MM::RoundToBlockSize(aSize)>>MM::RamBlockShift;
	TInt base=(TInt)(MM::UserDataSectionBase>>MM::RamBlockShift);
	const SRamBank* banks = (const SRamBank*)TheSuperPage().iRamBootData;
	const SRamBank* pB = banks;
	TInt bnum = 0;		// block number of the start of the current bank
	TInt block = -1;	// block index within the bank; -1 = no fit found
	// Bank list is terminated by an entry with iSize==0.
	for (; pB->iSize; ++pB)
		{
		TInt nb = pB->iSize >> MM::RamBlockShift;
		// Snapshot this bank's allocation state into the scratch allocator
		// so the search does not disturb the real one.
		sa->CopyAlignedRange(ra, bnum, nb);
		TInt basealign = (base + bnum) & alignmask;
		block = sa->AllocAligned(nBlocks, align, basealign, ETrue);	// returns first block number or -1
		if (block>=0)
			break;
		bnum += nb;
		}
	if (pB->iSize == 0)
		return KErrNoMemory;	// exhausted all banks without a fit
	// Commit the allocation in the real allocator (block is bank-relative).
	MM::RamAllocator->Alloc(block + bnum, nBlocks);
	aBase = MM::UserDataSectionBase + ((block + bnum)<<MM::RamBlockShift);
	__KTRACE_OPT(KMMU,Kern::Printf("MM::AllocContiguousRegion address %08x",aBase));
	return KErrNone;
#endif
	}
sl@0
   385
sl@0
   386
// Map a physical address to its RAM block number.
// Banks are scanned in boot order, accumulating each bank's block count so
// the result matches the allocator's flat block numbering.
// Returns the (non-negative) block number, or KErrNotFound if aAddr lies
// in no RAM bank.
TInt MM::BlockNumber(TPhysAddr aAddr)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("MM::BlockNumber %08x",aAddr));
	const SRamBank* banks = (const SRamBank*)TheSuperPage().iRamBootData;
	const SRamBank* pB = banks;
	TInt bnum = 0;	// first block number of the current bank
	// Bank list is terminated by an entry with iSize==0.
	for (; pB->iSize; ++pB)
		{
		if (aAddr >= pB->iBase)
			{
			TUint32 offset = aAddr - pB->iBase;
			if (offset < pB->iSize)
				{
				// Address is inside this bank.
				TInt bn = bnum + TInt(offset>>MM::RamBlockShift);
				__KTRACE_OPT(KMMU,Kern::Printf("MM::BlockNumber %08x->%x",aAddr,bn));
				return bn;
				}
			}
		TInt nb = pB->iSize >> MM::RamBlockShift;
		bnum += nb;
		}
	return KErrNotFound;
	}
sl@0
   409
sl@0
   410
/********************************************
sl@0
   411
 * Hardware chunk abstraction
sl@0
   412
 ********************************************/
sl@0
   413
sl@0
   414
/**
sl@0
   415
	@pre	Call in a thread context.
sl@0
   416
	@pre	Interrupts must be enabled.
sl@0
   417
	@pre	Kernel must be unlocked.
sl@0
   418
	@pre    No fast mutex can be held.
sl@0
   419
	@pre	Calling thread must be in a critical section.
sl@0
   420
 */
sl@0
   421
// Create a hardware chunk object describing the physical region
// [aAddr, aAddr+aSize). No mapping is performed: the linear address is set
// equal to the physical address, and aAttribs is not used here.
// On success *aChunk points at the new object and KErrNone is returned;
// otherwise aChunk is NULL and KErrArgument/KErrNoMemory is returned.
EXPORT_C TInt DPlatChunkHw::New(DPlatChunkHw*& aChunk, TPhysAddr aAddr, TInt aSize, TUint aAttribs)
	{
	CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"DPlatChunkHw::New");
	__KTRACE_OPT(KMMU,Kern::Printf("DPlatChunkHw::New phys=%08x, size=%x, attribs=%x",aAddr,aSize,aAttribs));
	aChunk=NULL;
	if (aSize<=0)
		return KErrArgument;
	DPlatChunkHw* pC=new DPlatChunkHw;
	if (!pC)
		return KErrNoMemory;
	__KTRACE_OPT(KMMU,Kern::Printf("DPlatChunkHw created at %08x",pC));

	// Linear address equals physical address - no MMU remapping here.
	pC->iPhysAddr=aAddr;
	pC->iLinAddr=aAddr;
	pC->iSize=aSize;
	aChunk=pC;
	return KErrNone;
	}
sl@0
   439
sl@0
   440
sl@0
   441
// Emit BTrace records describing this chunk's current state, extending the
// base-class output with a memory-allocated record for the committed size.
void DMemModelChunk::BTracePrime(TInt aCategory)
	{
	DChunk::BTracePrime(aCategory);
	
#ifdef BTRACE_CHUNKS
	// aCategory of -1 requests priming of all categories.
	if (aCategory == BTrace::EChunks || aCategory == -1)
		{
		// Report the whole committed size as allocated at offset 0.
		BTrace12(BTrace::EChunks, BTrace::EChunkMemoryAllocated,this,0,this->iSize);
		}
#endif
	}