os/kernelhwsrv/kernel/eka/memmodel/epoc/flexible/mmu/mlargemappings.cpp
author sl@SLION-WIN7.fritz.box
Fri, 15 Jun 2012 03:10:57 +0200
changeset 0 bde4ae8d615e
permissions -rw-r--r--
First public contribution.
sl@0
     1
// Copyright (c) 2007-2009 Nokia Corporation and/or its subsidiary(-ies).
sl@0
     2
// All rights reserved.
sl@0
     3
// This component and the accompanying materials are made available
sl@0
     4
// under the terms of the License "Eclipse Public License v1.0"
sl@0
     5
// which accompanies this distribution, and is available
sl@0
     6
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
sl@0
     7
//
sl@0
     8
// Initial Contributors:
sl@0
     9
// Nokia Corporation - initial contribution.
sl@0
    10
//
sl@0
    11
// Contributors:
sl@0
    12
//
sl@0
    13
// Description:
sl@0
    14
//
sl@0
    15
sl@0
    16
#include "mlargemappings.h"
sl@0
    17
#include "cache_maintenance.inl"
sl@0
    18
sl@0
    19
sl@0
    20
//
sl@0
    21
// DLargeMappedMemory
sl@0
    22
//
sl@0
    23
sl@0
    24
sl@0
    25
DLargeMappedMemory::DLargeMappedMemory(DMemoryManager* aManager, TUint aSizeInPages, TMemoryAttributes aAttributes, TMemoryCreateFlags aCreateFlags)
	: DCoarseMemory(aManager, aSizeInPages, aAttributes, aCreateFlags)
	{
	// Nothing to initialise beyond the base class: the per-chunk contiguous
	// state bitmap (iContiguousState) is allocated inline after the object by
	// New(), which uses Kern::AllocZ, so it is already zero-filled here.
	}
sl@0
    29
sl@0
    30
sl@0
    31
// Factory for DLargeMappedMemory: allocates the object with enough extra
// space appended to hold one contiguous-state bit per PDE-sized chunk,
// placement-constructs it and runs second-phase construction.
// Returns NULL on allocation or construction failure.
DLargeMappedMemory* DLargeMappedMemory::New(DMemoryManager* aManager, TUint aSizeInPages, TMemoryAttributes aAttributes, TMemoryCreateFlags aCreateFlags)
	{
	TRACE(("DLargeMappedMemory::New()"));
	// one contiguous-state bit per chunk, packed into TUint words; the words
	// extend the one-element iContiguousState array, hence "wordCount - 1"
	TUint chunkCount = (aSizeInPages + KPagesInPDE - 1) >> KPagesInPDEShift;
	TUint wordCount = (chunkCount + 31) >> 5;
	if (wordCount == 0)
		wordCount = 1; // aSizeInPages == 0 would otherwise underflow "wordCount - 1" below
	TUint size = sizeof(DLargeMappedMemory) + sizeof(TUint) * (wordCount - 1);
	// AllocZ zero-fills, so every chunk initially reads as non-contiguous
	DLargeMappedMemory* self = (DLargeMappedMemory*)Kern::AllocZ(size);
	if(self)
		{
		// placement-construct into the over-sized allocation
		new (self) DLargeMappedMemory(aManager, aSizeInPages, aAttributes, aCreateFlags);
		if(self->Construct()!=KErrNone)
			{
			self->Close();
			self = NULL;
			}
		}
	TRACE(("DLargeMappedMemory::New() returns 0x%08x", self));
	return self;
	}
sl@0
    50
sl@0
    51
sl@0
    52
DLargeMappedMemory::~DLargeMappedMemory()
	{
	TRACE2(("DLargeMappedMemory[0x%08x]::~DLargeMappedMemory()",this));
	// The inline iContiguousState storage is freed together with the object;
	// all other teardown is performed by the DCoarseMemory destructor.
	}
sl@0
    56
sl@0
    57
sl@0
    58
// Create a mapping object for the region [aIndex, aIndex+aCount) of this
// memory.  Only regions whose start and size are both chunk-aligned can use
// section PDEs, so only those get a DLargeMapping; anything else falls back
// to an ordinary fine mapping.
DMemoryMapping* DLargeMappedMemory::CreateMapping(TUint aIndex, TUint aCount)
	{
	TRACE(("DLargeMappedMemory[0x%08x]::CreateMapping()",this));	
	const TUint alignMask = KChunkMask >> KPageShift;
	TBool chunkAligned = ((aIndex | aCount) & alignMask) == 0;
	if (!chunkAligned)
		return new DFineMapping();
	return new DLargeMapping();
	}
sl@0
    66
sl@0
    67
sl@0
    68
// Claim ownership of the memory the bootstrap mapped at aBase..aBase+aSize
// and record, per PDE-sized chunk, whether the bootstrap used a section
// mapping, so that DLargeMapping::DoMap can later reproduce section PDEs.
// Returns KErrNone or an error from DCoarseMemory::ClaimInitialPages.
TInt DLargeMappedMemory::ClaimInitialPages(TLinAddr aBase,
										   TUint aSize,
										   TMappingPermissions aPermissions,
										   TBool aAllowGaps,
										   TBool aAllowNonRamPages)
	{
	TRACE(("DLargeMappedMemory[0x%08x]::ClaimInitialPages(0x%08x,0x%08x,0x%08x,%d,%d)",
		   this,aBase,aSize,aPermissions,aAllowGaps,aAllowNonRamPages));
	// let the coarse implementation claim the pages and page tables first
	TInt r = DCoarseMemory::ClaimInitialPages(aBase,aSize,aPermissions,aAllowGaps,
											  aAllowNonRamPages);
	if (r != KErrNone)
		return r;

	// set initial contiguous state by checking which pages were section mapped by the bootstrap
	MmuLock::Lock();
	TPde* pPde = Mmu::PageDirectoryEntry(KKernelOsAsid,aBase);
	// NOTE(review): rounding down means any partial chunk at the end of the
	// region is never examined and stays non-contiguous (AllocZ default)
	TUint endChunk = aSize >> KChunkShift;
	for (TUint chunk = 0 ; chunk < endChunk ; ++chunk)
		{
		SetChunkContiguous(chunk, Mmu::PdeMapsSection(*pPde++));
		TRACE(("  chunk %d contiguous state is %d", chunk, IsChunkContiguous(chunk)));
		}
	MmuLock::Unlock();

	return KErrNone;
	}
sl@0
    94
sl@0
    95
sl@0
    96
// Map the pages described by aPages into all page tables and mappings,
// by delegating to DCoarseMemory.  Section-mapped (contiguous) chunks are
// not yet supported on this path.
TInt DLargeMappedMemory::MapPages(RPageArray::TIter aPages)
	{
	TRACE2(("DLargeMappedMemory[0x%08x]::MapPages(?) index=0x%x count=0x%x",this,aPages.Index(),aPages.Count()));

	// for now: assert pages do not overlap a contiguous area
	// todo: update contiguous state, update page tables and call MapPages on large mappings
#ifdef _DEBUG
	// stride of KPagesInPDE pages visits one page per chunk in the range;
	// NOTE(review): if the range straddles a chunk boundary, the stride can
	// step past IndexEnd() before sampling the final chunk — debug-only gap
	for (TUint index = aPages.Index() ; index < aPages.IndexEnd() ; index += KPagesInPDE)
		{
		// IsChunkContiguous requires MmuLock
		MmuLock::Lock();
		__NK_ASSERT_DEBUG(!IsChunkContiguous(index >> KPagesInPDEShift));
		MmuLock::Unlock();
		}
#endif

	// map pages in all page tables and fine mappings
	return DCoarseMemory::MapPages(aPages);
	}
sl@0
   114
sl@0
   115
sl@0
   116
// Remap the page at aIndex in all page tables and mappings.  Remapping a
// single page currently forces its whole chunk to be treated as
// non-contiguous; DLargeMapping::RemapPage then demotes any section PDE
// covering that chunk to a page-table PDE.
void DLargeMappedMemory::RemapPage(TPhysAddr& aPageArray, TUint aIndex, TBool aInvalidateTLB)
	{
	TRACE2(("DLargeMappedMemory[0x%08x]::RemapPage() index=0x%x",this, aIndex));

	// update contiguous state...
	// todo: for now we will assume that remapping a page makes it non-contiguous
	MmuLock::Lock();
	SetChunkContiguous(aIndex >> KPagesInPDEShift, EFalse);
	MmuLock::Unlock();

	// remap pages in all page tables and call RemapPage on large mappings...
	MmuLock::Lock();
	TUint pteType = 0;
	do
		{
		DPageTables* tables = iPageTables[pteType];
		if(tables)
			{
			// take a reference so the tables survive while MmuLock is dropped
			tables->Open();
			MmuLock::Unlock();
			tables->RemapPage(aPageArray, aIndex, aInvalidateTLB);
			tables->iMappings.RemapPage(aPageArray, aIndex, aInvalidateTLB);
			tables->AsyncClose();
			MmuLock::Lock();
			}
		}
	while(++pteType<ENumPteTypes);
	MmuLock::Unlock();

	// remap page in all fine mappings...
	DMemoryObject::RemapPage(aPageArray, aIndex, aInvalidateTLB);
	}
sl@0
   148
sl@0
   149
sl@0
   150
// Unmap the pages described by aPages from all page tables and mappings,
// by delegating to DCoarseMemory.  Section-mapped (contiguous) chunks are
// not yet supported on this path.
void DLargeMappedMemory::UnmapPages(RPageArray::TIter aPages, TBool aDecommitting)
	{
	TRACE2(("DLargeMappedMemory[0x%08x]::UnmapPages(?,%d) index=0x%x count=0x%x",this,(bool)aDecommitting,aPages.Index(),aPages.Count()));

	// for now: assert pages do not overlap a contiguous area
	// todo: update contiguous state, update page tables and call UnmapPages on large mappings
#ifdef _DEBUG
	// stride of KPagesInPDE pages visits one page per chunk in the range;
	// NOTE(review): as in MapPages, the final chunk of a boundary-straddling
	// range may be skipped by this stride — debug-only gap
	for (TUint index = aPages.Index() ; index < aPages.IndexEnd() ; index += KPagesInPDE)
		{
		// IsChunkContiguous requires MmuLock
		MmuLock::Lock();
		__NK_ASSERT_DEBUG(!IsChunkContiguous(index >> KPagesInPDEShift));
		MmuLock::Unlock();
		}
#endif

	// unmap pages in all page tables and fine mappings
	DCoarseMemory::UnmapPages(aPages, aDecommitting);
	}
sl@0
   168
sl@0
   169
sl@0
   170
// Restrict access to the pages described by aPages (e.g. for demand paging),
// by delegating to DCoarseMemory.  Section-mapped (contiguous) chunks must
// not be affected, which the debug build verifies up front.
void DLargeMappedMemory::RestrictPages(RPageArray::TIter aPages, TRestrictPagesType aRestriction)
	{
	TRACE2(("DLargeMappedMemory[0x%08x]::RestrictPages(?,%d) index=0x%x count=0x%x",this,aRestriction,aPages.Index(),aPages.Count()));

#ifdef _DEBUG
	// sanity check: none of the affected chunks may be section-mapped
	TUint pageIndex = aPages.Index();
	const TUint endIndex = aPages.IndexEnd();
	while (pageIndex < endIndex)
		{
		MmuLock::Lock();
		__NK_ASSERT_DEBUG(!IsChunkContiguous(pageIndex >> KPagesInPDEShift));
		MmuLock::Unlock();
		pageIndex += KPagesInPDE;
		}
#endif

	DCoarseMemory::RestrictPages(aPages, aRestriction);
	}
sl@0
   186
sl@0
   187
sl@0
   188
// Read one bit of the per-chunk contiguous-state bitmap.
// Caller must hold MmuLock.
TBool DLargeMappedMemory::IsChunkContiguous(TInt aChunkIndex)
	{
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
	// 32 chunks per word: word index is aChunkIndex/32, bit is aChunkIndex%32
	TUint word = iContiguousState[aChunkIndex >> 5];
	return (word >> (aChunkIndex & 31)) & 1u;
	}
sl@0
   195
sl@0
   196
sl@0
   197
// Write one bit of the per-chunk contiguous-state bitmap.
// Caller must hold MmuLock.
void DLargeMappedMemory::SetChunkContiguous(TInt aChunkIndex, TBool aIsContiguous)
	{
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
	// 32 chunks per word: word index is aChunkIndex/32, bit is aChunkIndex%32
	TUint& word = iContiguousState[aChunkIndex >> 5];
	TUint bit = 1u << (aChunkIndex & 31);
	if (aIsContiguous)
		word |= bit;
	else
		word &= ~bit;
	}
sl@0
   204
sl@0
   205
sl@0
   206
//
sl@0
   207
// DLargeMapping
sl@0
   208
//
sl@0
   209
sl@0
   210
sl@0
   211
DLargeMapping::DLargeMapping() : DCoarseMapping(ELargeMapping)
	{
	// Behaves like a coarse mapping except for the ELargeMapping type flag;
	// DoMap and RemapPage use section PDEs for chunks the memory object
	// reports as contiguous.
	}
sl@0
   214
sl@0
   215
sl@0
   216
// Write the page directory entries for this mapping: a section PDE for each
// chunk the memory object knows to be physically contiguous, a page-table
// PDE where a page table exists, and nothing where neither applies.
// Always returns KErrNone.
TInt DLargeMapping::DoMap()
	{
	TRACE(("DLargeMapping[0x%08x]::DoMap()", this));
	__NK_ASSERT_DEBUG(((iStartIndex|iSizeInPages)&(KChunkMask>>KPageShift))==0); // be extra paranoid about alignment

	MmuLock::Lock();

	TPde* pPde = Mmu::PageDirectoryEntry(OsAsid(),Base());
	DLargeMappedMemory* memory = (DLargeMappedMemory*)Memory(ETrue); // safe because we're called from code which has added mapping to memory
	
	TUint flash = 0;
	TUint chunk = iStartIndex >> KPagesInPDEShift;
	TUint endChunk = (iStartIndex + iSizeInPages) >> KPagesInPDEShift;
	
	while(chunk < endChunk)
		{
		// periodically release and re-acquire MmuLock to bound hold time
		MmuLock::Flash(flash,KMaxPdesInOneGo*2);
		TPde pde = KPdeUnallocatedEntry;
		TPte* pt = memory->GetPageTable(PteType(), chunk);
		if (memory->IsChunkContiguous(chunk))
			// NOTE(review): dereferences pt[0] without a null check — assumes
			// a contiguous chunk always has a page table; confirm against
			// GetPageTable's guarantees
			pde = Mmu::PageToSectionEntry(pt[0],iBlankPde); // todo: use get phys addr?
		else if (pt)
			pde = Mmu::PageTablePhysAddr(pt)|iBlankPde;

		if (pde == KPdeUnallocatedEntry)
			{
			TRACE2(("!PDE %x=%x (was %x)",pPde,KPdeUnallocatedEntry,*pPde));
			__NK_ASSERT_DEBUG(*pPde==KPdeUnallocatedEntry);
			}
		else
			{
			TRACE2(("!PDE %x=%x (was %x)",pPde,pde,*pPde));
			// new PDE must fill an unallocated slot or match the existing entry
			__NK_ASSERT_DEBUG(*pPde==KPdeUnallocatedEntry || ((*pPde^pde)&~KPdeMatchMask)==0);
			*pPde = pde;
			SinglePdeUpdated(pPde);
			flash += 3; // increase flash rate because we've done quite a bit more work
			}

		++pPde;
		++chunk;
		}
	MmuLock::Unlock();

	return KErrNone;
	}
sl@0
   261
sl@0
   262
sl@0
   263
// Remap the page at aIndex in this mapping, adjusting the chunk's PDE if its
// contiguous state no longer matches: a section PDE covering a chunk that is
// no longer contiguous is demoted to a page-table PDE (the page tables
// themselves have already been updated by DPageTables).
//
// @param aPageArray         page array entry for the new physical page
// @param aIndex             page index within the memory object
// @param aMapInstanceCount  instance count captured by the caller; a mismatch
//                           means this mapping was reused and the remap must
//                           be abandoned
// @param aInvalidateTLB     whether TLB maintenance should be performed here
void DLargeMapping::RemapPage(TPhysAddr& aPageArray, TUint aIndex, TUint aMapInstanceCount, TBool aInvalidateTLB)
	{
	TRACE(("DLargeMapping[0x%08x]::RemapPage(%08x, %d, %d, %d)", this, aPageArray, aIndex, aMapInstanceCount, aInvalidateTLB));

	TInt chunkIndex = aIndex >> KPagesInPDEShift;

	MmuLock::Lock();
	DLargeMappedMemory* memory = (DLargeMappedMemory*)Memory(); // safe because we're called from code which has reference on tables, which has reference on memory
	TPte* pt = memory->GetPageTable(PteType(), chunkIndex);
	
	// check the page is still mapped and mapping isn't being detached 
	// or hasn't been reused for another purpose...
	if(!pt || BeingDetached() || aMapInstanceCount != MapInstanceCount())
		{
		// can't map pages to this mapping any more so just exit.
		TRACE(("  page no longer mapped"));
		MmuLock::Unlock();
		return;
		}
	
	TPde* pPde = Mmu::PageDirectoryEntry(OsAsid(),Base() + (chunkIndex << KChunkShift));
	TPde currentPde = *pPde;
	
	if (!memory->IsChunkContiguous(chunkIndex) && Mmu::PdeMapsSection(currentPde))
		{
		// break section mapping and replace with page table...
		TRACE2(("  breaking section mapping"));
		TPde pde = Mmu::PageTablePhysAddr(pt)|iBlankPde;
		TRACE2(("!PDE %x=%x (was %x)",pPde,pde,*pPde));
		// can't assert old value if the first page has been remapped
		__NK_ASSERT_DEBUG((aIndex & (KPagesInPDE - 1)) == 0 ||
						  *pPde == Mmu::PageToSectionEntry(pt[0],iBlankPde));
		*pPde = pde;
		SinglePdeUpdated(pPde);
		MmuLock::Unlock();
#ifndef COARSE_GRAINED_TLB_MAINTENANCE
		if (aInvalidateTLB) 
			{
			// invalidate every page in the chunk: the old section TLB entry
			// may have been used for any of them...
			TUint start = (chunkIndex << KPagesInPDEShift) - iStartIndex;
			TLinAddr addr = LinAddrAndOsAsid() + (start << KPageShift);
			TLinAddr endAddr = addr + KChunkSize;
			do InvalidateTLBForPage(addr);
			while((addr+=KPageSize)<endAddr);
			// fix: the loop above already covers the whole chunk; the previous
			// extra InvalidateTLBForPage(addr) here ran with addr == endAddr,
			// invalidating the first page of the *next* chunk, and was removed
			}
#endif
		}
	else if (memory->IsChunkContiguous(chunkIndex) && Mmu::PdeMapsPageTable(currentPde))
		{
		// reform section mapping...
		TRACE2(("  reforming section mapping"));
		__NK_ASSERT_ALWAYS(0); // todo: not yet implemented
		}
	else
		{
		// remap already handled by page table update in DPageTables...
		MmuLock::Unlock();
#ifndef COARSE_GRAINED_TLB_MAINTENANCE
		if (aInvalidateTLB)
			{
			// invalidate page...
			TUint start = aIndex - iStartIndex;
			TLinAddr addr = LinAddrAndOsAsid() + (start << KPageShift);
			InvalidateTLBForPage(addr);
			}
#endif
		}
	
	}
sl@0
   333
sl@0
   334
sl@0
   335
// Page in the pages described by aPages, delegating to DCoarseMapping.
// Section-mapped chunks never take this path, which the debug build asserts.
TInt DLargeMapping::PageIn(RPageArray::TIter aPages, TPinArgs& aPinArgs, TUint aMapInstanceCount)
	{
	TRACE(("DLargeMapping[0x%08x]::PageIn(%d, %d, ?, %d)", this, aPages.Index(), aPages.Count(), aMapInstanceCount));
#ifdef _DEBUG
	// paranoia: verify no affected chunk currently has a section PDE
	const TUint firstIndex = aPages.Index();
	const TUint lastIndex = firstIndex + aPages.Count();
	for (TUint pageIndex = firstIndex ; pageIndex < lastIndex ; pageIndex += KPagesInPDE)
		{
		TLinAddr addr = Base() + ((pageIndex - iStartIndex) << KPageShift);
		TRACE2(("  checking page %d at %08x", pageIndex, addr));
		TPde* pPde = Mmu::PageDirectoryEntry(OsAsid(),addr);
		__NK_ASSERT_DEBUG(!Mmu::PdeMapsSection(*pPde));
		}
#endif	
	return DCoarseMapping::PageIn(aPages, aPinArgs, aMapInstanceCount);
	}
sl@0
   352
sl@0
   353
sl@0
   354
TBool DLargeMapping::MovingPageIn(TPhysAddr& aPageArrayPtr, TUint aIndex)
	{
	// only RAM defragmentation uses MovingPageIn, and it never operates on
	// large mappings, so this stub must be unreachable
	(void)aPageArrayPtr;
	(void)aIndex;
	__NK_ASSERT_DEBUG(EFalse);
	return EFalse;
	}
sl@0
   360
sl@0
   361
sl@0
   362
TPte* DLargeMapping::FindPageTable(TLinAddr aLinAddr, TUint aMemoryIndex)
	{
	// only RAM defragmentation uses FindPageTable, and it never operates on
	// large mappings, so this stub must be unreachable
	(void)aLinAddr;
	(void)aMemoryIndex;
	__NK_ASSERT_DEBUG(EFalse);
	return NULL;
	}