os/kernelhwsrv/kernel/eka/memmodel/epoc/flexible/mmu/mmapping.cpp
author: sl@SLION-WIN7.fritz.box
date: Fri, 15 Jun 2012 03:10:57 +0200
changeset: 0 bde4ae8d615e
First public contribution.
// Copyright (c) 2007-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
//

#include <plat_priv.h>
#include "mm.h"
#include "mmu.h"
#include "mmapping.h"
#include "mobject.h"
#include "maddressspace.h"
#include "mptalloc.h"
#include "mmanager.h" // needed for DMemoryManager::Pin/Unpin, not nice, but no obvious way to break dependency
#include "cache_maintenance.inl"

//
// DMemoryMapping
//

DMemoryMapping::DMemoryMapping(TUint aType)
	: DMemoryMappingBase(aType)
	{
	}


TInt DMemoryMapping::Construct(TMemoryAttributes aAttributes, TMappingCreateFlags aFlags, TInt aOsAsid, TLinAddr aAddr, TUint aSize, TLinAddr aColourOffset)
	{
	TRACE(("DMemoryMapping[0x%08x]::Construct(0x%x,0x%x,%d,0x%08x,0x%08x,0x%08x)",this,(TUint32&)aAttributes,aFlags,aOsAsid,aAddr,aSize,aColourOffset));

	// setup PDE values...
	iBlankPde = Mmu::BlankPde(aAttributes);

	// setup flags...
	if(aFlags&EMappingCreateReserveAllResources)
		Flags() |= EPermanentPageTables;

	// allocate virtual memory...
	TInt r = AllocateVirtualMemory(aFlags,aOsAsid,aAddr,aSize,aColourOffset);
	if(r==KErrNone)
		{
		// add to address space...
		TLinAddr addr = iAllocatedLinAddrAndOsAsid&~KPageMask;
		TInt osAsid = iAllocatedLinAddrAndOsAsid&KPageMask;
		r = AddressSpace[osAsid]->AddMapping(addr,this);
		if(r!=KErrNone)
			FreeVirtualMemory();
		}

	return r;
	}


DMemoryMapping::~DMemoryMapping()
	{
	TRACE(("DMemoryMapping[0x%08x]::~DMemoryMapping()",this));
	Destruct();
	}


void DMemoryMapping::Destruct()
	{
	__NK_ASSERT_DEBUG(!IsAttached());

	// remove from address space...
	TLinAddr addr = iAllocatedLinAddrAndOsAsid&~KPageMask;
	TInt osAsid = iAllocatedLinAddrAndOsAsid&KPageMask;
	TAny* removed = AddressSpace[osAsid]->RemoveMapping(addr);
	if(removed)
		__NK_ASSERT_DEBUG(removed==this);

	FreeVirtualMemory();
	}


void DMemoryMapping::BTraceCreate()
	{
	MmuLock::Lock();
	TUint32 data[4] = { iStartIndex, iSizeInPages, OsAsid(), Base() };
	BTraceContextN(BTrace::EFlexibleMemModel,BTrace::EMemoryMappingCreate,this,Memory(),data,sizeof(data));
	MmuLock::Unlock();
	}


TInt DMemoryMapping::Map(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TMappingPermissions aPermissions)
	{
	TRACE(("DMemoryMapping[0x%08x]::Map(0x%08x,0x%x,0x%x,0x%08x)",this,aMemory,aIndex,aCount,aPermissions));
	__NK_ASSERT_DEBUG(!IsAttached());

	// check reserved resources are compatible (memory objects with reserved resources
	// don't expect to have to allocate memory when mapping new pages)...
	if(aMemory->iFlags&DMemoryObject::EReserveResources && !(Flags()&EPermanentPageTables))
		return KErrArgument;

	// check arguments for coarse mappings...
	if(IsCoarse())
		{
		if(!aMemory->IsCoarse())
			return KErrArgument;
		if((aCount|aIndex)&(KChunkMask>>KPageShift))
			return KErrArgument;
		}

	TLinAddr base = iAllocatedLinAddrAndOsAsid & ~KPageMask;
	TLinAddr top = base + (aCount << KPageShift);

	// check user/supervisor memory partitioning...
	if (aPermissions & EUser)
		{
		if (base > KUserMemoryLimit || top > KUserMemoryLimit)
			return KErrAccessDenied;
		}
	else
		{
		if (base < KUserMemoryLimit || top < KUserMemoryLimit)
			return KErrAccessDenied;
		}

	// check that mapping doesn't straddle KUserMemoryLimit or KGlobalMemoryBase ...
	__NK_ASSERT_DEBUG((base < KUserMemoryLimit) == (top <= KUserMemoryLimit));
	__NK_ASSERT_DEBUG((base < KGlobalMemoryBase) == (top <= KGlobalMemoryBase));

	// check that only global memory is mapped into the kernel process
	TBool global = base >= KGlobalMemoryBase;
	__NK_ASSERT_DEBUG(global || (iAllocatedLinAddrAndOsAsid & KPageMask) != KKernelOsAsid);

	// setup attributes...
	PteType() = Mmu::PteType(aPermissions,global);
	iBlankPte = Mmu::BlankPte(aMemory->Attributes(),PteType());

	// setup base address...
	TUint colourOffset = ((aIndex&KPageColourMask)<<KPageShift);
	if(colourOffset+aCount*KPageSize > iAllocatedSize)
		return KErrTooBig;
	__NK_ASSERT_DEBUG(!iLinAddrAndOsAsid || ((iLinAddrAndOsAsid^iAllocatedLinAddrAndOsAsid)&~(KPageColourMask<<KPageShift))==0); // either new, or differs only in page colour
	iLinAddrAndOsAsid = iAllocatedLinAddrAndOsAsid+colourOffset;

	// attach to memory object...
	TInt r = Attach(aMemory,aIndex,aCount);

	// cleanup if error...
	if(r!=KErrNone)
		iLinAddrAndOsAsid = 0;

	return r;
	}
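
// --------------------------------------------------------------------------
// Illustrative sketch (editor's addition, not part of the original source):
// the typical lifecycle of a mapping as implied by the methods above.
// Construct() reserves virtual address space, Map() attaches the mapping to
// a memory object, Unmap() detaches it, and destruction releases the address
// space. The memory object, size and permission value used here are
// assumptions for illustration only; error handling is elided.
#if 0
	DFineMapping* mapping = new DFineMapping;
	// reserve virtual space for 16 pages in the kernel address space...
	TInt r = mapping->Construct(EMemoryAttributeStandard, EMappingCreateDefault,
								KKernelOsAsid, 0, 16<<KPageShift, 0);
	if(r==KErrNone)
		r = mapping->Map(someMemoryObject, 0, 16, somePermissions); // hypothetical arguments
	// ... use the memory at mapping->Base() ...
	mapping->Unmap();
	mapping->Close(); // destruction runs Destruct(), freeing the address space
#endif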


void DMemoryMapping::Unmap()
	{
	Detach();
	// we can't clear iLinAddrAndOsAsid here because this may be needed by other code,
	// e.g. DFineMapping::MapPages/UnmapPages/RestrictPages/PageIn
	}


TInt DMemoryMapping::AllocateVirtualMemory(TMappingCreateFlags aFlags, TInt aOsAsid, TLinAddr aAddr, TUint aSize, TLinAddr aColourOffset)
	{
	TRACE(("DMemoryMapping[0x%08x]::AllocateVirtualMemory(0x%x,%d,0x%08x,0x%08x,0x%08x)",this,aFlags,aOsAsid,aAddr,aSize,aColourOffset));
	__NK_ASSERT_DEBUG((aAddr&KPageMask)==0);
	__NK_ASSERT_DEBUG(!iAllocatedLinAddrAndOsAsid);
	__NK_ASSERT_DEBUG(!iAllocatedSize);

	// setup PDE type...
	TUint pdeType = 0;
	if(aFlags&EMappingCreateCommonVirtual)
		pdeType |= EVirtualSlabTypeCommonVirtual;
	if(aFlags&EMappingCreateDemandPaged)
		pdeType |= EVirtualSlabTypeDemandPaged;

	TInt r;
	TUint colourOffset = aColourOffset&(KPageColourMask<<KPageShift);
	TLinAddr addr;
	TUint size;
	if(aFlags&(EMappingCreateFixedVirtual|EMappingCreateAdoptVirtual))
		{
		// just use the supplied virtual address...
		__NK_ASSERT_ALWAYS(aAddr);
		__NK_ASSERT_ALWAYS(colourOffset==0);
		__NK_ASSERT_DEBUG((aFlags&EMappingCreateAdoptVirtual)==0 || AddressSpace[aOsAsid]->CheckPdeType(aAddr,aSize,pdeType));
		addr = aAddr;
		size = aSize;
		r = KErrNone;
		}
	else
		{
		if(aFlags&(EMappingCreateExactVirtual|EMappingCreateCommonVirtual))
			{
			__NK_ASSERT_ALWAYS(aAddr); // address must be specified
			}
		else
			{
			__NK_ASSERT_ALWAYS(!aAddr); // address shouldn't have been specified
			}

		// adjust for colour...
		TUint allocSize = aSize+colourOffset;
		TUint allocAddr = aAddr;
		if(allocAddr)
			{
			allocAddr -= colourOffset;
			if(allocAddr&(KPageColourMask<<KPageShift))
				return KErrArgument; // wrong colour
			}

		// allocate virtual addresses...
		if(aFlags&EMappingCreateUserGlobalVirtual)
			{
			if(aOsAsid!=(TInt)KKernelOsAsid)
				return KErrArgument;
			r = DAddressSpace::AllocateUserGlobalVirtualMemory(addr,size,allocAddr,allocSize,pdeType);
			}
		else
			r = AddressSpace[aOsAsid]->AllocateVirtualMemory(addr,size,allocAddr,allocSize,pdeType);
		}

	if(r==KErrNone)
		{
		iAllocatedLinAddrAndOsAsid = addr|aOsAsid;
		iAllocatedSize = size;
		}

	TRACE(("DMemoryMapping[0x%08x]::AllocateVirtualMemory returns %d address=0x%08x",this,r,addr));
	return r;
	}
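
// --------------------------------------------------------------------------
// Illustrative sketch (editor's addition, not part of the original source):
// iAllocatedLinAddrAndOsAsid packs a page-aligned virtual address and an OS
// ASID into a single word, a scheme used throughout this file. Because the
// address is page aligned, its low KPageShift bits are free to hold the
// ASID. A minimal demonstration of the packing and unpacking, assuming the
// ASID always fits within KPageMask:
#if 0
	TLinAddr addr = 0x70000000u;          // page-aligned virtual address
	TInt osAsid = 5;                      // example ASID
	TUint32 packed = addr|osAsid;         // as done at the end of AllocateVirtualMemory()
	TLinAddr addrOut = packed&~KPageMask; // recovers 0x70000000
	TInt asidOut = packed&KPageMask;      // recovers 5
#endif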


void DMemoryMapping::FreeVirtualMemory()
	{
	if(!iAllocatedSize)
		return; // no virtual memory to free

	TRACE(("DMemoryMapping[0x%08x]::FreeVirtualMemory()",this));

	iLinAddrAndOsAsid = 0;

	TLinAddr addr = iAllocatedLinAddrAndOsAsid&~KPageMask;
	TInt osAsid = iAllocatedLinAddrAndOsAsid&KPageMask;
	AddressSpace[osAsid]->FreeVirtualMemory(addr,iAllocatedSize);
	iAllocatedLinAddrAndOsAsid = 0;
	iAllocatedSize = 0;
	}


//
// DCoarseMapping
//

DCoarseMapping::DCoarseMapping()
	: DMemoryMapping(ECoarseMapping)
	{
	}


DCoarseMapping::DCoarseMapping(TUint aFlags)
	: DMemoryMapping(ECoarseMapping|aFlags)
	{
	}


DCoarseMapping::~DCoarseMapping()
	{
	}


TInt DCoarseMapping::DoMap()
	{
	TRACE(("DCoarseMapping[0x%08x]::DoMap()", this));
	__NK_ASSERT_DEBUG(((iStartIndex|iSizeInPages)&(KChunkMask>>KPageShift))==0); // be extra paranoid about alignment

	MmuLock::Lock();
	TPde* pPde = Mmu::PageDirectoryEntry(OsAsid(),Base());
	DCoarseMemory* memory = (DCoarseMemory*)Memory(true); // safe because we're called from code which has added mapping to memory

	TUint flash = 0;
	TUint chunk = iStartIndex >> KPagesInPDEShift;
	TUint endChunk = (iStartIndex + iSizeInPages) >> KPagesInPDEShift;
	TBool sectionMappingsBroken = EFalse;

	while(chunk < endChunk)
		{
		MmuLock::Flash(flash,KMaxPdesInOneGo*2);
		TPte* pt = memory->GetPageTable(PteType(), chunk);
		if(!pt)
			{
			TRACE2(("!PDE %x=%x (was %x)",pPde,KPdeUnallocatedEntry,*pPde));
			__NK_ASSERT_DEBUG(*pPde==KPdeUnallocatedEntry);
			}
		else
			{
			TPde pde = Mmu::PageTablePhysAddr(pt)|iBlankPde;
#ifdef	__USER_MEMORY_GUARDS_ENABLED__
			if (IsUserMapping())
				pde = PDE_IN_DOMAIN(pde, USER_MEMORY_DOMAIN);
#endif
			TRACE2(("!PDE %x=%x (was %x)",pPde,pde,*pPde));
			if (Mmu::PdeMapsSection(*pPde))
				{
				// break previous section mapping...
				__NK_ASSERT_DEBUG(*pPde==Mmu::PageToSectionEntry(pt[0],iBlankPde));
				sectionMappingsBroken = ETrue;
				}
			else
				__NK_ASSERT_DEBUG(*pPde==KPdeUnallocatedEntry || ((*pPde^pde)&~KPdeMatchMask)==0);
			*pPde = pde;
			SinglePdeUpdated(pPde);
			flash += 3; // increase flash rate because we've done quite a bit more work
			}
		++pPde;
		++chunk;
		}
	MmuLock::Unlock();

	if (sectionMappingsBroken)
		{
		// We must invalidate the TLB since we broke section mappings created by the bootstrap.
		// Since this will only ever happen on boot, we just invalidate the entire TLB for this
		// process.
		InvalidateTLBForAsid(OsAsid());
		}

	return KErrNone;
	}
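
// --------------------------------------------------------------------------
// Illustrative sketch (editor's addition, not part of the original source):
// DoMap() above walks one page directory entry per "chunk" of the memory
// object. Assuming the usual ARM flexible-memory-model values (4KB pages,
// 1MB chunks, hence 256 pages per page table and KPagesInPDEShift == 8),
// the chunk arithmetic works out as follows:
#if 0
	TUint startIndex = 0x200;                        // page index 512
	TUint sizeInPages = 0x300;                       // 768 pages
	TUint chunk = startIndex >> KPagesInPDEShift;    // first PDE: 512/256 == 2
	TUint endChunk = (startIndex + sizeInPages) >> KPagesInPDEShift; // 1280/256 == 5
	// so the loop updates the PDEs for chunks 2, 3 and 4: three 1MB regions
#endif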


void DCoarseMapping::DoUnmap()
	{
	TRACE(("DCoarseMapping[0x%08x]::DoUnmap()", this));
	MmuLock::Lock();
	TPde* pPde = Mmu::PageDirectoryEntry(OsAsid(),Base());
	TPde* pPdeEnd = pPde+(iSizeInPages>>(KChunkShift-KPageShift));
	TUint flash = 0;
	do
		{
		MmuLock::Flash(flash,KMaxPdesInOneGo);
		TPde pde = KPdeUnallocatedEntry;
		TRACE2(("!PDE %x=%x",pPde,pde));
		*pPde = pde;
		SinglePdeUpdated(pPde);
		++pPde;
		}
	while(pPde<pPdeEnd);
	MmuLock::Unlock();

	InvalidateTLBForAsid(OsAsid());
	}


TInt DCoarseMapping::MapPages(RPageArray::TIter aPages, TUint aMapInstanceCount)
	{
	// shouldn't ever be called because coarse mappings don't have their own page tables...
	__NK_ASSERT_DEBUG(0);
	return KErrNotSupported;
	}


void DCoarseMapping::UnmapPages(RPageArray::TIter aPages, TUint aMapInstanceCount)
	{
	// shouldn't ever be called because coarse mappings don't have their own page tables...
	__NK_ASSERT_DEBUG(0);
	}

void DCoarseMapping::RemapPage(TPhysAddr& aPageArray, TUint aIndex, TUint aMapInstanceCount, TBool aInvalidateTLB)
	{
	// shouldn't ever be called because coarse mappings don't have their own page tables...
	__NK_ASSERT_DEBUG(0);
	}

void DCoarseMapping::RestrictPagesNA(RPageArray::TIter aPages, TUint aMapInstanceCount)
	{
	// shouldn't ever be called because coarse mappings don't have their own page tables...
	__NK_ASSERT_DEBUG(0);
	}


TInt DCoarseMapping::PageIn(RPageArray::TIter aPages, TPinArgs& aPinArgs, TUint aMapInstanceCount)
	{
	MmuLock::Lock();

	if(!IsAttached())
		{
		MmuLock::Unlock();
		return KErrNotFound;
		}

	DCoarseMemory* memory = (DCoarseMemory*)Memory(true); // safe because we've checked mapping IsAttached
	return memory->PageIn(this, aPages, aPinArgs, aMapInstanceCount);
	}


TBool DCoarseMapping::MovingPageIn(TPhysAddr& aPageArrayPtr, TUint aIndex)
	{
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
	__NK_ASSERT_DEBUG(IsAttached());

	DCoarseMemory* memory = (DCoarseMemory*)Memory(true); // safe because we've checked mapping IsAttached
	TBool success = memory->MovingPageIn(this, aPageArrayPtr, aIndex);
	if (success)
		{
		TLinAddr addr = Base() + (aIndex - iStartIndex) * KPageSize;
		InvalidateTLBForPage(addr);
		}
	return success;
	}


TPte* DCoarseMapping::FindPageTable(TLinAddr aLinAddr, TUint aMemoryIndex)
	{
	TRACE(("DCoarseMapping::FindPageTable(0x%x, %d)", aLinAddr, aMemoryIndex));
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
	__NK_ASSERT_DEBUG(IsAttached());
	DCoarseMemory* memory = (DCoarseMemory*)Memory(true); // safe because we've checked mapping IsAttached
	return memory->FindPageTable(this, aLinAddr, aMemoryIndex);
	}


//
// DFineMapping
//

DFineMapping::DFineMapping()
	: DMemoryMapping(0)
	{
	}


DFineMapping::~DFineMapping()
	{
	TRACE(("DFineMapping[0x%08x]::~DFineMapping()",this));
	FreePermanentPageTables();
	}

#ifdef _DEBUG
void DFineMapping::ValidatePageTable(TPte* aPt, TLinAddr aAddr)
	{
	if(aPt)
		{
		// check page table is correct...
		SPageTableInfo* pti = SPageTableInfo::FromPtPtr(aPt);
		__NK_ASSERT_DEBUG(pti->CheckFine(aAddr&~KChunkMask,OsAsid()));
		DMemoryObject* memory = Memory();
		if(memory)
			{
			if(memory->IsDemandPaged() && !IsPinned() && !(Flags()&EPageTablesAllocated))
				__NK_ASSERT_DEBUG(pti->IsDemandPaged());
			else
				__NK_ASSERT_DEBUG(!pti->IsDemandPaged());
			}
		}
	}
#endif

TPte* DFineMapping::GetPageTable(TLinAddr aAddr)
	{
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());

	// get address of PDE which refers to the page table...
	TPde* pPde = Mmu::PageDirectoryEntry(OsAsid(),aAddr);

	// get page table...
	TPte* pt = Mmu::PageTableFromPde(*pPde);
#ifdef _DEBUG
	ValidatePageTable(pt, aAddr);
#endif
	return pt;
	}


TPte* DFineMapping::GetOrAllocatePageTable(TLinAddr aAddr)
	{
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());

	// get address of PDE which refers to the page table...
	TPde* pPde = Mmu::PageDirectoryEntry(OsAsid(),aAddr);

	// get page table...
	TPte* pt = Mmu::PageTableFromPde(*pPde);
	if(!pt)
		{
		pt = AllocatePageTable(aAddr,pPde);
#ifdef _DEBUG
		ValidatePageTable(pt, aAddr);
#endif
		}

	return pt;
	}


TPte* DFineMapping::GetOrAllocatePageTable(TLinAddr aAddr, TPinArgs& aPinArgs)
	{
	__NK_ASSERT_DEBUG(aPinArgs.iPinnedPageTables);

	if(!aPinArgs.HaveSufficientPages(KNumPagesToPinOnePageTable))
		return 0;

	TPte* pinnedPt = 0;
	for(;;)
		{
		TPte* pt = GetOrAllocatePageTable(aAddr);

		if(pinnedPt && pinnedPt!=pt)
			{
			// previously pinned page table not needed...
			::PageTables.UnpinPageTable(pinnedPt,aPinArgs);

			// make sure we have memory for next pin attempt...
			MmuLock::Unlock();
			aPinArgs.AllocReplacementPages(KNumPagesToPinOnePageTable);
			if(!aPinArgs.HaveSufficientPages(KNumPagesToPinOnePageTable)) // if out of memory...
				{
				// make sure we free any unneeded page table we allocated...
				if(pt)
					FreePageTable(Mmu::PageDirectoryEntry(OsAsid(),aAddr));
				MmuLock::Lock();
				return 0;
				}
			MmuLock::Lock();
			}

		if(!pt)
			return 0; // out of memory

		if(pt==pinnedPt)
			{
			// we got a page table and it was pinned...
			*aPinArgs.iPinnedPageTables++ = pt;
			++aPinArgs.iNumPinnedPageTables;
			return pt;
			}

		// don't pin page table if it's not paged (e.g. unpaged part of ROM)...
		SPageTableInfo* pti = SPageTableInfo::FromPtPtr(pt);
		if(!pti->IsDemandPaged())
			return pt;

		// pin the page table...
		if (::PageTables.PinPageTable(pt,aPinArgs) != KErrNone)
			{// Couldn't pin the page table...
			MmuLock::Unlock();
			// make sure we free any unneeded page table we allocated...
			FreePageTable(Mmu::PageDirectoryEntry(OsAsid(),aAddr));
			MmuLock::Lock();
			return 0;
			}

		pinnedPt = pt;
		}
	}


TInt DFineMapping::AllocateVirtualMemory(TMappingCreateFlags aFlags, TInt aOsAsid, TLinAddr aAddr, TUint aSize, TLinAddr aColourOffset)
	{
	TInt r = DMemoryMapping::AllocateVirtualMemory(aFlags,aOsAsid,aAddr,aSize,aColourOffset);
	if(r==KErrNone && (Flags()&EPermanentPageTables))
		{
		r = AllocatePermanentPageTables();
		if(r!=KErrNone)
			FreeVirtualMemory();
		}
	return r;
	}


void DFineMapping::FreeVirtualMemory()
	{
	FreePermanentPageTables();
	DMemoryMapping::FreeVirtualMemory();
	}


TPte* DFineMapping::AllocatePageTable(TLinAddr aAddr, TPde* aPdeAddress, TBool aPermanent)
	{
	TRACE2(("DFineMapping[0x%08x]::AllocatePageTable(0x%08x,0x%08x,%d)",this,aAddr,aPdeAddress,aPermanent));

	__NK_ASSERT_DEBUG(MmuLock::IsHeld());

	for(;;)
		{
		// mapping is going, so we don't need a page table any more...
		if(BeingDetached())
			return 0;

		// get paged state...
		TBool demandPaged = false;
		if(!aPermanent)
			{
			DMemoryObject* memory = Memory();
			__NK_ASSERT_DEBUG(memory); // can't be NULL because not BeingDetached()
			demandPaged = memory->IsDemandPaged();
			}

		// get page table...
		TPte* pt = Mmu::PageTableFromPde(*aPdeAddress);
		if(pt!=0)
			{
			// we have a page table...
			__NK_ASSERT_DEBUG(SPageTableInfo::FromPtPtr(pt)->CheckFine(aAddr&~KChunkMask,iAllocatedLinAddrAndOsAsid&KPageMask));
			if(aPermanent)
				{
				__NK_ASSERT_DEBUG(BeingDetached()==false);
				__NK_ASSERT_ALWAYS(!demandPaged);
				SPageTableInfo* pti = SPageTableInfo::FromPtPtr(pt);
				pti->IncPermanenceCount();
				}
			return pt;
			}

		// allocate a new page table...
		MmuLock::Unlock();
		::PageTables.Lock();
		TPte* newPt = ::PageTables.Alloc(demandPaged);
		if(!newPt)
			{
			// out of memory...
			::PageTables.Unlock();
			MmuLock::Lock();
			return 0;
			}

		// check if new page table is still needed...
		MmuLock::Lock();
		pt = Mmu::PageTableFromPde(*aPdeAddress);
		if(pt)
			{
			// someone else has already allocated a page table,
			// so free the one we just allocated and try again...
			MmuLock::Unlock();
			::PageTables.Free(newPt);
			}
		else if(BeingDetached())
			{
			// mapping is going, so we don't need a page table any more...
			MmuLock::Unlock();
			::PageTables.Free(newPt);
			::PageTables.Unlock();
			MmuLock::Lock();
			return 0;
			}
		else
			{
			// setup new page table...
			SPageTableInfo* pti = SPageTableInfo::FromPtPtr(newPt);
			pti->SetFine(aAddr&~KChunkMask,iAllocatedLinAddrAndOsAsid&KPageMask);

			TPde pde = Mmu::PageTablePhysAddr(newPt)|iBlankPde;
#ifdef	__USER_MEMORY_GUARDS_ENABLED__
			if (IsUserMapping())
				pde = PDE_IN_DOMAIN(pde, USER_MEMORY_DOMAIN);
#endif
			TRACE2(("!PDE %x=%x",aPdeAddress,pde));
			__NK_ASSERT_DEBUG(((*aPdeAddress^pde)&~KPdeMatchMask)==0 || *aPdeAddress==KPdeUnallocatedEntry);
			*aPdeAddress = pde;
			SinglePdeUpdated(aPdeAddress);

			MmuLock::Unlock();
			}

		// loop back and recheck...
		::PageTables.Unlock();
		MmuLock::Lock();
		}
	}


void DFineMapping::FreePageTable(TPde* aPdeAddress)
	{
	TRACE2(("DFineMapping[0x%08x]::FreePageTable(0x%08x)",this,aPdeAddress));

	// get page table lock...
	::PageTables.Lock();
	MmuLock::Lock();

	// find page table...
	TPte* pt = Mmu::PageTableFromPde(*aPdeAddress);
	if(pt)
		{
		SPageTableInfo* pti = SPageTableInfo::FromPtPtr(pt);
		if(pti->PageCount() || pti->PermanenceCount())
			{
			// page table still in use, so don't free it...
			pt = 0;
			}
		else
			{
			// page table not used, so unmap it...
			TPde pde = KPdeUnallocatedEntry;
			TRACE2(("!PDE %x=%x",aPdeAddress,pde));
			*aPdeAddress = pde;
			SinglePdeUpdated(aPdeAddress);
			}
		}

	MmuLock::Unlock();
	if(pt)
		::PageTables.Free(pt);
	::PageTables.Unlock();
	}
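
// --------------------------------------------------------------------------
// Illustrative sketch (editor's addition, not part of the original source):
// AllocatePageTable() above uses an "unlock, allocate, relock, recheck"
// pattern, because the page table allocator cannot be called with the
// MmuLock held. The shape of the pattern, reduced to its essentials:
#if 0
	for(;;)
		{
		// (MmuLock held) fast path: another thread may have installed one...
		TPte* pt = Mmu::PageTableFromPde(*aPdeAddress);
		if(pt)
			return pt;
		MmuLock::Unlock();                      // drop lock to allocate
		TPte* newPt = ::PageTables.Alloc(demandPaged);
		MmuLock::Lock();                        // retake lock, re-examine the world
		// ...either install newPt into the PDE, or free it because we raced
		// with another allocator or the mapping started detaching, then loop...
		}
#endif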


void DFineMapping::RemapPage(TPhysAddr& aPageArray, TUint aIndex, TUint aMapInstanceCount, TBool aInvalidateTLB)
	{
	TRACE2(("DFineMapping[0x%08x]::RemapPage(0x%x,0x%x,%d,%d)",this,aPageArray,aIndex,aMapInstanceCount,aInvalidateTLB));

	__NK_ASSERT_DEBUG(aIndex >= iStartIndex);
	__NK_ASSERT_DEBUG(aIndex < iStartIndex + iSizeInPages);

	TLinAddr addr = Base() + ((aIndex - iStartIndex) << KPageShift);
	TUint pteIndex = (addr >> KPageShift) & (KChunkMask >> KPageShift);

	// get address of page table...
	MmuLock::Lock();
	TPte* pPte = GetPageTable(addr);

	// check the page is still mapped and mapping isn't being detached
	// or hasn't been reused for another purpose...
	if(!pPte || BeingDetached() || aMapInstanceCount != MapInstanceCount())
		{
		// can't map pages to this mapping any more so just exit.
		MmuLock::Unlock();
		return;
		}

	// remap the page...
	pPte += pteIndex;
	Mmu::RemapPage(pPte, aPageArray, iBlankPte);
	MmuLock::Unlock();

#ifndef COARSE_GRAINED_TLB_MAINTENANCE
	// clean TLB...
	if (aInvalidateTLB)
		{
		InvalidateTLBForPage(addr + OsAsid());
		}
#endif
	}


TInt DFineMapping::MapPages(RPageArray::TIter aPages, TUint aMapInstanceCount)
	{
	TRACE2(("DFineMapping[0x%08x]::MapPages(?,%d) index=0x%x count=0x%x",this,aMapInstanceCount,aPages.Index(),aPages.Count()));

	__NK_ASSERT_DEBUG(aPages.Count());
	__NK_ASSERT_DEBUG(aPages.Index()>=iStartIndex);
	__NK_ASSERT_DEBUG(aPages.IndexEnd()-iStartIndex<=iSizeInPages);

	TLinAddr addr = Base()+(aPages.Index()-iStartIndex)*KPageSize;
	for(;;)
		{
		TUint pteIndex = (addr>>KPageShift)&(KChunkMask>>KPageShift);

		// calculate max number of pages to do...
		TUint n = (KChunkSize>>KPageShift)-pteIndex; // pages left in page table
		if(n>KMaxPagesInOneGo)
			n = KMaxPagesInOneGo;

		// get some pages...
		TPhysAddr* pages;
		n = aPages.Pages(pages,n);
		if(!n)
			break;

		// get address of page table...
		MmuLock::Lock();
		TPte* pPte = GetOrAllocatePageTable(addr);

		// check mapping isn't being unmapped, or been reused for another purpose...
		if(BeingDetached() || aMapInstanceCount!=MapInstanceCount())
			{
			// can't map pages to this mapping any more, so free any page table
			// we just got (if it's not used)...
			if(!pPte)
				MmuLock::Unlock();
			else
				{
				SPageTableInfo* pti = SPageTableInfo::FromPtPtr(pPte);
				TBool keepPt = pti->PermanenceCount() || pti->PageCount();
				MmuLock::Unlock();
				if(!keepPt)
					FreePageTable(Mmu::PageDirectoryEntry(OsAsid(),addr));
				}
			// then end...
			return KErrNone;
			}

		// check for OOM...
		if(!pPte)
			{
			MmuLock::Unlock();
			return KErrNoMemory;
			}

		// map some pages...
		pPte += pteIndex;
		TBool keepPt = Mmu::MapPages(pPte, n, pages, iBlankPte);
		MmuLock::Unlock();

		// free page table if no longer needed...
		if(!keepPt)
			FreePageTable(Mmu::PageDirectoryEntry(OsAsid(),addr));

		// move on...
		aPages.Skip(n);
		addr += n*KPageSize;
		}

	return KErrNone;
	}
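
// --------------------------------------------------------------------------
// Illustrative sketch (editor's addition, not part of the original source):
// the batching pattern used by MapPages() above, and by UnmapPages(),
// RestrictPagesNA() and PageIn() below. Each iteration handles at most the
// pages remaining in the current page table, further capped to
// KMaxPagesInOneGo so the MmuLock isn't held for too long. Assuming 4KB
// pages and 1MB chunks (256 PTEs per page table):
#if 0
	TLinAddr addr = 0x004FE000u;                     // 2 pages before a chunk boundary
	TUint pteIndex = (addr>>KPageShift)&(KChunkMask>>KPageShift); // == 254
	TUint n = (KChunkSize>>KPageShift)-pteIndex;     // == 2 pages left in this page table
	// after those 2 pages, addr is chunk aligned, pteIndex becomes 0, and the
	// next batch may take up to KMaxPagesInOneGo pages from the next table
#endif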


void DFineMapping::UnmapPages(RPageArray::TIter aPages, TUint aMapInstanceCount)
	{
	TRACE2(("DFineMapping[0x%08x]::UnmapPages(?,%d) index=0x%x count=0x%x",this,aMapInstanceCount,aPages.Index(),aPages.Count()));

	__NK_ASSERT_DEBUG(aPages.Count());

	TLinAddr addr = Base()+(aPages.Index()-iStartIndex)*KPageSize;
#ifndef COARSE_GRAINED_TLB_MAINTENANCE
	TLinAddr startAddr = addr;
#endif
	for(;;)
		{
		TUint pteIndex = (addr>>KPageShift)&(KChunkMask>>KPageShift);

		// calculate max number of pages to do...
		TUint n = (KChunkSize>>KPageShift)-pteIndex; // pages left in page table
		if(n>KMaxPagesInOneGo)
			n = KMaxPagesInOneGo;

		// get some pages...
		TPhysAddr* pages;
		n = aPages.Pages(pages,n);
		if(!n)
			break;

		MmuLock::Lock();

		// check that mapping hasn't been reused for another purpose...
		if(aMapInstanceCount!=MapInstanceCount())
			{
			MmuLock::Unlock();
			break;
			}

		// get address of PTE for pages...
		TPde* pPde = Mmu::PageDirectoryEntry(OsAsid(),addr);
		TPte* pPte = Mmu::PageTableFromPde(*pPde);
		if(pPte)
			{
			// unmap some pages...
			pPte += pteIndex;
			TBool keepPt = Mmu::UnmapPages(pPte,n,pages);
			MmuLock::Unlock();

			// free page table if no longer needed...
			if(!keepPt)
				FreePageTable(pPde);
			}
		else
			{
			// no page table found...
			MmuLock::Unlock();
			}

		// move on...
		aPages.Skip(n);
		addr += n*KPageSize;
		}

#ifndef COARSE_GRAINED_TLB_MAINTENANCE
	// clean TLB...
	TLinAddr endAddr = addr;
	addr = startAddr+OsAsid();
	do InvalidateTLBForPage(addr);
	while((addr+=KPageSize)<endAddr);
#endif
	}


void DFineMapping::RestrictPagesNA(RPageArray::TIter aPages, TUint aMapInstanceCount)
	{
	TRACE2(("DFineMapping[0x%08x]::RestrictPages(?,%d) index=0x%x count=0x%x",this,aMapInstanceCount,aPages.Index(),aPages.Count()));

	__NK_ASSERT_DEBUG(aPages.Count());

	TLinAddr addr = Base()+(aPages.Index()-iStartIndex)*KPageSize;
#ifndef COARSE_GRAINED_TLB_MAINTENANCE
	TLinAddr startAddr = addr;
#endif
	for(;;)
		{
		TUint pteIndex = (addr>>KPageShift)&(KChunkMask>>KPageShift);

		// calculate max number of pages to do...
		TUint n = (KChunkSize>>KPageShift)-pteIndex; // pages left in page table
		if(n>KMaxPagesInOneGo)
			n = KMaxPagesInOneGo;

		// get some pages...
		TPhysAddr* pages;
		n = aPages.Pages(pages,n);
		if(!n)
			break;

		MmuLock::Lock();

		// check that mapping hasn't been reused for another purpose...
		if(aMapInstanceCount!=MapInstanceCount())
			{
			MmuLock::Unlock();
			break;
			}

		// get address of PTE for pages...
		TPde* pPde = Mmu::PageDirectoryEntry(OsAsid(),addr);
		TPte* pPte = Mmu::PageTableFromPde(*pPde);
		if(pPte)
			{
			// restrict some pages...
			pPte += pteIndex;
			Mmu::RestrictPagesNA(pPte,n,pages);
			}
		MmuLock::Unlock();

		// move on...
		aPages.Skip(n);
		addr += n*KPageSize;
		}

#ifndef COARSE_GRAINED_TLB_MAINTENANCE
	// clean TLB...
	TLinAddr endAddr = addr;
	addr = startAddr+OsAsid();
	do InvalidateTLBForPage(addr);
	while((addr+=KPageSize)<endAddr);
#endif
	}


TInt DFineMapping::PageIn(RPageArray::TIter aPages, TPinArgs& aPinArgs, TUint aMapInstanceCount)
	{
	TRACE2(("DFineMapping[0x%08x]::PageIn(?,?,%d) index=0x%x count=0x%x",this,aMapInstanceCount,aPages.Index(),aPages.Count()));

	__NK_ASSERT_DEBUG(aPages.Count());
	__NK_ASSERT_DEBUG(aPages.Index()>=iStartIndex);
	__NK_ASSERT_DEBUG(aPages.IndexEnd()-iStartIndex<=iSizeInPages);

	TInt r = KErrNone;

	TLinAddr addr = Base()+(aPages.Index()-iStartIndex)*KPageSize;
#ifndef COARSE_GRAINED_TLB_MAINTENANCE
	TLinAddr startAddr = addr;
#endif
	TBool pinPageTable = aPinArgs.iPinnedPageTables!=0; // check if we need to pin the first page table
	for(;;)
		{
		TUint pteIndex = (addr>>KPageShift)&(KChunkMask>>KPageShift);
		if(pteIndex==0)
			pinPageTable = aPinArgs.iPinnedPageTables!=0;	// started a new page table, check if we need to pin it

		// calculate max number of pages to do...
		TUint n = (KChunkSize>>KPageShift)-pteIndex; // pages left in page table
		if(n>KMaxPagesInOneGo)
			n = KMaxPagesInOneGo;

		// get some pages...
		TPhysAddr* pages;
		n = aPages.Pages(pages,n);
		if(!n)
			break;

		// make sure we have memory to pin the page table if required...
		if(pinPageTable)
			aPinArgs.AllocReplacementPages(KNumPagesToPinOnePageTable);

		// get address of page table...
		MmuLock::Lock();
		TPte* pPte;
		if(pinPageTable)
			pPte = GetOrAllocatePageTable(addr,aPinArgs);
		else
			pPte = GetOrAllocatePageTable(addr);

		// check mapping isn't being unmapped or hasn't been reused...
		if(BeingDetached() || aMapInstanceCount != MapInstanceCount())
			{
			// can't map pages to this mapping any more, so free any page table
			// we just got (if it's not used)...
			if(!pPte)
				MmuLock::Unlock();
			else
				{
				SPageTableInfo* pti = SPageTableInfo::FromPtPtr(pPte);
				TBool keepPt = pti->PermanenceCount() || pti->PageCount();
				MmuLock::Unlock();
				if(!keepPt)
					FreePageTable(Mmu::PageDirectoryEntry(OsAsid(),addr));
				}
			// then end...
			r = KErrNotFound;
			break;
			}

		// check for OOM...
		if(!pPte)
			{
			MmuLock::Unlock();
			r = KErrNoMemory;
			break;
			}

		// map some pages...
		pPte += pteIndex;
		TPte blankPte = iBlankPte;
		if(aPinArgs.iReadOnly)
			blankPte = Mmu::MakePteInaccessible(blankPte,true);
		TBool keepPt = Mmu::PageInPages(pPte, n, pages, blankPte);
		MmuLock::Unlock();

		// free page table if no longer needed...
		if(!keepPt)
			FreePageTable(Mmu::PageDirectoryEntry(OsAsid(),addr));

		// move on...
		aPages.Skip(n);
		addr += n*KPageSize;
		pinPageTable = false;
		}

#ifndef COARSE_GRAINED_TLB_MAINTENANCE
	// clean TLB...
	TLinAddr endAddr = addr;
	addr = startAddr+OsAsid();
	do InvalidateTLBForPage(addr);
	while((addr+=KPageSize)<endAddr);
#endif
	return r;
	}


TBool DFineMapping::MovingPageIn(TPhysAddr& aPageArrayPtr, TUint aIndex)
	{
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
	__NK_ASSERT_DEBUG(IsAttached());
	__NK_ASSERT_DEBUG(!BeingDetached());

	TLinAddr addr = Base() + (aIndex - iStartIndex) * KPageSize;
	TUint pteIndex = (addr >> KPageShift) & (KChunkMask >> KPageShift);

	// get address of page table...
	TPte* pPte = GetPageTable(addr);

	// Check the page is still mapped.
	if (!pPte)
		return EFalse;

	// remap the page...
	pPte += pteIndex;
	Mmu::RemapPage(pPte, aPageArrayPtr, iBlankPte);
	InvalidateTLBForPage(addr);
	return ETrue;
	}


TInt DFineMapping::DoMap()
	{
	TRACE(("DFineMapping[0x%08x]::DoMap()", this));
	DMemoryObject* memory = Memory(true); // safe because we're called from code which has added mapping to memory
	if(memory->IsDemandPaged())
		{
		// do nothing, allow pages to be mapped on demand...
		return KErrNone;
		}

	RPageArray::TIter pageIter;
	memory->iPages.FindStart(iStartIndex,iSizeInPages,pageIter);

	// map pages...
	TInt r = KErrNone;
	for(;;)
		{
		// find some pages...
		RPageArray::TIter pageList;
		TUint n = pageIter.Find(pageList);
		if(!n)
			break; // done

		// map some pages...
		r = MapPages(pageList,MapInstanceCount());

		// done with pages...
		pageIter.FindRelease(n);

		if(r!=KErrNone)
			break;
		}

	memory->iPages.FindEnd(iStartIndex,iSizeInPages);
	return r;
	}


void DFineMapping::DoUnmap()
	{
	TRACE2(("DFineMapping[0x%08x]::DoUnmap()",this));

	TLinAddr startAddr = Base();
	TUint count = iSizeInPages;
	TLinAddr addr = startAddr;
	TPde* pPde = Mmu::PageDirectoryEntry(OsAsid(),addr);

	for(;;)
		{
		TUint pteIndex = (addr>>KPageShift)&(KChunkMask>>KPageShift);

		// calculate number of pages to do...
		TUint n = (KChunkSize>>KPageShift)-pteIndex; // pages left in page table
		if(n>count)
			n = count;

		// get page table...
		MmuLock::Lock();
		TPte* pPte = Mmu::PageTableFromPde(*pPde);
		if(!pPte)
			{
			// no page table found, so nothing to do...
			MmuLock::Unlock();
			}
		else
			{
			// unmap some pages...
			pPte += pteIndex;
			if(n>KMaxPagesInOneGo)
				n = KMaxPagesInOneGo;
			TBool keepPt = Mmu::UnmapPages(pPte, n);
			MmuLock::Unlock();

			// free page table if no longer needed...
			if(!keepPt)
				FreePageTable(pPde);
			}

		// move on...
		addr += n*KPageSize;
		count -= n;
		if(!count)
			break;
		if(!(addr&KChunkMask))
			++pPde;
		}

#ifdef COARSE_GRAINED_TLB_MAINTENANCE
	InvalidateTLBForAsid(OsAsid());
#else
	// clean TLB...
	TLinAddr endAddr = addr;
	addr = LinAddrAndOsAsid();
	do InvalidateTLBForPage(addr);
	while((addr+=KPageSize)<endAddr);
#endif
	}


TInt DFineMapping::AllocatePermanentPageTables()
	{
	TRACE2(("DFineMapping[0x%08x]::AllocatePermanentPageTables()",this));
	__NK_ASSERT_DEBUG(((Flags()&EPageTablesAllocated)==0));
	__NK_ASSERT_DEBUG(iBlankPde);

	TLinAddr addr = iAllocatedLinAddrAndOsAsid&~KPageMask;
	TInt osAsid = iAllocatedLinAddrAndOsAsid&KPageMask;
	TPde* pStartPde = Mmu::PageDirectoryEntry(osAsid,addr);
	TPde* pEndPde = Mmu::PageDirectoryEntry(osAsid,addr+iAllocatedSize-1);
	TPde* pPde = pStartPde;

	while(pPde<=pEndPde)
		{
		MmuLock::Lock();
		TPte* pPte = AllocatePageTable(addr,pPde,true);
		if(!pPte)
			{
			// out of memory...
			MmuLock::Unlock();
			FreePermanentPageTables(pStartPde,pPde-1);
			return KErrNoMemory;
			}
		MmuLock::Unlock();

		addr += KChunkSize;
		++pPde;
		}

	TRACE2(("DFineMapping[0x%08x]::AllocatePermanentPageTables() done",this));
	Flags() |= DMemoryMapping::EPageTablesAllocated;
	return KErrNone;
	}


void DFineMapping::FreePermanentPageTables(TPde* aFirstPde, TPde* aLastPde)
	{
	Flags() &= ~DMemoryMapping::EPageTablesAllocated;

	MmuLock::Lock();

	TUint flash = 0;
	TPde* pPde = aFirstPde;
	while(pPde<=aLastPde)
		{
		TPte* pPte = Mmu::PageTableFromPde(*pPde);
		__NK_ASSERT_DEBUG(pPte);
		SPageTableInfo* pti = SPageTableInfo::FromPtPtr(pPte);
		if(pti->DecPermanenceCount() || pti->PageCount())
			{
			// still in use...
			MmuLock::Flash(flash,KMaxPageInfoUpdatesInOneGo*2);
			}
		else
			{
			// page table no longer used for anything...
			MmuLock::Unlock();
			FreePageTable(pPde);
			MmuLock::Lock();
			}

		++pPde;
		}

	MmuLock::Unlock();
	}


void DFineMapping::FreePermanentPageTables()
	{
	if((Flags()&EPageTablesAllocated)==0)
		return;

	TRACE2(("DFineMapping[0x%08x]::FreePermanentPageTables()",this));

	TLinAddr addr = iAllocatedLinAddrAndOsAsid&~KPageMask;
	TInt osAsid = iAllocatedLinAddrAndOsAsid&KPageMask;
	TPde* pPde = Mmu::PageDirectoryEntry(osAsid,addr);
	TPde* pEndPde = Mmu::PageDirectoryEntry(osAsid,addr+iAllocatedSize-1);
	FreePermanentPageTables(pPde,pEndPde);
	}


TPte* DFineMapping::FindPageTable(TLinAddr aLinAddr, TUint aMemoryIndex)
	{
	TRACE(("DFineMapping::FindPageTable(0x%x, %d)", aLinAddr, aMemoryIndex));
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
	__NK_ASSERT_DEBUG(IsAttached());
	return GetPageTable(aLinAddr);
	}


//
// DKernelPinMapping
//
DKernelPinMapping::DKernelPinMapping()
	// : iReservePages(0)	// Allocated on the kernel heap so will already be 0.
	{
	Flags() |= EPhysicalPinningMapping | EPinned;
	}


TInt DKernelPinMapping::Construct(TUint aReserveMaxSize)
	{
	TInt r = KErrNone;
	if (aReserveMaxSize)
		{
		// Should not call Construct() on a mapping that has already reserved resources.
		__NK_ASSERT_DEBUG(!iReservePages);
		r = DFineMapping::Construct(EMemoryAttributeStandard,
									EMappingCreateReserveAllResources,
									KKernelOsAsid,
									0,
									aReserveMaxSize,
									0);
		if (r == KErrNone)
			iReservePages = aReserveMaxSize >> KPageShift;
		}
	return r;
	}


TInt DKernelPinMapping::MapAndPin(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TMappingPermissions aPermissions)
	{
	if (IsAttached())
		{
		return KErrInUse;
		}

	if (!iReservePages)
		{
		TInt r = DFineMapping::Construct(EMemoryAttributeStandard,
										EMappingCreateDefault,
										KKernelOsAsid,
										0,
										aCount,
										0);
		if (r != KErrNone)
			return r;
		}
	// Map the memory, this will pin it first then map it.
	TInt r = DFineMapping::Map(aMemory, aIndex, aCount, aPermissions);

	if (r != KErrNone && !iReservePages)
		{// Reset this mapping object so it can be reused but has freed its address space.
		DMemoryMapping::Destruct();
		}
	return r;
	}


void DKernelPinMapping::UnmapAndUnpin()
	{
	DFineMapping::Unmap();
	if (!iReservePages)
		{// Reset this mapping object so it can be reused but has freed its address space.
		DMemoryMapping::Destruct();
		}
	}
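
// --------------------------------------------------------------------------
// Illustrative sketch (editor's addition, not part of the original source):
// how a driver-style client might use DKernelPinMapping as implied above.
// Either Construct() up front with a reserved maximum size, so a later
// MapAndPin() cannot fail through lack of address space, or construct lazily
// on first use. KMaxTransferSize, clientMemory and somePermissions are
// hypothetical names for illustration only; error handling is elided.
#if 0
	DKernelPinMapping* pin = new DKernelPinMapping;
	TInt r = pin->Construct(KMaxTransferSize);       // reserve resources up front
	// ... later, for each transfer ...
	r = pin->MapAndPin(clientMemory, pageIndex, pageCount, somePermissions);
	if(r==KErrNone)
		{
		// ... DMA or copy using the pinned, kernel-mapped pages at pin->Base() ...
		pin->UnmapAndUnpin();                        // the mapping object stays reusable
		}
#endif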


//
// DPhysicalPinMapping
//

DPhysicalPinMapping::DPhysicalPinMapping()
	: DMemoryMappingBase(EPinned|EPhysicalPinningMapping)
	{
	}


TInt DPhysicalPinMapping::Pin(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TMappingPermissions aPermissions)
	{
	PteType() = Mmu::PteType(aPermissions,true);
	return Attach(aMemory,aIndex,aCount);
	}


void DPhysicalPinMapping::Unpin()
	{
	Detach();
	}


TInt DPhysicalPinMapping::MapPages(RPageArray::TIter /*aPages*/, TUint /*aMapInstanceCount*/)
	{
	// shouldn't ever be called because these mappings are always pinned...
	__NK_ASSERT_DEBUG(0);
	return KErrNotSupported;
	}


void DPhysicalPinMapping::UnmapPages(RPageArray::TIter /*aPages*/, TUint /*aMapInstanceCount*/)
	{
	// nothing to do...
	}


void DPhysicalPinMapping::RemapPage(TPhysAddr& /*aPageArrayPtr*/, TUint /*aIndex*/, TUint /*aMapInstanceCount*/, TBool /*aInvalidateTLB*/)
	{
	// shouldn't ever be called because physically pinned mappings block page moving.
	__NK_ASSERT_DEBUG(0);
	}


void DPhysicalPinMapping::RestrictPagesNA(RPageArray::TIter /*aPages*/, TUint /*aMapInstanceCount*/)
	{
	// nothing to do...
	}


TInt DPhysicalPinMapping::PageIn(RPageArray::TIter /*aPages*/, TPinArgs& /*aPinArgs*/, TUint /*aMapInstanceCount*/)
	{
	// nothing to do...
	return KErrNone;
	}


TInt DPhysicalPinMapping::MovingPageIn(TPhysAddr& /*aPageArrayPtr*/, TUint /*aIndex*/)
	{
	// Should never be asked to page in a page that is being moved as physical
	// pin mappings don't own any page tables.
	__NK_ASSERT_DEBUG(0);
	return KErrAbort;
	}

TInt DPhysicalPinMapping::DoMap()
	{
	// nothing to do...
	return KErrNone;
	}


void DPhysicalPinMapping::DoUnmap()
	{
	// nothing to do...
	}


//
// DVirtualPinMapping
//

DVirtualPinMapping::DVirtualPinMapping()
	: iMaxCount(0)
	{
	// Clear flag so it is possible to distinguish between virtual and physical pin mappings.
	Flags() &= ~EPhysicalPinningMapping;
	}


DVirtualPinMapping::~DVirtualPinMapping()
	{
	TRACE(("DVirtualPinMapping[0x%08x]::~DVirtualPinMapping()",this));
	FreePageTableArray();
	}


DVirtualPinMapping* DVirtualPinMapping::New(TUint aMaxCount)
	{
	TRACE(("DVirtualPinMapping::New(0x%x)",aMaxCount));
	DVirtualPinMapping* self = new DVirtualPinMapping;
	if(self && aMaxCount) // check allocation succeeded before using self
		{
		// pages have been reserved for our use.

		// Create the array for storing pinned page tables now, so we
		// don't risk out-of-memory errors trying to do so later...
		if(self->AllocPageTableArray(aMaxCount)!=KErrNone)
			{
			// failed, so cleanup...
			self->Close();
			self = 0;
			}
		else
			{
			// success, so remember the pages that have been reserved for us...
			self->iMaxCount = aMaxCount;
			self->Flags() |= EPinningPagesReserved;
			}
		}
	TRACE(("DVirtualPinMapping::New(0x%x) returns 0x%08x",aMaxCount,self));
	return self;
	}


TUint DVirtualPinMapping::MaxPageTables(TUint aPageCount)
	{
	return (aPageCount+2*KChunkSize/KPageSize-2)>>(KChunkShift-KPageShift);
	}
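
// --------------------------------------------------------------------------
// Illustrative worked example (editor's addition, not part of the original
// source): MaxPageTables() computes the worst-case number of page tables a
// run of aPageCount pages can straddle, allowing for any alignment of the
// first page. Assuming 4KB pages and 1MB chunks (256 pages per page table),
// the expression reduces to (aPageCount+510)>>8, i.e. ceil((aPageCount+255)/256):
#if 0
	// aPageCount == 1   -> (1+510)>>8   == 1 page table
	// aPageCount == 257 -> (257+510)>>8 == 2 page tables (the run can at worst
	//                      start one page before a page-table boundary)
	// aPageCount == 512 -> (512+510)>>8 == 3 page tables when maximally misaligned
#endif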
sl@0
  1466
sl@0
  1467
sl@0
  1468
TInt DVirtualPinMapping::AllocPageTableArray(TUint aCount)
sl@0
  1469
	{
sl@0
  1470
	__NK_ASSERT_ALWAYS(iAllocatedPinnedPageTables==0);
sl@0
  1471
	TUint maxPt	= MaxPageTables(aCount);
sl@0
  1472
	if(maxPt>KSmallPinnedPageTableCount)
sl@0
  1473
		{
sl@0
  1474
		iAllocatedPinnedPageTables = new TPte*[maxPt];
sl@0
  1475
		if(!iAllocatedPinnedPageTables)
sl@0
  1476
			return KErrNoMemory;
sl@0
  1477
		}
sl@0
  1478
	return KErrNone;
sl@0
  1479
	}
sl@0
  1480
sl@0
  1481
sl@0
  1482
void DVirtualPinMapping::FreePageTableArray()
sl@0
  1483
	{
sl@0
  1484
	delete [] iAllocatedPinnedPageTables;
sl@0
  1485
	iAllocatedPinnedPageTables = 0;
sl@0
  1486
	}
sl@0
  1487
sl@0
  1488
sl@0
  1489
TPte** DVirtualPinMapping::PageTableArray()
sl@0
  1490
	{
sl@0
  1491
	return iAllocatedPinnedPageTables ? iAllocatedPinnedPageTables : iSmallPinnedPageTablesArray;
sl@0
  1492
	}
sl@0
  1493
sl@0
  1494
sl@0
  1495
TInt DVirtualPinMapping::Pin(	DMemoryObject* aMemory, TUint aIndex, TUint aCount, TMappingPermissions aPermissions, 
sl@0
  1496
								DMemoryMappingBase* aMapping, TUint aMappingInstanceCount)
sl@0
  1497
	{
sl@0
  1498
	// Virtual pinning ensures a page is always mapped to a particular virtual address
sl@0
  1499
	// and therefore require a non-pinning mapping of the virtual address to pin.
sl@0
  1500
	__NK_ASSERT_ALWAYS(aMapping && !aMapping->IsPinned());
sl@0
  1501
sl@0
  1502
	if(iMaxCount)
sl@0
  1503
		{
sl@0
  1504
		if(aCount>iMaxCount)
sl@0
  1505
			return KErrArgument;
sl@0
  1506
		}
sl@0
  1507
	else
sl@0
  1508
		{
sl@0
  1509
		TInt r = AllocPageTableArray(aCount);
sl@0
  1510
		if(r!=KErrNone)
sl@0
  1511
			return r;
sl@0
  1512
		}
sl@0
  1513
sl@0
  1514
	iPinVirtualMapping = aMapping;
sl@0
  1515
	iPinVirtualMapInstanceCount = aMappingInstanceCount;
sl@0
  1516
	TInt r = DPhysicalPinMapping::Pin(aMemory,aIndex,aCount,aPermissions);
sl@0
  1517
	iPinVirtualMapping = 0;
sl@0
  1518
sl@0
  1519
	return r;
sl@0
  1520
	}
sl@0
  1521
sl@0
  1522
sl@0
  1523
void DVirtualPinMapping::Unpin()
sl@0
  1524
	{
sl@0
  1525
	Detach();
sl@0
  1526
	}


void DVirtualPinMapping::UnpinPageTables(TPinArgs& aPinArgs)
	{
	TPte** pPt = PageTableArray();
	TPte** pPtEnd = pPt+iNumPinnedPageTables;

	MmuLock::Lock();
	while(pPt<pPtEnd)
		::PageTables.UnpinPageTable(*pPt++,aPinArgs);
	MmuLock::Unlock();
	iNumPinnedPageTables = 0;

	if(!iMaxCount)
		FreePageTableArray();
	}
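
// The pointer array is only freed here when it was allocated on demand by
// Pin() (iMaxCount==0); mappings created via New() with a non-zero maximum
// keep their preallocated array so later Pin() calls cannot fail for lack
// of memory.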


void DVirtualPinMapping::RemapPage(TPhysAddr& /*aPageArrayPtr*/, TUint /*aIndex*/, TUint /*aMapInstanceCount*/, TBool /*aInvalidateTLB*/)
	{
	__NK_ASSERT_DEBUG(0);
	}


TInt DVirtualPinMapping::PageIn(RPageArray::TIter aPages, TPinArgs& aPinArgs, TUint aMapInstanceCount)
	{
	if(iPinVirtualMapping)
		return iPinVirtualMapping->PageIn(aPages, aPinArgs, iPinVirtualMapInstanceCount);
	return KErrNone;
	}
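
// Outside of a Pin() call iPinVirtualMapping is zero, so paging in pages
// covered by an idle virtual pin mapping is a no-op; the mapping holds no
// page table entries of its own to update.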


TInt DVirtualPinMapping::MovingPageIn(TPhysAddr& /*aPageArrayPtr*/, TUint /*aIndex*/)
	{
	// Should never be asked to page in a page that is being moved as virtual
	// pin mappings don't own any page tables.
	__NK_ASSERT_DEBUG(0);
	return KErrAbort;
	}


TInt DVirtualPinMapping::DoPin(TPinArgs& aPinArgs)
	{
	// setup for page table pinning...
	aPinArgs.iPinnedPageTables = PageTableArray();

	// do pinning...
	TInt r = DPhysicalPinMapping::DoPin(aPinArgs);

	// save results...
	iNumPinnedPageTables = aPinArgs.iNumPinnedPageTables;
	__NK_ASSERT_DEBUG(iNumPinnedPageTables<=MaxPageTables(iSizeInPages));

	// cleanup if error...
	if(r!=KErrNone)
		UnpinPageTables(aPinArgs);

	return r;
	}
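
// aPinArgs acts as an in/out parameter here: iPinnedPageTables tells the
// lower layers where to record each page table they pin, and on return
// iNumPinnedPageTables says how many entries were filled in. The assert
// checks the count never exceeds the worst case computed by MaxPageTables().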


void DVirtualPinMapping::DoUnpin(TPinArgs& aPinArgs)
	{
	DPhysicalPinMapping::DoUnpin(aPinArgs);
	UnpinPageTables(aPinArgs);
	}



//
// DMemoryMappingBase
//


DMemoryMappingBase::DMemoryMappingBase(TUint aType)
	{
	Flags() = aType; // rest of members cleared by DBase
	}


TInt DMemoryMappingBase::Attach(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
	{
	TRACE(("DMemoryMappingBase[0x%08x]::Attach(0x%08x,0x%x,0x%x)",this,aMemory,aIndex,aCount));
	__NK_ASSERT_DEBUG(!IsAttached());
	TInt r;

	if(++iMapInstanceCount>1)
		{// This mapping is being reused...

		// Non-pinned mappings can be reused, however this is only exercised
		// by aligned shared buffers whose memory is managed by the unpaged
		// or hardware memory manager. Reusing mappings to paged or movable
		// memory hasn't been tested; it would require the interactions of
		// mapping reuse with the fault handler, pinning etc. to be verified.
		__NK_ASSERT_DEBUG(	IsPinned() ||
							aMemory->iManager == TheUnpagedMemoryManager ||
							aMemory->iManager == TheHardwareMemoryManager);

		// make sure new instance count is seen by other threads which may be operating
		// on old mapping instance (this will stop them changing the mapping any more)...
		MmuLock::Lock();
		MmuLock::Unlock();
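		// (the empty Lock/Unlock pair is a deliberate synchronisation point:
		// any thread still inside an MmuLock-protected section against the
		// old instance must exit it before we proceed, and will then observe
		// the incremented iMapInstanceCount)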
		// clear unmapping flag from previous use...
		__e32_atomic_and_ord16(&Flags(), (TUint16)~(EDetaching|EPageUnmapVetoed));
		}

	__NK_ASSERT_DEBUG((Flags()&(EDetaching|EPageUnmapVetoed))==0);

	// set region being mapped...
	iStartIndex = aIndex;
	iSizeInPages = aCount;

	// reserve any pages required for pinning demand paged memory.
	// We must do this before we add the mapping to the memory object
	// because once that is done the pages we are mapping will be prevented
	// from being paged out. That could leave the paging system without
	// enough pages to correctly handle page faults...
	TPinArgs pinArgs;
	pinArgs.iReadOnly = IsReadOnly();
	if(IsPinned() && aMemory->IsDemandPaged())
		{
		pinArgs.iUseReserve = Flags()&EPinningPagesReserved;
		r = pinArgs.AllocReplacementPages(aCount);
		if(r!=KErrNone)
			return r;
		}

	// link into memory object...
	r = aMemory->AddMapping(this);
	if(r==KErrNone)
		{
		// pin pages if needed...
		if(IsPinned())
			r = DoPin(pinArgs);

		// add pages to this mapping...
		if(r==KErrNone)
			r = DoMap();

		// revert if error...
		if(r!=KErrNone)
			Detach();
		}

	// free any left over pinning pages...
	pinArgs.FreeReplacementPages();

	return r;
	}
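
// Lifecycle sketch (illustrative; names are placeholders, not code from this
// file): a concrete mapping is attached to exactly one memory object and
// detached again before reuse or destruction, e.g.
//
//     TInt r = mapping->Attach(memory, index, count);   // map [index,index+count)
//     if(r==KErrNone)
//         {
//         // ... mapping is live; page faults and pinning operate on it ...
//         mapping->Detach();
//         }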


void DMemoryMappingBase::Detach()
	{
	TRACE(("DMemoryMappingBase[0x%08x]::Detach()",this));
	__NK_ASSERT_DEBUG(IsAttached());

	// set EDetaching flag, which prevents anyone modifying pages in this
	// mapping, except to remove them...
	MmuLock::Lock();
	__e32_atomic_ior_ord16(&Flags(), (TUint16)EDetaching);
	MmuLock::Unlock();

	// remove all pages from this mapping...
	DoUnmap();

	// unpin pages if needed...
	TPinArgs pinArgs;
	if(IsPinned())
		DoUnpin(pinArgs);

	// unlink from memory object...
	iMemory->RemoveMapping(this);

	// free any spare pages produced by unpinning...
	pinArgs.FreeReplacementPages();
	}


TInt DMemoryMappingBase::DoPin(TPinArgs& aPinArgs)
	{
	DMemoryObject* memory = Memory(true); // safe because we're called from code which has added mapping to memory
	return memory->iManager->Pin(memory,this,aPinArgs);
	}


void DMemoryMappingBase::DoUnpin(TPinArgs& aPinArgs)
	{
	DMemoryObject* memory = Memory(true); // safe because we're called from code which will be removing this mapping from memory afterwards
	memory->iManager->Unpin(memory,this,aPinArgs);
	}


void DMemoryMappingBase::LinkToMemory(DMemoryObject* aMemory, TMappingList& aMappingList)
	{
	TRACE(("DMemoryMappingBase[0x%08x]::LinkToMemory(0x%08x,?)",this,aMemory));
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
	__NK_ASSERT_DEBUG(aMappingList.LockIsHeld());
	__NK_ASSERT_ALWAYS(!IsAttached());
	__NK_ASSERT_DEBUG(!BeingDetached());
	aMappingList.Add(this);
	iMemory = aMemory;
	iMemory->SetMappingAddedFlag();
	}


void DMemoryMappingBase::UnlinkFromMemory(TMappingList& aMappingList)
	{
	TRACE(("DMemoryMappingBase[0x%08x]::UnlinkFromMemory(?)",this));

	// unlink...
	MmuLock::Lock();
	aMappingList.Lock();
	__NK_ASSERT_DEBUG(IsAttached());
	__NK_ASSERT_DEBUG(BeingDetached());
	aMappingList.Remove(this);
	DMemoryObject* memory = iMemory;
	iMemory = 0;
	aMappingList.Unlock();
	MmuLock::Unlock();

	// if mapping had vetoed any page decommits...
	if(Flags()&DMemoryMapping::EPageUnmapVetoed)
		{
		// then queue cleanup of decommitted pages...
		memory->iManager->QueueCleanup(memory,DMemoryManager::ECleanupDecommitted);
		}
	}
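
// A mapping gets EPageUnmapVetoed when it has blocked the removal of pages
// that the manager tried to decommit while this mapping was live (hedged:
// the veto mechanism itself lives in the manager/unmap code, not shown here).
// Queueing ECleanupDecommitted once the mapping is unlinked gives the manager
// the chance to finally free those pages.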


TInt DMemoryMappingBase::PhysAddr(TUint aIndex, TUint aCount, TPhysAddr& aPhysicalAddress, TPhysAddr* aPhysicalPageList)
	{
	__NK_ASSERT_ALWAYS(IsAttached() && IsPhysicalPinning());

	__NK_ASSERT_ALWAYS(TUint(aIndex+aCount)>aIndex && TUint(aIndex+aCount)<=iSizeInPages);
	aIndex += iStartIndex;

	DCoarseMemory* memory = (DCoarseMemory*)Memory(true); // safe because we should only be called whilst memory is Pinned
	TInt r = memory->PhysAddr(aIndex,aCount,aPhysicalAddress,aPhysicalPageList);
	if(r!=KErrNone)
		return r;

	if(memory->IsDemandPaged() && !IsReadOnly())
		{
		// the memory is demand paged and writeable so we need to mark it as dirty
		// as we have to assume that the memory will be modified via the physical
		// addresses we return...
		MmuLock::Lock();
		TPhysAddr* pages = aPhysicalPageList;
		TUint count = aCount;
		while(count)
			{
			SPageInfo* pi = SPageInfo::FromPhysAddr(*(pages++));
			pi->SetDirty();
			if((count&(KMaxPageInfoUpdatesInOneGo-1))==0)
				MmuLock::Flash(); // flash lock every KMaxPageInfoUpdatesInOneGo iterations of the loop
			--count;
			}
		MmuLock::Unlock();
		}

	return KErrNone;
	}
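
// Illustrative numbers (hedged: assuming KMaxPageInfoUpdatesInOneGo were 64):
// for a 256-page region the loop would flash MmuLock on the iterations where
// count is 256, 192, 128 and 64 (count is tested before being decremented),
// so no more than 64 SPageInfo updates happen in any one MmuLock hold; the
// hold time stays bounded instead of growing with aCount.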



//
// Debug
//

void DMemoryMappingBase::Dump()
	{
#ifdef _DEBUG
	Kern::Printf("DMemoryMappingBase[0x%08x]::Dump()",this);
	Kern::Printf("  IsAttached() = %d",(bool)IsAttached());
	Kern::Printf("  iMemory = 0x%08x",iMemory);
	Kern::Printf("  iStartIndex = 0x%x",iStartIndex);
	Kern::Printf("  iSizeInPages = 0x%x",iSizeInPages);
	Kern::Printf("  Flags() = 0x%x",Flags());
	Kern::Printf("  PteType() = 0x%x",PteType());
#endif // _DEBUG
	}


void DMemoryMapping::Dump()
	{
#ifdef _DEBUG
	Kern::Printf("DMemoryMapping[0x%08x]::Dump()",this);
	Kern::Printf("  Base() = 0x%08x",iLinAddrAndOsAsid&~KPageMask);
	Kern::Printf("  OsAsid() = %d",iLinAddrAndOsAsid&KPageMask);
	Kern::Printf("  iBlankPde = 0x%08x",iBlankPde);
	Kern::Printf("  iBlankPte = 0x%08x",iBlankPte);
	Kern::Printf("  iAllocatedLinAddrAndOsAsid = 0x%08x",iAllocatedLinAddrAndOsAsid);
	Kern::Printf("  iAllocatedSize = 0x%x",iAllocatedSize);
	DMemoryMappingBase::Dump();
#endif // _DEBUG
	}


void DVirtualPinMapping::Dump()
	{
#ifdef _DEBUG
	Kern::Printf("DVirtualPinMapping[0x%08x]::Dump()",this);
	Kern::Printf("  iMaxCount = %d",iMaxCount);
	Kern::Printf("  iNumPinnedPageTables = %d",iNumPinnedPageTables);
	DMemoryMappingBase::Dump();
#endif // _DEBUG
	}