os/kernelhwsrv/kernel/eka/memmodel/epoc/flexible/mmu/mobject.cpp
author sl@SLION-WIN7.fritz.box
Fri, 15 Jun 2012 03:10:57 +0200
changeset 0 bde4ae8d615e
permissions -rw-r--r--
First public contribution.
// Copyright (c) 2007-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
//

#include <plat_priv.h>
#include "mm.h"
#include "mmu.h"

#include "mobject.h"
#include "mmapping.h"
#include "mptalloc.h"
#include "mmanager.h"
#include "cache_maintenance.inl"

const TUint KMaxMappingsInOneGo = KMaxPageInfoUpdatesInOneGo; // must be power-of-2



//
// MemoryObjectLock
//

/**
The mutex pool used to assign locks to memory objects.
@see #MemoryObjectLock.
*/
DMutexPool MemoryObjectMutexPool;

void MemoryObjectLock::Lock(DMemoryObject* aMemory)
	{
	TRACE2(("MemoryObjectLock::Lock(0x%08x) try",aMemory));
	MemoryObjectMutexPool.Wait(aMemory->iLock);
	TRACE2(("MemoryObjectLock::Lock(0x%08x) acquired",aMemory));
	}

void MemoryObjectLock::Unlock(DMemoryObject* aMemory)
	{
	TRACE2(("MemoryObjectLock::Unlock(0x%08x)",aMemory));
	MemoryObjectMutexPool.Signal(aMemory->iLock);
	}

TBool MemoryObjectLock::IsHeld(DMemoryObject* aMemory)
	{
	return MemoryObjectMutexPool.IsHeld(aMemory->iLock);
	}



//
// DMemoryObject
//

DMemoryObject::DMemoryObject(DMemoryManager* aManager, TUint aFlags, TUint aSizeInPages, TMemoryAttributes aAttributes, TMemoryCreateFlags aCreateFlags)
	: iManager(aManager), iFlags(aFlags), iAttributes(Mmu::CanonicalMemoryAttributes(aAttributes)),
	  iSizeInPages(aSizeInPages)
	{
	__ASSERT_COMPILE(EMemoryAttributeMask<0x100); // make sure aAttributes fits into a TUint8

	TMemoryType type = (TMemoryType)(aAttributes&EMemoryAttributeTypeMask);
	iRamAllocFlags = type;
	if(aCreateFlags&EMemoryCreateNoWipe)
		iRamAllocFlags |= Mmu::EAllocNoWipe;
	else if(aCreateFlags&EMemoryCreateUseCustomWipeByte)
		{
		TUint8 wipeByte = (aCreateFlags>>EMemoryCreateWipeByteShift)&0xff;
		iRamAllocFlags |= wipeByte<<Mmu::EAllocWipeByteShift;
		iRamAllocFlags |= Mmu::EAllocUseCustomWipeByte;
		}

	if(aCreateFlags&EMemoryCreateDemandPaged)
		iFlags |= EDemandPaged;
	if(aCreateFlags&EMemoryCreateReserveAllResources)
		iFlags |= EReserveResources;
	if(aCreateFlags&EMemoryCreateDisallowPinning)
		iFlags |= EDenyPinning;
	if(aCreateFlags&EMemoryCreateReadOnly)
		iFlags |= EDenyWriteMappings;
	if(!(aCreateFlags&EMemoryCreateAllowExecution))
		iFlags |= EDenyExecuteMappings;
	}


TInt DMemoryObject::Construct()
	{
	TBool preAllocateMemory = iFlags&(EReserveResources|EDemandPaged);
	TInt r = iPages.Construct(iSizeInPages,preAllocateMemory);
	return r;
	}


DMemoryObject::~DMemoryObject()
	{
	TRACE(("DMemoryObject[0x%08x]::~DMemoryObject()",this));
	__NK_ASSERT_DEBUG(iMappings.IsEmpty());
	}


TBool DMemoryObject::CheckRegion(TUint aIndex, TUint aCount)
	{
	TUint end = aIndex+aCount;
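	// NB: 'end' may have wrapped around on unsigned overflow; 'end>=aIndex'
	// rejects any region whose end would wrap past the top of the index space.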
	return end>=aIndex && end<=iSizeInPages;
	}


void DMemoryObject::ClipRegion(TUint& aIndex, TUint& aCount)
	{
	TUint end = aIndex+aCount;
	if(end<aIndex) // overflow?
		end = ~0u;
	if(end>iSizeInPages)
		end = iSizeInPages;
	if(aIndex>=end)
		aIndex = end;
	aCount = end-aIndex;
	}


void DMemoryObject::SetLock(DMutex* aLock)
	{
	__NK_ASSERT_DEBUG(!iLock);
	iLock = aLock;
	TRACE(("DMemoryObject[0x%08x]::SetLock(0x%08x) \"%O\"",this,aLock,aLock));
	}


DMemoryMapping* DMemoryObject::CreateMapping(TUint, TUint)
	{
	return new DFineMapping();
	}


TInt DMemoryObject::MapPages(RPageArray::TIter aPages)
	{
	TRACE2(("DMemoryObject[0x%08x]::MapPages(?) index=0x%x count=0x%x",this,aPages.Index(),aPages.Count()));

	TUint offset = aPages.Index();
	TUint offsetEnd = aPages.IndexEnd();
	TInt r = KErrNone;

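	// Iterate over all mappings attached to this object. For each mapping that
	// overlaps the page range we take a reference and snapshot its map instance
	// count before dropping the list lock, so the mapping can't be destroyed
	// while we operate on it and any reuse of it can be detected.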
	iMappings.Lock();
	TMappingListIter iter;
	DMemoryMappingBase* mapping = iter.Start(iMappings);
	while(mapping)
		{
		if(mapping->IsPinned())
			{
			// pinned mappings don't change, so nothing to do...
			iMappings.Unlock();
			}
		else
			{
			// get region where pages overlap the mapping...
			TUint start = mapping->iStartIndex;
			TUint end = start+mapping->iSizeInPages;
			if(start<offset)
				start = offset;
			if(end>offsetEnd)
				end = offsetEnd;
			if(start>=end)
				{
				// the mapping doesn't contain the pages...
				iMappings.Unlock();
				}
			else
				{
				// map pages in the mapping...
				mapping->Open();
				TUint mapInstanceCount = mapping->MapInstanceCount();
				iMappings.Unlock();
				r = mapping->MapPages(aPages.Slice(start,end),mapInstanceCount);
				mapping->AsyncClose();
				if(r!=KErrNone)
					{
					iMappings.Lock();
					break;
					}
				}
			}
		iMappings.Lock();
		mapping = iter.Next();
		}
	iter.Finish();
	iMappings.Unlock();

	return r;
	}


void DMemoryObject::RemapPage(TPhysAddr& aPageArray, TUint aIndex, TBool aInvalidateTLB)
	{
	TRACE2(("DMemoryObject[0x%08x]::RemapPage(0x%x,%d,%d)",this,aPageArray,aIndex,aInvalidateTLB));

	iMappings.RemapPage(aPageArray, aIndex, aInvalidateTLB);

#ifdef COARSE_GRAINED_TLB_MAINTENANCE
	if (aInvalidateTLB)
		InvalidateTLB();
#endif
	}


void DMemoryObject::UnmapPages(RPageArray::TIter aPages, TBool aDecommitting)
	{
	TRACE2(("DMemoryObject[0x%08x]::UnmapPages(?,%d) index=0x%x count=0x%x",this,(bool)aDecommitting,aPages.Index(),aPages.Count()));

	TUint offset = aPages.Index();
	TUint offsetEnd = aPages.IndexEnd();
	if(offset==offsetEnd)
		return;

	iMappings.Lock();
	TMappingListIter iter;
	DMemoryMappingBase* mapping = iter.Start(iMappings);
	while(mapping)
		{
		// get region where pages overlap the mapping...
		TUint start = mapping->iStartIndex;
		TUint end = start+mapping->iSizeInPages;
		if(start<offset)
			start = offset;
		if(end>offsetEnd)
			end = offsetEnd;
		if(start>=end)
			{
			// the mapping doesn't contain the pages...
			iMappings.Unlock();
			}
		else
			{
			RPageArray::TIter pages = aPages.Slice(start,end);
			if(mapping->IsPinned())
				{
				// pinned mappings veto page unmapping...
				if(aDecommitting)
					__e32_atomic_ior_ord8(&mapping->Flags(), (TUint8)DMemoryMapping::EPageUnmapVetoed);
				iMappings.Unlock();
				TRACE2(("DFineMemoryMapping[0x%08x] veto UnmapPages, index=0x%x count=0x%x",mapping,pages.Index(),pages.Count()));
				pages.VetoUnmap();
				}
			else
				{
				// unmap pages in the mapping...
				mapping->Open();
				TUint mapInstanceCount = mapping->MapInstanceCount();
				iMappings.Unlock();
				mapping->UnmapPages(pages,mapInstanceCount);
				mapping->AsyncClose();
				}
			}
		iMappings.Lock();
		mapping = iter.Next();
		}
	iter.Finish();
	iMappings.Unlock();

#ifdef COARSE_GRAINED_TLB_MAINTENANCE
	InvalidateTLB();
#endif
	}


void DMemoryObject::RestrictPages(RPageArray::TIter aPages, TRestrictPagesType aRestriction)
	{
	TRACE2(("DMemoryObject[0x%08x]::RestrictPages(?,%d) index=0x%x count=0x%x",this,aRestriction,aPages.Index(),aPages.Count()));

	TUint offset = aPages.Index();
	TUint offsetEnd = aPages.IndexEnd();
	if(offset==offsetEnd)
		return;

	iMappings.Lock();
	TMappingListIter iter;
	DMemoryMappingBase* mapping = iter.Start(iMappings);
	while(mapping)
		{
		// get region where pages overlap the mapping...
		TUint start = mapping->iStartIndex;
		TUint end = start+mapping->iSizeInPages;
		if(start<offset)
			start = offset;
		if(end>offsetEnd)
			end = offsetEnd;
		if(start>=end)
			{
			// the mapping doesn't contain the pages...
			iMappings.Unlock();
			}
		else
			{
			RPageArray::TIter pages = aPages.Slice(start,end);
			if(mapping->IsPhysicalPinning() ||
				(!(aRestriction & ERestrictPagesForMovingFlag) && mapping->IsPinned()))
				{
				// Pinned mappings veto page restrictions except for page moving
				// where only physically pinned mappings block page moving.
				iMappings.Unlock();
				TRACE2(("DFineMemoryMapping[0x%08x] veto RestrictPages, index=0x%x count=0x%x",mapping,pages.Index(),pages.Count()));
				pages.VetoRestrict(aRestriction & ERestrictPagesForMovingFlag);
				// Mappings lock required for iter.Finish() as iter will be removed from the mappings list.
				iMappings.Lock();
				break;
				}
			else
				{
				// pages not pinned so do they need restricting...
				if(aRestriction == ERestrictPagesForMovingFlag)
					{
					// nothing to do when just checking for pinned mappings for
					// page moving purposes and not restricting to NA.
					iMappings.Unlock();
					}
				else
					{
					// restrict pages in the mapping...
					mapping->Open();
					TUint mapInstanceCount = mapping->MapInstanceCount();
					iMappings.Unlock();
					mapping->RestrictPagesNA(pages, mapInstanceCount);
					mapping->AsyncClose();
					}
				}
			}
		iMappings.Lock();
		mapping = iter.Next();
		}

	if(aRestriction & ERestrictPagesForMovingFlag)
		{// Clear the mappings added flag so page moving can detect whether any
		// new mappings have been added.
		ClearMappingAddedFlag();
		}

	iter.Finish();
	iMappings.Unlock();

	#ifdef COARSE_GRAINED_TLB_MAINTENANCE
	// Writable memory objects will have been restricted no access so invalidate TLB.
	if (aRestriction != ERestrictPagesForMovingFlag)
		InvalidateTLB();
	#endif
	}


TInt DMemoryObject::CheckNewMapping(DMemoryMappingBase* aMapping)
	{
	if(iFlags&EDenyPinning && aMapping->IsPinned())
		return KErrAccessDenied;
	if(iFlags&EDenyMappings)
		return KErrAccessDenied;
	if(iFlags&EDenyWriteMappings && !aMapping->IsReadOnly())
		return KErrAccessDenied;
#ifdef MMU_SUPPORTS_EXECUTE_NEVER
	if((iFlags&EDenyExecuteMappings) && aMapping->IsExecutable())
		return KErrAccessDenied;
#endif
	return KErrNone;
	}


TInt DMemoryObject::AddMapping(DMemoryMappingBase* aMapping)
	{
	__NK_ASSERT_DEBUG(!aMapping->IsCoarse());

	// check mapping allowed...
	MmuLock::Lock();
	iMappings.Lock();

	TInt r = CheckNewMapping(aMapping);
	if(r == KErrNone)
		{
		Open();
		aMapping->LinkToMemory(this, iMappings);
		}

	iMappings.Unlock();
	MmuLock::Unlock();

	TRACE(("DMemoryObject[0x%08x]::AddMapping(0x%08x)  returns %d", this, aMapping, r));

	return r;
	}


void DMemoryObject::RemoveMapping(DMemoryMappingBase* aMapping)
	{
	aMapping->UnlinkFromMemory(iMappings);
	Close();
	}


TInt DMemoryObject::SetReadOnly()
	{
	TRACE(("DMemoryObject[0x%08x]::SetReadOnly()",this));
	__NK_ASSERT_DEBUG(MemoryObjectLock::IsHeld(this));

	TInt r = KErrNone;
	iMappings.Lock();
	if (iFlags & EDenyWriteMappings)
		{// The object is already read only.
		iMappings.Unlock();
		return KErrNone;
		}

	TMappingListIter iter;
	DMemoryMappingBase* mapping = iter.Start(iMappings);
	while(mapping)
		{
		if (!mapping->IsReadOnly())
			{
			r = KErrInUse;
			goto exit;
			}
		// This will flash iMappings.Lock to stop it being held too long.
		// This is safe as new mappings will be added to the end of the list so we
		// won't miss them.
		mapping = iter.Next();
		}
	// Block any writable mapping from being added to this memory object.
	// Use atomic operation as iMappings.Lock protects EDenyWriteMappings
	// but not the whole word.
	__e32_atomic_ior_ord8(&iFlags, (TUint8)EDenyWriteMappings);

exit:
	iter.Finish();
	iMappings.Unlock();
	return r;
	}


void DMemoryObject::DenyMappings()
	{
	TRACE(("DMemoryObject[0x%08x]::DenyMappings()",this));
	MmuLock::Lock();
	// Use atomic operation as MmuLock protects EDenyMappings
	// but not the whole word.
	__e32_atomic_ior_ord8(&iFlags, (TUint8)EDenyMappings);
	MmuLock::Unlock();
	}


TInt DMemoryObject::PhysAddr(TUint aIndex, TUint aCount, TPhysAddr& aPhysicalAddress, TPhysAddr* aPhysicalPageList)
	{
	TRACE2(("DMemoryObject[0x%08x]::PhysAddr(0x%x,0x%x,?,?)",this,aIndex,aCount));
	TInt r = iPages.PhysAddr(aIndex,aCount,aPhysicalAddress,aPhysicalPageList);
	TRACE2(("DMemoryObject[0x%08x]::PhysAddr(0x%x,0x%x,?,?) returns %d aPhysicalAddress=0x%08x",this,aIndex,aCount,r,aPhysicalAddress));
	return r;
	}


void DMemoryObject::BTraceCreate()
	{
	BTraceContext8(BTrace::EFlexibleMemModel,BTrace::EMemoryObjectCreate,this,iSizeInPages);
	}


TUint DMemoryObject::PagingManagerData(TUint aIndex)
	{
	TRACE2(("DMemoryObject[0x%08x]::PagingManagerData(0x%x)",this,aIndex));
	__NK_ASSERT_DEBUG(IsDemandPaged());
	TUint value = iPages.PagingManagerData(aIndex);
	TRACE2(("DMemoryObject[0x%08x]::PagingManagerData(0x%x) returns 0x%x",this,aIndex,value));
	return value;
	}


void DMemoryObject::SetPagingManagerData(TUint aIndex, TUint aValue)
	{
	TRACE(("DMemoryObject[0x%08x]::SetPagingManagerData(0x%x,0x%08x)",this,aIndex,aValue));
	__NK_ASSERT_DEBUG(IsDemandPaged());
	iPages.SetPagingManagerData(aIndex, aValue);
	__NK_ASSERT_DEBUG(iPages.PagingManagerData(aIndex)==aValue);
	}



//
// DCoarseMemory::DPageTables
//

DCoarseMemory::DPageTables::DPageTables(DCoarseMemory* aMemory, TInt aNumPts, TUint aPteType)
	: iMemory(aMemory), iPteType(aPteType), iPermanenceCount(0), iNumPageTables(aNumPts)
	{
	aMemory->Open();
	iBlankPte = Mmu::BlankPte(aMemory->Attributes(),aPteType);
	}


DCoarseMemory::DPageTables* DCoarseMemory::DPageTables::New(DCoarseMemory* aMemory, TUint aNumPages, TUint aPteType)
	{
	TRACE2(("DCoarseMemory::DPageTables::New(0x%08x,0x%x,0x%08x)",aMemory, aNumPages, aPteType));
	__NK_ASSERT_DEBUG(MemoryObjectLock::IsHeld(aMemory));
	__NK_ASSERT_DEBUG((aNumPages&(KChunkMask>>KPageShift))==0);
	TUint numPts = aNumPages>>(KChunkShift-KPageShift);
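	// iTables[] is declared with a single element; the allocation below makes
	// room for numPts entries in total, giving a variable-length structure.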
	DPageTables* self = (DPageTables*)Kern::AllocZ(sizeof(DPageTables)+(numPts-1)*sizeof(TPte*));
	if(self)
		{
		new (self) DPageTables(aMemory,numPts,aPteType);
		TInt r = self->Construct();
		if(r!=KErrNone)
			{
			self->Close();
			self = 0;
			}
		}
	TRACE2(("DCoarseMemory::DPageTables::New(0x%08x,0x%x,0x%08x) returns 0x%08x",aMemory, aNumPages, aPteType, self));
	return self;
	}


TInt DCoarseMemory::DPageTables::Construct()
	{
	if(iMemory->IsDemandPaged())
		{
		// do nothing, allow pages to be mapped on demand...
		return KErrNone;
		}

	RPageArray::TIter pageIter;
	iMemory->iPages.FindStart(0,iMemory->iSizeInPages,pageIter);

	// map pages...
	TInt r = KErrNone;
	for(;;)
		{
		// find some pages...
		RPageArray::TIter pageList;
		TUint n = pageIter.Find(pageList);
		if(!n)
			break; // done

		// map some pages...
		r = MapPages(pageList);

		// done with pages...
		pageIter.FindRelease(n);

		if(r!=KErrNone)
			break;
		}

	iMemory->iPages.FindEnd(0,iMemory->iSizeInPages);

	return r;
	}


void DCoarseMemory::DPageTables::Close()
	{
	__NK_ASSERT_DEBUG(CheckCloseIsSafe());
	MmuLock::Lock();
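	// Atomically decrement the reference count; the returned value is the count
	// before decrementing, so anything other than 1 means references remain.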
	if (__e32_atomic_tas_ord32(&iReferenceCount, 1, -1, 0) != 1)
		{
		MmuLock::Unlock();
		return;
		}
	DCoarseMemory* memory = iMemory;
	if(memory)
		{
		iMemory->iPageTables[iPteType] = 0;
		iMemory = 0;
		}
	MmuLock::Unlock();
	if(memory)
		memory->Close();
	delete this;
	}


void DCoarseMemory::DPageTables::AsyncClose()
	{
	__NK_ASSERT_DEBUG(CheckAsyncCloseIsSafe());
	MmuLock::Lock();
	if (__e32_atomic_tas_ord32(&iReferenceCount, 1, -1, 0) != 1)
		{
		MmuLock::Unlock();
		return;
		}
	DCoarseMemory* memory = iMemory;
	if(memory)
		{
		iMemory->iPageTables[iPteType] = 0;
		iMemory = 0;
		}
	MmuLock::Unlock();
	if(memory)
		memory->AsyncClose();
	AsyncDelete();
	}


DCoarseMemory::DPageTables::~DPageTables()
	{
	TRACE2(("DCoarseMemory::DPageTables[0x%08x]::~DPageTables()",this));
	__NK_ASSERT_DEBUG(!iMemory);
	__NK_ASSERT_DEBUG(iMappings.IsEmpty());
	TUint i=0;
	while(i<iNumPageTables)
		{
		TPte* pt = iTables[i];
		if(pt)
			{
			iTables[i] = 0;
			::PageTables.Lock();
			::PageTables.Free(pt);
			::PageTables.Unlock();
			}
		++i;
		}
	}


TPte* DCoarseMemory::DPageTables::GetOrAllocatePageTable(TUint aChunkIndex)
	{
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());

	// get page table...
	TPte* pt = GetPageTable(aChunkIndex);
	if(!pt)
		pt = AllocatePageTable(aChunkIndex, iMemory->IsDemandPaged());

	return pt;
	}


TPte* DCoarseMemory::DPageTables::GetOrAllocatePageTable(TUint aChunkIndex, TPinArgs& aPinArgs)
	{
	__NK_ASSERT_DEBUG(aPinArgs.iPinnedPageTables);

	if(!aPinArgs.HaveSufficientPages(KNumPagesToPinOnePageTable))
		return 0;

	TPte* pinnedPt = 0;
	for(;;)
		{
		TPte* pt = GetOrAllocatePageTable(aChunkIndex);

		if(pinnedPt && pinnedPt!=pt)
			{
			// previously pinned page table not needed...
			::PageTables.UnpinPageTable(pinnedPt,aPinArgs);

			// make sure we have memory for next pin attempt...
			MmuLock::Unlock();
			aPinArgs.AllocReplacementPages(KNumPagesToPinOnePageTable);
			if(!aPinArgs.HaveSufficientPages(KNumPagesToPinOnePageTable)) // if out of memory...
				{
				// make sure we free any unneeded page table we allocated...
				if(pt)
					FreePageTable(aChunkIndex);
				MmuLock::Lock();
				return 0;
				}
			MmuLock::Lock();
			}

		if(!pt)
			return 0; // out of memory

		if(pt==pinnedPt)
			{
			// we got a page table and it was pinned...
			*aPinArgs.iPinnedPageTables++ = pt;
			++aPinArgs.iNumPinnedPageTables;
			return pt;
			}

		// don't pin page table if it's not paged (e.g. unpaged part of ROM)...
		SPageTableInfo* pti = SPageTableInfo::FromPtPtr(pt);
		if(!pti->IsDemandPaged())
			return pt;

		// pin the page table...
		if (::PageTables.PinPageTable(pt,aPinArgs) != KErrNone)
			{
			// Couldn't pin the page table...
			MmuLock::Unlock();
			// make sure we free any unneeded page table we allocated...
			FreePageTable(aChunkIndex);
			MmuLock::Lock();
			return 0;
			}
		pinnedPt = pt;
		}
	}


TPte* DCoarseMemory::DPageTables::AllocatePageTable(TUint aChunkIndex, TBool aDemandPaged, TBool aPermanent)
	{
	TRACE2(("DCoarseMemory::DPageTables[0x%08x]::AllocatePageTable(0x%08x,%d,%d)",this,aChunkIndex,aDemandPaged,aPermanent));

	TPte* pt;
	do
		{
		// acquire page table lock...
		MmuLock::Unlock();
		::PageTables.Lock();

		// see if we still need to allocate a page table...
		pt = iTables[aChunkIndex];
		if(!pt)
			{
			// allocate page table...
			pt = ::PageTables.Alloc(aDemandPaged);
			if(!pt)
				{
				// out of memory...
				::PageTables.Unlock();
				MmuLock::Lock();
				return 0;
				}
			AssignPageTable(aChunkIndex,pt);
			}

		// release page table lock...
		::PageTables.Unlock();
		MmuLock::Lock();

		// check again...
		pt = iTables[aChunkIndex];
		}
	while(!pt);

	// we have a page table...
	if(aPermanent)
		{
		__NK_ASSERT_ALWAYS(!aDemandPaged);
		SPageTableInfo* pti = SPageTableInfo::FromPtPtr(pt);
		pti->IncPermanenceCount();
		}
	return pt;
	}


void DCoarseMemory::DPageTables::AssignPageTable(TUint aChunkIndex, TPte* aPageTable)
	{
	__NK_ASSERT_DEBUG(PageTablesLockIsHeld());

	MmuLock::Lock();

	// get physical address of page table now, this can't change whilst we have the page table allocator mutex...
	TPhysAddr ptPhys = Mmu::PageTablePhysAddr(aPageTable);

	// update mappings with new page table...
	TUint offset = aChunkIndex<<(KChunkShift-KPageShift);
	iMappings.Lock();
	TMappingListIter iter;
	DMemoryMapping* mapping = (DMemoryMapping*)iter.Start(iMappings);
	TUint flash = 0;
	while(mapping)
		{
		TUint size = mapping->iSizeInPages;
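		// unsigned arithmetic: 'start' wraps if offset<iStartIndex, so the single
		// test start<size checks offset lies within [iStartIndex,iStartIndex+size)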
		TUint start = offset-mapping->iStartIndex;
		if(start<size && !mapping->BeingDetached())
			{
			// page table is used by this mapping, so set PDE...
			TLinAddr linAddrAndOsAsid = mapping->LinAddrAndOsAsid()+start*KPageSize;
			TPde* pPde = Mmu::PageDirectoryEntry(linAddrAndOsAsid&KPageMask,linAddrAndOsAsid);
			TPde pde = ptPhys|mapping->BlankPde();
#ifdef __USER_MEMORY_GUARDS_ENABLED__
			if (mapping->IsUserMapping())
				pde = PDE_IN_DOMAIN(pde, USER_MEMORY_DOMAIN);
#endif
			TRACE2(("!PDE %x=%x",pPde,pde));
			__NK_ASSERT_DEBUG(((*pPde^pde)&~KPdeMatchMask)==0 || *pPde==KPdeUnallocatedEntry);
			*pPde = pde;
			SinglePdeUpdated(pPde);

			++flash; // increase flash rate because we've done quite a bit more work
			}
		iMappings.Unlock();
		MmuLock::Flash(flash,KMaxMappingsInOneGo);
		iMappings.Lock();
		mapping = (DMemoryMapping*)iter.Next();
		}
	iter.Finish();
	iMappings.Unlock();

	// next, assign page table to us...
	// NOTE: Must happen before MmuLock is released after reaching the end of the mapping list
	// otherwise it would be possible for a new mapping to be added and mapped before we manage
	// to update iTables with the page table it should use.
	SPageTableInfo* pti = SPageTableInfo::FromPtPtr(aPageTable);
	pti->SetCoarse(iMemory,aChunkIndex,iPteType);
	__NK_ASSERT_DEBUG(!iTables[aChunkIndex]);
	iTables[aChunkIndex] = aPageTable; // new mappings can now see the page table

	MmuLock::Unlock();
	}


void DCoarseMemory::DPageTables::FreePageTable(TUint aChunkIndex)
	{
	TRACE2(("DCoarseMemory::DPageTables[0x%08x]::FreePageTable(0x%08x)",this,aChunkIndex));

	// acquire locks...
	::PageTables.Lock();
	MmuLock::Lock();

	// test if page table still needs freeing...
	TPte* pt = iTables[aChunkIndex];
	if(pt)
		{
		SPageTableInfo* pti = SPageTableInfo::FromPtPtr(pt);
		if(pti->PageCount()==0 && pti->PermanenceCount()==0)
			{
			// page table needs freeing...
			UnassignPageTable(aChunkIndex);
			MmuLock::Unlock();
			::PageTables.Free(pt);
			::PageTables.Unlock();
			return;
			}
		}

	// page table doesn't need freeing...
	MmuLock::Unlock();
	::PageTables.Unlock();
	return;
	}


void DCoarseMemory::StealPageTable(TUint aChunkIndex, TUint aPteType)
	{
	__NK_ASSERT_DEBUG(PageTablesLockIsHeld());
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
	__NK_ASSERT_DEBUG(iPageTables[aPteType]);
	iPageTables[aPteType]->StealPageTable(aChunkIndex);
	}


void DCoarseMemory::DPageTables::StealPageTable(TUint aChunkIndex)
	{
	__NK_ASSERT_DEBUG(PageTablesLockIsHeld());
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
#ifdef _DEBUG
	TPte* pt = iTables[aChunkIndex];
	__NK_ASSERT_DEBUG(pt);
	SPageTableInfo* pti = SPageTableInfo::FromPtPtr(pt);
	__NK_ASSERT_DEBUG(pti->PageCount()==0);
	__NK_ASSERT_DEBUG(pti->PermanenceCount()==0);
#endif
	UnassignPageTable(aChunkIndex);
	}


void DCoarseMemory::DPageTables::UnassignPageTable(TUint aChunkIndex)
	{
	__NK_ASSERT_DEBUG(PageTablesLockIsHeld());
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());

#ifdef _DEBUG
	TPhysAddr ptPhys = Mmu::PageTablePhysAddr(iTables[aChunkIndex]);
#endif

	// zero page table pointer immediately so new mappings or memory commits will be forced to
	// create a new one (which will block until we've finished here because it also needs the
	// PageTablesLock)...
	iTables[aChunkIndex] = 0;

	// remove page table from mappings...
	TUint offset = aChunkIndex<<(KChunkShift-KPageShift);
	iMappings.Lock();
	TMappingListIter iter;
	DMemoryMapping* mapping = (DMemoryMapping*)iter.Start(iMappings);
	TUint flash = 0;
	while(mapping)
		{
		__NK_ASSERT_DEBUG(iTables[aChunkIndex]==0); // can't have been recreated because we hold PageTablesLock
		TUint size = mapping->iSizeInPages;
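		// same unsigned wrap-around range check as in AssignPageTable above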
		TUint start = offset-mapping->iStartIndex;
		if(start<size)
			{
			// page table is used by this mapping, so clear PDE...
			TLinAddr linAddrAndOsAsid = mapping->LinAddrAndOsAsid()+start*KPageSize;
			TPde* pPde = Mmu::PageDirectoryEntry(linAddrAndOsAsid&KPageMask,linAddrAndOsAsid);
			TPde pde = KPdeUnallocatedEntry;
			TRACE2(("!PDE %x=%x",pPde,pde));
			__NK_ASSERT_DEBUG(*pPde==pde || (*pPde&~KPageTableMask)==ptPhys);
			*pPde = pde;
			SinglePdeUpdated(pPde);

			++flash; // increase flash rate because we've done quite a bit more work
			}
		iMappings.Unlock();
		MmuLock::Flash(flash,KMaxMappingsInOneGo);
		iMappings.Lock();
		mapping = (DMemoryMapping*)iter.Next();
		}
	iter.Finish();

	iMappings.Unlock();
	}


TInt DCoarseMemory::DPageTables::AllocatePermanentPageTables()
	{
	__NK_ASSERT_DEBUG(MemoryObjectLock::IsHeld(iMemory));
	__NK_ASSERT_ALWAYS(!iMemory->IsDemandPaged());

	if(iPermanenceCount++)
		{
		// page tables already marked permanent, so end...
		return KErrNone;
		}

	// allocate all page tables...
	MmuLock::Lock();
	TUint flash = 0;
	TUint i;
	for(i=0; i<iNumPageTables; ++i)
		{
		TPte* pt = iTables[i];
		if(pt)
			{
			// already have page table...
			SPageTableInfo* pti = SPageTableInfo::FromPtPtr(pt);
			pti->IncPermanenceCount();
			MmuLock::Flash(flash,KMaxPageInfoUpdatesInOneGo);
			}
		else
			{
			// allocate new page table...
			pt = AllocatePageTable(i,EFalse,ETrue);
			if(!pt)
				{
				MmuLock::Unlock();
				--iPermanenceCount;
				FreePermanentPageTables(0,i);
				return KErrNoMemory;
				}
			}
		}
	MmuLock::Unlock();

	return KErrNone;
	}


void DCoarseMemory::DPageTables::FreePermanentPageTables(TUint aChunkIndex, TUint aChunkCount)
	{
	MmuLock::Lock();

	TUint flash = 0;
	TUint i;
	for(i=aChunkIndex; i<aChunkIndex+aChunkCount; ++i)
		{
		TPte* pt = iTables[i];
		SPageTableInfo* pti = SPageTableInfo::FromPtPtr(pt);
		if(pti->DecPermanenceCount() || pti->PageCount())
			{
			// still in use...
			MmuLock::Flash(flash,KMaxPageInfoUpdatesInOneGo);
			}
		else
			{
			// page table no longer used for anything...
			MmuLock::Unlock();
			FreePageTable(i);
			MmuLock::Lock();
			}
		}

	MmuLock::Unlock();
	}


void DCoarseMemory::DPageTables::FreePermanentPageTables()
	{
	__NK_ASSERT_DEBUG(MemoryObjectLock::IsHeld(iMemory));

	if(--iPermanenceCount)
		{
		// page tables still permanent, so end...
		return;
		}

	FreePermanentPageTables(0,iNumPageTables);
	}


TInt DCoarseMemory::DPageTables::AddMapping(DCoarseMapping* aMapping)
	{
	TRACE(("DCoarseMemory::DPageTables[0x%08x]::AddMapping(0x%08x)",this,aMapping));
	__NK_ASSERT_DEBUG(aMapping->IsCoarse());
	Open();
	MmuLock::Lock();
	iMappings.Lock();
	aMapping->LinkToMemory(iMemory,iMappings);
	iMappings.Unlock();
	MmuLock::Unlock();
	return KErrNone;
	}


void DCoarseMemory::DPageTables::RemoveMapping(DCoarseMapping* aMapping)
	{
	aMapping->UnlinkFromMemory(iMappings);
	Close();
	}


void DCoarseMemory::DPageTables::RemapPage(TPhysAddr& aPageArray, TUint aIndex, TBool aInvalidateTLB)
	{
	TUint pteIndex = aIndex & (KChunkMask>>KPageShift);

	// get address of page table...
	MmuLock::Lock();
	TUint i = aIndex>>(KChunkShift-KPageShift);
	TPte* pPte = GetPageTable(i);

	if (!pPte)
		{// This page has been unmapped so just return.
		MmuLock::Unlock();
		return;
		}

	// remap the page...
	pPte += pteIndex;
	Mmu::RemapPage(pPte, aPageArray, iBlankPte);

	MmuLock::Unlock();

	if (aInvalidateTLB)
		FlushTLB(aIndex, aIndex + 1);
	}


TInt DCoarseMemory::DPageTables::MapPages(RPageArray::TIter aPages)
	{
	__NK_ASSERT_DEBUG(aPages.Count());

	for(;;)
		{
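		// pteIndex = the page's offset within its page table; each page table
		// maps exactly one chunk (KChunkSize) of the memory object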
		TUint pteIndex = aPages.Index()&(KChunkMask>>KPageShift);

		// calculate max number of pages to do...
		TUint n = (KChunkSize>>KPageShift)-pteIndex; // pages left in page table
		if(n>KMaxPagesInOneGo)
			n = KMaxPagesInOneGo;

		// get some pages...
		TPhysAddr* pages;
		n = aPages.Pages(pages,n);
		if(!n)
			break;

		// get address of page table...
		MmuLock::Lock();
		TUint i = aPages.Index()>>(KChunkShift-KPageShift);
		TPte* pPte = GetOrAllocatePageTable(i);

		// check for OOM...
		if(!pPte)
			{
			MmuLock::Unlock();
			return KErrNoMemory;
			}

		// map some pages...
		pPte += pteIndex;
		TBool keepPt = Mmu::MapPages(pPte, n, pages, iBlankPte);
		MmuLock::Unlock();

		// free page table if no longer needed...
		if(!keepPt)
			FreePageTable(i);

		// move on...
		aPages.Skip(n);
		}

	return KErrNone;
	}


void DCoarseMemory::DPageTables::UnmapPages(RPageArray::TIter aPages, TBool aDecommitting)
	{
	__NK_ASSERT_DEBUG(aPages.Count());

	TUint startIndex = aPages.Index();

	for(;;)
		{
		TUint pteIndex = aPages.Index()&(KChunkMask>>KPageShift);

		// calculate max number of pages to do...
		TUint n = (KChunkSize>>KPageShift)-pteIndex; // pages left in page table
		if(n>KMaxPagesInOneGo)
			n = KMaxPagesInOneGo;

		// get some pages...
		TPhysAddr* pages;
		n = aPages.Pages(pages,n);
		if(!n)
			break;

		// get address of PTE for pages...
		MmuLock::Lock();
		TUint i = aPages.Index()>>(KChunkShift-KPageShift);
		TPte* pPte = iTables[i];
		if(pPte)
			{
			// unmap some pages...
			pPte += pteIndex;
			TBool keepPt = Mmu::UnmapPages(pPte,n,pages);
			MmuLock::Unlock();

			// free page table if no longer needed...
			if(!keepPt)
				FreePageTable(i);
			}
		else
			{
			// no page table found...
			MmuLock::Unlock();
			}

		// move on...
		aPages.Skip(n);
		}

	FlushTLB(startIndex,aPages.IndexEnd());
	}


void DCoarseMemory::DPageTables::RestrictPagesNA(RPageArray::TIter aPages)
	{
	__NK_ASSERT_DEBUG(aPages.Count());

	TUint startIndex = aPages.Index();

	for(;;)
		{
		TUint pteIndex = aPages.Index()&(KChunkMask>>KPageShift);

		// calculate max number of pages to do...
		TUint n = (KChunkSize>>KPageShift)-pteIndex; // pages left in page table
		if(n>KMaxPagesInOneGo)
			n = KMaxPagesInOneGo;

		// get some pages...
		TPhysAddr* pages;
		n = aPages.Pages(pages,n);
		if(!n)
			break;

		// get address of PTE for pages...
		MmuLock::Lock();
		TUint i = aPages.Index()>>(KChunkShift-KPageShift);
		TPte* pPte = iTables[i];
		if(pPte)
			{
			// restrict some pages...
			pPte += pteIndex;
			Mmu::RestrictPagesNA(pPte,n,pages);
			}
		MmuLock::Unlock();

		// move on...
		aPages.Skip(n);
		}

	FlushTLB(startIndex,aPages.IndexEnd());
	}


TInt DCoarseMemory::DPageTables::PageIn(RPageArray::TIter aPages, TPinArgs& aPinArgs,
										DMemoryMappingBase* aMapping, TUint aMapInstanceCount)
	{
	__NK_ASSERT_DEBUG(aPages.Count());

	TBool pinPageTable = aPinArgs.iPinnedPageTables!=0; // check if we need to pin the first page table
	for(;;)
		{
		TUint pteIndex = aPages.Index()&(KChunkMask>>KPageShift);
		if(pteIndex==0)
			pinPageTable = aPinArgs.iPinnedPageTables!=0;	// started a new page table, check if we need to pin it

		// calculate max number of pages to do...
		TUint n = (KChunkSize>>KPageShift)-pteIndex; // pages left in page table
		if(n>KMaxPagesInOneGo)
			n = KMaxPagesInOneGo;

		// get some pages...
		TPhysAddr* pages;
		n = aPages.Pages(pages,n);
		if(!n)
			break;

		// make sure we have memory to pin the page table if required...
		if(pinPageTable)
			aPinArgs.AllocReplacementPages(KNumPagesToPinOnePageTable);

		// get address of page table...
		MmuLock::Lock();
		TUint i = aPages.Index()>>(KChunkShift-KPageShift);
		TPte* pPte;
		if(pinPageTable)
			pPte = GetOrAllocatePageTable(i,aPinArgs);
		else
			pPte = GetOrAllocatePageTable(i);

		// check for OOM...
		if(!pPte)
			{
			MmuLock::Unlock();
			return KErrNoMemory;
			}

		if (aMapInstanceCount != aMapping->MapInstanceCount())
			{// The mapping that took the page fault has been reused.
			MmuLock::Unlock();
			FreePageTable(i);	// This will only free if this is the only pt referencer.
			return KErrNotFound;
			}

		// map some pages...
		pPte += pteIndex;
		TPte blankPte = iBlankPte;
		if(aPinArgs.iReadOnly)
			blankPte = Mmu::MakePteInaccessible(blankPte,true);
		TBool keepPt = Mmu::PageInPages(pPte, n, pages, blankPte);
		MmuLock::Unlock();

		// free page table if no longer needed...
		if(!keepPt)
			FreePageTable(i);

		// move on...
		aPages.Skip(n);
		pinPageTable = false;
		}

	return KErrNone;
	}


TBool DCoarseMemory::DPageTables::MovingPageIn(TPhysAddr& aPageArrayPtr, TUint aIndex)
	{
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());

	TUint pteIndex = aIndex & (KChunkMask >> KPageShift);

	// get address of page table...
	TUint i = aIndex >> (KChunkShift - KPageShift);
	TPte* pPte = GetPageTable(i);

	// Check the page is still mapped..
	if (!pPte)
		return EFalse;

	// map the page...
	pPte += pteIndex;
	Mmu::RemapPage(pPte, aPageArrayPtr, iBlankPte);
	return ETrue;
	}


void DCoarseMemory::DPageTables::FlushTLB(TUint aStartIndex, TUint aEndIndex)
	{
#ifndef COARSE_GRAINED_TLB_MAINTENANCE
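	// without coarse-grained TLB maintenance we must invalidate each mapped
	// page individually, in every mapping that overlaps the flushed range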
	iMappings.Lock();
	TMappingListIter iter;
	DMemoryMapping* mapping = (DMemoryMapping*)iter.Start(iMappings);
	while(mapping)
		{
		// get region which overlaps the mapping...
		TUint start = mapping->iStartIndex;
		TUint end = start+mapping->iSizeInPages;
		if(start<aStartIndex)
			start = aStartIndex;
		if(end>aEndIndex)
			end = aEndIndex;
		if(start>=end)
			{
			// the mapping doesn't contain the pages...
			iMappings.Unlock();
			}
		else
			{
			// flush TLB for pages in the mapping...
			TUint size = end-start;
			start -= mapping->iStartIndex;
			TLinAddr addr = mapping->LinAddrAndOsAsid()+start*KPageSize;
			TLinAddr endAddr = addr+size*KPageSize;
			iMappings.Unlock();
			do
				{
				InvalidateTLBForPage(addr);
				}
			while((addr+=KPageSize)<endAddr);
			}
		iMappings.Lock();
		mapping = (DMemoryMapping*)iter.Next();
		}
	iter.Finish();
	iMappings.Unlock();
#endif
	}



//
// DCoarseMemory
//

DCoarseMemory::DCoarseMemory(DMemoryManager* aManager, TUint aSizeInPages, TMemoryAttributes aAttributes, TMemoryCreateFlags aCreateFlags)
	: DMemoryObject(aManager,ECoarseObject,aSizeInPages,aAttributes,aCreateFlags)
	{
	}


DCoarseMemory* DCoarseMemory::New(DMemoryManager* aManager, TUint aSizeInPages, TMemoryAttributes aAttributes, TMemoryCreateFlags aCreateFlags)
	{
	DCoarseMemory* self = new DCoarseMemory(aManager, aSizeInPages, aAttributes, aCreateFlags);
	if(self)
		{
		if(self->Construct()==KErrNone)
			return self;
		self->Close();
		}
	return 0;
	}


DCoarseMemory::~DCoarseMemory()
	{
	TRACE2(("DCoarseMemory[0x%08x]::~DCoarseMemory()",this));
#ifdef _DEBUG
	for(TUint i=0; i<ENumPteTypes; i++)
		{
		__NK_ASSERT_DEBUG(!iPageTables[i]);
		}
#endif
	}


DMemoryMapping* DCoarseMemory::CreateMapping(TUint aIndex, TUint aCount)
	{
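	// use a coarse mapping only when both the index and count are chunk-aligned,
	// so that whole page tables can be shared with other coarse mappings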
	if (((aIndex|aCount)&(KChunkMask>>KPageShift))==0)
		return new DCoarseMapping();
	else
		return new DFineMapping();
	}


TInt DCoarseMemory::MapPages(RPageArray::TIter aPages)
	{
	TRACE2(("DCoarseMemory[0x%08x]::MapPages(?) index=0x%x count=0x%x",this,aPages.Index(),aPages.Count()));

	// map pages in all page tables for coarse mapping...
	MmuLock::Lock();
	TUint pteType = 0;
	do
		{
		DPageTables* tables = iPageTables[pteType];
		if(tables)
			{
			tables->Open();
			MmuLock::Unlock();
			TInt r = tables->MapPages(aPages);
			tables->AsyncClose();
			if(r!=KErrNone)
				return r;
			MmuLock::Lock();
			}
		}
	while(++pteType<ENumPteTypes);
	MmuLock::Unlock();

	// map page in all fine mappings...
	return DMemoryObject::MapPages(aPages);
	}


void DCoarseMemory::RemapPage(TPhysAddr& aPageArray, TUint aIndex, TBool aInvalidateTLB)
	{
	TRACE2(("DCoarseMemory[0x%08x]::RemapPage() index=0x%x",this, aIndex));

	// remap pages in all page tables for coarse mapping...
	MmuLock::Lock();
	TUint pteType = 0;
	do
		{
		DPageTables* tables = iPageTables[pteType];
		if(tables)
			{
			tables->Open();
			MmuLock::Unlock();
			tables->RemapPage(aPageArray, aIndex, aInvalidateTLB);
			tables->AsyncClose();
			MmuLock::Lock();
			}
		}
	while(++pteType<ENumPteTypes);
	MmuLock::Unlock();

	// remap page in all fine mappings...
	DMemoryObject::RemapPage(aPageArray, aIndex, aInvalidateTLB);
	}


void DCoarseMemory::UnmapPages(RPageArray::TIter aPages, TBool aDecommitting)
	{
	TRACE2(("DCoarseMemory[0x%08x]::UnmapPages(?,%d) index=0x%x count=0x%x",this,(bool)aDecommitting,aPages.Index(),aPages.Count()));

	if(!aPages.Count())
		return;

	// unmap pages from all page tables for coarse mapping...
	MmuLock::Lock();
	TUint pteType = 0;
	do
		{
		DPageTables* tables = iPageTables[pteType];
		if(tables)
			{
			tables->Open();
			MmuLock::Unlock();
			tables->UnmapPages(aPages,aDecommitting);
			tables->AsyncClose();
			MmuLock::Lock();
			}
		}
	while(++pteType<ENumPteTypes);
	MmuLock::Unlock();

	// unmap pages from all fine mappings...
	DMemoryObject::UnmapPages(aPages,aDecommitting);
	}


void DCoarseMemory::RestrictPages(RPageArray::TIter aPages, TRestrictPagesType aRestriction)
	{
	TRACE2(("DCoarseMemory[0x%08x]::RestrictPages(?,%d) index=0x%x count=0x%x",this,aRestriction,aPages.Index(),aPages.Count()));
	__ASSERT_COMPILE(ERestrictPagesForMovingFlag != ERestrictPagesNoAccessForMoving);

	if(!aPages.Count())
		return;

	if (aRestriction != ERestrictPagesForMovingFlag)
		{// restrict pages in all the page tables for the coarse mapping...
		MmuLock::Lock();
		TUint pteType = 0;
		do
			{
			DPageTables* tables = iPageTables[pteType];
			if(tables)
				{
				tables->Open();
				MmuLock::Unlock();
				tables->RestrictPagesNA(aPages);
				tables->AsyncClose();
				MmuLock::Lock();
				}
			}
		while(++pteType<ENumPteTypes);
		MmuLock::Unlock();
		}

	// restrict pages in all fine mappings, will also check for pinned mappings...
	DMemoryObject::RestrictPages(aPages,aRestriction);
	}


TPte* DCoarseMemory::GetPageTable(TUint aPteType, TUint aChunkIndex)
	{
	__NK_ASSERT_DEBUG(aChunkIndex < (iSizeInPages >> KPagesInPDEShift));
	return iPageTables[aPteType]->GetPageTable(aChunkIndex);
	}


TInt DCoarseMemory::PageIn(DCoarseMapping* aMapping, RPageArray::TIter aPages, TPinArgs& aPinArgs, TUint aMapInstanceCount)
	{
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());

	DPageTables* tables = iPageTables[aMapping->PteType()];
	tables->Open();

	MmuLock::Unlock();

#ifndef COARSE_GRAINED_TLB_MAINTENANCE
	TLinAddr startAddr = aMapping->Base()+(aPages.Index()-aMapping->iStartIndex)*KPageSize;
	TLinAddr endAddr = startAddr+aPages.Count()*KPageSize;
#endif

	TInt r = tables->PageIn(aPages, aPinArgs, aMapping, aMapInstanceCount);

	// clean TLB...
#ifdef COARSE_GRAINED_TLB_MAINTENANCE
	InvalidateTLBForAsid(aMapping->OsAsid());
#else
	TLinAddr addr = startAddr+aMapping->OsAsid();
	do InvalidateTLBForPage(addr);
	while((addr+=KPageSize)<endAddr);
#endif

	tables->AsyncClose();

	return r;
	}


TBool DCoarseMemory::MovingPageIn(DCoarseMapping* aMapping, TPhysAddr& aPageArrayPtr, TUint aIndex)
	{
	DCoarseMemory::DPageTables* tables = iPageTables[aMapping->PteType()];
	return tables->MovingPageIn(aPageArrayPtr, aIndex);
	}


TPte* DCoarseMemory::FindPageTable(DCoarseMapping* aMapping, TLinAddr aLinAddr, TUint aMemoryIndex)
	{
	DCoarseMemory::DPageTables* tables = iPageTables[aMapping->PteType()];

	// get address of page table...
	TUint i = aMemoryIndex >> (KChunkShift - KPageShift);
	return tables->GetPageTable(i);
	}


TInt DCoarseMemory::ClaimInitialPages(TLinAddr aBase, TUint aSize, TMappingPermissions aPermissions, TBool aAllowGaps, TBool aAllowNonRamPages)
	{
	TRACE(("DCoarseMemory[0x%08x]::ClaimInitialPages(0x%08x,0x%08x,0x%08x,%d,%d)",this,aBase,aSize,aPermissions,aAllowGaps,aAllowNonRamPages));

	// validate arguments...
	if(aBase&KChunkMask || aBase<KGlobalMemoryBase)
		return KErrArgument;
	if(aSize&KPageMask || aSize>iSizeInPages*KPageSize)
		return KErrArgument;

	// get DPageTables object...
	TUint pteType = Mmu::PteType(aPermissions,true);
	MemoryObjectLock::Lock(this);
	DPageTables* tables = GetOrAllocatePageTables(pteType);
	MemoryObjectLock::Unlock(this);
	__NK_ASSERT_DEBUG(tables);

	// check and allocate page array entries...
	RPageArray::TIter pageIter;
	TInt r = iPages.AddStart(0,aSize>>KPageShift,pageIter);
	__NK_ASSERT_ALWAYS(r==KErrNone);

	// hold MmuLock for a long time; shouldn't matter as this is only done during boot
	::PageTables.Lock();
	MmuLock::Lock();

	TPte blankPte = tables->iBlankPte;
	TPte** pPt = tables->iTables;
	TPde* pPde = Mmu::PageDirectoryEntry(KKernelOsAsid,aBase);
	TUint offset = 0;
	TUint size = aSize;
	while(size)
		{
		TPde pde = *pPde;
		TRACE(("DCoarseMemory::ClaimInitialPages: %08x: 0x%08x",aBase+offset,pde));

		TPte* pPte = NULL;
		SPageTableInfo* pti = NULL;

		if (Mmu::PdeMapsSection(pde))
			{
			TPhysAddr sectionBase = Mmu::SectionBaseFromPde(pde);
			TRACE(("  chunk is section mapped, base at %08x", sectionBase));
			__NK_ASSERT_DEBUG(sectionBase != KPhysAddrInvalid);

			TPde pde = sectionBase | Mmu::BlankSectionPde(Attributes(),pteType);
			__NK_ASSERT_DEBUG(((*pPde^pde)&~KPdeMatchMask)==0);
			*pPde = pde;
			SinglePdeUpdated(pPde);
			InvalidateTLB();

			// We allocate and populate a page table for the section even though it won't be mapped
			// initially because the presence of the page table is used to check whether RAM is
			// mapped in a chunk, and because it makes it possible to break the section mapping
			// without allocating memory.  This may change in the future.

			// Note these page tables are always unpaged here regardless of paged bit in iFlags
			// (e.g. ROM object is marked as paged despite initial pages being unpaged)
			pPte = tables->AllocatePageTable(offset >> KChunkShift, EFalse, EFalse);
			if (!pPte)
				{
				MmuLock::Unlock();
				return KErrNoMemory;
				}
			pti = SPageTableInfo::FromPtPtr(pPte);
			}
		else if (Mmu::PdeMapsPageTable(pde))
			{
			pPte = Mmu::PageTableFromPde(*pPde);
			TRACE(("  page table found at %08x", pPte));
			__NK_ASSERT_DEBUG(pPte);
			pti = SPageTableInfo::FromPtPtr(pPte);
			pti->SetCoarse(this,offset>>KChunkShift,pteType);
			}

		*pPt++ = pPte;
		++pPde;

		TUint numPages = 0;
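		// walk every page slot in this chunk; the loop ends when 'offset'
		// reaches the next chunk boundary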
		do
			{
			TPhysAddr pagePhys = Mmu::LinearToPhysical(aBase+offset);
			TPte pte;
			if(pagePhys==KPhysAddrInvalid)
				{
				if(size)
					{
					__NK_ASSERT_ALWAYS(aAllowGaps); // we have a gap, check this is allowed
					pageIter.Skip(1);
					}

				pte = KPteUnallocatedEntry;
				}
			else
				{
				__NK_ASSERT_ALWAYS(size); // pages can't be mapped above aSize

				pageIter.Add(1,&pagePhys);

				SPageInfo* pi = SPageInfo::SafeFromPhysAddr(pagePhys);
				__NK_ASSERT_ALWAYS(pi || aAllowNonRamPages);
				if(pi)
					{
					__NK_ASSERT_ALWAYS(pi->Type()==SPageInfo::EFixed);
					pi->SetManaged(this,offset>>KPageShift,PageInfoFlags());
					}

				++numPages;
				pte = pagePhys|blankPte;
				}

			if(pPte)
				{
				TRACE2(("!PTE %x=%x (was %x)",pPte,pte,*pPte));
				__NK_ASSERT_DEBUG(((*pPte^pte)&~KPteMatchMask)==0 || *pPte==KPteUnallocatedEntry);
				*pPte = pte;
				CacheMaintenance::SinglePteUpdated((TLinAddr)pPte);
				++pPte;
				}

			offset += KPageSize;
			if(size)
				size -= KPageSize;
			}
		while(offset&(KChunkMask&~KPageMask));

		if(pti)
			{
			pti->IncPageCount(numPages);
			TRACE2(("pt %x page count=%d",TLinAddr(pPte)-KPageTableSize,numPages));
			__NK_ASSERT_DEBUG(pti->CheckPageCount());
			}
		}

	InvalidateTLBForAsid(KKernelOsAsid);

	MmuLock::Unlock();
	::PageTables.Unlock();

	// release page array entries...
	iPages.AddEnd(0,aSize>>KPageShift);

	return KErrNone;
	}


DCoarseMemory::DPageTables* DCoarseMemory::GetOrAllocatePageTables(TUint aPteType)
	{
	__NK_ASSERT_DEBUG(MemoryObjectLock::IsHeld(this));

	MmuLock::Lock();
	DPageTables* tables = iPageTables[aPteType];
	if(tables)
		tables->Open();
	MmuLock::Unlock();

	if(!tables)
		{
		// allocate a new one if required...
		tables = DPageTables::New(this, iSizeInPages, aPteType);
		if (tables)
			{
			__NK_ASSERT_DEBUG(!iPageTables[aPteType]);
			iPageTables[aPteType] = tables;
			}
		}

	return tables;
	}


TInt DCoarseMemory::AddMapping(DMemoryMappingBase* aMapping)
	{
	if(!aMapping->IsCoarse())
		{
		// not coarse mapping...
		return DMemoryObject::AddMapping(aMapping);
		}

	__NK_ASSERT_DEBUG(aMapping->IsPinned()==false); // coarse mappings can't pin

	// Check mapping allowed.  Must hold memory object lock to prevent changes
	// to object's restrictions.
	MemoryObjectLock::Lock(this);
	TInt r = CheckNewMapping(aMapping);
	if(r!=KErrNone)
		{
		MemoryObjectLock::Unlock(this);
		return r;
		}

	// get DPageTable for mapping...
	DPageTables* tables = GetOrAllocatePageTables(aMapping->PteType());

	// Safe to release here as no restrictions to this type of mapping can be added as
	// we now have an iPageTables entry for this type of mapping.
	MemoryObjectLock::Unlock(this);
	if(!tables)
		return KErrNoMemory;

	// add mapping to DPageTable...
	r = tables->AddMapping((DCoarseMapping*)aMapping);
	if(r==KErrNone)
		{
		// allocate permanent page tables if required...
		if(aMapping->Flags()&DMemoryMapping::EPermanentPageTables)
			{
			MemoryObjectLock::Lock(this);
			r = tables->AllocatePermanentPageTables();
			MemoryObjectLock::Unlock(this);

			if(r==KErrNone)
				__e32_atomic_ior_ord8(&aMapping->Flags(), (TUint8)DMemoryMapping::EPageTablesAllocated);
			else
				tables->RemoveMapping((DCoarseMapping*)aMapping);
			}
		}

	tables->Close();

	return r;
	}


void DCoarseMemory::RemoveMapping(DMemoryMappingBase* aMapping)
	{
	if(!aMapping->IsCoarse())
		{
		// not coarse mapping...
		DMemoryObject::RemoveMapping(aMapping);
		return;
		}

	// need a temporary reference on self because we may be removing the last mapping
	// which will delete this...
	Open();

	// get DPageTable the mapping is attached to...
	DPageTables* tables = iPageTables[aMapping->PteType()];
	__NK_ASSERT_DEBUG(tables); // must exist because aMapping has a reference on it

	// free permanent page tables if required...
	if(aMapping->Flags()&DMemoryMapping::EPageTablesAllocated)
		{
		MemoryObjectLock::Lock(this);
		tables->FreePermanentPageTables();
		MemoryObjectLock::Unlock(this);
		}

	// remove mapping from page tables object...
	tables->RemoveMapping((DCoarseMapping*)aMapping);

	Close(); // may delete this memory object
	}


TInt DCoarseMemory::SetReadOnly()
	{
	__NK_ASSERT_DEBUG(MemoryObjectLock::IsHeld(this));

	// Search for writable iPageTable entries.
	// We hold the MemoryObjectLock so iPageTable entries can't be added or removed.
	MmuLock::Lock();
	TUint pteType = 0;
	do
		{
		if((pteType & EPteTypeWritable) && iPageTables[pteType])
			{
			MmuLock::Unlock();
			return KErrInUse;
			}
		}
	while(++pteType < ENumPteTypes);
	MmuLock::Unlock();

	// unmap pages from all fine mappings...
	return DMemoryObject::SetReadOnly();
	}


//
// DFineMemory
//

DFineMemory::DFineMemory(DMemoryManager* aManager, TUint aSizeInPages, TMemoryAttributes aAttributes, TMemoryCreateFlags aCreateFlags)
	: DMemoryObject(aManager,0,aSizeInPages,aAttributes,aCreateFlags)
	{
	}


DFineMemory* DFineMemory::New(DMemoryManager* aManager, TUint aSizeInPages, TMemoryAttributes aAttributes, TMemoryCreateFlags aCreateFlags)
	{
	DFineMemory* self = new DFineMemory(aManager,aSizeInPages,aAttributes,aCreateFlags);
	if(self)
		{
		if(self->Construct()==KErrNone)
			return self;
		self->Close();
		}
	return 0;
	}


DFineMemory::~DFineMemory()
	{
	TRACE2(("DFineMemory[0x%08x]::~DFineMemory",this));
	}


TInt DFineMemory::ClaimInitialPages(TLinAddr aBase, TUint aSize, TMappingPermissions aPermissions, TBool aAllowGaps, TBool aAllowNonRamPages)
	{
	TRACE(("DFineMemory[0x%08x]::ClaimInitialPages(0x%08x,0x%08x,0x%08x,%d,%d)",this,aBase,aSize,aPermissions,aAllowGaps,aAllowNonRamPages));
	(void)aPermissions;

	// validate arguments...
	if(aBase&KPageMask || aBase<KGlobalMemoryBase)
		return KErrArgument;
	if(aSize&KPageMask || aSize>iSizeInPages*KPageSize)
		return KErrArgument;

#ifdef _DEBUG
	// calculate 'blankPte', the correct PTE value for pages in this memory object...
	TUint pteType = Mmu::PteType(aPermissions,true);
	TPte blankPte = Mmu::BlankPte(Attributes(),pteType);
#endif

	// get page table...
	TPde* pPde = Mmu::PageDirectoryEntry(KKernelOsAsid,aBase);
	TPte* pPte = Mmu::PageTableFromPde(*pPde);
	if(!pPte)
		return KErrNone; // no pages mapped

	// check and allocate page array entries...
	RPageArray::TIter pageIter;
	TInt r = iPages.AddStart(0,aSize>>KPageShift,pageIter);
	__NK_ASSERT_ALWAYS(r==KErrNone);

	// hold MmuLock for a long time; shouldn't matter as this is only done during boot
	MmuLock::Lock();

	// setup page table for fine mappings...
	SPageTableInfo* pti = SPageTableInfo::FromPtPtr(pPte);
	__NK_ASSERT_DEBUG(pti->CheckPageCount());
	TBool pageTableOk = pti->ClaimFine(aBase&~KChunkMask,KKernelOsAsid);
	__NK_ASSERT_ALWAYS(pageTableOk);
	TRACE(("DFineMemory::ClaimInitialPages page table = 0x%08x",pPte));

	TUint pteIndex = (aBase>>KPageShift)&(KChunkMask>>KPageShift);
	TUint pageIndex = 0;
	TUint size = aSize;
	while(pageIndex<iSizeInPages)
		{
		TPhysAddr pagePhys = Mmu::PtePhysAddr(pPte[pteIndex],pteIndex);
		if(pagePhys==KPhysAddrInvalid)
			{
			if(size)
				{
				__NK_ASSERT_ALWAYS(aAllowGaps); // we have a gap, check this is allowed
				pageIter.Skip(1);
				}

			// check PTE is correct...
			__NK_ASSERT_DEBUG(pPte[pteIndex]==KPteUnallocatedEntry);
			}
		else
			{
			__NK_ASSERT_ALWAYS(size); // pages can't be mapped above aSize

			pageIter.Add(1,&pagePhys);

			SPageInfo* pi = SPageInfo::SafeFromPhysAddr(pagePhys);

			if(!pi)
				__NK_ASSERT_ALWAYS(aAllowNonRamPages);
			else
				{
				__NK_ASSERT_ALWAYS(pi->Type()==SPageInfo::EFixed);
				pi->SetManaged(this,pageIndex,PageInfoFlags());
				}

#ifdef _DEBUG
			// check PTE is correct...
			TPte pte = pagePhys|blankPte;
			__NK_ASSERT_DEBUG(((pPte[pteIndex]^pte)&~KPteMatchMask)==0);
#endif
			}

		// move on to next page...
		++pteIndex;
		__NK_ASSERT_ALWAYS(pteIndex<(KChunkSize>>KPageShift));
		++pageIndex;
		if(size)
			size -= KPageSize;
		}

	MmuLock::Unlock();

	// release page array entries...
	iPages.AddEnd(0,aSize>>KPageShift);

	return KErrNone;
	}