os/kernelhwsrv/kernel/eka/memmodel/epoc/flexible/mmu/mmu.cpp
     1 // Copyright (c) 1998-2009 Nokia Corporation and/or its subsidiary(-ies).
     2 // All rights reserved.
     3 // This component and the accompanying materials are made available
     4 // under the terms of the License "Eclipse Public License v1.0"
     5 // which accompanies this distribution, and is available
     6 // at the URL "http://www.eclipse.org/legal/epl-v10.html".
     7 //
     8 // Initial Contributors:
     9 // Nokia Corporation - initial contribution.
    10 //
    11 // Contributors:
    12 //
    13 // Description:
    14 //
    15 
    16 #include "memmodel.h"
    17 #include "kernel/cache_maintenance.inl"
    18 #include <kernel/cache.h>
    19 #include <ramalloc.h>
    20 #include <defrag.h>
    21 #include "mm.h"
    22 #include "mmu.h"
    23 #include "mpager.h"
    24 #include "mmapping.h"
    25 #include "mobject.h"
    26 #include "mmanager.h"
    27 #include "mpagearray.h"
    28 
    29 
    30 //
    31 // SPageInfo
    32 //
    33 
    34 // check enough space for page infos...
    35 __ASSERT_COMPILE((KPageInfoLinearEnd-KPageInfoLinearBase)/sizeof(SPageInfo)==(1<<(32-KPageShift)));
    36 
    37 // check KPageInfoShift...
    38 __ASSERT_COMPILE(sizeof(SPageInfo)==(1<<KPageInfoShift));
    39 
    40 
    41 SPageInfo* SPageInfo::SafeFromPhysAddr(TPhysAddr aAddress)
    42 	{
    43 	__NK_ASSERT_DEBUG((aAddress&KPageMask)==0);
    44 	TUint index = aAddress>>(KPageShift+KPageShift-KPageInfoShift);
    45 	TUint flags = ((TUint8*)KPageInfoMap)[index>>3];
    46 	TUint mask = 1<<(index&7);
    47 	if(!(flags&mask))
    48 		return 0; // no SPageInfo for aAddress
    49 	SPageInfo* info = FromPhysAddr(aAddress);
    50 	if(info->iType==SPageInfo::EInvalid)
    51 		return 0;
    52 	return info;
    53 	}
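
// Worked example of the KPageInfoMap lookup above (illustrative only; assumes typical
// values KPageShift==12 and KPageInfoShift==5, i.e. sizeof(SPageInfo)==32): one page of
// SPageInfo entries holds 1<<(KPageShift-KPageInfoShift) == 128 entries, so each bit of
// KPageInfoMap covers 128 pages == 1<<(2*KPageShift-KPageInfoShift) == 512KB of physical
// address space, which is why the index above works out as aAddress>>19 in that configuration.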
    54 
    55 
    56 #ifdef _DEBUG
    57 
    58 void SPageInfo::CheckAccess(const char* aMessage, TUint aFlags)
    59 	{
    60 	if(K::Initialising || NKern::Crashed())
    61 		return;
    62 
    63 	if((aFlags&ECheckNotAllocated) && (iType!=EUnknown))
    64 		{
    65 		Kern::Printf("SPageInfo[0x%08x]::CheckAccess failed, PhysAddr()=0x%08x, iType==%d : %s",this,PhysAddr(),iType,aMessage);
    66 		__NK_ASSERT_DEBUG(0);
    67 		goto fail;
    68 		}
    69 
    70 	if((aFlags&ECheckNotUnused) && (iType==EUnused))
    71 		{
    72 		Kern::Printf("SPageInfo[0x%08x]::CheckAccess failed, PhysAddr()=0x%08x, iType==%d : %s",this,PhysAddr(),iType,aMessage);
    73 		__NK_ASSERT_DEBUG(0);
    74 		goto fail;
    75 		}
    76 
    77 	if((aFlags&ECheckUnused) && (iType!=EUnused))
    78 		{
    79 		Kern::Printf("SPageInfo[0x%08x]::CheckAccess failed, PhysAddr()=0x%08x, iType==%d : %s",this,PhysAddr(),iType,aMessage);
    80 		__NK_ASSERT_DEBUG(0);
    81 		goto fail;
    82 		}
    83 
    84 	if((aFlags&ECheckNotPaged) && (iPagedState!=EUnpaged))
    85 		{
    86 		Kern::Printf("SPageInfo[0x%08x]::CheckAccess failed, PhysAddr()=0x%08x, iPagedState=%d : %s",this,PhysAddr(),iPagedState,aMessage);
    87 		__NK_ASSERT_DEBUG(0);
    88 		goto fail;
    89 		}
    90 
    91 	if((aFlags&ECheckRamAllocLock) && !RamAllocLock::IsHeld())
    92 		{
    93 		Kern::Printf("SPageInfo[0x%08x]::CheckAccess failed, PhysAddr()=0x%08x, iType==%d : %s",this,PhysAddr(),iType,aMessage);
    94 		__NK_ASSERT_DEBUG(0);
    95 		goto fail;
    96 		}
    97 
    98 	if((aFlags&ENoCheckMmuLock) || MmuLock::IsHeld())
    99 		return;
   100 fail:
   101 	Kern::Printf("SPageInfo[0x%08x]::CheckAccess failed, PhysAddr()=0x%08x : %s",this,PhysAddr(),aMessage);
   102 	Mmu::Panic(Mmu::EUnsafePageInfoAccess);
   103 	}
   104 
   105 
   106 void SPageInfo::Dump()
   107 	{
   108 	Kern::Printf("SPageInfo for page %x = %d,%d,%02x,0x%08x,0x%x,%d",PhysAddr(),iType,iPagedState,iFlags,iOwner,iIndex,iPinCount);
   109 	}
   110 
   111 #endif
   112 
   113 
   114 
   115 //
   116 // SPageTableInfo
   117 //
   118 
   119 // check enough space for page table infos...
   120 __ASSERT_COMPILE((KPageTableInfoEnd-KPageTableInfoBase)/sizeof(SPageTableInfo)
   121 					>=(KPageTableEnd-KPageTableBase)/KPageTableSize);
   122 
   123 // check KPtBlockShift...
   124 __ASSERT_COMPILE((sizeof(SPageTableInfo)<<KPtBlockShift)==KPageSize);
   125 
   126 
   127 #ifdef _DEBUG
   128 
   129 TBool SPageTableInfo::CheckPageCount()
   130 	{
   131 	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
   132 	TPte* pt = PageTable();
   133 	TUint realCount = 0;
   134 	do if(*pt++) ++realCount;	// count non-empty PTEs...
   135 	while(TLinAddr(pt)&(KPageTableMask/sizeof(TPte)*sizeof(TPte)));	// ...until pt reaches the end of this page table
   136 	if(iPageCount==realCount)
   137 		return true;
   138 	Kern::Printf("CheckPageCount Failed: pt=0x%08x count=%d realCount=%d",TLinAddr(pt)-KPageTableSize,iPageCount,realCount);
   139 	return false;
   140 	}
   141 
   142 
   143 void SPageTableInfo::CheckChangeUse(const char* aName)
   144 	{
   145 	if(K::Initialising)
   146 		return;
   147 	if(PageTablesLockIsHeld() && MmuLock::IsHeld())
   148 		return;
   149 	Kern::Printf("SPageTableInfo::CheckChangeUse failed : %s",aName);
   150 	Mmu::Panic(Mmu::EUnsafePageTableInfoAccess);
   151 	}
   152 
   153 
   154 void SPageTableInfo::CheckCheckUse(const char* aName)
   155 	{
   156 	if(K::Initialising)
   157 		return;
   158 	if(PageTablesLockIsHeld() || MmuLock::IsHeld())
   159 		return;
   160 	Kern::Printf("SPageTableInfo::CheckCheckUse failed : %s",aName);
   161 	Mmu::Panic(Mmu::EUnsafePageTableInfoAccess);
   162 	}
   163 
   164 
   165 void SPageTableInfo::CheckAccess(const char* aName)
   166 	{
   167 	if(K::Initialising)
   168 		return;
   169 	if(MmuLock::IsHeld())
   170 		return;
   171 	Kern::Printf("SPageTableInfo::CheckAccess failed : %s",aName);
   172 	Mmu::Panic(Mmu::EUnsafePageTableInfoAccess);
   173 	}
   174 
   175 
   176 void SPageTableInfo::CheckInit(const char* aName)
   177 	{
   178 	if(K::Initialising)
   179 		return;
   180 	if(PageTablesLockIsHeld() && iType==EUnused)
   181 		return;
   182 	Kern::Printf("SPageTableInfo::CheckInit failed : %s",aName);
   183 	Mmu::Panic(Mmu::EUnsafePageTableInfoAccess);
   184 	}
   185 
   186 #endif
   187 
   188 
   189 
   190 //
   191 // RamAllocLock
   192 //
   193 
   194 _LIT(KLitRamAlloc,"RamAlloc");
   195 _LIT(KLitPhysMemSync,"PhysMemSync");
   196 
   197 void RamAllocLock::Lock()
   198 	{
   199 	Mmu& m = TheMmu;
   200 	Kern::MutexWait(*m.iRamAllocatorMutex);
   201 	if(!m.iRamAllocLockCount++)
   202 		{
   203 		// first lock, so set up memory fail data...
   204 		m.iRamAllocFailed = EFalse;
   205 		__NK_ASSERT_DEBUG(m.iRamAllocInitialFreePages==m.FreeRamInPages()); // free RAM shouldn't have changed whilst the lock wasn't held
   206 		}
   207 	}
   208 
   209 
   210 void RamAllocLock::Unlock()
   211 	{
   212 	Mmu& m = TheMmu;
   213 	if(--m.iRamAllocLockCount)
   214 		{
   215 		Kern::MutexSignal(*m.iRamAllocatorMutex);
   216 		return;
   217 		}
   218 	TBool failed = m.iRamAllocFailed;
   219 	TUint initial = m.iRamAllocInitialFreePages;
   220 	TUint final = m.FreeRamInPages();
   221 	m.iRamAllocInitialFreePages = final; // new baseline value
   222 	TUint changes = K::CheckFreeMemoryLevel(initial*KPageSize,final*KPageSize,failed);
   223 	if(changes)
   224 		{
   225 		__KTRACE_OPT(KMMU,Kern::Printf("RamAllocLock::Unlock() changes=%x",changes));
   226 		}
   227 	Kern::MutexSignal(*m.iRamAllocatorMutex);
   228 	}
   229 
   230 
   231 TBool RamAllocLock::Flash()
   232 	{
   233 	Unlock();
   234 	Lock();
   235 	return true; // lock was released
   236 	}
   237 
   238 
   239 TBool RamAllocLock::IsHeld()
   240 	{
   241 	Mmu& m = TheMmu;
   242 	return m.iRamAllocatorMutex->iCleanup.iThread == &Kern::CurrentThread() && m.iRamAllocLockCount;
   243 	}
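
// Illustrative sketch (not part of the original source): the allocation routines later in
// this file, e.g. Mmu::AllocRam and Mmu::AllocPhysicalRam, assert RamAllocLock::IsHeld(),
// so a hypothetical caller brackets the allocation with the lock as shown here. The
// function name and page count are made up.
#if 0
void ExampleAllocUnderRamAllocLock()
	{
	TPhysAddr pages[4];
	RamAllocLock::Lock();
	TInt r = TheMmu.AllocPhysicalRam(pages, 4, (Mmu::TRamAllocFlags)EMemAttStronglyOrdered);
	RamAllocLock::Unlock();	// Unlock() also rebaselines free memory and runs the free-memory level check
	(void)r;
	}
#endif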
   244 
   245 
   246 
   247 //
   248 // MmuLock
   249 //
   250 
   251 #ifdef _DEBUG
   252 TUint MmuLock::UnlockGuardNest =0;
   253 TUint MmuLock::UnlockGuardFail =0;
   254 #endif
   255 
   256 NFastMutex MmuLock::iLock;
   257 
   258 void MmuLock::Lock()
   259 	{
   260 	NKern::FMWait(&iLock);
   261 	}
   262 
   263 void MmuLock::Unlock()
   264 	{
   265 	UnlockGuardCheck();
   266 	NKern::FMSignal(&iLock);
   267 	}
   268 
   269 TBool MmuLock::Flash()
   270 	{
   271 	UnlockGuardCheck();
   272 	return NKern::FMFlash(&iLock);
   273 	}
   274 
   275 TBool MmuLock::IsHeld()
   276 	{
   277 	NFastMutex& m = iLock;
   278 	return m.HeldByCurrentThread();
   279 	}
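
// Illustrative sketch (not part of the original source): long page-info updates elsewhere
// in this file hold MmuLock but periodically 'flash' it so that other threads waiting on
// the lock are not starved. The loop body and DoOnePageInfoUpdate are hypothetical.
#if 0
void ExampleBatchedUpdateUnderMmuLock(SPageInfo* aPageInfos, TUint aCount)
	{
	TUint flash = 0;
	MmuLock::Lock();
	for(TUint i=0; i<aCount; ++i)
		{
		// release and re-acquire the lock every KMaxPageInfoUpdatesInOneGo iterations...
		MmuLock::Flash(flash,KMaxPageInfoUpdatesInOneGo);
		DoOnePageInfoUpdate(aPageInfos+i);	// hypothetical helper
		}
	MmuLock::Unlock();
	}
#endif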
   280 
   281 
   282 
   283 //
   284 // Initialisation
   285 //
   286 
   287 Mmu TheMmu;
   288 
   289 void Mmu::Init1Common()
   290 	{
   291 	__KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("Mmu::Init1Common"));
   292 
   293 	// Mmu data
   294 	TUint pteType = PteType(ESupervisorReadWrite,true);
   295 	iTempPteCached = BlankPte((TMemoryAttributes)(EMemoryAttributeNormalCached|EMemoryAttributeDefaultShareable),pteType);
   296 	iTempPteUncached = BlankPte((TMemoryAttributes)(EMemoryAttributeNormalUncached|EMemoryAttributeDefaultShareable),pteType);
   297 	iTempPteCacheMaintenance = BlankPte((TMemoryAttributes)(CacheMaintenance::TemporaryMapping()|EMemoryAttributeDefaultShareable),pteType);
   298 	
   299 	// other
   300 	PP::MaxUserThreadStack=0x14000;			// 80K - STDLIB asks for 64K for PosixServer!!!!
   301 	PP::UserThreadStackGuard=0x2000;		// 8K
   302 	PP::MaxStackSpacePerProcess=0x200000;	// 2Mb
   303 	K::SupervisorThreadStackSize=0x1000;	// 4K
   304 	PP::SupervisorThreadStackGuard=0x1000;	// 4K
   305 	K::MachineConfig=(TMachineConfig*)KMachineConfigLinAddr;
   306 	PP::RamDriveStartAddress=0;
   307 	PP::RamDriveRange=0;
   308 	PP::RamDriveMaxSize=0x20000000;	// 512MB, probably will be reduced later
   309 	K::MemModelAttributes=EMemModelTypeFlexible|EMemModelAttrNonExProt|EMemModelAttrKernProt|EMemModelAttrWriteProt|
   310 						EMemModelAttrVA|EMemModelAttrProcessProt|EMemModelAttrSameVA|EMemModelAttrSvKernProt|
   311 						EMemModelAttrIPCKernProt|EMemModelAttrRamCodeProt;
   312 	}
   313 
   314 
   315 #if 0
   316 void Mmu::VerifyRam()
   317 	{
   318 	Kern::Printf("Mmu::VerifyRam() pass 1");
   319 	RamAllocLock::Lock();
   320 
   321 	TPhysAddr p = 0;
   322 	do
   323 		{
   324 		SPageInfo* pi = SPageInfo::SafeFromPhysAddr(p);
   325 		if(pi)
   326 			{
   327 			Kern::Printf("%08x %d",p,pi->Type());
   328 			if(pi->Type()==SPageInfo::EUnused)
   329 				{
   330 				volatile TPhysAddr* b = (volatile TPhysAddr*)MapTemp(p,0);
   331 				b[0] = p;
   332 				b[1] = ~p;
   333 				__NK_ASSERT_DEBUG(b[0]==p);
   334 				__NK_ASSERT_DEBUG(b[1]==~p);
   335 				UnmapTemp();
   336 				}
   337 			}
   338 		p += KPageSize;
   339 		}
   340 	while(p);
   341 
   342 	TBool fail = false;
   343 	Kern::Printf("Mmu::VerifyRam() pass 2");
   344 	do
   345 		{
   346 		SPageInfo* pi = SPageInfo::SafeFromPhysAddr(p);
   347 		if(pi)
   348 			{
   349 			if(pi->Type()==SPageInfo::EUnused)
   350 				{
   351 				volatile TPhysAddr* b = (volatile TPhysAddr*)MapTemp(p,0);
   352 				if(b[0]!=p || b[1]!=~p)
   353 					{
   354 					fail = true;
   355 					Kern::Printf("%08x FAILED %x %x",p,b[0],b[1]);
   356 					}
   357 				UnmapTemp();
   358 				}
   359 			}
   360 		p += KPageSize;
   361 		}
   362 	while(p);
   363 
   364 	__NK_ASSERT_DEBUG(!fail);
   365 	RamAllocLock::Unlock();
   366 	}
   367 #endif
   368 
   369 
   370 void Mmu::Init2Common()
   371 	{
   372 	__KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("Mmu::Init2Common"));
   373 
   374 	// create allocator...
   375 	const SRamInfo& info = *(const SRamInfo*)TheSuperPage().iRamBootData;
   376 	iRamPageAllocator = DRamAllocator::New(info, iRamZones, iRamZoneCallback);
   377 
   378 	// initialise all pages in banks as unused...
   379 	const SRamBank* bank = info.iBanks;
   380 	while(bank->iSize)
   381 		{
   382 		TUint32 base = bank->iBase;
   383 		TUint32 size = bank->iSize;
   384 		__KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("Found RAM bank 0x%08x size %d",base,size));
   385 		if(base+size<=base || ((base|size)&KPageMask))
   386 			Panic(EInvalidRamBankAtBoot);
   387 
   388 		SPageInfo* pi = SPageInfo::FromPhysAddr(base);
   389 		SPageInfo* piEnd = pi+(size>>KPageShift);
   390 		while(pi<piEnd)
   391 			(pi++)->SetUnused();
   392 		++bank;
   393 		}
   394 	// step over the zero-sized terminator entry to get to the reserved banks.
   395 	++bank;
   396 	// mark any reserved regions as allocated...
   397 	while(bank->iSize)
   398 		{
   399 		TUint32 base = bank->iBase;
   400 		TUint32 size = bank->iSize;
   401 		__KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("Found reserved bank 0x%08x size %d",base,size));
   402 		if(base+size<=base || ((base|size)&KPageMask))
   403 			Panic(EInvalidReservedBankAtBoot);
   404 
   405 		SPageInfo* pi = SPageInfo::FromPhysAddr(base);
   406 		SPageInfo* piEnd = pi+(size>>KPageShift);
   407 		while(pi<piEnd)
   408 			(pi++)->SetPhysAlloc();
   409 		++bank;
   410 		}
   411 
   412 	// Clear the initial (and so far only) page table info page so all unused
   413 	// page table infos will be marked as unused.
   414 	__ASSERT_COMPILE(SPageTableInfo::EUnused == 0);
   415 	memclr((TAny*)KPageTableInfoBase, KPageSize);
   416 
   417 	// look for page tables - assume first page table maps page tables
   418 	TPte* pPte = (TPte*)KPageTableBase;
   419 	TInt i;
   420 	for(i=0; i<KChunkSize/KPageSize; ++i)
   421 		{
   422 		TPte pte = *pPte++;
   423 		if(pte==KPteUnallocatedEntry)	// after boot, page tables are contiguous
   424 			break;
   425 		TPhysAddr ptpgPhys = Mmu::PtePhysAddr(pte,i);
   426 		__KTRACE_OPT(KBOOT,Kern::Printf("Page Table Group %08x -> Phys %08x", KPageTableBase+i*KPageSize, ptpgPhys));
   427 		SPageInfo* pi = SPageInfo::SafeFromPhysAddr(ptpgPhys);
   428 		__ASSERT_ALWAYS(pi, Panic(EInvalidPageTableAtBoot));
   429 		pi->SetFixed(i); // this also sets the SPageInfo::iOffset so that linear-to-physical works
   430 		}
   431 
   432 	// look for mapped pages
   433 	TPde* pd = Mmu::PageDirectory(KKernelOsAsid);
   434 	for(i=0; i<(1<<(32-KChunkShift)); ++i)
   435 		{
   436 		TPde pde = pd[i];
   437 		if(pde==KPdeUnallocatedEntry)
   438 			continue;
   439 		TPhysAddr pdePhys = Mmu::PdePhysAddr(pde);
   440 		TPte* pt = 0;
   441 		if(pdePhys!=KPhysAddrInvalid)
   442 			{
   443 			__KTRACE_OPT(KBOOT,Kern::Printf("Addr %08x -> Whole PDE Phys %08x", i<<KChunkShift, pdePhys));
   444 			}
   445 		else
   446 			{
   447 			pt = Mmu::PageTableFromPde(pde);
   448 			__KTRACE_OPT(KBOOT,Kern::Printf("Addr %08x -> page table %08x", i<<KChunkShift, pt));
   449 			__ASSERT_ALWAYS(pt,Panic(EInvalidPdeAtBoot)); // bad PDE
   450 			}
   451 
   452 		TInt j;
   453 		TInt np = 0;
   454 		for(j=0; j<KChunkSize/KPageSize; ++j)
   455 			{
   456 			TBool present = ETrue;	// all pages present if whole PDE mapping
   457 			TPte pte = 0;
   458 			if(pt)
   459 				{
   460 				pte = pt[j];
   461 				present = pte!=KPteUnallocatedEntry;
   462 				}
   463 			if(present)
   464 				{
   465 				++np;
   466 				TPhysAddr pa = pt ? Mmu::PtePhysAddr(pte,j) : (pdePhys + (j<<KPageShift));
   467 				SPageInfo* pi = SPageInfo::SafeFromPhysAddr(pa);
   468 				__KTRACE_OPT(KBOOT,Kern::Printf("Addr: %08x PA=%08x",
   469 													(i<<KChunkShift)+(j<<KPageShift), pa));
   470 				if(pi)	// ignore non-RAM mappings
   471 					{
   472 					TInt r = iRamPageAllocator->MarkPageAllocated(pa, EPageFixed);
   473 					// allow KErrAlreadyExists since it's possible that a page is doubly mapped
   474 					__ASSERT_ALWAYS(r==KErrNone || r==KErrAlreadyExists, Panic(EBadMappedPageAfterBoot));
   475 					if(pi->Type()==SPageInfo::EUnused)
   476 						pi->SetFixed();
   477 					}
   478 				}
   479 			}
   480 		__KTRACE_OPT(KBOOT,Kern::Printf("Addr: %08x #PTEs=%d",(i<<KChunkShift),np));
   481 		if(pt)
   482 			{
   483 			SPageTableInfo* pti = SPageTableInfo::FromPtPtr(pt);
   484 			pti->Boot(np);
   485 			}
   486 		}
   487 
   488 	TInt r = K::MutexCreate(iRamAllocatorMutex, KLitRamAlloc, NULL, EFalse, KMutexOrdRamAlloc);
   489 	if(r!=KErrNone)
   490 		Panic(ERamAllocMutexCreateFailed);
   491 	iRamAllocLockCount = 0;
   492 	iRamAllocInitialFreePages = FreeRamInPages();
   493 
   494 	__KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("Mmu::DoInit2"));
   495 
   496 	for(i=0; i<KNumTempMappingSlots; ++i)
   497 		iTempMap[i].Alloc(1);
   498 
   499 	iPhysMemSyncTemp.Alloc(1);
   500 	r = K::MutexCreate(iPhysMemSyncMutex, KLitPhysMemSync, NULL, EFalse, KMutexOrdSyncPhysMem);
   501 	if(r!=KErrNone)
   502 		Panic(EPhysMemSyncMutexCreateFailed);
   503 //	VerifyRam();
   504 	}
   505 
   506 
   507 void Mmu::Init2FinalCommon()
   508 	{
   509 	__KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("Mmu::Init2FinalCommon"));
   510 	// hack, reduce free memory to <2GB...
   511 	while(FreeRamInPages()>=0x80000000/KPageSize)
   512 		{
   513 		TPhysAddr dummyPage;
   514 		TInt r = iRamPageAllocator->AllocRamPages(&dummyPage,1, EPageFixed);
   515 		__NK_ASSERT_ALWAYS(r==KErrNone);
   516 		}
   517 	// hack, reduce total RAM to <2GB...
   518 	if(TheSuperPage().iTotalRamSize<0)
   519 		TheSuperPage().iTotalRamSize = 0x80000000-KPageSize;
   520 
   521 	// Save current free RAM size - there can never be more free RAM than this
   522 	TUint maxFreePages = FreeRamInPages();
   523 	K::MaxFreeRam = maxFreePages*KPageSize;
   524 	if(maxFreePages < (TUint(PP::RamDriveMaxSize)>>KPageShift))
   525 		PP::RamDriveMaxSize = maxFreePages*KPageSize;
   526 
   527 	// update this to stop assert triggering in RamAllocLock::Lock()
   528 	iRamAllocInitialFreePages = maxFreePages;
   529 	}
   530 
   531  
   532 void Mmu::Init3()
   533 	{
   534 	iDefrag = new Defrag;
   535 	if (!iDefrag)
   536 		Panic(EDefragAllocFailed);
   537 	iDefrag->Init3(TheMmu.iRamPageAllocator);
   538 	}
   539 
   540 //
   541 // Utils
   542 //
   543 
   544 void Mmu::Panic(TPanic aPanic)
   545 	{
   546 	Kern::Fault("MMU",aPanic);
   547 	}
   548 
   549 
   550 TUint Mmu::FreeRamInPages()
   551 	{
   552 	return iRamPageAllocator->FreeRamInPages()+ThePager.NumberOfFreePages();
   553 	}
   554 
   555 
   556 TUint Mmu::TotalPhysicalRamPages()
   557 	{
   558 	return iRamPageAllocator->TotalPhysicalRamPages();
   559 	}
   560 
   561 
   562 const SRamZone* Mmu::RamZoneConfig(TRamZoneCallback& aCallback) const
   563 	{
   564 	aCallback = iRamZoneCallback;
   565 	return iRamZones;
   566 	}
   567 
   568 
   569 void Mmu::SetRamZoneConfig(const SRamZone* aZones, TRamZoneCallback aCallback)
   570 	{
   571 	iRamZones = aZones;
   572 	iRamZoneCallback = aCallback;
   573 	}
   574 
   575 
   576 TInt Mmu::ModifyRamZoneFlags(TUint aId, TUint aClearMask, TUint aSetMask)
   577 	{
   578 	return iRamPageAllocator->ModifyZoneFlags(aId, aClearMask, aSetMask);
   579 	}
   580 
   581 
   582 TInt Mmu::GetRamZonePageCount(TUint aId, SRamZonePageCount& aPageData)
   583 	{
   584 	return iRamPageAllocator->GetZonePageCount(aId, aPageData);
   585 	}
   586 
   587 
   588 TInt Mmu::ZoneAllocPhysicalRam(TUint* aZoneIdList, TUint aZoneIdCount, TInt aBytes, TPhysAddr& aPhysAddr, TInt aAlign)
   589 	{
   590 	__KTRACE_OPT(KMMU,Kern::Printf("Mmu::ZoneAllocPhysicalRam(?,%d,%d,?,%d)", aZoneIdCount, aBytes, aAlign));
   591 	__NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
   592 
   593 	TInt r = iRamPageAllocator->ZoneAllocContiguousRam(aZoneIdList, aZoneIdCount, aBytes, aPhysAddr, EPageFixed, aAlign);
   594 	if(r!=KErrNone)
   595 		iRamAllocFailed = ETrue;
   596 	else
   597 		{
   598 		TUint pages = MM::RoundToPageCount(aBytes);
   599 		AllocatedPhysicalRam(aPhysAddr, pages,  (Mmu::TRamAllocFlags)EMemAttStronglyOrdered);
   600 		}
   601 	__KTRACE_OPT(KMMU,Kern::Printf("Mmu::ZoneAllocPhysicalRam returns %d and aPhysAddr=0x%08x",r,aPhysAddr));
   602 	return r;
   603 	}
   604 
   605 
   606 TInt Mmu::ZoneAllocPhysicalRam(TUint* aZoneIdList, TUint aZoneIdCount, TInt aNumPages, TPhysAddr* aPageList)
   607 	{
   608 	__KTRACE_OPT(KMMU,Kern::Printf("Mmu::ZoneAllocPhysicalRam(?,%d,%d,?)", aZoneIdCount, aNumPages));
   609 	__NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
   610 
   611 	TInt r = iRamPageAllocator->ZoneAllocRamPages(aZoneIdList, aZoneIdCount, aPageList, aNumPages, EPageFixed);
   612 	if(r!=KErrNone)
   613 		iRamAllocFailed = ETrue;
   614 	else
   615 		{
   616 		PagesAllocated(aPageList, aNumPages, (Mmu::TRamAllocFlags)EMemAttStronglyOrdered);
   617 
   618 		// update page infos...
   619 		TUint flash = 0;
   620 		TPhysAddr* pageEnd = aPageList + aNumPages;
   621 		MmuLock::Lock();
   622 		TPhysAddr* page = aPageList;
   623 		while (page < pageEnd)
   624 			{
   625 			MmuLock::Flash(flash,KMaxPageInfoUpdatesInOneGo/2);
   626 			TPhysAddr pagePhys = *page++;
   627 			__NK_ASSERT_DEBUG(pagePhys != KPhysAddrInvalid);
   628 			SPageInfo::FromPhysAddr(pagePhys)->SetPhysAlloc();
   629 			}
   630 		MmuLock::Unlock();
   631 		}
   632 	__KTRACE_OPT(KMMU,Kern::Printf("Mmu::ZoneAllocPhysicalRam returns %d",r));
   633 	return r;
   634 	}
   635 
   636 
   637 TInt Mmu::RamHalFunction(TInt aFunction, TAny* a1, TAny* a2)
   638 	{
   639 	// This function should only be registered with hal and therefore can only 
   640 	// be invoked after the ram allocator has been created.
   641 	__NK_ASSERT_DEBUG(iRamPageAllocator);
   642 	return iRamPageAllocator->HalFunction(aFunction, a1, a2);
   643 	}
   644 
   645 
   646 void Mmu::ChangePageType(SPageInfo* aPageInfo, TZonePageType aOldPageType, TZonePageType aNewPageType)
   647 	{
   648 	iRamPageAllocator->ChangePageType(aPageInfo, aOldPageType, aNewPageType);
   649 	}
   650 
   651 TInt Mmu::HandlePageFault(TLinAddr aPc, TLinAddr aFaultAddress, TUint aAccessPermissions, TAny* aExceptionInfo)
   652 	{
   653 	TRACE(("Mmu::HandlePageFault(0x%08x,0x%08x,%d)",aPc,aFaultAddress,aAccessPermissions));
   654 
   655 	DMemModelThread* thread = (DMemModelThread*)TheCurrentThread;
   656 	// Get the os asid of the process taking the fault; no need to open a reference 
   657 	// as it is the current thread's process, so it can't be freed.
   658 	TUint faultOsAsid = ((DMemModelProcess*)thread->iNThread.iAddressSpace)->OsAsid();
   659 
   660 	// check if any fast mutexes held...
   661 	NFastMutex* fm = NKern::HeldFastMutex();
   662 	TPagingExcTrap* trap = thread->iPagingExcTrap;
   663 	if(fm)
   664 		{
   665 		// check there is an XTRAP_PAGING in effect...
   666 		if(!trap)
   667 			{
   668 			// oops, kill system...
   669 			__KTRACE_OPT2(KPAGING,KPANIC,Kern::Printf("Fault with FM Held! addr=0x%08x (%O pc=%x)",aFaultAddress,thread,aPc));
   670 			Exc::Fault(aExceptionInfo);
   671 			}
   672 
   673 		// release the fast mutex...
   674 		NKern::FMSignal(fm);
   675 		}
   676 
   677 	NKern::ThreadEnterCS();
   678 
   679 	// work out address space for aFaultAddress...
   680 	TUint osAsid = faultOsAsid;
   681 	TLinAddr addr = aFaultAddress;
   682 	if(thread->iAliasLinAddr && TUint(addr - thread->iAliasLinAddr) < TUint(KPageSize))
   683 		{
   684 		// Address in aliased memory...
   685 		addr = (addr - thread->iAliasLinAddr) + thread->iAliasTarget;
   686 		// Get the os asid of the process the thread is aliasing; no need to open 
   687 		// a reference on it as one was already opened when the alias was created.
   688 		osAsid = thread->iAliasProcess->OsAsid();
   689 		}
   690 	else if(addr>=KGlobalMemoryBase)
   691 		{
   692 		// Address in global region, so look it up in kernel's address space...
   693 		osAsid = KKernelOsAsid;
   694 		}
   695 
   696 	// NOTE, osAsid will remain valid for the duration of this function because it is either
   697 	// - The current thread's address space, which can't go away whilst the thread
   698 	//   is running.
   699 	// - The address space of another thread which we are aliasing memory from,
   700 	//   and we would only do this if we have a reference on this other thread,
   701 	//   which has a reference on its process, which should own the address space!
   702 
   703 #ifdef __BROADCAST_CACHE_MAINTENANCE__
   704 	TInt aliasAsid = -1;
   705 	if (thread->iAliasLinAddr)
   706 		{
   707 		// If an alias is in effect, the thread will be locked to the current CPU,
   708 		// but we need to be able to migrate between CPUs for cache maintenance.  This
   709 		// must be dealt with by removing the alias and restoring it with a paging trap
   710 		// handler.
   711 		if(!trap)
   712 			{
   713 			// oops, kill system...
   714 			__KTRACE_OPT2(KPAGING,KPANIC,Kern::Printf("Fault with thread locked to current CPU! addr=0x%08x (%O pc=%x)",aFaultAddress,thread,aPc));
   715 			Exc::Fault(aExceptionInfo);
   716 			}
   717 		// Open a reference on the aliased process's os asid before removing the alias
   718 		// so that the address space can't be freed while we try to access its members.
   719 		aliasAsid = thread->iAliasProcess->TryOpenOsAsid();
   720 		// This should never fail as until we remove the alias there will 
   721 		// always be at least one reference on the os asid.
   722 		__NK_ASSERT_DEBUG(aliasAsid >= 0);
   723 		thread->RemoveAlias();
   724 		}
   725 #endif
   726 
   727 	// find mapping...
   728 	TUint offsetInMapping;
   729 	TUint mapInstanceCount;
   730 	DMemoryMapping* mapping = MM::FindMappingInAddressSpace(osAsid, addr, 1, offsetInMapping, mapInstanceCount);
   731 //	TRACE(("%O mapping=0x%08x",TheCurrentThread,mapping));
   732 	TInt r = KErrNotFound;
   733 
   734 	if(mapping)
   735 		{
   736 		MmuLock::Lock();
   737 
   738 		// check if we need to process page fault...
   739 		if(!Mmu::CheckPteTypePermissions(mapping->PteType(),aAccessPermissions) ||
   740 			mapInstanceCount != mapping->MapInstanceCount())
   741 			{
   742 			// Invalid access to the page.
   743 			MmuLock::Unlock();
   744 			r = KErrAbort;
   745 			}
   746 		else
   747 			{
   748 			// Should not be able to take a fault on a pinned mapping if accessing it 
   749 			// with the correct permissions.
   750 			__NK_ASSERT_DEBUG(!mapping->IsPinned());
   751 
   752 			// we do need to handle the fault, so determine whether this is a demand paging or page moving fault
   753 			DMemoryObject* memory = mapping->Memory();
   754 			if(!memory)
   755 				MmuLock::Unlock();
   756 			else
   757 				{
   758 				TUint faultIndex = (offsetInMapping >> KPageShift) + mapping->iStartIndex;
   759 				memory->Open();
   760 
   761 				// This is safe as we have the instance count so can detect the mapping 
   762 				// being reused and we have a reference to the memory object so it can't 
   763 				// be deleted.
   764 				MmuLock::Unlock();
   765 
   766 				if(memory->IsDemandPaged())
   767 					{
   768 					// Let the pager handle the fault...
   769 					r = ThePager.HandlePageFault(	aPc, aFaultAddress, faultOsAsid, faultIndex,
   770 													aAccessPermissions, memory, mapping, mapInstanceCount,
   771 													thread, aExceptionInfo);
   772 					}
   773 				else
   774 					{// The page could be being moved so verify that with its manager.
   775 					DMemoryManager* manager = memory->iManager;
   776 					r = manager->HandleFault(memory, faultIndex, mapping, mapInstanceCount, aAccessPermissions);
   777 					}
   778 				if (r == KErrNone)
   779 					{// alias PDE needs updating because page tables have changed...
   780 					thread->RefreshAlias();
   781 					}
   782 				memory->Close();
   783 				}
   784 			}
   785 		mapping->Close();
   786 		}
   787 
   788 	if (trap)
   789 		{
   790 		// restore address space (because the trap will bypass any code
   791 		// which would have done this)...
   792 		DMemModelThread::RestoreAddressSpace();
   793 		}
   794 
   795 #ifdef __BROADCAST_CACHE_MAINTENANCE__
   796 	// Close any reference on the aliased process's os asid before we leave the
   797 	// critical section.
   798 	if (aliasAsid >= 0)
   799 		{
   800 		thread->iAliasProcess->CloseOsAsid();
   801 		}
   802 #endif
   803 
   804 	NKern::ThreadLeaveCS();  // thread will die now if CheckRealtimeThreadFault caused a panic
   805 
   806 	// deal with XTRAP_PAGING...
   807 	if(trap)
   808 		{
   809 		// re-acquire any fast mutex which was held before the page fault...
   810 		if(fm)
   811 			NKern::FMWait(fm);
   812 		if (r == KErrNone)
   813 			{
   814 			trap->Exception(1); // return from exception trap with result '1' (value>0)
   815 			// code doesn't continue beyond this point.
   816 			__NK_ASSERT_DEBUG(0);
   817 			}
   818 		}
   819 
   820 	return r;
   821 	}
   822 
   823 
   824 //
   825 // Memory allocation
   826 //
   827 
   828 TInt Mmu::AllocRam(	TPhysAddr* aPages, TUint aCount, TRamAllocFlags aFlags, TZonePageType aZonePageType, 
   829 					TUint aBlockZoneId, TBool aBlockRest)
   830 	{
   831 	__KTRACE_OPT(KMMU,Kern::Printf("Mmu::AllocRam(?,%d,%x)",aCount,aFlags));
   832 	__NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
   833 #ifdef _DEBUG
   834 	if(K::CheckForSimulatedAllocFail())
   835 		{
   836 		__KTRACE_OPT(KMMU,Kern::Printf("Mmu::AllocRam returns simulated OOM %d",KErrNoMemory));
   837 		return KErrNoMemory;
   838 		}
   839 #endif
   840 	TInt missing = iRamPageAllocator->AllocRamPages(aPages, aCount, aZonePageType, aBlockZoneId, aBlockRest);
   841 	if(missing && !(aFlags&EAllocNoPagerReclaim) && ThePager.GetFreePages(missing))
   842 		missing = iRamPageAllocator->AllocRamPages(aPages, aCount, aZonePageType, aBlockZoneId, aBlockRest);
   843 	TInt r = missing ? KErrNoMemory : KErrNone;
   844 	if(r!=KErrNone)
   845 		iRamAllocFailed = ETrue;
   846 	else
   847 		PagesAllocated(aPages,aCount,aFlags);
   848 	__KTRACE_OPT(KMMU,Kern::Printf("Mmu::AllocRam returns %d",r));
   849 	return r;
   850 	}
   851 
   852 
   853 void Mmu::FreeRam(TPhysAddr* aPages, TUint aCount, TZonePageType aZonePageType)
   854 	{
   855 	__KTRACE_OPT(KMMU,Kern::Printf("Mmu::FreeRam(?,%d)",aCount));
   856 	__NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
   857 
   858 	// update page infos...
   859 	TPhysAddr* pages = aPages;
   860 	TPhysAddr* pagesEnd = pages+aCount;
   861 	TPhysAddr* pagesOut = aPages;
   862 	MmuLock::Lock();
   863 	TUint flash = 0;
   864 	while(pages<pagesEnd)
   865 		{
   866 		MmuLock::Flash(flash,KMaxPageInfoUpdatesInOneGo/2);
   867 		TPhysAddr pagePhys = *pages++;
   868 		__NK_ASSERT_DEBUG(pagePhys!=KPhysAddrInvalid);
   869 		SPageInfo* pi = SPageInfo::FromPhysAddr(pagePhys);
   870 		PageFreed(pi);
   871 
   872 		// If this is the old page of a page being moved that was previously pinned,
   873 		// then make sure it is freed as discardable; otherwise, despite DPager::DonatePages()
   874 		// having marked it as discardable, it would be freed as movable.
   875 		__NK_ASSERT_DEBUG(pi->PagedState() != SPageInfo::EPagedPinnedMoved || aCount == 1);
   876 		if (pi->PagedState() == SPageInfo::EPagedPinnedMoved)
   877 			aZonePageType = EPageDiscard;
   878 
   879 		if(ThePager.PageFreed(pi)==KErrNone)
   880 			--aCount; // pager has dealt with this page, so one less for us
   881 		else
   882 			{
   883 			// All paged pages should have been dealt with by the pager above.
   884 			__NK_ASSERT_DEBUG(pi->PagedState() == SPageInfo::EUnpaged);
   885 			*pagesOut++ = pagePhys; // store page address for freeing later
   886 			}
   887 		}
   888 	MmuLock::Unlock();
   889 
   890 	iRamPageAllocator->FreeRamPages(aPages, aCount, aZonePageType);
   891 	}
   892 
   893 
   894 TInt Mmu::AllocContiguousRam(TPhysAddr& aPhysAddr, TUint aCount, TUint aAlign, TRamAllocFlags aFlags)
   895 	{
   896 	__KTRACE_OPT(KMMU,Kern::Printf("Mmu::AllocContiguousRam(?,0x%x,%d,%x)",aCount,aAlign,aFlags));
   897 	__NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
   898 #ifdef _DEBUG
   899 	if(K::CheckForSimulatedAllocFail())
   900 		{
   901 		__KTRACE_OPT(KMMU,Kern::Printf("Mmu::AllocContiguousRam returns simulated OOM %d",KErrNoMemory));
   902 		return KErrNoMemory;
   903 		}
   904 	// Only the pager sets EAllocNoPagerReclaim and it shouldn't allocate contiguous ram.
   905 	__NK_ASSERT_DEBUG(!(aFlags&EAllocNoPagerReclaim));
   906 #endif
   907 	TInt r = iRamPageAllocator->AllocContiguousRam(aCount, aPhysAddr, EPageFixed, aAlign+KPageShift);
   908 	if(r==KErrNoMemory && aCount > KMaxFreeableContiguousPages)
   909 		{
   910 		// flush paging cache and retry...
   911 		ThePager.FlushAll();
   912 		r = iRamPageAllocator->AllocContiguousRam(aCount, aPhysAddr, EPageFixed, aAlign+KPageShift);
   913 		}
   914 	if(r!=KErrNone)
   915 		iRamAllocFailed = ETrue;
   916 	else
   917 		PagesAllocated((TPhysAddr*)(aPhysAddr|1), aCount, aFlags);
   918 	__KTRACE_OPT(KMMU,Kern::Printf("Mmu::AllocContiguousRam returns %d and aPhysAddr=0x%08x",r,aPhysAddr));
   919 	return r;
   920 	}
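
// Worked example of the alignment argument above (illustrative only; assumes KPageShift==12):
// aAlign appears to be a log2 page-count alignment, so passing aAlign==4 becomes
// aAlign+KPageShift==16 in the call to the allocator, i.e. the block starts on a
// 1<<16 == 64KB (16-page) physical boundary.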
   921 
   922 
   923 void Mmu::FreeContiguousRam(TPhysAddr aPhysAddr, TUint aCount)
   924 	{
   925 	__KTRACE_OPT(KMMU,Kern::Printf("Mmu::FreeContiguousRam(0x%08x,0x%x)",aPhysAddr,aCount));
   926 	__NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
   927 	__NK_ASSERT_DEBUG((aPhysAddr&KPageMask)==0);
   928 
   929 	TUint pageCount = aCount;
   930 
   931 	// update page infos...
   932 	SPageInfo* pi = SPageInfo::FromPhysAddr(aPhysAddr);
   933 	SPageInfo* piEnd = pi+pageCount;
   934 	TUint flash = 0;
   935 	MmuLock::Lock();
   936 	while(pi<piEnd)
   937 		{
   938 		MmuLock::Flash(flash,KMaxPageInfoUpdatesInOneGo);
   939 		PageFreed(pi++);
   940 		}
   941 	MmuLock::Unlock();
   942 
   943 	// free pages...
   944 	while(pageCount)
   945 		{
   946 		iRamPageAllocator->FreeRamPage(aPhysAddr, EPageFixed);
   947 		aPhysAddr += KPageSize;
   948 		--pageCount;
   949 		}
   950 	}
   951 
   952 
   953 TInt Mmu::AllocPhysicalRam(TPhysAddr* aPages, TUint aCount, TRamAllocFlags aFlags)
   954 	{
   955 	__KTRACE_OPT(KMMU,Kern::Printf("Mmu::AllocPhysicalRam(?,%d,%x)",aCount,aFlags));
   956 	__NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
   957 	// Allocate fixed pages as physically allocated pages aren't movable or discardable.
   958 	TInt r = AllocRam(aPages, aCount, aFlags, EPageFixed);
   959 	if (r!=KErrNone)
   960 		return r;
   961 
   962 	// update page infos...
   963 	TPhysAddr* pages = aPages;
   964 	TPhysAddr* pagesEnd = pages+aCount;
   965 	MmuLock::Lock();
   966 	TUint flash = 0;
   967 	while(pages<pagesEnd)
   968 		{
   969 		MmuLock::Flash(flash,KMaxPageInfoUpdatesInOneGo/2);
   970 		TPhysAddr pagePhys = *pages++;
   971 		__NK_ASSERT_DEBUG(pagePhys!=KPhysAddrInvalid);
   972 		SPageInfo* pi = SPageInfo::FromPhysAddr(pagePhys);
   973 		pi->SetPhysAlloc();
   974 		}
   975 	MmuLock::Unlock();
   976 
   977 	return KErrNone;
   978 	}
   979 
   980 
   981 void Mmu::FreePhysicalRam(TPhysAddr* aPages, TUint aCount)
   982 	{
   983 	__KTRACE_OPT(KMMU,Kern::Printf("Mmu::FreePhysicalRam(?,%d)",aCount));
   984 	__NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
   985 
   986 	// update page infos...
   987 	TPhysAddr* pages = aPages;
   988 	TPhysAddr* pagesEnd = pages+aCount;
   989 	MmuLock::Lock();
   990 	TUint flash = 0;
   991 	while(pages<pagesEnd)
   992 		{
   993 		MmuLock::Flash(flash,KMaxPageInfoUpdatesInOneGo/2);
   994 		TPhysAddr pagePhys = *pages++;
   995 		__NK_ASSERT_DEBUG(pagePhys!=KPhysAddrInvalid);
   996 		SPageInfo* pi = SPageInfo::FromPhysAddr(pagePhys);
   997 		__ASSERT_ALWAYS(pi->Type()==SPageInfo::EPhysAlloc, Panic(EBadFreePhysicalRam));
   998 		__ASSERT_ALWAYS(!pi->UseCount(), Panic(EBadFreePhysicalRam));
   999 		pi->SetUnused();
  1000 		}
  1001 	MmuLock::Unlock();
  1002 
  1003 	iRamPageAllocator->FreeRamPages(aPages,aCount, EPageFixed);
  1004 	}
  1005 
  1006 
  1007 TInt Mmu::AllocPhysicalRam(TPhysAddr& aPhysAddr, TUint aCount, TUint aAlign, TRamAllocFlags aFlags)
  1008 	{
  1009 	__KTRACE_OPT(KMMU,Kern::Printf("Mmu::AllocPhysicalRam(?,0x%x,%d,%x)",aCount,aAlign,aFlags));
  1010 	TInt r = AllocContiguousRam(aPhysAddr,aCount,aAlign,aFlags);
  1011 	if (r!=KErrNone)
  1012 		return r;
  1013 
  1014 	// update page infos...
  1015 	SPageInfo* pi = SPageInfo::FromPhysAddr(aPhysAddr);
  1016 	SPageInfo* piEnd = pi+aCount;
  1017 	TUint flash = 0;
  1018 	MmuLock::Lock();
  1019 	while(pi<piEnd)
  1020 		{
  1021 		MmuLock::Flash(flash,KMaxPageInfoUpdatesInOneGo);
  1022 		pi->SetPhysAlloc();
  1023 		++pi;
  1024 		}
  1025 	MmuLock::Unlock();
  1026 
  1027 	return KErrNone;
  1028 	}
  1029 
  1030 
  1031 void Mmu::FreePhysicalRam(TPhysAddr aPhysAddr, TUint aCount)
  1032 	{
  1033 	__KTRACE_OPT(KMMU,Kern::Printf("Mmu::FreePhysicalRam(0x%08x,0x%x)",aPhysAddr,aCount));
  1034 
  1035 	// update page infos...
  1036 	SPageInfo* pi = SPageInfo::FromPhysAddr(aPhysAddr);
  1037 	SPageInfo* piEnd = pi+aCount;
  1038 	TUint flash = 0;
  1039 	MmuLock::Lock();
  1040 	while(pi<piEnd)
  1041 		{
  1042 		MmuLock::Flash(flash,KMaxPageInfoUpdatesInOneGo);
  1043 		__ASSERT_ALWAYS(pi->Type()==SPageInfo::EPhysAlloc, Panic(EBadFreePhysicalRam));
  1044 		__ASSERT_ALWAYS(!pi->UseCount(), Panic(EBadFreePhysicalRam));
  1045 		pi->SetUnused();
  1046 		++pi;
  1047 		}
  1048 	MmuLock::Unlock();
  1049 
  1050 	iRamPageAllocator->FreePhysicalRam(aPhysAddr, aCount << KPageShift);
  1051 	}
  1052 
  1053 
  1054 TInt Mmu::ClaimPhysicalRam(TPhysAddr aPhysAddr, TUint aCount, TRamAllocFlags aFlags)
  1055 	{
  1056 	__KTRACE_OPT(KMMU,Kern::Printf("Mmu::ClaimPhysicalRam(0x%08x,0x%x,0x%08x)",aPhysAddr,aCount,aFlags));
  1057 	aPhysAddr &= ~KPageMask;
  1058 	TInt r = iRamPageAllocator->ClaimPhysicalRam(aPhysAddr,(aCount << KPageShift));
  1059 	if(r!=KErrNone)
  1060 		return r;
  1061 
  1062 	PagesAllocated((TPhysAddr*)(aPhysAddr|1), aCount, aFlags);
  1063 
  1064 	// update page infos...
  1065 	SPageInfo* pi = SPageInfo::FromPhysAddr(aPhysAddr);
  1066 	SPageInfo* piEnd = pi+aCount;
  1067 	TUint flash = 0;
  1068 	MmuLock::Lock();
  1069 	while(pi<piEnd)
  1070 		{
  1071 		MmuLock::Flash(flash,KMaxPageInfoUpdatesInOneGo);
  1072 		pi->SetPhysAlloc();
  1073 		++pi;
  1074 		}
  1075 	MmuLock::Unlock();
  1076 
  1077 	return KErrNone;
  1078 	}
  1079 
  1080 
  1081 void Mmu::AllocatedPhysicalRam(TPhysAddr aPhysAddr, TUint aCount, TRamAllocFlags aFlags)
  1082 	{
  1083 	__KTRACE_OPT(KMMU,Kern::Printf("Mmu::AllocatedPhysicalRam(0x%08x,0x%x,%x)",aPhysAddr,aCount,aFlags));
  1084 
  1085 	PagesAllocated((TPhysAddr*)(aPhysAddr|1), aCount, aFlags);
  1086 
  1087 	// update page infos...
  1088 	SPageInfo* pi = SPageInfo::FromPhysAddr(aPhysAddr);
  1089 	SPageInfo* piEnd = pi+aCount;
  1090 	TUint flash = 0;
  1091 	MmuLock::Lock();
  1092 	while(pi<piEnd)
  1093 		{
  1094 		MmuLock::Flash(flash,KMaxPageInfoUpdatesInOneGo);
  1095 		pi->SetPhysAlloc();
  1096 		++pi;
  1097 		}
  1098 	MmuLock::Unlock();
  1099 	}
  1100 
  1101 
  1102 //
  1103 // Misc
  1104 //
  1105 
  1106 #ifdef _DEBUG
  1107 /**
  1108 Perform a page table walk to return the physical address of
  1109 the memory mapped at virtual address \a aLinAddr in the
  1110 address space \a aOsAsid.
  1111 
  1112 If the page table used was not one allocated by the kernel
  1113 then the results are unpredictable and may cause a system fault.
  1114 
  1115 @pre #MmuLock held.
  1116 */
  1117 TPhysAddr Mmu::LinearToPhysical(TLinAddr aLinAddr, TInt aOsAsid)
  1118 	{
  1119 	__NK_ASSERT_DEBUG(MmuLock::IsHeld() || K::Initialising);
  1120 	return UncheckedLinearToPhysical(aLinAddr,aOsAsid);
  1121 	}
  1122 #endif
  1123 
  1124 
  1125 /**
  1126 Next virtual address available for allocation by TTempMapping.
  1127 This is initialised to #KTempAddr and addresses may be allocated
  1128 until they reach #KTempAddrEnd.
  1129 */
  1130 TLinAddr Mmu::TTempMapping::iNextLinAddr = KTempAddr;
  1131 
  1132 
  1133 /**
  1134 Allocate virtual address space required to map a given number of memory pages.
  1135 
  1136 The actual amount of virtual address space allocated needs to accommodate \a aNumPages
  1137 pages of any colour. For example: if \a aNumPages == 4 and #KPageColourCount == 4,
  1138 then at least 7 pages are required. 
  1139 
  1140 @param aNumPages	Maximum number of pages that can be mapped into this temporary mapping.
  1141 
  1142 @pre Called in a single threaded context (boot) only.
  1143 
  1144 @pre #iNextLinAddr points to virtual page with zero colour.
  1145 @post #iNextLinAddr points to virtual page with zero colour.
  1146 */
  1147 void Mmu::TTempMapping::Alloc(TUint aNumPages)
  1148 	{
  1149 	__NK_ASSERT_DEBUG(aNumPages<=(KTempAddrEnd-KTempAddr)/KPageSize);
  1150 
  1151 	// This runs during the boot only (single threaded context) so the access to iNextLinAddr is not guarded by any mutex.
  1152 	TLinAddr tempAddr = iNextLinAddr;
  1153 	TUint numPages = (KPageColourMask+aNumPages+KPageColourMask)&~KPageColourMask;
  1154 	iNextLinAddr = tempAddr+numPages*KPageSize;
  1155 
  1156 	__NK_ASSERT_ALWAYS(iNextLinAddr<=KTempAddrEnd);
  1157 
  1158 	__NK_ASSERT_DEBUG(iSize==0);
  1159 	iLinAddr = tempAddr;
  1160 	MmuLock::Lock();
  1161 	iPtePtr = Mmu::PtePtrFromLinAddr(tempAddr,KKernelOsAsid);
  1162 	__NK_ASSERT_DEBUG(iPtePtr);
  1163 	MmuLock::Unlock();
  1164 	iBlankPte = TheMmu.iTempPteCached;
  1165 	iSize = aNumPages;
  1166 	iCount = 0;
  1167 
  1168 	TRACEB(("Mmu::TTempMapping::Alloc(%d) iLinAddr=0x%08x, iPtePtr=0x%08x",aNumPages,iLinAddr,iPtePtr));
  1169 	}
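
// Worked example of the rounding above (illustrative only; assumes KPageColourCount==4, so
// KPageColourMask==3): for aNumPages==4 the mapping must cope with a first page of any
// colour, so 4+3==7 pages could be needed; (3+4+3)&~3 rounds this up to 8 so that
// iNextLinAddr remains colour-aligned, satisfying the @pre/@post conditions documented above.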
  1170 
  1171 
  1172 /**
  1173 Map a single physical page into this temporary mapping.
  1174 
  1175 Supervisor read/write access and EMemoryAttributeStandard memory attributes apply.
  1176 
  1177 @param aPage		The physical page to map.
  1178 @param aColour 		The required colour for the mapping.
  1179 
  1180 @return 			The linear address at which the page is mapped.
  1181 */
  1182 TLinAddr Mmu::TTempMapping::Map(TPhysAddr aPage, TUint aColour)
  1183 	{
  1184 	__NK_ASSERT_DEBUG(iSize>=1);
  1185 	__NK_ASSERT_DEBUG(iCount==0);
  1186 
  1187 	TUint colour = aColour&KPageColourMask;
  1188 	TLinAddr addr = iLinAddr+(colour<<KPageShift);
  1189 	TPte* pPte = iPtePtr+colour;
  1190 	iColour = colour;
  1191 
  1192 	__ASSERT_DEBUG(*pPte==KPteUnallocatedEntry,MM::Panic(MM::ETempMappingAlreadyInUse));
  1193 	*pPte = (aPage&~KPageMask) | iBlankPte;
  1194 	CacheMaintenance::SinglePteUpdated((TLinAddr)pPte);
  1195 	InvalidateTLBForPage(addr|KKernelOsAsid);
  1196 
  1197 	iCount = 1;
  1198 	return addr;
  1199 	}
  1200 
  1201 /**
  1202 Map a single physical page into this temporary mapping using the given page table entry (PTE) value.
  1203 
  1204 @param aPage		The physical page to map.
  1205 @param aColour 		The required colour for the mapping.
  1206 @param aBlankPte	The PTE value to use for mapping the page,
  1207 					with the physical address component equal to zero.
  1208 
  1209 @return 			The linear address at which the page is mapped.
  1210 */
  1211 TLinAddr Mmu::TTempMapping::Map(TPhysAddr aPage, TUint aColour, TPte aBlankPte)
  1212 	{
  1213 	__NK_ASSERT_DEBUG(iSize>=1);
  1214 	__NK_ASSERT_DEBUG(iCount==0);
  1215 
  1216 	TUint colour = aColour&KPageColourMask;
  1217 	TLinAddr addr = iLinAddr+(colour<<KPageShift);
  1218 	TPte* pPte = iPtePtr+colour;
  1219 	iColour = colour;
  1220 
  1221 	__ASSERT_DEBUG(*pPte==KPteUnallocatedEntry,MM::Panic(MM::ETempMappingAlreadyInUse));
  1222 	*pPte = (aPage&~KPageMask) | aBlankPte;
  1223 	CacheMaintenance::SinglePteUpdated((TLinAddr)pPte);
  1224 	InvalidateTLBForPage(addr|KKernelOsAsid);
  1225 
  1226 	iCount = 1;
  1227 	return addr;
  1228 	}
  1229 
  1230 
  1231 /**
  1232 Map a number of physical pages into this temporary mapping.
  1233 
  1234 Supervisor read/write access and EMemoryAttributeStandard memory attributes apply.
  1235 
  1236 @param aPages		The array of physical pages to map.
  1237 @param aCount		The number of pages to map.
  1238 @param aColour 		The required colour for the first page.
  1239 					Consecutive pages will be coloured accordingly.
  1240 
  1241 @return 			The linear address at which the first page is mapped.
  1242 */
  1243 TLinAddr Mmu::TTempMapping::Map(TPhysAddr* aPages, TUint aCount, TUint aColour)
  1244 	{
  1245 	__NK_ASSERT_DEBUG(iSize>=aCount);
  1246 	__NK_ASSERT_DEBUG(iCount==0);
  1247 
  1248 	TUint colour = aColour&KPageColourMask;
  1249 	TLinAddr addr = iLinAddr+(colour<<KPageShift);
  1250 	TPte* pPte = iPtePtr+colour;
  1251 	iColour = colour;
  1252 
  1253 	for(TUint i=0; i<aCount; ++i)
  1254 		{
  1255 		__ASSERT_DEBUG(pPte[i]==KPteUnallocatedEntry,MM::Panic(MM::ETempMappingAlreadyInUse));
  1256 		pPte[i] = (aPages[i]&~KPageMask) | iBlankPte;
  1257 		CacheMaintenance::SinglePteUpdated((TLinAddr)&pPte[i]);
  1258 		InvalidateTLBForPage((addr+i*KPageSize)|KKernelOsAsid);
  1259 		}
  1260 
  1261 	iCount = aCount;
  1262 	return addr;
  1263 	}
  1264 
  1265 
  1266 /**
  1267 Unmap all pages from this temporary mapping.
  1268 
  1269 @param aIMBRequired	True if an IMB barrier is required prior to unmapping.
  1270 */
  1271 void Mmu::TTempMapping::Unmap(TBool aIMBRequired)
  1272 	{
  1273 	__NK_ASSERT_DEBUG(iSize>=1);
  1274 	if(aIMBRequired)
  1275 		CacheMaintenance::CodeChanged(iLinAddr+iColour*KPageSize,iCount*KPageSize);
  1276 	Unmap();
  1277 	}
  1278 
  1279 
  1280 /**
  1281 Unmap all pages from this temporary mapping.
  1282 */
  1283 void Mmu::TTempMapping::Unmap()
  1284 	{
  1285 	__NK_ASSERT_DEBUG(iSize>=1);
  1286 
  1287 	TUint colour = iColour;
  1288 	TLinAddr addr = iLinAddr+(colour<<KPageShift);
  1289 	TPte* pPte = iPtePtr+colour;
  1290 	TUint count = iCount;
  1291 
  1292 	while(count)
  1293 		{
  1294 		*pPte = KPteUnallocatedEntry;
  1295 		CacheMaintenance::SinglePteUpdated((TLinAddr)pPte);
  1296 		InvalidateTLBForPage(addr|KKernelOsAsid);
  1297 		addr += KPageSize;
  1298 		++pPte;
  1299 		--count;
  1300 		}
  1301 
  1302 	iCount = 0;
  1303 	}
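
// Illustrative sketch (not part of the original source): call sequence for a TTempMapping.
// In the real code Alloc() is only called once per mapping at boot (see Init2Common); this
// hypothetical function just shows the Map/Unmap pairing around an access via the returned
// linear address.
#if 0
void ExampleTempMappingUse(Mmu::TTempMapping& aMapping, TPhysAddr aPage)
	{
	TLinAddr addr = aMapping.Map(aPage,0);		// map the page with colour 0
	*(volatile TUint32*)addr = 0;				// access the physical page through the mapping
	aMapping.Unmap();							// remove the mapping and invalidate the TLB entry
	}
#endif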
  1304 
  1305 #ifdef __SMP__
  1306 /**
  1307 Dummy IPI to be invoked when a thread's alias pde members are updated remotely
  1308 by another thread.
  1309 
  1310 @internalComponent
  1311 */
  1312 class TAliasIPI : public TGenericIPI
  1313 	{
  1314 public:
  1315 	static void RefreshIsr(TGenericIPI*);
  1316 	void RefreshAlias();
  1317 	};
  1318 
  1319 
  1320 /**
  1321 Dummy isr method.
  1322 */
  1323 void TAliasIPI::RefreshIsr(TGenericIPI*)
  1324 	{
  1325 	TRACE2(("TAliasIPI"));
  1326 	}
  1327 
  1328 
  1329 /**
  1330 Queue the dummy IPI on all other processors.  This ensures that DoProcessSwitch will
  1331 have completed updating iAliasPdePtr once this method returns.
  1332 */
  1333 void TAliasIPI::RefreshAlias()
  1334 	{
  1335 	NKern::Lock();
  1336 	QueueAllOther(&RefreshIsr);
  1337 	NKern::Unlock();
  1338 	WaitCompletion();
  1339 	}
  1340 
  1341 
  1342 /** 
  1343 Perform a dummy IPI on all the other processors to ensure that if any of them are 
  1344 executing DoProcessSwitch they will see the new value of iAliasPde before they 
  1345 update iAliasPdePtr or will finish updating iAliasPdePtr before we continue.  
  1346 This works as DoProcessSwitch() has interrupts disabled while reading iAliasPde 
  1347 and updating iAliasPdePtr.
  1348 */
  1349 void BroadcastAliasRefresh()
  1350 	{
  1351 	TAliasIPI ipi;
  1352 	ipi.RefreshAlias();
  1353 	}
  1354 #endif //__SMP__
  1355 
  1356 /**
  1357 Remove any thread IPC aliases which use the specified page table.
  1358 This is used by the page table allocator when a page table is freed.
  1359 
  1360 @pre #PageTablesLockIsHeld
  1361 */
  1362 void Mmu::RemoveAliasesForPageTable(TPhysAddr aPageTable)
  1363 	{
  1364 	__NK_ASSERT_DEBUG(PageTablesLockIsHeld());
  1365 
  1366 	MmuLock::Lock();
  1367 
  1368 	SDblQue checkedList;
  1369 
  1370 	TUint ptId = aPageTable>>KPageTableShift;
  1371 	while(!iAliasList.IsEmpty())
  1372 		{
  1373 		SDblQueLink* next = iAliasList.First()->Deque();
  1374 		checkedList.Add(next);
  1375 		DMemModelThread* thread = (DMemModelThread*)((TInt)next-_FOFF(DMemModelThread,iAliasLink));
  1376 		if((thread->iAliasPde>>KPageTableShift)==ptId)
  1377 			{
  1378 			// the page table is being aliased by the thread, so remove it...
  1379 			TRACE2(("Thread %O RemoveAliasesForPageTable", thread));
  1380 			thread->iAliasPde = KPdeUnallocatedEntry;
  1381 #ifdef __SMP__ // we need to also unmap the page table in case thread is running on another core...
  1382 
  1383 			// Ensure other processors see the update to iAliasPde.
  1384 			BroadcastAliasRefresh();
  1385 
  1386 			*thread->iAliasPdePtr = KPdeUnallocatedEntry;
  1387 
  1388 			SinglePdeUpdated(thread->iAliasPdePtr);
  1389 			__NK_ASSERT_DEBUG((thread->iAliasLinAddr&KPageMask)==0);
  1390 			// Invalidate the TLB entry for the page using the os asid of the process that created the alias.
  1391 			// This is safe as the os asid will be valid: the thread must be running, otherwise the alias
  1392 			// would have been removed.
  1393 			InvalidateTLBForPage(thread->iAliasLinAddr | ((DMemModelProcess*)thread->iOwningProcess)->OsAsid());
  1394 			// note, a race condition with 'thread' updating its iAliasLinAddr is
  1395 			// not a problem because 'thread' will not then be accessing the aliased
  1396 			// region and will take care of invalidating the TLB.
  1397 #endif
  1398 			}
  1399 		MmuLock::Flash();
  1400 		}
  1401 
  1402 	// copy checkedList back to iAliasList
  1403 	iAliasList.MoveFrom(&checkedList);
  1404 
  1405 	MmuLock::Unlock();
  1406 	}
  1407 
  1408 
  1409 void DMemModelThread::RefreshAlias()
  1410 	{
  1411 	if(iAliasLinAddr)
  1412 		{
  1413 		TRACE2(("Thread %O RefreshAlias", this));
  1414 		// Get the os asid, this is the current thread so no need to open a reference.
  1415 		TUint thisAsid = ((DMemModelProcess*)iOwningProcess)->OsAsid();
  1416 		MmuLock::Lock();
  1417 		TInt osAsid = iAliasProcess->OsAsid();
  1418 		TPde pde = *Mmu::PageDirectoryEntry(osAsid,iAliasTarget);
  1419 		iAliasPde = pde;
  1420 		*iAliasPdePtr = pde;
  1421 		SinglePdeUpdated(iAliasPdePtr);
  1422 		InvalidateTLBForPage(iAliasLinAddr|thisAsid);
  1423 		MmuLock::Unlock();
  1424 		}
  1425 	}
  1426 
  1427 
  1428 
  1429 //
  1430 // Mapping/unmapping functions
  1431 //
  1432 
  1433 
  1434 /**
  1435 Modify page table entries (PTEs) so they map the given memory pages.
  1436 Entries are only updated if the current state of the corresponding page
  1437 is RPageArray::ECommitted.
  1438 
  1439 @param aPtePtr		Pointer into a page table for the PTE of the first page.
  1440 @param aCount		The number of pages to modify.
  1441 @param aPages		Pointer to the entry for the first page in a memory object's #RPageArray.
  1442 					Each entry contains the physical address of a page together with its
  1443 					current state (RPageArray::TState).
  1444 @param aBlankPte	The value to use for each PTE, with the physical address component equal
  1445 					to zero.
  1446 
  1447 @return False, if the page table no longer maps any entries and may be freed.
  1448 		True otherwise, to indicate that the page table is still needed.
  1449 
  1450 @pre #MmuLock held.
  1451 @post #MmuLock held and has not been released by this function.
  1452 */
  1453 TBool Mmu::MapPages(TPte* const aPtePtr, const TUint aCount, TPhysAddr* aPages, TPte aBlankPte)
  1454 	{
  1455 	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
  1456 	__NK_ASSERT_DEBUG(aCount);
  1457  	__NK_ASSERT_DEBUG(aBlankPte!=KPteUnallocatedEntry);
  1458 
  1459 	TUint count = 0;
  1460 	if(aCount==1)
  1461 		{
  1462 		// get page to map...
  1463 		TPhysAddr pagePhys = *aPages;
  1464 		TPte pte = *aPtePtr;
  1465 		if(!RPageArray::TargetStateIsCommitted(pagePhys))
  1466 			goto done; // page no longer needs mapping
  1467 
  1468 		// clear type flags...
  1469 		pagePhys &= ~KPageMask;
  1470 	
  1471 		// check nobody has already mapped the page...
  1472 		if(pte!=KPteUnallocatedEntry)
  1473 			{
  1474 			// already mapped...
  1475 #ifdef _DEBUG
  1476 			if((pte^pagePhys)>=TPte(KPageSize))
  1477 				{
  1478 				// but different!
  1479 				Kern::Printf("Mmu::MapPages already mapped %x->%x",pagePhys,pte);
  1480 				__NK_ASSERT_DEBUG(0);
  1481 				}
  1482 #endif
  1483 			return true; // return true to keep page table (it already had at least one page mapped)
  1484 			}
  1485 
  1486 		// map page...
  1487 		pte = pagePhys|aBlankPte;
  1488 		TRACE2(("!PTE %x=%x",aPtePtr,pte));
  1489 		*aPtePtr = pte;
  1490 		count = 1;
  1491 
  1492 		// clean cache...
  1493 		CacheMaintenance::SinglePteUpdated((TLinAddr)aPtePtr);
  1494 		}
  1495 	else
  1496 		{
  1497 		// check we are only updating a single page table...
  1498 		__NK_ASSERT_DEBUG(((TLinAddr(aPtePtr)^TLinAddr(aPtePtr+aCount-1))>>KPageTableShift)==0);
  1499 
  1500 		// map pages...
  1501 		TPte* pPte = aPtePtr;
  1502 		TPte* pPteEnd = aPtePtr+aCount;
  1503 		do
  1504 			{
  1505 			// map page...
  1506 			TPhysAddr pagePhys = *aPages++;
  1507 			TPte pte = *pPte++;
  1508 			if(RPageArray::TargetStateIsCommitted(pagePhys))
  1509 				{
  1510 				// clear type flags...
  1511 				pagePhys &= ~KPageMask;
  1512 
  1513 				// page not being freed, so try and map it...
  1514 				if(pte!=KPteUnallocatedEntry)
  1515 					{
  1516 					// already mapped...
  1517 #ifdef _DEBUG
  1518 					if((pte^pagePhys)>=TPte(KPageSize))
  1519 						{
  1520 						// but different!
  1521 						Kern::Printf("Mmu::MapPages already mapped %x->%x",pagePhys,pte);
  1522 						__NK_ASSERT_DEBUG(0);
  1523 						}
  1524 #endif
  1525 					}
  1526 				else
  1527 					{
  1528 					// map page...
  1529 					pte = pagePhys|aBlankPte;
  1530 					TRACE2(("!PTE %x=%x",pPte-1,pte));
  1531 					pPte[-1] = pte;
  1532 					++count;
  1533 					}
  1534 				}
  1535 			}
  1536 		while(pPte!=pPteEnd);
  1537 
  1538 		// clean cache...
  1539 		CacheMaintenance::MultiplePtesUpdated((TLinAddr)aPtePtr,(TLinAddr)pPte-(TLinAddr)aPtePtr);
  1540 		}
  1541 
  1542 done:
  1543 	// update page counts...
  1544 	SPageTableInfo* pti = SPageTableInfo::FromPtPtr(aPtePtr);
  1545 	count = pti->IncPageCount(count);
  1546 	TRACE2(("pt %x page count=%d",TLinAddr(aPtePtr)&~KPageTableMask,pti->PageCount()));
  1547 	__NK_ASSERT_DEBUG(pti->CheckPageCount());
  1548 
  1549 	// see if page table needs freeing...
  1550 	TUint keepPt = count | pti->PermanenceCount();
  1551 
  1552 	__NK_ASSERT_DEBUG(!pti->IsDemandPaged()); // check not demand paged page table
  1553 
  1554 	return keepPt;
  1555 	}
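
// Illustrative sketch (not part of the original source): callers use the boolean result of
// MapPages/UnmapPages to decide whether the page table is still needed. FreeEmptyPageTable
// is a hypothetical stand-in for whatever the caller does with an unneeded page table.
#if 0
void ExampleMapAndCheckPageTable(TPte* aPtePtr, TUint aCount, TPhysAddr* aPages, TPte aBlankPte)
	{
	MmuLock::Lock();
	TBool keepPt = TheMmu.MapPages(aPtePtr,aCount,aPages,aBlankPte);
	MmuLock::Unlock();
	if(!keepPt)
		FreeEmptyPageTable(aPtePtr);	// hypothetical: the table maps nothing and has no permanence count
	}
#endif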
  1556 
  1557 
  1558 /**
  1559 Modify page table entries (PTEs) so they map a new page.
  1560 Entries are only updated if the current state of the corresponding page
  1561 is RPageArray::ECommitted or RPageArray::EMoving.
  1562 
  1563 @param aPtePtr		Pointer into a page table for the PTE of the page.
  1564 @param aPage		Pointer to the entry for the page in a memory object's #RPageArray.
  1565 					The entry contains the physical address of a page together with its
  1566 					current state (RPageArray::TState).
  1567 @param aBlankPte	The value to use for each PTE, with the physical address component equal
  1568 					to zero.
  1569 
  1570 @pre #MmuLock held.
  1571 @post #MmuLock held and has not been released by this function.
  1572 */
  1573 void Mmu::RemapPage(TPte* const aPtePtr, TPhysAddr& aPage, TPte aBlankPte)
  1574 	{
  1575 	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
  1576  	__NK_ASSERT_DEBUG(aBlankPte!=KPteUnallocatedEntry);
  1577 
  1578 	// get page to remap...
  1579 	TPhysAddr pagePhys = aPage;
  1580 	
  1581 	// Only remap the page if it is committed or it is being moved and
  1582 	// no other operation has been performed on the page.
  1583 	if(!RPageArray::TargetStateIsCommitted(pagePhys))
  1584 		return; // page no longer needs mapping
  1585 	
  1586 	// Only remap the page if it is currently mapped, i.e. doesn't have an unallocated pte.
  1587 	// An unallocated pte here can only occur if a new mapping is being added but it hasn't 
  1588 	// yet updated all the ptes for the pages that it maps.
  1589 	TPte pte = *aPtePtr;
  1590 	if (pte == KPteUnallocatedEntry)
  1591 		return;
  1592 	
  1593 	// clear type flags...
  1594 	pagePhys &= ~KPageMask;
  1595 
  1596 	SPageInfo* pi = SPageInfo::SafeFromPhysAddr(pagePhys);
  1597 	if (pi)
  1598 		{
  1599 		SPageInfo::TPagedState pagedState = pi->PagedState();
  1600 		if (pagedState != SPageInfo::EUnpaged)
  1601 			{
  1602 			// The page is demand paged.  Only remap the page if it is pinned or is currently
  1603 			// accessible but still mapped to the old physical page.
  1604 			if (pagedState != SPageInfo::EPagedPinned &&
  1605 				 (Mmu::IsPteInaccessible(pte) || (pte^pagePhys) < TPte(KPageSize)))
  1606 				return;
  1607 			if (!pi->IsDirty())
  1608 				{
   1609 				// Ensure that the page is mapped read-only so that page moving does not
   1610 				// mark pages dirty which have not actually been written to.
  1611 				Mmu::MakePteInaccessible(aBlankPte, EFalse);
  1612 				}
  1613 			}
  1614 		}
  1615 	
  1616 	// Map the page in the page array entry as this is always the physical
  1617 	// page that the memory object's page should be mapped to.
  1618 	pte = pagePhys|aBlankPte;
  1619 	TRACE2(("!PTE %x=%x",aPtePtr,pte));
  1620 	*aPtePtr = pte;
  1621 	
  1622 	// clean cache...
  1623 	CacheMaintenance::SinglePteUpdated((TLinAddr)aPtePtr);
  1624 	}
  1625 
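// Illustrative sketch (editorial addition, not part of the original source): a
// minimal caller of Mmu::RemapPage, e.g. after a physical page has been moved
// and the memory object's page array entry now holds the new address. The
// function name below is hypothetical; the locking pattern and the call follow
// the preconditions documented above.
#if 0
void ExampleRemapAfterPageMove(TPte* aPtePtr, TPhysAddr& aPageArrayEntry, TPte aBlankPte)
	{
	MmuLock::Lock();
	// RemapPage only updates the PTE if the array entry is committed (or moving)
	// and the PTE is currently allocated; otherwise it returns without changes.
	TheMmu.RemapPage(aPtePtr, aPageArrayEntry, aBlankPte);
	MmuLock::Unlock();
	}
#endif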
  1626 
  1627 /**
  1628 Modify page table entries (PTEs) so they no longer map any memory pages.
  1629 
  1630 @param aPtePtr		Pointer into a page table for the PTE of the first page.
  1631 @param aCount		The number of pages to modify.
  1632 
  1633 @return False, if the page table no longer maps any entries and may be freed.
  1634 		True otherwise, to indicate that the page table is still needed.
  1635 
  1636 @pre #MmuLock held.
  1637 @post #MmuLock held and has not been released by this function.
  1638 */
  1639 TBool Mmu::UnmapPages(TPte* const aPtePtr, TUint aCount)
  1640 	{
  1641 	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
  1642 	__NK_ASSERT_DEBUG(aCount);
  1643 
  1644 	TUint count = 0;
  1645 	if(aCount==1)
  1646 		{
  1647 		if(*aPtePtr==KPteUnallocatedEntry)
  1648 			return true; // page already unmapped
  1649 
  1650 		// unmap page...
  1651 		++count;
  1652 		TPte pte = KPteUnallocatedEntry;
  1653 		TRACE2(("!PTE %x=%x",aPtePtr,pte));
  1654 		*aPtePtr = pte;
  1655 
  1656 		// clean cache...
  1657 		CacheMaintenance::SinglePteUpdated((TLinAddr)aPtePtr);
  1658 		}
  1659 	else
  1660 		{
  1661 		// check we are only updating a single page table...
  1662 		__NK_ASSERT_DEBUG(((TLinAddr(aPtePtr)^TLinAddr(aPtePtr+aCount-1))>>KPageTableShift)==0);
  1663 
  1664 		// unmap pages...
  1665 		TPte* pPte = aPtePtr;
  1666 		TPte* pPteEnd = aPtePtr+aCount;
  1667 		do
  1668 			{
  1669 			if(*pPte!=KPteUnallocatedEntry)
  1670 				{
  1671 				// unmap page...
  1672 				++count;
  1673 				TPte pte = KPteUnallocatedEntry;
  1674 				TRACE2(("!PTE %x=%x",pPte,pte));
  1675 				*pPte = pte;
  1676 				}
  1677 			}
  1678 		while(++pPte<pPteEnd);
  1679 
  1680 		if(!count)
  1681 			return true; // no PTEs changed, so nothing more to do
  1682 
  1683 		// clean cache...
  1684 		CacheMaintenance::MultiplePtesUpdated((TLinAddr)aPtePtr,(TLinAddr)pPte-(TLinAddr)aPtePtr);
  1685 		}
  1686 
  1687 	// update page table info...
  1688 	SPageTableInfo* pti = SPageTableInfo::FromPtPtr(aPtePtr);
  1689 	count = pti->DecPageCount(count);
  1690 	TRACE2(("pt %x page count=%d",TLinAddr(aPtePtr)&~KPageTableMask,count));
  1691 	__NK_ASSERT_DEBUG(pti->CheckPageCount());
  1692 
  1693 	// see if page table needs freeing...
  1694 	TUint keepPt = count | pti->PermanenceCount();
  1695 
  1696 	return keepPt;
  1697 	}
  1698 
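// Illustrative sketch (editorial addition): the return value of Mmu::UnmapPages
// is the remaining page count OR-ed with the permanence count, so a zero (false)
// result means the page table maps nothing, is not permanent, and may be freed.
// ExampleUnmapRegion and FreeUnusedPageTable are hypothetical names.
#if 0
void ExampleUnmapRegion(TPte* aPtePtr, TUint aCount)
	{
	MmuLock::Lock();
	TBool keepPt = TheMmu.UnmapPages(aPtePtr, aCount);
	MmuLock::Unlock();
	if(!keepPt)
		FreeUnusedPageTable(aPtePtr); // hypothetical clean-up of the now-empty page table
	}
#endif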
  1699 
  1700 /**
  1701 Modify page table entries (PTEs) so they no longer map the given memory pages.
  1702 Entries are only updated if the current state of the corresponding page
  1703 is 'decommitted' i.e. RPageArray::TargetStateIsDecommitted returns true.
  1704 
  1705 @param aPtePtr		Pointer into a page table for the PTE of the first page.
  1706 @param aCount		The number of pages to modify.
  1707 @param aPages		Pointer to the entry for the first page in a memory object's #RPageArray.
  1708 					Each entry contains the physical address of a page together with its
  1709 					current state (RPageArray::TState).
  1710 
  1711 @return False, if the page table no longer maps any entries and may be freed.
  1712 		True otherwise, to indicate that the page table is still needed.
  1713 
  1714 @pre #MmuLock held.
  1715 @post #MmuLock held and has not been released by this function.
  1716 */
  1717 TBool Mmu::UnmapPages(TPte* const aPtePtr, TUint aCount, TPhysAddr* aPages)
  1718 	{
  1719 	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
  1720 	__NK_ASSERT_DEBUG(aCount);
  1721 
  1722 	TUint count = 0;
  1723 	if(aCount==1)
  1724 		{
  1725 		if(*aPtePtr==KPteUnallocatedEntry)
  1726 			return true; // page already unmapped
  1727 
  1728 		if(!RPageArray::TargetStateIsDecommitted(*aPages))
  1729 			return true; // page has been reallocated
  1730 
  1731 		// unmap page...
  1732 		++count;
  1733 		TPte pte = KPteUnallocatedEntry;
  1734 		TRACE2(("!PTE %x=%x",aPtePtr,pte));
  1735 		*aPtePtr = pte;
  1736 
  1737 		// clean cache...
  1738 		CacheMaintenance::SinglePteUpdated((TLinAddr)aPtePtr);
  1739 		}
  1740 	else
  1741 		{
  1742 		// check we are only updating a single page table...
  1743 		__NK_ASSERT_DEBUG(((TLinAddr(aPtePtr)^TLinAddr(aPtePtr+aCount-1))>>KPageTableShift)==0);
  1744 
  1745 		// unmap pages...
  1746 		TPte* pPte = aPtePtr;
  1747 		TPte* pPteEnd = aPtePtr+aCount;
  1748 		do
  1749 			{
  1750 			if(RPageArray::TargetStateIsDecommitted(*aPages++) && *pPte!=KPteUnallocatedEntry)
  1751 				{
  1752 				// unmap page...
  1753 				++count;
  1754 				TPte pte = KPteUnallocatedEntry;
  1755 				TRACE2(("!PTE %x=%x",pPte,pte));
  1756 				*pPte = pte;
  1757 				}
  1758 			}
  1759 		while(++pPte<pPteEnd);
  1760 
  1761 		if(!count)
  1762 			return true; // no PTEs changed, so nothing more to do
  1763 
  1764 		// clean cache...
  1765 		CacheMaintenance::MultiplePtesUpdated((TLinAddr)aPtePtr,(TLinAddr)pPte-(TLinAddr)aPtePtr);
  1766 		}
  1767 
  1768 	// update page table info...
  1769 	SPageTableInfo* pti = SPageTableInfo::FromPtPtr(aPtePtr);
  1770 	count = pti->DecPageCount(count);
  1771 	TRACE2(("pt %x page count=%d",TLinAddr(aPtePtr)&~KPageTableMask,count));
  1772 	__NK_ASSERT_DEBUG(pti->CheckPageCount());
  1773 
  1774 	// see if page table needs freeing...
  1775 	TUint keepPt = count | pti->PermanenceCount();
  1776 
  1777 	return keepPt;
  1778 	}
  1779 
  1780 
  1781 /**
  1782 Modify page table entries (PTEs) so the given memory pages are not accessible.
  1783 Entries are only updated if the current state of the corresponding page
  1784 is RPageArray::ERestrictingNA.
  1785 
  1786 @param aPtePtr		Pointer into a page table for the PTE of the first page.
  1787 @param aCount		The number of pages to modify.
  1788 @param aPages		Pointer to the entry for the first page in a memory object's #RPageArray.
  1789 					Each entry contains the physical address of a page together with its
  1790 					current state (RPageArray::TState).
  1791 
  1792 @pre #MmuLock held.
  1793 @post #MmuLock held and has not been released by this function.
  1794 */
  1795 void Mmu::RestrictPagesNA(TPte* const aPtePtr, TUint aCount, TPhysAddr* aPages)
  1796 	{
  1797 	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
  1798 	__NK_ASSERT_DEBUG(aCount);
  1799 
  1800 	if(aCount==1)
  1801 		{
  1802 		TPhysAddr page = *aPages;
  1803 		TPte pte = *aPtePtr;
  1804 		RPageArray::TState state = RPageArray::State(page);
  1805 		if(state != RPageArray::ERestrictingNA && state != RPageArray::EMoving)
  1806 			return; // page no longer needs restricting
  1807 
  1808 		if(pte==KPteUnallocatedEntry)
  1809 			return; // page gone
  1810 
  1811 		// restrict page...
  1812 		pte = Mmu::MakePteInaccessible(pte,false);
  1813 		TRACE2(("!PTE %x=%x",aPtePtr,pte));
  1814 		*aPtePtr = pte;
  1815 
  1816 		// clean cache...
  1817 		CacheMaintenance::SinglePteUpdated((TLinAddr)aPtePtr);
  1818 		}
  1819 	else
  1820 		{
  1821 		// check we are only updating a single page table...
  1822 		__NK_ASSERT_DEBUG(((TLinAddr(aPtePtr)^TLinAddr(aPtePtr+aCount-1))>>KPageTableShift)==0);
  1823 
  1824 		// restrict pages...
  1825 		TPte* pPte = aPtePtr;
  1826 		TPte* pPteEnd = aPtePtr+aCount;
  1827 		do
  1828 			{
  1829 			TPhysAddr page = *aPages++;
  1830 			TPte pte = *pPte++;
  1831 			if(RPageArray::State(page)==RPageArray::ERestrictingNA && pte!=KPteUnallocatedEntry)
  1832 				{
  1833 				pte = Mmu::MakePteInaccessible(pte,false);
  1834 				TRACE2(("!PTE %x=%x",pPte-1,pte));
  1835 				pPte[-1] = pte;
  1836 				}
  1837 			}
  1838 		while(pPte<pPteEnd);
  1839 
  1840 		// clean cache...
  1841 		CacheMaintenance::MultiplePtesUpdated((TLinAddr)aPtePtr,(TLinAddr)pPte-(TLinAddr)aPtePtr);
  1842 		}
  1843 	}
  1844 
  1845 
  1846 /**
  1847 Modify page table entries (PTEs) so they map the given demand paged memory pages.
  1848 
  1849 Entries are only updated if the current state of the corresponding page
  1850 is RPageArray::ECommitted.
  1851 
  1852 This function is used for demand paged memory when handling a page fault or
  1853 memory pinning operation. It will widen the access permission of existing entries
  1854 if required to match \a aBlankPte and will 'rejuvenate' the page table.
  1855 
  1856 @param aPtePtr		Pointer into a page table for the PTE of the first page.
  1857 @param aCount		The number of pages to modify.
  1858 @param aPages		Pointer to the entry for the first page in a memory object's #RPageArray.
  1859 					Each entry contains the physical address of a page together with its
  1860 					current state (RPageArray::TState).
  1861 @param aBlankPte	The value to use for each PTE, with the physical address component equal
  1862 					to zero.
  1863 
  1864 @return False, if the page table no longer maps any entries and may be freed.
  1865 		True otherwise, to indicate that the page table is still needed.
  1866 
  1867 @pre #MmuLock held.
  1868 @post MmuLock held (but may have been released by this function)
  1869 */
  1870 TBool Mmu::PageInPages(TPte* const aPtePtr, const TUint aCount, TPhysAddr* aPages, TPte aBlankPte)
  1871 	{
  1872 	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
  1873 	__NK_ASSERT_DEBUG(aCount);
  1874 	__NK_ASSERT_DEBUG(aBlankPte!=KPteUnallocatedEntry);
  1875 
  1876 	TUint count = 0;
  1877 
  1878 	if(aCount==1)
  1879 		{
  1880 		// get page to map...
  1881 		TPhysAddr page = *aPages;
  1882 		TPte pte = *aPtePtr;
  1883 		if(!RPageArray::TargetStateIsCommitted(page))
  1884 			goto done; // page no longer needs mapping
  1885 
  1886 #ifdef _DEBUG
  1887 		if(pte!=KPteUnallocatedEntry)
  1888 			{
  1889 			if ((pte^page)>=TPte(KPageSize) && !Mmu::IsPteInaccessible(pte) &&
  1890 				!Mmu::IsPteReadOnly(pte))
  1891 				{
   1892 			// The page has been mapped before but to a different physical address, and it
   1893 			// is not being moved as its PTE is neither inaccessible nor read-only.
  1894 				Kern::Printf("Mmu::PageInPages already mapped %x->%x",page,pte);
  1895 				__NK_ASSERT_DEBUG(0);
  1896 				}
  1897 			}
  1898 #endif
  1899 		if(!Mmu::IsPteMoreAccessible(aBlankPte,pte))
   1900 			return true; // return true to keep the page table (it already had at least one page mapped)
  1901 
  1902 		// remap page with new increased permissions...
  1903 		if(pte==KPteUnallocatedEntry)
  1904 			count = 1; // we'll be adding a new pte entry, count it
  1905 		if(!Mmu::IsPteReadOnly(aBlankPte))
  1906 			ThePager.SetWritable(*SPageInfo::FromPhysAddr(page));
  1907 		pte = (page&~KPageMask)|aBlankPte;
  1908 		TRACE2(("!PTE %x=%x",aPtePtr,pte));
  1909 		*aPtePtr = pte;
  1910 
  1911 		// clean cache...
  1912 		CacheMaintenance::SinglePteUpdated((TLinAddr)aPtePtr);
  1913 		}
  1914 	else
  1915 		{
  1916 		// check we are only updating a single page table...
  1917 		__NK_ASSERT_DEBUG(((TLinAddr(aPtePtr)^TLinAddr(aPtePtr+aCount-1))>>KPageTableShift)==0);
  1918 
  1919 		// map pages...
  1920 		TPte* pPte = aPtePtr;
  1921 		TPte* pPteEnd = aPtePtr+aCount;
  1922 		do
  1923 			{
  1924 			// map page...
  1925 			TPhysAddr page = *aPages++;
  1926 			TPte pte = *pPte++;
  1927 			if(RPageArray::TargetStateIsCommitted(page))
  1928 				{
  1929 #ifdef _DEBUG
  1930 				if(pte!=KPteUnallocatedEntry)
  1931 					{
  1932 					if ((pte^page)>=TPte(KPageSize) && !Mmu::IsPteInaccessible(pte) &&
  1933 						!Mmu::IsPteReadOnly(pte))
  1934 						{
   1935 						// The page has been mapped before but to a different physical address, and it
   1936 						// is not being moved as its PTE is neither inaccessible nor read-only.
  1937 						Kern::Printf("Mmu::PageInPages already mapped %x->%x",page,pte);
  1938 						__NK_ASSERT_DEBUG(0);
  1939 						}
  1940 					}
  1941 #endif
  1942 				if(Mmu::IsPteMoreAccessible(aBlankPte,pte))
  1943 					{
  1944 					// remap page with new increased permissions...
  1945 					if(pte==KPteUnallocatedEntry)
  1946 						++count; // we'll be adding a new pte entry, count it
  1947 					if(!Mmu::IsPteReadOnly(aBlankPte))
  1948 						ThePager.SetWritable(*SPageInfo::FromPhysAddr(page));
  1949 					pte = (page&~KPageMask)|aBlankPte;
  1950 					TRACE2(("!PTE %x=%x",pPte-1,pte));
  1951 					pPte[-1] = pte;
  1952 					}
  1953 				}
  1954 			}
  1955 		while(pPte!=pPteEnd);
  1956 
  1957 		// clean cache...
  1958 		CacheMaintenance::MultiplePtesUpdated((TLinAddr)aPtePtr,(TLinAddr)pPte-(TLinAddr)aPtePtr);
  1959 		}
  1960 
  1961 done:
  1962 	// update page counts...
  1963 	SPageTableInfo* pti = SPageTableInfo::FromPtPtr(aPtePtr);
  1964 	count = pti->IncPageCount(count);
  1965 	TRACE2(("pt %x page count=%d",TLinAddr(aPtePtr)&~KPageTableMask,pti->PageCount()));
  1966 	__NK_ASSERT_DEBUG(pti->CheckPageCount());
  1967 
  1968 	// see if page table needs freeing...
  1969 	TUint keepPt = count | pti->PermanenceCount();
  1970 
  1971 	// rejuvenate demand paged page tables...
  1972 	ThePager.RejuvenatePageTable(aPtePtr);
  1973 
  1974 	return keepPt;
  1975 	}
  1976 
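// Illustrative sketch (editorial addition): a paging-in path might call
// Mmu::PageInPages for a single faulting page with a writable blank PTE. As
// implemented above, the function widens permissions if needed, marks the page
// writable via ThePager and rejuvenates the page table. The wrapper name and
// parameters are hypothetical.
#if 0
TBool ExamplePageInSinglePage(TPte* aPtePtr, TPhysAddr* aPageArrayEntry, TPte aWritableBlankPte)
	{
	MmuLock::Lock();
	TBool keepPt = TheMmu.PageInPages(aPtePtr, 1, aPageArrayEntry, aWritableBlankPte);
	// note: PageInPages may have released and re-acquired MmuLock internally
	MmuLock::Unlock();
	return keepPt;
	}
#endif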
  1977 
  1978 //
  1979 // CodeModifier
  1980 //
  1981 
  1982 #ifdef __DEBUGGER_SUPPORT__
  1983 
  1984 void DoWriteCode(TUint32* aAddress, TUint32 aValue);
  1985 
  1986 #ifdef __SMP__
  1987 
  1988 extern "C" void __e32_instruction_barrier();
  1989 
  1990 class TCodeModifierBroadcast : public TGenericIPI
  1991 	{
  1992 public:
  1993 	TCodeModifierBroadcast(TUint32* aAddress, TUint32 aValue);
  1994 	static void Isr(TGenericIPI*);
  1995 	void Go();
  1996 public:
  1997 	TUint32*		iAddress;
  1998 	TUint32			iValue;
  1999 	volatile TInt	iFlag;
  2000 	};
  2001 
  2002 TCodeModifierBroadcast::TCodeModifierBroadcast(TUint32* aAddress, TUint32 aValue)
  2003 	:	iAddress(aAddress), iValue(aValue), iFlag(0)
  2004 	{
  2005 	}
  2006 
  2007 void TCodeModifierBroadcast::Isr(TGenericIPI* aPtr)
  2008 	{
  2009 	TCodeModifierBroadcast& a = *(TCodeModifierBroadcast*)aPtr;
  2010 	while (!__e32_atomic_load_acq32(&a.iFlag))
  2011 		__chill();
  2012 #ifdef __BROADCAST_CACHE_MAINTENANCE__
  2013 	CacheMaintenance::CodeChanged((TLinAddr)a.iAddress, sizeof (TInt), CacheMaintenance::ECodeModifier);	// need to do separate Clean-D, Purge-I on each core
  2014 #else
  2015 	__e32_instruction_barrier();		// synchronize instruction execution
  2016 #endif
  2017 	}
  2018 
  2019 void TCodeModifierBroadcast::Go()
  2020 	{
  2021 	NKern::Lock();
  2022 	QueueAllOther(&Isr);
  2023 	WaitEntry();					// wait for other cores to stop
  2024 	DoWriteCode(iAddress, iValue);
  2025 	iFlag = 1;
  2026 	__e32_instruction_barrier();	// synchronize instruction execution
  2027 	WaitCompletion();				// wait for other cores to resume
  2028 	NKern::Unlock();
  2029 	}
  2030 #endif
  2031 
  2032 /**
  2033 @pre Calling thread must be in critical section
  2034 @pre CodeSeg mutex held
  2035 */
  2036 TInt CodeModifier::SafeWriteCode(DProcess* aProcess, TLinAddr aAddress, TInt aSize, TUint aValue, void* aOldValue)
  2037 	{
  2038 	__ASSERT_CRITICAL;
  2039 	Mmu& m=TheMmu;
  2040 	RamAllocLock::Lock();
  2041 	MmuLock::Lock();
  2042 	__UNLOCK_GUARD_START(MmuLock);
  2043 
  2044 	// Check aProcess is still alive by opening a reference on its os asid.
  2045 	TInt osAsid = ((DMemModelProcess*)aProcess)->TryOpenOsAsid();
  2046 	if (osAsid < 0)
  2047 		{
  2048 		__KTRACE_OPT(KDEBUGGER,Kern::Printf("CodeModifier::SafeWriteCode - zombie process"));
  2049 		__UNLOCK_GUARD_END(MmuLock);
  2050 		MmuLock::Unlock();
  2051 		RamAllocLock::Unlock();
  2052 		return KErrBadDescriptor;
  2053 		}
  2054 
   2055 	// Find the physical address of the page that the breakpoint belongs to.
  2056 	TPhysAddr physAddr = Mmu::LinearToPhysical(aAddress, osAsid);
  2057 	__KTRACE_OPT(KDEBUGGER,Kern::Printf("CodeModifier::SafeWriteCode - PA:%x", physAddr));
  2058 
  2059 
  2060 	if (physAddr==KPhysAddrInvalid)
  2061 		{
  2062 		__KTRACE_OPT(KDEBUGGER,Kern::Printf("CodeModifier::SafeWriteCode - invalid VA"));
  2063 		__UNLOCK_GUARD_END(MmuLock);
  2064 		MmuLock::Unlock();
  2065 		RamAllocLock::Unlock();
  2066 		// The os asid is no longer required.
  2067 		((DMemModelProcess*)aProcess)->CloseOsAsid();
  2068 		return KErrBadDescriptor;
  2069 		}
  2070 
   2071 	// Temporarily map the physical page.
  2072 	TLinAddr tempAddr = m.MapTemp(physAddr&~KPageMask, aAddress>>KPageShift);
  2073 	tempAddr |=  aAddress & KPageMask;
  2074 	__KTRACE_OPT(KDEBUGGER,Kern::Printf("CodeModifier::SafeWriteCode - tempAddr:%x",tempAddr));
  2075 
  2076 	TInt r = KErrBadDescriptor;
  2077 	TUint32* ptr = (TUint32*)(tempAddr&~3);
  2078 	TUint32 oldWord;
  2079 
  2080 	if(Kern::SafeRead(ptr,&oldWord,sizeof(oldWord))==0 // safely read the original value...
  2081 		&& Kern::SafeWrite(ptr,&oldWord,sizeof(oldWord))==0 ) // and write it back
  2082 		{
  2083 		// We have successfully probed the memory by reading and writing to it
  2084 		// so we assume it is now safe to access without generating exceptions.
  2085 		// If this is wrong it will kill the system horribly.
  2086 
  2087 		TUint32 newWord;
  2088 		TUint badAlign;
  2089 		TUint shift = (aAddress&3)*8;
  2090 
  2091 		switch(aSize)
  2092 			{
  2093 		case 1: // 1 byte value
  2094 			badAlign = 0;
  2095 			*(TUint8*)aOldValue = oldWord>>shift;
  2096 			newWord = (oldWord&~(0xff<<shift)) | ((aValue&0xff)<<shift);
  2097 			break;
  2098 
  2099 		case 2: // 2 byte value
  2100 			badAlign = tempAddr&1;
  2101 			if(!badAlign)
  2102 				*(TUint16*)aOldValue = oldWord>>shift;
  2103 			newWord = (oldWord&~(0xffff<<shift)) | ((aValue&0xffff)<<shift);
  2104 			break;
  2105 
  2106 		default: // 4 byte value
  2107 			badAlign = tempAddr&3;
  2108 			if(!badAlign)
  2109 				*(TUint32*)aOldValue = oldWord;
  2110 			newWord = aValue;
  2111 			break;
  2112 			}
  2113 
  2114 		if(!badAlign)
  2115 			{
  2116 			// write the new value...
  2117 #ifdef __SMP__
  2118 			TCodeModifierBroadcast b(ptr, newWord);
  2119 			b.Go();
  2120 #else
  2121 			DoWriteCode(ptr, newWord);
  2122 #endif
  2123 			r = KErrNone;
  2124 			}
  2125 		}
  2126 
  2127 	__UNLOCK_GUARD_END(MmuLock);
  2128 	m.UnmapTemp();
  2129 	MmuLock::Unlock();
  2130 	RamAllocLock::Unlock();
  2131 	// The os asid is no longer required.
  2132 	((DMemModelProcess*)aProcess)->CloseOsAsid();
  2133 	return r;
  2134 	}
  2135 
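// Worked example (editorial addition) of the word-merging arithmetic above:
// writing the 1-byte value 0xBE at an address with (aAddress&3)==2 gives
// shift = 2*8 = 16, so with oldWord = 0x11223344:
//   *(TUint8*)aOldValue = (TUint8)(oldWord>>16)          = 0x22
//   newWord = (oldWord & ~(0xff<<16)) | ((0xBE&0xff)<<16)
//           = (0x11223344 & 0xFF00FFFF) | 0x00BE0000
//           = 0x11BE3344
// Only the addressed byte of the aligned word changes before the whole word is
// rewritten through DoWriteCode (or TCodeModifierBroadcast on SMP).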
  2136 /**
  2137 @pre Calling thread must be in critical section
  2138 @pre CodeSeg mutex held
  2139 */
  2140 void DoWriteCode(TUint32* aAddress, TUint32 aValue)
  2141 	{
   2142 	// We do not want to be interrupted by, for example, an ISR that would run the altered code before the IMB-Range completes.
  2143 	// Therefore, copy data and clean/invalidate caches with interrupts disabled.
  2144 	TInt irq = NKern::DisableAllInterrupts();
  2145 	*aAddress = aValue;
  2146 	CacheMaintenance::CodeChanged((TLinAddr)aAddress, sizeof(TUint32), CacheMaintenance::ECodeModifier);
  2147 	NKern::RestoreInterrupts(irq);
  2148 	}
  2149 
  2150 #endif //__DEBUGGER_SUPPORT__
  2151 
  2152 
  2153 
  2154 //
  2155 // Virtual pinning
  2156 //
  2157 
  2158 TInt M::CreateVirtualPinObject(TVirtualPinObject*& aPinObject)
  2159 	{
  2160 	aPinObject = (TVirtualPinObject*)new DVirtualPinMapping;
  2161 	return aPinObject != NULL ? KErrNone : KErrNoMemory;
  2162 	}
  2163 
  2164 TInt M::PinVirtualMemory(TVirtualPinObject* aPinObject, TLinAddr aStart, TUint aSize, DThread* aThread)
  2165 	{
  2166 	__ASSERT_CRITICAL;
  2167 	TUint offsetInMapping;
  2168 	TUint mapInstanceCount;
  2169 	DMemoryMapping* mapping = MM::FindMappingInThread(	(DMemModelThread*)aThread, 
  2170 														aStart, 
  2171 														aSize, 
  2172 														offsetInMapping, 
  2173 														mapInstanceCount);
  2174 	TInt r = KErrBadDescriptor;
  2175 	if (mapping)
  2176 		{
  2177 		TInt count = ((aStart & KPageMask) + aSize + KPageMask) >> KPageShift;
  2178 		if(mapping->IsPinned())
  2179 			{
   2180 			// The mapping for the specified virtual address is already pinned so we
   2181 			// don't need to do anything. We couldn't safely pin the memory in this
   2182 			// case anyway, as pinned mappings may move between memory objects.
  2183 			r = KErrNone;
  2184 			}
  2185 		else
  2186 			{
  2187 			MmuLock::Lock();
  2188 			DMemoryObject* memory = mapping->Memory();
  2189 			if (mapInstanceCount != mapping->MapInstanceCount() || 
  2190 				!memory || !memory->IsDemandPaged())
  2191 				{
  2192 				// mapping has been reused, no memory, or it's not paged, so no need to pin...
  2193 				MmuLock::Unlock();
  2194 				r = KErrNone;
  2195 				}
  2196 			else
  2197 				{
  2198 				// paged memory needs pinning...
  2199 				// Open a reference on the memory so it doesn't get deleted.
  2200 				memory->Open();
  2201 				MmuLock::Unlock();
  2202 
  2203 				TUint startInMemory = (offsetInMapping >> KPageShift) + mapping->iStartIndex;
  2204 				r = ((DVirtualPinMapping*)aPinObject)->Pin(	memory, startInMemory, count, mapping->Permissions(),
  2205 															mapping, mapInstanceCount);
  2206 				memory->Close();
  2207 				}
  2208 			}
  2209 		mapping->Close();
  2210 		}	
  2211 	return r;
  2212 	}
  2213 
  2214 TInt M::CreateAndPinVirtualMemory(TVirtualPinObject*& aPinObject, TLinAddr aStart, TUint aSize)
  2215 	{
  2216 	__ASSERT_CRITICAL;
  2217 	aPinObject = 0;
  2218 	TUint offsetInMapping;
  2219 	TUint mapInstanceCount;
  2220 	DMemoryMapping* mapping = MM::FindMappingInThread(	(DMemModelThread*)&Kern::CurrentThread(), 
  2221 														aStart, 
  2222 														aSize, 
  2223 														offsetInMapping,
  2224 														mapInstanceCount);
  2225 	TInt r = KErrBadDescriptor;
  2226 	if (mapping)
  2227 		{
  2228 		TInt count = ((aStart & KPageMask) + aSize + KPageMask) >> KPageShift;
  2229 		if(mapping->IsPinned())
  2230 			{
   2231 			// The mapping for the specified virtual address is already pinned so we
   2232 			// don't need to do anything. We couldn't safely pin the memory in this
   2233 			// case anyway, as pinned mappings may move between memory objects.
  2234 			r = KErrNone;
  2235 			}
  2236 		else
  2237 			{
  2238 			MmuLock::Lock();
  2239 			DMemoryObject* memory = mapping->Memory();
  2240 			if (mapInstanceCount != mapping->MapInstanceCount() || 
  2241 				!memory || !memory->IsDemandPaged())
  2242 				{
  2243 				// mapping has been reused, no memory, or it's not paged, so no need to pin...
  2244 				MmuLock::Unlock();
  2245 				r = KErrNone;
  2246 				}
  2247 			else
  2248 				{// The memory is demand paged so create a pin object and pin it.
  2249 				// Open a reference on the memory so it doesn't get deleted.
  2250 				memory->Open();
  2251 				MmuLock::Unlock();
  2252 				r = CreateVirtualPinObject(aPinObject);
  2253 				if (r == KErrNone)
  2254 					{
  2255 					TUint startInMemory = (offsetInMapping >> KPageShift) + mapping->iStartIndex;
  2256 					r = ((DVirtualPinMapping*)aPinObject)->Pin(	memory, startInMemory, count, mapping->Permissions(), 
  2257 																mapping, mapInstanceCount);
  2258 					if (r != KErrNone)
  2259 						{// Failed to pin the memory so pin object is not required.
  2260 						DestroyVirtualPinObject(aPinObject);
  2261 						}
  2262 					}
  2263 				memory->Close();
  2264 				}
  2265 			}
  2266 		mapping->Close();
  2267 		}	
  2268 	return r;
  2269 	}
  2270 
  2271 void M::UnpinVirtualMemory(TVirtualPinObject* aPinObject)
  2272 	{
  2273 	DVirtualPinMapping* mapping = (DVirtualPinMapping*)aPinObject;
  2274 	if (mapping->IsAttached())
  2275 		mapping->Unpin();
  2276 	}
  2277 	
  2278 void M::DestroyVirtualPinObject(TVirtualPinObject*& aPinObject)
  2279 	{
  2280 	DVirtualPinMapping* mapping = (DVirtualPinMapping*)__e32_atomic_swp_ord_ptr(&aPinObject, 0);
  2281 	if (mapping)
  2282 		{
  2283 		if (mapping->IsAttached())
  2284 			mapping->Unpin();
  2285 		mapping->AsyncClose();
  2286 		}
  2287 	}
  2288 
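// Illustrative sketch (editorial addition): the lifecycle of a virtual pin
// object using the M:: hooks above (in practice drivers normally reach these
// through the equivalent Kern:: wrappers). The wrapper name is hypothetical;
// the critical-section requirement mirrors the __ASSERT_CRITICAL checks above.
#if 0
TInt ExamplePinUserBuffer(DThread* aThread, TLinAddr aStart, TUint aSize)
	{
	NKern::ThreadEnterCS();
	TVirtualPinObject* pin = NULL;
	TInt r = M::CreateVirtualPinObject(pin);
	if(r == KErrNone)
		{
		r = M::PinVirtualMemory(pin, aStart, aSize, aThread);
		if(r == KErrNone)
			{
			// ... access the buffer without it being paged out ...
			M::UnpinVirtualMemory(pin);
			}
		M::DestroyVirtualPinObject(pin);
		}
	NKern::ThreadLeaveCS();
	return r;
	}
#endif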
  2289 //
  2290 // Physical pinning
  2291 //
  2292 
  2293 TInt M::CreatePhysicalPinObject(TPhysicalPinObject*& aPinObject)
  2294 	{
  2295 	aPinObject = (TPhysicalPinObject*)new DPhysicalPinMapping;
  2296 	return aPinObject != NULL ? KErrNone : KErrNoMemory;
  2297 	}
  2298 
  2299 TInt M::PinPhysicalMemory(TPhysicalPinObject* aPinObject, TLinAddr aStart, TUint aSize, TBool aReadOnly,
  2300 				TPhysAddr& aAddress, TPhysAddr* aPages, TUint32& aMapAttr, TUint& aColour, DThread* aThread)
  2301 	{
  2302 	__ASSERT_CRITICAL;
  2303 	TUint offsetInMapping;
  2304 	TUint mapInstanceCount;
  2305 	DMemoryMapping* mapping = MM::FindMappingInThread(	(DMemModelThread*)aThread, 
  2306 														aStart, 
  2307 														aSize, 
  2308 														offsetInMapping, 
  2309 														mapInstanceCount);
  2310 	TInt r = KErrBadDescriptor;
  2311 	if (mapping)
  2312 		{
  2313 		TInt count = ((aStart & KPageMask) + aSize + KPageMask) >> KPageShift;
  2314 
  2315 		MmuLock::Lock();
  2316 		DMemoryObject* memory = mapping->Memory();
  2317 		if (mapInstanceCount == mapping->MapInstanceCount() && memory)
  2318 			{
  2319 			memory->Open();
  2320 			MmuLock::Unlock();
  2321 
  2322 			TUint startInMemory = (offsetInMapping >> KPageShift) + mapping->iStartIndex;
  2323 			TMappingPermissions permissions = aReadOnly ? ESupervisorReadOnly : ESupervisorReadWrite;
  2324 			r = ((DPhysicalPinMapping*)aPinObject)->Pin(memory, startInMemory, count, permissions);
  2325 			if (r == KErrNone)
  2326 				{
  2327 				r = ((DPhysicalPinMapping*)aPinObject)->PhysAddr(0, count, aAddress, aPages);
  2328 				if (r>=KErrNone)
  2329 					{
  2330 					r = KErrNone; //Do not report discontiguous memory in return value.
  2331 					const TMappingAttributes2& mapAttr2 =
  2332 											MM::LegacyMappingAttributes(memory->Attributes(), mapping->Permissions());
  2333 					*(TMappingAttributes2*)&aMapAttr = mapAttr2;
  2334 					}
  2335 				else
  2336 					UnpinPhysicalMemory(aPinObject);
  2337 				}
  2338 			memory->Close();
  2339 			}
  2340 		else // mapping has been reused or no memory...
  2341 			{
  2342 			MmuLock::Unlock();
  2343 			}
  2344 		mapping->Close();
  2345 		}
  2346 	aColour = (aStart >>KPageShift) & KPageColourMask;
  2347 	return r;
  2348 	}
  2349 
  2350 void M::UnpinPhysicalMemory(TPhysicalPinObject* aPinObject)
  2351 	{
  2352 	DPhysicalPinMapping* mapping = (DPhysicalPinMapping*)aPinObject;
  2353 	if (mapping->IsAttached())
  2354 		mapping->Unpin();
  2355 	}
  2356 
  2357 void M::DestroyPhysicalPinObject(TPhysicalPinObject*& aPinObject)
  2358 	{
  2359 	DPhysicalPinMapping* mapping = (DPhysicalPinMapping*)__e32_atomic_swp_ord_ptr(&aPinObject, 0);
  2360 	if (mapping)
  2361 		{
  2362 		if (mapping->IsAttached())
  2363 			mapping->Unpin();
  2364 		mapping->AsyncClose();
  2365 		}
  2366 	}
  2367 
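// Illustrative sketch (editorial addition): physical pinning as a DMA-capable
// driver might use it, retrieving the physical address of each pinned page
// while the pages are guaranteed not to move or be reused. The wrapper name
// and the caller-supplied aPageList are hypothetical.
#if 0
TInt ExamplePinForDma(DThread* aThread, TLinAddr aStart, TUint aSize, TPhysAddr* aPageList)
	{
	NKern::ThreadEnterCS();
	TPhysicalPinObject* pin = NULL;
	TInt r = M::CreatePhysicalPinObject(pin);
	if(r == KErrNone)
		{
		TPhysAddr contig;
		TUint32 mapAttr;
		TUint colour;
		r = M::PinPhysicalMemory(pin, aStart, aSize, EFalse, contig, aPageList, mapAttr, colour, aThread);
		if(r == KErrNone)
			{
			// ... program the DMA controller using the entries in aPageList ...
			M::UnpinPhysicalMemory(pin);
			}
		M::DestroyPhysicalPinObject(pin);
		}
	NKern::ThreadLeaveCS();
	return r;
	}
#endif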
  2368 
  2369 //
  2370 // Kernel map and pin.
  2371 //
  2372 
  2373 TInt M::CreateKernelMapObject(TKernelMapObject*& aMapObject, TUint aMaxReserveSize)
  2374 	{
  2375 	DKernelPinMapping*  pinObject = new DKernelPinMapping();
  2376 	aMapObject = (TKernelMapObject*) pinObject;
  2377 	if (pinObject == NULL)
  2378 		{
  2379 		return KErrNoMemory;
  2380 		}
  2381 	// Ensure we reserve enough bytes for all possible alignments of the start and 
  2382 	// end of the region to map.
  2383 	TUint reserveBytes = aMaxReserveSize? ((aMaxReserveSize + KPageMask) & ~KPageMask) + KPageSize : 0;
  2384 	TInt r = pinObject->Construct(reserveBytes);
  2385 	if (r != KErrNone)
  2386 		{// Failed so delete the kernel mapping object.
  2387 		pinObject->Close();
  2388 		aMapObject = NULL;
  2389 		}
  2390 	return r;
  2391 	}
  2392 
  2393 
  2394 TInt M::MapAndPinMemory(TKernelMapObject* aMapObject, DThread* aThread, TLinAddr aStart, 
  2395 						TUint aSize, TUint aMapAttributes, TLinAddr& aKernelAddr, TPhysAddr* aPages)
  2396 	{
  2397 	__ASSERT_CRITICAL;
  2398 	TUint offsetInMapping;
  2399 	TUint mapInstanceCount;
  2400 	DMemoryMapping* mapping = MM::FindMappingInThread(	(DMemModelThread*)aThread, 
  2401 														aStart, 
  2402 														aSize, 
  2403 														offsetInMapping, 
  2404 														mapInstanceCount);
  2405 	TInt r = KErrBadDescriptor;
  2406 	if (mapping)
  2407 		{
  2408 		DKernelPinMapping* kernelMap = (DKernelPinMapping*)aMapObject;
  2409 		TInt count = (((aStart + aSize + KPageMask) & ~KPageMask) - (aStart & ~KPageMask)) >> KPageShift;
  2410 		if (kernelMap->iReservePages && kernelMap->iReservePages < count)
  2411 			{
  2412 			mapping->Close();
  2413 			return KErrArgument;
  2414 			}
  2415 
  2416 		MmuLock::Lock();
  2417 		DMemoryObject* memory = mapping->Memory();
  2418 		if (mapInstanceCount == mapping->MapInstanceCount() && memory)
  2419 			{
  2420 			memory->Open();
  2421 			MmuLock::Unlock();
  2422 
  2423 			TUint startInMemory = (offsetInMapping >> KPageShift) + mapping->iStartIndex;
  2424 			TBool readOnly = aMapAttributes & Kern::EKernelMap_ReadOnly;
  2425 			TMappingPermissions permissions =  readOnly ? ESupervisorReadOnly : ESupervisorReadWrite;
  2426 			r = kernelMap->MapAndPin(memory, startInMemory, count, permissions);
  2427 			if (r == KErrNone)
  2428 				{
  2429 				__NK_ASSERT_DEBUG(!kernelMap->IsUserMapping());
  2430 				aKernelAddr = kernelMap->Base();
  2431 				TPhysAddr contigAddr;	// Ignore this value as aPages will be populated 
  2432 										// whether the memory is contiguous or not.
  2433 				r = kernelMap->PhysAddr(0, count, contigAddr, aPages);
  2434 				if (r>=KErrNone)
  2435 					{
  2436 					r = KErrNone; //Do not report discontiguous memory in return value.
  2437 					}
  2438 				else
  2439 					{
  2440 					UnmapAndUnpinMemory((TKernelMapObject*)kernelMap);
  2441 					}
  2442 				}
  2443 			memory->Close();
  2444 			}
  2445 		else // mapping has been reused or no memory...
  2446 			{
  2447 			MmuLock::Unlock();
  2448 			}
  2449 		mapping->Close();
  2450 		}
  2451 	return r;
  2452 	}
  2453 
  2454 
  2455 void M::UnmapAndUnpinMemory(TKernelMapObject* aMapObject)
  2456 	{
  2457 	DKernelPinMapping* mapping = (DKernelPinMapping*)aMapObject;
  2458 	if (mapping->IsAttached())
  2459 		mapping->UnmapAndUnpin();
  2460 	}
  2461 
  2462 
  2463 void M::DestroyKernelMapObject(TKernelMapObject*& aMapObject)
  2464 	{
  2465 	DKernelPinMapping* mapping = (DKernelPinMapping*)__e32_atomic_swp_ord_ptr(&aMapObject, 0);
  2466 	if (mapping)
  2467 		{
  2468 		if (mapping->IsAttached())
  2469 			mapping->UnmapAndUnpin();
  2470 		mapping->AsyncClose();
  2471 		}
  2472 	}
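// Illustrative sketch (editorial addition): mapping and pinning a user-side
// buffer so it is also accessible at a kernel linear address, as a driver
// helper might do. The wrapper name and aPageList are hypothetical; aSize is
// passed as the maximum reserve size for simplicity.
#if 0
TInt ExampleKernelMapUserBuffer(DThread* aThread, TLinAddr aStart, TUint aSize, TPhysAddr* aPageList)
	{
	NKern::ThreadEnterCS();
	TKernelMapObject* map = NULL;
	TInt r = M::CreateKernelMapObject(map, aSize);
	if(r == KErrNone)
		{
		TLinAddr kernelAddr = 0;
		r = M::MapAndPinMemory(map, aThread, aStart, aSize, 0, kernelAddr, aPageList);
		if(r == KErrNone)
			{
			// ... access the buffer through kernelAddr ...
			M::UnmapAndUnpinMemory(map);
			}
		M::DestroyKernelMapObject(map);
		}
	NKern::ThreadLeaveCS();
	return r;
	}
#endif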
  2473 
  2474 
  2475 //
  2476 // Cache sync operations
  2477 //
  2478 
  2479 //@pre	As for MASK_THREAD_STANDARD
  2480 void Mmu::SyncPhysicalMemoryBeforeDmaWrite(TPhysAddr* aPages, TUint aColour, TUint aOffset, TUint aSize, TUint32 aMapAttr)
  2481 	{
  2482 	//Jump over the pages we do not have to sync
   2483 	aPages += aOffset>>KPageShift;
   2484 	aColour  = (aColour + (aOffset>>KPageShift)) & KPageColourMask;	// adjust colour by the pages skipped (before aOffset is masked)
   2485 	aOffset &= KPageMask;
  2486 
  2487 	//Calculate page table entry for the temporary mapping.
  2488 	TUint pteType = PteType(ESupervisorReadWrite,true);
  2489 	TMappingAttributes2 mapAttr2(aMapAttr);
  2490 	TPte pte = Mmu::BlankPte((TMemoryAttributes)mapAttr2.Type(), pteType);
  2491 	
  2492 	while (aSize) //A single pass of loop operates within page boundaries.
  2493 		{
  2494 		TUint sizeInLoopPass = Min (KPageSize, aOffset+aSize) - aOffset; //The size of the region in this pass.
  2495 
  2496 		NKern::ThreadEnterCS();
  2497 		Kern::MutexWait(*iPhysMemSyncMutex);
  2498 		
  2499 		TLinAddr linAddr = iPhysMemSyncTemp.Map(*aPages, aColour, pte);
  2500 		CacheMaintenance::MakeCPUChangesVisible(linAddr+aOffset, sizeInLoopPass, aMapAttr, *aPages+aOffset);
  2501 		iPhysMemSyncTemp.Unmap();
  2502 		
  2503 		Kern::MutexSignal(*iPhysMemSyncMutex);
  2504 		NKern::ThreadLeaveCS();
  2505 
  2506 		aSize-=sizeInLoopPass;  // Remaining bytes to sync
  2507 		aOffset=0;				// In all the pages after the first, sync will always start with zero offset.
  2508 		aPages++;	// Point to the next page
  2509 		aColour  = (aColour+1) & KPageColourMask;
  2510 		}
  2511 	}
  2512 
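// Worked example (editorial addition) of the per-page loop above: with
// KPageSize==0x1000, aOffset==0x300 (after masking) and aSize==0x1000, the
// first pass syncs Min(0x1000, 0x300+0x1000) - 0x300 = 0xD00 bytes of the first
// page, leaving aSize==0x300; the second pass starts at offset 0 of the next
// page (and next colour) and syncs Min(0x1000, 0x300) - 0 = 0x300 bytes, after
// which aSize is zero and the loop ends. The two functions below use the same
// structure.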
  2513 //@pre	As for MASK_THREAD_STANDARD
  2514 void Mmu::SyncPhysicalMemoryBeforeDmaRead(TPhysAddr* aPages, TUint aColour, TUint aOffset, TUint aSize, TUint32 aMapAttr)
  2515 	{
  2516 	//Jump over the pages we do not have to sync
   2517 	aPages += aOffset>>KPageShift;
   2518 	aColour  = (aColour + (aOffset>>KPageShift)) & KPageColourMask;	// adjust colour by the pages skipped (before aOffset is masked)
   2519 	aOffset &= KPageMask;
  2520 
  2521 	//Calculate page table entry for the temporary mapping.
  2522 	TUint pteType = PteType(ESupervisorReadWrite,true);
  2523 	TMappingAttributes2 mapAttr2(aMapAttr);
  2524 	TPte pte = Mmu::BlankPte((TMemoryAttributes)mapAttr2.Type(), pteType);
  2525 	
  2526 	while (aSize) //A single pass of loop operates within page boundaries.
  2527 		{
  2528 		TUint sizeInLoopPass = Min (KPageSize, aOffset+aSize) - aOffset; //The size of the region in this pass.
  2529 
  2530 		NKern::ThreadEnterCS();
  2531 		Kern::MutexWait(*iPhysMemSyncMutex);
  2532 		
  2533 		TLinAddr linAddr = iPhysMemSyncTemp.Map(*aPages, aColour, pte);
  2534 		CacheMaintenance::PrepareMemoryForExternalWrites(linAddr+aOffset, sizeInLoopPass, aMapAttr, *aPages+aOffset);
  2535 		iPhysMemSyncTemp.Unmap();
  2536 		
  2537 		Kern::MutexSignal(*iPhysMemSyncMutex);
  2538 		NKern::ThreadLeaveCS();
  2539 
  2540 		aSize-=sizeInLoopPass;  // Remaining bytes to sync
  2541 		aOffset=0;				// In all the pages after the first, sync will always start with zero offset.
  2542 		aPages++;	// Point to the next page
  2543 		aColour  = (aColour+1) & KPageColourMask;
  2544 		}
  2545 	}
  2546 
  2547 //@pre	As for MASK_THREAD_STANDARD
  2548 void Mmu::SyncPhysicalMemoryAfterDmaRead(TPhysAddr* aPages, TUint aColour, TUint aOffset, TUint aSize, TUint32 aMapAttr)
  2549 	{
  2550 	//Jump over the pages we do not have to sync
   2551 	aPages += aOffset>>KPageShift;
   2552 	aColour  = (aColour + (aOffset>>KPageShift)) & KPageColourMask;	// adjust colour by the pages skipped (before aOffset is masked)
   2553 	aOffset &= KPageMask;
  2554 
  2555 	//Calculate page table entry for the temporary mapping.
  2556 	TUint pteType = PteType(ESupervisorReadWrite,true);
  2557 	TMappingAttributes2 mapAttr2(aMapAttr);
  2558 	TPte pte = Mmu::BlankPte((TMemoryAttributes)mapAttr2.Type(), pteType);
  2559 	
  2560 	while (aSize) //A single pass of loop operates within page boundaries.
  2561 		{
  2562 		TUint sizeInLoopPass = Min (KPageSize, aOffset+aSize) - aOffset; //The size of the region in this pass.
  2563 
  2564 		NKern::ThreadEnterCS();
  2565 		Kern::MutexWait(*iPhysMemSyncMutex);
  2566 		
  2567 		TLinAddr linAddr = iPhysMemSyncTemp.Map(*aPages, aColour, pte);
  2568 		CacheMaintenance::MakeExternalChangesVisible(linAddr+aOffset, sizeInLoopPass, aMapAttr, *aPages+aOffset);
  2569 		iPhysMemSyncTemp.Unmap();
  2570 		
  2571 		Kern::MutexSignal(*iPhysMemSyncMutex);
  2572 		NKern::ThreadLeaveCS();
  2573 
  2574 		aSize-=sizeInLoopPass;  // Remaining bytes to sync
  2575 		aOffset=0;				// In all the pages after the first, sync will always start with zero offset.
  2576 		aPages++;	// Point to the next page
  2577 		aColour  = (aColour+1) & KPageColourMask;
  2578 		}
  2579 	}
  2580 
  2581 EXPORT_C TInt Cache::SyncPhysicalMemoryBeforeDmaWrite(TPhysAddr* aPages, TUint aColour, TUint aOffset, TUint aSize, TUint32 aMapAttr)
  2582 	{
  2583 	CHECK_PRECONDITIONS(MASK_THREAD_STANDARD,"Cache::SyncPhysicalMemoryBeforeDmaWrite");
  2584 	TheMmu.SyncPhysicalMemoryBeforeDmaWrite(aPages, aColour, aOffset, aSize, aMapAttr);
  2585 	return KErrNone;
  2586 	}
  2587 
  2588 EXPORT_C TInt Cache::SyncPhysicalMemoryBeforeDmaRead(TPhysAddr* aPages, TUint aColour, TUint aOffset, TUint aSize, TUint32 aMapAttr)
  2589 	{
  2590 	CHECK_PRECONDITIONS(MASK_THREAD_STANDARD,"Cache::SyncPhysicalMemoryBeforeDmaRead");
  2591 	TheMmu.SyncPhysicalMemoryBeforeDmaRead(aPages, aColour, aOffset, aSize, aMapAttr);
  2592 	return KErrNone;
  2593 	}
  2594 
  2595 EXPORT_C TInt Cache::SyncPhysicalMemoryAfterDmaRead(TPhysAddr* aPages, TUint aColour, TUint aOffset, TUint aSize, TUint32 aMapAttr)
  2596 	{
  2597 	CHECK_PRECONDITIONS(MASK_THREAD_STANDARD,"Cache::SyncPhysicalMemoryAfterDmaRead");
  2598 	TheMmu.SyncPhysicalMemoryAfterDmaRead(aPages, aColour, aOffset, aSize, aMapAttr);
  2599 	return KErrNone;
  2600 	}
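// Illustrative sketch (editorial addition): a driver bracketing a DMA transfer
// to a device with the exported cache synchronisation call above. The page
// list, colour, offset and mapping attributes are assumed to come from an
// earlier physical pinning step; the function name is hypothetical.
#if 0
void ExampleDmaWriteToDevice(TPhysAddr* aPages, TUint aColour, TUint aOffset, TUint aSize, TUint32 aMapAttr)
	{
	// Make CPU-side writes visible to the device before it reads the buffer...
	Cache::SyncPhysicalMemoryBeforeDmaWrite(aPages, aColour, aOffset, aSize, aMapAttr);
	// ... start the DMA transfer and wait for it to complete ...
	}
#endif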