os/kernelhwsrv/kernel/eka/memmodel/epoc/flexible/mmu/x86/xmmu.cpp
// Copyright (c) 1997-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
//

#include <x86_mem.h>
#include "cache_maintenance.inl"
#include "execs.h"
#include "mm.h"
#include "mmu.h"
#include "mpager.h"
#include "mpdalloc.h"


TPte PteGlobal;	// =0x100 on processors which support global pages, 0 on processors which don't

#if defined(KMMU)
extern "C" void __DebugMsgFlushTLB()
	{
	__KTRACE_OPT(KMMU,Kern::Printf("FlushTLB"));
	}

extern "C" void __DebugMsgLocalFlushTLB()
	{
	__KTRACE_OPT(KMMU,Kern::Printf("LocalFlushTLB"));
	}

extern "C" void __DebugMsgINVLPG(int a)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("INVLPG(%08x)",a));
	}
#endif



extern void DoLocalInvalidateTLB();


#ifndef __SMP__


FORCE_INLINE void LocalInvalidateTLB()
	{
	DoLocalInvalidateTLB();
	}


#else // __SMP__


const TInt KMaxPages = 1;

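/**
IPI object used to broadcast TLB maintenance requests to all processors.

Addresses to invalidate are collected with AddAddress() (up to #KMaxPages at
a time); InvalidateList() then performs the invalidation on the current CPU,
queues it on all the others and waits for completion. LocalInvalidateIsr()
and InvalidateIsr() are the whole-TLB flavours.
*/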
class TTLBIPI : public TGenericIPI
	{
public:
	TTLBIPI();
	static void InvalidateForPagesIsr(TGenericIPI*);
	static void LocalInvalidateIsr(TGenericIPI*);
	static void InvalidateIsr(TGenericIPI*);
	static void WaitAndInvalidateIsr(TGenericIPI*);
	void AddAddress(TLinAddr aAddr);
	void InvalidateList();
public:
	volatile TInt	iFlag;
	TInt			iCount;
	TLinAddr		iAddr[KMaxPages];
	};

TTLBIPI::TTLBIPI()
	:	iFlag(0), iCount(0)
	{
	}

void TTLBIPI::LocalInvalidateIsr(TGenericIPI*)
	{
	TRACE2(("TLBLocInv"));
	DoLocalInvalidateTLB();
	}

void TTLBIPI::InvalidateIsr(TGenericIPI*)
	{
	TRACE2(("TLBInv"));
	DoInvalidateTLB();
	}

void TTLBIPI::WaitAndInvalidateIsr(TGenericIPI* aTLBIPI)
	{
	TRACE2(("TLBWtInv"));
	TTLBIPI& a = *(TTLBIPI*)aTLBIPI;
	while (!a.iFlag)
		{}
	if (a.iCount == 1)
		DoInvalidateTLBForPage(a.iAddr[0]);
	else
		DoInvalidateTLB();
	}

void TTLBIPI::InvalidateForPagesIsr(TGenericIPI* aTLBIPI)
	{
	TTLBIPI& a = *(TTLBIPI*)aTLBIPI;
	TInt i;
	for (i=0; i<a.iCount; ++i)
		{
		TRACE2(("TLBInv %08x", a.iAddr[i]));
		DoInvalidateTLBForPage(a.iAddr[i]);
		}
	}

void TTLBIPI::AddAddress(TLinAddr aAddr)
	{
	iAddr[iCount] = aAddr;
	if (++iCount == KMaxPages)
		InvalidateList();
	}

void TTLBIPI::InvalidateList()
	{
	NKern::Lock();
	InvalidateForPagesIsr(this);
	QueueAllOther(&InvalidateForPagesIsr);
	NKern::Unlock();
	WaitCompletion();
	iCount = 0;
	}

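/**
Flush the TLB of every processor using the 'local' flush (DoLocalInvalidateTLB),
performed directly on the current CPU and via IPI on all the others; returns
once every CPU has completed the flush.
*/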
void LocalInvalidateTLB()
	{
	TTLBIPI ipi;
	NKern::Lock();
	DoLocalInvalidateTLB();
	ipi.QueueAllOther(&TTLBIPI::LocalInvalidateIsr);
	NKern::Unlock();
	ipi.WaitCompletion();
	}

void InvalidateTLB()
	{
	TTLBIPI ipi;
	NKern::Lock();
	DoInvalidateTLB();
	ipi.QueueAllOther(&TTLBIPI::InvalidateIsr);
	NKern::Unlock();
	ipi.WaitCompletion();
	}

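/**
Invalidate the TLB entry for the page containing linear address \a aAddr on
every processor.

A minimal usage sketch, following the pattern used by #PagesAllocated below
(pte, newPte and addr are illustrative names only):

@code
	*pte = newPte;										// update the mapping
	CacheMaintenance::SinglePteUpdated((TLinAddr)pte);	// make the new PTE visible
	InvalidateTLBForPage(addr);							// discard any stale TLB entry
@endcode
*/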
void InvalidateTLBForPage(TLinAddr aAddr)
	{
	TTLBIPI ipi;
	ipi.AddAddress(aAddr);
	ipi.InvalidateList();
	}


#endif // __SMP__


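/**
Invalidate all TLB entries belonging to address space \a aAsid.

Kernel mappings may use global PTEs (see #PteGlobal), which survive the
'local' flush, so the kernel address space requires the full #InvalidateTLB;
any other address space only needs #LocalInvalidateTLB.
*/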
void InvalidateTLBForAsid(TUint aAsid)
	{
	if(aAsid==KKernelOsAsid)
		InvalidateTLB();
	else
		LocalInvalidateTLB();
	}


void SinglePdeUpdated(TPde* aPde)
	{
	CacheMaintenance::SinglePdeUpdated((TLinAddr)aPde);
	PageDirectories.GlobalPdeChanged(aPde);
	}


//
// Functions for class Mmu
//

TPhysAddr Mmu::PtePhysAddr(TPte aPte, TUint /*aPteIndex*/)
	{
	if(aPte&KPdePtePresent)
		return aPte & KPdePtePhysAddrMask;
	return KPhysAddrInvalid;
	}


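/**
Return a pointer to the page table referenced by the given Page Directory
Entry \a aPde, or 0 if the PDE doesn't refer to a page table (e.g. it maps
a large page instead).
*/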
TPte* Mmu::PageTableFromPde(TPde aPde)
	{
	if((aPde&(KPdeLargePage|KPdePtePresent)) == KPdePtePresent)
		{
		SPageInfo* pi = SPageInfo::FromPhysAddr(aPde);
		TInt id = (pi->Index()<<KPtClusterShift) | ((aPde>>KPageTableShift)&KPtClusterMask);
		return (TPte*)(KPageTableBase+(id<<KPageTableShift));
		}
	return 0;
	}


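/**
As #PageTableFromPde, but additionally return 0 if the physical address in
\a aPde has no SPageInfo, i.e. the PDE doesn't reference managed RAM.
*/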
TPte* Mmu::SafePageTableFromPde(TPde aPde)
	{
	if((aPde&(KPdeLargePage|KPdePtePresent)) == KPdePtePresent)
		{
		SPageInfo* pi = SPageInfo::SafeFromPhysAddr(aPde&~KPageMask);
		if(pi)
			{
			TInt id = (pi->Index()<<KPtClusterShift) | ((aPde>>KPageTableShift)&KPtClusterMask);
			return (TPte*)(KPageTableBase+(id<<KPageTableShift));
			}
		}
	return 0;
	}


/**
Return the base physical address of the section mapped by the given
Page Directory Entry (PDE) \a aPde. If the PDE doesn't map a
section then KPhysAddrInvalid is returned.

@pre #MmuLock held.
*/
TPhysAddr Mmu::SectionBaseFromPde(TPde aPde)
	{
	if(PdeMapsSection(aPde))
		return aPde&KPdeLargePagePhysAddrMask;
	return KPhysAddrInvalid;
	}


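/**
Return a pointer to the PTE mapping linear address \a aAddress in the address
space \a aOsAsid.

@pre The PDE for \a aAddress must reference a page table; use
	 #SafePtePtrFromLinAddr when this isn't guaranteed.
*/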
TPte* Mmu::PtePtrFromLinAddr(TLinAddr aAddress, TInt aOsAsid)
	{
	TPde pde = PageDirectory(aOsAsid)[aAddress>>KChunkShift];
	SPageInfo* pi = SPageInfo::FromPhysAddr(pde);
	TPte* pt = (TPte*)(KPageTableBase+(pi->Index()<<KPageShift)+(pde&(KPageMask&~KPageTableMask)));
	pt += (aAddress>>KPageShift)&(KChunkMask>>KPageShift);
	return pt;
	}


TPte* Mmu::SafePtePtrFromLinAddr(TLinAddr aAddress, TInt aOsAsid)
	{
	TPde pde = PageDirectory(aOsAsid)[aAddress>>KChunkShift];
	TPte* pt = SafePageTableFromPde(pde);
	if(pt)
		pt += (aAddress>>KPageShift)&(KChunkMask>>KPageShift);
	return pt;
	}


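/**
Return the physical address of the page table whose linear address is \a aPt.

@pre #MmuLock held or the page tables lock held.
*/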
TPhysAddr Mmu::PageTablePhysAddr(TPte* aPt)
	{
	__NK_ASSERT_DEBUG(MmuLock::IsHeld() || PageTablesLockIsHeld());

	TInt pdeIndex = ((TLinAddr)aPt)>>KChunkShift;
	TPde pde = PageDirectory(KKernelOsAsid)[pdeIndex];
	__NK_ASSERT_DEBUG((pde&(KPdePtePresent|KPdeLargePage))==KPdePtePresent);

	SPageInfo* pi = SPageInfo::FromPhysAddr(pde);
	TPte* pPte = (TPte*)(KPageTableBase+(pi->Index(true)<<KPageShift));
	TPte pte = pPte[(((TLinAddr)aPt)&KChunkMask)>>KPageShift];
	__NK_ASSERT_DEBUG(pte & KPdePtePresent);

	return pte&KPdePtePhysAddrMask;
	}


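/**
Walk the page directory for address space \a aOsAsid, and the relevant page
table where one is present, to translate \a aLinAddr to a physical address.
Handles both large-page and normal page-table mappings, and returns
KPhysAddrInvalid if the address isn't mapped.
*/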
TPhysAddr Mmu::UncheckedLinearToPhysical(TLinAddr aLinAddr, TInt aOsAsid)
	{
	TRACE2(("Mmu::UncheckedLinearToPhysical(%08x,%d)",aLinAddr,aOsAsid));
	TInt pdeIndex = aLinAddr>>KChunkShift;
	TPde pde = PageDirectory(aOsAsid)[pdeIndex];
	TPhysAddr pa=KPhysAddrInvalid;
	if (pde & KPdePtePresent)
		{
		if(pde&KPdeLargePage)
			{
			pa=(pde&KPdeLargePagePhysAddrMask)+(aLinAddr&~KPdeLargePagePhysAddrMask);
			__KTRACE_OPT(KMMU,Kern::Printf("Mapped with large page - returning %08x",pa));
			}
		else
			{
			SPageInfo* pi = SPageInfo::FromPhysAddr(pde);
			TInt id = (pi->Index(true)<<KPtClusterShift) | ((pde>>KPageTableShift)&KPtClusterMask);
			TPte* pPte = (TPte*)(KPageTableBase+(id<<KPageTableShift));
			TPte pte = pPte[(aLinAddr&KChunkMask)>>KPageShift];
			if (pte & KPdePtePresent)
				{
				pa=(pte&KPdePtePhysAddrMask)+(aLinAddr&KPageMask);
				__KTRACE_OPT(KMMU,Kern::Printf("Mapped with page table - returning %08x",pa));
				}
			}
		}
	return pa;
	}


void Mmu::Init1()
	{
	TRACEB(("Mmu::Init1"));

	TUint pge = TheSuperPage().iCpuId & EX86Feat_PGE;
	PteGlobal = pge ? KPdePteGlobal : 0;
	X86_UseGlobalPTEs = pge!=0;

#ifdef __SMP__
	ApTrampolinePage = KApTrampolinePageLin;

	TInt i;
	for (i=0; i<KMaxCpus; ++i)
		{
		TSubScheduler& ss = TheSubSchedulers[i];
		TLinAddr a = KIPCAlias + (i<<KChunkShift);
		ss.i_AliasLinAddr = (TAny*)a;
		ss.i_AliasPdePtr = (TAny*)(KPageDirectoryBase + (a>>KChunkShift)*sizeof(TPde));
		}
#endif

	Init1Common();
	}

void Mmu::Init2()
	{
	TRACEB(("Mmu::Init2"));

	Init2Common();
	}

void Mmu::Init2Final()
	{
	TRACEB(("Mmu::Init2Final"));

	Init2FinalCommon();
	}


const TPde KPdeForBlankPageTable = KPdePtePresent|KPdePteWrite|KPdePteUser;

TPde Mmu::BlankPde(TMemoryAttributes aAttributes)
	{
	(void)aAttributes;
	TPde pde = KPdeForBlankPageTable;
	TRACE2(("Mmu::BlankPde(%x) returns 0x%x",aAttributes,pde));
	return pde;
	}


TPde Mmu::BlankSectionPde(TMemoryAttributes aAttributes, TUint aPteType)
	{
	return PageToSectionEntry(BlankPte(aAttributes, aPteType), KPdeForBlankPageTable);
	}


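/**
Construct the PTE value used to map pages with memory attributes
\a aAttributes and access permissions \a aPteType. Only uncached memory
types and normal cached memory are supported on this memory model; any
other type is a fatal error.
*/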
TPte Mmu::BlankPte(TMemoryAttributes aAttributes, TUint aPteType)
	{
	TPte pte = KPdePtePresent;
	if(aPteType&EPteTypeUserAccess)
		pte |= KPdePteUser;
	if(aPteType&EPteTypeWritable)
		pte |= KPdePteWrite;
	if(aPteType&EPteTypeGlobal)
		pte |= PteGlobal;

	switch((TMemoryType)(aAttributes&EMemoryAttributeTypeMask))
		{
	case EMemAttStronglyOrdered:
	case EMemAttDevice:
	case EMemAttNormalUncached:
		pte |= KPdePteUncached;
		break;
	case EMemAttNormalCached:
		break;
	default:
		__NK_ASSERT_ALWAYS(0);
		break;
		}

	TRACE2(("Mmu::BlankPte(%x,%x) returns 0x%x",aAttributes,aPteType,pte));
	return pte;
	}


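/**
Convert the section (large page) PDE \a aPde into the permission bits of the
equivalent PTE, and replace \a aPde with a blank page-table PDE ready for
the memory to be remapped as individual pages. #PageToSectionEntry below
performs the reverse conversion.
*/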
TPte Mmu::SectionToPageEntry(TPde& aPde)
	{
	TPte pte = aPde&~(KPdePtePhysAddrMask|KPdeLargePage);
	aPde = KPdeForBlankPageTable;
	return pte;
	}


TPde Mmu::PageToSectionEntry(TPte aPte, TPde /*aPde*/)
	{
	TPte pde = aPte&~KPdeLargePagePhysAddrMask;
	pde |= KPdeLargePage;
	return pde;
	}


TMemoryAttributes Mmu::CanonicalMemoryAttributes(TMemoryAttributes aAttr)
	{
	TUint attr = aAttr;
	if(attr&EMemoryAttributeDefaultShareable)
		{
		// sharing not specified, use default...
#if defined(__CPU_USE_SHARED_MEMORY)
		attr |= EMemoryAttributeShareable;
#else
		attr &= ~EMemoryAttributeShareable;
#endif
		}

	// remove invalid attributes...
	attr &= ~(EMemoryAttributeUseECC);

	return (TMemoryAttributes)(attr&EMemoryAttributeMask);
	}


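/**
Record each page in \a aPageList as allocated and, unless EAllocNoWipe is set,
wipe its contents through a temporary kernel mapping.

If the bottom bit of \a aPageList is set, the pointer itself encodes the
starting physical address (aPageList&~1) of \a aCount contiguous pages;
otherwise it points to an array of \a aCount page-aligned physical addresses.

@pre #RamAllocLock held.
*/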
void Mmu::PagesAllocated(TPhysAddr* aPageList, TUint aCount, TRamAllocFlags aFlags, TBool aReallocate)
	{
	TRACE2(("Mmu::PagesAllocated(0x%08x,%d,0x%x,%d)",aPageList, aCount, aFlags, (bool)aReallocate));
	__NK_ASSERT_DEBUG(RamAllocLock::IsHeld());

	TBool wipe = !(aFlags&EAllocNoWipe); // do we need to wipe page contents?
	TMemoryType newType = (TMemoryType)(aFlags&KMemoryTypeMask); // memory type that pages will be used for
	TUint8 wipeByte = (aFlags&EAllocUseCustomWipeByte) ? (aFlags>>EAllocWipeByteShift)&0xff : 0x03; // value to wipe memory with

	// process each page in turn...
	while(aCount--)
		{
		// get physical address of next page...
		TPhysAddr pagePhys;
		if((TPhysAddr)aPageList&1)
			{
			// aPageList is actually the physical address to use...
			pagePhys = (TPhysAddr)aPageList&~1;
			*(TPhysAddr*)&aPageList += KPageSize;
			}
		else
			pagePhys = *aPageList++;
		__NK_ASSERT_DEBUG((pagePhys&KPageMask)==0);

		// get info about page...
		SPageInfo* pi = SPageInfo::FromPhysAddr(pagePhys);
		TMemoryType oldType = (TMemoryType)(pi->Flags(true)&KMemoryTypeMask);

		TRACE2(("Mmu::PagesAllocated page=0x%08x, oldType=%d, wipe=%d",pagePhys,oldType,wipe));
		if(wipe)
			{
			// work out temporary mapping values...
			TLinAddr tempLinAddr = iTempMap[0].iLinAddr;
			TPte* tempPte = iTempMap[0].iPtePtr;

			// temporarily map page...
			*tempPte = pagePhys | iTempPteCached;
			CacheMaintenance::SinglePteUpdated((TLinAddr)tempPte);
			InvalidateTLBForPage(tempLinAddr|KKernelOsAsid);

			// wipe contents of memory...
			memset((TAny*)tempLinAddr, wipeByte, KPageSize);
			__e32_io_completion_barrier();

			// invalidate temporary mapping...
			*tempPte = KPteUnallocatedEntry;
			CacheMaintenance::SinglePteUpdated((TLinAddr)tempPte);
			InvalidateTLBForPage(tempLinAddr|KKernelOsAsid);
			}

		// indicate page has been allocated...
		if(aReallocate==false)
			pi->SetAllocated();
		}
	}


void Mmu::PageFreed(SPageInfo* aPageInfo)
	{
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());

	if(aPageInfo->Type()==SPageInfo::EUnused)
		return;

	aPageInfo->SetUnused();

	TRACE2(("Mmu::PageFreed page=0x%08x type=%d colour=%d",aPageInfo->PhysAddr(),aPageInfo->Flags()&KMemoryTypeMask,aPageInfo->Index()&KPageColourMask));
	}


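/**
Clean and invalidate the data cache for the physical pages in \a aPages so
the memory can safely be reused, mapping each page in turn at a temporary
kernel address to perform the maintenance. Does nothing if the memory type
in \a aAttributes isn't cached.
*/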
void Mmu::CleanAndInvalidatePages(TPhysAddr* aPages, TUint aCount, TMemoryAttributes aAttributes, TUint aColour)
	{
	TMemoryType type = (TMemoryType)(aAttributes&EMemoryAttributeTypeMask);
	if(!CacheMaintenance::IsCached(type))
		{
		TRACE2(("Mmu::CleanAndInvalidatePages - nothing to do"));
		return;
		}

	RamAllocLock::Lock();

	while(aCount--)
		{
		TPhysAddr pagePhys = *aPages++;
		TRACE2(("Mmu::CleanAndInvalidatePages 0x%08x",pagePhys));

		// work out temporary mapping values...
		TLinAddr tempLinAddr = iTempMap[0].iLinAddr;
		TPte* tempPte = iTempMap[0].iPtePtr;

		// temporarily map page...
		*tempPte = pagePhys | iTempPteCached;
		CacheMaintenance::SinglePteUpdated((TLinAddr)tempPte);
		InvalidateTLBForPage(tempLinAddr|KKernelOsAsid);

		// sort out cache for memory reuse...
		CacheMaintenance::PageToPreserveAndReuse(tempLinAddr, type, KPageSize);

		// invalidate temporary mapping...
		*tempPte = KPteUnallocatedEntry;
		CacheMaintenance::SinglePteUpdated((TLinAddr)tempPte);
		InvalidateTLBForPage(tempLinAddr|KKernelOsAsid);

		RamAllocLock::Flash();
		}
	RamAllocLock::Unlock();
	}


TInt DMemModelThread::Alias(TLinAddr aAddr, DMemModelProcess* aProcess, TInt aSize, TLinAddr& aAliasAddr, TUint& aAliasSize)
//
// Set up an alias mapping starting at address aAddr in the specified process.
// Note: Alias is removed if an exception is trapped by DThread::IpcExcHandler.
//
	{
	TRACE2(("Thread %O Alias %08x+%x Process %O",this,aAddr,aSize,aProcess));
	__NK_ASSERT_DEBUG(this==TheCurrentThread); // many bad things can happen if false
	// If there is an existing alias it should be for the same process, otherwise
	// the os asid reference may be leaked.
	__NK_ASSERT_DEBUG(!iAliasLinAddr || aProcess == iAliasProcess);

	if(TUint(aAddr^KIPCAlias)<TUint(KIPCAliasAreaSize))
		return KErrBadDescriptor; // prevent access to alias region

	// Grab the mmu lock before opening a reference on the os asid so that this thread
	// is in an implicit critical section and therefore can't leak the reference by
	// dying before iAliasLinAddr is set.
	MmuLock::Lock();

	TInt osAsid;
	if (!iAliasLinAddr)
		{// There isn't any existing alias.
		// Open a reference on aProcess's os asid so that it is not freed and/or reused
		// while we are aliasing an address belonging to it.
		osAsid = aProcess->TryOpenOsAsid();
		if (osAsid < 0)
			{// Couldn't open the os asid, so aProcess is no longer running.
			MmuLock::Unlock();
			return KErrBadDescriptor;
			}
		}
	else
		{
		// Just read the os asid of the process being aliased; we already have a reference on it.
		osAsid = aProcess->OsAsid();
		}

	// Now we have the os asid, check access to kernel memory.
	if(aAddr >= KUserMemoryLimit && osAsid != (TUint)KKernelOsAsid)
		{
		NKern::ThreadEnterCS();
		MmuLock::Unlock();
		if (!iAliasLinAddr)
			{// Close the new reference as RemoveAlias won't, since iAliasLinAddr is not set.
			aProcess->AsyncCloseOsAsid();	// Asynchronous close as this method should be quick.
			}
		NKern::ThreadLeaveCS();
		return KErrBadDescriptor; // prevent access to supervisor-only memory
		}

	// Now we know all accesses to global memory are safe, so check if aAddr is global.
	if(aAddr >= KGlobalMemoryBase)
		{
		// Address is in the global section, don't bother aliasing it...
		if (!iAliasLinAddr)
			{// Close the new reference as it is not required.
			NKern::ThreadEnterCS();
			MmuLock::Unlock();
			aProcess->AsyncCloseOsAsid();	// Asynchronous close as this method should be quick.
			NKern::ThreadLeaveCS();
			}
		else
			{// Remove the existing alias as it is not required.
			DoRemoveAlias(iAliasLinAddr);	// Releases mmulock.
			}
		aAliasAddr = aAddr;
		TInt maxSize = KChunkSize-(aAddr&KChunkMask);
		aAliasSize = aSize<maxSize ? aSize : maxSize;
		TRACE2(("DMemModelThread::Alias() abandoned as memory is globally mapped"));
		return KErrNone;
		}

	TPde* pd = Mmu::PageDirectory(osAsid);
	TInt pdeIndex = aAddr>>KChunkShift;
	TPde pde = pd[pdeIndex];
#ifdef __SMP__
	TLinAddr aliasAddr;
#else
	TLinAddr aliasAddr = KIPCAlias+(aAddr&(KChunkMask & ~KPageMask));
#endif
	if(pde==iAliasPde && iAliasLinAddr)
		{
		// pde already aliased, so just update linear address...
#ifdef __SMP__
		__NK_ASSERT_DEBUG(iCpuRestoreCookie>=0);
		aliasAddr = iAliasLinAddr & ~KChunkMask;
		aliasAddr |= (aAddr & (KChunkMask & ~KPageMask));
#endif
		iAliasLinAddr = aliasAddr;
		}
	else
		{
		// alias PDE changed...
		if(!iAliasLinAddr)
			{
			TheMmu.iAliasList.Add(&iAliasLink); // add to list if not already aliased
#ifdef __SMP__
			__NK_ASSERT_DEBUG(iCpuRestoreCookie==-1);
			iCpuRestoreCookie = NKern::FreezeCpu();	// temporarily lock current thread to this processor
#endif
			}
		iAliasPde = pde;
		iAliasProcess = aProcess;
#ifdef __SMP__
		TSubScheduler& ss = SubScheduler();		// OK since we are locked to this CPU
		aliasAddr = TLinAddr(ss.i_AliasLinAddr) + (aAddr & (KChunkMask & ~KPageMask));
		iAliasPdePtr = (TPde*)(TLinAddr(ss.i_AliasPdePtr) + (osAsid << KPageTableShift));
#endif
		iAliasLinAddr = aliasAddr;
		*iAliasPdePtr = pde;
		}
	TRACE2(("DMemModelThread::Alias() PDEntry=%x, iAliasLinAddr=%x",pde, aliasAddr));
	LocalInvalidateTLBForPage(aliasAddr);
	TInt offset = aAddr&KPageMask;
	aAliasAddr = aliasAddr | offset;
	TInt maxSize = KPageSize - offset;
	aAliasSize = aSize<maxSize ? aSize : maxSize;
	iAliasTarget = aAddr & ~KPageMask;

	MmuLock::Unlock();

	return KErrNone;
	}


void DMemModelThread::RemoveAlias()
//
// Remove alias mapping (if present)
//
	{
	TRACE2(("Thread %O RemoveAlias", this));
	__NK_ASSERT_DEBUG(this==TheCurrentThread); // many bad things can happen if false

	TLinAddr addr = iAliasLinAddr;
	if(addr)
		{
		MmuLock::Lock();

		DoRemoveAlias(addr);	// Unlocks mmulock.
		}
	}


/**
Remove the alias mapping.

@pre #MmuLock held.
*/
void DMemModelThread::DoRemoveAlias(TLinAddr aAddr)
	{
	iAliasLinAddr = 0;
	iAliasPde = KPdeUnallocatedEntry;
	*iAliasPdePtr = KPdeUnallocatedEntry;
	SinglePdeUpdated(iAliasPdePtr);
	__NK_ASSERT_DEBUG((aAddr&KPageMask)==0);
	LocalInvalidateTLBForPage(aAddr);
	iAliasLink.Deque();
#ifdef __SMP__
	__NK_ASSERT_DEBUG(iCpuRestoreCookie>=0);
	NKern::EndFreezeCpu(iCpuRestoreCookie);
	iCpuRestoreCookie = -1;
#endif

	// Must close the os asid while in a critical section to prevent it being
	// leaked. However, we can't hold the mmu lock, so we have to enter an
	// explicit critical section. It is ok to release the mmu lock as the
	// iAliasLinAddr and iAliasProcess members are only ever updated by the
	// current thread.
	NKern::ThreadEnterCS();
	MmuLock::Unlock();
	iAliasProcess->AsyncCloseOsAsid();	// Asynchronous close as this method should be quick.
	NKern::ThreadLeaveCS();
	}


TInt M::DemandPagingFault(TAny* aExceptionInfo)
	{
	TX86ExcInfo& exc=*(TX86ExcInfo*)aExceptionInfo;
	if(exc.iExcId!=EX86VectorPageFault)
		return KErrAbort; // not a page fault

	/*
	Meanings of exc.iExcErrorCode when exception type is EX86VectorPageFault...

	Bit 0	0 The fault was caused by a non-present page.
			1 The fault was caused by a page-level protection violation.
	Bit 1	0 The access causing the fault was a read.
			1 The access causing the fault was a write.
	Bit 2	0 The access causing the fault originated when the processor was executing in supervisor mode.
			1 The access causing the fault originated when the processor was executing in user mode.
	Bit 3	0 The fault was not caused by a reserved-bit violation.
			1 The fault was caused by reserved bits set to 1 in a page directory.
	Bit 4	0 The fault was not caused by an instruction fetch.
			1 The fault was caused by an instruction fetch.
	*/

	// check access type...
	TUint accessPermissions = EUser; // we only allow paging of user memory
	if(exc.iExcErrorCode&(1<<1))
		accessPermissions |= EReadWrite;

	// let TheMmu handle the fault...
	return TheMmu.HandlePageFault(exc.iEip, exc.iFaultAddress, accessPermissions, aExceptionInfo);
	}