os/kernelhwsrv/kernel/eka/memmodel/epoc/flexible/mmu/arm/xmmu.cpp
First public contribution.
     1 // Copyright (c) 1997-2009 Nokia Corporation and/or its subsidiary(-ies).
     2 // All rights reserved.
     3 // This component and the accompanying materials are made available
     4 // under the terms of the License "Eclipse Public License v1.0"
     5 // which accompanies this distribution, and is available
     6 // at the URL "http://www.eclipse.org/legal/epl-v10.html".
     7 //
     8 // Initial Contributors:
     9 // Nokia Corporation - initial contribution.
    10 //
    11 // Contributors:
    12 //
    13 // Description:
    14 //
    15 
    16 #include "arm_mem.h"
    17 #include "mm.h"
    18 #include "mmu.h"
    19 #include "mpager.h"
    20 
    21 #include "cache_maintenance.inl"
    22 #include "execs.h"
    23 
    24 
    25 #ifdef BROADCAST_TLB_MAINTENANCE
    26 class TTLBIPI : public TGenericIPI
    27 	{
    28 public:
    29 	TTLBIPI();
    30 	static void InvalidateIsr(TGenericIPI*);
    31 	static void WaitAndInvalidateIsr(TGenericIPI*);
    32 	void AddArg(TLinAddr aArg);
    33 public:
    34 	volatile TInt	iFlag;
    35 	TLinAddr		iArg;
    36 	};
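        // The single argument passed via AddArg() is overloaded: 0 means invalidate the
        // whole TLB, a value below 256 is treated as an ASID, and anything else is treated
        // as a virtual page address (with the ASID ORed into its low bits, as elsewhere in
        // this file). InvalidateIsr() below decodes it accordingly.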
    37 
    38 TTLBIPI::TTLBIPI()
    39 	:	iFlag(0), iArg(0)
    40 	{
    41 	}
    42 
    43 void TTLBIPI::InvalidateIsr(TGenericIPI* aPtr)
    44 	{
    45 	TRACE2(("TLBInv"));
    46 	TTLBIPI& a = *(TTLBIPI*)aPtr;
    47 	TLinAddr arg = a.iArg;
    48 	if (arg==0)
    49 		LocalInvalidateTLB();
    50 	else if (arg<256)
    51 		LocalInvalidateTLBForAsid(arg);
    52 	else
    53 		LocalInvalidateTLBForPage(arg);
    54 	}
    55 
    56 void TTLBIPI::WaitAndInvalidateIsr(TGenericIPI* aPtr)
    57 	{
    58 	TRACE2(("TLBWtInv"));
    59 	TTLBIPI& a = *(TTLBIPI*)aPtr;
    60 	while (!a.iFlag)
    61 		{ __chill(); }
    62 	InvalidateIsr(aPtr);
    63 	}
    64 
    65 void TTLBIPI::AddArg(TLinAddr aArg)
    66 	{
    67 	iArg = aArg;
    68 	NKern::Lock();
    69 	InvalidateIsr(this);
    70 	QueueAllOther(&InvalidateIsr);
    71 	NKern::Unlock();
    72 	WaitCompletion();
    73 	}
    74 
    75 void BroadcastInvalidateTLB(TLinAddr aLinAddrAndAsid)
    76 	{
    77 	TTLBIPI ipi;
    78 	ipi.AddArg(aLinAddrAndAsid);
    79 	}
    80 #endif	// BROADCAST_TLB_MAINTENANCE
    81 
    82 //
    83 // Functions for class Mmu
    84 //
    85 
    86 /**
    87 Return the physical address of the memory mapped by a Page Table Entry (PTE).
    88 
    89 @param aPte			The value contained in the PTE.
    90 @param aPteIndex	The index of the PTE within its page table.
    91 */
    92 TPhysAddr Mmu::PtePhysAddr(TPte aPte, TUint aPteIndex)
    93 	{
    94 	if(aPte&KArmV6PteSmallPage)
    95 		return aPte & KPteSmallPageAddrMask;
    96 	if(aPte&KArmV6PteLargePage)
    97 		return (aPte & KPteLargePageAddrMask) + (TPhysAddr(aPteIndex << KPageShift) & KLargePageMask);
    98 	return KPhysAddrInvalid;
    99 	}
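        // Note: ARM large pages are 64KB and their PTE is replicated over 16 consecutive
        // page table entries, which is why the large-page case above adds back the offset
        // recovered from the PTE index ((aPteIndex << KPageShift) & KLargePageMask).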
   100 
   101 
   102 /**
   103 Return the virtual address of the page table referenced by the given
   104 Page Directory Entry (PDE) \a aPde. If the PDE doesn't refer to a
   105 page table then the null-pointer is returned.
   106 
   107 If the page table was not one allocated by the kernel then the
   108 results are unpredictable and may cause a system fault.
   109 
   110 @pre #MmuLock held.
   111 */
   112 TPte* Mmu::PageTableFromPde(TPde aPde)
   113 	{
   114 	if((aPde&KPdePresentMask)==KArmV6PdePageTable)
   115 		{
   116 		SPageInfo* pi = SPageInfo::FromPhysAddr(aPde);
   117 		return (TPte*)(KPageTableBase+(pi->Index()<<KPageShift)+(aPde&(KPageMask&~KPageTableMask)));
   118 		}
   119 	return 0;
   120 	}
   121 
   122 
   123 /**
    124 Perform the action of #PageTableFromPde but without the possibility of
    125 a system fault caused by the page table not being one allocated by the kernel.
   126 
   127 @pre #MmuLock held.
   128 */
   129 TPte* Mmu::SafePageTableFromPde(TPde aPde)
   130 	{
   131 	if((aPde&KPdeTypeMask)==KArmV6PdePageTable)
   132 		{
   133 		SPageInfo* pi = SPageInfo::SafeFromPhysAddr(aPde&~KPageMask);
   134 		if(pi)
   135 			return (TPte*)(KPageTableBase+(pi->Index()<<KPageShift)+(aPde&(KPageMask&~KPageTableMask)));
   136 		}
   137 	return 0;
   138 	}
   139 
   140 
   141 /**
    142 Return the base physical address of the section referenced by the given
   143 Page Directory Entry (PDE) \a aPde. If the PDE doesn't refer to a
   144 section then KPhysAddrInvalid is returned.
   145 
   146 @pre #MmuLock held.
   147 */
   148 TPhysAddr Mmu::SectionBaseFromPde(TPde aPde)
   149 	{
   150 	if(PdeMapsSection(aPde))
   151 		return aPde&KPdeSectionAddrMask;
   152 	return KPhysAddrInvalid;
   153 	}
   154 
   155 
   156 /**
   157 Return a pointer to the Page Table Entry (PTE) which maps the
   158 virtual address \a aAddress in the address space \a aOsAsid.
   159 
   160 If no page table exists or it was not one allocated by the kernel
   161 then the results are unpredictable and may cause a system fault.
   162 
   163 @pre #MmuLock held.
   164 */
   165 TPte* Mmu::PtePtrFromLinAddr(TLinAddr aAddress, TInt aOsAsid)
   166 	{
   167 	TPde pde = PageDirectory(aOsAsid)[aAddress>>KChunkShift];
   168 	SPageInfo* pi = SPageInfo::FromPhysAddr(pde);
   169 	TPte* pt = (TPte*)(KPageTableBase+(pi->Index()<<KPageShift)+(pde&(KPageMask&~KPageTableMask)));
   170 	pt += (aAddress>>KPageShift)&(KChunkMask>>KPageShift);
   171 	return pt;
   172 	}
   173 
   174 
   175 /**
   176 Perform the action of #PtePtrFromLinAddr but without the possibility
   177 of a system fault. If the page table is not present or not one
   178 allocated by the kernel then the null-pointer is returned.
   179 
   180 @pre #MmuLock held.
   181 */
   182 TPte* Mmu::SafePtePtrFromLinAddr(TLinAddr aAddress, TInt aOsAsid)
   183 	{
   184 	TPde pde = PageDirectory(aOsAsid)[aAddress>>KChunkShift];
   185 	TPte* pt = SafePageTableFromPde(pde);
   186 	if(pt)
   187 		pt += (aAddress>>KPageShift)&(KChunkMask>>KPageShift);
   188 	return pt;
   189 	}
   190 
   191 
   192 /**
   193 Return the physical address for the page table whose virtual
   194 address is \a aPt.
   195 
   196 If the page table was not one allocated by the kernel then the
   197 results are unpredictable and may cause a system fault.
   198 
   199 @pre #MmuLock held.
   200 */
   201 TPhysAddr Mmu::PageTablePhysAddr(TPte* aPt)
   202 	{
   203 	__NK_ASSERT_DEBUG(MmuLock::IsHeld() || PageTablesLockIsHeld());
   204 
   205 	TInt pdeIndex = ((TLinAddr)aPt)>>KChunkShift;
   206 	TPde pde = PageDirectory(KKernelOsAsid)[pdeIndex];
   207 	__NK_ASSERT_DEBUG((pde&KPdePresentMask)==KArmV6PdePageTable);
   208 
   209 	SPageInfo* pi = SPageInfo::FromPhysAddr(pde);
   210 	TPte* pPte = (TPte*)(KPageTableBase+(pi->Index(true)<<KPageShift)+(pde&(KPageMask&~KPageTableMask)));
   211 	TPte pte = pPte[(((TLinAddr)aPt)&KChunkMask)>>KPageShift];
   212 	__NK_ASSERT_DEBUG(pte & KArmV6PteSmallPage);
   213 
   214 	return (pte&KPteSmallPageAddrMask)|(((TLinAddr)aPt)&(KPageMask&~KPageTableMask));
   215 	}
   216 
   217 
   218 /**
   219 Perform a page table walk to return the physical address of
   220 the memory mapped at virtual address \a aLinAddr in the
   221 address space \a aOsAsid.
   222 
   223 If the page table used was not one allocated by the kernel
   224 then the results are unpredictable and may cause a system fault.
   225 
    226 Use of this function should be avoided; use Mmu::LinearToPhysical instead,
    227 which contains debug assertions for its preconditions.
   228 
   229 @pre #MmuLock held.
   230 */
   231 TPhysAddr Mmu::UncheckedLinearToPhysical(TLinAddr aLinAddr, TInt aOsAsid)
   232 	{
   233 	TRACE2(("Mmu::UncheckedLinearToPhysical(%08x,%d)",aLinAddr,aOsAsid));
   234 	TInt pdeIndex = aLinAddr>>KChunkShift;
   235 	TPde pde = PageDirectory(aOsAsid)[pdeIndex];
   236 	if ((pde&KPdePresentMask)==KArmV6PdePageTable)
   237 		{
   238 		SPageInfo* pi = SPageInfo::FromPhysAddr(pde);
   239 		TPte* pPte = (TPte*)(KPageTableBase+(pi->Index(true)<<KPageShift)+(pde&(KPageMask&~KPageTableMask)));
   240 		TPte pte = pPte[(aLinAddr&KChunkMask)>>KPageShift];
   241 		if (pte & KArmV6PteSmallPage)
   242 			{
   243 			TPhysAddr pa=(pte&KPteSmallPageAddrMask)|(aLinAddr&~KPteSmallPageAddrMask);
   244 			__KTRACE_OPT(KMMU,Kern::Printf("Mapped with small page - returning %08x",pa));
   245 			return pa;
   246 			}
   247 		else if (pte & KArmV6PteLargePage)
   248 			{
   249 			TPhysAddr pa=(pte&KPteLargePageAddrMask)|(aLinAddr&~KPteLargePageAddrMask);
   250 			__KTRACE_OPT(KMMU,Kern::Printf("Mapped with large page - returning %08x",pa));
   251 			return pa;
   252 			}
   253 		}
   254 	else if ((pde&KPdePresentMask)==KArmV6PdeSection)
   255 		{
   256 		TPhysAddr pa=(pde&KPdeSectionAddrMask)|(aLinAddr&~KPdeSectionAddrMask);
   257 		__KTRACE_OPT(KMMU,Kern::Printf("Mapped with section - returning %08x",pa));
   258 		return pa;
   259 		}
   260 	return KPhysAddrInvalid;
   261 	}
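        // A minimal usage sketch (illustrative only; `addr' and `osAsid' are hypothetical
        // locals, and callers should normally prefer Mmu::LinearToPhysical, which asserts
        // its preconditions):
        //
        //     MmuLock::Lock();
        //     TPhysAddr pa = TheMmu.UncheckedLinearToPhysical(addr, osAsid);
        //     MmuLock::Unlock();
        //     if(pa==KPhysAddrInvalid)
        //         {} // addr is not mapped in that address space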
   262 
   263 
   264 extern TUint32 TTCR();
   265 extern TUint32 CPUID(TInt /*aRegNum*/);
   266 
   267 
   268 void Mmu::Init1()
   269 	{
   270 	TRACEB(("Mmu::Init1"));
   271 
    272 	// check the local/global page directory split is correct...
   273 	__NK_ASSERT_ALWAYS(TTCR()==1);
   274 
   275 	// check cache type is supported and consistent with compile time macros...
   276 	TInt iColourCount = 0;
   277 	TInt dColourCount = 0;
   278 	TUint32 ctr = InternalCache::TypeRegister();
   279 	TRACEB(("CacheTypeRegister = %08x",ctr));
   280 #ifdef __CPU_ARMV6
   281 	__NK_ASSERT_ALWAYS((ctr>>29)==0);	// check ARMv6 format
   282 	if(ctr&0x800)
   283 		iColourCount = 4;
   284 	if(ctr&0x800000)
   285 		dColourCount = 4;
   286 #else
   287 	__NK_ASSERT_ALWAYS((ctr>>29)==4);	// check ARMv7 format
   288 	TUint l1ip = (ctr>>14)&3;			// L1 instruction cache indexing and tagging policy
   289 	__NK_ASSERT_ALWAYS(l1ip>=2);		// check I cache is physically tagged
   290 
   291 	TUint32 clidr = InternalCache::LevelIDRegister();
   292 	TRACEB(("CacheLevelIDRegister = %08x",clidr));
   293 	TUint l1type = clidr&7;
   294 	if(l1type)
   295 		{
   296 		if(l1type==2 || l1type==3 || l1type==4)
   297 			{
   298 			// we have an L1 data cache...
   299 			TUint32 csir = InternalCache::SizeIdRegister(0,0);
   300 			TUint sets = ((csir>>13)&0x7fff)+1;
   301 			TUint ways = ((csir>>3)&0x3ff)+1;
   302 			TUint lineSizeShift = (csir&7)+4;
    303 			// assume the L1 data cache is VIPT with broken alias checks, so we need data cache colouring...
   304 			dColourCount = (sets<<lineSizeShift)>>KPageShift;
   305 			if(l1type==4) // unified cache, so set instruction cache colour as well...
   306 				iColourCount = (sets<<lineSizeShift)>>KPageShift;
   307 			TRACEB(("L1DCache = 0x%x,0x%x,%d colourCount=%d",sets,ways,lineSizeShift,(sets<<lineSizeShift)>>KPageShift));
   308 			}
   309 
   310 		if(l1type==1 || l1type==3)
   311 			{
   312 			// we have a separate L1 instruction cache...
   313 			TUint32 csir = InternalCache::SizeIdRegister(1,0);
   314 			TUint sets = ((csir>>13)&0x7fff)+1;
   315 			TUint ways = ((csir>>3)&0x3ff)+1;
   316 			TUint lineSizeShift = (csir&7)+4;
   317 			iColourCount = (sets<<lineSizeShift)>>KPageShift;
   318 			TRACEB(("L1ICache = 0x%x,0x%x,%d colourCount=%d",sets,ways,lineSizeShift,(sets<<lineSizeShift)>>KPageShift));
   319 			}
   320 		}
   321 	if(l1ip==3)
   322 		{
   323 		// PIPT cache, so no colouring restrictions...
   324 		TRACEB(("L1ICache is PIPT"));
   325 		iColourCount = 0;
   326 		}
   327 	else
   328 		{
   329 		// VIPT cache...
   330 		TRACEB(("L1ICache is VIPT"));
   331 		}
   332 #endif
   333 	TRACEB(("page colouring counts I=%d, D=%d",iColourCount,dColourCount));
   334 	__NK_ASSERT_ALWAYS(iColourCount<=KPageColourCount);
   335 	__NK_ASSERT_ALWAYS(dColourCount<=KPageColourCount);
   336 	#ifndef __CPU_I_CACHE_HAS_COLOUR
   337 	__NK_ASSERT_ALWAYS(iColourCount==0);
   338 	#endif
   339 	#ifndef __CPU_D_CACHE_HAS_COLOUR
   340 	__NK_ASSERT_ALWAYS(dColourCount==0);
   341 	#endif
   342 	#ifndef __CPU_CACHE_HAS_COLOUR
   343 	__NK_ASSERT_ALWAYS(iColourCount==0);
   344 	__NK_ASSERT_ALWAYS(dColourCount==0);
   345 	#endif
   346 
   347 	// check MMU attributes match our assumptions...
    348 	if(((CPUID(-1)>>16)&0xf)==0xf) // if we have the new CPUID format...
   349 		{
   350 		TUint mmfr1 = CPUID(5);
   351 		TRACEB(("mmfr1 = %08x",mmfr1));
   352 		#ifdef __CPU_NEEDS_BTAC_FLUSH_AFTER_ASID_CHANGE
   353 			__NK_ASSERT_ALWAYS(((mmfr1>>28)&0xf)==1); // Branch Predictor needs invalidating after ASID change
   354 		#else
    355 			__NK_ASSERT_ALWAYS(((mmfr1>>28)&0xf)>=2); // Branch Predictor doesn't need invalidating after ASID change
   356 		#endif
   357 
   358 		TUint mmfr2 = CPUID(6);
   359 		TRACEB(("mmfr2 = %08x",mmfr2));
   360 		__NK_ASSERT_ALWAYS(((mmfr2>>20)&0xf)>=2); // check Mem Barrier instructions are supported in CP15
   361 
   362 		TUint mmfr3 = CPUID(7);
   363 		TRACEB(("mmfr3 = %08x",mmfr3));
   364 		(void)mmfr3;
   365 
   366 		#if defined(__SMP__) && !defined(__CPU_ARM11MP__)
   367 			__NK_ASSERT_ALWAYS(((mmfr3>>12)&0xf)>=2); // check Maintenance Broadcast is for all cache and TLB operations
   368 		#endif	
   369 		#ifdef __CPU_SUPPORTS_PAGE_TABLE_WALK_TO_L1_CACHE
   370 			__NK_ASSERT_ALWAYS(((mmfr3>>20)&0xf)>=1); // check Coherent Walk for page tables
   371 		#endif	
   372 		}
   373 
   374 	Arm::DefaultDomainAccess = KDefaultDomainAccess;
   375 
   376 #ifdef __SMP__
   377 	TInt i;
   378 	for (i=0; i<KMaxCpus; ++i)
   379 		{
   380 		TSubScheduler& ss = TheSubSchedulers[i];
   381 		TLinAddr a = KIPCAlias + (i<<KChunkShift);
   382 		ss.i_AliasLinAddr = (TAny*)a;
   383 		ss.i_AliasPdePtr = (TAny*)(KPageDirectoryBase + (a>>KChunkShift)*sizeof(TPde));
   384 		}
   385 #endif
   386 
   387 	Init1Common();
   388 	}
   389 
   390 void Mmu::Init2()
   391 	{
   392 	TRACEB(("Mmu::Init2"));
   393 
   394 	Init2Common();
   395 	}
   396 
   397 DMemoryObject* ExceptionStacks;
   398 
   399 void Mmu::Init2Final()
   400 	{
   401 	TRACEB(("Mmu::Init2Final"));
   402 
   403 	Init2FinalCommon();
   404 
   405 	// initialise memory object for exception stacks...
   406 	TMappingCreateFlags mapFlags = (TMappingCreateFlags)(EMappingCreateFixedVirtual|EMappingCreateReserveAllResources);
   407 	TMemoryAttributes memAttr = EMemoryAttributeStandard;
   408 	TUint size = 4*2*KPageSize; // 4 exception stacks each of one guard page and one mapped page
   409 	size |= 1; // lower bit of size is set if region to be claimed contains gaps
   410 	TInt r = MM::InitFixedKernelMemory(ExceptionStacks, KExcptStacksLinearBase, KExcptStacksLinearEnd, size, EMemoryObjectUnpaged, EMemoryCreateNoWipe, memAttr, mapFlags);
   411 	__NK_ASSERT_ALWAYS(r==KErrNone);
   412 	}
   413 
   414 
   415 /**
    416 Return the page directory entry (PDE) value to use when mapping page tables intended
    417 to map memory with the given attributes.
    418 The returned value has a zero physical address component, so a page table's physical
    419 address can simply be ORed in.
   420 */
   421 TPde Mmu::BlankPde(TMemoryAttributes aAttributes)
   422 	{
   423 	TPde pde = KArmV6PdePageTable;
   424 	if(aAttributes&EMemoryAttributeUseECC)
   425 		pde |= 1<<9;
   426 
   427 	TRACE2(("Mmu::BlankPde(%x) returns 0x%x",aAttributes,pde));
   428 	return pde;
   429 	}
   430 
   431 
   432 /**
    433 Return the page directory entry (PDE) value to use when creating a section mapping for memory
    434 with the given attributes and #TPteType.
    435 The returned value has a zero physical address component, so the section's physical address
    436 can simply be ORed in.
   437 */
   438 TPde Mmu::BlankSectionPde(TMemoryAttributes aAttributes, TUint aPteType)
   439 	{
   440 	// reuse existing functions rather than duplicating the logic
   441 	TPde pde = BlankPde(aAttributes);
   442 	TPte pte = BlankPte(aAttributes, aPteType);
   443 	return PageToSectionEntry(pte, pde);
   444 	}
   445 
   446 
   447 /**
   448 Return the page table entry (PTE) to use when mapping memory pages
   449 with the given attributes and #TPteType.
    450 The returned value has a zero physical address component, so a page's physical
    451 address can simply be ORed in.
   452 */
   453 
   454 TPte Mmu::BlankPte(TMemoryAttributes aAttributes, TUint aPteType)
   455 	{
   456 	TUint attr = CanonicalMemoryAttributes(aAttributes);
   457 
   458 	// common PTE setup...
   459 	TPte pte = KArmV6PteSmallPage|KArmV6PteAP0;
   460 	if(aPteType&EPteTypeUserAccess)
   461 		pte |= KArmV6PteAP1;					// AP1 = user access
   462 	if((aPteType&EPteTypeWritable)==false)
   463 		pte |= KArmV6PteAP2;					// AP2 = !writable
   464 	if(attr&EMemoryAttributeShareable)
   465 		pte |= KArmV6PteS;
   466 	if((aPteType&EPteTypeGlobal)==false)
   467 		pte |= KArmV6PteNG;
   468 	if((aPteType&EPteTypeExecutable)==false)
   469 		pte |= KArmV6PteSmallXN;
   470 
   471 	#if defined(__CPU_MEMORY_TYPE_REMAPPING)
   472 
   473 		// other PTE bits...
   474 		if(pte&KArmV6PteSmallXN)
   475 			pte |= KArmV6PteSmallTEX1;	// TEX1 is a copy of the XN
   476 
   477 		// process memory type...
   478 		TUint type = attr&EMemoryAttributeTypeMask;
   479 		pte |= ((type&3)<<2) | ((type&4)<<4);
   480 
   481 	#else
   482 
   483 		// other PTE bits...
   484 		if((pte&(KArmV6PteAP2|KArmV6PteAP1))==(KArmV6PteAP2|KArmV6PteAP1))
   485 			pte &= ~KArmV6PteAP0;		// clear AP0 if user r/o
   486 
   487 		// process memory type...
   488 		TUint texcb;
   489 		switch((TMemoryType)(attr&EMemoryAttributeTypeMask))
   490 			{
   491 		case EMemAttStronglyOrdered:
   492 			texcb = KArmV6MemAttSO;
   493 			break;
   494 		case EMemAttDevice:
   495 			if(attr&EMemoryAttributeShareable)
   496 				texcb = KArmV6MemAttSD;
   497 			else
   498 				texcb = KArmV6MemAttSD;	// should be KArmV6MemAttNSD? (but this made H4 go bang)
   499 			break;
   500 		case EMemAttNormalUncached:
   501 			texcb = KArmV6MemAttNCNC;
   502 			break;
   503 		case EMemAttNormalCached:
   504 			texcb = KArmV6MemAttWBWAWBWA;
   505 			break;
   506 		default:
   507 			__NK_ASSERT_ALWAYS(0);		// undefined memory type
   508 			texcb = KArmV6MemAttSO;
   509 			break;
   510 			}
   511 		pte |= ((texcb&0x1c)<<4) | ((texcb&0x03)<<2);
   512 
   513 	#endif
   514 
   515 	TRACE2(("Mmu::BlankPte(%x,%x) returns 0x%x",aAttributes,aPteType,pte));
   516 	return pte;
   517 	}
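        // For reference, the ARMv6/ARMv7 short-descriptor small page fields assembled above
        // are laid out as follows (the page's physical address bits 31:12 are ORed in by the
        // caller):
        //   bit 0     XN  (not executable)
        //   bit 1     1   (small page descriptor)
        //   bits 3:2  C,B
        //   bits 5:4  AP[1:0]  (AP1 set = user accessible)
        //   bits 8:6  TEX[2:0]
        //   bit 9     AP[2]    (set = not writable)
        //   bit 10    S        (shareable)
        //   bit 11    nG       (not global, i.e. ASID-specific)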
   518 
   519 
   520 /**
   521 Calculate PDE and PTE which represent a page table mapping for an existing
   522 section mapping.
   523 
    524 @param[in,out] aPde On entry, the PDE for the existing section mapping;
    525 					on return, a PDE for a page table mapping with a zero physical address component.
   526 
   527 @return The PTE value for the first entry in the page table.
   528 */
   529 TPte Mmu::SectionToPageEntry(TPde& aPde)
   530 	{
   531 	TPde pde = aPde;
   532 
   533 	// calculate new PTE...
   534 	TPte pte = pde&0xc; // copy CB bits
   535 	if(pde&KArmV6PdeSectionXN)
   536 		pte |= KArmV6PteSmallXN; // copy XN bit
   537 	pte |= (pde&(0xff<<10))>>6; // copy NG, S, APX, TEX, AP bits
   538 	pte |= KArmV6PteSmallPage;
   539 
   540 	// calculate new PDE...
   541 	pde &= 0x3e0;	// keep IMP and DOMAIN
   542 	pde |= KArmV6PdePageTable;
   543 
   544 	aPde = pde;
   545 	return pte;
   546 	}
   547 
   548 
   549 /**
    550 Calculate a PDE value which represents a section mapping for an existing
   551 page table mapping.
   552 
    553 @pre The existing page table contains mappings for a chunk-sized and
   554 	 aligned contiguous region.
   555 
   556 @param aPte A PTE from the existing page table.
   557 @param aPde The existing PDE for the page table mappings.
   558 			(Physical address portion is ignored.)
   559 
    560 @return A PDE value for a section mapping.
   561 */
   562 TPde Mmu::PageToSectionEntry(TPte aPte, TPde aPde)
   563 	{
   564 	TPde pde = aPde&0x3e0;	// keep IMP and DOMAIN
   565 	pde |= aPte&(KPdeSectionAddrMask|0xc); // copy address and CB bits
   566 	if(aPte&KArmV6PteSmallXN)
   567 		pde |= KArmV6PdeSectionXN; // copy XN bit
   568 	pde |= (aPte&(0xff<<4))<<6;  // copy NG, S, APX, TEX, AP bits
   569 	pde |= KArmV6PdeSection;
   570 	return pde;
   571 	}
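        // The shifts in SectionToPageEntry()/PageToSectionEntry() rely on the nG, S, APX,
        // TEX and AP fields having the same relative layout in section and small page
        // descriptors, offset by 6 bit positions (bits 17:10 in a section PDE versus
        // bits 11:4 in a small page PTE). The C,B bits occupy bits 3:2 in both formats and
        // are copied directly, while XN sits at bit 4 in a section PDE but bit 0 in a small
        // page PTE and so is translated explicitly.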
   572 
   573 
   574 /**
    575 Transform the specified memory attributes into the canonical form relevant to
   576 the platform the code is running on. This applies defaults and overrides to
   577 the attributes to return what should be used with the MMU.
   578 */
   579 TMemoryAttributes Mmu::CanonicalMemoryAttributes(TMemoryAttributes aAttr)
   580 	{
   581 	TUint attr = aAttr;
   582 	if(attr&EMemoryAttributeDefaultShareable)
   583 		{
   584 		// sharing not specified, use default...
   585 #if defined	(__CPU_USE_SHARED_MEMORY)
   586 		attr |= EMemoryAttributeShareable;
   587 #else
   588 		attr &= ~EMemoryAttributeShareable;
   589 #endif
   590 		}
   591 
   592 #if defined(FAULTY_NONSHARED_DEVICE_MEMORY)
   593 	if((attr&(EMemoryAttributeShareable|EMemoryAttributeTypeMask))==EMemoryAttributeDevice)
   594 		{
   595 		// make unshared device memory into shared strongly ordered memory...
   596 		attr ^= EMemoryAttributeShareable;
   597 		attr ^= EMemoryAttributeDevice^EMemoryAttributeStronglyOrdered;
   598 		}
   599 #endif
   600 
   601 #if	defined(__SMP__) || defined(__CPU_FORCE_SHARED_MEMORY_IF_CACHED)
   602 	TMemoryType type = (TMemoryType)(attr&KMemoryTypeMask);
   603 	if(CacheMaintenance::IsCached(type))
   604 		{
   605 		// force cached memory to be shared memory on SMP systems...
   606 		attr |= EMemoryAttributeShareable;
   607 		}
   608 #endif
   609 
   610 	return (TMemoryAttributes)(attr&EMemoryAttributeMask);
   611 	}
   612 
   613 /**
   614 Method called to initialise RAM pages when they are allocated for a new use.
   615 This performs any cache synchronisation required to remove old entries
   616 and also wipes the contents of the memory (if requested via \a aFlags).
   617 
   618 @param aPageList	Pointer to a list of physical addresses for the RAM pages,
   619 					or, if the least significant bit of this value is set, then
   620 					the rest of the value is the physical address of a contiguous
   621 					region of RAM pages being allocated.
   622 
   623 @param aCount		The number of pages.
   624 
   625 @param aFlags		A set of flag values from #TRamAllocFlags which indicate
   626 					the memory type the pages will be used for and whether
   627 					the contents should be wiped.
   628 
    629 @param aReallocate	True if the RAM pages have already been allocated previously
    630 					and are being reinitialised, e.g. by DMemoryManager::ReAllocDecommitted.
    631 					False to indicate that these pages have been newly allocated (i.e. are in
    632 					the SPageInfo::EUnused state).
   633 
   634 @pre #RamAllocLock held.
   635 */
   636 void Mmu::PagesAllocated(TPhysAddr* aPageList, TUint aCount, TRamAllocFlags aFlags, TBool aReallocate)
   637 	{
   638 	TRACE2(("Mmu::PagesAllocated(0x%08x,%d,0x%x,%d)",aPageList, aCount, aFlags, (bool)aReallocate));
   639 	__NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
   640 
   641 	TBool wipe = !(aFlags&EAllocNoWipe); // do we need to wipe page contents?
   642 	TUint8 wipeByte = (aFlags&EAllocUseCustomWipeByte) ? (aFlags>>EAllocWipeByteShift)&0xff : 0x03; // value to wipe memory with
   643 
   644 	// process each page in turn...
   645 	while(aCount--)
   646 		{
   647 		// get physical address of next page...
   648 		TPhysAddr pagePhys;
   649 		if((TPhysAddr)aPageList&1)
   650 			{
   651 			// aPageList is actually the physical address to use...
   652 			pagePhys = (TPhysAddr)aPageList&~1;
   653 			*(TPhysAddr*)&aPageList += KPageSize;
   654 			}
   655 		else
   656 			pagePhys = *aPageList++;
   657 		__NK_ASSERT_DEBUG((pagePhys&KPageMask)==0);
   658 
   659 		// get info about page...
   660 		SPageInfo* pi = SPageInfo::FromPhysAddr(pagePhys);
   661 		TMemoryType oldType = (TMemoryType)(pi->Flags(true)&KMemoryTypeMask);
   662 		TBool oldTypeNormal = CacheMaintenance::IsNormal(oldType);
   663 
   664 		TRACE2(("Mmu::PagesAllocated page=0x%08x, oldType=%d, wipe=%d, colour=%d",pagePhys,oldType,wipe,pi->Index(true)&KPageColourMask));
   665 		if(wipe || oldTypeNormal)
   666 			{
   667 			// work out temporary mapping values...
   668 			TUint colour = pi->Index(true)&KPageColourMask;
   669 			TLinAddr tempLinAddr = iTempMap[0].iLinAddr+colour*KPageSize;
   670 			TPte* tempPte = iTempMap[0].iPtePtr+colour;
   671 
   672 			if(oldTypeNormal)
   673 				{
   674 				// cache maintenance required. Prepare temporary mapping.
   675 				*tempPte = pagePhys | iTempPteCacheMaintenance;
   676 				CacheMaintenance::SinglePteUpdated((TLinAddr)tempPte);
   677 				InvalidateTLBForPage(tempLinAddr|KKernelOsAsid);
   678 
    679 				// will hold additional flags for the CacheMaintenance::PageToReuse call
   680 				TInt pageToReuseMask = 0;
   681 				
   682 				// check if old and new mappings are the same. (Wiping needs temporary
   683 				// mapping which may not be the same as the old and new mapping.)
   684 				TMemoryType newType = (TMemoryType)(aFlags&KMemoryTypeMask); // memory type that pages will be used for
    685 				if (!wipe && (newType == oldType))
   686 					pageToReuseMask |= CacheMaintenance::EOldAndNewMappingMatch;
   687 
   688 				MmuLock::Lock();
   689 
    690 				// decide whether to trigger maintenance of the entire cache(s).
   691 				if(CacheMaintenance::IsPageToReuseThresholdReached(iCacheInvalidatePageCount))
   692 					{
   693 					// enough pages to make it worth triggering maintenance of entire cache(s)
   694 					pageToReuseMask |= CacheMaintenance::EThresholdReached;
   695 					++iCacheInvalidateCounter;
   696 					iCacheInvalidatePageCount = 0; // all pages will be partially synced 
   697 					}
   698 				
   699 				if(CacheMaintenance::IsCached(oldType) && !aReallocate)
   700 					{
   701 					if(pi->CacheInvalidateCounter()==(TUint32)iCacheInvalidateCounter)
   702 						{
   703 						// one less unused page in the L1 cache...
   704 						__NK_ASSERT_DEBUG(iCacheInvalidatePageCount);
   705 						--iCacheInvalidatePageCount;
   706 						}
   707 					else
   708 						{
    709 						// our page has already been partially maintained in the cache
   710 						// by a previous PageToReuse call.
   711 						pageToReuseMask |= CacheMaintenance::EPageHasBeenPartiallySynced;
   712 						}
   713 					}
   714 				
   715 				MmuLock::Unlock();
   716 				
   717 				TBool pageRemovedFromCache = CacheMaintenance::PageToReuse(tempLinAddr, oldType, pagePhys, pageToReuseMask);
   718 				if(pageRemovedFromCache && !aReallocate)
   719 					pi->SetUncached();
   720 				}
   721 
   722 			if(wipe)
   723 				{
    724 				// Wiping requires an uncached normal temporary mapping. Change the mapping if
    725 				// necessary or, in the !oldTypeNormal case, configure it here for the first time.
   726 				if (!oldTypeNormal || (CacheMaintenance::TemporaryMapping()!=EMemAttNormalUncached))
   727 					{
   728 					*tempPte = pagePhys | iTempPteUncached;
   729 					CacheMaintenance::SinglePteUpdated((TLinAddr)tempPte);
   730 					InvalidateTLBForPage(tempLinAddr|KKernelOsAsid);
   731 					}
   732 				// wipe contents of memory...
   733 				memset((TAny*)tempLinAddr, wipeByte, KPageSize);
   734 				CacheMaintenance::PageToReuse(tempLinAddr, EMemAttNormalUncached, pagePhys);
   735 				}
   736 
   737 			// invalidate temporary mapping...
   738 			*tempPte = KPteUnallocatedEntry;
   739 			CacheMaintenance::SinglePteUpdated((TLinAddr)tempPte);
   740 			InvalidateTLBForPage(tempLinAddr|KKernelOsAsid);
   741 			}
   742 
   743 		// indicate page has been allocated...
   744 		if(!aReallocate)
   745 			pi->SetAllocated();
   746 
   747 		// loop round for next page...
   748 		} // end of while(aCount--)
   749 	}
   750 
   751 
   752 /**
   753 Method called to update the state of a RAM page when it is freed.
   754 This sets the page state to SPageInfo::EUnused.
   755 
   756 @param aPageInfo	The page information structure for the RAM page.
   757 
   758 @pre #MmuLock held.
   759 */
   760 void Mmu::PageFreed(SPageInfo* aPageInfo)
   761 	{
   762 	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
   763 
   764 	if(aPageInfo->Type()==SPageInfo::EUnused)
   765 		return;
   766 
   767 	aPageInfo->SetUnused();
   768 
   769 	TMemoryType type = (TMemoryType)(aPageInfo->Flags()&KMemoryTypeMask);
   770 	if(CacheMaintenance::IsCached(type))
   771 		{
   772 		// another unused page with L1 cache entries...
   773 		aPageInfo->SetCacheInvalidateCounter(iCacheInvalidateCounter);
   774 		++iCacheInvalidatePageCount;
   775 		}
   776 
   777 	TRACE2(("Mmu::PageFreed page=0x%08x type=%d colour=%d",aPageInfo->PhysAddr(),aPageInfo->Flags()&KMemoryTypeMask,aPageInfo->Index()&KPageColourMask));
   778 	}
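        // PageFreed() and PagesAllocated() cooperate on deferred cache maintenance: each
        // cached page that becomes unused bumps iCacheInvalidatePageCount, and when pages
        // are later reused PagesAllocated() either maintains them individually or, once the
        // count reaches CacheMaintenance's threshold, triggers maintenance of the entire
        // cache and advances iCacheInvalidateCounter so pages already covered by that pass
        // can be recognised via SPageInfo::CacheInvalidateCounter().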
   779 
   780 /**
   781 Remove the contents of RAM pages from any memory caches.
   782 
   783 @param aPages		Pointer to a list of physical addresses for the RAM pages,
   784 					or, if the least significant bit of this value is set, then
   785 					the rest of the value is the physical address of a contiguous
   786 					region of RAM pages.
   787 
   788 @param aCount		The number of pages.
   789 
   790 @param aAttributes	The memory attributes of the pages.
   791 
   792 @param aColour 		The colour for the first page;
   793 					consecutive pages will be coloured accordingly.
    794 					Only the #KPageColourShift least significant bits are used,
   795 					therefore an index into a memory object's memory can be
   796 					used for this value.
   797 */
   798 void Mmu::CleanAndInvalidatePages(TPhysAddr* aPages, TUint aCount, TMemoryAttributes aAttributes, TUint aColour)
   799 	{
   800 	TMemoryType type = (TMemoryType)(aAttributes&EMemoryAttributeTypeMask);
   801 
   802 	if(!CacheMaintenance::IsNormal(type))
   803 		{
   804 		TRACE2(("Mmu::CleanAndInvalidatePages - nothing to do"));
   805 		return;
   806 		}
   807 	
   808 	RamAllocLock::Lock();
   809 
   810 	while(aCount--)
   811 		{
   812 		TPhysAddr pagePhys = *aPages++;
   813 		TRACE2(("Mmu::CleanAndInvalidatePages 0x%08x",pagePhys));
   814 
   815 		// work out temporary mapping values...
   816 		aColour &= KPageColourMask;
   817 		TLinAddr tempLinAddr = iTempMap[0].iLinAddr+aColour*KPageSize;
   818 		TPte* tempPte = iTempMap[0].iPtePtr+aColour;
   819 		++aColour;
   820 
   821 		// temporarily map page...
   822 		*tempPte = pagePhys | iTempPteCacheMaintenance;
   823 		CacheMaintenance::SinglePteUpdated((TLinAddr)tempPte);
   824 		InvalidateTLBForPage(tempLinAddr|KKernelOsAsid);
   825 
   826 		// preserve memory content and remove from cache...
   827 		CacheMaintenance::PageToPreserveAndReuse(tempLinAddr, type, pagePhys);
   828 
   829 		// invalidate temporary mapping...
   830 		*tempPte = KPteUnallocatedEntry;
   831 		CacheMaintenance::SinglePteUpdated((TLinAddr)tempPte);
   832 		InvalidateTLBForPage(tempLinAddr|KKernelOsAsid);
   833 
   834 		RamAllocLock::Flash();
   835 		}
   836 	RamAllocLock::Unlock();
   837 	}
   838 
   839 
   840 extern void UnlockIPCAlias();
   841 extern void LockIPCAlias();
   842 
   843 
   844 TInt DMemModelThread::Alias(TLinAddr aAddr, DMemModelProcess* aProcess, TInt aSize, TLinAddr& aAliasAddr, TUint& aAliasSize)
   845 //
   846 // Set up an alias mapping starting at address aAddr in specified process.
   847 // Note: Alias is removed if an exception is trapped by DThread::IpcExcHandler.
   848 //
   849 	{
   850 	TRACE2(("Thread %O Alias %08x+%x Process %O",this,aAddr,aSize,aProcess));
   851 	__NK_ASSERT_DEBUG(this==TheCurrentThread); // many bad things can happen if false
   852 	// If there is an existing alias it should be on the same process otherwise
   853 	// the os asid reference may be leaked.
   854 	__NK_ASSERT_DEBUG(!iAliasLinAddr || aProcess == iAliasProcess);
   855 
   856 	if(TUint(aAddr^KIPCAlias)<TUint(KIPCAliasAreaSize))
   857 		return KErrBadDescriptor; // prevent access to alias region
   858 
   859 	// Grab the mmu lock before opening a reference on os asid so that this thread 
   860 	// is in an implicit critical section and therefore can't leak the reference by
   861 	// dying before iAliasLinAddr is set.
   862 	MmuLock::Lock();
   863 
   864 	TInt osAsid;
   865 	if (!iAliasLinAddr)
   866 		{// There isn't any existing alias.
    867 		// Open a reference on aProcess's os asid so that it is not freed and/or reused
   868 		// while we are aliasing an address belonging to it.
   869 		osAsid = aProcess->TryOpenOsAsid();
   870 		if (osAsid < 0)
   871 			{// Couldn't open os asid so aProcess is no longer running.
   872 			MmuLock::Unlock();
   873 			return KErrBadDescriptor;
   874 			}
   875 		}
   876 	else
   877 		{
    878 		// Just read the os asid of the process being aliased; we already have a reference on it.
   879 		osAsid = aProcess->OsAsid();
   880 		}
   881 
    882 	// Now we have the os asid, check access to kernel memory.
   883 	if(aAddr >= KUserMemoryLimit && osAsid != (TUint)KKernelOsAsid)
   884 		{
   885 		NKern::ThreadEnterCS();
   886 		MmuLock::Unlock();
   887 		if (!iAliasLinAddr)
    888 			{// Close the new reference as RemoveAlias won't, because iAliasLinAddr is not set.
   889 			aProcess->AsyncCloseOsAsid();	// Asynchronous close as this method should be quick.
   890 			}
   891 		NKern::ThreadLeaveCS();
   892 		return KErrBadDescriptor; // prevent access to supervisor only memory
   893 		}
   894 
   895 	// Now we know all accesses to global memory are safe so check if aAddr is global.
   896 	if(aAddr >= KGlobalMemoryBase)
   897 		{
   898 		// address is in global section, don't bother aliasing it...
   899 		if (!iAliasLinAddr)
   900 			{// Close the new reference as not required.
   901 			NKern::ThreadEnterCS();
   902 			MmuLock::Unlock();
   903 			aProcess->AsyncCloseOsAsid(); // Asynchronous close as this method should be quick.
   904 			NKern::ThreadLeaveCS();
   905 			}
   906 		else
   907 			{// Remove the existing alias as it is not required.
   908 			DoRemoveAlias(iAliasLinAddr);	// Releases mmulock.
   909 			}
   910 		aAliasAddr = aAddr;
   911 		TInt maxSize = KChunkSize-(aAddr&KChunkMask);
   912 		aAliasSize = aSize<maxSize ? aSize : maxSize;
   913 		TRACE2(("DMemModelThread::Alias() abandoned as memory is globally mapped"));
   914 		return KErrNone;
   915 		}
   916 
   917 	TPde* pd = Mmu::PageDirectory(osAsid);
   918 	TInt pdeIndex = aAddr>>KChunkShift;
   919 	TPde pde = pd[pdeIndex];
   920 	pde = PDE_IN_DOMAIN(pde, KIPCAliasDomain);	// change domain for PDE
    921 	// Get the os asid; this is the current thread's process so no reference is needed.
   922 	TUint32 local_asid = ((DMemModelProcess*)iOwningProcess)->OsAsid();
   923 #ifdef __SMP__
   924 	TLinAddr aliasAddr;
   925 #else
   926 	TLinAddr aliasAddr = KIPCAlias+(aAddr&(KChunkMask & ~KPageMask));
   927 #endif
   928 	if(pde==iAliasPde && iAliasLinAddr)
   929 		{
   930 		// pde already aliased, so just update linear address...
   931 #ifdef __SMP__
   932 		__NK_ASSERT_DEBUG(iCpuRestoreCookie>=0);
   933 		aliasAddr = iAliasLinAddr & ~KChunkMask;
   934 		aliasAddr |= (aAddr & (KChunkMask & ~KPageMask));
   935 #endif
   936 		iAliasLinAddr = aliasAddr;
   937 		}
   938 	else
   939 		{
   940 		// alias PDE changed...
   941 		if(!iAliasLinAddr)
   942 			{
   943 			UnlockIPCAlias();
   944 			TheMmu.iAliasList.Add(&iAliasLink); // add to list if not already aliased
   945 #ifdef __SMP__
   946 			__NK_ASSERT_DEBUG(iCpuRestoreCookie==-1);
   947 			iCpuRestoreCookie = NKern::FreezeCpu();	// temporarily lock current thread to this processor
   948 #endif
   949 			}
   950 		iAliasPde = pde;
   951 		iAliasProcess = aProcess;
   952 #ifdef __SMP__
   953 		TSubScheduler& ss = SubScheduler();		// OK since we are locked to this CPU
   954 		aliasAddr = TLinAddr(ss.i_AliasLinAddr) + (aAddr & (KChunkMask & ~KPageMask));
   955 		iAliasPdePtr = (TPde*)(TLinAddr(ss.i_AliasPdePtr) + (local_asid << KPageDirectoryShift));
   956 #endif
   957 		iAliasLinAddr = aliasAddr;
   958 		*iAliasPdePtr = pde;
   959 		SinglePdeUpdated(iAliasPdePtr);
   960 		}
   961 
   962 	TRACE2(("DMemModelThread::Alias() PDEntry=%x, iAliasLinAddr=%x",pde, aliasAddr));
   963 	LocalInvalidateTLBForPage(aliasAddr | local_asid);
   964 	TInt offset = aAddr&KPageMask;
   965 	aAliasAddr = aliasAddr | offset;
   966 	TInt maxSize = KPageSize - offset;
   967 	aAliasSize = aSize<maxSize ? aSize : maxSize;
   968 	iAliasTarget = aAddr & ~KPageMask;
   969 
   970 	MmuLock::Unlock();
   971 
   972 	return KErrNone;
   973 	}
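        // Note: on SMP builds the alias virtual address is per-CPU (TSubScheduler::i_AliasLinAddr,
        // set up in Mmu::Init1()), so the thread is frozen onto its current CPU with
        // NKern::FreezeCpu() for as long as the alias exists; DoRemoveAlias() releases it.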
   974 
   975 
   976 void DMemModelThread::RemoveAlias()
   977 //
   978 // Remove alias mapping (if present)
   979 //
   980 	{
   981 	TRACE2(("Thread %O RemoveAlias", this));
   982 	__NK_ASSERT_DEBUG(this==TheCurrentThread); // many bad things can happen if false
   983 
   984 	TLinAddr addr = iAliasLinAddr;
   985 	if(addr)
   986 		{
   987 		MmuLock::Lock();
   988 
   989 		DoRemoveAlias(addr);	// Unlocks mmulock.
   990 		}
   991 	}
   992 
   993 
   994 /**
   995 Remove the alias mapping.
   996 
    997 @pre #MmuLock held.
   998 @post MmuLock released.
   999 */
  1000 void DMemModelThread::DoRemoveAlias(TLinAddr aAddr)
  1001 	{
  1002 	LockIPCAlias();
  1003 	iAliasLinAddr = 0;
  1004 	iAliasPde = KPdeUnallocatedEntry;
  1005 	*iAliasPdePtr = KPdeUnallocatedEntry;
  1006 	SinglePdeUpdated(iAliasPdePtr);
  1007 	__NK_ASSERT_DEBUG((aAddr&KPageMask)==0);
   1008 	// Invalidate the TLB using the os asid; no need to open a reference as this
  1009 	// is the current thread's process os asid.
  1010 	LocalInvalidateTLBForPage(aAddr | ((DMemModelProcess*)iOwningProcess)->OsAsid());
  1011 	iAliasLink.Deque();
  1012 #ifdef __SMP__
  1013 	__NK_ASSERT_DEBUG(iCpuRestoreCookie>=0);
  1014 	NKern::EndFreezeCpu(iCpuRestoreCookie);
  1015 	iCpuRestoreCookie = -1;
  1016 #endif
  1017 
  1018 	// Must close the os asid while in critical section to prevent it being 
  1019 	// leaked.  However, we can't hold the mmu lock so we have to enter an 
   1020 	// explicit critical section. It is ok to release the mmu lock as the
  1021 	// iAliasLinAddr and iAliasProcess members are only ever updated by the 
  1022 	// current thread.
  1023 	NKern::ThreadEnterCS();
  1024 	MmuLock::Unlock();
  1025 	iAliasProcess->AsyncCloseOsAsid(); // Asynchronous close as this method should be quick.
  1026 	NKern::ThreadLeaveCS();
  1027 	}
  1028 
  1029 
  1030 TInt M::DemandPagingFault(TAny* aExceptionInfo)
  1031 	{
  1032 	TArmExcInfo& exc=*(TArmExcInfo*)aExceptionInfo;
  1033 
  1034 	// permissions required by faulting memory access...
  1035 	TUint accessPermissions = EUser; // we only allow paging of user memory
  1036 
  1037 	// get faulting address...
  1038 	TLinAddr faultAddress = exc.iFaultAddress;
  1039 	if(exc.iExcCode==EArmExceptionPrefetchAbort)
  1040 		{
  1041 		// fault trying to read code to execute...
  1042 		accessPermissions |= EExecute;
  1043 		}
  1044 	else if(exc.iExcCode!=EArmExceptionDataAbort)
  1045 		return KErrUnknown; // not prefetch or data abort
  1046 
  1047 	// check fault type...
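        	// The masks pick out the short-descriptor fault status (FS) encodings for
        	// translation faults (section/page) and permission faults, i.e. (FSR & 0x405) == 5,
        	// and for instruction cache maintenance faults, i.e. (FSR & 0x40f) == 4.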
  1048 	if((exc.iFaultStatus&0x405) != 5 && (exc.iFaultStatus&0x40f) != 4)
  1049 		return KErrUnknown; // not translation, permission or instruction cache maintenance fault.
  1050 
  1051 	// check access type...
  1052 	if(exc.iFaultStatus&(1<<11))
  1053 		accessPermissions |= EReadWrite;
  1054 
  1055 	// let TheMmu handle the fault...
  1056 	return TheMmu.HandlePageFault(exc.iR15, faultAddress, accessPermissions, aExceptionInfo);
  1057 	}
  1058 
  1059