os/kernelhwsrv/kernel/eka/memmodel/epoc/flexible/mmu/arm/xmmu.h
// Copyright (c) 2007-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
//

#include "cache_maintenance.inl"


/**
 @file
 @internalComponent
*/

#if defined(__GNUC__) && !defined(__MARM_ARM4__)
#define	__VOLATILE__	volatile
#else
#define __VOLATILE__
#endif

#if defined(__SMP__) && defined(__CPU_ARM11MP__)
#define COARSE_GRAINED_TLB_MAINTENANCE
#define BROADCAST_TLB_MAINTENANCE
#endif



FORCE_INLINE void __arm_dmb()
	{
	#if defined(__CPU_ARMV6)
		// dmb instruction...
		#ifdef __GNUC__
			TInt zero = 0;
			asm __VOLATILE__ ("mcr p15, 0, %0, c7, c10, 5 " : : "r"(zero));
		#elif defined(__ARMCC__)
			TInt zero = 0;
			asm("mcr p15, 0, zero, c7, c10, 5 ");
		#elif defined(__GCCXML__)
			// empty
		#else
			#error Unknown compiler
		#endif
	#elif defined(__CPU_ARMV7)
		// deprecated CP15 version of DMB...
		#ifdef __GNUC__
			TInt zero = 0;
			asm __VOLATILE__ ("mcr p15, 0, %0, c7, c10, 5 " : : "r"(zero));
		#elif defined(__ARMCC__)
			TInt zero = 0;
			asm("mcr p15, 0, zero, c7, c10, 5 ");
		#elif defined(__GCCXML__)
			// empty
		#else
			#error Unknown compiler
		#endif
	#else
		// non inline version...
		__e32_memory_barrier();
	#endif
	}


FORCE_INLINE void __arm_dsb()
	{
	#if defined(__CPU_ARMV6)
		// drain write buffer...
		#ifdef __GNUC__
			TInt zero = 0;
			asm __VOLATILE__ ("mcr p15, 0, %0, c7, c10, 4 " : : "r"(zero));
		#elif defined(__ARMCC__)
			TInt zero = 0;
			asm("mcr p15, 0, zero, c7, c10, 4 ");
		#elif defined(__GCCXML__)
			// empty
		#else
			#error Unknown compiler
		#endif
	#elif defined(__CPU_ARMV7)
		// deprecated CP15 version of DSB...
		#ifdef __GNUC__
			TInt zero = 0;
			asm __VOLATILE__ ("mcr p15, 0, %0, c7, c10, 4 " : : "r"(zero));
		#elif defined(__ARMCC__)
			TInt zero = 0;
			asm("mcr p15, 0, zero, c7, c10, 4 ");
		#elif defined(__GCCXML__)
			// empty
		#else
			#error Unknown compiler
		#endif
	#else
		// non inline version...
		__e32_io_completion_barrier();
	#endif
	}


extern "C" void __e32_instruction_barrier();

FORCE_INLINE void __arm_isb()
	{
	#if defined(__CPU_ARMV6)
		// prefetch flush...
		#ifdef __GNUC__
			TInt zero = 0;
			asm __VOLATILE__ ("mcr p15, 0, %0, c7, c5, 4 " : : "r"(zero));
		#elif defined(__ARMCC__)
			TInt zero = 0;
			asm("mcr p15, 0, zero, c7, c5, 4 ");
		#elif defined(__GCCXML__)
			// empty
		#else
			#error Unknown compiler
		#endif
	#elif defined(__CPU_ARMV7)
		// deprecated CP15 version of ISB...
		#ifdef __GNUC__
			TInt zero = 0;
			asm __VOLATILE__ ("mcr p15, 0, %0, c7, c5, 4 " : : "r"(zero));
		#elif defined(__ARMCC__)
			TInt zero = 0;
			asm("mcr p15, 0, zero, c7, c5, 4 ");
		#elif defined(__GCCXML__)
			// empty
		#else
			#error Unknown compiler
		#endif
	#else
		// non inline version...
		__e32_instruction_barrier();
	#endif
	}

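// The ARMv6/ARMv7 paths above use the (ARMv7-deprecated) CP15 c7 encodings of
// the barriers. A minimal sketch of the equivalent native ARMv7 instructions,
// assuming GCC-style inline assembly; illustrative only, not part of this header:
#if 0
FORCE_INLINE void __arm_dmb_v7() { asm __VOLATILE__ ("dmb" : : : "memory"); }	// data memory barrier
FORCE_INLINE void __arm_dsb_v7() { asm __VOLATILE__ ("dsb" : : : "memory"); }	// data synchronisation barrier
FORCE_INLINE void __arm_isb_v7() { asm __VOLATILE__ ("isb" : : : "memory"); }	// instruction synchronisation barrier
#endif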

/**
Branch predictor invalidate all
*/
FORCE_INLINE void __arm_bpiall()
	{
	#ifdef __GNUC__
		TInt zero = 0;
		asm __VOLATILE__ ("mcr p15, 0, %0, c7, c5, 6 " : : "r"(zero));
	#elif defined(__ARMCC__)
		TInt zero = 0;
		asm("mcr p15, 0, zero, c7, c5, 6 ");
	#elif defined(__GCCXML__)
		// empty
	#else
		#error Unknown compiler
	#endif
	}


#ifdef __SMP__

/**
Branch predictor invalidate all inner-shareable
*/
FORCE_INLINE void __arm_bpiallis()
	{
	// branch predictor invalidate all inner-shareable
	#ifdef __GNUC__
		TInt zero = 0;
		asm __VOLATILE__ ("mcr p15, 0, %0, c7, c1, 6 " : : "r"(zero));
	#elif defined(__ARMCC__)
		TInt zero = 0;
		asm("mcr p15, 0, zero, c7, c1, 6 ");
	#elif defined(__GCCXML__)
		// empty
	#else
		#error Unknown compiler
	#endif
	}

#endif

/**
Ensure that a change to a page directory entry is visible to the hardware
page-table walker. Call this function whenever a single entry in a page
directory has been changed.
*/
FORCE_INLINE void SinglePdeUpdated(TPde* aPde)
	{
	CacheMaintenance::SinglePdeUpdated((TLinAddr)aPde);
	}


#ifdef BROADCAST_TLB_MAINTENANCE

/**
Signal other CPU cores to perform TLB maintenance.

@param aLinAddrAndAsid	If == 0, then InvalidateTLB;
						if < KMmuAsidCount, then InvalidateTLBForAsid;
						else InvalidateTLBForPage.
*/
extern void BroadcastInvalidateTLB(TLinAddr aLinAddrAndAsid=0);

#endif
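
// The single parameter of BroadcastInvalidateTLB() multiplexes all three kinds
// of request, as described above. A minimal decode sketch for the receiving
// core (illustrative only; not the actual implementation):
#if 0
void DoBroadcastInvalidateTLB(TLinAddr aLinAddrAndAsid)
	{
	if(aLinAddrAndAsid==0)
		LocalInvalidateTLB();							// flush entire TLB
	else if(aLinAddrAndAsid<KMmuAsidCount)
		LocalInvalidateTLBForAsid(aLinAddrAndAsid);		// flush one ASID
	else
		LocalInvalidateTLBForPage(aLinAddrAndAsid);		// flush one page+ASID
	}
#endif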


/**
Invalidate a single I+D TLB entry on this CPU core only.
@param aLinAddrAndAsid Virtual address of a page of memory ORed with the ASID value.
*/
FORCE_INLINE void LocalInvalidateTLBForPage(TLinAddr aLinAddrAndAsid)
	{
	#ifdef __GNUC__
		#if defined(__CPU_ARM11MP__) // why?...
			asm __VOLATILE__ ("mcr p15, 0, %0, c8, c7, 3 " : : "r"(aLinAddrAndAsid));
		#else
			asm __VOLATILE__ ("mcr p15, 0, %0, c8, c7, 1 " : : "r"(aLinAddrAndAsid));
		#endif
	#elif defined(__ARMCC__)
		#if defined(__CPU_ARM11MP__) // why?...
			asm("mcr p15, 0, aLinAddrAndAsid, c8, c7, 3 ");
		#else
			asm("mcr p15, 0, aLinAddrAndAsid, c8, c7, 1 ");
		#endif
	#elif defined(__GCCXML__)
		// empty
	#else
		#error Unknown compiler
	#endif
	__arm_bpiall();
	__arm_dsb();
	__arm_isb();
	}


/**
Invalidate a single I+D TLB entry on all CPU cores.
@param aLinAddrAndAsid Virtual address of a page of memory ORed with the ASID value.
*/
FORCE_INLINE void InvalidateTLBForPage(TLinAddr aLinAddrAndAsid)
	{
	#ifdef BROADCAST_TLB_MAINTENANCE
		BroadcastInvalidateTLB(aLinAddrAndAsid);
	#elif !defined(__SMP__)
		LocalInvalidateTLBForPage(aLinAddrAndAsid);
	#else // __SMP__
		// inner-shareable invalidate...
		#ifdef __GNUC__
			asm __VOLATILE__ ("mcr p15, 0, %0, c8, c3, 1 " : : "r"(aLinAddrAndAsid));
		#elif defined(__ARMCC__)
			asm("mcr p15, 0, aLinAddrAndAsid, c8, c3, 1 ");
		#elif defined(__GCCXML__)
			// empty
		#else
			#error Unknown compiler
		#endif
		__arm_bpiallis();
		__arm_dsb();
		__arm_isb();
	#endif
	}


/**
Invalidate entire TLB on this CPU only
*/
FORCE_INLINE void LocalInvalidateTLB()
	{
	#ifdef __GNUC__
		asm __VOLATILE__ ("mcr p15, 0, r0, c8, c7, 0 ");
	#elif defined(__ARMCC__)
		TInt dummy = 0; // damned RVCT
		asm("mcr p15, 0, dummy, c8, c7, 0 ");
	#elif defined(__GCCXML__)
		// empty
	#else
		#error Unknown compiler
	#endif
	__arm_bpiall();
	__arm_dsb();
	__arm_isb();
	}


/**
Invalidate entire TLB on all CPU cores.
*/
FORCE_INLINE void InvalidateTLB()
	{
	#ifdef BROADCAST_TLB_MAINTENANCE
		BroadcastInvalidateTLB(0);
	#elif !defined(__SMP__)
		LocalInvalidateTLB();
	#else // __SMP__
		// inner-shareable invalidate...
		#ifdef __GNUC__
			TInt zero = 0;
			asm __VOLATILE__ ("mcr p15, 0, %0, c8, c3, 0 " : : "r"(zero));
		#elif defined(__ARMCC__)
			TInt zero = 0;
			asm("mcr p15, 0, zero, c8, c3, 0 ");
		#elif defined(__GCCXML__)
			// empty
		#else
			#error Unknown compiler
		#endif
		__arm_bpiallis();
		__arm_dsb();
		__arm_isb();
	#endif
	}


#if defined(__CPU_ARM1136__) && !defined(__CPU_ARM1136_ERRATUM_424067_FIXED)
#define INVALIDATE_TLB_BY_ASID_BROKEN
#endif
#if defined(__CPU_ARM1176__) && !defined(__CPU_ARM1176_ERRATUM_424692_FIXED)
#define INVALIDATE_TLB_BY_ASID_BROKEN
#endif


__ASSERT_COMPILE(KKernelOsAsid==0); // InvalidateTLBForAsid assumes this


/**
Invalidate all TLB entries which match the given ASID value (current CPU only)
*/
FORCE_INLINE void LocalInvalidateTLBForAsid(TUint aAsid)
	{
#ifndef INVALIDATE_TLB_BY_ASID_BROKEN
	if(aAsid&=0xff)
		{
#if defined(__CPU_CORTEX_A9__) && !defined(__CPU_ARM_A9_ERRATUM_571618_FIXED)
		__arm_dmb();	// ARM Cortex-A9 MPCore erratum 571618 workaround
						// Execute memory barrier before interruptible CP15 operations
#endif
		// invalidate all I+D TLB entries for ASID...
		#ifdef __GNUC__
			asm __VOLATILE__ ("mcr p15, 0, %0, c8, c7, 2 " : : "r"(aAsid));
		#elif defined(__ARMCC__)
			asm("mcr p15, 0, aAsid, c8, c7, 2 ");
		#elif defined(__GCCXML__)
			// empty
		#else
			#error Unknown compiler
		#endif
		}
	else
		// ASID==0 means 'kernel' memory. We have to invalidate the entire TLB here
		// as this is the only way of getting rid of global entries...
#endif
		{
		// invalidate entire TLB...
		#ifdef __GNUC__
			asm __VOLATILE__ ("mcr p15, 0, r0, c8, c7, 0 ");
		#elif defined(__ARMCC__)
			TInt dummy = 0; // damned RVCT
			asm("mcr p15, 0, dummy, c8, c7, 0 ");
		#elif defined(__GCCXML__)
			// empty
		#else
			#error Unknown compiler
		#endif
		}
	__arm_bpiall();
	__arm_dsb();
	__arm_isb();
	}


/**
Invalidate all TLB entries which match the given ASID value on all CPU cores.
*/
FORCE_INLINE void InvalidateTLBForAsid(TUint aAsid)
	{
	aAsid &= 0xff;
	#ifdef BROADCAST_TLB_MAINTENANCE
		BroadcastInvalidateTLB(aAsid);
	#elif !defined(__SMP__)
		LocalInvalidateTLBForAsid(aAsid);
	#else // __SMP__
		if(aAsid!=0)
			{
#if defined(__CPU_CORTEX_A9__) && !defined(__CPU_ARM_A9_ERRATUM_571618_FIXED)
			__arm_dmb();	// ARM Cortex-A9 MPCore erratum 571618 workaround
							// Execute memory barrier before interruptible CP15 operations
#endif
			// invalidate all I+D TLB entries for ASID...
			#ifdef __GNUC__
				asm __VOLATILE__ ("mcr p15, 0, %0, c8, c3, 2 " : : "r"(aAsid));
			#elif defined(__ARMCC__)
				asm("mcr p15, 0, aAsid, c8, c3, 2 ");
			#elif defined(__GCCXML__)
				// empty
			#else
				#error Unknown compiler
			#endif
			}
		else
			{
			// ASID==0 means 'kernel' memory. We have to invalidate the entire TLB here
			// as this is the only way of getting rid of global entries...
			#ifdef __GNUC__
				asm __VOLATILE__ ("mcr p15, 0, %0, c8, c3, 0 " : : "r"(aAsid));
			#elif defined(__ARMCC__)
				asm("mcr p15, 0, aAsid, c8, c3, 0 ");
			#elif defined(__GCCXML__)
				// empty
			#else
				#error Unknown compiler
			#endif
			}
		__arm_bpiallis();
		__arm_dsb();
		__arm_isb();
	#endif
	}

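// A hedged usage sketch for the ASID flush above (illustrative only; the
// caller name is hypothetical): once an address space is torn down, every
// translation tagged with its ASID must be purged before the ASID is reused.
#if 0
void OnAddressSpaceDestroyed(TUint aOsAsid)	// hypothetical caller
	{
	InvalidateTLBForAsid(aOsAsid);	// purge stale entries on all cores
	}
#endif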

/**
Return the virtual address of the page directory used for address space
\a aOsAsid. Note that the global page directory is mapped after each
address-space-specific page directory in such a way that the two appear
to be a single contiguous page directory mapping the entire 32-bit
virtual address range. I.e. the returned page directory address can
simply be indexed by any virtual address, regardless of whether that
address belongs to the given address space or lies in the global region.
*/
FORCE_INLINE TPde* Mmu::PageDirectory(TInt aOsAsid)
	{
	return (TPde*)(KPageDirectoryBase+(aOsAsid<<KPageDirectoryShift));
	}


/**
Return the virtual address of the Page Directory Entry (PDE) used to map
the region containing the virtual address \a aAddress in the address space
\a aOsAsid.
*/
FORCE_INLINE TPde* Mmu::PageDirectoryEntry(TInt aOsAsid, TLinAddr aAddress)
	{
	return PageDirectory(aOsAsid) + (aAddress>>KChunkShift);
	}

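// A minimal lookup sketch combining PageDirectory() and PageDirectoryEntry()
// (illustrative only; the helper name is hypothetical):
#if 0
TPde LookupPde(TInt aOsAsid, TLinAddr aAddress)
	{
	// the contiguous local+global directory is indexed by the
	// address's 1MB chunk number...
	return *Mmu::PageDirectoryEntry(aOsAsid, aAddress);
	}
#endif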

/**
Return the physical address mapped by the section mapping contained
in the given Page Directory Entry \a aPde. If \a aPde is not a
section mapping, then KPhysAddrInvalid is returned.
*/
FORCE_INLINE TPhysAddr Mmu::PdePhysAddr(TPde aPde)
	{
	if((aPde&KPdePresentMask)==KArmV6PdeSection)
		return aPde&KPdeSectionAddrMask;
	return KPhysAddrInvalid;
	}

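// A hedged sketch of PdePhysAddr() in use: translating a virtual address
// through a section mapping (illustrative only; assumes KChunkMask is the
// offset mask within a 1MB chunk):
#if 0
TPhysAddr SectionPhysAddr(TInt aOsAsid, TLinAddr aAddress)
	{
	TPde pde = *Mmu::PageDirectoryEntry(aOsAsid, aAddress);
	TPhysAddr base = Mmu::PdePhysAddr(pde);
	if(base==KPhysAddrInvalid)
		return KPhysAddrInvalid;			// not a section mapping
	return base|(aAddress&KChunkMask);		// add offset within the section
	}
#endif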

#ifdef __CPU_MEMORY_TYPE_REMAPPING

/*
Bits in a PTE which represent access permissions...

AP2 AP1 AP0		usr	wr
0	0	x		n	y
0	1	x		y	y
1	0	x		n	n
1	1	x		y	n
*/

/**
Modify a Page Table Entry (PTE) value so it restricts access to the memory it maps.
The returned PTE value is the same as \a aPte but with its access permissions set
to read-only if \a aReadOnly is true, and set to allow no access if \a aReadOnly is false.
*/
FORCE_INLINE TPte Mmu::MakePteInaccessible(TPte aPte, TBool aReadOnly)
	{
	__NK_ASSERT_DEBUG((aPte&KArmV6PteTypeMask)!=KArmV6PteLargePage);
	if(aPte&KPtePresentMask)
		{
		__NK_ASSERT_DEBUG((bool)(aPte&KArmV6PteSmallTEX1)==(bool)(aPte&KArmV6PteSmallXN)); // TEX1 should be a copy of XN
		if(aReadOnly)
			aPte |= KArmV6PteAP2; // make read only
		else
			aPte &= ~KPtePresentMask; // make inaccessible
		}
	return aPte;
	}


/**
Modify a Page Table Entry (PTE) value so it allows greater access to the memory it maps.
The returned PTE value is the same as \a aPte but with its access permissions set
to read/write if \a aWrite is true, and set to read-only if \a aWrite is false.
*/
FORCE_INLINE TPte Mmu::MakePteAccessible(TPte aPte, TBool aWrite)
	{
	__NK_ASSERT_DEBUG((aPte&KArmV6PteTypeMask)!=KArmV6PteLargePage);
	if((aPte&KPtePresentMask)==0)
		{
		// wasn't accessible, make it so...
		if(aPte&KArmV6PteSmallTEX1)
			aPte |= KArmV6PteSmallXN; // restore XN by copying from TEX1
		aPte |= KArmV6PteSmallPage;
		aPte |= KArmV6PteAP2; // make read only
		}
	if(aWrite)
		aPte &= ~KArmV6PteAP2; // make writable
	return aPte;
	}


#else // not __CPU_MEMORY_TYPE_REMAPPING

/*
Bits in a PTE which represent access permissions...

AP2 AP1 AP0		usr	wr
0	0	0
0	0	1		n	y
0	1	0
0	1	1		y	y
1	0	0
1	0	1		n	n
1	1	0		y	n
1	1	1
*/

FORCE_INLINE TPte Mmu::MakePteInaccessible(TPte aPte, TBool aReadOnly)
	{
	__NK_ASSERT_DEBUG((aPte&KArmV6PteTypeMask)!=KArmV6PteLargePage);
	if(aPte&KPtePresentMask)
		{
		if(!aReadOnly)
			{
			// copy XN to AP0...
			if(aPte&KArmV6PteSmallXN)
				aPte |= KArmV6PteAP0;
			else
				aPte &= ~KArmV6PteAP0;

			// make inaccessible...
			aPte &= ~KPtePresentMask;
			}
		else
			{
			// make read only...
			aPte |= KArmV6PteAP2; // make read only
			if(aPte&KArmV6PteAP1)
				aPte &= ~KArmV6PteAP0; // correct AP0
			}
		}
	return aPte;
	}


FORCE_INLINE TPte Mmu::MakePteAccessible(TPte aPte, TBool aWrite)
	{
	__NK_ASSERT_DEBUG((aPte&KArmV6PteTypeMask)!=KArmV6PteLargePage);
	if((aPte&KPtePresentMask)==0)
		{
		// wasn't accessible, make it so...
		if(aPte&KArmV6PteAP0)
			aPte |= KArmV6PteSmallXN; // restore XN by copying from AP0
		aPte |= KArmV6PteAP0;
		aPte |= KArmV6PteSmallPage;

		// make read only...
		aPte |= KArmV6PteAP2; // make read only
		if(aPte&KArmV6PteAP1)
			aPte &= ~KArmV6PteAP0; // correct AP0
		}
	if(aWrite)
		{
		// make writable...
		aPte &= ~KArmV6PteAP2;
		aPte |= KArmV6PteAP0;
		}
	return aPte;
	}

#endif // __CPU_MEMORY_TYPE_REMAPPING

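// Whichever permission encoding is compiled in, MakePteInaccessible() and
// MakePteAccessible() are designed to round-trip. A minimal sketch of that
// invariant (illustrative only; aPte is assumed to be a present, writable
// small-page PTE):
#if 0
void PteRoundTrip(TPte aPte)
	{
	TPte ro = Mmu::MakePteInaccessible(aPte, ETrue);	// demote to read-only
	__NK_ASSERT_DEBUG(Mmu::IsPteReadOnly(ro));
	TPte off = Mmu::MakePteInaccessible(aPte, EFalse);	// demote to no-access
	__NK_ASSERT_DEBUG(Mmu::IsPteInaccessible(off));
	TPte rw = Mmu::MakePteAccessible(off, ETrue);		// restore read/write
	__NK_ASSERT_DEBUG(!Mmu::IsPteReadOnly(rw));
	}
#endif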

/**
Return true if a Page Table Entry (PTE) only allows read-only access to memory.
*/
FORCE_INLINE TBool Mmu::IsPteReadOnly(TPte aPte)
	{
	__NK_ASSERT_DEBUG(aPte&KPtePresentMask); // read-only state is ambiguous if pte not present
	return aPte&KArmV6PteAP2;
	}


/**
Return true if a Page Table Entry (PTE) doesn't allow any access to the memory.
*/
FORCE_INLINE TBool Mmu::IsPteInaccessible(TPte aPte)
	{
	return !(aPte&KPtePresentMask);
	}

/**
Return true if the Page Table Entry \a aNewPte allows greater access to
memory than \a aOldPte. Only the permissions read/write, read-only and no-access
are considered, not any execute or privileged access.
*/
FORCE_INLINE TBool Mmu::IsPteMoreAccessible(TPte aNewPte, TPte aOldPte)
	{
	if(aNewPte&aOldPte&KPtePresentMask)			// if ptes both present
		return (aOldPte&~aNewPte)&KArmV6PteAP2;	//   check for more writable
	else										// else
		return aNewPte&KPtePresentMask;			//   check for new pte being present
	}

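// Worked examples of the comparison above (illustrative only; aPresentRw and
// aPresentRo are assumed present PTEs with AP2 clear and set respectively):
#if 0
void MoreAccessibleExamples(TPte aPresentRw, TPte aPresentRo)
	{
	__NK_ASSERT_DEBUG(Mmu::IsPteMoreAccessible(aPresentRw, aPresentRo));	// read-only -> read/write
	__NK_ASSERT_DEBUG(!Mmu::IsPteMoreAccessible(aPresentRo, aPresentRw));	// read/write -> read-only
	__NK_ASSERT_DEBUG(Mmu::IsPteMoreAccessible(aPresentRo, (TPte)0));		// not-present -> present
	}
#endif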

/**
Bit flag values representing the memory mapping differences governed by
MMU Page Directory Entries (PDEs). Memory regions which differ in #TPdeType
cannot be mapped using the same Page Table, as they would share the same
PDE entry.
*/
enum TPdeType
	{
	/**
	Legacy (and little-used/unused?) ARM attribute.
	This could potentially be removed (see DMemoryMapping::PdeType()).
	*/
	EPdeTypeECC				= 1<<0,

	/**
	Total number of combinations of #TPdeType values.
	*/
	ENumPdeTypes			= 2
	};


/**
Bit flag values representing the memory mapping differences governed by
MMU Page Table Entries (PTEs).
*/
enum TPteType
	{
	/**
	PTE grants user mode access to memory.
	*/
	EPteTypeUserAccess		= EUser,

	/**
	PTE grants write access to memory.
	*/
	EPteTypeWritable		= EReadWrite,

	/**
	PTE grants execute access to memory.
	*/
	EPteTypeExecutable		= EExecute,

	/**
	PTE is 'global', i.e. the memory it maps is intended to be accessible
	in all process contexts (mappings at virtual addresses >= KGlobalMemoryBase).
	The MMU uses this to tag TLB entries as valid for all ASIDs.
	*/
	EPteTypeGlobal			= 1<<3,

	/**
	Total number of combinations of #TPteType values.
	*/
	ENumPteTypes			= 16
	};

__ASSERT_COMPILE(EPteTypeUserAccess==(1<<0));
__ASSERT_COMPILE(EPteTypeWritable==(1<<1));
__ASSERT_COMPILE(EPteTypeExecutable==(1<<2));


#define MMU_SUPPORTS_EXECUTE_NEVER


/**
Return the #TPdeType for memory with the given attributes value.
*/
FORCE_INLINE TUint Mmu::PdeType(TMemoryAttributes aAttributes)
	{
	return aAttributes&EMemoryAttributeUseECC ? EPdeTypeECC : 0;
	}

/**
Return the #TPteType to use for memory mappings requiring the given access permissions
and global attribute. \a aGlobal should be true if #EPteTypeGlobal is to be set.
*/
FORCE_INLINE TUint Mmu::PteType(TMappingPermissions aPermissions, TBool aGlobal)
	{
	__NK_ASSERT_DEBUG(aPermissions&EUser || aGlobal); // can't have supervisor local memory

	TUint pteType =	(aPermissions&(EUser|EReadWrite|EExecute));
	if(aGlobal)
		pteType |= EPteTypeGlobal;

	__NK_ASSERT_DEBUG(pteType<ENumPteTypes);

	return pteType;
	}


/**
Test if a memory access is allowed by a given mapping type.

@param aPteType				#TPteType used for a mapping. E.g. TMemoryMappingBase::PteType()
@param aAccessPermissions	Flags from #TMappingPermissions indicating the memory access
							required.

@return True if a memory access requested with permissions \a aAccessPermissions
		is allowed on a mapping of the specified #TPteType.
		False if the access is not allowed.
*/
FORCE_INLINE TBool Mmu::CheckPteTypePermissions(TUint aPteType, TUint aAccessPermissions)
	{
	aAccessPermissions &= EUser|EReadWrite|EExecute;
	return (aPteType&aAccessPermissions)==aAccessPermissions;
	}


/**
Extract the #TMappingPermissions corresponding to a given #TPteType.
*/
FORCE_INLINE TMappingPermissions Mmu::PermissionsFromPteType(TUint aPteType)
	{
	return (TMappingPermissions)(aPteType&(EPteTypeUserAccess|EPteTypeWritable|EPteTypeExecutable));
	}

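// A minimal sketch tying PteType(), CheckPteTypePermissions() and
// PermissionsFromPteType() together (illustrative only):
#if 0
void PteTypeExample()
	{
	// user-mode, read/write, non-executable global mapping...
	TUint pteType = Mmu::PteType((TMappingPermissions)(EUser|EReadWrite), ETrue);

	// a user-mode read/write access is allowed, execute is not...
	__NK_ASSERT_DEBUG(Mmu::CheckPteTypePermissions(pteType, EUser|EReadWrite));
	__NK_ASSERT_DEBUG(!Mmu::CheckPteTypePermissions(pteType, EUser|EExecute));

	// and the mapping permissions can be recovered from the type...
	__NK_ASSERT_DEBUG(Mmu::PermissionsFromPteType(pteType)==(TMappingPermissions)(EUser|EReadWrite));
	}
#endif
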
extern void UserWriteFault(TLinAddr aAddr);
extern void UserReadFault(TLinAddr aAddr);


//
// TODO: Move these to NKern
//

FORCE_INLINE void inline_DisableAllInterrupts()
	{
#ifdef __GNUC__
	#ifdef __CPU_ARM_HAS_CPS
		CPSIDIF;
	#else
		TInt reg;
		asm __VOLATILE__ ("mrs %0, cpsr" : "=r"(reg));
		asm __VOLATILE__ ("orr %0, %0, #0xc0" : "+r"(reg));	// read-write operand so the compiler sees the update
		asm __VOLATILE__ ("msr cpsr_c, %0" : : "r"(reg));
	#endif
/*
#elif defined(__ARMCC__)
	#if defined(__CPU_ARM_HAS_CPS) && __ARMCC_VERSION>=300000
		asm("cpsid if");
	#else
		TInt reg;
		asm("mrs reg, cpsr");
		asm("orr reg, reg, #0xc0");
		asm("msr cpsr_c, reg");
	#endif
*/
#else
	NKern::DisableAllInterrupts();
#endif
	}

FORCE_INLINE void inline_EnableAllInterrupts()
	{
#ifdef __GNUC__
	#ifdef __CPU_ARM_HAS_CPS
		CPSIEIF;
	#else
		TInt reg;
		asm __VOLATILE__ ("mrs %0, cpsr" : "=r"(reg));
		asm __VOLATILE__ ("bic %0, %0, #0xc0" : "+r"(reg));	// read-write operand so the compiler sees the update
		asm __VOLATILE__ ("msr cpsr_c, %0" : : "r"(reg));
	#endif
/*
#elif defined(__ARMCC__)
	#if defined(__CPU_ARM_HAS_CPS) && __ARMCC_VERSION>=300000
		asm("cpsie if");
	#else
		TInt reg;
		asm("mrs reg, cpsr");
		asm("bic reg, reg, #0xc0");
		asm("msr cpsr_c, reg");
	#endif
*/
#else
	NKern::EnableAllInterrupts();
#endif
	}


#ifndef	__SMP__
#undef __SPIN_LOCK_IRQ
#define __SPIN_LOCK_IRQ(lock)					(inline_DisableAllInterrupts())
#undef __SPIN_UNLOCK_IRQ
#define __SPIN_UNLOCK_IRQ(lock)					(inline_EnableAllInterrupts())
#undef __SPIN_FLASH_IRQ
#define __SPIN_FLASH_IRQ(lock)					(inline_EnableAllInterrupts(),inline_DisableAllInterrupts(),((TBool)TRUE))
#endif
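
// On uniprocessor builds the IRQ spin-lock macros above reduce to plain
// interrupt masking. A hedged usage sketch (illustrative only; TSpinLock is
// the assumed lock type, and the lock object is unused in the UP expansion):
#if 0
void UpdateSharedState(TSpinLock& aLock)
	{
	__SPIN_LOCK_IRQ(aLock);		// UP build: just disables interrupts
	// ... touch state shared with interrupt handlers ...
	__SPIN_UNLOCK_IRQ(aLock);	// UP build: just re-enables interrupts
	}
#endif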


/**
Indicate whether a PDE entry maps a page table.

@param aPde The PDE entry in question.
*/
FORCE_INLINE TBool Mmu::PdeMapsPageTable(TPde aPde)
	{
	return (aPde & KPdeTypeMask) == KArmV6PdePageTable;
	}


/**
Indicate whether a PDE entry maps a section.

@param aPde The PDE entry in question.
*/
FORCE_INLINE TBool Mmu::PdeMapsSection(TPde aPde)
	{
	return (aPde & KPdeTypeMask) == KArmV6PdeSection;
	}