os/kernelhwsrv/kernel/eka/memmodel/epoc/flexible/mmu/arm/xmmu.h
author sl
Tue, 10 Jun 2014 14:32:02 +0200
changeset 1 260cb5ec6c19
permissions -rw-r--r--
Update contrib.
// Copyright (c) 2007-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
//

#include "cache_maintenance.inl"


/**
 @file
 @internalComponent
*/

#if defined(__GNUC__) && !defined(__MARM_ARM4__)
#define	__VOLATILE__	volatile
#else
#define __VOLATILE__
#endif

#if defined(__SMP__) && defined(__CPU_ARM11MP__)
#define COARSE_GRAINED_TLB_MAINTENANCE
#define BROADCAST_TLB_MAINTENANCE
#endif



FORCE_INLINE void __arm_dmb()
	{
	#if defined(__CPU_ARMV6)
		// dmb instruction...
		#ifdef __GNUC__
			TInt zero = 0;
			asm __VOLATILE__ ("mcr p15, 0, %0, c7, c10, 5 " : : "r"(zero));
		#elif defined(__ARMCC__)
			TInt zero = 0;
			asm("mcr p15, 0, zero, c7, c10, 5 ");
		#elif defined(__GCCXML__)
			// empty
		#else
			#error Unknown compiler
		#endif
	#elif defined(__CPU_ARMV7)
		// deprecated CP15 version of DMB...
		#ifdef __GNUC__
			TInt zero = 0;
			asm __VOLATILE__ ("mcr p15, 0, %0, c7, c10, 5 " : : "r"(zero));
		#elif defined(__ARMCC__)
			TInt zero = 0;
			asm("mcr p15, 0, zero, c7, c10, 5 ");
		#elif defined(__GCCXML__)
			// empty
		#else
			#error Unknown compiler
		#endif
	#else
		// non inline version...
		__e32_memory_barrier();
	#endif
	}


FORCE_INLINE void __arm_dsb()
	{
	#if defined(__CPU_ARMV6)
		// drain write buffer...
		#ifdef __GNUC__
			TInt zero = 0;
			asm __VOLATILE__ ("mcr p15, 0, %0, c7, c10, 4 " : : "r"(zero));
		#elif defined(__ARMCC__)
			TInt zero = 0;
			asm("mcr p15, 0, zero, c7, c10, 4 ");
		#elif defined(__GCCXML__)
			// empty
		#else
			#error Unknown compiler
		#endif
	#elif defined(__CPU_ARMV7)
		// deprecated CP15 version of DSB...
		#ifdef __GNUC__
			TInt zero = 0;
			asm __VOLATILE__ ("mcr p15, 0, %0, c7, c10, 4 " : : "r"(zero));
		#elif defined(__ARMCC__)
			TInt zero = 0;
			asm("mcr p15, 0, zero, c7, c10, 4 ");
		#elif defined(__GCCXML__)
			// empty
		#else
			#error Unknown compiler
		#endif
	#else
		// non inline version...
		__e32_io_completion_barrier();
	#endif
	}


extern "C" void __e32_instruction_barrier();

FORCE_INLINE void __arm_isb()
	{
	#if defined(__CPU_ARMV6)
		// prefetch flush...
		#ifdef __GNUC__
			TInt zero = 0;
			asm __VOLATILE__ ("mcr p15, 0, %0, c7, c5, 4 " : : "r"(zero));
		#elif defined(__ARMCC__)
			TInt zero = 0;
			asm("mcr p15, 0, zero, c7, c5, 4 ");
		#elif defined(__GCCXML__)
			// empty
		#else
			#error Unknown compiler
		#endif
	#elif defined(__CPU_ARMV7)
		// deprecated CP15 version of ISB...
		#ifdef __GNUC__
			TInt zero = 0;
			asm __VOLATILE__ ("mcr p15, 0, %0, c7, c5, 4 " : : "r"(zero));
		#elif defined(__ARMCC__)
			TInt zero = 0;
			asm("mcr p15, 0, zero, c7, c5, 4 ");
		#elif defined(__GCCXML__)
			// empty
		#else
			#error Unknown compiler
		#endif
	#else
		// non inline version...
		__e32_instruction_barrier();
	#endif
	}


/**
Branch predictor invalidate all
*/
FORCE_INLINE void __arm_bpiall()
	{
	#ifdef __GNUC__
		TInt zero = 0;
		asm __VOLATILE__ ("mcr p15, 0, %0, c7, c5, 6 " : : "r"(zero));
	#elif defined(__ARMCC__)
		TInt zero = 0;
		asm("mcr p15, 0, zero, c7, c5, 6 ");
	#elif defined(__GCCXML__)
		// empty
	#else
		#error Unknown compiler
	#endif
	}


#ifdef __SMP__

/**
Branch predictor invalidate all inner-shareable
*/
FORCE_INLINE void __arm_bpiallis()
	{
	// branch predictor invalidate all inner-shareable
	#ifdef __GNUC__
		TInt zero = 0;
		asm __VOLATILE__ ("mcr p15, 0, %0, c7, c1, 6 " : : "r"(zero));
	#elif defined(__ARMCC__)
		TInt zero = 0;
		asm("mcr p15, 0, zero, c7, c1, 6 ");
	#elif defined(__GCCXML__)
		// empty
	#else
		#error Unknown compiler
	#endif
	}

#endif


/**
This will make sure that a change in the page directory is visible to the H/W Page-Table Walk.
Call this function when a single entry in the page directory is changed.
*/
FORCE_INLINE void SinglePdeUpdated(TPde* aPde)
	{
	CacheMaintenance::SinglePdeUpdated((TLinAddr)aPde);
	}


#ifdef BROADCAST_TLB_MAINTENANCE

/**
Signal other CPU cores to perform TLB maintenance.

@param aLinAddrAndAsid	If == 0, then InvalidateTLB;
						if < KMmuAsidCount, then InvalidateTLBForAsid;
						else InvalidateTLBForPage.
*/
extern void BroadcastInvalidateTLB(TLinAddr aLinAddrAndAsid=0);

#endif


/**
Invalidate a single I+D TLB entry on this CPU core only.
@param aLinAddrAndAsid Virtual address of a page of memory ORed with the ASID value.
*/
FORCE_INLINE void LocalInvalidateTLBForPage(TLinAddr aLinAddrAndAsid)
	{
	#ifdef __GNUC__
		#if defined(__CPU_ARM11MP__) // why?...
			asm __VOLATILE__ ("mcr p15, 0, %0, c8, c7, 3 " : : "r"(aLinAddrAndAsid));
		#else
			asm __VOLATILE__ ("mcr p15, 0, %0, c8, c7, 1 " : : "r"(aLinAddrAndAsid));
		#endif
	#elif defined(__ARMCC__)
		#if defined(__CPU_ARM11MP__) // why?...
			asm("mcr p15, 0, aLinAddrAndAsid, c8, c7, 3 ");
		#else
			asm("mcr p15, 0, aLinAddrAndAsid, c8, c7, 1 ");
		#endif
	#elif defined(__GCCXML__)
		// empty
	#else
		#error Unknown compiler
	#endif
	__arm_bpiall();
	__arm_dsb();
	__arm_isb();
	}


/**
Invalidate a single I+D TLB entry on all CPU cores.
@param aLinAddrAndAsid Virtual address of a page of memory ORed with the ASID value.
*/
FORCE_INLINE void InvalidateTLBForPage(TLinAddr aLinAddrAndAsid)
	{
	#ifdef BROADCAST_TLB_MAINTENANCE
		BroadcastInvalidateTLB(aLinAddrAndAsid);
	#elif !defined(__SMP__)
		LocalInvalidateTLBForPage(aLinAddrAndAsid);
	#else // __SMP__
		// inner-shareable invalidate...
		#ifdef __GNUC__
			asm __VOLATILE__ ("mcr p15, 0, %0, c8, c3, 1 " : : "r"(aLinAddrAndAsid));
		#elif defined(__ARMCC__)
			asm("mcr p15, 0, aLinAddrAndAsid, c8, c3, 1 ");
		#elif defined(__GCCXML__)
			// empty
		#else
			#error Unknown compiler
		#endif
		__arm_bpiallis();
		__arm_dsb();
		__arm_isb();
	#endif
	}


/**
Invalidate entire TLB on this CPU only
*/
FORCE_INLINE void LocalInvalidateTLB()
	{
	#ifdef __GNUC__
		asm __VOLATILE__ ("mcr p15, 0, r0, c8, c7, 0 ");
	#elif defined(__ARMCC__)
		TInt dummy = 0; // damned RVCT
		asm("mcr p15, 0, dummy, c8, c7, 0 ");
	#elif defined(__GCCXML__)
		// empty
	#else
		#error Unknown compiler
	#endif
	__arm_bpiall();
	__arm_dsb();
	__arm_isb();
	}


/**
Invalidate entire TLB on all CPU cores.
*/
FORCE_INLINE void InvalidateTLB()
	{
	#ifdef BROADCAST_TLB_MAINTENANCE
		BroadcastInvalidateTLB(0);
	#elif !defined(__SMP__)
		LocalInvalidateTLB();
	#else // __SMP__
		// inner-shareable invalidate...
		#ifdef __GNUC__
			TInt zero = 0;
			asm __VOLATILE__ ("mcr p15, 0, %0, c8, c3, 0 " : : "r"(zero));
		#elif defined(__ARMCC__)
			TInt zero = 0;
			asm("mcr p15, 0, zero, c8, c3, 0 ");
		#elif defined(__GCCXML__)
			// empty
		#else
			#error Unknown compiler
		#endif
		__arm_bpiallis();
		__arm_dsb();
		__arm_isb();
	#endif
	}


#if defined(__CPU_ARM1136__) && !defined(__CPU_ARM1136_ERRATUM_424067_FIXED)
#define INVALIDATE_TLB_BY_ASID_BROKEN
#endif
#if defined(__CPU_ARM1176__) && !defined(__CPU_ARM1176_ERRATUM_424692_FIXED)
#define INVALIDATE_TLB_BY_ASID_BROKEN
#endif


__ASSERT_COMPILE(KKernelOsAsid==0); // InvalidateTLBForAsid assumes this


/**
Invalidate all TLB entries which match the given ASID value (current CPU only)
*/
FORCE_INLINE void LocalInvalidateTLBForAsid(TUint aAsid)
	{
#ifndef INVALIDATE_TLB_BY_ASID_BROKEN
	if(aAsid&=0xff)
		{
#if defined(__CPU_CORTEX_A9__) && !defined(__CPU_ARM_A9_ERRATUM_571618_FIXED)
		__arm_dmb();	// ARM Cortex-A9 MPCore erratum 571618 workaround
						// Execute memory barrier before interruptible CP15 operations
#endif
		// invalidate all I+D TLB entries for ASID...
		#ifdef __GNUC__
			asm __VOLATILE__ ("mcr p15, 0, %0, c8, c7, 2 " : : "r"(aAsid));
		#elif defined(__ARMCC__)
			asm("mcr p15, 0, aAsid, c8, c7, 2 ");
		#elif defined(__GCCXML__)
			// empty
		#else
			#error Unknown compiler
		#endif
		}
	else
		// ASID==0 means 'kernel' memory. We have to invalidate the entire TLB here
		// as this is the only way of getting rid of global entries...
#endif
		{
		// invalidate entire TLB...
		#ifdef __GNUC__
			asm __VOLATILE__ ("mcr p15, 0, r0, c8, c7, 0 ");
		#elif defined(__ARMCC__)
			TInt dummy = 0; // damned RVCT
			asm("mcr p15, 0, dummy, c8, c7, 0 ");
		#elif defined(__GCCXML__)
			// empty
		#else
			#error Unknown compiler
		#endif
		}
	__arm_bpiall();
	__arm_dsb();
	__arm_isb();
	}


/**
Invalidate all TLB entries which match the given ASID value on all CPU cores.
*/
FORCE_INLINE void InvalidateTLBForAsid(TUint aAsid)
	{
	aAsid &= 0xff;
	#ifdef BROADCAST_TLB_MAINTENANCE
		BroadcastInvalidateTLB(aAsid);
	#elif !defined(__SMP__)
		LocalInvalidateTLBForAsid(aAsid);
	#else // __SMP__
		if(aAsid!=0)
			{
#if defined(__CPU_CORTEX_A9__) && !defined(__CPU_ARM_A9_ERRATUM_571618_FIXED)
			__arm_dmb();	// ARM Cortex-A9 MPCore erratum 571618 workaround
							// Execute memory barrier before interruptible CP15 operations
#endif
			// invalidate all I+D TLB entries for ASID...
			#ifdef __GNUC__
				asm __VOLATILE__ ("mcr p15, 0, %0, c8, c3, 2 " : : "r"(aAsid));
			#elif defined(__ARMCC__)
				asm("mcr p15, 0, aAsid, c8, c3, 2 ");
			#elif defined(__GCCXML__)
				// empty
			#else
				#error Unknown compiler
			#endif
			}
		else
			{
			// ASID==0 means 'kernel' memory. We have to invalidate the entire TLB here
			// as this is the only way of getting rid of global entries...
			#ifdef __GNUC__
				asm __VOLATILE__ ("mcr p15, 0, %0, c8, c3, 0 " : : "r"(aAsid));
			#elif defined(__ARMCC__)
				asm("mcr p15, 0, aAsid, c8, c3, 0 ");
			#elif defined(__GCCXML__)
				// empty
			#else
				#error Unknown compiler
			#endif
			}
		__arm_bpiallis();
		__arm_dsb();
		__arm_isb();
	#endif
	}


/**
Return the virtual address of the page directory used for address space
\a aOsAsid. Note, the global page directory is mapped after each
address space specific page directory in a way which means that it
appears to be a single contiguous page directory which maps the
entire 32bit virtual address range. I.e. the returned page directory
address can be simply indexed by any virtual address without regard
to whether it belongs to the given address space or lies in the
global region.
*/
FORCE_INLINE TPde* Mmu::PageDirectory(TInt aOsAsid)
	{
	return (TPde*)(KPageDirectoryBase+(aOsAsid<<KPageDirectoryShift));
	}


/**
Return the virtual address of the Page Directory Entry (PDE) used to map
the region containing the virtual address \a aAddress in the address space
\a aOsAsid.
*/
FORCE_INLINE TPde* Mmu::PageDirectoryEntry(TInt aOsAsid, TLinAddr aAddress)
	{
	return PageDirectory(aOsAsid) + (aAddress>>KChunkShift);
	}


/**
Return the physical address mapped by the section mapping contained
in the given Page Directory Entry \a aPde. If \a aPde is not a
section mapping, then KPhysAddrInvalid is returned.
*/
FORCE_INLINE TPhysAddr Mmu::PdePhysAddr(TPde aPde)
	{
	if((aPde&KPdePresentMask)==KArmV6PdeSection)
		return aPde&KPdeSectionAddrMask;
	return KPhysAddrInvalid;
	}
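
// Illustrative sketch (not part of the original header): walking from a virtual
// address to the physical address behind a section mapping, using the helpers
// above and below (PdeMapsSection() is defined later in this file). The variable
// names are hypothetical.
//
//	TPde* pde = Mmu::PageDirectoryEntry(osAsid, linAddr);	// PDE slot covering linAddr
//	if(Mmu::PdeMapsSection(*pde))							// 1MB section mapping?
//		{
//		TPhysAddr base = Mmu::PdePhysAddr(*pde);			// physical base of the section
//		TPhysAddr phys = base + (linAddr & ~KPdeSectionAddrMask);	// add offset within the section
//		}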


#ifdef __CPU_MEMORY_TYPE_REMAPPING

/*
Bits in a PTE which represent access permissions...

AP2 AP1 AP0		usr	wr
0	0	x		n	y
0	1	x		y	y
1	0	x		n	n
1	1	x		y	n
*/

/**
Modify a Page Table Entry (PTE) value so it restricts access to the memory it maps.
The returned PTE value is the same as \a aPte but with its access permissions set
to read-only if \a aReadOnly is true, and set to allow no access if \a aReadOnly is false.
*/
FORCE_INLINE TPte Mmu::MakePteInaccessible(TPte aPte, TBool aReadOnly)
	{
	__NK_ASSERT_DEBUG((aPte&KArmV6PteTypeMask)!=KArmV6PteLargePage);
	if(aPte&KPtePresentMask)
		{
		__NK_ASSERT_DEBUG((bool)(aPte&KArmV6PteSmallTEX1)==(bool)(aPte&KArmV6PteSmallXN)); // TEX1 should be a copy of XN
		if(aReadOnly)
			aPte |= KArmV6PteAP2; // make read only
		else
			aPte &= ~KPtePresentMask; // make inaccessible
		}
	return aPte;
	}


/**
Modify a Page Table Entry (PTE) value so it allows greater access to the memory it maps.
The returned PTE value is the same as \a aPte but with its access permissions set
to read/write if \a aWrite is true, and set to read-only if \a aWrite is false.
*/
FORCE_INLINE TPte Mmu::MakePteAccessible(TPte aPte, TBool aWrite)
	{
	__NK_ASSERT_DEBUG((aPte&KArmV6PteTypeMask)!=KArmV6PteLargePage);
	if((aPte&KPtePresentMask)==0)
		{
		// wasn't accessible, make it so...
		if(aPte&KArmV6PteSmallTEX1)
			aPte |= KArmV6PteSmallXN; // restore XN by copying from TEX1
		aPte |= KArmV6PteSmallPage;
		aPte |= KArmV6PteAP2; // make read only
		}
	if(aWrite)
		aPte &= ~KArmV6PteAP2; // make writable
	return aPte;
	}
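
// Illustrative sketch (not part of the original header): how the AP bits move
// through MakePteInaccessible()/MakePteAccessible() for a user read/write small
// page PTE (AP2==0, AP1==1), following the table above. 'pte' is hypothetical.
//
//	TPte ro   = Mmu::MakePteInaccessible(pte, ETrue);	// sets AP2 -> user read-only
//	TPte none = Mmu::MakePteInaccessible(pte, EFalse);	// clears the present bits -> no access
//														// (XN stays recoverable from TEX1)
//	TPte rw   = Mmu::MakePteAccessible(none, ETrue);	// restores XN from TEX1, clears AP2 -> read/write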


#else // not __CPU_MEMORY_TYPE_REMAPPING

/*
Bits in a PTE which represent access permissions...

AP2 AP1 AP0		usr	wr
0	0	0
0	0	1		n	y
0	1	0
0	1	1		y	y
1	0	0
1	0	1		n	n
1	1	0		y	n
1	1	1
*/

FORCE_INLINE TPte Mmu::MakePteInaccessible(TPte aPte, TBool aReadOnly)
	{
	__NK_ASSERT_DEBUG((aPte&KArmV6PteTypeMask)!=KArmV6PteLargePage);
	if(aPte&KPtePresentMask)
		{
		if(!aReadOnly)
			{
			// copy XN to AP0...
			if(aPte&KArmV6PteSmallXN)
				aPte |= KArmV6PteAP0;
			else
				aPte &= ~KArmV6PteAP0;

			// make inaccessible...
			aPte &= ~KPtePresentMask;
			}
		else
			{
			// make read only...
			aPte |= KArmV6PteAP2; // make read only
			if(aPte&KArmV6PteAP1)
				aPte &= ~KArmV6PteAP0; // correct AP0
			}
		}
	return aPte;
	}


FORCE_INLINE TPte Mmu::MakePteAccessible(TPte aPte, TBool aWrite)
	{
	__NK_ASSERT_DEBUG((aPte&KArmV6PteTypeMask)!=KArmV6PteLargePage);
	if((aPte&KPtePresentMask)==0)
		{
		// wasn't accessible, make it so...
		if(aPte&KArmV6PteAP0)
			aPte |= KArmV6PteSmallXN; // restore XN by copying from AP0
		aPte |= KArmV6PteAP0;
		aPte |= KArmV6PteSmallPage;

		// make read only...
		aPte |= KArmV6PteAP2; // make read only
		if(aPte&KArmV6PteAP1)
			aPte &= ~KArmV6PteAP0; // correct AP0
		}
	if(aWrite)
		{
		// make writable...
		aPte &= ~KArmV6PteAP2;
		aPte |= KArmV6PteAP0;
		}
	return aPte;
	}

#endif // __CPU_MEMORY_TYPE_REMAPPING


/**
Return true if a Page Table Entry (PTE) only allows read-only access to memory.
*/
FORCE_INLINE TBool Mmu::IsPteReadOnly(TPte aPte)
	{
	__NK_ASSERT_DEBUG(aPte&KPtePresentMask); // read-only state is ambiguous if pte not present
	return aPte&KArmV6PteAP2;
	}


/**
Return true if a Page Table Entry (PTE) doesn't allow any access to the memory.
*/
FORCE_INLINE TBool Mmu::IsPteInaccessible(TPte aPte)
	{
	return !(aPte&KPtePresentMask);
	}

/**
Return true if the Page Table Entry \a aNewPte allows greater access to
memory than \a aOldPte. Only the permissions read/write, read-only and no-access
are considered, not any execute or privileged access.
*/
FORCE_INLINE TBool Mmu::IsPteMoreAccessible(TPte aNewPte, TPte aOldPte)
	{
	if(aNewPte&aOldPte&KPtePresentMask)			// if ptes both present
		return (aOldPte&~aNewPte)&KArmV6PteAP2;	//   check for more writable
	else										// else
		return aNewPte&KPtePresentMask;			//   check for new pte being present
	}
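
// Illustrative examples (not part of the original header); 'pte' is a hypothetical
// present PTE value:
//
//	Mmu::IsPteMoreAccessible(Mmu::MakePteAccessible(pte, ETrue),
//							 Mmu::MakePteInaccessible(pte, ETrue));			// true:  read-only -> read/write
//	Mmu::IsPteMoreAccessible(Mmu::MakePteInaccessible(pte, EFalse), pte);	// false: present -> no access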


/**
Bit flag values representing the memory mapping differences governed by
MMU Page Directory Entries (PDEs). Memory which differs in #TPdeType can
not be mapped using the same Page Table, as they would share the same PDE
entry.
*/
enum TPdeType
	{
	/**
	Legacy (and little-used/unused?) ARM attribute.
	This could potentially be removed (see DMemoryMapping::PdeType()).
	*/
	EPdeTypeECC				= 1<<0,

	/**
	Total number of combinations of #TPdeType values.
	*/
	ENumPdeTypes			= 2
	};


/**
Bit flag values representing the memory mapping differences governed by
MMU Page Table Entries (PTEs).
*/
enum TPteType
	{
	/**
	PTE grants user mode access to memory.
	*/
	EPteTypeUserAccess		= EUser,

	/**
	PTE grants write access to memory.
	*/
	EPteTypeWritable		= EReadWrite,

	/**
	PTE grants execute access to memory.
	*/
	EPteTypeExecutable		= EExecute,

	/**
	PTE is 'global'. I.e. the memory it maps is intended to be accessible
	in all process contexts, i.e. for mappings at virtual address >= KGlobalMemoryBase.
	The MMU uses this to tag TLB entries as valid for all ASIDs.
	*/
	EPteTypeGlobal			= 1<<3,

	/**
	Total number of combinations of #TPteType values.
	*/
	ENumPteTypes			= 16
	};

__ASSERT_COMPILE(EPteTypeUserAccess==(1<<0));
__ASSERT_COMPILE(EPteTypeWritable==(1<<1));
__ASSERT_COMPILE(EPteTypeExecutable==(1<<2));


#define MMU_SUPPORTS_EXECUTE_NEVER


/**
Return the #TPdeType for memory with the given attributes value.
*/
FORCE_INLINE TUint Mmu::PdeType(TMemoryAttributes aAttributes)
	{
	return aAttributes&EMemoryAttributeUseECC ? EPdeTypeECC : 0;
	}


/**
Return the #TPteType to use for memory mappings requiring the given access permissions
and Global attribute. The global flag is true if #EPteTypeGlobal is to be set.
*/
FORCE_INLINE TUint Mmu::PteType(TMappingPermissions aPermissions, TBool aGlobal)
	{
	__NK_ASSERT_DEBUG(aPermissions&EUser || aGlobal); // can't have supervisor local memory

	TUint pteType =	(aPermissions&(EUser|EReadWrite|EExecute));
	if(aGlobal)
		pteType |= EPteTypeGlobal;

	__NK_ASSERT_DEBUG(pteType<ENumPteTypes);

	return pteType;
	}


/**
Test if a memory access is allowed by a given mapping type.

@param aPteType				#TPteType used for a mapping. E.g. TMemoryMappingBase::PteType()
@param aAccessPermissions	Flags from #TMappingPermissions indicating the memory access
							required.

@return True if a memory access requested with permissions \a aAccessPermissions
		is allowed on a mapping of the specified #TPteType.
		False if the access is not allowed.
*/
FORCE_INLINE TBool Mmu::CheckPteTypePermissions(TUint aPteType, TUint aAccessPermissions)
	{
	aAccessPermissions &= EUser|EReadWrite|EExecute;
	return (aPteType&aAccessPermissions)==aAccessPermissions;
	}
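
// Illustrative sketch (not part of the original header): building the pte type for
// a user read/write, non-global mapping and testing accesses against it. The cast
// and values are examples only.
//
//	TUint pteType = Mmu::PteType((TMappingPermissions)(EUser|EReadWrite), EFalse);
//	// pteType == (EPteTypeUserAccess|EPteTypeWritable)
//	Mmu::CheckPteTypePermissions(pteType, EUser|EReadWrite);	// true  - user read/write allowed
//	Mmu::CheckPteTypePermissions(pteType, EUser|EExecute);		// false - execute not granted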


/**
Extract the #TMappingPermissions corresponding to a given #TPteType.
*/
FORCE_INLINE TMappingPermissions Mmu::PermissionsFromPteType(TUint aPteType)
	{
	return (TMappingPermissions)(aPteType&(EPteTypeUserAccess|EPteTypeWritable|EPteTypeExecutable));
	}

extern void UserWriteFault(TLinAddr aAddr);
extern void UserReadFault(TLinAddr aAddr);


//
// TODO: Move these to NKern
//

FORCE_INLINE void inline_DisableAllInterrupts()
	{
#ifdef __GNUC__
	#ifdef __CPU_ARM_HAS_CPS
		CPSIDIF;
	#else
		TInt reg;
		asm __VOLATILE__ ("mrs %0, cpsr" : "=r"(reg));
		asm __VOLATILE__ ("orr %0, %0, #0xc0" : : "r"(reg));
		asm __VOLATILE__ ("msr cpsr_c, %0" : : "r"(reg));
	#endif
/*
#elif defined(__ARMCC__)
	#if defined(__CPU_ARM_HAS_CPS) && __ARMCC_VERSION>=300000
		asm("cpsid if");
	#else
		TInt reg;
		asm("mrs reg, cpsr");
		asm("orr reg, reg, #0xc0");
		asm("msr cpsr_c, reg");
	#endif
*/
#else
	NKern::DisableAllInterrupts();
#endif
	}

FORCE_INLINE void inline_EnableAllInterrupts()
	{
#ifdef __GNUC__
	#ifdef __CPU_ARM_HAS_CPS
		CPSIEIF;
	#else
		TInt reg;
		asm __VOLATILE__ ("mrs %0, cpsr" : "=r"(reg));
		asm __VOLATILE__ ("bic %0, %0, #0xc0" : : "r"(reg));
		asm __VOLATILE__ ("msr cpsr_c, %0" : : "r"(reg));
	#endif
/*
#elif defined(__ARMCC__)
	#if defined(__CPU_ARM_HAS_CPS) && __ARMCC_VERSION>=300000
		asm("cpsie if");
	#else
		TInt reg;
		asm("mrs reg, cpsr");
		asm("bic reg, reg, #0xc0");
		asm("msr cpsr_c, reg");
	#endif
*/
#else
	NKern::EnableAllInterrupts();
#endif
	}


#ifndef	__SMP__
#undef __SPIN_LOCK_IRQ
#define __SPIN_LOCK_IRQ(lock)					(inline_DisableAllInterrupts())
#undef __SPIN_UNLOCK_IRQ
#define __SPIN_UNLOCK_IRQ(lock)					(inline_EnableAllInterrupts())
#undef __SPIN_FLASH_IRQ
#define __SPIN_FLASH_IRQ(lock)					(inline_EnableAllInterrupts(),inline_DisableAllInterrupts(),((TBool)TRUE))
#endif


/**
Indicate whether a PDE entry maps a page table.

@param aPde The PDE entry in question.
*/
FORCE_INLINE TBool Mmu::PdeMapsPageTable(TPde aPde)
	{
	return (aPde & KPdeTypeMask) == KArmV6PdePageTable;
	}


/**
Indicate whether a PDE entry maps a section.

@param aPde The PDE entry in question.
*/
FORCE_INLINE TBool Mmu::PdeMapsSection(TPde aPde)
	{
	return (aPde & KPdeTypeMask) == KArmV6PdeSection;
	}