// os/kernelhwsrv/kernel/eka/memmodel/epoc/multiple/arm/xmmu.cia
// (repository metadata: author sl@SLION-WIN7.fritz.box, Fri, 15 Jun 2012 03:10:57 +0200,
//  changeset 0 bde4ae8d615e, permissions -rw-r--r--, "First public contribution.")
// Copyright (c) 1997-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\memmodel\epoc\multiple\arm\xmmu.cia
//
//

#include <arm_mem.h>
#include "execs.h"
#include <nk_cpu.h>
#if defined(__CPU_ARM1136__) && !defined(__CPU_ARM1136_ERRATUM_353494_FIXED)
// Clears a small- or large-page PTE and cleans the modified entry/entries from
// the data cache (workaround path for ARM1136 erratum 353494).
// This will also invalidate TLB entry if the third argument (asid) is specified (>=0).
// If asid is < 0, the caller is expected to deal with TLB invalidation.
// r0 = address of PTE, r1 = linear address mapped, r2 = ASID (or <0)
__NAKED__ void remove_and_invalidate_page(TPte*, TLinAddr, TInt)
	{
	asm("stmfd sp!, {r4-r6,lr} ");
	asm("mov r6, r2 ");			//r6 = asid
	asm("mov r4, r0 ");			//r4 = PTE address, kept for the cache clean below
	asm("mov r5, #1 ");			//by default, one cache line to clean
	
	asm("ldr r3, [r0] ");		// r3 = original PTE
	asm("cmp r2, #0 ");
	asm("bicpl r1, r1, #0xff ");
	asm("orrpl r1, r1, r2 ");	// if ASID supplied (>=0), combine with VA for the TLB ops
	asm("mrs r12, cpsr ");		// save interrupt state, restored at label 0
	asm("mov r2, #0 ");
	CPSIDAIF;					// interrupts off
	asm("str r2, [r0], #4 ");	// clear PTE
	asm("tst r3, #3 ");			// PTE present?
	asm("beq 0f ");				// if not, done
	asm("tst r3, #2 ");			// small page?
	asm("bne 1f ");				// skip if small
	
	// Large page: the entry is replicated over 16 consecutive PTEs - clear them all.
	asm("mov r5, #2 ");			// there will be 2 cache lines to clean
	asm("mov r3, #0 ");
	asm("str r2, [r0], #4 ");
	asm("stmia r0!, {r2,r3} ");	// clear 16 consecutive PTEs
	asm("stmia r0!, {r2,r3} ");
	asm("stmia r0!, {r2,r3} ");
	asm("stmia r0!, {r2,r3} ");
	asm("stmia r0!, {r2,r3} ");
	asm("stmia r0!, {r2,r3} ");
	asm("stmia r0!, {r2,r3} ");
	
	asm("1: ");
#if defined(__CPU_PAGE_TABLES_FULLY_CACHED)
	// Clean the changed page table entries from the cache.
	// On ARM1136, cache line is always 32 bytes. 
	// For small page, a single cache line has to be cleaned.
	// For large page, 16 page table entries always fit into two cache lines.
	CLEAN_DCACHE_LINE(,r4);		
	asm("subs r5, r5, #1");
	asm("addhi r4, r4, #32");// Clean the next cache line as well. Executes ...
	CLEAN_DCACHE_LINE(hi,r4);// ... only in case of large page table.
#endif
	
	asm("mcr p15, 0, r1, c7, c10, 4 ");	// drain write buffer
	asm("cmp r6, #0");				//is asid valid (>=0)?

	FLUSH_DTLB_ENTRY(pl,r1);		// remove stale TLB entry if asid >= 0
	FLUSH_ITLB_ENTRY(pl,r1);

	asm("0: ");
	asm("msr cpsr, r12 ");			// restore interrupt state
	asm("ldmfd sp!, {r4-r6,pc} ");		// if successful, exit
	}
// Clears a section PDE and cleans it from the data cache.
// This will also invalidate TLB entry. (The third argument (asid) is assumed to be valid (>=0).)
// r0 = address of PDE, r1 = linear address of section, r2 = ASID
__NAKED__ void remove_and_invalidate_section(TPde*, TLinAddr, TInt)
	{
	asm("ldr r3, [r0] ");		// r3 = original PDE
	asm("cmp r2, #0 ");
	asm("bicpl r1, r1, #0xff ");
	asm("orrpl r1, r1, r2 ");	// if ASID supplied, combine with VA
	asm("mrs r12, cpsr ");		// save interrupt state, restored at label 0
	asm("mov r2, #0 ");
	CPSIDAIF;					// interrupts off
	asm("tst r3, #3 ");			// PDE present?
	asm("beq 0f ");				// if not, done
	asm("str r2, [r0] ");		// clear PDE
#if defined(__CPU_PAGE_TABLES_FULLY_CACHED)
	CLEAN_DCACHE_LINE(,r0);		
#endif
	asm("mcr p15, 0, r1, c7, c10, 4 ");	// drain write buffer
	FLUSH_DTLB_ENTRY(,r1);		// remove stale TLB entry
	FLUSH_ITLB_ENTRY(,r1);
	asm("0: ");
	asm("msr cpsr, r12 ");		// restore interrupt state
	__JUMP(,lr);
	}
#endif
// Writes aNewPte to a global PTE and invalidates the TLB entry for aLinAddr.
// Simply sets r3 (the ASID argument) to 0 and falls through to
// MakePTEInaccessible below - the two functions must stay adjacent in this order.
__NAKED__ void MakeGlobalPTEInaccessible(TPte* aPtePtr, TPte aNewPte, TLinAddr aLinAddr)
	{
	asm("mov r3,#0 ");
	// fall through
	}

// Writes aNewPte to the PTE at aPtePtr, cleans the entry to memory where page
// tables are cached, and invalidates the TLB entry for aLinAddr/aAsid.
// r0 = aPtePtr, r1 = aNewPte, r2 = aLinAddr, r3 = aAsid
__NAKED__ void MakePTEInaccessible(TPte* aPtePtr, TPte aNewPte, TLinAddr aLinAddr, TInt aAsid)
	{
	asm("bic r2, r2, #0xff ");
	asm("orr r2, r2, r3 ");		// r2 = VA combined with ASID for the TLB invalidate
#if defined(__CPU_ARM1136__) && !defined(__CPU_ARM1136_ERRATUM_353494_FIXED)
	CPSIDIF;					// interrupts off (erratum 353494 workaround path)
	asm("str r1,[r0]");			// write the new PTE
	#if defined(__CPU_PAGE_TABLES_FULLY_CACHED)
	CLEAN_DCACHE_LINE(,r0);		
	#endif
	DRAIN_WRITE_BUFFER(,r1,r1);
	FLUSH_DTLB_ENTRY(,r2);		// remove stale TLB entries
	FLUSH_ITLB_ENTRY(,r2);
	asm("mov r1, #0");
	FLUSH_BTB(,r1);				// also flush branch target buffer (erratum workaround)
	CPSIEIF;					// interrupts on
#else
	asm("str r1,[r0]");			// write the new PTE
#if defined(__CPU_PAGE_TABLES_FULLY_CACHED)
#ifdef __CPU_ARMV7
	DCCMVAU(r0);				// clean D-cache line containing the PTE
	ARM_DSBSH;
#else
	CLEAN_DCACHE_LINE(,r0);
	DRAIN_WRITE_BUFFER(,r1,r1);
#endif
#endif
#ifdef __CPU_ARMV7
	UTLBIMVA(r2);				// invalidate unified TLB entry by VA+ASID
	ARM_DSBSH;
	ARM_ISBSY;
#else
	FLUSH_DTLB_ENTRY(,r2);		// remove stale TLB entries
	FLUSH_ITLB_ENTRY(,r2);
#endif
#endif
	__JUMP(,lr);
	}
__NAKED__ void InvalidateTLBForPage(TLinAddr /*aLinAddr*/, TInt /*aAsid*/)
//
// Flush a specified virtual address from the TLB.
// If aAsid>0, flush is restricted to ASID=aAsid for non-global entries.
// If aAsid=0, Kernel asid is specified - will flush global entry or the entry belonging to local Kernel space.
// If aAsid<0, no ASID is specified - will flush all TLB entries with matching VA regardless of ASID (or whether they are
//									  local or global). In the absence of such MMU command, flush-entire-TLB will apply here.
	{
	asm("cmp r1, #0 ");
	asm("bmi 1f ");					// aAsid < 0: branch to full-TLB flush below
	asm("bic r0, r0, #0xff ");		// if aAsid >= 0, orr it with linear address in r0.
	asm("orr r0, r0, r1 ");
#ifdef __CPU_ARMV7
	UTLBIMVA(r0);					// invalidate unified TLB entry by VA+ASID
	ARM_DSBSH;
	ARM_ISBSY;
#else
	FLUSH_DTLB_ENTRY(,r0);
	FLUSH_ITLB_ENTRY(,r0);
#endif
	__JUMP(,lr);

	asm("1: ");
#ifdef __CPU_ARMV7
	UTLBIALL;						// invalidate entire unified TLB
	ARM_DSBSH;
	ARM_ISBSY;
#else
	asm("mov r0, #0 ");
	FLUSH_IDTLB(,r0);				// aAsid < 0. There is no coprocessor instruction that will flush all ...
									// ... entries matching Linear address. Flush entire TLB instead and exit.
#endif
	__JUMP(,lr);
	}
// Invalidate the entire TLB (both instruction and data sides).
__NAKED__ void FlushTLBs()
	{
#ifdef __CPU_ARMV7
	UTLBIALL;			// invalidate entire unified TLB
	ARM_DSBSH;
	ARM_ISBSY;
#else
	asm("mov r0, #0 ");
	FLUSH_IDTLB(,r0);	// invalidate entire I+D TLBs
#endif
	__JUMP(,lr);
	}
// Returns the value of the CP15 Translation Table Base Control register,
// masked to its defined bits.
__NAKED__ TUint32 TTCR()
	{
	asm("mrc p15, 0, r0, c2, c0, 2 ");	// read CP15 c2 TTB control register
	asm("and r0, r0, #7 ");	// only bottom 3 bits are defined
	__JUMP(,lr);
	}
// Invalidate the entire branch predictor (branch target buffer).
GLDEF_C __NAKED__ void __FlushBtb()
	{
#ifdef __CPU_ARMV7
#ifdef __SMP__
	BPIALLIS;	// invalidate branch predictor, inner-shareable (all CPUs)
#else 	//def __SMP__
	BPIALL;		// invalidate branch predictor on this CPU
#endif 	// else __SMP__
    ARM_DSBSH;
	ARM_ISBSY;
#else	//def __CPU_ARMV7
	asm("mov r1, #0");
	FLUSH_BTB(,r1);
#endif	//else __CPU_ARMV7
	__JUMP(,lr);
	}
// Generic cache/TLB flush function.
// Which things are flushed is determined by aMask.
// r1 = aMask (r0 = this, unused here).
__NAKED__ void ArmMmu::GenericFlush(TUint32 /*aMask*/)
	{
#ifdef __CPU_ARMV7
	// The tst/tsteq chain leaves Z set only when NONE of the four flags is
	// present in aMask; in that case skip the flush entirely.
	asm("tst r1, #%a0" : : "i" (EFlushDTLB) );
	asm("tsteq r1, #%a0" : : "i" (EFlushDPermChg) );
	asm("tsteq r1, #%a0" : : "i" (EFlushITLB) );
	asm("tsteq r1, #%a0" : : "i" (EFlushIPermChg) );
	asm("beq 1f ");
	UTLBIALL;					// ARMv7: single unified TLB, flush it all
	ARM_DSBSH;
	ARM_ISBSY;
	asm("1: ");
#else
	asm("mov r2, #0 ");
	asm("tst r1, #%a0" : : "i" (EFlushDTLB) );
	asm("tsteq r1, #%a0" : : "i" (EFlushDPermChg) );
	FLUSH_DTLB(ne,r2);			// flush data TLB if either D flag set
	asm("tst r1, #%a0" : : "i" (EFlushITLB) );
	asm("tsteq r1, #%a0" : : "i" (EFlushIPermChg) );
	FLUSH_ITLB(ne,r2);			// flush instruction TLB if either I flag set
#endif
	__JUMP(,lr);
	}
// Exec handler: unlock the RAM drive on behalf of the calling thread.
// Returns without unlocking unless the caller's process holds ECapabilityTCB.
// r1 = pointer to the NThread embedded in the calling DThread (the _FOFF
// arithmetic below converts it back to the owning DThread's fields).
__NAKED__ void ExecHandler::UnlockRamDrive()
	{
	asm("ldr r0, [r1, #%a0]" : : "i" (_FOFF(DThread,iOwningProcess)-_FOFF(DThread,iNThread)));
	asm("ldr r0, [r0, #%a0]" : : "i" _FOFF(DProcess,iS.iCaps));
// __KERNEL_CAPABILITY_CHECK
	asm("tst r0, #%a0 " : : "i" ((TInt)(1<<ECapabilityTCB)));
	__JUMP(eq,lr);	   // don't unlock the RAM drive if don't have TCB capability

	// fall through to TInternalRamDrive::Unlock, which must immediately follow
	}
// Unlock the internal RAM drive: set domain 1 in the Domain Access Control
// Register to "client" (01b), so RAM-drive accesses are permission-checked
// and allowed.
EXPORT_C __NAKED__ void TInternalRamDrive::Unlock()
	{
	asm("mrc p15, 0, r0, c3, c0, 0 ");	// read DACR
	asm("bic r0, r0, #0x0c ");			// clear domain 1 bits [3:2]
	asm("orr r0, r0, #0x04 ");			// RAM drive in domain 1 -> client access
	asm("mcr p15, 0, r0, c3, c0, 0 ");	// write DACR back
	CPWAIT(,r0);
	__JUMP(,lr);
	}
// Lock the internal RAM drive: set domain 1 in the Domain Access Control
// Register to "no access" (00b), so any RAM-drive access faults.
EXPORT_C __NAKED__ void TInternalRamDrive::Lock()
	{
	asm("mrc p15, 0, r0, c3, c0, 0 ");	// read DACR
	asm("bic r0, r0, #0x0c ");			// RAM drive in domain 1 -> no access
	asm("mcr p15, 0, r0, c3, c0, 0 ");	// write DACR back
	CPWAIT(,r0);
	__JUMP(,lr);
	}
// Enable access to the RAM alias region: set domain 2 in the Domain Access
// Control Register to "client" (01b) by setting bit 4.
__NAKED__ void ArmMmu::UnlockAlias()
	{
	asm("mrc p15, 0, r0, c3, c0, 0 ");	// read DACR
	asm("orr r0, r0, #0x10 ");			// Alias memory in domain 2 -> client access
	asm("mcr p15, 0, r0, c3, c0, 0 ");	// write DACR back
	CPWAIT(,r0);
	__JUMP(,lr);
	}
// Disable access to the RAM alias region: clear domain 2 bits [5:4] in the
// Domain Access Control Register, i.e. "no access" (00b).
__NAKED__ void ArmMmu::LockAlias()
	{
	asm("mrc p15, 0, r0, c3, c0, 0 ");	// read DACR
	asm("bic r0, r0, #0x30 ");			// Alias memory in domain 2 -> no access
	asm("mcr p15, 0, r0, c3, c0, 0 ");	// write DACR back
	CPWAIT(,r0);
	__JUMP(,lr);
	}
// Re-arm the user-memory guard so that kernel-side accesses to user memory
// fault unless made through the approved accessors (r0 used as scratch).
__NAKED__ void M::LockUserMemory()
	{
	USER_MEMORY_GUARD_ON(,r0,r0);
	__JUMP(,lr);
	}
// Drop the user-memory guard, permitting direct kernel-side access to user
// memory (r0 used as scratch).
__NAKED__ void M::UnlockUserMemory()
	{
	USER_MEMORY_GUARD_OFF(,r0,r0);
	__JUMP(,lr);
	}