// Copyright (c) 1997-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\memmodel\epoc\moving\arm\xmmu.cia
//
//

#include <e32cia.h>
#include <arm_mem.h>
#include "execs.h"
#include "cache_maintenance.h"

__NAKED__ void InvalidateTLBForPage(TLinAddr /*aLinAddr*/)
//
// Flush a specified virtual address from the DTLB.
// Flush from the ITLB as well, provided that doesn't require flushing the whole ITLB.
// ArmMmu::SyncCodeMappings() should follow this if flushing from the ITLB is essential.
//
	{
#ifdef __CPU_SPLIT_TLB
#if defined(__CPU_HAS_SINGLE_ENTRY_IDTLB_FLUSH)
	FLUSH_IDTLB_ENTRY(,r0);
#elif defined(__CPU_HAS_SINGLE_ENTRY_ITLB_FLUSH)
	FLUSH_DTLB_ENTRY(,r0);
	FLUSH_ITLB_ENTRY(,r0);
#else
	FLUSH_DTLB_ENTRY(,r0);
#endif
#else
	FLUSH_IDTLB_ENTRY(,r0);
#endif
	// don't need CPWAIT since it always happens in the function which calls this one
	__JUMP(,lr);
	}
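
// Note (illustration only): on CPUs without a single-entry ITLB flush the routine above
// leaves the ITLB untouched, so a caller that also needs instruction-side coherence
// follows it with ArmMmu::SyncCodeMappings() below, which performs the full ITLB flush
// only on such CPUs.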

__NAKED__ void ArmMmu::SyncCodeMappings()
//
// Flush the ITLB if it is not flushed page-by-page during unmapping of pages
//
	{
#if defined(__CPU_SPLIT_TLB) && !defined(__CPU_HAS_SINGLE_ENTRY_ITLB_FLUSH) && !defined(__CPU_HAS_SINGLE_ENTRY_IDTLB_FLUSH)
	asm("mov r2, #0 ");
	FLUSH_ITLB(,r2);
	CPWAIT(,r0);
#endif
	__JUMP(,lr);
	}

__NAKED__ void FlushTLBs()
	{
	asm("mov r0, #0 ");
	FLUSH_IDTLB(,r0);
	CPWAIT(,r0);
	__JUMP(,lr);
	}

__NAKED__ void FlushUnmapShadow(TLinAddr /*aRomAddr*/)
//
// Flush both I and D TLBs and flush page at aRomAddr from both caches
//
	{
	asm("mov r0, #0 ");
	FLUSH_IDTLB(,r0);		// flush both TLBs
	CPWAIT(,r0);
	__JUMP(,lr);
	}

// Generic cache/TLB flush function.
// Which things are flushed is determined by aMask.
// Call this with the system locked. Preemption can occur during this function.
__NAKED__ void ArmMmu::GenericFlush(TUint32 /*aMask*/)
	{
	asm("tst r1, #%a0" : : "i" (EFlushDMove|EFlushDDecommit));
	asm("orrne r1, r1, #%a0" : : "i" (EFlushDCache));
	asm("tst r1, #%a0" : : "i" (EFlushDMove|EFlushDDecommit|EFlushDPermChg));
	asm("orrne r1, r1, #%a0" : : "i" (EFlushDTLB));
	asm("tst r1, #%a0" : : "i" (EFlushIMove));
	asm("orrne r1, r1, #%a0" : : "i" (EFlushICache));
	asm("tst r1, #%a0" : : "i" (EFlushIMove|EFlushIPermChg));
	asm("orrne r1, r1, #%a0" : : "i" (EFlushITLB));
	asm("mov r2, #0 ");
#ifdef __CPU_SPLIT_CACHE
	asm("tst r1, #%a0" : : "i" (EFlushDCache) );
#else
	asm("tst r1, #%a0" : : "i" (EFlushDCache|EFlushICache) );
#endif
	asm("beq 1f ");
	asm("stmfd sp!, {r1,lr} ");
	asm("bl  " CSM_ZN16CacheMaintenance15OnProcessSwitchEv);	// flush data or unified cache
	asm("ldmfd sp!, {r1,lr} ");
	asm("mov r2, #0 ");
	asm("1: ");

#ifdef __CPU_SPLIT_CACHE
	asm("tst r1, #%a0" : : "i" (EFlushICache) );
	FLUSH_ICACHE(ne,r2);
#endif

#ifdef __CPU_SPLIT_TLB
	asm("tst r1, #%a0" : : "i" (EFlushDTLB) );
	FLUSH_DTLB(ne,r2);
	asm("tst r1, #%a0" : : "i" (EFlushITLB) );
	FLUSH_ITLB(ne,r2);
#else
	asm("tst r1, #%a0" : : "i" (EFlushDTLB|EFlushITLB) );
	FLUSH_IDTLB(ne,r2);
#endif
	CPWAIT(,r0);
	__JUMP(,lr);
	}
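
// Illustrative sketch: the mask derivation at the top of GenericFlush is equivalent to the
// following C logic, using the EFlush* flag values defined elsewhere in the memory model:
//
//   TUint32 mask = aMask;
//   if (mask & (EFlushDMove|EFlushDDecommit))
//       mask |= EFlushDCache;                       // moved/decommitted data may be cached
//   if (mask & (EFlushDMove|EFlushDDecommit|EFlushDPermChg))
//       mask |= EFlushDTLB;                         // data mappings changed -> DTLB stale
//   if (mask & EFlushIMove)
//       mask |= EFlushICache;                       // moved code may be in the ICache
//   if (mask & (EFlushIMove|EFlushIPermChg))
//       mask |= EFlushITLB;                         // code mappings changed -> ITLB stale
//
// The derived mask then selects which of the cache/TLB flush operations below are performed.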

#if defined(__CPU_XSCALE__)
// Special routine to process minicache attributes
__NAKED__ TUint MiniCacheConfig()
	{
	asm("mrc p15, 0, r0, c1, c0, 1 ");
#if defined (__CPU_XSCALE_MANZANO__)
	asm("and r0, r0, #0x30 ");	// 00=WBRA 01=WBRA 10=WTRA 11=WBRA
	asm("cmp r0, #0x20");		// is it WTRA?
	asm("moveq r0, #8");		// yes
	asm("movne r0, #10");		// no
#else
	asm("mov r0, r0, lsr #4 ");
	asm("and r0, r0, #3 ");		// 00=WBRA 01=WBWA 10=WTRA 11=UNP
	asm("eor r0, r0, #2 ");		// 10=WBRA 11=WBWA 00=WTRA 01=UNP
	asm("add r0, r0, #8 ");		// WBRA->AWBR, WBWA->AWBW, WTRA->AWTR, UNP->AWTW (can't occur)
#endif
	__JUMP(,lr);
	}
#endif
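
// Illustrative sketch (hypothetical helper names): on the non-Manzano path the routine
// reads the XScale auxiliary control register, extracts the two minicache attribute bits,
// and remaps them onto the same 8-11 result range that the Manzano path uses, roughly:
//
//   TUint aux  = ReadAuxCtl();         // hypothetical wrapper for "mrc p15, 0, r0, c1, c0, 1"
//   TUint attr = (aux >> 4) & 3;       // 0=WBRA 1=WBWA 2=WTRA 3=unpredictable
//   return (attr ^ 2) + 8;             // WTRA -> 8, WBRA -> 10, matching the Manzano branch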

__NAKED__ void ExecHandler::UnlockRamDrive()
	{
	asm("ldr r0, [r1, #%a0]" : : "i" (_FOFF(DThread,iOwningProcess)-_FOFF(DThread,iNThread)));
	asm("ldr r0, [r0, #%a0]" : : "i" _FOFF(DProcess,iS.iCaps));
// __KERNEL_CAPABILITY_CHECK
	asm("tst r0, #%a0 " : : "i" ((TInt)(1<<ECapabilityTCB)));
	__JUMP(eq,lr);				// don't unlock the RAM drive if the caller doesn't have the TCB capability

	// fall through to unlock
	}

EXPORT_C __NAKED__ void TInternalRamDrive::Unlock()
	{
	asm("mrc p15, 0, r0, c3, c0, 0 ");
	asm("orr r0, r0, #0xc0 ");
	asm("mcr p15, 0, r0, c3, c0, 0 ");
	CPWAIT(,r0);
	__JUMP(,lr);
	}

EXPORT_C __NAKED__ void TInternalRamDrive::Lock()
	{
	asm("mrc p15, 0, r0, c3, c0, 0 ");
	asm("bic r0, r0, #0xc0 ");
	asm("mcr p15, 0, r0, c3, c0, 0 ");
	CPWAIT(,r0);
	__JUMP(,lr);
	}
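
// Note (illustration only): the two routines above toggle bits 7:6 of the CP15 domain
// access control register (DACR), i.e. the two-bit field for MMU domain 3, which these
// mappings use; 0b11 grants manager access (Unlock) and 0b00 revokes all access (Lock):
//
//   dacr |=  (3u << 6);   // Unlock: domain 3 -> manager access
//   dacr &= ~(3u << 6);   // Lock:   domain 3 -> no access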

#if defined(__CPU_WRITE_BACK_CACHE)
#if defined(__CPU_HAS_SINGLE_ENTRY_DCACHE_FLUSH)
__NAKED__ void CopyPageForRemap32(TLinAddr /*aDest*/, TLinAddr /*aSrc*/)
	{
	// Source and destination 4k page aligned (and thus cache aligned)
	// Fixed copy size of 4k
	// But.. after each cache line we need to purge the line from the cache
	// and when we're done we need to drain the write buffer
	// We are assuming 32-byte cache lines here but this function is only used
	// when this is the case, so it's ok.

	asm("stmfd sp!, {r4-r9} ");
	asm("1: ");
	PLD_ioff(1, 32);
	asm("mov ip, r1 ");
	asm("ldmia r1!, {r2-r9} ");
	asm("tst r1, #0xff0 ");
	asm("stmia r0!, {r2-r9} ");
	PURGE_DCACHE_LINE(,ip);
	asm("bne 1b ");
	asm("ldmfd sp!, {r4-r9} ");
	DRAIN_WRITE_BUFFER(,r0,r1);
	CPWAIT(,r0);
	__JUMP(,lr);
	}

__NAKED__ void CopyPageForRemap16(TLinAddr /*aDest*/, TLinAddr /*aSrc*/)
	{
	// Source and destination 4k page aligned (and thus cache aligned)
	// Fixed copy size of 4k
	// But.. after each cache line we need to purge the line from the cache
	// and when we're done we need to drain the write buffer
	// We are assuming 16-byte cache lines here but this function is only used
	// when this is the case, so it's ok.

	asm("stmfd sp!, {r4-r5} ");
	asm("1: ");
	PLD_ioff(1, 16);
	asm("mov ip, r1 ");
	asm("ldmia r1!, {r2-r5} ");
	asm("tst r1, #0xff0 ");
	asm("stmia r0!, {r2-r5} ");
	PURGE_DCACHE_LINE(,ip);
	asm("bne 1b ");
	asm("ldmfd sp!, {r4-r5} ");
	DRAIN_WRITE_BUFFER(,r0,r1);
	CPWAIT(,r0);
	__JUMP(,lr);
	}
#endif
#else //!__CPU_WRITE_BACK_CACHE
__NAKED__ void CopyPageForRemapWT(TLinAddr /*aDest*/, TLinAddr /*aSrc*/)
	{
	// Source and destination 4k page aligned (and thus cache aligned)
	// Fixed copy size of 4k
	// Writethrough cache means no purging is required, but
	// when we're done we still need to drain the write buffer

	asm("stmfd sp!, {r4-r8} ");
	asm("1: ");
	PLD_ioff(1, 16);
	asm("ldmia r1!, {r2-r8,ip} ");
	asm("tst r1, #0xff0 ");
	asm("stmia r0!, {r2-r8,ip} ");
	asm("bne 1b ");
	asm("ldmfd sp!, {r4-r8} ");		// restore exactly the registers saved above; ip is a scratch register
	DRAIN_WRITE_BUFFER(,r0,r1);
	CPWAIT(,r0);
	__JUMP(,lr);
	}
#endif
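
// Illustrative sketch (hypothetical C version): all CopyPageForRemap variants implement
// the same fixed-size 4KB copy; they differ only in how many words are moved per
// iteration and whether each source cache line is purged afterwards:
//
//   void CopyPageForRemapSketch(TLinAddr aDest, TLinAddr aSrc)
//       {
//       TUint32* d = (TUint32*)aDest;
//       const TUint32* s = (const TUint32*)aSrc;
//       for (TInt i = 0; i < 0x1000 / (TInt)sizeof(TUint32); ++i)
//           d[i] = s[i];
//       // write-back variants purge each copied source line and finally drain the write buffer
//       }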

#ifdef __MMU_MACHINE_CODED__
__NAKED__ void ImpMmu::MapRamPages(TInt /*anId*/, TLinAddr /*anAddr*/, TPhysAddr* /*aPageList*/, TInt /*aNumPages*/, TPte /*aPtePerm*/)
//
// Map a list of physical RAM pages to a specified linear address using a specified page table and
// specified PTE permissions. Call this with the kernel locked.
//
	{
	// enter with r0=&MM::TheMmu, r1=anId, r2=anAddr, r3=aPageList, [sp]=aNumPages, [sp+4]=aPtePerm
	asm("stmfd sp!, {r4-r6,lr} ");
	asm("mov r4, r1 ");						// r4=anId
	asm("mov r5, r2 ");						// r5=anAddr
	asm("mov r6, r3 ");						// r6=aPageList
	asm("bl  " CSM_ZN6ImpMmu16UnlockPageTablesEv);	// unlock page tables
	asm("mov r0, r5, lsr #20 ");			// r0=pdeIndex
	asm("bic r1, r5, r0, lsl #20 ");		// r1=anAddr & 0xfffff
	asm("and r1, r1, #0xff000 ");			// r1=ptOffset<<12
	asm("mov r4, r4, lsl #10 ");
	asm("add r4, r4, #%a0" : : "i" ((TInt)KPageTableLinearBase));	// r4=linear address of page table anId
	asm("add r1, r4, r1, lsr #10 ");		// r1 points to first PTE to add
	asm("ldr r2, [sp, #16] ");				// r2=number of pages to map
	asm("mov r0, r0, lsl #2 ");
	asm("add r0, r0, #%a0" : : "i" ((TInt)KPageDirectoryLinearAddress));
	asm("add r0, r0, #%a0" : : "i" ((TInt)KPageTableInfoOffset));	// r0->page table info entry for anAddr
	asm("ldr r3, [r0] ");
	asm("add r3, r3, r2 ");					// add number of pages to pages present count
	asm("str r3, [r0] ");
	asm("ldr r3, [sp, #20] ");				// r3=PTE permissions
	asm("b map_ram_pages2 ");

	asm("map_ram_pages1: ");
	asm("ldr lr, [r6], #4 ");				// get physical address of page and step to next in list
	asm("orr lr, lr, r3 ");					// OR in permissions to give PTE
	asm("str lr, [r1], #4 ");				// store PTE and step to next

	asm("map_ram_pages2: ");
	asm("subs r2, r2, #1 ");
	asm("bge map_ram_pages1 ");				// loop for all pages
	asm("ldmfd sp!, {r4-r6,lr} ");
	DRAIN_WRITE_BUFFER(,r0,r0);
	CPWAIT(,r0);
	asm("b  " CSM_ZN6ImpMmu14LockPageTablesEv);		// lock page tables and exit
	}
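
// Illustrative sketch (hypothetical helper names): with the page tables temporarily
// unlocked, the loop above builds one PTE per page in the list and bumps the pages-present
// count kept in the low 16 bits of the page table info word, roughly:
//
//   TPte* pte = FirstPte(anId, anAddr);        // hypothetical helper for the PTE address
//   TUint32& info = PtInfoWord(anAddr);        // hypothetical helper; low 16 bits = pages present
//   info += aNumPages;
//   for (TInt i = 0; i < aNumPages; ++i)
//       *pte++ = aPageList[i] | aPtePerm;      // physical address OR'd with the PTE permission bits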

__NAKED__ void ImpMmu::MapPhysicalPages(TInt /*anId*/, TLinAddr /*anAddr*/, TPhysAddr /*aPhysAddr*/, TInt /*aNumPages*/, TPte /*aPtePerm*/)
//
// Map consecutive physical pages to a specified linear address using a specified page table and
// specified PTE permissions. Call this with the kernel locked.
//
	{
	// enter with r0=&MM::TheMmu, r1=anId, r2=anAddr, r3=aPhysAddr, [sp]=aNumPages, [sp+4]=aPtePerm
	asm("stmfd sp!, {r4-r6,lr} ");
	asm("mov r4, r1 ");						// r4=anId
	asm("mov r5, r2 ");						// r5=anAddr
	asm("mov r6, r3 ");						// r6=aPhysAddr
	asm("bl  " CSM_ZN6ImpMmu16UnlockPageTablesEv);	// unlock page tables
	asm("mov r0, r5, lsr #20 ");			// r0=pdeIndex
	asm("bic r1, r5, r0, lsl #20 ");		// r1=anAddr & 0xfffff
	asm("and r1, r1, #0xff000 ");			// r1=ptOffset<<12
	asm("mov r4, r4, lsl #10 ");
	asm("add r4, r4, #%a0" : : "i" ((TInt)KPageTableLinearBase));	// r4=linear address of page table anId
	asm("add r1, r4, r1, lsr #10 ");		// r1 points to first PTE to add
	asm("ldr r2, [sp, #16] ");				// r2=number of pages to map
	asm("mov r0, r0, lsl #2 ");
	asm("add r0, r0, #%a0" : : "i" ((TInt)KPageDirectoryLinearAddress));
	asm("add r0, r0, #%a0" : : "i" ((TInt)KPageTableInfoOffset));	// r0->page table info entry for anAddr
	asm("ldr r3, [r0] ");
	asm("add r3, r3, r2 ");					// add number of pages to pages present count
	asm("str r3, [r0] ");
	asm("ldr r3, [sp, #20] ");				// r3=PTE permissions
	asm("orr r3, r3, r6 ");					// OR in physical address to give first PTE
	asm("b map_phys_pages2 ");

	asm("map_phys_pages1: ");
	asm("str r3, [r1], #4 ");				// store PTE and step to next
	asm("add r3, r3, #0x1000 ");			// step physical address on by page size

	asm("map_phys_pages2: ");
	asm("subs r2, r2, #1 ");
	asm("bge map_phys_pages1 ");			// loop for all pages
	asm("ldmfd sp!, {r4-r6,lr} ");
	DRAIN_WRITE_BUFFER(,r0,r0);
	CPWAIT(,r0);
	asm("b  " CSM_ZN6ImpMmu14LockPageTablesEv);		// lock page tables and exit
	}

__NAKED__ TInt ImpMmu::UnmapPages(TInt /*anId*/, TLinAddr /*anAddr*/, TInt /*aNumPages*/, TPhysAddr* /*aPageList*/, TInt& /*aNumPtes*/)
//
// Unmap a specified area at address anAddr mapped by page table anId. Place physical addresses of unmapped
// RAM pages into aPageList and count of unmapped pages into aNumPtes. Return number of pages still
// mapped using this page table. Call this with the kernel locked.
// Note that a write-back cache may also require flushing after this.
//
	{
	// Enter with r0=this, r1=anId, r2=anAddr, r3=aNumPages, [sp]=aPageList, [sp+4]=&aNumPtes
	asm("stmfd sp!, {r4-r9,lr} ");
	asm("mov r4, r0 ");
	asm("mov r5, r1 ");
	asm("mov r6, r2 ");
	asm("mov r7, r3 ");
	asm("bl  " CSM_ZN6ImpMmu16UnlockPageTablesEv);	// unlock the page tables
	asm("mov r8, r6, lsr #20 ");			// r8=pdeIndex
	asm("bic r0, r6, r8, lsl #20 ");		// r0=anAddr&0xfffff
	asm("and r0, r0, #0xff000 ");			// r0=ptOffset<<12
	asm("mov r5, r5, lsl #10 ");			// convert page table id to linear address
	asm("add r5, r5, r0, lsr #10 ");		// add offset within page table
	asm("add r5, r5, #%a0" : : "i" ((TInt)KPageTableLinearBase));	// r5=pte address
	asm("mov ip, #0 ");						// ip=0 throughout loop
	asm("mov r3, #0 ");						// r3 counts present pages
	asm("ldr r9, [sp, #28] ");				// r9=aPageList
	asm("mov r2, #0xff ");
	asm("orr r2, r2, #0xf00 ");				// r2=BIC mask for PTE->page physical address
	asm("b unmap_pages_2 ");

	asm("unmap_pages_1: ");
	asm("ldr r0, [r5] ");					// fetch PTE
	asm("str ip, [r5], #4 ");				// clear PTE
	asm("tst r0, #3 ");						// test if page present
#ifdef __CPU_SPLIT_TLB
#if defined(__CPU_HAS_SINGLE_ENTRY_IDTLB_FLUSH)
	FLUSH_IDTLB_ENTRY(ne,r6);				// flush page from both TLBs if possible
#elif defined(__CPU_HAS_SINGLE_ENTRY_ITLB_FLUSH)
	FLUSH_DTLB_ENTRY(ne,r6);
	FLUSH_ITLB_ENTRY(ne,r6);
#else
	FLUSH_DTLB_ENTRY(ne,r6);				// no single-entry ITLB flush, complete ITLB flush will be done later
#endif
#else
	FLUSH_IDTLB_ENTRY(ne,r6);
#endif
	asm("bicne r0, r0, r2 ");				// ... r0=page physical address ...
	asm("strne r0, [r9], #4 ");				// ... *aPageList++=r0 ...
	asm("addne r3, r3, #1 ");				// ... increment present pages count
	asm("add r6, r6, #0x1000 ");			// increment address by page size

	asm("unmap_pages_2: ");
	asm("subs r7, r7, #1 ");				// decrement page count
	asm("bge unmap_pages_1 ");

	asm("ldr r0, [sp, #32] ");				// r0=&aNumPtes
	asm("str r3, [r0] ");					// aNumPtes=r3
	asm("mov r0, #%a0" : : "i" ((TInt)KPageDirectoryLinearAddress));
	asm("add r0, r0, #%a0" : : "i" ((TInt)KPageTableInfoOffset));	// r0->base of page table info array
	asm("add r0, r0, r8, lsl #2 ");			// r0 points to PTINFO entry for this pde
	asm("ldr r1, [r0] ");					// r1[31:16]=page table id, r1[15:0]=present pages
	asm("sub r1, r1, r3 ");					// subtract number of pages unmapped
	asm("str r1, [r0] ");					// store new pages present count
	asm("mov r4, r1, lsl #16 ");			// shift out top 16 bits and store in r4
	asm("bl  " CSM_ZN6ImpMmu14LockPageTablesEv);		// lock the page tables
	asm("mov r0, r4, lsr #16 ");			// r0=number of pages remaining
	DRAIN_WRITE_BUFFER(,r0,r1);
	CPWAIT(,r1);
	asm("ldmfd sp!, {r4-r9,pc} ");			// restore registers and return
	}
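
// Illustrative sketch (hypothetical names): in outline the unmapping loop above does the
// following, with the single-entry TLB flush folded into each iteration:
//
//   TInt present = 0;
//   for (TInt i = 0; i < aNumPages; ++i, anAddr += 0x1000)
//       {
//       TPte pte = ptePtr[i];                      // ptePtr: hypothetical pointer to the first PTE
//       ptePtr[i] = 0;                             // clear the mapping
//       if (pte & 3)                               // was a page present?
//           {
//           InvalidateTLBForPage(anAddr);          // done inline with FLUSH_*TLB_ENTRY above
//           aPageList[present++] = pte & ~0xFFFu;  // record its physical address
//           }
//       }
//   aNumPtes = present;                            // pages actually unmapped
//   // the return value is the pages-present count read back from the page table info entry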

__NAKED__ TInt Mmu::PageTableId(TLinAddr /*anAddr*/)
	{
	asm("mov r1, r1, lsr #20 ");			// r1=anAddr>>20
	asm("mov r0, #%a0" : : "i" ((TInt)KPageDirectoryLinearAddress));
	asm("add r0, r0, #%a0" : : "i" ((TInt)KPageTableInfoOffset));	// r0->base of page table info array
	asm("mrc p15, 0, r2, c3, c0, 0 ");		// r2=current DACR
	asm("orr r3, r2, #0x30 ");
	asm("mcr p15, 0, r3, c3, c0, 0 ");		// unlock page tables
	CPWAIT(,r3);
	asm("ldr r0, [r0, r1, lsl #2] ");		// fetch page table info entry for anAddr
	asm("mcr p15, 0, r2, c3, c0, 0 ");		// lock page tables
	asm("cmn r0, #0x10000 ");				// test if page table id=0xffff
	asm("movcc r0, r0, lsr #16 ");			// if not, return page table id
	asm("mvncs r0, #0 ");					// else return -1
	__JUMP(,lr);
	}
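
// Note (illustration only, hypothetical helper): each page table info word holds the page
// table id in its top 16 bits, with 0xffff meaning "no page table assigned", so the routine
// above is roughly:
//
//   TUint32 info = PtInfoWord(anAddr);
//   return (info >> 16) == 0xffffu ? -1 : (TInt)(info >> 16);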

__NAKED__ void ImpMmu::AssignPageTable(TInt /*anId*/, TLinAddr /*anAddr*/, TPde /*aPdePerm*/)
//
// Assign an allocated page table to map a given linear address with specified permissions.
// This function assumes the page table initially contains no physical RAM page mappings.
// This should be called with the kernel locked.
//
	{
	// on entry r0=&MM::TheMmu, r1=anId, r2=anAddr, r3=aPdePerm
	asm("stmfd sp!, {r4,lr} ");
	asm("and r4, r1, #3 ");					// r4=bottom 2 bits of anId (offset of page table within page)
	asm("orr r3, r3, r4, lsl #10 ");		// combine these bits with PDE permissions
	asm("bic r0, r1, #3 ");					// r0=anId with bottom 2 bits cleared
	asm("add r0, r0, #%a0" : : "i" ((TInt)KPageTableLinearBase));	// r0=address of PTE mapping page table anId
	asm("mov r1, r1, lsl #16 ");			// put ptid into top 16 bits of r1, zero bottom 16 bits
	asm("mov r2, r2, lsr #20 ");			// r2=anAddr>>20
	asm("mov r2, r2, lsl #2 ");				// r2=pdeIndex*4
	asm("add r2, r2, #%a0" : : "i" ((TInt)KPageDirectoryLinearAddress));	// r2 points to PDE for anAddr
	asm("mrc p15, 0, lr, c3, c0, 0 ");		// lr=current DACR
	asm("orr r4, lr, #0x30 ");
	asm("mcr p15, 0, r4, c3, c0, 0 ");		// unlock page tables
	CPWAIT(,r4);
	asm("ldr r0, [r0] ");					// fetch page table PTE
	asm("mov r0, r0, lsr #12 ");			// shift out permission bits, leave phys addr>>12
	asm("orr r3, r3, r0, lsl #12 ");		// r3=PDE word (add PDE permissions and offset within page)
	asm("str r3, [r2] ");					// store PDE
	asm("add r2, r2, #%a0" : : "i" ((TInt)KPageTableInfoOffset));	// r2 points to PT info entry
	asm("str r1, [r2] ");					// PTinfo top 16=page table ID, PT info bottom 16=pages present=0 (assumption)
	asm("mcr p15, 0, lr, c3, c0, 0 ");		// lock page tables
	DRAIN_WRITE_BUFFER(,r0,r0);
	CPWAIT(,r0);
	asm("ldmfd sp!, {r4,pc} ");
	}
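
// Illustrative sketch (hypothetical helper names): the PDE written above is built from the
// physical address of the page containing the page table plus the sub-page offset encoded
// in the low two bits of anId (page tables are 1KB, four to a 4KB page), roughly:
//
//   TPte ptPte = PageTablePte(anId & ~3);               // PTE that maps the page holding this page table
//   TPhysAddr ptPhys = ptPte & ~0xFFFu;                 // physical address of that page
//   TPde pde = ptPhys | ((anId & 3) << 10) | aPdePerm;  // select the 1KB page table within the page
//   PageDirectory[anAddr >> 20] = pde;
//   PtInfoWord(anAddr) = (TUint32)anId << 16;           // id in top 16 bits, pages present = 0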

__NAKED__ void ImpMmu::UnassignPageTable(TLinAddr /*anAddr*/)
//
// Unassign a now-empty page table currently mapping the specified linear address.
// We assume that TLB and/or cache flushing has been done when any RAM pages were unmapped.
// Call this with the kernel locked.
//
	{
	asm("mov r1, r1, lsr #20 ");			// r1=anAddr>>20
	asm("mov r0, #%a0" : : "i" ((TInt)KPageDirectoryLinearAddress));
	asm("add r0, r0, r1, lsl #2 ");			// r0 points to page directory entry for anAddr
	asm("ldr r1, __NotPresentPtInfo ");		// r1=PTInfo entry for not present PDE
	asm("mrc p15, 0, r2, c3, c0, 0 ");		// r2=current DACR
	asm("orr r3, r2, #0x30 ");
	asm("mcr p15, 0, r3, c3, c0, 0 ");		// unlock page tables
	CPWAIT(,r3);
	asm("mov r3, #0 ");
	asm("str r3, [r0] ");					// clear the PDE
	asm("add r0, r0, #%a0" : : "i" ((TInt)KPageTableInfoOffset));	// step r0 on to PT info entry
	asm("str r1, [r0] ");					// clear the PT info entry
	asm("mcr p15, 0, r2, c3, c0, 0 ");		// lock page tables
	DRAIN_WRITE_BUFFER(,r0,r0);
	CPWAIT(,r0);
	__JUMP(,lr);

	asm("__NotPresentPtInfo: ");
	asm(".word 0xffff0000 ");
	}
#endif