os/kernelhwsrv/kernel/eka/memmodel/epoc/moving/arm/xmmu.cia
changeset 0 bde4ae8d615e
// Copyright (c) 1997-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\memmodel\epoc\moving\arm\xmmu.cia
//
//

#include <e32cia.h>
#include <arm_mem.h>
#include "execs.h"
#include "cache_maintenance.h"

__NAKED__ void InvalidateTLBForPage(TLinAddr /*aLinAddr*/)
//
// Flush a specified virtual address from the DTLB.
// Flush from the ITLB as well, provided that doesn't require flushing the whole ITLB.
// ArmMmu::SyncCodeMappings() should follow this if flushing from the ITLB is essential.
//
	{
#ifdef __CPU_SPLIT_TLB
#if defined(__CPU_HAS_SINGLE_ENTRY_IDTLB_FLUSH)
	FLUSH_IDTLB_ENTRY(,r0);
#elif defined(__CPU_HAS_SINGLE_ENTRY_ITLB_FLUSH)
	FLUSH_DTLB_ENTRY(,r0);
	FLUSH_ITLB_ENTRY(,r0);
#else
	FLUSH_DTLB_ENTRY(,r0);
#endif
#else
	FLUSH_IDTLB_ENTRY(,r0);
#endif
	// don't need CPWAIT since it always happens in the function which calls this one
	__JUMP(,lr);
	}
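
// Illustrative caller pattern (a sketch, not kernel source; 'mmu' stands for
// whatever ArmMmu instance the caller holds): where ITLB coherence is
// essential on cores lacking a single-entry ITLB flush, pair the calls:
//     InvalidateTLBForPage(aLinAddr);	// DTLB entry (and ITLB entry where supported)
//     mmu.SyncCodeMappings();			// whole-ITLB flush on the remaining cores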

__NAKED__ void ArmMmu::SyncCodeMappings()
//
// Flush the ITLB if it is not flushed page-by-page during unmapping of pages
//
	{
#if defined(__CPU_SPLIT_TLB) && !defined(__CPU_HAS_SINGLE_ENTRY_ITLB_FLUSH) && !defined(__CPU_HAS_SINGLE_ENTRY_IDTLB_FLUSH)
	asm("mov r2, #0 ");
	FLUSH_ITLB(,r2);
	CPWAIT(,r0);
#endif
	__JUMP(,lr);
	}

__NAKED__ void FlushTLBs()
	{
	asm("mov r0, #0 ");
	FLUSH_IDTLB(,r0);
	CPWAIT(,r0);
	__JUMP(,lr);
	}

__NAKED__ void FlushUnmapShadow(TLinAddr /*aRomAddr*/)
//
// Flush both I and D TLBs after unmapping the shadow page at aRomAddr.
// Note: this routine only touches the TLBs; any cache maintenance for the
// page must be done separately.
//
	{
	asm("mov r0, #0 ");
	FLUSH_IDTLB(,r0);		// flush both TLBs
	CPWAIT(,r0);
	__JUMP(,lr);
	}

// Generic cache/TLB flush function.
// Which things are flushed is determined by aMask.
// Call this with the system locked. Preemption can occur during this function.
__NAKED__ void ArmMmu::GenericFlush(TUint32 /*aMask*/)
	{
	asm("tst r1, #%a0" : : "i" (EFlushDMove|EFlushDDecommit));
	asm("orrne r1, r1, #%a0" : : "i" (EFlushDCache));
	asm("tst r1, #%a0" : : "i" (EFlushDMove|EFlushDDecommit|EFlushDPermChg));
	asm("orrne r1, r1, #%a0" : : "i" (EFlushDTLB));
	asm("tst r1, #%a0" : : "i" (EFlushIMove));
	asm("orrne r1, r1, #%a0" : : "i" (EFlushICache));
	asm("tst r1, #%a0" : : "i" (EFlushIMove|EFlushIPermChg));
	asm("orrne r1, r1, #%a0" : : "i" (EFlushITLB));
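	// The tst/orrne pairs above derive the full flush set from the requested
	// operations; in C terms (an equivalence sketch, r1 holds aMask):
	//   if (aMask & (EFlushDMove|EFlushDDecommit))                aMask |= EFlushDCache;
	//   if (aMask & (EFlushDMove|EFlushDDecommit|EFlushDPermChg)) aMask |= EFlushDTLB;
	//   if (aMask & EFlushIMove)                                  aMask |= EFlushICache;
	//   if (aMask & (EFlushIMove|EFlushIPermChg))                 aMask |= EFlushITLB;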
	asm("mov r2, #0 ");
#ifdef __CPU_SPLIT_CACHE
	asm("tst r1, #%a0" : : "i" (EFlushDCache) );
#else
	asm("tst r1, #%a0" : : "i" (EFlushDCache|EFlushICache) );
#endif
	asm("beq 1f ");
	asm("stmfd sp!, {r1,lr} ");
	asm("bl  " CSM_ZN16CacheMaintenance15OnProcessSwitchEv);	// flush data or unified cache
	asm("ldmfd sp!, {r1,lr} ");
	asm("mov r2, #0 ");
	asm("1: ");

#ifdef __CPU_SPLIT_CACHE
	asm("tst r1, #%a0" : : "i" (EFlushICache) );
	FLUSH_ICACHE(ne,r2);
#endif

#ifdef __CPU_SPLIT_TLB
	asm("tst r1, #%a0" : : "i" (EFlushDTLB) );
	FLUSH_DTLB(ne,r2);
	asm("tst r1, #%a0" : : "i" (EFlushITLB) );
	FLUSH_ITLB(ne,r2);
#else
	asm("tst r1, #%a0" : : "i" (EFlushDTLB|EFlushITLB) );
	FLUSH_IDTLB(ne,r2);
#endif
	CPWAIT(,r0);
	__JUMP(,lr);
	}

#if defined(__CPU_XSCALE__)
// Special routine to process minicache attributes
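// (Reader's note, an assumption based on the XScale core documentation: the
// MRC below reads the auxiliary control register, CP15 c1,c0,1, whose bits
// [5:4] hold the mini-data cache attribute field that is decoded here.)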
__NAKED__ TUint MiniCacheConfig()
	{
	asm("mrc p15, 0, r0, c1, c0, 1 ");
#if defined (__CPU_XSCALE_MANZANO__)
	asm("and r0, r0, #0x30 ");	// 00=WBRA 01=WBRA 10=WTRA 11=WBRA
	asm("cmp r0, #0x20");		// is it WTRA?
	asm("moveq r0, #8");		// yes
	asm("movne r0, #10");		// no
#else
	asm("mov r0, r0, lsr #4 ");
	asm("and r0, r0, #3 ");		// 00=WBRA 01=WBWA 10=WTRA 11=UNP
	asm("bic r0, r0, #2 ");		// 10=WBRA 11=WBWA 00=WTRA 01=UNP
	asm("add r0, r0, #8 ");		// WBRA->AWBR, WBWA->AWBW, WTRA->AWTR, UNP->AWTW (can't occur)
#endif
	__JUMP(,lr);
	}
#endif

__NAKED__ void ExecHandler::UnlockRamDrive()
	{
	asm("ldr r0, [r1, #%a0]" : : "i" (_FOFF(DThread,iOwningProcess)-_FOFF(DThread,iNThread)));
	asm("ldr r0, [r0, #%a0]" : : "i" _FOFF(DProcess,iS.iCaps));
// __KERNEL_CAPABILITY_CHECK
	asm("tst r0, #%a0 " : : "i" ((TInt)(1<<ECapabilityTCB)));
	__JUMP(eq,lr);				// don't unlock the RAM drive if the caller lacks the TCB capability

	// fall through to unlock
	}

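// Unlock/Lock toggle the RAM drive's MMU domain in the Domain Access Control
// Register. The constant 0xc0 covers DACR bits [7:6], the two-bit field for
// domain 3 (each domain n occupies bits [2n+1:2n]): setting the field to 0b11
// makes the domain 'manager' (permission checks bypassed), while clearing it
// to 0b00 makes it 'no access'.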
EXPORT_C __NAKED__ void TInternalRamDrive::Unlock()
	{
	asm("mrc p15, 0, r0, c3, c0, 0 ");
	asm("orr r0, r0, #0xc0 ");
	asm("mcr p15, 0, r0, c3, c0, 0 ");
	CPWAIT(,r0);
	__JUMP(,lr);
	}

EXPORT_C __NAKED__ void TInternalRamDrive::Lock()
	{
	asm("mrc p15, 0, r0, c3, c0, 0 ");
	asm("bic r0, r0, #0xc0 ");
	asm("mcr p15, 0, r0, c3, c0, 0 ");
	CPWAIT(,r0);
	__JUMP(,lr);
	}

#if defined(__CPU_WRITE_BACK_CACHE)
#if defined(__CPU_HAS_SINGLE_ENTRY_DCACHE_FLUSH)
__NAKED__ void CopyPageForRemap32(TLinAddr /*aDest*/, TLinAddr /*aSrc*/)
	{
	// Source and destination 4k page aligned (and thus cache aligned)
	// Fixed copy size of 4k
	// But.. after each cache line we need to purge the line from the cache
	// and when we're done we need to drain the write buffer
	// We are assuming 32-byte cache lines here but this function is only used
	// when this is the case, so it's ok.
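	// Loop-termination trick: 'tst r1, #0xff0' tests bits [11:4] of the source
	// pointer, which only become all-zero once r1 has advanced to the next 4k
	// boundary, so the 'bne' below loops exactly 4096/32 = 128 times.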

	asm("stmfd sp!, {r4-r9} ");
	asm("1: ");
	PLD_ioff(1, 32);
	asm("mov ip, r1 ");
	asm("ldmia r1!, {r2-r9} ");
	asm("tst r1, #0xff0 ");
	asm("stmia r0!, {r2-r9} ");
	PURGE_DCACHE_LINE(,ip);
	asm("bne 1b ");
	asm("ldmfd sp!, {r4-r9} ");
	DRAIN_WRITE_BUFFER(,r0,r1);
	CPWAIT(,r0);
	__JUMP(,lr);
	}

__NAKED__ void CopyPageForRemap16(TLinAddr /*aDest*/, TLinAddr /*aSrc*/)
	{
	// Source and destination 4k page aligned (and thus cache aligned)
	// Fixed copy size of 4k
	// But.. after each cache line we need to purge the line from the cache
	// and when we're done we need to drain the write buffer
	// We are assuming 16-byte cache lines here but this function is only used
	// when this is the case, so it's ok.
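	// (Same termination trick as the 32-byte variant above; with 16-byte lines
	// the loop runs 4096/16 = 256 times.)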

	asm("stmfd sp!, {r4-r5} ");
	asm("1: ");
	PLD_ioff(1, 16);
	asm("mov ip, r1 ");
	asm("ldmia r1!, {r2-r5} ");
	asm("tst r1, #0xff0 ");
	asm("stmia r0!, {r2-r5} ");
	PURGE_DCACHE_LINE(,ip);
	asm("bne 1b ");
	asm("ldmfd sp!, {r4-r5} ");
	DRAIN_WRITE_BUFFER(,r0,r1);
	CPWAIT(,r0);
	__JUMP(,lr);
	}
#endif
#else // !__CPU_WRITE_BACK_CACHE
__NAKED__ void CopyPageForRemapWT(TLinAddr /*aDest*/, TLinAddr /*aSrc*/)
	{
	// Source and destination 4k page aligned (and thus cache aligned)
	// Fixed copy size of 4k
	// Writethrough cache means no purging is required, but
	// when we're done we still need to drain the write buffer
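	// Each pass of the loop below moves 32 bytes (r2-r8 plus ip, eight words),
	// so the 'tst r1, #0xff0' trick again terminates after 128 iterations.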

	asm("stmfd sp!, {r4-r8} ");
	asm("1: ");
	PLD_ioff(1, 16);
	asm("ldmia r1!, {r2-r8,ip} ");
	asm("tst r1, #0xff0 ");
	asm("stmia r0!, {r2-r8,ip} ");
	asm("bne 1b ");
	asm("ldmfd sp!, {r4-r8} ");		// restore exactly the registers saved above (ip is scratch)
	DRAIN_WRITE_BUFFER(,r0,r1);
	CPWAIT(,r0);
	__JUMP(,lr);
	}
#endif

#ifdef __MMU_MACHINE_CODED__
__NAKED__ void ImpMmu::MapRamPages(TInt /*anId*/, TLinAddr /*anAddr*/, TPhysAddr* /*aPageList*/, TInt /*aNumPages*/, TPte /*aPtePerm*/)
//
// Map a list of physical RAM pages to a specified linear address using a specified page table and
// specified PTE permissions. Call this with the kernel locked.
//
	{
	// enter with r0=&MM::TheMmu, r1=anId, r2=anAddr, r3=aPageList, [sp]=aNumPages, [sp+4]=aPtePerm
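	// Address decomposition performed below (C sketch of the same arithmetic):
	//   pdeIndex = anAddr >> 20;                              // 1MB section index
	//   pte      = KPageTableLinearBase + (anId << 10)        // page tables are 1K apart
	//              + ((anAddr & 0xff000) >> 10);              // one 4-byte PTE per 4k page
	//   ptinfo   = KPageDirectoryLinearAddress + KPageTableInfoOffset + pdeIndex*4;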
	asm("stmfd sp!, {r4-r6,lr} ");
	asm("mov r4, r1 ");						// r4=anId
	asm("mov r5, r2 ");						// r5=anAddr
	asm("mov r6, r3 ");						// r6=aPageList
	asm("bl  " CSM_ZN6ImpMmu16UnlockPageTablesEv);	// unlock page tables
	asm("mov r0, r5, lsr #20 ");			// r0=pdeIndex
	asm("bic r1, r5, r0, lsl #20 ");		// r1=anAddr & 0xfffff
	asm("and r1, r1, #0xff000 ");			// r1=ptOffset<<12
	asm("mov r4, r4, lsl #10 ");
	asm("add r4, r4, #%a0" : : "i" ((TInt)KPageTableLinearBase));	// r4=linear address of page table anId
	asm("add r1, r4, r1, lsr #10 ");		// r1 points to first PTE to add
	asm("ldr r2, [sp, #16] ");				// r2=number of pages to map
	asm("mov r0, r0, lsl #2 ");
	asm("add r0, r0, #%a0" : : "i" ((TInt)KPageDirectoryLinearAddress));
	asm("add r0, r0, #%a0" : : "i" ((TInt)KPageTableInfoOffset));	// r0->page table info entry for anAddr
	asm("ldr r3, [r0] ");
	asm("add r3, r3, r2 ");					// add number of pages to pages present count
	asm("str r3, [r0] ");
	asm("ldr r3, [sp, #20] ");				// r3=PTE permissions
	asm("b map_ram_pages2 ");

	asm("map_ram_pages1: ");
	asm("ldr lr, [r6], #4 ");				// get physical address of page and step to next in list
	asm("orr lr, lr, r3 ");					// OR in permissions to give PTE
	asm("str lr, [r1], #4 ");				// store PTE and step to next

	asm("map_ram_pages2: ");
	asm("subs r2, r2, #1 ");
	asm("bge map_ram_pages1 ");				// loop for all pages
	asm("ldmfd sp!, {r4-r6,lr} ");
	DRAIN_WRITE_BUFFER(,r0,r0);
	CPWAIT(,r0);
	asm("b  " CSM_ZN6ImpMmu14LockPageTablesEv);		// lock page tables and exit
	}

__NAKED__ void ImpMmu::MapPhysicalPages(TInt /*anId*/, TLinAddr /*anAddr*/, TPhysAddr /*aPhysAddr*/, TInt /*aNumPages*/, TPte /*aPtePerm*/)
//
// Map consecutive physical pages to a specified linear address using a specified page table and
// specified PTE permissions. Call this with the kernel locked.
//
	{
	// enter with r0=&MM::TheMmu, r1=anId, r2=anAddr, r3=aPhysAddr, [sp]=aNumPages, [sp+4]=aPtePerm
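	// Same address decomposition as MapRamPages above; the only difference is
	// that the first PTE is formed once (aPtePerm | aPhysAddr) and then stepped
	// by 0x1000 per page instead of each page address being fetched from a list.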
	asm("stmfd sp!, {r4-r6,lr} ");
	asm("mov r4, r1 ");						// r4=anId
	asm("mov r5, r2 ");						// r5=anAddr
	asm("mov r6, r3 ");						// r6=aPhysAddr
	asm("bl  " CSM_ZN6ImpMmu16UnlockPageTablesEv);	// unlock page tables
	asm("mov r0, r5, lsr #20 ");			// r0=pdeIndex
	asm("bic r1, r5, r0, lsl #20 ");		// r1=anAddr & 0xfffff
	asm("and r1, r1, #0xff000 ");			// r1=ptOffset<<12
	asm("mov r4, r4, lsl #10 ");
	asm("add r4, r4, #%a0" : : "i" ((TInt)KPageTableLinearBase));	// r4=linear address of page table anId
	asm("add r1, r4, r1, lsr #10 ");		// r1 points to first PTE to add
	asm("ldr r2, [sp, #16] ");				// r2=number of pages to map
	asm("mov r0, r0, lsl #2 ");
	asm("add r0, r0, #%a0" : : "i" ((TInt)KPageDirectoryLinearAddress));
	asm("add r0, r0, #%a0" : : "i" ((TInt)KPageTableInfoOffset));	// r0->page table info entry for anAddr
	asm("ldr r3, [r0] ");
	asm("add r3, r3, r2 ");					// add number of pages to pages present count
	asm("str r3, [r0] ");
	asm("ldr r3, [sp, #20] ");				// r3=PTE permissions
	asm("orr r3, r3, r6 ");					// OR in physical address to give first PTE
	asm("b map_phys_pages2 ");

	asm("map_phys_pages1: ");
	asm("str r3, [r1], #4 ");				// store PTE and step to next
	asm("add r3, r3, #0x1000 ");			// step physical address on by page size

	asm("map_phys_pages2: ");
	asm("subs r2, r2, #1 ");
	asm("bge map_phys_pages1 ");			// loop for all pages
	asm("ldmfd sp!, {r4-r6,lr} ");
	DRAIN_WRITE_BUFFER(,r0,r0);
	CPWAIT(,r0);
	asm("b  " CSM_ZN6ImpMmu14LockPageTablesEv);		// lock page tables and exit
	}

__NAKED__ TInt ImpMmu::UnmapPages(TInt /*anId*/, TLinAddr /*anAddr*/, TInt /*aNumPages*/, TPhysAddr* /*aPageList*/, TInt& /*aNumPtes*/)
//
// Unmap a specified area at address anAddr mapped by page table anId. Place physical addresses of unmapped
// RAM pages into aPageList and count of unmapped pages into aNumPtes. Return number of pages still
// mapped using this page table. Call this with the kernel locked.
// Note that a write-back cache may also require flushing after this.
//
	{
	// Enter with r0=this, r1=anId, r2=anAddr, r3=aNumPages, [sp]=aPageList, [sp+4]=&aNumPtes
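	// Bookkeeping notes for the loop below: the low 12 bits of a PTE hold
	// type/permission bits, hence the 0xfff BIC mask used to recover each
	// page's physical address; the PT info word packs the page table id in
	// bits [31:16] and the present-page count in bits [15:0], which is why
	// the lsl/lsr #16 pair at the end extracts the remaining count to return.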
	asm("stmfd sp!, {r4-r9,lr} ");
	asm("mov r4, r0 ");
	asm("mov r5, r1 ");
	asm("mov r6, r2 ");
	asm("mov r7, r3 ");
	asm("bl  " CSM_ZN6ImpMmu16UnlockPageTablesEv);	// unlock the page tables
	asm("mov r8, r6, lsr #20 ");			// r8=pdeIndex
	asm("bic r0, r6, r8, lsl #20 ");		// r0=anAddr&0xfffff
	asm("and r0, r0, #0xff000 ");			// r0=ptOffset<<12
	asm("mov r5, r5, lsl #10 ");			// convert page table id to linear address
	asm("add r5, r5, r0, lsr #10 ");		// add offset within page table
	asm("add r5, r5, #%a0" : : "i" ((TInt)KPageTableLinearBase));	// r5=pte address
	asm("mov ip, #0 ");						// ip=0 throughout loop
	asm("mov r3, #0 ");						// r3 counts present pages
	asm("ldr r9, [sp, #28] ");				// r9=aPageList
	asm("mov r2, #0xff ");
	asm("orr r2, r2, #0xf00 ");				// r2=BIC mask for PTE->page physical address
	asm("b unmap_pages_2 ");

	asm("unmap_pages_1: ");
	asm("ldr r0, [r5] ");					// fetch PTE
	asm("str ip, [r5], #4 ");				// clear PTE
	asm("tst r0, #3 ");						// test if page present
#ifdef __CPU_SPLIT_TLB
#if defined(__CPU_HAS_SINGLE_ENTRY_IDTLB_FLUSH)
	FLUSH_IDTLB_ENTRY(ne,r6);				// flush page from both TLBs if possible
#elif defined(__CPU_HAS_SINGLE_ENTRY_ITLB_FLUSH)
	FLUSH_DTLB_ENTRY(ne,r6);
	FLUSH_ITLB_ENTRY(ne,r6);
#else
	FLUSH_DTLB_ENTRY(ne,r6);				// no single-entry ITLB flush, complete ITLB flush will be done later
#endif
#else
	FLUSH_IDTLB_ENTRY(ne,r6);
#endif
	asm("bicne r0, r0, r2 ");				// ... r0=page physical address ...
	asm("strne r0, [r9], #4 ");				// ... *aPageList++=r0 ...
	asm("addne r3, r3, #1 ");				// ... increment present pages count
	asm("add r6, r6, #0x1000 ");			// increment address by page size

	asm("unmap_pages_2: ");
	asm("subs r7, r7, #1 ");				// decrement page count
	asm("bge unmap_pages_1 ");

	asm("ldr r0, [sp, #32] ");				// r0=&aNumPtes
	asm("str r3, [r0] ");					// aNumPtes=r3
	asm("mov r0, #%a0" : : "i" ((TInt)KPageDirectoryLinearAddress));
	asm("add r0, r0, #%a0" : : "i" ((TInt)KPageTableInfoOffset));	// r0->base of page table info array
	asm("add r0, r0, r8, lsl #2 ");			// r0 points to PTINFO entry for this pde
	asm("ldr r1, [r0] ");					// r1[31:16]=page table id, r1[15:0]=present pages
	asm("sub r1, r1, r3 ");					// subtract number of pages unmapped
	asm("str r1, [r0] ");					// store new pages present count
	asm("mov r4, r1, lsl #16 ");			// shift out top 16 bits and store in r4
	asm("bl  " CSM_ZN6ImpMmu14LockPageTablesEv);		// lock the page tables
	asm("mov r0, r4, lsr #16 ");			// r0=number of pages remaining
	DRAIN_WRITE_BUFFER(,r0,r1);
	CPWAIT(,r1);
	asm("ldmfd sp!, {r4-r9,pc} ");			// restore registers and return
	}

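// PageTableId: a PT info id field of 0xffff marks 'no page table here' (see
// __NotPresentPtInfo below). The 'cmn r0, #0x10000' sets carry exactly when
// the top 16 bits of the info word are all ones, hence the movcc/mvncs pair
// returns either the page table id or -1.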
__NAKED__ TInt Mmu::PageTableId(TLinAddr /*anAddr*/)
	{
	asm("mov r1, r1, lsr #20 ");			// r1=anAddr>>20
	asm("mov r0, #%a0" : : "i" ((TInt)KPageDirectoryLinearAddress));
	asm("add r0, r0, #%a0" : : "i" ((TInt)KPageTableInfoOffset));	// r0->base of page table info array
	asm("mrc p15, 0, r2, c3, c0, 0 ");		// r2=current DACR
	asm("orr r3, r2, #0x30 ");
	asm("mcr p15, 0, r3, c3, c0, 0 ");		// unlock page tables
	CPWAIT(,r3);
	asm("ldr r0, [r0, r1, lsl #2] ");		// fetch page table info entry for anAddr
	asm("mcr p15, 0, r2, c3, c0, 0 ");		// lock page tables
	asm("cmn r0, #0x10000 ");				// test if page table id=0xffff
	asm("movcc r0, r0, lsr #16 ");			// if not, return page table id
	asm("mvncs r0, #0 ");					// else return -1
	__JUMP(,lr);
	}

__NAKED__ void ImpMmu::AssignPageTable(TInt /*anId*/, TLinAddr /*anAddr*/, TPde /*aPdePerm*/)
//
// Assign an allocated page table to map a given linear address with specified permissions.
// This function assumes the page table initially contains no physical RAM page mappings.
// This should be called with the kernel locked.
//
	{
	// on entry r0=&MM::TheMmu, r1=anId, r2=anAddr, r3=aPdePerm
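	// Layout note (implied by the arithmetic below): page tables are 1K, so
	// four of them share each 4K page; the PTE mapping the page that holds
	// table anId is at KPageTableLinearBase + (anId>>2)*4, which is what
	// 'bic r0, r1, #3' computes, since (anId & ~3) == (anId>>2)*4.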
	asm("stmfd sp!, {r4,lr} ");
	asm("and r4, r1, #3 ");					// r4=bottom 2 bits of anId (offset of page table within page)
	asm("orr r3, r3, r4, lsl #10 ");		// combine these bits with PDE permissions
	asm("bic r0, r1, #3 ");					// r0=anId with bottom 2 bits cleared
	asm("add r0, r0, #%a0" : : "i" ((TInt)KPageTableLinearBase));	// r0=address of PTE mapping page table anId
	asm("mov r1, r1, lsl #16 ");			// put ptid into top 16 bits of r1, zero bottom 16 bits
	asm("mov r2, r2, lsr #20 ");			// r2=anAddr>>20
	asm("mov r2, r2, lsl #2 ");				// r2=pdeIndex*4
	asm("add r2, r2, #%a0" : : "i" ((TInt)KPageDirectoryLinearAddress));	// r2 points to PDE for anAddr
	asm("mrc p15, 0, lr, c3, c0, 0 ");		// lr=current DACR
	asm("orr r4, lr, #0x30 ");
	asm("mcr p15, 0, r4, c3, c0, 0 ");		// unlock page tables
	CPWAIT(,r4);
	asm("ldr r0, [r0] ");					// fetch page table PTE
	asm("mov r0, r0, lsr #12 ");			// shift out permission bits, leave phys addr>>12
	asm("orr r3, r3, r0, lsl #12 ");		// r3=PDE word (add PDE permissions and offset within page)
	asm("str r3, [r2] ");					// store PDE
	asm("add r2, r2, #%a0" : : "i" ((TInt)KPageTableInfoOffset));	// r2 points to PT info entry
	asm("str r1, [r2] ");					// PT info top 16=page table ID, bottom 16=pages present=0 (assumption)
	asm("mcr p15, 0, lr, c3, c0, 0 ");		// lock page tables
	DRAIN_WRITE_BUFFER(,r0,r0);
	CPWAIT(,r0);
	asm("ldmfd sp!, {r4,pc} ");
	}

__NAKED__ void ImpMmu::UnassignPageTable(TLinAddr /*anAddr*/)
//
// Unassign a now-empty page table currently mapping the specified linear address.
// We assume that TLB and/or cache flushing has been done when any RAM pages were unmapped.
// Call this with the kernel locked.
//
	{
	asm("mov r1, r1, lsr #20 ");			// r1=anAddr>>20
	asm("mov r0, #%a0" : : "i" ((TInt)KPageDirectoryLinearAddress));
	asm("add r0, r0, r1, lsl #2 ");			// r0 points to page directory entry for anAddr
	asm("ldr r1, __NotPresentPtInfo ");		// r1=PT info entry for not present PDE
	asm("mrc p15, 0, r2, c3, c0, 0 ");		// r2=current DACR
	asm("orr r3, r2, #0x30 ");
	asm("mcr p15, 0, r3, c3, c0, 0 ");		// unlock page tables
	CPWAIT(,r3);
	asm("mov r3, #0 ");
	asm("str r3, [r0] ");					// clear the PDE
	asm("add r0, r0, #%a0" : : "i" ((TInt)KPageTableInfoOffset));	// step r0 on to PT info entry
	asm("str r1, [r0] ");					// clear the PT info entry
	asm("mcr p15, 0, r2, c3, c0, 0 ");		// lock page tables
	DRAIN_WRITE_BUFFER(,r0,r0);
	CPWAIT(,r0);
	__JUMP(,lr);

	asm("__NotPresentPtInfo: ");
	asm(".word 0xffff0000 ");
	}
#endif