// Copyright (c) 1997-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\memmodel\epoc\moving\arm\xmmu.cia
//
//

#include <e32cia.h>
#include <arm_mem.h>
#include "execs.h"
#include "cache_maintenance.h"

__NAKED__ void InvalidateTLBForPage(TLinAddr /*aLinAddr*/)
//
// Flush a specified virtual address from the DTLB.
// Flush from the ITLB as well, provided that doesn't require flushing the whole ITLB.
// ArmMmu::SyncCodeMappings() should follow this if flushing from the ITLB is essential.
//
	{
#ifdef __CPU_SPLIT_TLB
#if defined(__CPU_HAS_SINGLE_ENTRY_IDTLB_FLUSH)
	FLUSH_IDTLB_ENTRY(,r0);
#elif defined(__CPU_HAS_SINGLE_ENTRY_ITLB_FLUSH)
	FLUSH_DTLB_ENTRY(,r0);
	FLUSH_ITLB_ENTRY(,r0);
#else
	FLUSH_DTLB_ENTRY(,r0);
#endif
#else
	FLUSH_IDTLB_ENTRY(,r0);
#endif
	// don't need CPWAIT since it always happens in the function which calls this one
	__JUMP(,lr);
	}

__NAKED__ void ArmMmu::SyncCodeMappings()
//
// Flush the ITLB if it is not flushed page-by-page during unmapping of pages
//
	{
#if defined(__CPU_SPLIT_TLB) && !defined(__CPU_HAS_SINGLE_ENTRY_ITLB_FLUSH) && !defined(__CPU_HAS_SINGLE_ENTRY_IDTLB_FLUSH)
	asm("mov r2, #0 ");
	FLUSH_ITLB(,r2);
	CPWAIT(,r0);
#endif
	__JUMP(,lr);
	}

__NAKED__ void FlushTLBs()
	{
	asm("mov r0, #0 ");
	FLUSH_IDTLB(,r0);
	CPWAIT(,r0);
	__JUMP(,lr);
	}

__NAKED__ void FlushUnmapShadow(TLinAddr /*aRomAddr*/)
//
// Flush both the I and D TLBs when the shadow page at aRomAddr is unmapped.
// Any cache flushing for that page must be done separately; this routine only
// invalidates the TLBs.
//
	{
	asm("mov r0, #0 ");
	FLUSH_IDTLB(,r0);		// flush both TLBs
	CPWAIT(,r0);
	__JUMP(,lr);
	}

// Generic cache/TLB flush function.
// The set of caches and TLBs flushed is determined by aMask.
// Call this with the system locked. Preemption can occur during this function.
__NAKED__ void ArmMmu::GenericFlush(TUint32 /*aMask*/)
	{
	asm("tst r1, #%a0" : : "i" (EFlushDMove|EFlushDDecommit));
	asm("orrne r1, r1, #%a0" : : "i" (EFlushDCache));
	asm("tst r1, #%a0" : : "i" (EFlushDMove|EFlushDDecommit|EFlushDPermChg));
	asm("orrne r1, r1, #%a0" : : "i" (EFlushDTLB));
	asm("tst r1, #%a0" : : "i" (EFlushIMove));
	asm("orrne r1, r1, #%a0" : : "i" (EFlushICache));
	asm("tst r1, #%a0" : : "i" (EFlushIMove|EFlushIPermChg));
	asm("orrne r1, r1, #%a0" : : "i" (EFlushITLB));
	asm("mov r2, #0 ");
#ifdef __CPU_SPLIT_CACHE
	asm("tst r1, #%a0" : : "i" (EFlushDCache) );
#else
	asm("tst r1, #%a0" : : "i" (EFlushDCache|EFlushICache) );
#endif
	asm("beq 1f ");
	asm("stmfd sp!, {r1,lr} ");
	asm("bl  " CSM_ZN16CacheMaintenance15OnProcessSwitchEv);	// flush data or unified cache
	asm("ldmfd sp!, {r1,lr} ");
	asm("mov r2, #0 ");
	asm("1: ");

#ifdef __CPU_SPLIT_CACHE
	asm("tst r1, #%a0" : : "i" (EFlushICache) );
	FLUSH_ICACHE(ne,r2);
#endif

#ifdef __CPU_SPLIT_TLB
	asm("tst r1, #%a0" : : "i" (EFlushDTLB) );
	FLUSH_DTLB(ne,r2);
	asm("tst r1, #%a0" : : "i" (EFlushITLB) );
	FLUSH_ITLB(ne,r2);
#else
	asm("tst r1, #%a0" : : "i" (EFlushDTLB|EFlushITLB) );
	FLUSH_IDTLB(ne,r2);
#endif
	CPWAIT(,r0);
	__JUMP(,lr);
	}

#if defined(__CPU_XSCALE__)
// Special routine to process minicache attributes
__NAKED__ TUint MiniCacheConfig()
	{
	asm("mrc p15, 0, r0, c1, c0, 1 ");
#if defined (__CPU_XSCALE_MANZANO__)
	asm("and r0, r0, #0x30 ");	// 00=WBRA 01=WBRA 10=WTRA 11=WBRA
	asm("cmp r0, #0x20");		// is it WTRA?
	asm("moveq r0, #8");		// yes
	asm("movne r0, #10");		// no
#else
	asm("mov r0, r0, lsr #4 ");
	asm("and r0, r0, #3 ");		// 00=WBRA 01=WBWA 10=WTRA 11=UNP
	asm("eor r0, r0, #2 ");		// 10=WBRA 11=WBWA 00=WTRA 01=UNP
	asm("add r0, r0, #8 ");		// WBRA->AWBR, WBWA->AWBW, WTRA->AWTR, UNP->AWTW (can't occur)
#endif
	__JUMP(,lr);
	}
#endif

__NAKED__ void ExecHandler::UnlockRamDrive()
	{
	asm("ldr r0, [r1, #%a0]" : : "i" (_FOFF(DThread,iOwningProcess)-_FOFF(DThread,iNThread)));
	asm("ldr r0, [r0, #%a0]" : : "i" _FOFF(DProcess,iS.iCaps));
// __KERNEL_CAPABILITY_CHECK
	asm("tst r0, #%a0 " : : "i" ((TInt)(1<<ECapabilityTCB)));
	__JUMP(eq,lr);				// don't unlock the RAM drive unless the caller has the TCB capability

	// fall through to unlock
	}

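// The internal RAM drive is protected with an ARM MMU domain: Unlock() sets that
// domain's two-bit field in the Domain Access Control Register (CP15 c3) to
// 'manager' (0b11) so accesses bypass page permission checks, and Lock() clears it
// back to 'no access'. Roughly equivalent C (illustrative sketch only; GetDACR()
// and SetDACR() are hypothetical helper names, and 0xc0 covers DACR bits 7:6,
// i.e. domain 3):
//	TUint32 dacr = GetDACR();		// MRC p15,0,<r>,c3,c0,0
//	dacr |= 0xc0;					// Unlock: domain -> manager
//	/* dacr &= ~0xc0; */			// Lock: domain -> no access
//	SetDACR(dacr);					// MCR p15,0,<r>,c3,c0,0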
EXPORT_C __NAKED__ void TInternalRamDrive::Unlock()
	{
	asm("mrc p15, 0, r0, c3, c0, 0 ");
	asm("orr r0, r0, #0xc0 ");
	asm("mcr p15, 0, r0, c3, c0, 0 ");
	CPWAIT(,r0);
	__JUMP(,lr);
	}

EXPORT_C __NAKED__ void TInternalRamDrive::Lock()
	{
	asm("mrc p15, 0, r0, c3, c0, 0 ");
	asm("bic r0, r0, #0xc0 ");
	asm("mcr p15, 0, r0, c3, c0, 0 ");
	CPWAIT(,r0);
	__JUMP(,lr);
	}

#if defined(__CPU_WRITE_BACK_CACHE)
#if defined(__CPU_HAS_SINGLE_ENTRY_DCACHE_FLUSH)
__NAKED__ void CopyPageForRemap32(TLinAddr /*aDest*/, TLinAddr /*aSrc*/)
	{
	// Source and destination 4k page aligned (and thus cache aligned)
	// Fixed copy size of 4k
	// But.. after each cache line we need to purge the line from the cache
	// and when we're done we need to drain the write buffer
	// We are assuming 32-byte cache lines here but this function is only used
	// when this is the case, so it's ok.

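	// Roughly equivalent C (illustrative sketch only): copy the page 32 bytes at a
	// time, purge each source line from the D-cache once it has been read, then
	// drain the write buffer so the stores reach memory. PurgeDCacheLine() and
	// DrainWriteBuffer() are hypothetical names for the macros used below.
	//	do	{
	//		TUint32* line = src;
	//		for (TInt i=0; i<8; ++i) *dest++ = *src++;	// ldmia/stmia of r2-r9
	//		PurgeDCacheLine(line);
	//		} while ((TLinAddr)src & 0xff0);			// stop at the next 4K boundary
	//	DrainWriteBuffer();
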
	asm("stmfd sp!, {r4-r9} ");
	asm("1: ");
	PLD_ioff(1, 32);
	asm("mov ip, r1 ");
	asm("ldmia r1!, {r2-r9} ");
	asm("tst r1, #0xff0 ");
	asm("stmia r0!, {r2-r9} ");
	PURGE_DCACHE_LINE(,ip);
	asm("bne 1b ");
	asm("ldmfd sp!, {r4-r9} ");
	DRAIN_WRITE_BUFFER(,r0,r1);
	CPWAIT(,r0);
	__JUMP(,lr);
	}

__NAKED__ void CopyPageForRemap16(TLinAddr /*aDest*/, TLinAddr /*aSrc*/)
	{
	// Source and destination 4k page aligned (and thus cache aligned)
	// Fixed copy size of 4k
	// But.. after each cache line we need to purge the line from the cache
	// and when we're done we need to drain the write buffer
	// We are assuming 16-byte cache lines here but this function is only used
	// when this is the case, so it's ok.

	asm("stmfd sp!, {r4-r5} ");
	asm("1: ");
	PLD_ioff(1, 16);
	asm("mov ip, r1 ");
	asm("ldmia r1!, {r2-r5} ");
	asm("tst r1, #0xff0 ");
	asm("stmia r0!, {r2-r5} ");
	PURGE_DCACHE_LINE(,ip);
	asm("bne 1b ");
	asm("ldmfd sp!, {r4-r5} ");
	DRAIN_WRITE_BUFFER(,r0,r1);
	CPWAIT(,r0);
	__JUMP(,lr);
	}
#endif
#else //!__CPU_WRITE_BACK_CACHE
__NAKED__ void CopyPageForRemapWT(TLinAddr /*aDest*/, TLinAddr /*aSrc*/)
	{
	// Source and destination 4k page aligned (and thus cache aligned)
	// Fixed copy size of 4k
	// Writethrough cache means no purging is required, but
	// when we're done we still need to drain the write buffer

	asm("stmfd sp!, {r4-r8} ");
	asm("1: ");
	PLD_ioff(1, 16);
	asm("ldmia r1!, {r2-r8,ip} ");
	asm("tst r1, #0xff0 ");
	asm("stmia r0!, {r2-r8,ip} ");
	asm("bne 1b ");
	asm("ldmfd sp!, {r4-r8} ");
	DRAIN_WRITE_BUFFER(,r0,r1);
	CPWAIT(,r0);
	__JUMP(,lr);
	}
#endif

#ifdef __MMU_MACHINE_CODED__
__NAKED__ void ImpMmu::MapRamPages(TInt /*anId*/, TLinAddr /*anAddr*/, TPhysAddr* /*aPageList*/, TInt /*aNumPages*/, TPte /*aPtePerm*/)
//
// Map a list of physical RAM pages to a specified linear address using a specified page table and
// specified PTE permissions. Call this with the kernel locked.
//
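// Roughly equivalent C (illustrative sketch only, not part of the build; the page
// tables are unlocked via the DACR for the duration of the update):
//	UnlockPageTables();
//	TPte* pPte = (TPte*)(KPageTableLinearBase + (anId<<10)) + ((anAddr>>12)&0xff);
//	TUint32* pInfo = (TUint32*)(KPageDirectoryLinearAddress + KPageTableInfoOffset) + (anAddr>>20);
//	*pInfo += aNumPages;						// pages-present count lives in the low 16 bits
//	while (aNumPages--)
//		*pPte++ = *aPageList++ | aPtePerm;		// physical address | PTE permissions
//	DrainWriteBuffer();
//	LockPageTables();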
	{
	// enter with r0=&MM::TheMmu, r1=anId, r2=anAddr, r3=aPageList, [sp]=aNumPages, [sp+4]=aPtePerm
	asm("stmfd sp!, {r4-r6,lr} ");
	asm("mov r4, r1 ");						// r4=anId
	asm("mov r5, r2 ");						// r5=anAddr
	asm("mov r6, r3 ");						// r6=aPageList
	asm("bl  " CSM_ZN6ImpMmu16UnlockPageTablesEv);	// unlock page tables
	asm("mov r0, r5, lsr #20 ");			// r0=pdeIndex
	asm("bic r1, r5, r0, lsl #20 ");		// r1=anAddr & 0xfffff
	asm("and r1, r1, #0xff000 ");			// r1=ptOffset<<12
	asm("mov r4, r4, lsl #10 ");
	asm("add r4, r4, #%a0" : : "i" ((TInt)KPageTableLinearBase));	// r4=linear address of page table anId
	asm("add r1, r4, r1, lsr #10 ");		// r1 points to first PTE to add
	asm("ldr r2, [sp, #16] ");				// r2=number of pages to map
	asm("mov r0, r0, lsl #2 ");
	asm("add r0, r0, #%a0" : : "i" ((TInt)KPageDirectoryLinearAddress));
	asm("add r0, r0, #%a0" : : "i" ((TInt)KPageTableInfoOffset));	// r0->page table info entry for anAddr
	asm("ldr r3, [r0] ");
	asm("add r3, r3, r2 ");					// add number of pages to pages present count
	asm("str r3, [r0] ");
	asm("ldr r3, [sp, #20] ");				// r3=PTE permissions
	asm("b map_ram_pages2 ");

	asm("map_ram_pages1: ");
	asm("ldr lr, [r6], #4 ");				// get physical address of page and step to next in list
	asm("orr lr, lr, r3 ");					// OR in permissions to give PTE
	asm("str lr, [r1], #4 ");				// store PTE and step to next

	asm("map_ram_pages2: ");
	asm("subs r2, r2, #1 ");
	asm("bge map_ram_pages1 ");				// loop for all pages
	asm("ldmfd sp!, {r4-r6,lr} ");
	DRAIN_WRITE_BUFFER(,r0,r0);
	CPWAIT(,r0);
	asm("b  " CSM_ZN6ImpMmu14LockPageTablesEv);		// lock page tables and exit
	}

__NAKED__ void ImpMmu::MapPhysicalPages(TInt /*anId*/, TLinAddr /*anAddr*/, TPhysAddr /*aPhysAddr*/, TInt /*aNumPages*/, TPte /*aPtePerm*/)
//
// Map consecutive physical pages to a specified linear address using a specified page table and
// specified PTE permissions. Call this with the kernel locked.
//
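// Roughly equivalent C (illustrative sketch only); the same as MapRamPages() above
// except that the PTEs map consecutive physical pages rather than a page list:
//	TPte pte = aPhysAddr | aPtePerm;
//	while (aNumPages--)
//		{
//		*pPte++ = pte;			// store PTE
//		pte += 0x1000;			// next physical page
//		}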
	{
	// enter with r0=&MM::TheMmu, r1=anId, r2=anAddr, r3=aPhysAddr, [sp]=aNumPages, [sp+4]=aPtePerm
	asm("stmfd sp!, {r4-r6,lr} ");
	asm("mov r4, r1 ");						// r4=anId
	asm("mov r5, r2 ");						// r5=anAddr
	asm("mov r6, r3 ");						// r6=aPhysAddr
	asm("bl  " CSM_ZN6ImpMmu16UnlockPageTablesEv);	// unlock page tables
	asm("mov r0, r5, lsr #20 ");			// r0=pdeIndex
	asm("bic r1, r5, r0, lsl #20 ");		// r1=anAddr & 0xfffff
	asm("and r1, r1, #0xff000 ");			// r1=ptOffset<<12
	asm("mov r4, r4, lsl #10 ");
	asm("add r4, r4, #%a0" : : "i" ((TInt)KPageTableLinearBase));	// r4=linear address of page table anId
	asm("add r1, r4, r1, lsr #10 ");		// r1 points to first PTE to add
	asm("ldr r2, [sp, #16] ");				// r2=number of pages to map
	asm("mov r0, r0, lsl #2 ");
	asm("add r0, r0, #%a0" : : "i" ((TInt)KPageDirectoryLinearAddress));
	asm("add r0, r0, #%a0" : : "i" ((TInt)KPageTableInfoOffset));	// r0->page table info entry for anAddr
	asm("ldr r3, [r0] ");
	asm("add r3, r3, r2 ");					// add number of pages to pages present count
	asm("str r3, [r0] ");
	asm("ldr r3, [sp, #20] ");				// r3=PTE permissions
	asm("orr r3, r3, r6 ");					// OR in physical address to give first PTE
	asm("b map_phys_pages2 ");

	asm("map_phys_pages1: ");
	asm("str r3, [r1], #4 ");				// store PTE and step to next
	asm("add r3, r3, #0x1000 ");			// step physical address on by page size

	asm("map_phys_pages2: ");
	asm("subs r2, r2, #1 ");
	asm("bge map_phys_pages1 ");			// loop for all pages
	asm("ldmfd sp!, {r4-r6,lr} ");
	DRAIN_WRITE_BUFFER(,r0,r0);
	CPWAIT(,r0);
	asm("b  " CSM_ZN6ImpMmu14LockPageTablesEv);		// lock page tables and exit
	}

__NAKED__ TInt ImpMmu::UnmapPages(TInt /*anId*/, TLinAddr /*anAddr*/, TInt /*aNumPages*/, TPhysAddr* /*aPageList*/, TInt& /*aNumPtes*/)
//
// Unmap a specified area at address anAddr mapped by page table anId. Place physical addresses of unmapped
// RAM pages into aPageList and count of unmapped pages into aNumPtes. Return number of pages still
// mapped using this page table. Call this with the kernel locked.
// Note that a write-back cache may also require flushing after this.
//
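// Roughly equivalent C (illustrative sketch only, not part of the build;
// FlushTLBEntry() stands for the single-entry TLB flush macros used below):
//	UnlockPageTables();
//	TPte* pPte = (TPte*)(KPageTableLinearBase + (anId<<10)) + ((anAddr>>12)&0xff);
//	TUint32* pInfo = (TUint32*)(KPageDirectoryLinearAddress + KPageTableInfoOffset) + (anAddr>>20);
//	TInt present = 0;
//	while (aNumPages--)
//		{
//		TPte pte = *pPte; *pPte++ = 0;				// read and clear the PTE
//		if (pte & 3)								// page was actually mapped
//			{
//			FlushTLBEntry(anAddr);
//			*aPageList++ = pte & ~0xfff;			// physical address of the unmapped page
//			++present;
//			}
//		anAddr += 0x1000;
//		}
//	aNumPtes = present;
//	*pInfo -= present;								// update the pages-present count
//	TInt remaining = *pInfo & 0xffff;
//	LockPageTables();
//	DrainWriteBuffer();
//	return remaining;								// pages still mapped by this page table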
	{
	// Enter with r0=this, r1=anId, r2=anAddr, r3=aNumPages, [sp]=aPageList, [sp+4]=&aNumPtes
	asm("stmfd sp!, {r4-r9,lr} ");
	asm("mov r4, r0 ");
	asm("mov r5, r1 ");
	asm("mov r6, r2 ");
	asm("mov r7, r3 ");
	asm("bl  " CSM_ZN6ImpMmu16UnlockPageTablesEv);	// unlock the page tables
	asm("mov r8, r6, lsr #20 ");			// r8=pdeIndex
	asm("bic r0, r6, r8, lsl #20 ");		// r0=anAddr&0xfffff
	asm("and r0, r0, #0xff000 ");			// r0=ptOffset<<12
	asm("mov r5, r5, lsl #10 ");			// convert page table id to linear address
	asm("add r5, r5, r0, lsr #10 ");		// add offset within page table
	asm("add r5, r5, #%a0" : : "i" ((TInt)KPageTableLinearBase));	// r5=pte address
	asm("mov ip, #0 ");						// ip=0 throughout loop
	asm("mov r3, #0 ");						// r3 counts present pages
	asm("ldr r9, [sp, #28] ");				// r9=aPageList
	asm("mov r2, #0xff ");
	asm("orr r2, r2, #0xf00 ");				// r2=BIC mask for PTE->page physical address
	asm("b unmap_pages_2 ");

	asm("unmap_pages_1: ");
	asm("ldr r0, [r5] ");					// fetch PTE
	asm("str ip, [r5], #4 ");				// clear PTE
	asm("tst r0, #3 ");						// test if page present
#ifdef __CPU_SPLIT_TLB
#if defined(__CPU_HAS_SINGLE_ENTRY_IDTLB_FLUSH)
	FLUSH_IDTLB_ENTRY(ne,r6);				// flush page from both TLBs if possible
#elif defined(__CPU_HAS_SINGLE_ENTRY_ITLB_FLUSH)
	FLUSH_DTLB_ENTRY(ne,r6);
	FLUSH_ITLB_ENTRY(ne,r6);
#else
	FLUSH_DTLB_ENTRY(ne,r6);				// no single-entry ITLB flush, complete ITLB flush will be done later
#endif
#else
	FLUSH_IDTLB_ENTRY(ne,r6);
#endif
	asm("bicne r0, r0, r2 ");				// ... r0=page physical address ...
	asm("strne r0, [r9], #4 ");				// ... *aPageList++=r0 ...
	asm("addne r3, r3, #1 ");				// ... increment present pages count
	asm("add r6, r6, #0x1000 ");			// increment address by page size

	asm("unmap_pages_2: ");
	asm("subs r7, r7, #1 ");				// decrement page count
	asm("bge unmap_pages_1 ");

	asm("ldr r0, [sp, #32] ");				// r0=&aNumPtes
	asm("str r3, [r0] ");					// aNumPtes=r3
	asm("mov r0, #%a0" : : "i" ((TInt)KPageDirectoryLinearAddress));
	asm("add r0, r0, #%a0" : : "i" ((TInt)KPageTableInfoOffset));	// r0->base of page table info array
	asm("add r0, r0, r8, lsl #2 ");			// r0 points to PTINFO entry for this pde
	asm("ldr r1, [r0] ");					// r1[31:16]=page table id, r1[15:0]=present pages
	asm("sub r1, r1, r3 ");					// subtract number of pages unmapped
	asm("str r1, [r0] ");					// store new pages present count
	asm("mov r4, r1, lsl #16 ");			// shift out top 16 bits and store in r4
	asm("bl  " CSM_ZN6ImpMmu14LockPageTablesEv);		// lock the page tables
	asm("mov r0, r4, lsr #16 ");			// r0=number of pages remaining
	DRAIN_WRITE_BUFFER(,r0,r1);
	CPWAIT(,r1);
	asm("ldmfd sp!, {r4-r9,pc} ");			// restore registers and return
	}

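//
// Return the id of the page table currently mapping anAddr, or -1 if no page table
// is assigned to that megabyte of the address space.
// Roughly equivalent C (illustrative sketch only; PageTableInfo is a hypothetical
// name for the array at KPageDirectoryLinearAddress+KPageTableInfoOffset):
//	TUint32 info = PageTableInfo[anAddr>>20];	// page tables briefly unlocked via the DACR
//	return (info>>16)==0xffff ? -1 : TInt(info>>16);
//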
__NAKED__ TInt Mmu::PageTableId(TLinAddr /*anAddr*/)
	{
	asm("mov r1, r1, lsr #20 ");			// r1=anAddr>>20
	asm("mov r0, #%a0" : : "i" ((TInt)KPageDirectoryLinearAddress));
	asm("add r0, r0, #%a0" : : "i" ((TInt)KPageTableInfoOffset));	// r0->base of page table info array
	asm("mrc p15, 0, r2, c3, c0, 0 ");		// r2=current DACR
	asm("orr r3, r2, #0x30 ");
	asm("mcr p15, 0, r3, c3, c0, 0 ");		// unlock page tables
	CPWAIT(,r3);
	asm("ldr r0, [r0, r1, lsl #2] ");		// fetch page table info entry for anAddr
	asm("mcr p15, 0, r2, c3, c0, 0 ");		// lock page tables
	asm("cmn r0, #0x10000 ");				// test if page table id=0xffff
	asm("movcc r0, r0, lsr #16 ");			// if not, return page table id
	asm("mvncs r0, #0 ");					// else return -1
	__JUMP(,lr);
	}

__NAKED__ void ImpMmu::AssignPageTable(TInt /*anId*/, TLinAddr /*anAddr*/, TPde /*aPdePerm*/)
//
// Assign an allocated page table to map a given linear address with specified permissions.
// This function assumes the page table initially contains no physical RAM page mappings.
// This should be called with the kernel locked.
//
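// Roughly equivalent C (illustrative sketch only; PageDirectory and PageTableInfo are
// hypothetical names for the arrays at KPageDirectoryLinearAddress and
// KPageDirectoryLinearAddress+KPageTableInfoOffset; the page tables are unlocked via
// the DACR around the update, then relocked):
//	TPte pte = *(TPte*)(KPageTableLinearBase + (anId&~3));	// PTE mapping the page that holds page table anId
//	TPhysAddr ptPhys = (pte & ~0xfff) | ((anId&3)<<10);		// physical address of the 1K page table
//	PageDirectory[anAddr>>20] = ptPhys | aPdePerm;			// point the PDE at it
//	PageTableInfo[anAddr>>20] = TUint32(anId)<<16;			// id in top 16 bits, pages present = 0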
	{
	// on entry r0=&MM::TheMmu, r1=anId, r2=anAddr, r3=aPdePerm
	asm("stmfd sp!, {r4,lr} ");
	asm("and r4, r1, #3 ");					// r4=bottom 2 bits of anId (offset of page table within page)
	asm("orr r3, r3, r4, lsl #10 ");		// combine these bits with PDE permissions
	asm("bic r0, r1, #3 ");					// r0=anId with bottom 2 bits cleared
	asm("add r0, r0, #%a0" : : "i" ((TInt)KPageTableLinearBase));	// r0=address of PTE mapping page table anId
	asm("mov r1, r1, lsl #16 ");			// put ptid into top 16 bits of r1, zero bottom 16 bits
	asm("mov r2, r2, lsr #20 ");			// r2=anAddr>>20
	asm("mov r2, r2, lsl #2 ");				// r2=pdeIndex*4
	asm("add r2, r2, #%a0" : : "i" ((TInt)KPageDirectoryLinearAddress));	// r2 points to PDE for anAddr
	asm("mrc p15, 0, lr, c3, c0, 0 ");		// lr=current DACR
	asm("orr r4, lr, #0x30 ");
	asm("mcr p15, 0, r4, c3, c0, 0 ");		// unlock page tables
	CPWAIT(,r4);
	asm("ldr r0, [r0] ");					// fetch page table PTE
	asm("mov r0, r0, lsr #12 ");			// shift out permission bits, leave phys addr>>12
	asm("orr r3, r3, r0, lsl #12 ");		// r3=PDE word (add PDE permissions and offset within page)
	asm("str r3, [r2] ");					// store PDE
	asm("add r2, r2, #%a0" : : "i" ((TInt)KPageTableInfoOffset));	// r2 points to PT info entry
	asm("str r1, [r2] ");					// PTinfo top 16=page table ID, PT info bottom 16=pages present=0 (assumption)
	asm("mcr p15, 0, lr, c3, c0, 0 ");		// lock page tables
	DRAIN_WRITE_BUFFER(,r0,r0);
	CPWAIT(,r0);
	asm("ldmfd sp!, {r4,pc} ");
	}

__NAKED__ void ImpMmu::UnassignPageTable(TLinAddr /*anAddr*/)
//
// Unassign a now-empty page table currently mapping the specified linear address.
// We assume that TLB and/or cache flushing has been done when any RAM pages were unmapped.
// Call this with the kernel locked.
//
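// Roughly equivalent C (illustrative sketch only, using the same hypothetical array
// names as AssignPageTable() above):
//	PageDirectory[anAddr>>20] = 0;				// clear the PDE
//	PageTableInfo[anAddr>>20] = 0xffff0000;		// mark 'no page table assigned'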
	{
	asm("mov r1, r1, lsr #20 ");			// r1=anAddr>>20
	asm("mov r0, #%a0" : : "i" ((TInt)KPageDirectoryLinearAddress));
	asm("add r0, r0, r1, lsl #2 ");			// r0 points to page directory entry for anAddr
	asm("ldr r1, __NotPresentPtInfo ");		// r1=PTInfo entry for not present PDE
	asm("mrc p15, 0, r2, c3, c0, 0 ");		// r2=current DACR
	asm("orr r3, r2, #0x30 ");
	asm("mcr p15, 0, r3, c3, c0, 0 ");		// unlock page tables
	CPWAIT(,r3);
	asm("mov r3, #0 ");
	asm("str r3, [r0] ");					// clear the PDE
	asm("add r0, r0, #%a0" : : "i" ((TInt)KPageTableInfoOffset));	// step r0 on to PT info entry
	asm("str r1, [r0] ");					// clear the PT info entry
	asm("mcr p15, 0, r2, c3, c0, 0 ");		// lock page tables
	DRAIN_WRITE_BUFFER(,r0,r0);
	CPWAIT(,r0);
	__JUMP(,lr);

	asm("__NotPresentPtInfo: ");
	asm(".word 0xffff0000 ");
	}
#endif