os/kernelhwsrv/kernel/eka/memmodel/epoc/moving/arm/xsched.cia
changeset 0 bde4ae8d615e
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/os/kernelhwsrv/kernel/eka/memmodel/epoc/moving/arm/xsched.cia	Fri Jun 15 03:10:57 2012 +0200
     1.3 @@ -0,0 +1,665 @@
     1.4 +// Copyright (c) 1997-2009 Nokia Corporation and/or its subsidiary(-ies).
     1.5 +// All rights reserved.
     1.6 +// This component and the accompanying materials are made available
     1.7 +// under the terms of the License "Eclipse Public License v1.0"
     1.8 +// which accompanies this distribution, and is available
     1.9 +// at the URL "http://www.eclipse.org/legal/epl-v10.html".
    1.10 +//
    1.11 +// Initial Contributors:
    1.12 +// Nokia Corporation - initial contribution.
    1.13 +//
    1.14 +// Contributors:
    1.15 +//
    1.16 +// Description:
    1.17 +// e32\memmodel\epoc\moving\arm\xsched.cia
    1.18 +// 
    1.19 +//
    1.20 +
    1.21 +#include <e32cia.h>
    1.22 +#include <arm_mem.h>
    1.23 +
    1.24 +#define iMState		iWaitLink.iSpare1
    1.25 +
    1.26 +//#define __DEBUG_BAD_ADDR
    1.27 +
    1.28 +#define iCurrentVMProcess				iExtras[0]
    1.29 +#define	iCurrentDataSectionProcess		iExtras[1]
    1.30 +#define	iCompleteDataSectionProcess		iExtras[2]
    1.31 +
    1.32 +#ifdef __REQUEST_COMPLETE_MACHINE_CODED__
    1.33 +
    1.34 +#if defined(_DEBUG)
    1.35 +extern "C" void __DebugMsgRequestComplete(TInt a0, TInt a1, TInt a2);
    1.36 +extern "C" void __DebugMsgReqCompleteWrite(TInt a0, TInt a1, TInt a2);
    1.37 +#endif
    1.38 +
     1.39 +__NAKED__ void DThread::RequestComplete(TRequestStatus*& /*aStatus*/, TInt /*aReason*/)
     1.40 +//
     1.41 +// Signal this thread's request semaphore, writing aReason to its TRequestStatus first.
     1.42 +// Enter with system locked, return with system unlocked.
     1.43 +//
     1.44 +	{
     1.45 +	ASM_DEBUG2(DThreadRequestComplete,r0,lr);
     1.46 +
     1.47 +	asm("ldr r3, [r1] ");					// r3 points to TRequestStatus
     1.48 +	asm("mov r12, #0 ");
     1.49 +	asm("str r12, [r1] ");					// aStatus=NULL
     1.50 +
     1.51 +	asm(".global _asm_RequestComplete ");
     1.52 +	asm("_asm_RequestComplete: ");
     1.53 +
     1.54 +#ifdef BTRACE_REQUESTS
     1.55 +	asm("stmdb sp!,{r0-r3,lr}");
     1.56 +	asm("mov r1,r3");
     1.57 +	asm("mov r3,r2");											// arg3 = aReason
     1.58 +	asm("mov r2,r1");											// arg2 = aStatus
     1.59 +	asm("add r1,r0,#%a0" : : "i" _FOFF(DThread,iNThread));		// arg1 = &this->iNThread
     1.60 +	asm("ldr r0,_threadReqequestCompleteTraceHeader");			// arg0 = header
     1.61 +	asm("bl " CSM_ZN6BTrace4OutXEmmmm);
     1.62 +	asm("ldmia sp!,{r0-r3,lr}");
     1.63 +#endif
     1.64 +
     1.65 +	ASM_DEBUG3(RequestComplete,r0,r3,r2);
     1.66 +	asm("ldrb r12, [r0, #%a0]" : : "i" _FOFF(DThread,iMState));	// r12 = thread's iMState
     1.67 +	asm("stmfd sp!, {r4-r6} ");				// save work registers
     1.68 +	asm("add r6, r0, #%a0" : : "i" _FOFF(DThread,iNThread));	// r6 = &this->iNThread
     1.69 +	asm("ldr r0, [r0, #%a0]" : : "i" _FOFF(DThread,iOwningProcess));
     1.70 +	asm("cmp r12, #%a0" : : "i" (DThread::EDead));	// test if iMState=EDead
     1.71 +	asm("ldr r12, [r0, #%a0]!" : : "i" _FOFF(DMemModelProcess, iNumChunks));	// step r0 on to iChunks[0]
     1.72 +	asm("beq req_complete_dead_thread ");	// if it is, finished
     1.73 +	asm("b req_complete_2");				// enter chunk-scan loop at the counter check
     1.74 +
     1.75 +	// lookup r3 in address space of process r0
     1.76 +	asm("req_complete_1: ");
     1.77 +	asm("ldmcsib r0!, {r1,r4} ");			// r1=data section base, r4=chunk ptr
     1.78 +	asm("subcss r1, r3, r1 ");				// r1=offset
     1.79 +	asm("ldrcs r5, [r4, #%a0]" : : "i" _FOFF(DChunk,iMaxSize));	// if offset>=0, r5=chunk max size
     1.80 +	asm("cmpcs r1, r5 ");					// and compare offset to max size
     1.81 +	asm("addcs r0, r0, #4 ");				// if offset>=max size, move to next entry
     1.82 +	asm("req_complete_2: ");
     1.83 + 	asm("subcss r12, r12, #1 ");			// and decrement counter
     1.84 +	asm("bcs req_complete_1 ");				// loop if more chunks to check
     1.85 +	asm("cmp r12, #0 ");					// did we find chunk?
     1.86 +	asm("ldrge r3, [r4, #%a0]" : : "i" _FOFF(DMemModelChunk,iHomeRegionOffset));
     1.87 +	asm("ldrge r5, [r4, #%a0]" : : "i" _FOFF(DMemModelChunk,iHomeRegionSize));
     1.88 +	asm("cmpge r1, r3 ");					// if we did but offset<iHomeRegionOffset, no good
     1.89 +	asm("blt req_complete_invalid ");		// or if we didn't find it, no good
     1.90 +	asm("add r3, r3, r5 ");					// r3=home region offset+home region size
     1.91 +	asm("cmp r1, r3 ");						// if offset >= iHomeRegionOffset+iHomeRegionSize, no good
     1.92 +	asm("ldrlt r0, [r4, #%a0]" : : "i" _FOFF(DChunk,iBase));	// if OK, r0=chunk base
     1.93 +	asm("bge req_complete_invalid ");
     1.94 +											// Zero flag always clear here
     1.95 +
     1.96 +	ASM_DEBUG3(ReqCompleteWrite,r0,r1,r2);
     1.97 +
     1.98 +	asm(".global __magic_address_reqc ");
     1.99 +	asm("__magic_address_reqc: ");			// this instruction is magically immune from exceptions
     1.100 +	asm("str r2, [r0, r1] ");				// r0+r1=current address, store completion code
     1.101 +
     1.102 +	asm("movne r0, r6 ");					// if write OK, r0->iNThread
     1.103 +	asm("ldmnefd sp!, {r4-r6} ");			// restore registers
     1.104 +	asm("movne r1, #0 ");
     1.105 +	asm("bne  " CSM_ZN5NKern19ThreadRequestSignalEP7NThreadP10NFastMutex);	// tail-call: signal request semaphore, unlocks system
     1.106 +#ifndef __DEBUG_BAD_ADDR
     1.107 +	asm("req_complete_invalid: ");
     1.108 +	asm("ldmfd sp!, {r4-r6} ");				// restore registers
     1.109 +	asm("b  " CSM_ZN5NKern12UnlockSystemEv);	// bad status address: just unlock system and return
     1.110 +#endif
     1.111 +#ifdef __DEBUG_BAD_ADDR
     1.112 +	asm("req_complete_invalid: ");			//DEBUG
     1.113 +	asm("mov r9, #0xde000000 ");
     1.114 +	asm("str r9, [r9, #0xaf] ");			//HACK-CRASH SYSTEM
     1.115 +#endif
     1.116 +	asm("req_complete_dead_thread: ");
     1.117 +	asm("ldmfd sp!, {r4-r6} ");				// restore registers
     1.118 +	asm("b  " CSM_ZN5NKern12UnlockSystemEv);	// dead thread: nothing to signal, just unlock system
     1.119 +
     1.120 +#ifdef BTRACE_REQUESTS
     1.121 +	asm("_threadReqequestCompleteTraceHeader:");
     1.122 +	asm(".word %a0" : : "i" (BTRACE_HEADER_C(16,BTrace::ERequests,BTrace::ERequestComplete)));
     1.123 +#endif
     1.124 +	}
   1.125 +#endif
   1.126 +
   1.127 +#ifdef __SCHEDULER_MACHINE_CODED__
   1.128 +
   1.129 +#if defined(_DEBUG)
   1.130 +extern "C" void __DebugMsgFixedAddress();
   1.131 +extern "C" void __DebugMsgMoving();
   1.132 +extern "C" void __DebugMsgProtectChunks(int aProc);
   1.133 +extern "C" void __DebugMsgUnprotectChunks(int aProc);
   1.134 +extern "C" void __DebugMsgMoveChunksToHome(int aProc);
   1.135 +extern "C" void __DebugMsgMoveChunksToData(int aProc);
   1.136 +extern "C" void __DebugMsgMoveChunkToHomeAddress(int aChunk);
   1.137 +extern "C" void __DebugMsgProtectChunk(int aChunk);
   1.138 +extern "C" void __DebugMsgMoveChunkToRunAddress(int aChunk, int aRunAddr);
   1.139 +extern "C" void __DebugMsgUnprotectChunk(int aChunk);
   1.140 +#endif
   1.141 +
    1.142 +GLDEF_C __NAKED__ void DoProcessSwitch()
    1.143 +	{
    1.144 +	//
    1.145 +	// Enter and return in mode_svc with system lock held, kernel unlocked, interrupts enabled.
    1.146 +	// On entry r0->scheduler, r2->current thread, r5->current address space, r9->new address space
    1.147 +	// Must preserve r0,r2, can modify other registers
    1.148 +	//
    1.149 +	asm("mrc p15, 0, r7, c3, c0, 0 ");		// r7=DACR
    1.150 +	asm("ldr r4, [r0, #%a0]" : : "i" _FOFF(TScheduler,iCurrentVMProcess));
    1.151 +	asm("mov r1, r9 ");						// r1 = new process
    1.152 +	asm("stmfd sp!, {r7,lr} ");
    1.153 +	asm("orr r7, r7, #0x30 ");				// unlock page tables
    1.154 +	asm("mcr p15, 0, r7, c3, c0, 0 ");		// set DACR
    1.155 +	CPWAIT(,ip);
    1.156 +
    1.157 +	asm("ldr lr, [r9, #%a0]" : : "i" _FOFF(DArmPlatProcess,iAttributes));
    1.158 +	asm("mov ip, #0 ");
    1.159 +	asm("cmp r4, #0 ");
    1.160 +	asm("ldrne ip, [r4, #%a0]" : : "i" _FOFF(DArmPlatProcess,iAttributes));
    1.161 +
    1.162 +	// register contents at this point are:
    1.163 +	// r0->TheScheduler, r1->new process, r2->new thread, r3=scratch, r4=current VM process
    1.164 +	// r5=scratch, r7-11 scratch, r12=current VM process attributes,
    1.165 +	// r14=new process attributes
    1.166 +
    1.167 +	asm("ands r6, lr, #%a0" : : "i" ((TInt)DMemModelProcess::EMoving));
    1.168 +	asm("beq resched_fixed ");				// skip if new process is fixed
    1.169 +
    1.170 +	// The current process has moving chunks
    1.171 +	ASM_DEBUG0(Moving)
    1.172 +
    1.173 +#ifdef __CPU_WRITE_BACK_CACHE
    1.174 +	// We need to flush the DCache before moving anything.
    1.175 +	// Condition for flush here is
    1.176 +	// NEW PROCESS MOVING && (NEW PROCESS!=COMPLETE DATA SECTION PROCESS)
    1.177 +	asm("ldr r3, [r0, #%a0]" : : "i" _FOFF(TScheduler,iCompleteDataSectionProcess));	// r3=complete data section process
    1.178 +	asm("cmp r3, r1 ");						// check if TheCurrentProcess==TheCompleteDataSectionProcess
    1.179 +	asm("blne SyncMapChangeAttempt ");		// if not same, flush the DCache; on return ZF=0 means abort
    1.180 +											// stack alignment OK since called function is assembler
    1.181 +	asm("bne end_process_switch ");			// if contention, just exit now
    1.182 +#endif
    1.183 +
    1.184 +	asm("mov r6, #0 ");						// r6 accumulates Mmu::EFlushXxx flags for resched_flush
    1.185 +	asm("str r6, [r0, #%a0]" : : "i" _FOFF(TScheduler,iAddressSpace));
    1.186 +
    1.187 +	// if (pD && pD!=pP) ...
    1.188 +	asm("ldr r3, [r0, #%a0]" : : "i" _FOFF(TScheduler,iCurrentDataSectionProcess));		// r3=current data section process
    1.189 +	asm("mov r8, #0 ");
    1.190 +	asm("str r8, [r0, #%a0]" : : "i" _FOFF(TScheduler,iCompleteDataSectionProcess));
    1.191 +	asm("cmp r3, #0 ");						// if !TheCurrentDataSectionProcess
    1.192 +	asm("cmpne r3, r1 ");					// || TheCurrentDataSectionProcess==TheCurrentProcess
    1.193 +	asm("beq resched_moving1 ");			// skip next section
    1.194 +
    1.195 +	// Move TheCurrentDataSectionProcess to the home section and protect it
    1.196 +	ASM_DEBUG1(MoveChunksToHome,r3)
    1.197 +
    1.198 +	asm("ldr r5, [r3, #%a0]" : : "i" _FOFF(DMemModelProcess,iNumChunks));
    1.199 +	asm("cmp r5, #0 ");						// check if current data section process has no chunks
    1.200 +	asm("beq resched_moving1b ");			// skip if it does
    1.201 +	asm("stmfd sp!, {r1-r4,r12} ");
    1.202 +	asm("add r4, r3, #%a0" : : "i" (_FOFF(DMemModelProcess,iChunks)+4));
    1.203 +	asm("resched_moving1a: ");
    1.204 +	asm("ldr r7, [r4], #12 ");				// r7=address of chunk to move
    1.205 +	asm("bl MoveHomeOrProtectChunk ");		// move chunk - this must preserve r0
    1.206 +	asm("ldr ip, [r0, #%a0]" : : "i" _FOFF(TScheduler,iLock.iWaiting));
    1.207 +	asm("cmp ip, #0 ");						// check for contention
    1.208 +	asm("bne abort_resched_mh ");			// if there is, abort
    1.209 +#ifdef BTRACE_FAST_MUTEX
    1.210 +	asm("ldrb ip, [r0, #%a0]" : : "i" (_FOFF(TScheduler,iFastMutexFilter)));
    1.211 +	asm("cmp ip, #0");
    1.212 +	asm("ldrne r2, [sp, #4]"); // current thread
    1.213 +	asm("blne procswitch_trace_flash");
    1.214 +#endif
    1.215 +	asm("subs r5, r5, #1 ");
    1.216 +	asm("bne resched_moving1a ");
    1.217 +	asm("ldmfd sp!, {r1-r4,r12} ");
    1.218 +	asm("resched_moving1b: ");
    1.219 +	asm("cmp r3, r4 ");						// if TheCurrentDataSectionProcess==TheCurrentVMProcess ...
    1.220 +	asm("moveq r4, #0 ");					// ... TheCurrentVMProcess=NULL
    1.221 +	asm("streq r4, [r0, #%a0]" : : "i" _FOFF(TScheduler,iCurrentVMProcess));
    1.222 +	asm("mov r3, #0 ");						// TheCurrentDataSectionProcess=NULL
    1.223 +	asm("str r3, [r0, #%a0]" : : "i" _FOFF(TScheduler,iCurrentDataSectionProcess));
    1.224 +
    1.225 +	// if (pV && pV!=pP)
    1.226 +	asm("resched_moving1: ");
    1.227 +	asm("cmp r4, #0 ");						// if !TheCurrentVMProcess
    1.228 +	asm("cmpne r4, r1 ");					// || TheCurrentVMProcess==TheCurrentProcess
    1.229 +	asm("beq resched_moving2 ");			// skip next section
    1.230 +
    1.231 +	// Protect TheCurrentVMProcess but don't move it
    1.232 +	// register contents at this point are:
    1.233 +	// r0->TheScheduler, r2->new thread, r1->TheCurrentProcess, r4=current VM process
    1.234 +	// r12=current VM process attributes, r3,r5,r7-r11->scratch
    1.235 +	
    1.236 +	ASM_DEBUG1(ProtectChunks,r4)
    1.237 +
    1.238 +	asm("tst r12, #%a0" : : "i" (DMemModelProcess::EVariableAccess));	// r12=TheCurrentVMProcess->iAttributes
    1.239 +	asm("beq resched_moving2 ");			// if fixed access process, nothing to do
    1.240 +	asm("ldr r5, [r4, #%a0]" : : "i" _FOFF(DMemModelProcess,iNumChunks));
    1.241 +	asm("cmp r5, #0 ");
    1.242 +	asm("beq resched_moving2b ");			// if number of chunks=0, nothing to do
    1.243 +	asm("add r11, r4, #%a0" : : "i" (_FOFF(DMemModelProcess,iChunks)+4));
    1.244 +	asm("resched_moving2a: ");
    1.245 +	asm("ldr r7, [r11], #12 ");				// r7=address of chunk to protect
    1.246 +	asm("bl ProtectChunk ");				// protect it
    1.247 +	asm("ldr r7, [r0, #%a0]" : : "i" _FOFF(TScheduler,iLock.iWaiting));
    1.248 +	asm("cmp r7, #0 ");						// check for contention
    1.249 +	asm("bne abort_resched_mp ");			// if there is, abort
    1.250 +#ifdef BTRACE_FAST_MUTEX
    1.251 +	asm("ldrb ip, [r0, #%a0]" : : "i" (_FOFF(TScheduler,iFastMutexFilter)));
    1.252 +	asm("cmp ip, #0");
    1.253 +	asm("blne procswitch_trace_flash");
    1.254 +#endif
    1.255 +	asm("subs r5, r5, #1 ");
    1.256 +	asm("bne resched_moving2a ");
    1.257 +	asm("resched_moving2b: ");
    1.258 +	asm("mov r4, #0 ");						// TheCurrentVMProcess=NULL
    1.259 +	asm("str r4, [r0, #%a0]" : : "i" _FOFF(TScheduler,iCurrentVMProcess));
    1.260 +
    1.261 +	asm("resched_moving2: ");
    1.262 +	asm("str r1, [r0, #%a0]" : : "i" _FOFF(TScheduler,iCurrentDataSectionProcess));
    1.263 +	asm("str r1, [r0, #%a0]" : : "i" _FOFF(TScheduler,iCurrentVMProcess));
    1.264 +
    1.265 +	// Unprotect the TheCurrentProcess and move it to the data section if necessary
    1.266 +	// register contents at this point are:
    1.267 +	// r0->TheScheduler, r2->new thread, r1->TheCurrentProcess
    1.268 +	// r12=current VM process attributes, r3,r5,r7-r11->scratch
    1.269 +	
    1.270 +	ASM_DEBUG1(MoveChunksToData,r1)
    1.271 +
    1.272 +	asm("ldr r5, [r1, #%a0]" : : "i" _FOFF(DMemModelProcess,iNumChunks));
    1.273 +	asm("cmp r5, #0 ");
    1.274 +	asm("beq resched_finish ");				// if number of chunks=0, nothing to do
    1.275 +	asm("stmfd sp!, {r1-r2,r12} ");			// don't need r3 or r4 any more
    1.276 +	asm("add r4, r1, #%a0" : : "i" _FOFF(DMemModelProcess,iChunks));
    1.277 +	asm("resched_moving3a: ");
    1.278 +	asm("ldmia r4!, {r7,r8,r9} ");			// r7=run address, r8=ptr to chunk, r9 top 16bits=isReadOnly
    1.279 +	asm("bl MoveRunOrUnprotectChunk ");		// move chunk to data section and/or unprotect it
    1.280 +	asm("ldr ip, [r0, #%a0]" : : "i" _FOFF(TScheduler,iLock.iWaiting));
    1.281 +	asm("cmp ip, #0 ");						// check for contention
    1.282 +	asm("bne abort_resched_mr ");			// if there is, abort
    1.283 +#ifdef BTRACE_FAST_MUTEX
    1.284 +	asm("ldrb ip, [r0, #%a0]" : : "i" (_FOFF(TScheduler,iFastMutexFilter)));
    1.285 +	asm("cmp ip, #0");
    1.286 +	asm("blne procswitch_trace_flash");
    1.287 +#endif
    1.288 +	asm("subs r5, r5, #1 ");
    1.289 +	asm("bne resched_moving3a ");
    1.290 +	asm("ldmfd sp!, {r1-r2,r12} ");
    1.291 +	asm("str r1, [r0, #%a0]" : : "i" _FOFF(TScheduler,iCompleteDataSectionProcess));
    1.292 +	asm("b resched_finish ");
    1.293 +
    1.294 +	// The current process has no moving chunks
    1.295 +	asm("resched_fixed: ");
    1.296 +
    1.297 +	ASM_DEBUG0(FixedAddress)
    1.298 +
    1.299 +	asm("str r6, [r0, #%a0]" : : "i" _FOFF(TScheduler,iAddressSpace));
    1.300 +	asm("cmp r4, #0	");						// if TheCurrentVMProcess==NULL
    1.301 +	asm("beq resched_fixed1 ");				// skip this section
    1.302 +
    1.303 +	// Protect TheCurrentVMProcess
    1.304 +	ASM_DEBUG1(ProtectChunks,r4)
    1.305 +
    1.306 +	asm("ldr r5, [r4, #%a0]" : : "i" _FOFF(DMemModelProcess,iNumChunks));
    1.307 +	asm("add r11, r4, #%a0" : : "i" (_FOFF(DMemModelProcess,iChunks)+4));
    1.308 +	asm("cmp r5, #0 ");
    1.309 +	asm("beq resched_fixed1b ");			// if number of chunks=0, nothing to do
    1.310 +	asm("resched_fixed1a: ");
    1.311 +	asm("ldr r7, [r11], #12 ");				// r7=address of chunk to protect
    1.312 +	asm("bl ProtectChunk ");				// protect it
    1.313 +	asm("ldr r7, [r0, #%a0]" : : "i" _FOFF(TScheduler,iLock.iWaiting));
    1.314 +	asm("cmp r7, #0 ");						// check for contention
    1.315 +	asm("bne abort_resched_fp ");			// if there is, abort
    1.316 +#ifdef BTRACE_FAST_MUTEX
    1.317 +	asm("ldrb ip, [r0, #%a0]" : : "i" (_FOFF(TScheduler,iFastMutexFilter)));
    1.318 +	asm("cmp ip, #0");
    1.319 +	asm("blne procswitch_trace_flash");
    1.320 +#endif
    1.321 +	asm("subs r5, r5, #1 ");
    1.322 +	asm("bne resched_fixed1a ");
    1.323 +	asm("resched_fixed1b: ");
    1.324 +	asm("mov r4, #0 ");						// TheCurrentVMProcess=NULL
    1.325 +	asm("str r4, [r0, #%a0]" : : "i" _FOFF(TScheduler,iCurrentVMProcess));
    1.326 +
    1.327 +	asm("resched_fixed1: ");
    1.328 +
    1.329 +	// Unprotect TheCurrentProcess
    1.330 +	ASM_DEBUG1(UnprotectChunks,r1)
    1.331 +
    1.332 +	asm("ldr r5, [r1, #%a0]" : : "i" _FOFF(DMemModelProcess,iNumChunks));
    1.333 +	asm("str r1, [r0, #%a0]" : : "i" _FOFF(TScheduler,iCurrentVMProcess));
    1.334 +	asm("cmp r5, #0 ");
    1.335 +	asm("beq resched_finish ");				// if number of chunks=0, nothing to do
    1.336 +	asm("add r11, r1, #%a0" : : "i" _FOFF(DMemModelProcess,iChunks));
    1.337 +	asm("resched_fixed2a: ");
    1.338 +	asm("ldmia r11!, {r7,r8,r9} ");			// r7=run address, r8=ptr to chunk, r9 top 16bits=isReadOnly
    1.339 +	asm("bl UnprotectChunk ");				// unprotect chunk
    1.340 +	asm("ldr r7, [r0, #%a0]" : : "i" _FOFF(TScheduler,iLock.iWaiting));
    1.341 +	asm("cmp r7, #0 ");						// check for contention
    1.342 +	asm("bne abort_resched_fu ");			// if there is, abort
    1.343 +#ifdef BTRACE_FAST_MUTEX
    1.344 +	asm("ldrb ip, [r0, #%a0]" : : "i" (_FOFF(TScheduler,iFastMutexFilter)));
    1.345 +	asm("cmp ip, #0");
    1.346 +	asm("blne procswitch_trace_flash");
    1.347 +#endif
    1.348 +	asm("subs r5, r5, #1 ");
    1.349 +	asm("bne resched_fixed2a ");
    1.350 +
    1.351 +	asm("resched_finish: ");
    1.352 +	asm("str r1, [r0, #%a0]" : : "i" _FOFF(TScheduler,iAddressSpace));	// iAddressSpace=new process
    1.353 +
    1.354 +	asm("resched_flush: ");					// flush TLBs/caches according to flags accumulated in r6
    1.355 +#ifdef __CPU_SPLIT_TLB
    1.356 +	asm("tst r6, #%a0" : : "i" (Mmu::EFlushDPermChg));
    1.357 +	asm("mov r1, #0 ");
    1.358 +	FLUSH_DTLB(ne,r1);
    1.359 +	asm("tst r6, #%a0" : : "i" (Mmu::EFlushIPermChg));
    1.360 +	FLUSH_ITLB(ne,r1);
    1.361 +#else
    1.362 +	asm("tst r6, #%a0" : : "i" (Mmu::EFlushDPermChg|Mmu::EFlushIPermChg));
    1.363 +	asm("mov r1, #0 ");
    1.364 +	FLUSH_IDTLB(ne,r1);
    1.365 +#endif
    1.366 +#ifdef __CPU_WRITE_BACK_CACHE
    1.367 +//#ifdef __CPU_SPLIT_CACHE
    1.368 +	// D cache flush already done
    1.369 +//	asm("tst r6, #%a0" : : "i" (Mmu::EFlushIMove));	Should never need to flush ICache during context switch
    1.370 +//	FLUSH_ICACHE(ne,r1);
    1.371 +//#endif
    1.372 +#else
    1.373 +#ifdef __CPU_SPLIT_CACHE
    1.374 +	asm("tst r6, #%a0" : : "i" (Mmu::EFlushDMove));
    1.375 +	FLUSH_DCACHE(ne,r1);
    1.376 +//	asm("tst r6, #%a0" : : "i" (Mmu::EFlushIMove));	Should never need to flush ICache during context switch
    1.377 +//	FLUSH_ICACHE(ne,r1);
    1.378 +#else
    1.379 +	asm("tst r6, #%a0" : : "i" (Mmu::EFlushDMove|Mmu::EFlushIMove));
    1.380 +	FLUSH_IDCACHE(ne,r1);
    1.381 +#endif
    1.382 +#endif
    1.383 +	asm("cmp r6, #0 ");						// any page table changes?
    1.384 +	DRAIN_WRITE_BUFFER(ne,r1,r7);			// if so, make sure page table changes take effect
    1.385 +
    1.386 +	asm("end_process_switch: ");
    1.387 +	asm("ldmia sp!, {r7,lr} ");
    1.388 +	asm("mcr p15, 0, r7, c3, c0 ");			// restore DACR
    1.389 +	CPWAIT(,r3);
    1.390 +	__JUMP(,lr);
    1.391 +
    1.392 +	asm("abort_resched_mh: ");				// Reschedule aborted during MoveToHome
    1.393 +	asm("ldmfd sp!, {r1-r4,r12} ");
    1.394 +	asm("subs r5, r5, #1 ");
    1.395 +	asm("bne resched_flush ");				// if MoveToHome incomplete skip
    1.396 +	asm("cmp r3, r4 ");						// if TheCurrentDataSectionProcess==TheCurrentVMProcess ...
    1.397 +	asm("streq r5, [r0, #%a0]" : : "i" _FOFF(TScheduler,iCurrentVMProcess));	// ... TheCurrentVMProcess=NULL
    1.398 +	asm("str r5, [r0, #%a0]" : : "i" _FOFF(TScheduler,iCurrentDataSectionProcess));	// TheCurrentDataSectionProcess=NULL
    1.399 +	asm("b resched_flush ");				//
    1.400 +
    1.401 +	asm("abort_resched_mp: ");				// Reschedule aborted during protect before moving
    1.402 +	asm("subs r5, r5, #1 ");
    1.403 +	asm("streq r5, [r0, #%a0]" : : "i" _FOFF(TScheduler,iCurrentVMProcess));
    1.404 +	asm("b resched_flush ");				//
    1.405 +
    1.406 +	asm("abort_resched_mr: ");				// Reschedule aborted during MoveToRunAddress
    1.407 +	asm("ldmfd sp!, {r1-r2,r12} ");
    1.408 +	asm("subs r5, r5, #1 ");
    1.409 +	asm("streq r1, [r0, #%a0]" : : "i" _FOFF(TScheduler,iCompleteDataSectionProcess));
    1.410 +	asm("b resched_flush ");				//
    1.411 +
    1.412 +	asm("abort_resched_fp: ");				// Reschedule aborted during protect before fixed
    1.413 +	asm("subs r5, r5, #1 ");
    1.414 +	asm("streq r5, [r0, #%a0]" : : "i" _FOFF(TScheduler,iCurrentVMProcess));
    1.415 +	asm("b resched_flush ");				//
    1.416 +
    1.417 +	asm("abort_resched_fu: ");				// Reschedule aborted during unprotect
    1.418 +	asm("subs r5, r5, #1 ");
    1.419 +	asm("streq r1, [r0, #%a0]" : : "i" _FOFF(TScheduler,iAddressSpace));	// iAddressSpace=new process
    1.420 +	asm("b resched_flush ");				//
    1.421 +
    1.422 +
    1.423 +	//
    1.424 +	// Subroutines
    1.425 +	//
    1.426 +	// MoveHomeOrProtectChunk
    1.427 +	// If a chunk is movable and not at its home address, move it to the home address;
    1.428 +	// if the chunk is variable-access and not protected, protect it
    1.429 +	// r7 points to chunk
    1.430 +	// modifies r1-r3 and r8-r12
    1.431 +	asm("MoveHomeOrProtectChunk: ");
    1.432 +	asm("ldr r10, [r7, #%a0]" : : "i" _FOFF(DMemModelChunk,iAttributes));
    1.433 +	asm("tst r10, #%a0" : : "i" (DMemModelChunk::EFixedAddress));
    1.434 +	asm("bne ProtectChunk1 ");
    1.435 +	asm("ldr r11, [r7, #%a0]" : : "i" _FOFF(DMemModelChunk,iHomeBase));	// r11=home base
    1.436 +	asm("ldr r12, [r7, #%a0]" : : "i" _FOFF(DMemModelChunk,iBase));	// r12=iBase
    1.437 +	asm("cmp r11, r12 ");
    1.438 +	asm("beq ProtectChunk1 ");			// already at home address
    1.439 +
    1.440 +	ASM_DEBUG1(MoveChunkToHomeAddress,r7)
    1.441 +
    1.442 +	asm("str r11, [r7, #%a0]" : : "i" _FOFF(DMemModelChunk,iBase));	// iBase=iHomeBase
    1.443 +	asm("add r8, r7, #%a0" : : "i" _FOFF(DMemModelChunk,iNumPdes));
    1.444 +	asm("ldmia r8, {r8-r10} ");	// r8=numpdes r9=base pdes, r10=home pdes
    1.445 +	asm("cmp r8, #0 ");
    1.446 +	asm("str r10, [r7, #%a0]" : : "i" _FOFF(DMemModelChunk,iPdes));
    1.447 +	asm("beq MoveHome0 ");
    1.448 +	asm("ldr r12, [r7, #%a0]" : : "i" _FOFF(DMemModelChunk,iPdePermissions[0]));
    1.449 +	asm("mov r1, #0 ");
    1.450 +	asm("str r1, [r7, #%a0]" : : "i" _FOFF(DMemModelChunk,iChunkState));	// iChunkState=ENotRunning
    1.451 +	asm("MoveHome1: ");
    1.452 +	asm("ldr r3, [r9] ");				// fetch old pde
    1.453 +	asm("str r1, [r9], #4 ");			// clear old pde
    1.454 +	asm("bics r3, r3, #0x3fc ");		// mask out permissions and check for not present PDE
    1.455 +	asm("orrne r3, r3, r12 ");			// if present, or in new permissions
    1.456 +	asm("moveq r3, #0 ");				// else zero entry
    1.457 +	asm("str r3, [r10], #4 ");			// put into new pde
    1.458 +	asm("subs r8, r8, #1 ");
    1.459 +	asm("bne MoveHome1 ");				// loop to do next PDE
    1.460 +	asm("orr r6, r6, #%a0" : : "i" (Mmu::EFlushDMove|Mmu::EFlushDPermChg));	// moving chunk can't contain code
    1.461 +	__JUMP(,lr);
    1.462 +	asm("MoveHome0: ");
    1.463 +	asm("str r8, [r7, #%a0]" : : "i" _FOFF(DMemModelChunk,iChunkState));	// iChunkState=ENotRunning
    1.464 +	__JUMP(,lr);
    1.465 +
    1.466 +	// ProtectChunk
    1.467 +	// Protect a chunk - r7 points to chunk
    1.468 +	// r8-r10,r12 modified
    1.469 +	asm("ProtectChunk: ");
    1.470 +	asm("ldr r10, [r7, #%a0]" : : "i" _FOFF(DMemModelChunk,iAttributes));
    1.471 +
    1.472 +	asm("ProtectChunk1: ");
    1.473 +	asm("tst r10, #%a0" : : "i" (DMemModelChunk::EFixedAccess));
    1.474 +	asm("ldreq r12, [r7, #%a0]" : : "i" _FOFF(DMemModelChunk,iChunkState));
    1.475 +	__JUMP(ne,lr);
    1.476 +	asm("cmp r12, #0 ");
    1.477 +	__JUMP(eq,lr);						// if already in non-running state, nothing to do
    1.478 +	asm("ldr r8, [r7, #%a0]" : : "i" _FOFF(DMemModelChunk,iNumPdes));	// r8=number of chunk pdes
    1.479 +	asm("ldr r9, [r7, #%a0]" : : "i" _FOFF(DMemModelChunk,iPdes));	// r9=pde address
    1.480 +
    1.481 +	ASM_DEBUG1(ProtectChunk,r7)
    1.482 +
    1.483 +	asm("cmp r8, #0 ");
    1.484 +	asm("beq ProtectChunk0 ");
    1.485 +	asm("tst r10, #%a0" : : "i" (DMemModelChunk::ECode));		// check if chunk contains code
    1.486 +	asm("orrne r6, r6, #%a0" : : "i" (Mmu::EFlushIPermChg));	// if it does, may need to flush ITLB
    1.487 +	asm("ldr r10, [r7, #%a0]" : : "i" _FOFF(DMemModelChunk,iPdePermissions[0]));	//r10=new permissions
    1.488 +	asm("ProtectChunk2: ");
    1.489 +	asm("ldr r12, [r9] ");				// fetch old pde
    1.490 +	asm("bics r12, r12, #0x3fc ");		// mask out permissions and check for not present PDE
    1.491 +	asm("orrne r12, r12, r10 ");		// if present, or in new permissions
    1.492 +	asm("moveq r12, #0 ");				// else zero pde
    1.493 +	asm("str r12, [r9], #4 ");			// replace pde
    1.494 +	asm("subs r8, r8, #1 ");
    1.495 +	asm("bne ProtectChunk2 ");			// loop for next PDE
    1.496 +	asm("orr r6, r6, #%a0" : : "i" (Mmu::EFlushDPermChg));
    1.497 +	asm("ProtectChunk0: ");
    1.498 +	asm("str r8, [r7, #%a0]" : : "i" _FOFF(DMemModelChunk,iChunkState));	// iChunkState=ENotRunning
    1.499 +	__JUMP(,lr);
    1.500 +
    1.501 +	// MoveRunOrUnprotectChunk
    1.502 +	// If a chunk is movable and not at its run address in this process, move it to the run address;
    1.503 +	// if the chunk is variable-access and not unprotected, unprotect it.
    1.504 +	// r7=run address, r8 points to chunk
    1.505 +	// ignore read-only flag since the ARM cannot support it
    1.506 +	// r1-r3, r7, r9-r12 modified, r8 unmodified
    1.507 +	asm("MoveRunOrUnprotectChunk: ");
    1.508 +	asm("mov r9, #2 ");					// r9=state of chunk
    1.509 +
    1.510 +	asm("MoveToRunAddress: ");
    1.511 +	asm("ldr r12, [r8, #%a0]" : : "i" _FOFF(DMemModelChunk,iAttributes));
    1.512 +	asm("tst r12, #%a0" : : "i" (DMemModelChunk::EFixedAddress));
    1.513 +	asm("bne UnprotectChunk1 ");
    1.514 +	asm("ldr r11, [r8, #%a0]" : : "i" _FOFF(DMemModelChunk,iBase));	// r11=old base
    1.515 +	asm("cmp r11, r7 ");				// check if already at run address
    1.516 +	asm("beq UnprotectChunk1 ");		// if it is, just unprotect it
    1.517 +
    1.518 +	ASM_DEBUG2(MoveChunkToRunAddress,r8,r7)
    1.519 +
    1.520 +	asm("str r7, [r8, #%a0]" : : "i" _FOFF(DMemModelChunk,iBase));	// iBase=run address
    1.521 +	asm("ldr r12, [r8, #%a0]" : : "i" _FOFF(DMemModelChunk,iNumPdes));
    1.522 +	asm("str r9, [r8, #%a0]" : : "i" _FOFF(DMemModelChunk,iChunkState));	// store new chunk state
    1.523 +	asm("cmp r12, #0 ");
    1.524 +	asm("beq MoveRun0 ");
    1.525 +	asm("ldr r10, [r8, #%a0]" : : "i" _FOFF(DMemModelChunk,iPdes));
    1.526 +	asm("mov r1, #0 ");
    1.527 +	asm("add r7, r10, r7, lsr #18 ");	// r7=iPdes+(new address/2^18)
    1.528 +	asm("sub r7, r7, r11, lsr #18 ");	// r7=iPdes+(new address-old address)/2^18
    1.529 +	asm("str r7, [r8, #%a0]" : : "i" _FOFF(DMemModelChunk,iPdes));
    1.530 +	asm("add r9, r8, r9, lsl #2 ");		// index iPdePermissions by chunk state
    1.531 +	asm("ldr r9, [r9, #%a0]" : : "i" (_FOFF(DMemModelChunk,iPdePermissions[0])));	// r9=PDE permissions to use
    1.532 +	asm("MoveRun1: ");
    1.533 +	asm("ldr r3, [r10] ");				// fetch old pde
    1.534 +	asm("str r1, [r10], #4 ");			// clear old pde
    1.535 +	asm("bics r3, r3, #0x3fc ");		// mask out permissions and check for not present PDE
    1.536 +	asm("orrne r3, r3, r9 ");			// if present, or in new permissions
    1.537 +	asm("moveq r3, #0 ");				// else clear pde
    1.538 +	asm("str r3, [r7], #4 ");			// put into new pde
    1.539 +	asm("subs r12, r12, #1 ");
    1.540 +	asm("bne MoveRun1 ");
    1.541 +	asm("orr r6, r6, #%a0" : : "i" (Mmu::EFlushDMove|Mmu::EFlushDPermChg));	// moving chunk can't contain code
    1.542 +	asm("MoveRun0: ");
    1.543 +	__JUMP(,lr);
    1.544 +
    1.545 +	// UnprotectChunk
    1.546 +	// Apply running permissions to a chunk
    1.547 +	// r8 points to chunk
    1.548 +	// ignore read-only flag since the ARM cannot support it
    1.549 +	// r7,r9,r10,r12 modified, r8,r11 unmodified
    1.550 +	asm("UnprotectChunk: ");
    1.551 +	asm("mov r9, #2 ");					// r9=new state of chunk
    1.552 +
    1.553 +	asm("ApplyTopLevelPermissions: ");
    1.554 +	asm("ldr r12, [r8, #%a0]" : : "i" _FOFF(DMemModelChunk,iAttributes));
    1.555 +
    1.556 +	asm("UnprotectChunk1: ");
    1.557 +	asm("tst r12, #%a0" : : "i" (DMemModelChunk::EFixedAccess));
    1.558 +	asm("ldreq r10, [r8, #%a0]" : : "i" _FOFF(DMemModelChunk,iChunkState));	// r10=old chunk state
    1.559 +	__JUMP(ne,lr);
    1.560 +	asm("cmp r10, r9 ");
    1.561 +	__JUMP(eq,lr);						// if state already correct, nothing to do
    1.562 +	asm("ldr r10, [r8, #%a0]" : : "i" _FOFF(DMemModelChunk,iNumPdes));
    1.563 +
    1.564 +	ASM_DEBUG1(UnprotectChunk,r8)
    1.565 +
    1.566 +	asm("str r9, [r8, #%a0]" : : "i" _FOFF(DMemModelChunk,iChunkState));	// iChunkState=ERunningRW
    1.567 +	asm("cmp r10, #0 ");
    1.568 +	asm("beq UnprotectChunk0 ");
    1.569 +	asm("tst r12, #%a0" : : "i" (DMemModelChunk::ECode));		// check if chunk contains code
    1.570 +	asm("orrne r6, r6, #%a0" : : "i" (Mmu::EFlushIPermChg));	// if it does, may need to flush ITLB
    1.571 +	asm("ldr r12, [r8, #%a0]" : : "i" _FOFF(DMemModelChunk,iPdes));
    1.572 +	asm("add r9, r8, r9, lsl #2 ");		// index iPdePermissions by chunk state
    1.573 +	asm("ldr r9, [r9, #%a0]" : : "i" (_FOFF(DMemModelChunk,iPdePermissions[0])));	//r9=new permissions
    1.574 +	asm("UnprotectChunk2: ");
    1.575 +	asm("ldr r7, [r12] ");				// fetch old pde
    1.576 +	asm("bics r7, r7, #0x3fc ");		// mask out permissions and check for not present PDE
    1.577 +	asm("orrne r7, r7, r9 ");			// if present, or in new permissions
    1.578 +	asm("moveq r7, #0 ");				// else clear pde
    1.579 +	asm("str r7, [r12], #4 ");			// replace pde
    1.580 +	asm("subs r10, r10, #1 ");
    1.581 +	asm("bne UnprotectChunk2 ");
    1.582 +	asm("orr r6, r6, #%a0" : : "i" (Mmu::EFlushDPermChg));
    1.583 +	asm("UnprotectChunk0: ");
    1.584 +	__JUMP(,lr);
    1.585 +
    1.586 +#ifdef BTRACE_FAST_MUTEX
    1.587 +	// expects scheduler in r0, current thread in r2, preserves all but r10
    1.588 +	asm("procswitch_trace_flash:");
    1.589 +	asm("mov r10, sp");
    1.590 +	asm("bic sp, sp, #7"); 					// align stack to 8 bytes
    1.591 +	asm("stmdb sp!,{r3,ip}");
    1.592 +	asm("stmdb sp!,{r0,r1,r2,lr}");		// 4th item on stack is PC value for trace
    1.593 +	asm("mov r1, r0");
    1.594 +	asm("ldr r0, procswitch_trace_header"); // header parameter in r0
    1.595 +	asm("add r3, r1, #%a0" : : "i" _FOFF(TScheduler,iLock)); // fast mutex parameter in r3
    1.596 +	asm("mov lr, pc");
    1.597 +	asm("ldr pc, [r1, #%a0]" : : "i" _FOFF(TScheduler,iBTraceHandler));	
    1.598 +	asm("ldmia sp!,{r0,r1,r2,lr}");
    1.599 +	asm("ldmia sp!,{r3,ip}");
    1.600 +	asm("mov sp, r10");					// put stack back
    1.601 +	__JUMP(,lr);
    1.602 +
    1.603 +	asm("procswitch_trace_header:");
    1.604 +	asm(".word %a0" : : "i" ((TInt)(16<<BTrace::ESizeIndex) + ((BTrace::EContextIdPresent|BTrace::EPcPresent) << BTrace::EFlagsIndex*8) + (BTrace::EFastMutex<< BTrace::ECategoryIndex*8) + (BTrace::EFastMutexFlash << BTrace::ESubCategoryIndex*8)) );
    1.605 +#endif
    1.606 +
    1.607 +	// Global data references
    1.608 +	asm("__TheScheduler: ");
    1.609 +	asm(".word TheScheduler ");
    1.610 +	asm("__TheMmu: ");
    1.611 +	asm(".word %a0" : : "i" ((TInt)&::TheMmu));
    1.612 +	};
   1.613 +
    1.614 +__NAKED__ TUint32 DMemModelChunk::ApplyTopLevelPermissions(TChunkState /*aChunkState*/)
    1.615 +	{
    1.616 +	// C++-callable wrapper: unlocks the page tables via DACR, calls the
    1.617 +	// ApplyTopLevelPermissions asm subroutine (defined inside DoProcessSwitch),
    1.618 +	// then relocks and returns the accumulated Mmu flush flags.
    1.619 +	asm("stmfd sp!, {r6-r11,lr} ");
    1.620 +	asm("mov r8, r0 ");		// r8 = chunk ptr
    1.621 +	asm("mov r9, r1 ");		// r9 = chunk state
    1.622 +	asm("mrc p15, 0, r3, c3, c0, 0 ");	// r3=DACR
    1.623 +	asm("mov r6, #0 ");		// r6 = flush flags accumulator
    1.624 +	asm("orr r1, r3, #0x30 ");
    1.625 +	asm("mcr p15, 0, r1, c3, c0, 0 ");	// unlock page tables
    1.626 +	CPWAIT(,lr);
    1.627 +	asm("bl ApplyTopLevelPermissions ");	// asm subroutine in DoProcessSwitch
    1.628 +	asm("mcr p15, 0, r3, c3, c0, 0 ");	// lock page tables
    1.629 +	asm("mov r0, r6 ");					// return flush flags
    1.630 +	DRAIN_WRITE_BUFFER(,r1,r1);
    1.631 +	CPWAIT(,r1);
    1.632 +	asm("ldmfd sp!, {r6-r11,pc} ");
    1.633 +	}
   1.631 +
    1.632 +__NAKED__ TUint32 DMemModelChunk::MoveToRunAddress(TLinAddr /*aLinAddr*/, TChunkState /*aChunkState*/)
    1.633 +	{
    1.634 +	// C++-callable wrapper: unlocks the page tables via DACR, calls the
    1.635 +	// MoveToRunAddress asm subroutine (defined inside DoProcessSwitch),
    1.636 +	// then relocks and returns the accumulated Mmu flush flags.
    1.637 +	asm("stmfd sp!, {r5-r11,lr} ");
    1.638 +	asm("mov r8, r0 ");		// r8 = chunk ptr
    1.639 +	asm("mov r7, r1 ");		// r7 = run address
    1.640 +	asm("mrc p15, 0, r5, c3, c0, 0 ");	// r5=DACR
    1.641 +	asm("mov r9, r2 ");		// r9 = chunk state
    1.642 +	asm("mov r6, #0 ");		// r6 = flush flags accumulator
    1.643 +	asm("orr r1, r5, #0x30 ");
    1.644 +	asm("mcr p15, 0, r1, c3, c0, 0 ");	// unlock page tables
    1.645 +	CPWAIT(,lr);
    1.646 +	asm("bl MoveToRunAddress ");		// asm subroutine in DoProcessSwitch
    1.647 +	asm("mcr p15, 0, r5, c3, c0, 0 ");	// lock page tables
    1.648 +	asm("mov r0, r6 ");					// return flush flags
    1.649 +	DRAIN_WRITE_BUFFER(,r1,r1);
    1.650 +	CPWAIT(,r1);
    1.651 +	asm("ldmfd sp!, {r5-r11,pc} ");
    1.652 +	}
   1.650 +
    1.651 +__NAKED__ TUint32 DMemModelChunk::MoveToHomeSection()
    1.652 +	{
    1.653 +	// C++-callable wrapper: unlocks the page tables via DACR, calls the
    1.654 +	// MoveHomeOrProtectChunk asm subroutine (defined inside DoProcessSwitch),
    1.655 +	// then relocks and returns the accumulated Mmu flush flags.
    1.656 +	asm("stmfd sp!, {r5-r11,lr} ");
    1.657 +	asm("mov r7, r0 ");		// r7 = chunk ptr
    1.658 +	asm("mrc p15, 0, r0, c3, c0, 0 ");	// r0=DACR
    1.659 +	asm("mov r6, #0 ");		// r6 = flush flags accumulator
    1.660 +	asm("orr r1, r0, #0x30 ");
    1.661 +	asm("mcr p15, 0, r1, c3, c0, 0 ");	// unlock page tables
    1.662 +	CPWAIT(,lr);
    1.663 +	asm("bl MoveHomeOrProtectChunk ");	// asm subroutine in DoProcessSwitch
    1.664 +	asm("mcr p15, 0, r0, c3, c0, 0 ");	// lock page tables
    1.665 +	asm("mov r0, r6 ");					// return flush flags
    1.666 +	DRAIN_WRITE_BUFFER(,r1,r1);
    1.667 +	CPWAIT(,r1);
    1.668 +	asm("ldmfd sp!, {r5-r11,pc} ");
    1.669 +	}
   1.667 +#endif
   1.668 +