First public contribution.
1 // Copyright (c) 1997-2009 Nokia Corporation and/or its subsidiary(-ies).
2 // All rights reserved.
3 // This component and the accompanying materials are made available
4 // under the terms of the License "Eclipse Public License v1.0"
5 // which accompanies this distribution, and is available
6 // at the URL "http://www.eclipse.org/legal/epl-v10.html".
8 // Initial Contributors:
9 // Nokia Corporation - initial contribution.
14 // e32\memmodel\epoc\moving\arm\xsched.cia
21 #define iMState iWaitLink.iSpare1
23 //#define __DEBUG_BAD_ADDR
25 #define iCurrentVMProcess iExtras[0]
26 #define iCurrentDataSectionProcess iExtras[1]
27 #define iCompleteDataSectionProcess iExtras[2]
29 #ifdef __REQUEST_COMPLETE_MACHINE_CODED__
32 extern "C" void __DebugMsgRequestComplete(TInt a0, TInt a1, TInt a2);
33 extern "C" void __DebugMsgReqCompleteWrite(TInt a0, TInt a1, TInt a2);
// DThread::RequestComplete — machine-coded completion of a TRequestStatus.
// Writes the completion code into the target thread's address space (looking the
// address up in the owning process's chunk table) and signals the thread's request
// semaphore via NKern::ThreadRequestSignal.
// NOTE(review): this extraction is garbled — the original file's line numbers are
// fused into the text and several lines (braces, `#endif`s, some instructions) are
// missing, as the gaps in the embedded numbering show. Documented as-is.
36 __NAKED__ void DThread::RequestComplete(TRequestStatus*& /*aStatus*/, TInt /*aReason*/)
38 // Signal this thread's request semaphore.
39 // Enter with system locked, return with system unlocked.
42 ASM_DEBUG2(DThreadRequestComplete,r0,lr);
44 asm("ldr r3, [r1] "); // r3 points to TRequestStatus
46 asm("str r12, [r1] "); // aStatus=NULL (r12 presumably zeroed on a missing line — confirm)
48 asm(".global _asm_RequestComplete ");
49 asm("_asm_RequestComplete: "); // entry: r0=DThread*, r2=completion code, r3=TRequestStatus*
51 #ifdef BTRACE_REQUESTS
52 asm("stmdb sp!,{r0-r3,lr}"); // preserve registers around the BTrace output call
54 asm("mov r3,r2"); // arg3 = aReason
55 asm("mov r2,r1"); // arg2 = aStatus
56 asm("add r1,r0,#%a0" : : "i" _FOFF(DThread,iNThread)); // arg1 = &this->iNThread
57 asm("ldr r0,_threadReqequestCompleteTraceHeader"); // arg0 = header (note: label spelling is intentional — matches definition below)
58 asm("bl " CSM_ZN6BTrace4OutXEmmmm);
59 asm("ldmia sp!,{r0-r3,lr}");
62 ASM_DEBUG3(RequestComplete,r0,r3,r2);
63 asm("ldrb r12, [r0, #%a0]" : : "i" _FOFF(DThread,iMState)); // r12=thread M-state (iMState aliases iWaitLink.iSpare1)
64 asm("stmfd sp!, {r4-r6} ");
65 asm("add r6, r0, #%a0" : : "i" _FOFF(DThread,iNThread)); // r6=&iNThread, kept for the signal call at the end
66 asm("ldr r0, [r0, #%a0]" : : "i" _FOFF(DThread,iOwningProcess));
67 asm("cmp r12, #%a0" : : "i" (DThread::EDead)); // test if iMState=EDead
68 asm("ldr r12, [r0, #%a0]!" : : "i" _FOFF(DMemModelProcess, iNumChunks)); // step r0 on to iChunks[0]; r12=chunk count
69 asm("beq req_complete_dead_thread "); // dead thread: nothing to write, just unlock and return
70 asm("b req_complete_2");
// Chunk-table scan: find which chunk of process r0 contains address r3.
// Loop runs with carry set meaning "keep searching"; falls out when a chunk
// whose [base, base+maxsize) range covers r3 is found or the count hits zero.
72 // lookup r3 in address space of process r0
73 asm("req_complete_1: ");
74 asm("ldmcsib r0!, {r1,r4} "); // r1=data section base, r4=chunk ptr
75 asm("subcss r1, r3, r1 "); // r1=offset of target address within chunk
76 asm("ldrcs r5, [r4, #%a0]" : : "i" _FOFF(DChunk,iMaxSize)); // if offset>=0, r5=chunk max size
77 asm("cmpcs r1, r5 "); // and compare offset to max size
78 asm("addcs r0, r0, #4 "); // if offset>=max size, move to next entry
79 asm("req_complete_2: ");
80 asm("subcss r12, r12, #1 "); // and decrement counter
81 asm("bcs req_complete_1 "); // loop if more chunks to check
82 asm("cmp r12, #0 "); // did we find chunk?
83 asm("ldrge r3, [r4, #%a0]" : : "i" _FOFF(DMemModelChunk,iHomeRegionOffset));
84 asm("ldrge r5, [r4, #%a0]" : : "i" _FOFF(DMemModelChunk,iHomeRegionSize));
85 asm("cmpge r1, r3 "); // if we did but offset<iHomeRegionOffset, no good
86 asm("blt req_complete_invalid "); // or if we didn't find it, no good
87 asm("add r3, r3, r5 "); // r3=home region offset+home region size
88 asm("cmp r1, r3 "); // if offset >= iHomeRegionOffset+iHomeRegionSize, no good
89 asm("ldrlt r0, [r4, #%a0]" : : "i" _FOFF(DChunk,iBase)); // if OK, r0=chunk base
90 asm("bge req_complete_invalid ");
91 // Zero flag always clear here
93 ASM_DEBUG3(ReqCompleteWrite,r0,r1,r2);
95 asm(".global __magic_address_reqc ");
96 asm("__magic_address_reqc: "); // this instruction is magically immune from exceptions
97 asm("str r2, [r0, r1] "); // r0+r1=current address, store completion code
// NE still holds from the test above; the exception handler presumably clears it on
// a faulted write so the fall-through below takes the invalid path — TODO confirm.
99 asm("movne r0, r6 "); // if write OK, r0->iNThread
100 asm("ldmnefd sp!, {r4-r6} "); // restore registers
101 asm("movne r1, #0 ");
102 asm("bne " CSM_ZN5NKern19ThreadRequestSignalEP7NThreadP10NFastMutex); // tail-call: signal and unlock
103 #ifndef __DEBUG_BAD_ADDR
104 asm("req_complete_invalid: ");
105 asm("ldmfd sp!, {r4-r6} "); // restore registers
106 asm("b " CSM_ZN5NKern12UnlockSystemEv); // invalid address: silently drop, just unlock
108 #ifdef __DEBUG_BAD_ADDR
109 asm("req_complete_invalid: "); //DEBUG
110 asm("mov r9, #0xde000000 ");
111 asm("str r9, [r9, #0xaf] "); //HACK-CRASH SYSTEM deliberately so bad addresses are caught in debugging
113 asm("req_complete_dead_thread: ");
114 asm("ldmfd sp!, {r4-r6} "); // restore registers
115 asm("b " CSM_ZN5NKern12UnlockSystemEv); // dead thread: nothing to signal, just unlock
117 #ifdef BTRACE_REQUESTS
118 asm("_threadReqequestCompleteTraceHeader:"); // BTrace header word for the trace above
119 asm(".word %a0" : : "i" (BTRACE_HEADER_C(16,BTrace::ERequests,BTrace::ERequestComplete)));
124 #ifdef __SCHEDULER_MACHINE_CODED__
127 extern "C" void __DebugMsgFixedAddress();
128 extern "C" void __DebugMsgMoving();
129 extern "C" void __DebugMsgProtectChunks(int aProc);
130 extern "C" void __DebugMsgUnprotectChunks(int aProc);
131 extern "C" void __DebugMsgMoveChunksToHome(int aProc);
132 extern "C" void __DebugMsgMoveChunksToData(int aProc);
133 extern "C" void __DebugMsgMoveChunkToHomeAddress(int aChunk);
134 extern "C" void __DebugMsgProtectChunk(int aChunk);
135 extern "C" void __DebugMsgMoveChunkToRunAddress(int aChunk, int aRunAddr);
136 extern "C" void __DebugMsgUnprotectChunk(int aChunk);
// DoProcessSwitch — moving-memory-model process context switch.
// Moves the outgoing data-section process home, protects the outgoing VM process,
// and moves/unprotects the incoming process's chunks, updating the scheduler's
// iCurrentDataSectionProcess / iCurrentVMProcess / iCompleteDataSectionProcess /
// iAddressSpace bookkeeping. After every per-chunk step it polls
// iLock.iWaiting and aborts the switch on contention, leaving the bookkeeping
// consistent via the abort_resched_* tails.
// NOTE(review): this extraction is garbled — original line numbers are fused into
// the text and lines are missing (braces, `#endif`s, some instructions), as the
// gaps in the embedded numbering show. Documented as-is.
139 GLDEF_C __NAKED__ void DoProcessSwitch()
142 // Enter and return in mode_svc with system lock held, kernel unlocked, interrupts enabled.
143 // On entry r0->scheduler, r2->current thread, r5->current address space, r9->new address space
144 // Must preserve r0,r2, can modify other registers
146 asm("mrc p15, 0, r7, c3, c0, 0 "); // r7=DACR (domain access control register)
147 asm("ldr r4, [r0, #%a0]" : : "i" _FOFF(TScheduler,iCurrentVMProcess));
149 asm("stmfd sp!, {r7,lr} "); // save original DACR and return address
150 asm("orr r7, r7, #0x30 "); // unlock page tables (grant manager access to their domain)
151 asm("mcr p15, 0, r7, c3, c0, 0 "); // set DACR
154 asm("ldr lr, [r9, #%a0]" : : "i" _FOFF(DArmPlatProcess,iAttributes));
157 asm("ldrne ip, [r4, #%a0]" : : "i" _FOFF(DArmPlatProcess,iAttributes));
159 // register contents at this point are:
160 // r0->TheScheduler, r1->new process, r2->new thread, r3=scratch, r4=current VM process
161 // r5=scratch, r7-11 scratch, r12=current VM process attributes,
162 // r14=new process attributes
164 asm("ands r6, lr, #%a0" : : "i" ((TInt)DMemModelProcess::EMoving));
165 asm("beq resched_fixed "); // skip if new process is fixed
167 // The current process has moving chunks
170 #ifdef __CPU_WRITE_BACK_CACHE
171 // We need to flush the DCache before moving anything.
172 // Condition for flush here is
173 // NEW PROCESS MOVING && (NEW PROCESS!=COMPLETE DATA SECTION PROCESS)
174 asm("ldr r3, [r0, #%a0]" : : "i" _FOFF(TScheduler,iCompleteDataSectionProcess)); // r3=complete data section process
175 asm("cmp r3, r1 "); // check if TheCurrentProcess==TheCompleteDataSectionProcess
176 asm("blne SyncMapChangeAttempt "); // if not same, flush the DCache; on return ZF=0 means abort
177 // stack alignment OK since called function is assembler
178 asm("bne end_process_switch "); // if contention, just exit now
182 asm("str r6, [r0, #%a0]" : : "i" _FOFF(TScheduler,iAddressSpace)); // invalidate iAddressSpace while chunks are in flux
184 // if (pD && pD!=pP) ...
185 asm("ldr r3, [r0, #%a0]" : : "i" _FOFF(TScheduler,iCurrentDataSectionProcess)); // r3=current data section process
187 asm("str r8, [r0, #%a0]" : : "i" _FOFF(TScheduler,iCompleteDataSectionProcess));
188 asm("cmp r3, #0 "); // if !TheCurrentDataSectionProcess
189 asm("cmpne r3, r1 "); // || TheCurrentDataSectionProcess==TheCurrentProcess
190 asm("beq resched_moving1 "); // skip next section
192 // Move TheCurrentDataSectionProcess to the home section and protect it
193 ASM_DEBUG1(MoveChunksToHome,r3)
195 asm("ldr r5, [r3, #%a0]" : : "i" _FOFF(DMemModelProcess,iNumChunks));
196 asm("cmp r5, #0 "); // check if current data section process has no chunks
197 asm("beq resched_moving1b "); // skip if it does
198 asm("stmfd sp!, {r1-r4,r12} ");
199 asm("add r4, r3, #%a0" : : "i" (_FOFF(DMemModelProcess,iChunks)+4)); // r4->chunk pointer field of iChunks[0]
200 asm("resched_moving1a: ");
201 asm("ldr r7, [r4], #12 "); // r7=address of chunk to move (entries are 12 bytes apart)
202 asm("bl MoveHomeOrProtectChunk "); // move chunk - this must preserve r0
203 asm("ldr ip, [r0, #%a0]" : : "i" _FOFF(TScheduler,iLock.iWaiting));
204 asm("cmp ip, #0 "); // check for contention on the system lock
205 asm("bne abort_resched_mh "); // if there is, abort
206 #ifdef BTRACE_FAST_MUTEX
207 asm("ldrb ip, [r0, #%a0]" : : "i" (_FOFF(TScheduler,iFastMutexFilter)));
209 asm("ldrne r2, [sp, #4]"); // current thread (saved on stack above)
210 asm("blne procswitch_trace_flash"); // emit fast-mutex flash trace if filter enabled
212 asm("subs r5, r5, #1 ");
213 asm("bne resched_moving1a "); // loop over remaining chunks
214 asm("ldmfd sp!, {r1-r4,r12} ");
215 asm("resched_moving1b: ");
217 asm("moveq r4, #0 "); // ... TheCurrentVMProcess=NULL
218 asm("streq r4, [r0, #%a0]" : : "i" _FOFF(TScheduler,iCurrentVMProcess));
219 asm("mov r3, #0 "); // TheCurrentDataSectionProcess=NULL
220 asm("str r3, [r0, #%a0]" : : "i" _FOFF(TScheduler,iCurrentDataSectionProcess));
223 asm("resched_moving1: ");
224 asm("cmp r4, #0 "); // if !TheCurrentVMProcess
225 asm("cmpne r4, r1 "); // || TheCurrentVMProcess==TheCurrentProcess
226 asm("beq resched_moving2 "); // skip next section
228 // Protect TheCurrentVMProcess but don't move it
229 // register contents at this point are:
230 // r0->TheScheduler, r2->new thread, r1->TheCurrentProcess, r4=current VM process
231 // r12=current VM process attributes, r3,r5,r7-r11->scratch
233 ASM_DEBUG1(ProtectChunks,r4)
235 asm("tst r12, #%a0" : : "i" (DMemModelProcess::EVariableAccess)); // r12=TheCurrentVMProcess->iAttributes
236 asm("beq resched_moving2 "); // if fixed access process, nothing to do
237 asm("ldr r5, [r4, #%a0]" : : "i" _FOFF(DMemModelProcess,iNumChunks));
239 asm("beq resched_moving2b "); // if number of chunks=0, nothing to do
240 asm("add r11, r4, #%a0" : : "i" (_FOFF(DMemModelProcess,iChunks)+4));
241 asm("resched_moving2a: ");
242 asm("ldr r7, [r11], #12 "); // r7=address of chunk to protect
243 asm("bl ProtectChunk "); // protect it
244 asm("ldr r7, [r0, #%a0]" : : "i" _FOFF(TScheduler,iLock.iWaiting));
245 asm("cmp r7, #0 "); // check for contention
246 asm("bne abort_resched_mp "); // if there is, abort
247 #ifdef BTRACE_FAST_MUTEX
248 asm("ldrb ip, [r0, #%a0]" : : "i" (_FOFF(TScheduler,iFastMutexFilter)));
250 asm("blne procswitch_trace_flash");
252 asm("subs r5, r5, #1 ");
253 asm("bne resched_moving2a ");
254 asm("resched_moving2b: ");
255 asm("mov r4, #0 "); // TheCurrentVMProcess=NULL
256 asm("str r4, [r0, #%a0]" : : "i" _FOFF(TScheduler,iCurrentVMProcess));
258 asm("resched_moving2: ");
259 asm("str r1, [r0, #%a0]" : : "i" _FOFF(TScheduler,iCurrentDataSectionProcess)); // new process now owns the data section
260 asm("str r1, [r0, #%a0]" : : "i" _FOFF(TScheduler,iCurrentVMProcess));
262 // Unprotect the TheCurrentProcess and move it to the data section if necessary
263 // register contents at this point are:
264 // r0->TheScheduler, r2->new thread, r1->TheCurrentProcess
265 // r12=current VM process attributes, r3,r5,r7-r11->scratch
267 ASM_DEBUG1(MoveChunksToData,r1)
269 asm("ldr r5, [r1, #%a0]" : : "i" _FOFF(DMemModelProcess,iNumChunks));
271 asm("beq resched_finish "); // if number of chunks=0, nothing to do
272 asm("stmfd sp!, {r1-r2,r12} "); // don't need r3 or r4 any more
273 asm("add r4, r1, #%a0" : : "i" _FOFF(DMemModelProcess,iChunks));
274 asm("resched_moving3a: ");
275 asm("ldmia r4!, {r7,r8,r9} "); // r7=run address, r8=ptr to chunk, r9 top 16bits=isReadOnly
276 asm("bl MoveRunOrUnprotectChunk "); // move chunk to data section and/or unprotect it
277 asm("ldr ip, [r0, #%a0]" : : "i" _FOFF(TScheduler,iLock.iWaiting));
278 asm("cmp ip, #0 "); // check for contention
279 asm("bne abort_resched_mr "); // if there is, abort
280 #ifdef BTRACE_FAST_MUTEX
281 asm("ldrb ip, [r0, #%a0]" : : "i" (_FOFF(TScheduler,iFastMutexFilter)));
283 asm("blne procswitch_trace_flash");
285 asm("subs r5, r5, #1 ");
286 asm("bne resched_moving3a ");
287 asm("ldmfd sp!, {r1-r2,r12} ");
288 asm("str r1, [r0, #%a0]" : : "i" _FOFF(TScheduler,iCompleteDataSectionProcess)); // all chunks moved: new process fully in data section
289 asm("b resched_finish ");
291 // The current process has no moving chunks
292 asm("resched_fixed: ");
294 ASM_DEBUG0(FixedAddress)
296 asm("str r6, [r0, #%a0]" : : "i" _FOFF(TScheduler,iAddressSpace));
297 asm("cmp r4, #0 "); // if TheCurrentVMProcess==NULL
298 asm("beq resched_fixed1 "); // skip this section
300 // Protect TheCurrentVMProcess
301 ASM_DEBUG1(ProtectChunks,r4)
303 asm("ldr r5, [r4, #%a0]" : : "i" _FOFF(DMemModelProcess,iNumChunks));
304 asm("add r11, r4, #%a0" : : "i" (_FOFF(DMemModelProcess,iChunks)+4));
306 asm("beq resched_fixed1b "); // if number of chunks=0, nothing to do
307 asm("resched_fixed1a: ");
308 asm("ldr r7, [r11], #12 "); // r7=address of chunk to protect
309 asm("bl ProtectChunk "); // protect it
310 asm("ldr r7, [r0, #%a0]" : : "i" _FOFF(TScheduler,iLock.iWaiting));
311 asm("cmp r7, #0 "); // check for contention
312 asm("bne abort_resched_fp "); // if there is, abort
313 #ifdef BTRACE_FAST_MUTEX
314 asm("ldrb ip, [r0, #%a0]" : : "i" (_FOFF(TScheduler,iFastMutexFilter)));
316 asm("blne procswitch_trace_flash");
318 asm("subs r5, r5, #1 ");
319 asm("bne resched_fixed1a ");
320 asm("resched_fixed1b: ");
321 asm("mov r4, #0 "); // TheCurrentVMProcess=NULL
322 asm("str r4, [r0, #%a0]" : : "i" _FOFF(TScheduler,iCurrentVMProcess));
324 asm("resched_fixed1: ");
326 // Unprotect TheCurrentProcess
327 ASM_DEBUG1(UnprotectChunks,r1)
329 asm("ldr r5, [r1, #%a0]" : : "i" _FOFF(DMemModelProcess,iNumChunks));
330 asm("str r1, [r0, #%a0]" : : "i" _FOFF(TScheduler,iCurrentVMProcess));
332 asm("beq resched_finish "); // if number of chunks=0, nothing to do
333 asm("add r11, r1, #%a0" : : "i" _FOFF(DMemModelProcess,iChunks));
334 asm("resched_fixed2a: ");
335 asm("ldmia r11!, {r7,r8,r9} "); // r7=run address, r8=ptr to chunk, r9 top 16bits=isReadOnly
336 asm("bl UnprotectChunk "); // unprotect chunk
337 asm("ldr r7, [r0, #%a0]" : : "i" _FOFF(TScheduler,iLock.iWaiting));
338 asm("cmp r7, #0 "); // check for contention
339 asm("bne abort_resched_fu "); // if there is, abort
340 #ifdef BTRACE_FAST_MUTEX
341 asm("ldrb ip, [r0, #%a0]" : : "i" (_FOFF(TScheduler,iFastMutexFilter)));
343 asm("blne procswitch_trace_flash");
345 asm("subs r5, r5, #1 ");
346 asm("bne resched_fixed2a ");
348 asm("resched_finish: ");
349 asm("str r1, [r0, #%a0]" : : "i" _FOFF(TScheduler,iAddressSpace)); // iAddressSpace=new process
// TLB/cache flush: r6 accumulates Mmu::EFlush* flags set by the chunk routines above.
351 asm("resched_flush: ");
352 #ifdef __CPU_SPLIT_TLB
353 asm("tst r6, #%a0" : : "i" (Mmu::EFlushDPermChg));
356 asm("tst r6, #%a0" : : "i" (Mmu::EFlushIPermChg));
359 asm("tst r6, #%a0" : : "i" (Mmu::EFlushDPermChg|Mmu::EFlushIPermChg));
363 #ifdef __CPU_WRITE_BACK_CACHE
364 //#ifdef __CPU_SPLIT_CACHE
365 // D cache flush already done
366 // asm("tst r6, #%a0" : : "i" (Mmu::EFlushIMove)); Should never need to flush ICache during context switch
367 // FLUSH_ICACHE(ne,r1);
370 #ifdef __CPU_SPLIT_CACHE
371 asm("tst r6, #%a0" : : "i" (Mmu::EFlushDMove));
373 // asm("tst r6, #%a0" : : "i" (Mmu::EFlushIMove)); Should never need to flush ICache during context switch
374 // FLUSH_ICACHE(ne,r1);
376 asm("tst r6, #%a0" : : "i" (Mmu::EFlushDMove|Mmu::EFlushIMove));
377 FLUSH_IDCACHE(ne,r1);
380 asm("cmp r6, #0 "); // any page table changes?
381 DRAIN_WRITE_BUFFER(ne,r1,r7); // if so, make sure page table changes take effect
383 asm("end_process_switch: ");
384 asm("ldmia sp!, {r7,lr} ");
385 asm("mcr p15, 0, r7, c3, c0 "); // restore DACR (re-lock page tables)
// Abort tails: each undoes/records partial progress so the scheduler state stays
// consistent, then falls into resched_flush to apply any flushes already needed.
389 asm("abort_resched_mh: "); // Reschedule aborted during MoveToHome
390 asm("ldmfd sp!, {r1-r4,r12} ");
391 asm("subs r5, r5, #1 ");
392 asm("bne resched_flush "); // if MoveToHome incomplete skip
393 asm("cmp r3, r4 "); // if TheCurrentDataSectionProcess==TheCurrentVMProcess ...
394 asm("streq r5, [r0, #%a0]" : : "i" _FOFF(TScheduler,iCurrentVMProcess)); // ... TheCurrentVMProcess=NULL (r5==0 here)
395 asm("str r5, [r0, #%a0]" : : "i" _FOFF(TScheduler,iCurrentDataSectionProcess)); // TheCurrentDataSectionProcess=NULL
396 asm("b resched_flush "); //
398 asm("abort_resched_mp: "); // Reschedule aborted during protect before moving
399 asm("subs r5, r5, #1 ");
400 asm("streq r5, [r0, #%a0]" : : "i" _FOFF(TScheduler,iCurrentVMProcess)); // all chunks done => VM process fully protected
401 asm("b resched_flush "); //
403 asm("abort_resched_mr: "); // Reschedule aborted during MoveToRunAddress
404 asm("ldmfd sp!, {r1-r2,r12} ");
405 asm("subs r5, r5, #1 ");
406 asm("streq r1, [r0, #%a0]" : : "i" _FOFF(TScheduler,iCompleteDataSectionProcess)); // all chunks done => data section complete
407 asm("b resched_flush "); //
409 asm("abort_resched_fp: "); // Reschedule aborted during protect before fixed
410 asm("subs r5, r5, #1 ");
411 asm("streq r5, [r0, #%a0]" : : "i" _FOFF(TScheduler,iCurrentVMProcess));
412 asm("b resched_flush "); //
414 asm("abort_resched_fu: "); // Reschedule aborted during unprotect
415 asm("subs r5, r5, #1 ");
416 asm("streq r1, [r0, #%a0]" : : "i" _FOFF(TScheduler,iAddressSpace)); // iAddressSpace=new process
417 asm("b resched_flush "); //
// MoveHomeOrProtectChunk — asm subroutine used by DoProcessSwitch and
// DMemModelChunk::MoveToHomeSection. Relocates a movable chunk back to its home
// address by rewriting its page directory entries, or just protects a fixed-address
// chunk. Accumulates required TLB/cache flush flags into r6.
// NOTE(review): extraction is garbled (fused line numbers, missing lines, e.g. the
// MoveHome1 loop label and some tests are absent). Documented as-is.
423 // MoveHomeOrProtectChunk
424 // If a chunk is movable and not at its home address, move it to the home address;
425 // if the chunk is variable-access and not protected, protect it
426 // r7 points to chunk
427 // modifies r1-r3 and r8-r12
428 asm("MoveHomeOrProtectChunk: ");
429 asm("ldr r10, [r7, #%a0]" : : "i" _FOFF(DMemModelChunk,iAttributes));
430 asm("tst r10, #%a0" : : "i" (DMemModelChunk::EFixedAddress));
431 asm("bne ProtectChunk1 "); // fixed-address chunk: protect only, never move
432 asm("ldr r11, [r7, #%a0]" : : "i" _FOFF(DMemModelChunk,iHomeBase)); // r11=home base
433 asm("ldr r12, [r7, #%a0]" : : "i" _FOFF(DMemModelChunk,iBase)); // r12=iBase
434 asm("cmp r11, r12 ");
435 asm("beq ProtectChunk1 "); // already at home address
437 ASM_DEBUG1(MoveChunkToHomeAddress,r7)
439 asm("str r11, [r7, #%a0]" : : "i" _FOFF(DMemModelChunk,iBase)); // iBase=iHomeBase
440 asm("add r8, r7, #%a0" : : "i" _FOFF(DMemModelChunk,iNumPdes));
441 asm("ldmia r8, {r8-r10} "); // r8=numpdes r9=base pdes, r10=home pdes
443 asm("str r10, [r7, #%a0]" : : "i" _FOFF(DMemModelChunk,iPdes));
444 asm("beq MoveHome0 ");
445 asm("ldr r12, [r7, #%a0]" : : "i" _FOFF(DMemModelChunk,iPdePermissions[0])); // r12=not-running PDE permissions
447 asm("str r1, [r7, #%a0]" : : "i" _FOFF(DMemModelChunk,iChunkState)); // iChunkState=ENotRunning
// PDE copy loop: move each entry from the run-address PDEs (r9) to the home PDEs
// (r10), applying the not-running permissions; missing entries become 0.
// NOTE(review): the "MoveHome1:" loop label targeted below is on a missing line.
449 asm("ldr r3, [r9] "); // fetch old pde
450 asm("str r1, [r9], #4 "); // clear old pde
451 asm("bics r3, r3, #0x3fc "); // mask out permissions and check for not present PDE
452 asm("orrne r3, r3, r12 "); // if present, or in new permissions
453 asm("moveq r3, #0 "); // else zero entry
454 asm("str r3, [r10], #4 "); // put into new pde
455 asm("subs r8, r8, #1 ");
456 asm("bne MoveHome1 "); // loop to do next PDE
457 asm("orr r6, r6, #%a0" : : "i" (Mmu::EFlushDMove|Mmu::EFlushDPermChg)); // moving chunk can't contain code
460 asm("str r8, [r7, #%a0]" : : "i" _FOFF(DMemModelChunk,iChunkState)); // iChunkState=ENotRunning (r8==0 here)
// ProtectChunk — asm subroutine: apply not-running (protected) PDE permissions to a
// variable-access chunk in place. Fixed-access chunks and chunks already in the
// not-running state are left untouched. Sets Mmu::EFlush* flags in r6 as needed.
464 // Protect a chunk - r7 points to chunk
465 // r8-r10,r12 modified
466 asm("ProtectChunk: ");
467 asm("ldr r10, [r7, #%a0]" : : "i" _FOFF(DMemModelChunk,iAttributes));
469 asm("ProtectChunk1: "); // alternate entry with r10=iAttributes already loaded
470 asm("tst r10, #%a0" : : "i" (DMemModelChunk::EFixedAccess));
471 asm("ldreq r12, [r7, #%a0]" : : "i" _FOFF(DMemModelChunk,iChunkState));
474 __JUMP(eq,lr); // if already in non-running state, nothing to do
475 asm("ldr r8, [r7, #%a0]" : : "i" _FOFF(DMemModelChunk,iNumPdes)); // r8=number of chunk pdes
476 asm("ldr r9, [r7, #%a0]" : : "i" _FOFF(DMemModelChunk,iPdes)); // r9=pde address
478 ASM_DEBUG1(ProtectChunk,r7)
481 asm("beq ProtectChunk0 "); // no PDEs: just record the state change
482 asm("tst r10, #%a0" : : "i" (DMemModelChunk::ECode)); // check if chunk contains code
483 asm("orrne r6, r6, #%a0" : : "i" (Mmu::EFlushIPermChg)); // if it does, may need to flush ITLB
484 asm("ldr r10, [r7, #%a0]" : : "i" _FOFF(DMemModelChunk,iPdePermissions[0])); //r10=new (not-running) permissions
485 asm("ProtectChunk2: "); // rewrite each PDE with the protected permissions
486 asm("ldr r12, [r9] "); // fetch old pde
487 asm("bics r12, r12, #0x3fc "); // mask out permissions and check for not present PDE
488 asm("orrne r12, r12, r10 "); // if present, or in new permissions
489 asm("moveq r12, #0 "); // else zero pde
490 asm("str r12, [r9], #4 "); // replace pde
491 asm("subs r8, r8, #1 ");
492 asm("bne ProtectChunk2 "); // loop for next PDE
493 asm("orr r6, r6, #%a0" : : "i" (Mmu::EFlushDPermChg));
494 asm("ProtectChunk0: ");
495 asm("str r8, [r7, #%a0]" : : "i" _FOFF(DMemModelChunk,iChunkState)); // iChunkState=ENotRunning (r8==0 here)
// MoveRunOrUnprotectChunk / MoveToRunAddress — asm subroutine: relocate a movable
// chunk to its per-process run address (r7) by moving its PDEs, or fall through to
// UnprotectChunk1 for fixed-address chunks / chunks already at the run address.
// r9 carries the target chunk state (2 at the MoveRunOrUnprotectChunk entry).
// NOTE(review): extraction is garbled; the "MoveRun1:" loop label targeted below
// is on a missing line. Documented as-is.
498 // MoveRunOrUnprotectChunk
499 // If a chunk is movable and not at its run address in this process, move it to the run address;
500 // if the chunk is variable-access and not unprotected, unprotect it.
501 // r7=run address, r8 points to chunk
502 // ignore read-only flag since the ARM cannot support it
503 // r1-r3, r7, r9-r12 modified, r8 unmodified
504 asm("MoveRunOrUnprotectChunk: ");
505 asm("mov r9, #2 "); // r9=state of chunk
507 asm("MoveToRunAddress: "); // alternate entry with caller-supplied state in r9
508 asm("ldr r12, [r8, #%a0]" : : "i" _FOFF(DMemModelChunk,iAttributes));
509 asm("tst r12, #%a0" : : "i" (DMemModelChunk::EFixedAddress));
510 asm("bne UnprotectChunk1 "); // fixed-address chunk: permissions only, never move
511 asm("ldr r11, [r8, #%a0]" : : "i" _FOFF(DMemModelChunk,iBase)); // r11=old base
512 asm("cmp r11, r7 "); // check if already at run address
513 asm("beq UnprotectChunk1 "); // if it is, just unprotect it
515 ASM_DEBUG2(MoveChunkToRunAddress,r8,r7)
517 asm("str r7, [r8, #%a0]" : : "i" _FOFF(DMemModelChunk,iBase)); // iBase=run address
518 asm("ldr r12, [r8, #%a0]" : : "i" _FOFF(DMemModelChunk,iNumPdes));
519 asm("str r9, [r8, #%a0]" : : "i" _FOFF(DMemModelChunk,iChunkState)); // store new chunk state
521 asm("beq MoveRun0 "); // no PDEs: nothing to copy
522 asm("ldr r10, [r8, #%a0]" : : "i" _FOFF(DMemModelChunk,iPdes));
// One PDE maps 1MB==2^20 bytes; entries are 4 bytes, hence the >>18 scaling below.
524 asm("add r7, r10, r7, lsr #18 "); // r7=iPdes+(new address/2^18)
525 asm("sub r7, r7, r11, lsr #18 "); // r7=iPdes+(new address-old address)/2^18
526 asm("str r7, [r8, #%a0]" : : "i" _FOFF(DMemModelChunk,iPdes));
527 asm("add r9, r8, r9, lsl #2 "); // index iPdePermissions[] by chunk state
528 asm("ldr r9, [r9, #%a0]" : : "i" (_FOFF(DMemModelChunk,iPdePermissions[0]))); // r9=PDE permissions to use
530 asm("ldr r3, [r10] "); // fetch old pde
531 asm("str r1, [r10], #4 "); // clear old pde
532 asm("bics r3, r3, #0x3fc "); // mask out permissions and check for not present PDE
533 asm("orrne r3, r3, r9 "); // if present, or in new permissions
534 asm("moveq r3, #0 "); // else clear pde
535 asm("str r3, [r7], #4 "); // put into new pde
536 asm("subs r12, r12, #1 ");
537 asm("bne MoveRun1 "); // loop for next PDE (label on a missing line)
538 asm("orr r6, r6, #%a0" : : "i" (Mmu::EFlushDMove|Mmu::EFlushDPermChg)); // moving chunk can't contain code
// UnprotectChunk / ApplyTopLevelPermissions — asm subroutine: rewrite a chunk's
// PDEs with the permissions for the state in r9 (2==running at the UnprotectChunk
// entry). Fixed-access chunks and chunks already in the requested state are left
// untouched. Accumulates Mmu::EFlush* flags in r6.
543 // Apply running permissions to a chunk
544 // r8 points to chunk
545 // ignore read-only flag since the ARM cannot support it
546 // r7,r9,r10,r12 modified, r8,r11 unmodified
547 asm("UnprotectChunk: ");
548 asm("mov r9, #2 "); // r9=new state of chunk
550 asm("ApplyTopLevelPermissions: "); // alternate entry with caller-supplied state in r9
551 asm("ldr r12, [r8, #%a0]" : : "i" _FOFF(DMemModelChunk,iAttributes));
553 asm("UnprotectChunk1: "); // entry from MoveRunOrUnprotectChunk (r12=iAttributes)
554 asm("tst r12, #%a0" : : "i" (DMemModelChunk::EFixedAccess));
555 asm("ldreq r10, [r8, #%a0]" : : "i" _FOFF(DMemModelChunk,iChunkState)); // r10=old chunk state
558 __JUMP(eq,lr); // if state already correct, nothing to do
559 asm("ldr r10, [r8, #%a0]" : : "i" _FOFF(DMemModelChunk,iNumPdes));
561 ASM_DEBUG1(UnprotectChunk,r8)
563 asm("str r9, [r8, #%a0]" : : "i" _FOFF(DMemModelChunk,iChunkState)); // iChunkState=ERunningRW
565 asm("beq UnprotectChunk0 "); // no PDEs: state change only
566 asm("tst r12, #%a0" : : "i" (DMemModelChunk::ECode)); // check if chunk contains code
567 asm("orrne r6, r6, #%a0" : : "i" (Mmu::EFlushIPermChg)); // if it does, may need to flush ITLB
568 asm("ldr r12, [r8, #%a0]" : : "i" _FOFF(DMemModelChunk,iPdes));
569 asm("add r9, r8, r9, lsl #2 "); // index iPdePermissions[] by chunk state
570 asm("ldr r9, [r9, #%a0]" : : "i" (_FOFF(DMemModelChunk,iPdePermissions[0]))); //r9=new permissions
571 asm("UnprotectChunk2: "); // rewrite each PDE with the new permissions
572 asm("ldr r7, [r12] "); // fetch old pde
573 asm("bics r7, r7, #0x3fc "); // mask out permissions and check for not present PDE
574 asm("orrne r7, r7, r9 "); // if present, or in new permissions
575 asm("moveq r7, #0 "); // else clear pde
576 asm("str r7, [r12], #4 "); // replace pde
577 asm("subs r10, r10, #1 ");
578 asm("bne UnprotectChunk2 ");
579 asm("orr r6, r6, #%a0" : : "i" (Mmu::EFlushDPermChg));
580 asm("UnprotectChunk0: ");
// procswitch_trace_flash — BTrace helper emitting an EFastMutexFlash event for the
// system lock during process switch, followed by the literal pool / global data
// references used by this file.
// NOTE(review): garbled extraction — e.g. the instruction saving sp into r10
// (restored at "mov sp, r10" below) and the load of r1 are on missing lines.
583 #ifdef BTRACE_FAST_MUTEX
584 // expects scheduler in r0, current thread in r2, preserves all but r10
585 asm("procswitch_trace_flash:");
587 asm("bic sp, sp, #7"); // align stack to 8 bytes (original sp presumably saved in r10 on a missing line — confirm)
588 asm("stmdb sp!,{r3,ip}");
589 asm("stmdb sp!,{r0,r1,r2,lr}"); // 4th item on stack is PC value for trace
591 asm("ldr r0, procswitch_trace_header"); // header parameter in r0
592 asm("add r3, r1, #%a0" : : "i" _FOFF(TScheduler,iLock)); // fast mutex parameter in r3
594 asm("ldr pc, [r1, #%a0]" : : "i" _FOFF(TScheduler,iBTraceHandler)); // call handler; lr on stack is the return
595 asm("ldmia sp!,{r0,r1,r2,lr}");
596 asm("ldmia sp!,{r3,ip}");
597 asm("mov sp, r10"); // put stack back
600 asm("procswitch_trace_header:"); // pre-built BTrace header word for the flash event
601 asm(".word %a0" : : "i" ((TInt)(16<<BTrace::ESizeIndex) + ((BTrace::EContextIdPresent|BTrace::EPcPresent) << BTrace::EFlagsIndex*8) + (BTrace::EFastMutex<< BTrace::ECategoryIndex*8) + (BTrace::EFastMutexFlash << BTrace::ESubCategoryIndex*8)) );
604 // Global data references
605 asm("__TheScheduler: ");
606 asm(".word TheScheduler ");
608 asm(".word %a0" : : "i" ((TInt)&::TheMmu)); // address of the MMU singleton (label line missing in this extraction)
// C++-callable wrapper: unlock the page tables (DACR), run the
// ApplyTopLevelPermissions asm routine on this chunk with the given state,
// re-lock, drain the write buffer, and return the accumulated flush flags (r6).
611 __NAKED__ TUint32 DMemModelChunk::ApplyTopLevelPermissions(TChunkState /*aChunkState*/)
613 asm("stmfd sp!, {r6-r11,lr} ");
614 asm("mov r8, r0 "); // r8 = chunk ptr (this)
615 asm("mov r9, r1 "); // r9 = chunk state
616 asm("mrc p15, 0, r3, c3, c0, 0 "); // r3=DACR
618 asm("orr r1, r3, #0x30 ");
619 asm("mcr p15, 0, r1, c3, c0, 0 "); // unlock page tables
621 asm("bl ApplyTopLevelPermissions ");
622 asm("mcr p15, 0, r3, c3, c0, 0 "); // lock page tables (restore original DACR)
623 asm("mov r0, r6 "); // return flush flags
624 DRAIN_WRITE_BUFFER(,r1,r1);
626 asm("ldmfd sp!, {r6-r11,pc} ");
// C++-callable wrapper: unlock the page tables (DACR), run the MoveToRunAddress
// asm routine (move this chunk's PDEs to aLinAddr with state aChunkState),
// re-lock, drain the write buffer, and return the accumulated flush flags (r6).
629 __NAKED__ TUint32 DMemModelChunk::MoveToRunAddress(TLinAddr /*aLinAddr*/, TChunkState /*aChunkState*/)
631 asm("stmfd sp!, {r5-r11,lr} ");
632 asm("mov r8, r0 "); // r8 = chunk ptr (this)
633 asm("mov r7, r1 "); // r7 = run address
634 asm("mrc p15, 0, r5, c3, c0, 0 "); // r5=DACR
635 asm("mov r9, r2 "); // r9 = chunk state
637 asm("orr r1, r5, #0x30 ");
638 asm("mcr p15, 0, r1, c3, c0, 0 "); // unlock page tables
640 asm("bl MoveToRunAddress ");
641 asm("mcr p15, 0, r5, c3, c0, 0 "); // lock page tables (restore original DACR)
642 asm("mov r0, r6 "); // return flush flags
643 DRAIN_WRITE_BUFFER(,r1,r1);
645 asm("ldmfd sp!, {r5-r11,pc} ");
// C++-callable wrapper: unlock the page tables (DACR), run the
// MoveHomeOrProtectChunk asm routine on this chunk (move home and/or protect),
// re-lock, drain the write buffer, and return the accumulated flush flags (r6).
// Note: original DACR is kept in r0 across the call here (vs r3/r5 in the other
// wrappers) because MoveHomeOrProtectChunk takes its chunk pointer in r7.
648 __NAKED__ TUint32 DMemModelChunk::MoveToHomeSection()
650 asm("stmfd sp!, {r5-r11,lr} ");
651 asm("mov r7, r0 "); // r7 = chunk ptr (this)
652 asm("mrc p15, 0, r0, c3, c0, 0 "); // r0=DACR
654 asm("orr r1, r0, #0x30 ");
655 asm("mcr p15, 0, r1, c3, c0, 0 "); // unlock page tables
657 asm("bl MoveHomeOrProtectChunk ");
658 asm("mcr p15, 0, r0, c3, c0, 0 "); // lock page tables (restore original DACR)
659 asm("mov r0, r6 "); // return flush flags
660 DRAIN_WRITE_BUFFER(,r1,r1);
662 asm("ldmfd sp!, {r5-r11,pc} ");