os/kernelhwsrv/kernel/eka/memmodel/epoc/moving/arm/xsched.cia
author sl
Tue, 10 Jun 2014 14:32:02 +0200 (2014-06-10)
changeset 1 260cb5ec6c19
permissions -rw-r--r--
Update contrib.
sl@0
     1
// Copyright (c) 1997-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\memmodel\epoc\moving\arm\xsched.cia
//
//
sl@0
    17
sl@0
    18
#include <e32cia.h>
sl@0
    19
#include <arm_mem.h>
sl@0
    20
sl@0
    21
#define iMState		iWaitLink.iSpare1
sl@0
    22
sl@0
    23
//#define __DEBUG_BAD_ADDR
sl@0
    24
sl@0
    25
#define iCurrentVMProcess				iExtras[0]
sl@0
    26
#define	iCurrentDataSectionProcess		iExtras[1]
sl@0
    27
#define	iCompleteDataSectionProcess		iExtras[2]
sl@0
    28
sl@0
    29
#ifdef __REQUEST_COMPLETE_MACHINE_CODED__
sl@0
    30
sl@0
    31
#if defined(_DEBUG)
sl@0
    32
extern "C" void __DebugMsgRequestComplete(TInt a0, TInt a1, TInt a2);
sl@0
    33
extern "C" void __DebugMsgReqCompleteWrite(TInt a0, TInt a1, TInt a2);
sl@0
    34
#endif
sl@0
    35
sl@0
    36
__NAKED__ void DThread::RequestComplete(TRequestStatus*& /*aStatus*/, TInt /*aReason*/)
//
// Signal this thread's request semaphore.
// Enter with system locked, return with system unlocked.
//
// Writes the completion code (aReason, r2) directly into the target
// thread's TRequestStatus (*aStatus). Under the moving memory model the
// owning process's chunks may currently be mapped at their home addresses
// rather than their run addresses, so the status address is translated by
// walking the owning process's chunk table. The actual user-memory store
// is performed at the __magic_address_reqc label, which the exception
// handlers treat as immune from aborts, so a bad user address cannot
// bring down the kernel (unless __DEBUG_BAD_ADDR is defined, in which
// case an invalid address deliberately crashes the system for debugging).
//
	{
	ASM_DEBUG2(DThreadRequestComplete,r0,lr);

	asm("ldr r3, [r1] ");					// r3 points to TRequestStatus
	asm("mov r12, #0 ");
	asm("str r12, [r1] ");					// aStatus=NULL

	asm(".global _asm_RequestComplete ");	// entry point used with r0=thread, r3=status ptr, r2=reason
	asm("_asm_RequestComplete: ");

#ifdef BTRACE_REQUESTS
	asm("stmdb sp!,{r0-r3,lr}");
	asm("mov r1,r3");
	asm("mov r3,r2");											// arg3 = aReason
	asm("mov r2,r1");											// arg2 = aStatus
	asm("add r1,r0,#%a0" : : "i" _FOFF(DThread,iNThread));		// arg1 = &this->iNThread
	asm("ldr r0,_threadRequestCompleteTraceHeader");			// arg0 = header
	asm("bl " CSM_ZN6BTrace4OutXEmmmm);
	asm("ldmia sp!,{r0-r3,lr}");
#endif

	ASM_DEBUG3(RequestComplete,r0,r3,r2);
	asm("ldrb r12, [r0, #%a0]" : : "i" _FOFF(DThread,iMState));
	asm("stmfd sp!, {r4-r6} ");
	asm("add r6, r0, #%a0" : : "i" _FOFF(DThread,iNThread));	// r6=&iNThread, needed after the store
	asm("ldr r0, [r0, #%a0]" : : "i" _FOFF(DThread,iOwningProcess));
	asm("cmp r12, #%a0" : : "i" (DThread::EDead));	// test if iMState=EDead
	asm("ldr r12, [r0, #%a0]!" : : "i" _FOFF(DMemModelProcess, iNumChunks));	// step r0 on to iChunks[0]
	asm("beq req_complete_dead_thread ");	// if it is, finished
	asm("b req_complete_2");

	// lookup r3 in address space of process r0
	asm("req_complete_1: ");
	asm("ldmcsib r0!, {r1,r4} ");			// r1=data section base, r4=chunk ptr
	asm("subcss r1, r3, r1 ");				// r1=offset
	asm("ldrcs r5, [r4, #%a0]" : : "i" _FOFF(DChunk,iMaxSize));	// if offset>=0, r5=chunk max size
	asm("cmpcs r1, r5 ");					// and compare offset to max size
	asm("addcs r0, r0, #4 ");				// if offset>=max size, move to next entry
	asm("req_complete_2: ");
	asm("subcss r12, r12, #1 ");			// and decrement counter
	asm("bcs req_complete_1 ");				// loop if more chunks to check
	asm("cmp r12, #0 ");					// did we find chunk?
	asm("ldrge r3, [r4, #%a0]" : : "i" _FOFF(DMemModelChunk,iHomeRegionOffset));
	asm("ldrge r5, [r4, #%a0]" : : "i" _FOFF(DMemModelChunk,iHomeRegionSize));
	asm("cmpge r1, r3 ");					// if we did but offset<iHomeRegionOffset, no good
	asm("blt req_complete_invalid ");		// or if we didn't find it, no good
	asm("add r3, r3, r5 ");					// r3=home region offset+home region size
	asm("cmp r1, r3 ");						// if offset >= iHomeRegionOffset+iHomeRegionSize, no good
	asm("ldrlt r0, [r4, #%a0]" : : "i" _FOFF(DChunk,iBase));	// if OK, r0=chunk base
	asm("bge req_complete_invalid ");
											// Zero flag always clear here

	ASM_DEBUG3(ReqCompleteWrite,r0,r1,r2);

	asm(".global __magic_address_reqc ");
	asm("__magic_address_reqc: ");			// this instruction is magically immune from exceptions
	asm("str r2, [r0, r1] ");				// r0+r1=current address, store completion code

	asm("movne r0, r6 ");					// if write OK, r0->iNThread
	asm("ldmnefd sp!, {r4-r6} ");			// restore registers
	asm("movne r1, #0 ");
	asm("bne  " CSM_ZN5NKern19ThreadRequestSignalEP7NThreadP10NFastMutex);
#ifndef __DEBUG_BAD_ADDR
	asm("req_complete_invalid: ");
	asm("ldmfd sp!, {r4-r6} ");				// restore registers
	asm("b  " CSM_ZN5NKern12UnlockSystemEv);
#endif
#ifdef __DEBUG_BAD_ADDR
	asm("req_complete_invalid: ");			//DEBUG
	asm("mov r9, #0xde000000 ");
	asm("str r9, [r9, #0xaf] ");			//HACK-CRASH SYSTEM
#endif
	asm("req_complete_dead_thread: ");
	asm("ldmfd sp!, {r4-r6} ");				// restore registers
	asm("b  " CSM_ZN5NKern12UnlockSystemEv);

#ifdef BTRACE_REQUESTS
	// Local literal pool word holding the BTrace header for ERequestComplete.
	// (Label spelling fixed: was "_threadReqequestCompleteTraceHeader".)
	asm("_threadRequestCompleteTraceHeader:");
	asm(".word %a0" : : "i" (BTRACE_HEADER_C(16,BTrace::ERequests,BTrace::ERequestComplete)));
#endif
	}
sl@0
   122
#endif
sl@0
   123
sl@0
   124
#ifdef __SCHEDULER_MACHINE_CODED__
sl@0
   125
sl@0
   126
#if defined(_DEBUG)
sl@0
   127
extern "C" void __DebugMsgFixedAddress();
sl@0
   128
extern "C" void __DebugMsgMoving();
sl@0
   129
extern "C" void __DebugMsgProtectChunks(int aProc);
sl@0
   130
extern "C" void __DebugMsgUnprotectChunks(int aProc);
sl@0
   131
extern "C" void __DebugMsgMoveChunksToHome(int aProc);
sl@0
   132
extern "C" void __DebugMsgMoveChunksToData(int aProc);
sl@0
   133
extern "C" void __DebugMsgMoveChunkToHomeAddress(int aChunk);
sl@0
   134
extern "C" void __DebugMsgProtectChunk(int aChunk);
sl@0
   135
extern "C" void __DebugMsgMoveChunkToRunAddress(int aChunk, int aRunAddr);
sl@0
   136
extern "C" void __DebugMsgUnprotectChunk(int aChunk);
sl@0
   137
#endif
sl@0
   138
sl@0
   139
GLDEF_C __NAKED__ void DoProcessSwitch()
	{
	//
	// Perform the moving-memory-model address space switch during a reschedule.
	//
	// Enter and return in mode_svc with system lock held, kernel unlocked, interrupts enabled.
	// On entry r0->scheduler, r2->current thread, r5->current address space, r9->new address space
	// Must preserve r0,r2, can modify other registers
	//
	// Throughout, r6 accumulates Mmu flush flags (EFlushD/IPermChg, EFlushD/IMove)
	// which drive the TLB/cache flushes at resched_flush. After each per-chunk
	// subroutine call, iLock.iWaiting is polled so the switch can be abandoned
	// early (abort_resched_*) if another thread is contending for the system lock.
	//
	asm("mrc p15, 0, r7, c3, c0, 0 ");		// r7=DACR
	asm("ldr r4, [r0, #%a0]" : : "i" _FOFF(TScheduler,iCurrentVMProcess));
	asm("mov r1, r9 ");
	asm("stmfd sp!, {r7,lr} ");
	asm("orr r7, r7, #0x30 ");				// unlock page tables
	asm("mcr p15, 0, r7, c3, c0, 0 ");		// set DACR
	CPWAIT(,ip);

	asm("ldr lr, [r9, #%a0]" : : "i" _FOFF(DArmPlatProcess,iAttributes));
	asm("mov ip, #0 ");
	asm("cmp r4, #0 ");
	asm("ldrne ip, [r4, #%a0]" : : "i" _FOFF(DArmPlatProcess,iAttributes));

	// register contents at this point are:
	// r0->TheScheduler, r1->new process, r2->new thread, r3=scratch, r4=current VM process
	// r5=scratch, r7-11 scratch, r12=current VM process attributes,
	// r14=new process attributes

	asm("ands r6, lr, #%a0" : : "i" ((TInt)DMemModelProcess::EMoving));
	asm("beq resched_fixed ");				// skip if new process is fixed

	// The current process has moving chunks
	ASM_DEBUG0(Moving)

#ifdef __CPU_WRITE_BACK_CACHE
	// We need to flush the DCache before moving anything.
	// Condition for flush here is
	// NEW PROCESS MOVING && (NEW PROCESS!=COMPLETE DATA SECTION PROCESS)
	asm("ldr r3, [r0, #%a0]" : : "i" _FOFF(TScheduler,iCompleteDataSectionProcess));	// r3=complete data section process
	asm("cmp r3, r1 ");						// check if TheCurrentProcess==TheCompleteDataSectionProcess
	asm("blne SyncMapChangeAttempt ");		// if not same, flush the DCache; on return ZF=0 means abort
											// stack alignment OK since called function is assembler
	asm("bne end_process_switch ");			// if contention, just exit now
#endif

	asm("mov r6, #0 ");
	asm("str r6, [r0, #%a0]" : : "i" _FOFF(TScheduler,iAddressSpace));

	// if (pD && pD!=pP) ...
	asm("ldr r3, [r0, #%a0]" : : "i" _FOFF(TScheduler,iCurrentDataSectionProcess));		// r3=current data section process
	asm("mov r8, #0 ");
	asm("str r8, [r0, #%a0]" : : "i" _FOFF(TScheduler,iCompleteDataSectionProcess));
	asm("cmp r3, #0 ");						// if !TheCurrentDataSectionProcess
	asm("cmpne r3, r1 ");					// || TheCurrentDataSectionProcess==TheCurrentProcess
	asm("beq resched_moving1 ");			// skip next section

	// Move TheCurrentDataSectionProcess to the home section and protect it
	ASM_DEBUG1(MoveChunksToHome,r3)

	asm("ldr r5, [r3, #%a0]" : : "i" _FOFF(DMemModelProcess,iNumChunks));
	asm("cmp r5, #0 ");						// check if current data section process has no chunks
	asm("beq resched_moving1b ");			// skip if it does
	asm("stmfd sp!, {r1-r4,r12} ");
	asm("add r4, r3, #%a0" : : "i" (_FOFF(DMemModelProcess,iChunks)+4));
	asm("resched_moving1a: ");
	asm("ldr r7, [r4], #12 ");				// r7=address of chunk to move
	asm("bl MoveHomeOrProtectChunk ");		// move chunk - this must preserve r0
	asm("ldr ip, [r0, #%a0]" : : "i" _FOFF(TScheduler,iLock.iWaiting));
	asm("cmp ip, #0 ");						// check for contention
	asm("bne abort_resched_mh ");			// if there is, abort
#ifdef BTRACE_FAST_MUTEX
	asm("ldrb ip, [r0, #%a0]" : : "i" (_FOFF(TScheduler,iFastMutexFilter)));
	asm("cmp ip, #0");
	asm("ldrne r2, [sp, #4]"); // current thread
	asm("blne procswitch_trace_flash");
#endif
	asm("subs r5, r5, #1 ");
	asm("bne resched_moving1a ");
	asm("ldmfd sp!, {r1-r4,r12} ");
	asm("resched_moving1b: ");
	asm("cmp r3, r4 ");
	asm("moveq r4, #0 ");					// ... TheCurrentVMProcess=NULL
	asm("streq r4, [r0, #%a0]" : : "i" _FOFF(TScheduler,iCurrentVMProcess));
	asm("mov r3, #0 ");						// TheCurrentDataSectionProcess=NULL
	asm("str r3, [r0, #%a0]" : : "i" _FOFF(TScheduler,iCurrentDataSectionProcess));

	// if (pV && pV!=pP)
	asm("resched_moving1: ");
	asm("cmp r4, #0 ");						// if !TheCurrentVMProcess
	asm("cmpne r4, r1 ");					// || TheCurrentVMProcess==TheCurrentProcess
	asm("beq resched_moving2 ");			// skip next section

	// Protect TheCurrentVMProcess but don't move it
	// register contents at this point are:
	// r0->TheScheduler, r2->new thread, r1->TheCurrentProcess, r4=current VM process
	// r12=current VM process attributes, r3,r5,r7-r11->scratch

	ASM_DEBUG1(ProtectChunks,r4)

	asm("tst r12, #%a0" : : "i" (DMemModelProcess::EVariableAccess));	// r12=TheCurrentVMProcess->iAttributes
	asm("beq resched_moving2 ");			// if fixed access process, nothing to do
	asm("ldr r5, [r4, #%a0]" : : "i" _FOFF(DMemModelProcess,iNumChunks));
	asm("cmp r5, #0 ");
	asm("beq resched_moving2b ");			// if number of chunks=0, nothing to do
	asm("add r11, r4, #%a0" : : "i" (_FOFF(DMemModelProcess,iChunks)+4));
	asm("resched_moving2a: ");
	asm("ldr r7, [r11], #12 ");				// r7=address of chunk to protect
	asm("bl ProtectChunk ");				// protect it
	asm("ldr r7, [r0, #%a0]" : : "i" _FOFF(TScheduler,iLock.iWaiting));
	asm("cmp r7, #0 ");						// check for contention
	asm("bne abort_resched_mp ");			// if there is, abort
#ifdef BTRACE_FAST_MUTEX
	asm("ldrb ip, [r0, #%a0]" : : "i" (_FOFF(TScheduler,iFastMutexFilter)));
	asm("cmp ip, #0");
	asm("blne procswitch_trace_flash");
#endif
	asm("subs r5, r5, #1 ");
	asm("bne resched_moving2a ");
	asm("resched_moving2b: ");
	asm("mov r4, #0 ");						// TheCurrentVMProcess=NULL
	asm("str r4, [r0, #%a0]" : : "i" _FOFF(TScheduler,iCurrentVMProcess));

	asm("resched_moving2: ");
	asm("str r1, [r0, #%a0]" : : "i" _FOFF(TScheduler,iCurrentDataSectionProcess));
	asm("str r1, [r0, #%a0]" : : "i" _FOFF(TScheduler,iCurrentVMProcess));

	// Unprotect the TheCurrentProcess and move it to the data section if necessary
	// register contents at this point are:
	// r0->TheScheduler, r2->new thread, r1->TheCurrentProcess
	// r12=current VM process attributes, r3,r5,r7-r11->scratch

	ASM_DEBUG1(MoveChunksToData,r1)

	asm("ldr r5, [r1, #%a0]" : : "i" _FOFF(DMemModelProcess,iNumChunks));
	asm("cmp r5, #0 ");
	asm("beq resched_finish ");				// if number of chunks=0, nothing to do
	asm("stmfd sp!, {r1-r2,r12} ");			// don't need r3 or r4 any more
	asm("add r4, r1, #%a0" : : "i" _FOFF(DMemModelProcess,iChunks));
	asm("resched_moving3a: ");
	asm("ldmia r4!, {r7,r8,r9} ");			// r7=run address, r8=ptr to chunk, r9 top 16bits=isReadOnly
	asm("bl MoveRunOrUnprotectChunk ");		// move chunk to data section and/or unprotect it
	asm("ldr ip, [r0, #%a0]" : : "i" _FOFF(TScheduler,iLock.iWaiting));
	asm("cmp ip, #0 ");						// check for contention
	asm("bne abort_resched_mr ");			// if there is, abort
#ifdef BTRACE_FAST_MUTEX
	asm("ldrb ip, [r0, #%a0]" : : "i" (_FOFF(TScheduler,iFastMutexFilter)));
	asm("cmp ip, #0");
	asm("blne procswitch_trace_flash");
#endif
	asm("subs r5, r5, #1 ");
	asm("bne resched_moving3a ");
	asm("ldmfd sp!, {r1-r2,r12} ");
	asm("str r1, [r0, #%a0]" : : "i" _FOFF(TScheduler,iCompleteDataSectionProcess));
	asm("b resched_finish ");

	// The current process has no moving chunks
	asm("resched_fixed: ");

	ASM_DEBUG0(FixedAddress)

	asm("str r6, [r0, #%a0]" : : "i" _FOFF(TScheduler,iAddressSpace));
	asm("cmp r4, #0	");						// if TheCurrentVMProcess==NULL
	asm("beq resched_fixed1 ");				// skip this section

	// Protect TheCurrentVMProcess
	ASM_DEBUG1(ProtectChunks,r4)

	asm("ldr r5, [r4, #%a0]" : : "i" _FOFF(DMemModelProcess,iNumChunks));
	asm("add r11, r4, #%a0" : : "i" (_FOFF(DMemModelProcess,iChunks)+4));
	asm("cmp r5, #0 ");
	asm("beq resched_fixed1b ");			// if number of chunks=0, nothing to do
	asm("resched_fixed1a: ");
	asm("ldr r7, [r11], #12 ");				// r7=address of chunk to protect
	asm("bl ProtectChunk ");				// protect it
	asm("ldr r7, [r0, #%a0]" : : "i" _FOFF(TScheduler,iLock.iWaiting));
	asm("cmp r7, #0 ");						// check for contention
	asm("bne abort_resched_fp ");			// if there is, abort
#ifdef BTRACE_FAST_MUTEX
	asm("ldrb ip, [r0, #%a0]" : : "i" (_FOFF(TScheduler,iFastMutexFilter)));
	asm("cmp ip, #0");
	asm("blne procswitch_trace_flash");
#endif
	asm("subs r5, r5, #1 ");
	asm("bne resched_fixed1a ");
	asm("resched_fixed1b: ");
	asm("mov r4, #0 ");						// TheCurrentVMProcess=NULL
	asm("str r4, [r0, #%a0]" : : "i" _FOFF(TScheduler,iCurrentVMProcess));

	asm("resched_fixed1: ");

	// Unprotect TheCurrentProcess
	ASM_DEBUG1(UnprotectChunks,r1)

	asm("ldr r5, [r1, #%a0]" : : "i" _FOFF(DMemModelProcess,iNumChunks));
	asm("str r1, [r0, #%a0]" : : "i" _FOFF(TScheduler,iCurrentVMProcess));
	asm("cmp r5, #0 ");
	asm("beq resched_finish ");				// if number of chunks=0, nothing to do
	asm("add r11, r1, #%a0" : : "i" _FOFF(DMemModelProcess,iChunks));
	asm("resched_fixed2a: ");
	asm("ldmia r11!, {r7,r8,r9} ");			// r7=run address, r8=ptr to chunk, r9 top 16bits=isReadOnly
	asm("bl UnprotectChunk ");				// unprotect chunk
	asm("ldr r7, [r0, #%a0]" : : "i" _FOFF(TScheduler,iLock.iWaiting));
	asm("cmp r7, #0 ");						// check for contention
	asm("bne abort_resched_fu ");			// if there is, abort
#ifdef BTRACE_FAST_MUTEX
	asm("ldrb ip, [r0, #%a0]" : : "i" (_FOFF(TScheduler,iFastMutexFilter)));
	asm("cmp ip, #0");
	asm("blne procswitch_trace_flash");
#endif
	asm("subs r5, r5, #1 ");
	asm("bne resched_fixed2a ");

	asm("resched_finish: ");
	asm("str r1, [r0, #%a0]" : : "i" _FOFF(TScheduler,iAddressSpace));	// iAddressSpace=new process

	// Flush TLBs/caches as indicated by the flags accumulated in r6.
	asm("resched_flush: ");
#ifdef __CPU_SPLIT_TLB
	asm("tst r6, #%a0" : : "i" (Mmu::EFlushDPermChg));
	asm("mov r1, #0 ");
	FLUSH_DTLB(ne,r1);
	asm("tst r6, #%a0" : : "i" (Mmu::EFlushIPermChg));
	FLUSH_ITLB(ne,r1);
#else
	asm("tst r6, #%a0" : : "i" (Mmu::EFlushDPermChg|Mmu::EFlushIPermChg));
	asm("mov r1, #0 ");
	FLUSH_IDTLB(ne,r1);
#endif
#ifdef __CPU_WRITE_BACK_CACHE
//#ifdef __CPU_SPLIT_CACHE
	// D cache flush already done
//	asm("tst r6, #%a0" : : "i" (Mmu::EFlushIMove));	Should never need to flush ICache during context switch
//	FLUSH_ICACHE(ne,r1);
//#endif
#else
#ifdef __CPU_SPLIT_CACHE
	asm("tst r6, #%a0" : : "i" (Mmu::EFlushDMove));
	FLUSH_DCACHE(ne,r1);
//	asm("tst r6, #%a0" : : "i" (Mmu::EFlushIMove));	Should never need to flush ICache during context switch
//	FLUSH_ICACHE(ne,r1);
#else
	asm("tst r6, #%a0" : : "i" (Mmu::EFlushDMove|Mmu::EFlushIMove));
	FLUSH_IDCACHE(ne,r1);
#endif
#endif
	asm("cmp r6, #0 ");						// any page table changes?
	DRAIN_WRITE_BUFFER(ne,r1,r7);			// if so, make sure page table changes take effect

	asm("end_process_switch: ");
	asm("ldmia sp!, {r7,lr} ");
	asm("mcr p15, 0, r7, c3, c0 ");			// restore DACR
	CPWAIT(,r3);
	__JUMP(,lr);

	// Abort paths: contention on the system lock was detected mid-switch.
	// Each records how far it got in the scheduler state, then flushes and exits.
	asm("abort_resched_mh: ");				// Reschedule aborted during MoveToHome
	asm("ldmfd sp!, {r1-r4,r12} ");
	asm("subs r5, r5, #1 ");
	asm("bne resched_flush ");				// if MoveToHome incomplete skip
	asm("cmp r3, r4 ");						// if TheCurrentDataSectionProcess==TheCurrentVMProcess ...
	asm("streq r5, [r0, #%a0]" : : "i" _FOFF(TScheduler,iCurrentVMProcess));	// ... TheCurrentVMProcess=NULL
	asm("str r5, [r0, #%a0]" : : "i" _FOFF(TScheduler,iCurrentDataSectionProcess));	// TheCurrentDataSectionProcess=NULL
	asm("b resched_flush ");				//

	asm("abort_resched_mp: ");				// Reschedule aborted during protect before moving
	asm("subs r5, r5, #1 ");
	asm("streq r5, [r0, #%a0]" : : "i" _FOFF(TScheduler,iCurrentVMProcess));
	asm("b resched_flush ");				//

	asm("abort_resched_mr: ");				// Reschedule aborted during MoveToRunAddress
	asm("ldmfd sp!, {r1-r2,r12} ");
	asm("subs r5, r5, #1 ");
	asm("streq r1, [r0, #%a0]" : : "i" _FOFF(TScheduler,iCompleteDataSectionProcess));
	asm("b resched_flush ");				//

	asm("abort_resched_fp: ");				// Reschedule aborted during protect before fixed
	asm("subs r5, r5, #1 ");
	asm("streq r5, [r0, #%a0]" : : "i" _FOFF(TScheduler,iCurrentVMProcess));
	asm("b resched_flush ");				//

	asm("abort_resched_fu: ");				// Reschedule aborted during unprotect
	asm("subs r5, r5, #1 ");
	asm("streq r1, [r0, #%a0]" : : "i" _FOFF(TScheduler,iAddressSpace));	// iAddressSpace=new process
	asm("b resched_flush ");				//


	//
	// Subroutines
	//
	// MoveHomeOrProtectChunk
	// If a chunk is movable and not at its home address, move it to the home address;
	// if the chunk is variable-access and not protected, protect it
	// r7 points to chunk
	// modifies r1-r3 and r8-r12
	asm("MoveHomeOrProtectChunk: ");
	asm("ldr r10, [r7, #%a0]" : : "i" _FOFF(DMemModelChunk,iAttributes));
	asm("tst r10, #%a0" : : "i" (DMemModelChunk::EFixedAddress));
	asm("bne ProtectChunk1 ");
	asm("ldr r11, [r7, #%a0]" : : "i" _FOFF(DMemModelChunk,iHomeBase));	// r11=home base
	asm("ldr r12, [r7, #%a0]" : : "i" _FOFF(DMemModelChunk,iBase));	// r12=iBase
	asm("cmp r11, r12 ");
	asm("beq ProtectChunk1 ");			// already at home address

	ASM_DEBUG1(MoveChunkToHomeAddress,r7)

	asm("str r11, [r7, #%a0]" : : "i" _FOFF(DMemModelChunk,iBase));	// iBase=iHomeBase
	asm("add r8, r7, #%a0" : : "i" _FOFF(DMemModelChunk,iNumPdes));
	asm("ldmia r8, {r8-r10} ");	// r8=numpdes r9=base pdes, r10=home pdes
	asm("cmp r8, #0 ");
	asm("str r10, [r7, #%a0]" : : "i" _FOFF(DMemModelChunk,iPdes));
	asm("beq MoveHome0 ");
	asm("ldr r12, [r7, #%a0]" : : "i" _FOFF(DMemModelChunk,iPdePermissions[0]));
	asm("mov r1, #0 ");
	asm("str r1, [r7, #%a0]" : : "i" _FOFF(DMemModelChunk,iChunkState));	// iChunkState=ENotRunning
	asm("MoveHome1: ");
	asm("ldr r3, [r9] ");				// fetch old pde
	asm("str r1, [r9], #4 ");			// clear old pde
	asm("bics r3, r3, #0x3fc ");		// mask out permissions and check for not present PDE
	asm("orrne r3, r3, r12 ");			// if present, or in new permissions
	asm("moveq r3, #0 ");				// else zero entry
	asm("str r3, [r10], #4 ");			// put into new pde
	asm("subs r8, r8, #1 ");
	asm("bne MoveHome1 ");				// loop to do next PDE
	asm("orr r6, r6, #%a0" : : "i" (Mmu::EFlushDMove|Mmu::EFlushDPermChg));	// moving chunk can't contain code
	__JUMP(,lr);
	asm("MoveHome0: ");
	asm("str r8, [r7, #%a0]" : : "i" _FOFF(DMemModelChunk,iChunkState));	// iChunkState=ENotRunning
	__JUMP(,lr);

	// ProtectChunk
	// Protect a chunk - r7 points to chunk
	// r8-r10,r12 modified
	asm("ProtectChunk: ");
	asm("ldr r10, [r7, #%a0]" : : "i" _FOFF(DMemModelChunk,iAttributes));

	asm("ProtectChunk1: ");
	asm("tst r10, #%a0" : : "i" (DMemModelChunk::EFixedAccess));
	asm("ldreq r12, [r7, #%a0]" : : "i" _FOFF(DMemModelChunk,iChunkState));
	__JUMP(ne,lr);
	asm("cmp r12, #0 ");
	__JUMP(eq,lr);						// if already in non-running state, nothing to do
	asm("ldr r8, [r7, #%a0]" : : "i" _FOFF(DMemModelChunk,iNumPdes));	// r8=number of chunk pdes
	asm("ldr r9, [r7, #%a0]" : : "i" _FOFF(DMemModelChunk,iPdes));	// r9=pde address

	ASM_DEBUG1(ProtectChunk,r7)

	asm("cmp r8, #0 ");
	asm("beq ProtectChunk0 ");
	asm("tst r10, #%a0" : : "i" (DMemModelChunk::ECode));		// check if chunk contains code
	asm("orrne r6, r6, #%a0" : : "i" (Mmu::EFlushIPermChg));	// if it does, may need to flush ITLB
	asm("ldr r10, [r7, #%a0]" : : "i" _FOFF(DMemModelChunk,iPdePermissions[0]));	//r10=new permissions
	asm("ProtectChunk2: ");
	asm("ldr r12, [r9] ");				// fetch old pde
	asm("bics r12, r12, #0x3fc ");		// mask out permissions and check for not present PDE
	asm("orrne r12, r12, r10 ");		// if present, or in new permissions
	asm("moveq r12, #0 ");				// else zero pde
	asm("str r12, [r9], #4 ");			// replace pde
	asm("subs r8, r8, #1 ");
	asm("bne ProtectChunk2 ");			// loop for next PDE
	asm("orr r6, r6, #%a0" : : "i" (Mmu::EFlushDPermChg));
	asm("ProtectChunk0: ");
	asm("str r8, [r7, #%a0]" : : "i" _FOFF(DMemModelChunk,iChunkState));	// iChunkState=ENotRunning
	__JUMP(,lr);

	// MoveRunOrUnprotectChunk
	// If a chunk is movable and not at its run address in this process, move it to the run address;
	// if the chunk is variable-access and not unprotected, unprotect it.
	// r7=run address, r8 points to chunk
	// ignore read-only flag since the ARM cannot support it
	// r1-r3, r7, r9-r12 modified, r8 unmodified
	asm("MoveRunOrUnprotectChunk: ");
	asm("mov r9, #2 ");					// r9=state of chunk

	asm("MoveToRunAddress: ");			// alternate entry: r9=desired chunk state (also used by the C++ wrapper below)
	asm("ldr r12, [r8, #%a0]" : : "i" _FOFF(DMemModelChunk,iAttributes));
	asm("tst r12, #%a0" : : "i" (DMemModelChunk::EFixedAddress));
	asm("bne UnprotectChunk1 ");
	asm("ldr r11, [r8, #%a0]" : : "i" _FOFF(DMemModelChunk,iBase));	// r11=old base
	asm("cmp r11, r7 ");				// check if already at run address
	asm("beq UnprotectChunk1 ");		// if it is, just unprotect it

	ASM_DEBUG2(MoveChunkToRunAddress,r8,r7)

	asm("str r7, [r8, #%a0]" : : "i" _FOFF(DMemModelChunk,iBase));	// iBase=run address
	asm("ldr r12, [r8, #%a0]" : : "i" _FOFF(DMemModelChunk,iNumPdes));
	asm("str r9, [r8, #%a0]" : : "i" _FOFF(DMemModelChunk,iChunkState));	// store new chunk state
	asm("cmp r12, #0 ");
	asm("beq MoveRun0 ");
	asm("ldr r10, [r8, #%a0]" : : "i" _FOFF(DMemModelChunk,iPdes));
	asm("mov r1, #0 ");
	asm("add r7, r10, r7, lsr #18 ");	// r7=iPdes+(new address/2^18)
	asm("sub r7, r7, r11, lsr #18 ");	// r7=iPdes+(new address-old address)/2^18
	asm("str r7, [r8, #%a0]" : : "i" _FOFF(DMemModelChunk,iPdes));
	asm("add r9, r8, r9, lsl #2 ");
	asm("ldr r9, [r9, #%a0]" : : "i" (_FOFF(DMemModelChunk,iPdePermissions[0])));	// r9=PDE permissions to use
	asm("MoveRun1: ");
	asm("ldr r3, [r10] ");				// fetch old pde
	asm("str r1, [r10], #4 ");			// clear old pde
	asm("bics r3, r3, #0x3fc ");		// mask out permissions and check for not present PDE
	asm("orrne r3, r3, r9 ");			// if present, or in new permissions
	asm("moveq r3, #0 ");				// else clear pde
	asm("str r3, [r7], #4 ");			// put into new pde
	asm("subs r12, r12, #1 ");
	asm("bne MoveRun1 ");
	asm("orr r6, r6, #%a0" : : "i" (Mmu::EFlushDMove|Mmu::EFlushDPermChg));	// moving chunk can't contain code
	asm("MoveRun0: ");
	__JUMP(,lr);

	// UnprotectChunk
	// Apply running permissions to a chunk
	// r8 points to chunk
	// ignore read-only flag since the ARM cannot support it
	// r7,r9,r10,r12 modified, r8,r11 unmodified
	asm("UnprotectChunk: ");
	asm("mov r9, #2 ");					// r9=new state of chunk

	asm("ApplyTopLevelPermissions: ");	// alternate entry: r9=desired chunk state (also used by the C++ wrapper below)
	asm("ldr r12, [r8, #%a0]" : : "i" _FOFF(DMemModelChunk,iAttributes));

	asm("UnprotectChunk1: ");
	asm("tst r12, #%a0" : : "i" (DMemModelChunk::EFixedAccess));
	asm("ldreq r10, [r8, #%a0]" : : "i" _FOFF(DMemModelChunk,iChunkState));	// r10=old chunk state
	__JUMP(ne,lr);
	asm("cmp r10, r9 ");
	__JUMP(eq,lr);						// if state already correct, nothing to do
	asm("ldr r10, [r8, #%a0]" : : "i" _FOFF(DMemModelChunk,iNumPdes));

	ASM_DEBUG1(UnprotectChunk,r8)

	asm("str r9, [r8, #%a0]" : : "i" _FOFF(DMemModelChunk,iChunkState));	// iChunkState=ERunningRW
	asm("cmp r10, #0 ");
	asm("beq UnprotectChunk0 ");
	asm("tst r12, #%a0" : : "i" (DMemModelChunk::ECode));		// check if chunk contains code
	asm("orrne r6, r6, #%a0" : : "i" (Mmu::EFlushIPermChg));	// if it does, may need to flush ITLB
	asm("ldr r12, [r8, #%a0]" : : "i" _FOFF(DMemModelChunk,iPdes));
	asm("add r9, r8, r9, lsl #2 ");
	asm("ldr r9, [r9, #%a0]" : : "i" (_FOFF(DMemModelChunk,iPdePermissions[0])));	//r9=new permissions
	asm("UnprotectChunk2: ");
	asm("ldr r7, [r12] ");				// fetch old pde
	asm("bics r7, r7, #0x3fc ");		// mask out permissions and check for not present PDE
	asm("orrne r7, r7, r9 ");			// if present, or in new permissions
	asm("moveq r7, #0 ");				// else clear pde
	asm("str r7, [r12], #4 ");			// replace pde
	asm("subs r10, r10, #1 ");
	asm("bne UnprotectChunk2 ");
	asm("orr r6, r6, #%a0" : : "i" (Mmu::EFlushDPermChg));
	asm("UnprotectChunk0: ");
	__JUMP(,lr);

#ifdef BTRACE_FAST_MUTEX
	// expects scheduler in r0, current thread in r2, preserves all but r10
	asm("procswitch_trace_flash:");
	asm("mov r10, sp");
	asm("bic sp, sp, #7"); 					// align stack to 8 bytes
	asm("stmdb sp!,{r3,ip}");
	asm("stmdb sp!,{r0,r1,r2,lr}");		// 4th item on stack is PC value for trace
	asm("mov r1, r0");
	asm("ldr r0, procswitch_trace_header"); // header parameter in r0
	asm("add r3, r1, #%a0" : : "i" _FOFF(TScheduler,iLock)); // fast mutex parameter in r3
	asm("mov lr, pc");
	asm("ldr pc, [r1, #%a0]" : : "i" _FOFF(TScheduler,iBTraceHandler));	
	asm("ldmia sp!,{r0,r1,r2,lr}");
	asm("ldmia sp!,{r3,ip}");
	asm("mov sp, r10");					// put stack back
	__JUMP(,lr);

	asm("procswitch_trace_header:");
	asm(".word %a0" : : "i" ((TInt)(16<<BTrace::ESizeIndex) + ((BTrace::EContextIdPresent|BTrace::EPcPresent) << BTrace::EFlagsIndex*8) + (BTrace::EFastMutex<< BTrace::ECategoryIndex*8) + (BTrace::EFastMutexFlash << BTrace::ESubCategoryIndex*8)) );
#endif

	// Global data references
	asm("__TheScheduler: ");
	asm(".word TheScheduler ");
	asm("__TheMmu: ");
	asm(".word %a0" : : "i" ((TInt)&::TheMmu));
	};
sl@0
   610
sl@0
   611
__NAKED__ TUint32 DMemModelChunk::ApplyTopLevelPermissions(TChunkState /*aChunkState*/)
	{
	// C++-callable wrapper around the "ApplyTopLevelPermissions" asm
	// subroutine defined inside DoProcessSwitch() above. Sets up the
	// subroutine's register contract (r8=chunk ptr, r9=chunk state),
	// temporarily unlocks the page-table domain in the DACR, applies the
	// PDE permissions, then restores the DACR.
	// Returns the flush flags accumulated in r6 (Mmu::EFlush* bits).
	asm("stmfd sp!, {r6-r11,lr} ");
	asm("mov r8, r0 ");		// r8 = chunk ptr (this)
	asm("mov r9, r1 ");		// r9 = chunk state
	asm("mrc p15, 0, r3, c3, c0, 0 ");	// r3=DACR
	asm("mov r6, #0 ");					// r6 = flush flags accumulator
	asm("orr r1, r3, #0x30 ");
	asm("mcr p15, 0, r1, c3, c0, 0 ");	// unlock page tables
	CPWAIT(,lr);
	asm("bl ApplyTopLevelPermissions ");
	asm("mcr p15, 0, r3, c3, c0, 0 ");	// lock page tables
	asm("mov r0, r6 ");					// return flush flags
	DRAIN_WRITE_BUFFER(,r1,r1);
	CPWAIT(,r1);
	asm("ldmfd sp!, {r6-r11,pc} ");
	}
sl@0
   628
sl@0
   629
__NAKED__ TUint32 DMemModelChunk::MoveToRunAddress(TLinAddr /*aLinAddr*/, TChunkState /*aChunkState*/)
	{
	// C++-callable wrapper around the "MoveToRunAddress" asm subroutine
	// defined inside DoProcessSwitch() above. Sets up the subroutine's
	// register contract (r8=chunk ptr, r7=run address, r9=chunk state),
	// temporarily unlocks the page-table domain in the DACR, moves the
	// chunk's PDEs to the run address, then restores the DACR.
	// Returns the flush flags accumulated in r6 (Mmu::EFlush* bits).
	asm("stmfd sp!, {r5-r11,lr} ");
	asm("mov r8, r0 ");		// r8 = chunk ptr (this)
	asm("mov r7, r1 ");		// r7 = run address
	asm("mrc p15, 0, r5, c3, c0, 0 ");	// r5=DACR
	asm("mov r9, r2 ");		// r9 = chunk state
	asm("mov r6, #0 ");					// r6 = flush flags accumulator
	asm("orr r1, r5, #0x30 ");
	asm("mcr p15, 0, r1, c3, c0, 0 ");	// unlock page tables
	CPWAIT(,lr);
	asm("bl MoveToRunAddress ");
	asm("mcr p15, 0, r5, c3, c0, 0 ");	// lock page tables
	asm("mov r0, r6 ");					// return flush flags
	DRAIN_WRITE_BUFFER(,r1,r1);
	CPWAIT(,r1);
	asm("ldmfd sp!, {r5-r11,pc} ");
	}
sl@0
   647
sl@0
   648
__NAKED__ TUint32 DMemModelChunk::MoveToHomeSection()
	{
	// C++-callable wrapper around the "MoveHomeOrProtectChunk" asm
	// subroutine defined inside DoProcessSwitch() above. Sets up the
	// subroutine's register contract (r7=chunk ptr), temporarily unlocks
	// the page-table domain in the DACR, moves/protects the chunk, then
	// restores the DACR.
	// Returns the flush flags accumulated in r6 (Mmu::EFlush* bits).
	asm("stmfd sp!, {r5-r11,lr} ");
	asm("mov r7, r0 ");		// r7 = chunk ptr (this)
	asm("mrc p15, 0, r0, c3, c0, 0 ");	// r0=DACR
	asm("mov r6, #0 ");					// r6 = flush flags accumulator
	asm("orr r1, r0, #0x30 ");
	asm("mcr p15, 0, r1, c3, c0, 0 ");	// unlock page tables
	CPWAIT(,lr);
	asm("bl MoveHomeOrProtectChunk ");
	asm("mcr p15, 0, r0, c3, c0, 0 ");	// lock page tables
	asm("mov r0, r6 ");					// return flush flags
	DRAIN_WRITE_BUFFER(,r1,r1);
	CPWAIT(,r1);
	asm("ldmfd sp!, {r5-r11,pc} ");
	}
sl@0
   664
#endif
sl@0
   665