// Copyright (c) 2007-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\nkernsmp\x86\ncsched.cia
//
//

#include <x86.h>
#include <apic.h>

// SubSchedulerLookupTable	: global data, type: TSubScheduler* [256];
// BTraceLock				: global data, type: TSpinLock

const TLinAddr TScheduler_Reschedule = (TLinAddr)&TScheduler::Reschedule;
//const TLinAddr TheScheduler_iRescheduleNeededFlag = (TLinAddr)&TheScheduler.iRescheduleNeededFlag;
const TLinAddr NKern_FastCounter = (TLinAddr)&NKern::FastCounter;
const TLinAddr NKern_Lock = (TLinAddr)&NKern::Lock;
const TLinAddr NKern_Unlock = (TLinAddr)&NKern::Unlock;
const TLinAddr addressof_TheScheduler = (TLinAddr)&TheScheduler;
const TUint32 new_thread_trace_header = ((8<<BTrace::ESizeIndex) + (BTrace::EContextIdPresent<<BTrace::EFlagsIndex*8) + (BTrace::ECpuUsage<<BTrace::ECategoryIndex*8) + (BTrace::ENewThreadContext<<BTrace::ESubCategoryIndex*8));

extern "C" void __fastcall queue_dfcs(TSubScheduler* aS);
extern "C" NThreadBase* __fastcall select_next_thread(TSubScheduler* aS);
extern "C" void send_resched_ipis(TUint32 aMask);
extern "C" void __fastcall do_forced_exit(NThreadBase* aT);
extern "C" void NewThreadTrace(NThread* a);


/***************************************************************************
* Reschedule
* Enter with:
*		Kernel locked, interrupts enabled or disabled
* Return with:
*		Kernel unlocked, interrupts disabled
*		EAX=0 if no reschedule occurred, 1 if it did
*		ESI pointing to TSubScheduler for current CPU
*		EDI pointing to current NThread
***************************************************************************/
__NAKED__ void TScheduler::Reschedule()
	{
	asm("push 0 ");
	asm("mov eax, ds:[%0]" : : "i" (X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ID));	// OK since kernel locked
	asm("mov edi, %0" : : "i" (addressof_TheScheduler));
	asm("shr eax, 24 ");
	asm("mov esi, [eax*4+%0]" : : "i" (&SubSchedulerLookupTable));
	asm("cli ");
	asm("start_resched: ");
//	_asm cmp dword ptr [esi]TSubScheduler.iRescheduleNeededFlag, 10000h		VC6 ignores the "dword ptr"
	asm("lea eax, [esi+%0]" : : "i" _FOFF(TSubScheduler, iRescheduleNeededFlag));
	asm("cmp dword ptr [eax], 0x10000 ");
	asm("jb short resched_no_dfcs ");
	asm("mov ecx, esi ");
	asm("call %a0" : : "i" (&queue_dfcs));
	asm("resched_no_dfcs: ");
	asm("cmp byte ptr [esi+%0], 0" : : "i" _FOFF(TSubScheduler,iRescheduleNeededFlag));
	asm("jz resched_not_needed ");
	asm("sti ");
	asm("mov dword ptr [esp], 1 ");
	asm("mov ebp, [esi+%0]" : : "i"_FOFF(TSubScheduler, iCurrentThread));		// EBP -> original thread
	asm("mov eax, cr0");
	asm("push eax");
	asm("mov [ebp+%0], esp" : : "i" _FOFF(NThreadBase, iSavedSP));			// Save original thread stack pointer

	// We must move to a temporary stack before selecting the next thread.
	// This is because another CPU may begin executing this thread before the
	// select_next_thread() function returns and our stack would then be
	// corrupted. We use the stack belonging to this CPU's initial thread since
	// we are guaranteed that will never run on another CPU.
	asm("mov ecx, [esi+%0]" : : "i" _FOFF(TSubScheduler, iInitialThread));
	asm("mov esp, [ecx+%0]" : : "i" _FOFF(NThreadBase, iSavedSP));

	asm("select_thread:");
	asm("mov ecx, esi ");
	asm("call %a0" : : "i" (&select_next_thread));
	asm("mov ebx, eax ");
	asm("cmp ebx, 0 ");
	asm("jz no_thread ");
	asm("mov esp, [ebx+%0]" : : "i" _FOFF(NThreadBase, iSavedSP)); // move to new thread's stack

#ifdef BTRACE_CPU_USAGE
	asm("cmp byte ptr %a0, 0" : : "i" (&BTraceData.iFilter[4]));
	asm("jz short no_trace ");
	asm("push ebx ");
	asm("call %a0" : : "i" (NewThreadTrace));
	asm("pop ebx ");
	asm("no_trace: ");
#endif	// BTRACE_CPU_USAGE

	asm("cmp ebp, ebx ");
	asm("je same_thread ");
	asm("mov eax, [ebx+%0]" : : "i" _FOFF(NThreadBase, iStackBase));
	asm("add eax, [ebx+%0]" : : "i" _FOFF(NThreadBase, iStackSize));
	asm("mov ecx, [esi+60+%0]" : : "i" _FOFF(TSubScheduler, iExtras));		// iExtras[15] points to TSS
	asm("mov [ecx+%0], eax" : : "i" _FOFF(TX86Tss, iEsp0));					// set ESP0 to top of new thread supervisor stack

	asm("test byte ptr [ebx+%0], 2" : : "i" _FOFF(NThreadBase,i_ThrdAttr));	// test for address space switch
	asm("jz short resched_no_as_switch ");
	asm("call [edi+%0]" : : "i" _FOFF(TScheduler, iProcessHandler));		// call handler with
														// EBX=pointer to new thread, EDI->scheduler, ESI->subscheduler
	asm("resched_no_as_switch: ");
	asm("same_thread: ");
	asm("pop eax ");
	asm("mov cr0, eax ");
	asm("cli ");
//	asm("cmp dword ptr [esi]TSubScheduler.iRescheduleNeededFlag, 0	VC6 ignores the "dword ptr"
	asm("lea eax, [esi+%0]" : : "i" _FOFF(TSubScheduler, iRescheduleNeededFlag));
	asm("cmp dword ptr [eax], 0 ");
	asm("jnz start_resched ");

	asm("resched_not_needed: ");
	asm("mov edi, [esi+%0]" : : "i" _FOFF(TSubScheduler, iCurrentThread));
	asm("cmp dword ptr [edi+%0], -3" : : "i" _FOFF(NThreadBase, iCsFunction)); // ECSDivertPending
	asm("je resched_thread_divert ");
	asm("mov dword ptr [esi+%0], 0" : : "i" _FOFF(TSubScheduler, iKernLockCount));
	asm("pop eax ");
	asm("ret ");

	asm("resched_thread_divert: ");
	asm("push edi ");
	asm("xor eax, eax ");
	asm("lock xchg eax, [esi+%0]" : : "i" _FOFF(TSubScheduler, iReschedIPIs));
	asm("test eax, eax ");
	asm("jz short no_resched_ipis ");
	asm("push eax ");
	asm("call %a0" : : "i" (&send_resched_ipis));
	asm("add esp, 4 ");
	asm("no_resched_ipis: ");

	asm("sti ");
	asm("mov ecx, [esp+12] ");			// SThreadReschedStack iReason: 0=not run, 1=unlock, 2=IRQ
	asm("cmp ecx, 2 ");
	asm("ja short rtd_unknown ");		// unknown - die
	asm("shl ecx, 2 ");					// reason * 4
	asm("mov eax, 0xa1a ");
	asm("shr eax, cl ");
	asm("and eax, 15 ");
	asm("mov gs, [esp+eax*4+16] ");		// restore GS

	asm("pop ecx ");					// exiting thread pointer
	asm("call %a0" : : "i" (&do_forced_exit));
	asm("int 0xff ");					// should never get here

	asm("rtd_unknown: ");
	asm("int 0xff ");					// should never get here


	// There is no thread ready to run
	asm("no_thread: ");
	asm("cli ");
	asm("xor eax, eax ");
	asm("lock xchg eax, [esi+%0]" : : "i" _FOFF(TSubScheduler, iReschedIPIs));
	asm("test eax, eax ");
	asm("jz short no_resched_ipis2 ");
	asm("push eax ");
	asm("call %a0" : : "i" (&send_resched_ipis));
	asm("add esp, 4 ");
	asm("no_resched_ipis2: ");
	asm("sti ");
	asm("hlt ");
	asm("no_thread2: ");
//	_asm cmp dword ptr [esi]TSubScheduler.iRescheduleNeededFlag, 10000h		VC6 ignores the "dword ptr"
	asm("lea eax, [esi+%0]" : : "i" _FOFF(TSubScheduler, iRescheduleNeededFlag));
	asm("cmp dword ptr [eax], 0x10000 ");
	asm("jb short no_thread ");
	asm("mov ecx, esi ");
	asm("call %a0" : : "i" (&queue_dfcs));
	asm("cmp byte ptr [esi+%0], 0" : : "i" _FOFF(TSubScheduler, iRescheduleNeededFlag));
	asm("jz short no_thread2 ");
	asm("jmp select_thread ");
	}
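
/***************************************************************************
* Illustrative sketch only (not part of the original source): the control
* flow of TScheduler::Reschedule() above, as C-like pseudocode. The field
* and function names queue_dfcs/select_next_thread are the ones used in
* this file; SaveCurrentThread, SwitchTo and HaltUntilIpi are placeholder
* names for the inline stack/register sequences, not real functions.
*
*	for (;;)
*		{
*		if (iRescheduleNeededFlag >= 0x10000)	// high half nonzero: IDFCs queued
*			queue_dfcs(subScheduler);
*		if (!iRescheduleNeededFlag)
*			break;								// nothing to do
*		SaveCurrentThread();					// save SP/CR0, move to this CPU's initial-thread stack
*		NThreadBase* next = select_next_thread(subScheduler);
*		if (next)
*			SwitchTo(next);						// adopt its stack, set TSS ESP0, maybe switch address space
*		else
*			HaltUntilIpi();						// "no_thread": enable interrupts and HLT
*		}										// loop: re-check flags with interrupts disabled
***************************************************************************/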


/** Disable interrupts to the specified level

If aLevel == 0, the interrupt state is left unchanged.
If aLevel != 0, all maskable interrupts are disabled.

@param	aLevel Level to which interrupts should be disabled
@return	Cookie to pass into RestoreInterrupts()
*/
EXPORT_C __NAKED__ TInt NKern::DisableInterrupts(TInt /*aLevel*/)
	{
	asm("pushfd");
	asm("mov ecx, [esp+4]");
	asm("pop eax");
	asm("and eax, 0x200");
	asm("test ecx, ecx");
	asm("jz disable_ints_0");
	asm("cli");
	asm("disable_ints_0:");
	asm("ret");
	}
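
// Usage sketch (illustrative, not compiled in): the return value is an opaque
// cookie - the caller's EFLAGS.IF state - which must be passed back to
// NKern::RestoreInterrupts().
#if 0
void ExampleDisableRestore()
	{
	TInt irq = NKern::DisableInterrupts(1);		// aLevel != 0: mask all maskable interrupts
	// ... briefly touch state shared with ISRs ...
	NKern::RestoreInterrupts(irq);				// re-enables only if they were enabled before
	}
#endif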


/** Disable all maskable interrupts

@return	Cookie to pass into RestoreInterrupts()
*/
EXPORT_C __NAKED__ TInt NKern::DisableAllInterrupts()
	{
	asm("pushfd");
	asm("pop eax");
	asm("and eax, 0x200");
	asm("cli");
	asm("ret");
	}


/** Restore interrupt mask to state preceding a DisableInterrupts() call

@param	aLevel Cookie returned by Disable(All)Interrupts()
*/
EXPORT_C __NAKED__ void NKern::RestoreInterrupts(TInt aLevel)
	{
	asm("test byte ptr [esp+5], 2");	// test saved I flag
	asm("jz restore_irq_off");			// jump if clear
	asm("sti");							// else reenable interrupts
	asm("ret");
	asm("restore_irq_off:");
	asm("cli");
	asm("ret");
	}
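
// Note on the cookie encoding used above: Disable(All)Interrupts() returns
// EFLAGS & 0x200, i.e. just the IF flag (bit 9). RestoreInterrupts() receives
// that dword at [esp+4], so bit 9 of the cookie is bit 1 of the byte at
// [esp+5], which is why "test byte ptr [esp+5], 2" recovers the saved I flag.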


/** Enable all maskable interrupts

@internalComponent
*/
EXPORT_C __NAKED__ void NKern::EnableAllInterrupts()
	{
	asm("sti");
	asm("ret");
	}


/**	Unlocks the kernel
	Decrements iKernCSLocked; if it becomes zero and IDFCs or a reschedule are
	pending, calls the scheduler to process them.

	@pre	Thread or IDFC context. Don't call from ISRs.
 */
EXPORT_C __NAKED__ void NKern::Unlock()
	{
	asm("mov eax, ds:[%0]" : : "i" (X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ID)); // OK since kernel locked
	asm("shr eax, 24 ");
	asm("push esi ");
	asm("mov esi, [eax*4+%0]" : : "i" (&SubSchedulerLookupTable));
#ifdef _DEBUG
	asm("cmp dword ptr [esi+%0], 0" : : "i" _FOFF(TSubScheduler, iKernLockCount));
	asm("jg short _dbg1 ");
	asm("int 0xff ");
	asm("_dbg1: ");
#endif
	asm("cli ");
	asm("dec dword ptr [esi+%0]" : : "i" _FOFF(TSubScheduler, iKernLockCount));
	asm("jnz short still_locked ");
//	asm("cmp dword ptr [esi]TSubScheduler.iRescheduleNeededFlag, 0	VC6 ignores the "dword ptr"
	asm("lea eax, [esi+%0]" : : "i" _FOFF(TSubScheduler, iRescheduleNeededFlag));
	asm("cmp dword ptr [eax], 0 ");
	asm("jz short no_resched ");

	asm("mov dword ptr [esi+%0], 1" : : "i" _FOFF(TSubScheduler, iKernLockCount));
	asm("push edi ");
	asm("push ebp ");
	asm("push ebx ");
	asm("push gs ");
	asm("push fs ");
	asm("sti ");

	// Reschedule - return with local interrupts disabled, iKernLockCount=0
	asm("push 1 ");
	asm("call %a0" : : "i" (TScheduler_Reschedule));
	asm("add esp, 4 ");

	asm("xor eax, eax ");
	asm("lock xchg eax, [esi+%0]" : : "i" _FOFF(TSubScheduler, iReschedIPIs));
	asm("test eax, eax ");
	asm("jz short no_resched_ipis_ul ");

	asm("unlock_do_resched_ipis: ");
	asm("push eax ");
	asm("call %a0" : : "i" (&send_resched_ipis));
	asm("add esp, 4 ");

	asm("no_resched_ipis_ul: ");
	asm("pop fs ");
	asm("pop gs ");
	asm("pop ebx ");
	asm("pop ebp ");
	asm("pop edi ");

	asm("still_locked: ");
	asm("sti ");
	asm("pop esi ");
	asm("ret ");

	asm("no_resched: ");
	asm("xor eax, eax ");
	asm("lock xchg eax, [esi+%0]" : : "i" _FOFF(TSubScheduler, iReschedIPIs));
	asm("test eax, eax ");
	asm("jz short still_locked ");
	asm("push edi ");
	asm("push ebp ");
	asm("push ebx ");
	asm("push gs ");
	asm("push fs ");
	asm("jmp short unlock_do_resched_ipis ");
	}
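
// Usage sketch (illustrative, not compiled in): the canonical pairing of
// NKern::Lock() and NKern::Unlock(). Unlock() is where deferred IDFCs run and
// where a pending reschedule actually takes place.
#if 0
void ExampleKernelLock()
	{
	NKern::Lock();				// defer IDFCs and preemption
	// ... manipulate nanokernel state safely ...
	NKern::Unlock();			// runs pending IDFCs / reschedules if needed
	}
#endif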


/**	Locks the kernel
	Defers IDFCs and preemption.

	@pre	Thread or IDFC context. Don't call from ISRs.
 */
EXPORT_C __NAKED__ void NKern::Lock()
	{
	asm("cli");		// stop thread migration between reading APIC ID and subscheduler stuff
	asm("mov eax, ds:[%0]" : : "i"(X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ID));
	asm("shr eax, 24");
	asm("mov ecx, [eax*4+%0]" : : "i"(&SubSchedulerLookupTable));
	asm("inc dword ptr [ecx+%0]": : "i"_FOFF(TSubScheduler, iKernLockCount));
	asm("sti");
	asm("ret");
	}


/**	Locks the kernel and returns a pointer to the current thread
	Defers IDFCs and preemption.

	@pre	Thread or IDFC context. Don't call from ISRs.
 */
EXPORT_C __NAKED__ NThread* NKern::LockC()
	{
	asm("cli");		// stop thread migration between reading APIC ID and subscheduler stuff
	asm("mov eax, ds:[%0]" : : "i"(X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ID));
	asm("shr eax, 24");
	asm("mov ecx, [eax*4+%0]" : : "i"(&SubSchedulerLookupTable));
	asm("inc dword ptr [ecx+%0]": : "i"_FOFF(TSubScheduler, iKernLockCount));
	asm("mov eax, [ecx+%0]" : : "i"_FOFF(TSubScheduler, iCurrentThread));
	asm("sti");
	asm("ret");
	}


/**	Allows IDFCs and rescheduling if they are pending.
	If IDFCs or a reschedule are pending and iKernCSLocked is exactly equal to 1
	calls the scheduler to process the IDFCs and possibly reschedule.

	@return	Nonzero if a reschedule actually occurred, zero if not.
	@pre	Thread or IDFC context. Don't call from ISRs.
 */
EXPORT_C __NAKED__ TInt NKern::PreemptionPoint()
	{
	asm("mov eax, ds:[%0]" : : "i"(X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ID));
	asm("shr eax, 24");
	asm("mov ecx, [eax*4+%0]" : : "i"(&SubSchedulerLookupTable));
#ifdef _DEBUG
	asm("cmp dword ptr [ecx+%0], 0": : "i"_FOFF(TSubScheduler, iKernLockCount));
	asm("jg _dbg1_pp");
	asm("int 0xff");
	asm("_dbg1_pp:");
#endif
	asm("cmp dword ptr [ecx+%0], 1": : "i"_FOFF(TSubScheduler, iKernLockCount));
	asm("jnz still_locked_pp");
//	asm("cmp dword ptr [ecx]TSubScheduler.iRescheduleNeededFlag, 0		VC6 ignores the "dword ptr"
	asm("lea eax, [ecx+%0]": : "i"_FOFF(TSubScheduler, iRescheduleNeededFlag));
	asm("cmp dword ptr [eax], 0");
	asm("jnz do_resched");
	asm("cli");
	asm("xor eax, eax");				// clear EAX so the xchg reads and clears the pending IPI mask
	asm("lock xchg eax, [ecx+%0]": : "i"_FOFF(TSubScheduler, iReschedIPIs));
	asm("test eax, eax");
	asm("jz pp_no_resched_ipis");
	asm("push eax");
	asm("call %a0": :"i"(&send_resched_ipis));
	asm("add esp, 4");
	asm("pp_no_resched_ipis:");
	asm("sti");

	asm("still_locked_pp:");
	asm("xor eax, eax");
	asm("ret");

	asm("do_resched:");
	asm("call %a0" : : "i"(NKern_Unlock));
	asm("call %a0" : : "i"(NKern_Lock));
	asm("mov eax, 1");
	asm("ret");
	}
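
// Usage sketch (illustrative, not compiled in): a long-running operation done
// with the kernel locked can call PreemptionPoint() periodically so that
// pending IDFCs and reschedules are not held off for the whole loop.
#if 0
void ExampleLongOperation(TInt aCount)
	{
	NKern::Lock();
	for (TInt i=0; i<aCount; ++i)
		{
		// ... one bounded chunk of work ...
		NKern::PreemptionPoint();	// brief window for IDFCs / reschedule
		}
	NKern::Unlock();
	}
#endif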


/** Complete the saving of a thread's context

This saves the FPU registers if necessary once we know that we are definitely
switching threads.

@internalComponent
*/
__NAKED__ void NThread::CompleteContextSave()
	{
	THISCALL_PROLOG0()
	asm("mov edx, [ecx+%0]": : "i"_FOFF(NThreadBase,iSavedSP));	// EDX points to saved state on thread stack
	asm("test byte ptr [edx], 8");						// test thread's saved TS flag
	asm("jnz no_fpu");									// if set, thread did not use FPU
	asm("clts");
	asm("fnsave [ecx+%0]": : "i"_FOFF(NThread, iCoprocessorState));	// else thread did use FPU - save its state
	asm("or byte ptr [edx], 8");						// set TS flag so thread aborts next time it uses FPU
	asm("fwait");

	asm("no_fpu:");
	THISCALL_EPILOG0()
	}
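
// Note on the lazy FPU handoff above: the dword at [EDX] is the CR0 image
// pushed in TScheduler::Reschedule() just before iSavedSP was stored, so bit 3
// of that byte is the thread's saved CR0.TS. If TS was clear, the thread used
// the FPU since it last gained it, so FNSAVE captures the coprocessor state
// into iCoprocessorState and TS is set in the saved image; when the thread
// resumes (Reschedule() pops that image back into CR0) its next FPU
// instruction faults, and the state is presumably restored on demand by the
// device-not-available handler elsewhere in this port.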


/** Check if the kernel is locked the specified number of times.

	@param aCount	The number of times the kernel should be locked
					If zero, tests if it is locked at all
	@return TRUE if the tested condition is true.

	@internalTechnology
*/
EXPORT_C __NAKED__ TBool NKern::KernelLocked(TInt /*aCount*/)
	{
	asm("pushfd");
	asm("cli");
	asm("mov eax, ds:[%0]" : : "i"(X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ID));
	asm("shr eax, 24");
	asm("mov eax, [eax*4+%0]" : : "i"(&SubSchedulerLookupTable));
	asm("mov edx, [eax+%0]": : "i"_FOFF(TSubScheduler, iKernLockCount));
	asm("popfd");
	asm("cmp edx, 0");
	asm("jz not_locked");
	asm("mov eax, [esp+4]");
	asm("cmp eax, 0");
	asm("jz locked");
	asm("cmp eax, edx");
	asm("jnz not_locked");
	asm("locked:");
	asm("mov eax, 1");
	asm("ret");
	asm("not_locked:");
	asm("xor eax, eax");
	asm("ret");
	}
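
// Usage sketch (illustrative, not compiled in): KernelLocked() is intended
// for debug-build precondition checks, e.g. (assuming the standard
// nanokernel assertion macro):
#if 0
void ExampleAssertLocked()
	{
	__NK_ASSERT_DEBUG(NKern::KernelLocked(1));	// exactly one Lock() outstanding
	// ... code that relies on the kernel lock being held once ...
	}
#endif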


// Only call this if thread migration is disabled, i.e.
// interrupts disabled, kernel locked or current thread in 'freeze cpu' mode
extern "C" __NAKED__ TSubScheduler& SubScheduler()
	{
	asm("mov eax, ds:[%0]" : : "i"(X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ID));
	asm("shr eax, 24");
	asm("mov eax, [eax*4+%0]" : : "i"(&SubSchedulerLookupTable));
	asm("ret");
	}

/** Returns the NThread control block for the currently scheduled thread.

    Note that this is the calling thread if called from a thread context, or the
	interrupted thread if called from an interrupt context.

	@return A pointer to the NThread for the currently scheduled thread.

	@pre Call in any context.
*/
EXPORT_C __NAKED__ NThread* NKern::CurrentThread()
	{
	asm("pushfd");
	asm("cli");		// stop thread migration between reading APIC ID and thread pointer
	asm("mov eax, ds:[%0]" : : "i"(X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ID));
	asm("shr eax, 24");
	asm("mov eax, [eax*4+%0]" : : "i"(&SubSchedulerLookupTable));
	asm("cmp eax, 0");
	asm("jz done");
	asm("test al, 3");
	asm("jnz bad_ct");
	asm("mov eax, [eax+%0]": : "i"_FOFF(TSubScheduler, iCurrentThread));
	asm("done:");
	asm("popfd");
	asm("ret");
	asm("bad_ct:");
	asm("popfd");
	asm("xor eax, eax");
	asm("ret");
	}
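
// Note on the checks above: early in boot a CPU's entry in
// SubSchedulerLookupTable may still be null or hold a flag value with the low
// bits set (compare the "bad_cc" case in NKern::CurrentContext() below), so
// the pointer is validated with "cmp eax, 0" and "test al, 3" before
// iCurrentThread is dereferenced; NULL is returned in either case.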


/** Returns the NThread control block for the currently scheduled thread.

    Note that this is the calling thread if called from a thread context, or the
	interrupted thread if called from an interrupt context.

	@return A pointer to the NThread for the currently scheduled thread.

	@pre Call with migration disabled - i.e. from an ISR, IDFC, with interrupts
			disabled or with preemption disabled.
*/
extern "C" __NAKED__ NThread* NCurrentThreadL()
	{
	asm("mov eax, ds:[%0]" : : "i"(X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ID));
	asm("shr eax, 24");
	asm("mov eax, [eax*4+%0]" : : "i"(&SubSchedulerLookupTable));
	asm("mov eax, [eax+%0]": : "i"_FOFF(TSubScheduler, iCurrentThread));
	asm("ret");
	}


/** Returns the CPU number of the calling CPU.

	@return The CPU number of the calling CPU.

	@pre Call in any context.
*/
EXPORT_C __NAKED__ TInt NKern::CurrentCpu()
	{
	asm("xor eax, eax");
	asm("str ax");
	asm("sub al, 0x28");
	asm("shr al, 3");
	asm("ret");
	}
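
// Worked example for the arithmetic above: each CPU's TSS selector is assumed
// to occupy one 8-byte GDT slot starting at selector 0x28, so CPU n runs with
// TR = 0x28 + 8*n. STR on CPU 2 therefore reads 0x38, and
// (0x38 - 0x28) >> 3 == 2, the CPU number returned in EAX.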


/**	Return the current processor context type (thread, IDFC or interrupt)

	@return	A value from NKern::TContext enumeration (but never EEscaped)
	@pre	Any context

	@see	NKern::TContext
 */
EXPORT_C __NAKED__ TInt NKern::CurrentContext()
	{
	asm("pushfd");
	asm("cli");		// stop thread migration between reading APIC ID and subscheduler stuff
	asm("mov edx, ds:[%0]": :"i"(X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ID));
	asm("xor eax, eax");
	asm("shr edx, 24");
	asm("mov edx, [edx*4+%0]" : : "i"(&SubSchedulerLookupTable));
	asm("cmp edx, eax");
	asm("jz bad_cc");
	asm("test dl, 3");
	asm("jnz bad_cc");
	asm("cmp eax, [edx+52+%0]": : "i"_FOFF(TSubScheduler,iExtras)); // i_IrqNestCount
	asm("jle irq");
	asm("cmp al, [edx+%0]": : "i"_FOFF(TSubScheduler, iInIDFC));
	asm("jz thread");
	asm("jmp idfc");

	asm("bad_cc:");			// no subscheduler yet [initialising] - return EInterrupt
	asm("irq:");			// return NKern::EInterrupt [=2]
	asm("inc eax");
	asm("idfc:");			// return NKern::EIDFC [=1]
	asm("inc eax");
	asm("thread:");			// return NKern::EThread [=0]
	asm("popfd");
	asm("ret");
	}
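
// Usage sketch (illustrative, not compiled in): code that may be entered from
// either thread or interrupt side can use CurrentContext() to choose a safe
// path, since blocking is only permissible in a thread context.
#if 0
void ExampleContextCheck()
	{
	if (NKern::CurrentContext() == NKern::EThread)
		{
		// safe to block / wait here
		}
	else
		{
		// ISR or IDFC: defer the work instead, e.g. by queueing a DFC
		}
	}
#endif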


#ifdef __USE_LOGICAL_DEST_MODE__
extern "C" __NAKED__ void __fastcall do_send_resched_ipis(TUint32)
	{
	asm("shl ecx, 24 ");		// CPUs mask into bits 24-31
	asm("jz short sri0 ");		// no CPUs, so nothing to do
	asm("pushfd ");
	asm("cli ");
	asm("mov ds:[%0], ecx" : : "i" (X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ICRH));
	asm("mov eax, %0" : : "i" (RESCHED_IPI_VECTOR | 0x4800));
	asm("mov ds:[%0], eax" : : "i" (X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ICRL));
	asm("popfd ");
	asm("sri0: ");
	asm("ret ");
	}
#endif

extern "C" __NAKED__ void __fastcall send_ipi(TUint32)
	{
	asm("pushfd ");
	asm("cli ");
	asm("mov ds:[%0], ecx" : : "i" (X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ICRH));
	asm("mov eax, %0" : : "i" (RESCHED_IPI_VECTOR | 0x4000));
	asm("mov ds:[%0], eax" : : "i" (X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ICRL));
	asm("popfd ");
	asm("ret ");
	}
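
// Note on the APIC ICR writes above and below: writing the low ICR dword is
// what actually sends the IPI, so the destination half (ICRH) is always
// written first, with interrupts disabled so the pair of writes is not split.
// In the command words, 0x4000 asserts the level bit with fixed delivery in
// physical destination mode, 0x4800 additionally selects logical destination
// mode, and 0x44000 uses the "self" destination shorthand.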

// Send a reschedule IPI to the current processor
// *** DON'T DO ANY TRACING OR INSTRUMENTATION ***
extern "C" __NAKED__ void send_self_resched_ipi()
	{
	asm("pushfd ");
	asm("cli ");
	asm("xor ecx, ecx ");
	asm("mov ds:[%0], ecx" : : "i" (X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ICRH));
	asm("mov eax, %0" : : "i" (RESCHED_IPI_VECTOR | 0x44000));	// destination shorthand = self
	asm("mov ds:[%0], eax" : : "i" (X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ICRL));
	asm("popfd ");
	asm("ret ");
	}

extern "C" __NAKED__ void send_irq_ipi(TSubScheduler*)
	{
	asm("mov ecx, [esp+4] ");
	asm("pushfd ");
	asm("mov edx, [ecx+%0]" : : "i" _FOFF(TSubScheduler, i_APICID));
	asm("cli ");
	asm("mov ds:[%0], edx" : : "i" (X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ICRH));
	asm("mov eax, %0" : : "i" (TRANSFERRED_IRQ_VECTOR | 0x4000));
	asm("mov ds:[%0], eax" : : "i" (X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ICRL));
	asm("popfd ");
	asm("ret ");
	}