// Copyright (c) 2007-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\nkern\x86\vectors.cia
//
//

#include <x86.h>
#include "vectors.h"

#ifdef __GCC32__
#define DECLARE_X86_INT(n)			GLDEF_C __NAKED__ void __X86Vector##n() { asm("push 0x"#n);	asm("jmp %a0": :"i"(&__X86VectorIrq)); }
#define DECLARE_X86_EXC_NOERR(n)	GLDEF_C __NAKED__ void __X86Vector##n() { asm("push 0"); asm("push 0x"#n); asm("jmp %a0": : "i"(&__X86VectorExc)); }
#define DECLARE_X86_EXC_ERR(n)		GLDEF_C __NAKED__ void __X86Vector##n() { asm("push 0x"#n); asm("jmp %a0": : "i"(&__X86VectorExc)); }
#else
#define DECLARE_X86_INT(n)			GLDEF_C __NAKED__ void __X86Vector##n() { __asm push 0x##n __asm jmp __X86VectorIrq }
#define DECLARE_X86_EXC_NOERR(n)	GLDEF_C __NAKED__ void __X86Vector##n() { __asm push 0 __asm push 0x##n __asm jmp __X86VectorExc }
#define DECLARE_X86_EXC_ERR(n)		GLDEF_C __NAKED__ void __X86Vector##n() { __asm push 0x##n __asm jmp __X86VectorExc }
#endif
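
// For illustration only: under __GCC32__, DECLARE_X86_INT(30) expands to roughly
//
//	GLDEF_C __NAKED__ void __X86Vector30()
//		{
//		asm("push 0x30");							// push the vector number
//		asm("jmp %a0" : : "i"(&__X86VectorIrq));	// tail-jump to the common IRQ preamble
//		}
//
// i.e. each stub just pushes its vector number and branches to the shared handler.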

const TLinAddr NKern_WaitForAnyRequest = (TLinAddr)&NKern::WaitForAnyRequest;
const TLinAddr NKern_LockSystem = (TLinAddr)&NKern::LockSystem;
const TLinAddr NKern_UnlockSystem = (TLinAddr)&NKern::UnlockSystem;
const TLinAddr NKern_Unlock = (TLinAddr)&NKern::Unlock;
const TLinAddr TScheduler_Reschedule = (TLinAddr)&TScheduler::Reschedule;

#ifdef __CHECK_LOCK_STATE__
/******************************************************************************
 * Check that the kernel is unlocked, no fast mutex is held and the thread
 * is not in a critical section when returning to user mode.
 ******************************************************************************/
extern "C" __NAKED__ void check_lock_state()
	{
	asm("push ecx ");
	asm("mov ecx, [%a0]" : : "i" (&TheScheduler.iCurrentThread));
	asm("cmp dword ptr [%a0], 0" : : "i" (&TheScheduler.iKernCSLocked));
	asm("jnz short bad_lock_state1 ");
	asm("cmp dword ptr [ecx+%0], 0" : : "i" _FOFF(NThreadBase, iHeldFastMutex));
	asm("jne short bad_lock_state2 ");
	asm("cmp dword ptr [ecx+%0], 0" : : "i" _FOFF(NThreadBase, iCsCount));
	asm("jne short bad_lock_state3 ");
	asm("pop ecx ");
	asm("ret ");
	asm("bad_lock_state1: ");
	asm("int 0xff ");
	asm("bad_lock_state2: ");
	asm("int 0xff ");
	asm("bad_lock_state3: ");
	asm("int 0xff ");
	}
#endif

/******************************************************************************
* Int 20h Handler - Fast Executive Calls
* Enter with:
*		Call number in EAX
*		Parameter in ECX if any
* On entry SS:ESP references the current thread's supervisor stack
* [ESP+0] = return EIP
* [ESP+4] = return CS
* [ESP+8] = return EFLAGS
* [ESP+12] = return ESP if privilege change occurred
* [ESP+16] = return SS if privilege change occurred
*******************************************************************************/
GLDEF_C __NAKED__ void __X86Vector20()
	{
	// Interrupts enabled on entry
	asm("cld");
	asm("test eax, eax");
	asm("je wait_for_any_request");
	asm("push ds");
	asm("push es");
	asm("push gs");
	asm("push ecx");
	asm("mov cx, ds");					// save caller's DS
	asm("mov dx, ss");					// supervisor data selector
	asm("mov ds, dx");
	asm("mov gs, cx");					// GS = caller's DS, for user memory access
	asm("mov ecx, [%a0]": :"i"(&TheScheduler.iCurrentThread));
	asm("mov es, dx");
	asm("mov edx, [ecx+%0]" : : "i"_FOFF(NThreadBase,iFastExecTable));
	asm("cmp eax, [edx]");				// call number within table?
	asm("jae fast_exec_invalid");
	asm("cli");
	asm("call [edx+eax*4]");			// call fast exec function
	asm("add esp, 4");					// remove pushed ECX
	asm("fast_exec_exit:");
	asm("test dword ptr [esp+16], 3 ");	// returning to user mode?
	asm("jz short fast_exec_exit2 ");	// no - don't do lock check or user mode callbacks
#ifdef __CHECK_LOCK_STATE__
	asm("call %a0" : : "i" (&check_lock_state));
#endif
	asm("push eax");
#ifdef __GCC32__
	asm("mov ecx, [%a0]": : "i"(&TheScheduler.iCurrentThread));
	asm("push ecx");
	asm("call __ZN11NThreadBase21CallUserModeCallbacksEv ");
	asm("add esp,4");
#else
	TheScheduler.iCurrentThread->CallUserModeCallbacks();
#endif
	asm("pop eax");

	asm("fast_exec_exit2: ");
	asm("pop gs");
	asm("pop es");
	asm("pop ds");
	asm("iretd");

	asm("wait_for_any_request:");
	asm("push ds");
	asm("push es");
	asm("mov cx, ss");
	asm("mov ds, cx");
	asm("mov es, cx");
	asm("call %a0" : : "i" (NKern_WaitForAnyRequest));

	asm("test dword ptr [esp+12], 3 ");	// returning to user mode?
	asm("jz short wfar_exit2 ");		// no - don't do lock check or user mode callbacks
	asm("push eax");
	asm("cli");
#ifdef __GCC32__
	asm("mov ecx, [%a0]": : "i"(&TheScheduler.iCurrentThread));
	asm("push ecx");
	asm("call __ZN11NThreadBase21CallUserModeCallbacksEv");
	asm("add esp,4");
#else
	TheScheduler.iCurrentThread->CallUserModeCallbacks();
#endif
	asm("pop eax");
#ifdef __CHECK_LOCK_STATE__
	asm("call %a0" : : "i" (&check_lock_state));
#endif

	asm("wfar_exit2: ");
	asm("pop es");
	asm("pop ds");
	asm("iretd");

	asm("fast_exec_invalid:");
	asm("pop ecx");
	asm("push ebp");
	asm("push edi");
	asm("push esi");
	asm("push ebx");
	asm("push edx");
	asm("push ecx");
	asm("mov edi, [%a0]": :"i"(&TheScheduler.iCurrentThread));			// edi=TheCurrentThread
	asm("mov esi, [edi+%0]" : : "i"_FOFF(NThreadBase,iSlowExecTable));	// esi=slow exec table base
	asm("call [esi-8]");												// call invalid exec handler
	asm("pop ecx");
	asm("pop edx");
	asm("pop ebx");
	asm("pop esi");
	asm("pop edi");
	asm("pop ebp");
	asm("jmp fast_exec_exit");
	}
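
// Illustrative only, not part of the original source: given the entry
// conditions documented above (call number in EAX, optional parameter in
// ECX), a user-side stub for a fast executive call would look roughly like
// this. The call number 1 is hypothetical; real stubs live in the user
// library and use numbers from the thread's fast exec table.
//
//	__NAKED__ TInt FastExecExample(TInt aParam)
//		{
//		asm("mov ecx, [esp+4]");	// single parameter in ECX
//		asm("mov eax, 1");			// fast exec call number (hypothetical)
//		asm("int 0x20");			// dispatched by __X86Vector20 above
//		asm("ret");					// result, if any, comes back in EAX
//		}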

/******************************************************************************
* Int 21h Handler - Slow Executive Calls
* Enter with:
*		Call number in EAX
*		Parameters in ECX, EDX, EBX, ESI in that order
* On entry SS:ESP references the current thread's supervisor stack
* Must preserve EBX, EBP, ESI, EDI
* [ESP+0] = return EIP
* [ESP+4] = return CS
* [ESP+8] = return EFLAGS
* [ESP+12] = return ESP if privilege change occurred
* [ESP+16] = return SS if privilege change occurred
*******************************************************************************/
GLDEF_C __NAKED__ void __X86Vector21()
	{
	// Interrupts enabled on entry
	asm("sub esp, 32");								// reserve space for additional arguments
	asm("cld");
	asm("push ds");
	asm("push es");
	asm("push gs");
	asm("push ebp");
	asm("mov bp, ds");								// save caller's DS
	asm("mov gs, bp");								// GS = caller's DS, for reading user-side arguments
	asm("mov bp, ss");								// supervisor data selector
	asm("mov ds, bp");
	asm("mov es, bp");
	asm("push edi");
	asm("mov edi, [%a0]": : "i"(&TheScheduler.iCurrentThread));	// edi=TheCurrentThread
	asm("push esi");
	asm("mov esi, [edi+%0]" : : "i"_FOFF(NThreadBase,iSlowExecTable)); // esi=slow exec table base
	asm("push ebx");
	asm("push edx");
	asm("push ecx");
	asm("lea ebp, [esi+eax*8]");					// ebp points to exec table entry
	asm("cmp eax, [esi-12]");						// call number within table?
	asm("jae slow_exec_invalid");
	asm("mov ebx, [ebp]");							// ebx=flags
	asm("test ebx, 0x1c000000");					// additional arguments required?
	asm("jz slow_exec_no_extra_args");
	asm("mov edx, [esp+8]");						// edx points to additional args
	asm("lea eax, [esp+36]");						// address of copied additional arguments
	asm("mov [esp+8], eax");						// replace supplied address
	asm("mov ecx, ebx");
	asm("shr ecx, 26");
	asm("and cl, 7");								// ecx=number of additional arguments-1
	asm("test edx, edx");
	asm("jnz slow_exec_extra_args_present");		// if arg ptr not NULL, copy args
	asm("slow_exec_zero_args:");
	asm("mov [esp+ecx*4+36], edx");					// else zero args
	asm("dec ecx");
	asm("jns slow_exec_zero_args");
	asm("jmp slow_exec_no_extra_args");

	asm("slow_exec_extra_args_present:");
	asm("slow_exec_copy_args:");
	asm("mov eax, gs:[edx+ecx*4]");					// get argument
	asm("mov [esp+ecx*4+36], eax");					// copy it
	asm("dec ecx");
	asm("jns slow_exec_copy_args");

	asm("slow_exec_no_extra_args:");
	asm("test ebx, 0x80000000");					// test EClaim
	asm("jz slow_exec_no_claim");
	asm("call %a0" : : "i"(NKern_LockSystem)); 		// trashes eax, ecx, edx
	asm("slow_exec_no_claim:");
	asm("test ebx, 0x20000000");					// test EPreprocess
	asm("jz slow_exec_no_preprocess");
	asm("call [esi-4]");							// trashes eax, ecx, edx, edi
	asm("slow_exec_no_preprocess:");
	asm("call [ebp+4]");							// call exec function
	asm("test ebx, 0x40000000");					// test ERelease
	asm("jz slow_exec_no_release");
	asm("mov edi, eax");							// save return value in EDI
	asm("call %a0" : : "i"(NKern_UnlockSystem));	// trashes eax, ecx, edx
	asm("mov eax, edi");							// restore return value
	asm("slow_exec_no_release:");

	asm("slow_exec_exit:");
	asm("test dword ptr [esp+72], 3 ");				// returning to user mode?
	asm("jz short slow_exec_exit2 ");				// no - don't do lock check or user mode callbacks
#ifdef __CHECK_LOCK_STATE__
	asm("call %a0" : : "i" (&check_lock_state));
#endif
	asm("push eax");
	asm("cli");
#ifdef __GCC32__
	asm("mov ecx, [%a0]": : "i"(&TheScheduler.iCurrentThread));
	asm("push ecx");
	asm("call __ZN11NThreadBase21CallUserModeCallbacksEv");
	asm("add esp,4");
#else
	TheScheduler.iCurrentThread->CallUserModeCallbacks();
#endif
	asm("pop eax");

	asm("slow_exec_exit2: ");
	asm("pop ecx");
	asm("pop edx");
	asm("pop ebx");
	asm("pop esi");
	asm("pop edi");
	asm("pop ebp");
	asm("pop gs");
	asm("pop es");
	asm("pop ds");
	asm("add esp, 32");								// remove additional arguments
	asm("iretd");

	asm("slow_exec_invalid:");
	asm("call [esi-8]");							// call invalid exec handler
	asm("jmp slow_exec_exit");
	}
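
// Illustrative only, not part of the original source: the offsets used above
// ([esi-12] = number of calls, [esi-8] = invalid-call handler, [esi-4] =
// preprocess handler, and 8-byte flags+function entries at [esi+eax*8]) imply
// a slow exec table shaped roughly as follows. The struct and field names
// here are hypothetical.
//
//	struct SSlowExecEntryExample
//		{
//		TUint32 iFlags;				// 0x80000000=EClaim, 0x40000000=ERelease,
//									// 0x20000000=EPreprocess, bits 26-28 encode extra arg count
//		TLinAddr iFunction;			// exec function to call
//		};
//
//	struct SSlowExecTableExample
//		{
//		TInt iNumberOfCalls;					// at [esi-12]
//		TLinAddr iInvalidCallHandler;			// at [esi-8]
//		TLinAddr iPreprocessHandler;			// at [esi-4]
//		SSlowExecEntryExample iEntries[1];		// at [esi], indexed by call number
//		};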

const TUint32 irq_start_trace_header = ((4<<BTrace::ESizeIndex) + (BTrace::ECpuUsage<<BTrace::ECategoryIndex*8) + (BTrace::EIrqStart<<BTrace::ESubCategoryIndex*8));
const TUint32 irq_end_trace_header   = ((4<<BTrace::ESizeIndex) + (BTrace::ECpuUsage<<BTrace::ECategoryIndex*8) + (BTrace::EIrqEnd<<BTrace::ESubCategoryIndex*8));
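
// Each constant above packs a complete 4-byte BTrace record header (record
// size 4, category ECpuUsage, sub-category EIrqStart or EIrqEnd, each shifted
// into its byte of the header word) into a single immediate, so the trace
// paths below can emit a header with one push instruction.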

/******************************************************************************
* IRQ Preamble/Postamble Common Code
* On entry SS:ESP references the current thread's supervisor stack
* [ESP+0] = vector number
* [ESP+4] = return EIP
* [ESP+8] = return CS
* [ESP+12] = return EFLAGS
* [ESP+16] = return ESP if privilege change occurred
* [ESP+20] = return SS if privilege change occurred
*******************************************************************************/
__NAKED__ void __X86VectorIrq()
	{
	asm("push ds");
	asm("push es");
	asm("push eax");
	asm("mov ax, ss");
	asm("cld");
	asm("push ecx");
	asm("push edx");
	asm("mov ds, ax");
	asm("mov es, ax");
	asm("mov eax, esp");									// eax points to saved stuff
	asm("inc dword ptr [%a0]": : "i"(&X86_IrqNestCount));	// nest count starts at -1
	asm("jnz nested_irq_entry");							// nested interrupt - stay on current stack
#ifdef __GCC32__
	asm("mov esp, %0" : : "i"(&X86_IrqStack));
	asm("add esp, %0" : : "i"(IRQ_STACK_SIZE));
#else
	_asm lea esp, X86_IrqStack[IRQ_STACK_SIZE]
#endif
	asm("push eax");										// save interrupted ESP at base of IRQ stack
	asm("nested_irq_entry:");

#ifdef BTRACE_CPU_USAGE
	asm("cmp byte ptr [%a0], 0": : "i"(&TheScheduler.iCpuUsageFilter));
	asm("jz no_trace");
	asm("push eax");
	asm("push %0": :"i"(irq_start_trace_header));
	asm("call dword ptr [%a0]": : "i"(&TheScheduler.iBTraceHandler));
	asm("pop eax");											// remove trace header
	asm("pop eax");											// restore EAX
	asm("no_trace:");
#endif
	asm("call [%a0]": : "i"(&X86_IrqHandler));

	// Postamble. Interrupts disabled here.
	asm("xor eax, eax");
	asm("dec dword ptr [%a0]": : "i"(&X86_IrqNestCount));
	asm("jns nested_irq_exit");
	asm("cmp eax, [%a0]": : "i"(&TheScheduler.iKernCSLocked));
	asm("lea edx, %a0": : "i"(&TheScheduler.iRescheduleNeededFlag));
	asm("jnz irq_kernel_locked_exit");
	asm("cmp eax, [edx]");
	asm("jz irq_kernel_locked_exit");
	asm("inc eax");
	asm("mov [%a0], eax": : "i"(&TheScheduler.iKernCSLocked));
	asm("pop eax");
	asm("mov esp, eax");									// back to the interrupted thread's stack
	asm("sti");
	asm("call %a0" : : "i"(TScheduler_Reschedule));
	asm("jmp irq_exit");

	asm("irq_kernel_locked_exit:");
	asm("pop eax");
	asm("mov esp, eax");

	asm("nested_irq_exit:");
#ifdef BTRACE_CPU_USAGE
	asm("cmp byte ptr [%a0], 0": : "i"(&TheScheduler.iCpuUsageFilter));
	asm("jz no_trace2");
	asm("push %0": : "i"(irq_end_trace_header));
	asm("call dword ptr [%a0]": : "i"(&TheScheduler.iBTraceHandler));
	asm("pop eax");
	asm("no_trace2:");
#endif
	asm("irq_exit:");
	asm("test dword ptr [esp+28], 3 ");	// returning to user mode?
	asm("jz short irq_exit2 ");
#ifdef __CHECK_LOCK_STATE__
	asm("call %a0" : : "i" (&check_lock_state));
#endif
#ifdef __GCC32__
	asm("mov ecx, [%a0]": : "i"(&TheScheduler.iCurrentThread));
	asm("push ecx");
	asm("call __ZN11NThreadBase21CallUserModeCallbacksEv");
	asm("add esp,4");
#else
	TheScheduler.iCurrentThread->CallUserModeCallbacks();
#endif
	asm("irq_exit2:");
	asm("pop edx");
	asm("pop ecx");
	asm("pop eax");
	asm("pop es");
	asm("pop ds");
	asm("add esp, 4");					// remove vector number
	asm("iretd");
	}
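
// Descriptive note, derived from the code above: X86_IrqNestCount starts at
// -1, so the INC sets the zero flag only for the outermost interrupt, which
// then switches to the dedicated IRQ stack and saves the interrupted ESP at
// the stack's base; nested interrupts stay where they are. On the way out,
// the kernel is locked and TScheduler::Reschedule entered only when the
// outermost interrupt exits with the kernel unlocked and
// iRescheduleNeededFlag set.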


/******************************************************************************
* General Exception Handler
* On entry SS:ESP references the current thread's supervisor stack
* [ESP+0] = vector number
* [ESP+4] = error code (filled with 0 for exceptions without error codes)
* [ESP+8] = return EIP
* [ESP+12] = return CS
* [ESP+16] = return EFLAGS
* [ESP+20] = return ESP if privilege change occurred
* [ESP+24] = return SS if privilege change occurred
*******************************************************************************/
GLDEF_C __NAKED__ void __X86VectorExc()
	{
	asm("push ds");
	asm("push es");
	asm("push fs");
	asm("push gs");
	asm("cld");
	asm("push ebp");
	asm("mov bp, ds");
	asm("push edi");
	asm("mov gs, bp");
	asm("mov bp, ss");
	asm("push esi");
	asm("push ebx");
	asm("push ecx");
	asm("push edx");
	asm("push eax");
	asm("mov eax, cr2");				// fault address from CR2 (meaningful for page faults)
	asm("mov ds, bp");
	asm("mov es, bp");
	asm("push eax");					// save fault address in the frame
	asm("sub esp, 8");					// reserve space for ESP and SS
	asm("mov ebp, esp");				// ebp points to exception info frame
	asm("mov edi, [%a0]": : "i"(&TheScheduler.iCurrentThread));
	asm("xor eax, eax");
	asm("mov ax, ss");
	asm("mov [ebp+4], eax");	// SS
	asm("mov eax, ebp");
	asm("add eax, 76");		// EAX = ESP at point of exception if ring 0
	asm("test dword ptr [ebp+68], 3");	// check if we came from kernel mode
	asm("jz ring0_exception");
	asm("mov byte ptr [edi+11], 1");
	asm("add eax, 8");		// EAX = ESP at point of exception if ring 3
	asm("ring0_exception:");
	asm("mov [ebp], eax");
	asm("cmp dword ptr [%a0], -1": : "i"(&X86_IrqNestCount));
	asm("jnz fatal_exception_irq");
	asm("cmp dword ptr [%a0], 0": : "i"(&TheScheduler.iKernCSLocked));
	asm("jnz fatal_exception_locked");
	asm("cmp dword ptr [ebp+%0], 7": :"i"_FOFF(TX86ExcInfo,iExcId));	// check for device not available
	asm("jne not_fpu");
	asm("mov dword ptr [%a0], 1": :"i"(&TheScheduler.iKernCSLocked));
	asm("clts");
	asm("frstor [edi+%0]": :"i"_FOFF(NThread,iCoprocessorState));
	asm("call %a0": :"i"(NKern_Unlock));
	asm("add esp, 12");
	asm("jmp proceed");

	asm("not_fpu:");
	asm("mov eax, [edi+%0]" : : "i"_FOFF(NThreadBase,iHandlers));
	asm("push edi");		// pass current thread parameter
	asm("push ebp");		// pass frame address
	asm("call [eax+%0]" : : "i"_FOFF(SNThreadHandlers,iExceptionHandler));
	asm("add esp, 20");		// remove parameters, esp, ss, fault address

	asm("proceed:");
	asm("mov byte ptr [edi+11], 0 ");
	asm("test dword ptr [esp+56], 3 ");	// returning to user mode?
	asm("jz short proceed2 ");
	asm("cli");
#ifdef __CHECK_LOCK_STATE__
	asm("call %a0" : : "i" (&check_lock_state));
#endif
#ifdef __GCC32__
	asm("mov ecx, [%a0]": : "i"(&TheScheduler.iCurrentThread));
	asm("push ecx");
	asm("call __ZN11NThreadBase21CallUserModeCallbacksEv");
	asm("add esp,4");
#else
	TheScheduler.iCurrentThread->CallUserModeCallbacks();
#endif
	asm("proceed2:");
	asm("pop eax");
	asm("pop edx");
	asm("pop ecx");
	asm("pop ebx");
	asm("pop esi");
	asm("pop edi");
	asm("pop ebp");
	asm("pop gs");
	asm("pop fs");
	asm("pop es");
	asm("pop ds");
	asm("add esp, 8");		// skip vector number and error code
	asm("iretd");

	asm("fatal_exception_irq:");
	asm("fatal_exception_locked:");
	asm("lea eax, %a0": :"i"(&TheScheduler));
	asm("lea eax, [eax+%0]": :"i"_FOFF(TScheduler,iMonitorExceptionHandler));
	asm("mov eax,[eax]");

	asm("test eax, eax");
	asm("jnz monitor_exception");
	asm("push ebp");
	asm("call %a0": :"i"(&__X86ExcFault));	// doesn't return

	asm("monitor_exception:");
	asm("jmp eax");
	}
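
// For reference (derived from the pushes above, not in the original source):
// at 'mov ebp, esp' the frame EBP points at, which is passed to the thread's
// exception handler, is laid out as
//	[ebp+0]  ESP at exception    [ebp+4]  SS     [ebp+8]  fault address (CR2)
//	[ebp+12] EAX   [ebp+16] EDX  [ebp+20] ECX    [ebp+24] EBX
//	[ebp+28] ESI   [ebp+32] EDI  [ebp+36] EBP    [ebp+40..52] GS, FS, ES, DS
//	[ebp+56] vector number (TX86ExcInfo::iExcId) [ebp+60] error code
//	[ebp+64] EIP   [ebp+68] CS   [ebp+72] EFLAGS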


/******************************************************************************
* Exception Handlers
*******************************************************************************/

DECLARE_X86_EXC_NOERR(00)
DECLARE_X86_EXC_NOERR(01)
DECLARE_X86_EXC_NOERR(02)
DECLARE_X86_EXC_NOERR(03)
DECLARE_X86_EXC_NOERR(04)
DECLARE_X86_EXC_NOERR(05)
DECLARE_X86_EXC_NOERR(06)
DECLARE_X86_EXC_NOERR(07)
DECLARE_X86_EXC_ERR(08)
DECLARE_X86_EXC_NOERR(09)
DECLARE_X86_EXC_ERR(0A)
DECLARE_X86_EXC_ERR(0B)
DECLARE_X86_EXC_ERR(0C)
DECLARE_X86_EXC_ERR(0D)
DECLARE_X86_EXC_ERR(0E)
DECLARE_X86_EXC_NOERR(0F)
DECLARE_X86_EXC_NOERR(10)
DECLARE_X86_EXC_ERR(11)
DECLARE_X86_EXC_NOERR(12)
DECLARE_X86_EXC_NOERR(13)
DECLARE_X86_EXC_NOERR(14)
DECLARE_X86_EXC_NOERR(15)
DECLARE_X86_EXC_NOERR(16)
DECLARE_X86_EXC_NOERR(17)
DECLARE_X86_EXC_NOERR(18)
DECLARE_X86_EXC_NOERR(19)
DECLARE_X86_EXC_NOERR(1A)
DECLARE_X86_EXC_NOERR(1B)
DECLARE_X86_EXC_NOERR(1C)
DECLARE_X86_EXC_NOERR(1D)
DECLARE_X86_EXC_NOERR(1E)
DECLARE_X86_EXC_NOERR(1F)

/******************************************************************************
* Interrupt Handlers
*******************************************************************************/

DECLARE_X86_INT(30)
DECLARE_X86_INT(31)
DECLARE_X86_INT(32)
DECLARE_X86_INT(33)
DECLARE_X86_INT(34)
DECLARE_X86_INT(35)
DECLARE_X86_INT(36)
DECLARE_X86_INT(37)
DECLARE_X86_INT(38)
DECLARE_X86_INT(39)
DECLARE_X86_INT(3A)
DECLARE_X86_INT(3B)
DECLARE_X86_INT(3C)
DECLARE_X86_INT(3D)
DECLARE_X86_INT(3E)
DECLARE_X86_INT(3F)

/*const*/ PFV TheExcVectors[64]=
{
__X86Vector00,	__X86Vector01,	__X86Vector02,	__X86Vector03,
__X86Vector04,	__X86Vector05,	__X86Vector06,	__X86Vector07,
__X86Vector08,	__X86Vector09,	__X86Vector0A,	__X86Vector0B,
__X86Vector0C,	__X86Vector0D,	__X86Vector0E,	__X86Vector0F,
__X86Vector10,	__X86Vector11,	__X86Vector12,	__X86Vector13,
__X86Vector14,	__X86Vector15,	__X86Vector16,	__X86Vector17,
__X86Vector18,	__X86Vector19,	__X86Vector1A,	__X86Vector1B,
__X86Vector1C,	__X86Vector1D,	__X86Vector1E,	__X86Vector1F,
__X86Vector20,	__X86Vector21,	NULL,			NULL,
NULL,			NULL,			NULL,			NULL,
NULL,			NULL,			NULL,			NULL,
NULL,			NULL,			NULL,			NULL,
__X86Vector30,	__X86Vector31,	__X86Vector32,	__X86Vector33,
__X86Vector34,	__X86Vector35,	__X86Vector36,	__X86Vector37,
__X86Vector38,	__X86Vector39,	__X86Vector3A,	__X86Vector3B,
__X86Vector3C,	__X86Vector3D,	__X86Vector3E,	__X86Vector3F
};
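
// Vectors 0x00-0x1F are the CPU exceptions, 0x20 and 0x21 the fast and slow
// executive call gates, and 0x30-0x3F the hardware interrupts; the NULL
// slots 0x22-0x2F are unused.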

EXPORT_C __NAKED__ TUint32 X86::IrqReturnAddress()
	{
	asm("mov eax, %0": :"i"(&X86_IrqStack[0]));
	asm("mov eax, [eax + %0]": :"i"(IRQ_STACK_SIZE - 4));	// pointer to saved supervisor stack pointer
	asm("mov eax, [eax+24]");								// return address from original interrupt
	asm("ret");
	}

__NAKED__ TUint32 get_cr0()
	{
	asm("mov eax, cr0");
	asm("ret");
	}

__NAKED__ TUint32 get_cr3()
	{
	asm("mov eax, cr3");
	asm("ret");
	}

__NAKED__ TUint32 get_esp()
	{
	asm("mov eax, esp");
	asm("ret");
	}

__NAKED__ void __lidt(SX86Des* /*aTable*/, TInt /*aLimit*/)
	{
	asm("mov eax, [esp+4]");			// aTable
	asm("mov ecx, [esp+8]");			// aLimit = number of descriptors
	asm("shl ecx, 3");					// 8 bytes per descriptor
	asm("sub ecx, 1");					// limit = size in bytes - 1
	asm("sub esp, 8");
	asm("mov word ptr [esp], cx");		// build 6-byte pseudo-descriptor: limit...
	asm("mov dword ptr [esp+2], eax");	// ...then base
	asm("lidt [esp]");
	asm("add esp, 8");
	asm("mov eax, 0x28");
	asm("ltr ax");						// load task register with selector 0x28
	asm("ret");
	}
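
// Illustrative only, not part of the original source: __lidt() takes the IDT
// base address and the number of 8-byte descriptors, so a hypothetical call
// site installing a 64-entry IDT would be
//
//	extern SX86Des TheIdt[64];	// hypothetical descriptor table
//	__lidt(TheIdt, 64);			// limit = 64*8-1 = 511 bytes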