1.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
1.2 +++ b/os/kernelhwsrv/kernel/eka/nkern/x86/vectors.cia Fri Jun 15 03:10:57 2012 +0200
1.3 @@ -0,0 +1,619 @@
1.4 +// Copyright (c) 2007-2009 Nokia Corporation and/or its subsidiary(-ies).
1.5 +// All rights reserved.
1.6 +// This component and the accompanying materials are made available
1.7 +// under the terms of the License "Eclipse Public License v1.0"
1.8 +// which accompanies this distribution, and is available
1.9 +// at the URL "http://www.eclipse.org/legal/epl-v10.html".
1.10 +//
1.11 +// Initial Contributors:
1.12 +// Nokia Corporation - initial contribution.
1.13 +//
1.14 +// Contributors:
1.15 +//
1.16 +// Description:
1.17 +// e32\nkern\x86\vectors.cia
1.18 +//
1.19 +//
1.20 +
1.21 +#include <x86.h>
1.22 +#include "vectors.h"
1.23 +
1.24 +#ifdef __GCC32__	// stub generators: push vector number (and a dummy error code where the CPU pushes none), then jump to the common handler
1.25 +#define DECLARE_X86_INT(n) GLDEF_C __NAKED__ void __X86Vector##n() { asm("push 0x"#n); asm("jmp %a0": :"i"(&__X86VectorIrq)); }
1.26 +#define DECLARE_X86_EXC_NOERR(n) GLDEF_C __NAKED__ void __X86Vector##n() { asm("push 0"); asm("push 0x"#n); asm("jmp %a0": : "i"(&__X86VectorExc)); }	// exception without CPU error code: push dummy 0
1.27 +#define DECLARE_X86_EXC_ERR(n) GLDEF_C __NAKED__ void __X86Vector##n() { asm("push 0x"#n); asm("jmp %a0": : "i"(&__X86VectorExc)); }	// CPU already pushed the error code
1.28 +#else	// MSVC-style __asm blocks
1.29 +#define DECLARE_X86_INT(n) GLDEF_C __NAKED__ void __X86Vector##n() { __asm push 0x##n __asm jmp __X86VectorIrq }
1.30 +#define DECLARE_X86_EXC_NOERR(n) GLDEF_C __NAKED__ void __X86Vector##n() { __asm push 0 __asm push 0x##n __asm jmp __X86VectorExc }
1.31 +#define DECLARE_X86_EXC_ERR(n) GLDEF_C __NAKED__ void __X86Vector##n() { __asm push 0x##n __asm jmp __X86VectorExc }
1.32 +#endif
1.33 +
1.34 +// Function entry points captured as plain addresses so they can be used as
1.35 +// immediates in the inline-asm "call %a0" sequences below.
1.36 +const TLinAddr NKern_WaitForAnyRequest = (TLinAddr)&NKern::WaitForAnyRequest;
1.37 +const TLinAddr NKern_LockSystem = (TLinAddr)&NKern::LockSystem;
1.38 +const TLinAddr NKern_UnlockSystem = (TLinAddr)&NKern::UnlockSystem;
1.39 +const TLinAddr NKern_Unlock = (TLinAddr)&NKern::Unlock;
1.40 +const TLinAddr TScheduler_Reschedule = (TLinAddr)&TScheduler::Reschedule;
1.39 +
1.40 +#ifdef __CHECK_LOCK_STATE__
1.41 +/******************************************************************************
1.42 + * Check that the kernel is unlocked, no fast mutex is held and the thread
1.43 + * is not in a critical section when returning to user mode.
1.44 + ******************************************************************************/
1.45 +extern "C" __NAKED__ void check_lock_state()
1.46 +	{
1.47 +	asm("push ecx ");
1.48 +	asm("mov ecx, [%a0]" : : "i" (&TheScheduler.iCurrentThread));	// ecx = current NThread
1.49 +	asm("cmp dword ptr [%a0], 0" : : "i" (&TheScheduler.iKernCSLocked));	// kernel must be unlocked
1.50 +	asm("jnz short bad_lock_state1 ");
1.51 +	asm("cmp dword ptr [ecx+%0], 0" : : "i" _FOFF(NThreadBase, iHeldFastMutex));	// no fast mutex may be held
1.52 +	asm("jne short bad_lock_state2 ");
1.53 +	asm("cmp dword ptr [ecx+%0], 0" : : "i" _FOFF(NThreadBase, iCsCount));	// must not be in a critical section
1.54 +	asm("jne short bad_lock_state3 ");
1.55 +	asm("pop ecx ");
1.56 +	asm("ret ");
1.57 +	asm("bad_lock_state1: ");	// three separate fault sites so the faulting EIP identifies which check failed
1.58 +	asm("int 0xff ");
1.59 +	asm("bad_lock_state2: ");
1.60 +	asm("int 0xff ");
1.61 +	asm("bad_lock_state3: ");
1.62 +	asm("int 0xff ");
1.63 +	}
1.64 +#endif
1.65 +
1.66 +/******************************************************************************
1.67 +* Int 20h Handler - Fast Executive Calls
1.68 +* Enter with:
1.69 +* Call number in EAX
1.70 +* Parameter in ECX if any
1.71 +* On entry SS:ESP references current threads supervisor stack
1.72 +* [ESP+0] = return EIP
1.73 +* [ESP+4] = return CS
1.74 +* [ESP+8] = return EFLAGS
1.75 +* [ESP+12] = return ESP if privilege change occurred
1.76 +* [ESP+16] = return SS if privilege change occurred
1.77 +*******************************************************************************/
1.78 +GLDEF_C __NAKED__ void __X86Vector20()
1.79 +	{
1.80 +	// Interrupts enabled on entry
1.81 +	asm("cld");
1.82 +	asm("test eax, eax");
1.83 +	asm("je wait_for_any_request");	// call number 0 = WaitForAnyRequest fast path
1.84 +	asm("push ds");
1.85 +	asm("push es");
1.86 +	asm("push gs");
1.87 +	asm("push ecx");	// pass the single parameter on the stack
1.88 +	asm("mov cx, ds");
1.89 +	asm("mov dx, ss");
1.90 +	asm("mov ds, dx");	// DS/ES = kernel data (same as SS); GS keeps caller's DS
1.91 +	asm("mov gs, cx");
1.92 +	asm("mov ecx, [%a0]": :"i"(&TheScheduler.iCurrentThread));
1.93 +	asm("mov es, dx");
1.94 +	asm("mov edx, [ecx+%0]" : : "i"_FOFF(NThreadBase,iFastExecTable));
1.95 +	asm("cmp eax, [edx]");	// bounds-check call number against first word of table
1.96 +	asm("jae fast_exec_invalid");
1.97 +	asm("cli");	// fast exec functions run with interrupts disabled
1.98 +	asm("call [edx+eax*4]");
1.99 +	asm("add esp, 4");	// discard pushed parameter
1.100 +	asm("fast_exec_exit:");
1.101 +	asm("test dword ptr [esp+16], 3 "); // returning to user mode?
1.102 +	asm("jz short fast_exec_exit2 "); // no - don't do lock check or user mode callbacks
1.103 +#ifdef __CHECK_LOCK_STATE__
1.104 +	asm("call %a0" : : "i" (&check_lock_state));
1.105 +#endif
1.106 +	asm("push eax");	// preserve exec return value across callback run
1.107 +#ifdef __GCC32__
1.108 +	asm("mov ecx, [%a0]": : "i"(&TheScheduler.iCurrentThread));
1.109 +	asm("push ecx");
1.110 +	asm("call __ZN11NThreadBase21CallUserModeCallbacksEv ");
1.111 +	asm("add esp,4");
1.112 +#else
1.113 +	TheScheduler.iCurrentThread->CallUserModeCallbacks();
1.114 +#endif
1.115 +	asm("pop eax");
1.116 +
1.117 +	asm("fast_exec_exit2: ");
1.118 +	asm("pop gs");
1.119 +	asm("pop es");
1.120 +	asm("pop ds");
1.121 +	asm("iretd");
1.122 +
1.123 +	asm("wait_for_any_request:");
1.124 +	asm("push ds");
1.125 +	asm("push es");
1.126 +	asm("mov cx, ss");
1.127 +	asm("mov ds, cx");	// DS/ES = kernel data for the duration of the call
1.128 +	asm("mov es, cx");
1.129 +	asm("call %a0" : : "i" (NKern_WaitForAnyRequest));
1.130 +
1.131 +	asm("test dword ptr [esp+12], 3 "); // returning to user mode?
1.132 +	asm("jz short wfar_exit2 "); // no - don't do lock check or user mode callbacks
1.133 +	asm("push eax");
1.134 +	asm("cli");
1.135 +#ifdef __GCC32__
1.136 +	asm("mov ecx, [%a0]": : "i"(&TheScheduler.iCurrentThread));
1.137 +	asm("push ecx");
1.138 +	asm("call __ZN11NThreadBase21CallUserModeCallbacksEv");
1.139 +	asm("add esp,4");
1.140 +#else
1.141 +	TheScheduler.iCurrentThread->CallUserModeCallbacks();
1.142 +#endif
1.143 +	asm("pop eax");
1.144 +#ifdef __CHECK_LOCK_STATE__
1.145 +	asm("call %a0" : : "i" (&check_lock_state));
1.146 +#endif
1.147 +
1.148 +	asm("wfar_exit2: ");
1.149 +	asm("pop es");
1.150 +	asm("pop ds");
1.151 +	asm("iretd");
1.152 +
1.153 +	asm("fast_exec_invalid:");
1.154 +	asm("pop ecx");	// retrieve parameter, then save all registers a C++ handler may need preserved
1.155 +	asm("push ebp");
1.156 +	asm("push edi");
1.157 +	asm("push esi");
1.158 +	asm("push ebx");
1.159 +	asm("push edx");
1.160 +	asm("push ecx");
1.161 +	asm("mov edi, [%a0]": :"i"(&TheScheduler.iCurrentThread)); // edi=TheCurrentThread
1.162 +	asm("mov esi, [edi+%0]" : : "i"_FOFF(NThreadBase,iSlowExecTable)); // esi=slow exec table base
1.163 +	asm("call [esi-8]"); // call invalid exec handler
1.164 +	asm("pop ecx");
1.165 +	asm("pop edx");
1.166 +	asm("pop ebx");
1.167 +	asm("pop esi");
1.168 +	asm("pop edi");
1.169 +	asm("pop ebp");
1.170 +	asm("jmp fast_exec_exit");
1.171 +	}
1.172 +
1.173 +/******************************************************************************
1.174 +* Int 21h Handler - Slow Executive Calls
1.175 +* Enter with:
1.176 +* Call number in EAX
1.177 +* Parameters in ECX, EDX, EBX, ESI in that order
1.178 +* On entry SS:ESP references current threads supervisor stack
1.179 +* Must preserve EBX, EBP, ESI, EDI
1.180 +* [ESP+0] = return EIP
1.181 +* [ESP+4] = return CS
1.182 +* [ESP+8] = return EFLAGS
1.183 +* [ESP+12] = return ESP if privilege change occurred
1.184 +* [ESP+16] = return SS if privilege change occurred
1.185 +*******************************************************************************/
1.186 +GLDEF_C __NAKED__ void __X86Vector21()
1.187 +	{
1.188 +	// Interrupts enabled on entry
1.189 +	asm("sub esp, 32"); // reserve space for additional arguments
1.190 +	asm("cld");
1.191 +	asm("push ds");
1.192 +	asm("push es");
1.193 +	asm("push gs");
1.194 +	asm("push ebp");
1.195 +	asm("mov bp, ds");
1.196 +	asm("mov gs, bp");	// GS = caller's DS (used below to read user-side argument block)
1.197 +	asm("mov bp, ss");
1.198 +	asm("mov ds, bp");	// DS/ES = kernel data
1.199 +	asm("mov es, bp");
1.200 +	asm("push edi");
1.201 +	asm("mov edi, [%a0]": : "i"(&TheScheduler.iCurrentThread)); // edi=TheCurrentThread
1.202 +	asm("push esi");
1.203 +	asm("mov esi, [edi+%0]" : : "i"_FOFF(NThreadBase,iSlowExecTable)); // esi=slow exec table base
1.204 +	asm("push ebx");
1.205 +	asm("push edx");
1.206 +	asm("push ecx");
1.207 +	asm("lea ebp, [esi+eax*8]"); // ebp points to exec table entry
1.208 +	asm("cmp eax, [esi-12]");	// bounds-check call number
1.209 +	asm("jae slow_exec_invalid");
1.210 +	asm("mov ebx, [ebp]"); // ebx=flags
1.211 +	asm("test ebx, 0x1c000000"); // additional arguments required?
1.212 +	asm("jz slow_exec_no_extra_args");
1.213 +	asm("mov edx, [esp+8]"); // edx points to additional args
1.214 +	asm("lea eax, [esp+36]"); // address of copied additional arguments
1.215 +	asm("mov [esp+8], eax"); // replace supplied address
1.216 +	asm("mov ecx, ebx");
1.217 +	asm("shr ecx, 26");
1.218 +	asm("and cl, 7"); // ecx=number of additional arguments-1
1.219 +	asm("test edx, edx");
1.220 +	asm("jnz slow_exec_extra_args_present"); // if arg ptr not NULL, copy args
1.221 +	asm("slow_exec_zero_args:");
1.222 +	asm("mov [esp+ecx*4+36], edx"); // else zero args
1.223 +	asm("dec ecx");
1.224 +	asm("jns slow_exec_zero_args");
1.225 +	asm("jmp slow_exec_no_extra_args");
1.226 +
1.227 +	asm("slow_exec_extra_args_present:");
1.228 +	asm("slow_exec_copy_args:");
1.229 +	asm("mov eax, gs:[edx+ecx*4]"); // get argument (GS still addresses the caller's data segment)
1.230 +	asm("mov [esp+ecx*4+36], eax"); // copy it
1.231 +	asm("dec ecx");
1.232 +	asm("jns slow_exec_copy_args");
1.233 +
1.234 +	asm("slow_exec_no_extra_args:");
1.235 +	asm("test ebx, 0x80000000"); // test EClaim
1.236 +	asm("jz slow_exec_no_claim");
1.237 +	asm("call %a0" : : "i"(NKern_LockSystem)); // trashes eax, ecx, edx
1.238 +	asm("slow_exec_no_claim:");
1.239 +	asm("test ebx, 0x20000000"); // test EPreprocess
1.240 +	asm("jz slow_exec_no_preprocess");
1.241 +	asm("call [esi-4]"); // trashes eax, ecx, edx, edi
1.242 +	asm("slow_exec_no_preprocess:");
1.243 +	asm("call [ebp+4]"); // call exec function
1.244 +	asm("test ebx, 0x40000000"); // test ERelease
1.245 +	asm("jz slow_exec_no_release");
1.246 +	asm("mov edi, eax"); // save return value in EDI
1.247 +	asm("call %a0" : : "i"(NKern_UnlockSystem)); // trashes eax, ecx, edx
1.248 +	asm("mov eax, edi"); // restore return value
1.249 +	asm("slow_exec_no_release:");
1.250 +
1.251 +	asm("slow_exec_exit:");
1.252 +	asm("test dword ptr [esp+72], 3 "); // returning to user mode?
1.253 +	asm("jz short slow_exec_exit2 "); // no - don't do lock check or user mode callbacks
1.254 +#ifdef __CHECK_LOCK_STATE__
1.255 +	asm("call %a0" : : "i" (&check_lock_state));
1.256 +#endif
1.257 +	asm("push eax");	// preserve exec return value across callback run
1.258 +	asm("cli");
1.259 +#ifdef __GCC32__
1.260 +	asm("mov ecx, [%a0]": : "i"(&TheScheduler.iCurrentThread));
1.261 +	asm("push ecx");
1.262 +	asm("call __ZN11NThreadBase21CallUserModeCallbacksEv");
1.263 +	asm("add esp,4");
1.264 +#else
1.265 +	TheScheduler.iCurrentThread->CallUserModeCallbacks();
1.266 +#endif
1.267 +	asm("pop eax");
1.268 +
1.269 +	asm("slow_exec_exit2: ");
1.270 +	asm("pop ecx");
1.271 +	asm("pop edx");
1.272 +	asm("pop ebx");
1.273 +	asm("pop esi");
1.274 +	asm("pop edi");
1.275 +	asm("pop ebp");
1.276 +	asm("pop gs");
1.277 +	asm("pop es");
1.278 +	asm("pop ds");
1.279 +	asm("add esp, 32"); // remove additional arguments
1.280 +	asm("iretd");
1.281 +
1.282 +	asm("slow_exec_invalid:");
1.283 +	asm("call [esi-8]"); // call invalid exec handler
1.284 +	asm("jmp slow_exec_exit");
1.285 +	}
1.286 +
1.287 +// Precomputed 4-byte BTrace record headers (size | category | subcategory) for IRQ entry/exit tracing.
1.288 +const TUint32 irq_start_trace_header = ((4<<BTrace::ESizeIndex) + (BTrace::ECpuUsage<<BTrace::ECategoryIndex*8) + (BTrace::EIrqStart<<BTrace::ESubCategoryIndex*8));
1.289 +const TUint32 irq_end_trace_header = ((4<<BTrace::ESizeIndex) + (BTrace::ECpuUsage<<BTrace::ECategoryIndex*8) + (BTrace::EIrqEnd<<BTrace::ESubCategoryIndex*8));
1.289 +
1.290 +/******************************************************************************
1.291 +* IRQ Preamble/Postamble Common Code
1.292 +* On entry SS:ESP references current threads supervisor stack
1.293 +* [ESP+0] = vector number
1.294 +* [ESP+4] = return EIP
1.295 +* [ESP+8] = return CS
1.296 +* [ESP+12] = return EFLAGS
1.297 +* [ESP+16] = return ESP if privilege change occurred
1.298 +* [ESP+20] = return SS if privilege change occurred
1.299 +*******************************************************************************/
1.300 +__NAKED__ void __X86VectorIrq()
1.301 +	{
1.302 +	asm("push ds");
1.303 +	asm("push es");
1.304 +	asm("push eax");
1.305 +	asm("mov ax, ss");
1.306 +	asm("cld");
1.307 +	asm("push ecx");
1.308 +	asm("push edx");
1.309 +	asm("mov ds, ax");	// DS/ES = kernel data
1.310 +	asm("mov es, ax");
1.311 +	asm("mov eax, esp"); // eax points to saved stuff
1.312 +	asm("inc dword ptr [%a0]": : "i"(&X86_IrqNestCount)); // nest count starts at -1
1.313 +	asm("jnz nested_irq_entry");	// already on IRQ stack if nested
1.314 +#ifdef __GCC32__
1.315 +	asm("mov esp, %0" : : "i"(&X86_IrqStack));	// first-level IRQ: switch to dedicated IRQ stack
1.316 +	asm("add esp, %0" : : "i"(IRQ_STACK_SIZE));
1.317 +#else
1.318 +	_asm lea esp, X86_IrqStack[IRQ_STACK_SIZE]
1.319 +#endif
1.320 +	asm("push eax");	// save interrupted thread's ESP at top of IRQ stack
1.321 +	asm("nested_irq_entry:");
1.322 +
1.323 +#ifdef BTRACE_CPU_USAGE
1.324 +	asm("cmp byte ptr [%a0], 0": : "i"(&TheScheduler.iCpuUsageFilter));
1.325 +	asm("jz no_trace");
1.326 +	asm("push eax");
1.327 +	asm("push %0": :"i"(irq_start_trace_header));
1.328 +	asm("call dword ptr [%a0]": : "i"(&TheScheduler.iBTraceHandler));
1.329 +	asm("pop eax");
1.330 +	asm("pop eax");
1.331 +	asm("no_trace:");
1.332 +#endif
1.333 +	asm("call [%a0]": : "i"(&X86_IrqHandler));	// dispatch to the installed IRQ handler
1.334 +
1.335 +	// Postamble. Interrupts disabled here.
1.336 +	asm("xor eax, eax");
1.337 +	asm("dec dword ptr [%a0]": : "i"(&X86_IrqNestCount));
1.338 +	asm("jns nested_irq_exit");	// still nested - return to interrupted IRQ
1.339 +	asm("cmp eax, [%a0]": : "i"(&TheScheduler.iKernCSLocked));	// kernel locked? then no reschedule here
1.340 +	asm("lea edx, %a0": : "i"(&TheScheduler.iRescheduleNeededFlag));
1.341 +	asm("jnz irq_kernel_locked_exit");
1.342 +	asm("cmp eax, [edx]");	// reschedule needed?
1.343 +	asm("jz irq_kernel_locked_exit");
1.344 +	asm("inc eax");
1.345 +	asm("mov [%a0], eax": : "i"(&TheScheduler.iKernCSLocked));	// lock the kernel before rescheduling
1.346 +	asm("pop eax");
1.347 +	asm("mov esp, eax");	// back onto the interrupted thread's stack
1.348 +	asm("sti");
1.349 +	asm("call %a0" : : "i"(TScheduler_Reschedule));
1.350 +	asm("jmp irq_exit");
1.351 +
1.352 +	asm("irq_kernel_locked_exit:");
1.353 +	asm("pop eax");
1.354 +	asm("mov esp, eax");	// restore interrupted thread's stack pointer
1.355 +
1.356 +	asm("nested_irq_exit:");
1.357 +#ifdef BTRACE_CPU_USAGE
1.358 +	asm("cmp byte ptr [%a0], 0": : "i"(&TheScheduler.iCpuUsageFilter));
1.359 +	asm("jz no_trace2");
1.360 +	asm("push %0": : "i"(irq_end_trace_header));
1.361 +	asm("call dword ptr [%a0]": : "i"(&TheScheduler.iBTraceHandler));
1.362 +	asm("pop eax");
1.363 +	asm("no_trace2:");
1.364 +#endif
1.365 +	asm("irq_exit:");
1.366 +	asm("test dword ptr [esp+28], 3 "); // check if we came from kernel mode
1.367 +	asm("jz short irq_exit2 ");
1.368 +#ifdef __CHECK_LOCK_STATE__
1.369 +	asm("call %a0" : : "i" (&check_lock_state));
1.370 +#endif
1.371 +#ifdef __GCC32__
1.372 +	asm("mov ecx, [%a0]": : "i"(&TheScheduler.iCurrentThread));
1.373 +	asm("push ecx");
1.374 +	asm("call __ZN11NThreadBase21CallUserModeCallbacksEv");
1.375 +	asm("add esp,4");
1.376 +#else
1.377 +	TheScheduler.iCurrentThread->CallUserModeCallbacks();
1.378 +#endif
1.379 +	asm("irq_exit2:");
1.380 +	asm("pop edx");
1.381 +	asm("pop ecx");
1.382 +	asm("pop eax");
1.383 +	asm("pop es");
1.384 +	asm("pop ds");
1.385 +	asm("add esp, 4");	// remove vector number pushed by the stub
1.386 +	asm("iretd");
1.387 +	}
1.388 +
1.389 +
1.390 +/******************************************************************************
1.391 +* General Exception Handler
1.392 +* On entry SS:ESP references current threads supervisor stack
1.393 +* [ESP+0] = vector number
1.394 +* [ESP+4] = error code (filled with 0 for exceptions without error codes)
1.395 +* [ESP+8] = return EIP
1.396 +* [ESP+12] = return CS
1.397 +* [ESP+16] = return EFLAGS
1.398 +* [ESP+20] = return ESP if privilege change occurred
1.399 +* [ESP+24] = return SS if privilege change occurred
1.400 +*******************************************************************************/
1.401 +GLDEF_C __NAKED__ void __X86VectorExc()
1.402 +	{
1.403 +	asm("push ds");
1.404 +	asm("push es");
1.405 +	asm("push fs");
1.406 +	asm("push gs");
1.407 +	asm("cld");
1.408 +	asm("push ebp");
1.409 +	asm("mov bp, ds");
1.410 +	asm("push edi");
1.411 +	asm("mov gs, bp");	// GS = faulting context's DS
1.412 +	asm("mov bp, ss");
1.413 +	asm("push esi");
1.414 +	asm("push ebx");
1.415 +	asm("push ecx");
1.416 +	asm("push edx");
1.417 +	asm("push eax");
1.418 +	asm("mov eax, cr2");	// CR2 = faulting linear address (valid for page faults)
1.419 +	asm("mov ds, bp");	// DS/ES = kernel data
1.420 +	asm("mov es, bp");
1.421 +	asm("push eax");	// iFaultAddress
1.422 +	asm("sub esp, 8");	// space for iEsp and iSs words, filled in below
1.423 +	asm("mov ebp, esp"); // ebp points to exception info frame
1.424 +	asm("mov edi, [%a0]": : "i"(&TheScheduler.iCurrentThread));
1.425 +	asm("xor eax, eax");
1.426 +	asm("mov ax, ss");
1.427 +	asm("mov [ebp+4], eax"); // SS
1.428 +	asm("mov eax, ebp");
1.429 +	asm("add eax, 76"); // EAX = ESP at point of exception if ring 0
1.430 +	asm("test dword ptr [ebp+68], 3"); // check if we came from kernel mode
1.431 +	asm("jz ring0_exception");
1.432 +	asm("mov byte ptr [edi+11], 1");	// NOTE(review): byte flag at offset 11 in NThread, set while handling a user-mode exception and cleared at 'proceed' - confirm which field this is
1.433 +	asm("add eax, 8"); // EAX = ESP at point of exception if ring 3
1.434 +	asm("ring0_exception:");
1.435 +	asm("mov [ebp], eax");	// iEsp
1.436 +	asm("cmp dword ptr [%a0], -1": : "i"(&X86_IrqNestCount));	// exception inside an IRQ is fatal
1.437 +	asm("jnz fatal_exception_irq");
1.438 +	asm("cmp dword ptr [%a0], 0": : "i"(&TheScheduler.iKernCSLocked));	// exception with kernel locked is fatal
1.439 +	asm("jnz fatal_exception_locked");
1.440 +	asm("cmp dword ptr [ebp+%0], 7": :"i"_FOFF(TX86ExcInfo,iExcId)); // check for device not available
1.441 +	asm("jne not_fpu");
1.442 +	asm("mov dword ptr [%a0], 1": :"i"(&TheScheduler.iKernCSLocked));	// lock kernel while restoring FPU state
1.443 +	asm("clts");	// clear CR0.TS so FPU instructions no longer fault
1.444 +	asm("frstor [edi+%0]": :"i"_FOFF(NThread,iCoprocessorState));	// lazy FPU context restore for current thread
1.445 +	asm("call %a0": :"i"(NKern_Unlock));
1.446 +	asm("add esp, 12");	// remove iEsp, iSs, iFaultAddress
1.447 +	asm("jmp proceed");
1.448 +
1.449 +	asm("not_fpu:");
1.450 +	asm("mov eax, [edi+%0]" : : "i"_FOFF(NThreadBase,iHandlers));
1.451 +	asm("push edi"); // pass current thread parameter
1.452 +	asm("push ebp"); // pass frame address
1.453 +	asm("call [eax+%0]" : : "i"_FOFF(SNThreadHandlers,iExceptionHandler));
1.454 +	asm("add esp, 20"); // remove parameters, esp, ss, fault address
1.455 +
1.456 +	asm("proceed:");
1.457 +	asm("mov byte ptr [edi+11], 0 ");
1.458 +	asm("test dword ptr [esp+56], 3 "); // check if we came from kernel mode
1.459 +	asm("jz short proceed2 ");
1.460 +	asm("cli");
1.461 +#ifdef __CHECK_LOCK_STATE__
1.462 +	asm("call %a0" : : "i" (&check_lock_state));
1.463 +#endif
1.464 +#ifdef __GCC32__
1.465 +	asm("mov ecx, [%a0]": : "i"(&TheScheduler.iCurrentThread));
1.466 +	asm("push ecx");
1.467 +	asm("call __ZN11NThreadBase21CallUserModeCallbacksEv");
1.468 +	asm("add esp,4");
1.469 +#else
1.470 +	TheScheduler.iCurrentThread->CallUserModeCallbacks();
1.471 +#endif
1.472 +	asm("proceed2:");
1.473 +	asm("pop eax");
1.474 +	asm("pop edx");
1.475 +	asm("pop ecx");
1.476 +	asm("pop ebx");
1.477 +	asm("pop esi");
1.478 +	asm("pop edi");
1.479 +	asm("pop ebp");
1.480 +	asm("pop gs");
1.481 +	asm("pop fs");
1.482 +	asm("pop es");
1.483 +	asm("pop ds");
1.484 +	asm("add esp, 8"); // skip vector number and error code
1.485 +	asm("iretd");
1.486 +
1.487 +	asm("fatal_exception_irq:");
1.488 +	asm("fatal_exception_locked:");
1.489 +	asm("lea eax, %a0": :"i"(&TheScheduler));
1.490 +	asm("lea eax, [eax+%0]": :"i"_FOFF(TScheduler,iMonitorExceptionHandler));
1.491 +	asm("mov eax,[eax]");
1.492 +
1.493 +	asm("test eax, eax");
1.494 +	asm("jnz monitor_exception");	// crash monitor installed - hand over
1.495 +	asm("push ebp");	// pass exception info frame
1.496 +	asm("call %a0": :"i"(&__X86ExcFault)); // doesn't return
1.497 +
1.498 +	asm("monitor_exception:");
1.499 +	asm("jmp eax");
1.500 +	}
1.501 +
1.502 +
1.503 +/******************************************************************************
1.504 +* Exception Handlers
1.505 +*******************************************************************************/
1.506 +
1.507 +DECLARE_X86_EXC_NOERR(00)	// stubs for CPU exceptions 0x00-0x1F; _ERR variants are those where the CPU pushes an error code (8, 0A-0E, 11)
1.508 +DECLARE_X86_EXC_NOERR(01)
1.509 +DECLARE_X86_EXC_NOERR(02)
1.510 +DECLARE_X86_EXC_NOERR(03)
1.511 +DECLARE_X86_EXC_NOERR(04)
1.512 +DECLARE_X86_EXC_NOERR(05)
1.513 +DECLARE_X86_EXC_NOERR(06)
1.514 +DECLARE_X86_EXC_NOERR(07)
1.515 +DECLARE_X86_EXC_ERR(08)
1.516 +DECLARE_X86_EXC_NOERR(09)
1.517 +DECLARE_X86_EXC_ERR(0A)
1.518 +DECLARE_X86_EXC_ERR(0B)
1.519 +DECLARE_X86_EXC_ERR(0C)
1.520 +DECLARE_X86_EXC_ERR(0D)
1.521 +DECLARE_X86_EXC_ERR(0E)
1.522 +DECLARE_X86_EXC_NOERR(0F)
1.523 +DECLARE_X86_EXC_NOERR(10)
1.524 +DECLARE_X86_EXC_ERR(11)
1.525 +DECLARE_X86_EXC_NOERR(12)
1.526 +DECLARE_X86_EXC_NOERR(13)
1.527 +DECLARE_X86_EXC_NOERR(14)
1.528 +DECLARE_X86_EXC_NOERR(15)
1.529 +DECLARE_X86_EXC_NOERR(16)
1.530 +DECLARE_X86_EXC_NOERR(17)
1.531 +DECLARE_X86_EXC_NOERR(18)
1.532 +DECLARE_X86_EXC_NOERR(19)
1.533 +DECLARE_X86_EXC_NOERR(1A)
1.534 +DECLARE_X86_EXC_NOERR(1B)
1.535 +DECLARE_X86_EXC_NOERR(1C)
1.536 +DECLARE_X86_EXC_NOERR(1D)
1.537 +DECLARE_X86_EXC_NOERR(1E)
1.538 +DECLARE_X86_EXC_NOERR(1F)
1.539 +
1.540 +/******************************************************************************
1.541 +* Interrupt Handlers
1.542 +*******************************************************************************/
1.543 +
1.544 +DECLARE_X86_INT(30)	// stubs for hardware IRQ vectors 0x30-0x3F; each pushes its vector number and jumps to __X86VectorIrq
1.545 +DECLARE_X86_INT(31)
1.546 +DECLARE_X86_INT(32)
1.547 +DECLARE_X86_INT(33)
1.548 +DECLARE_X86_INT(34)
1.549 +DECLARE_X86_INT(35)
1.550 +DECLARE_X86_INT(36)
1.551 +DECLARE_X86_INT(37)
1.552 +DECLARE_X86_INT(38)
1.553 +DECLARE_X86_INT(39)
1.554 +DECLARE_X86_INT(3A)
1.555 +DECLARE_X86_INT(3B)
1.556 +DECLARE_X86_INT(3C)
1.557 +DECLARE_X86_INT(3D)
1.558 +DECLARE_X86_INT(3E)
1.559 +DECLARE_X86_INT(3F)
1.560 +
1.561 +/*const*/ PFV TheExcVectors[64]=	// handler table indexed by vector number: 0x00-0x1F exceptions, 0x20/0x21 exec calls, 0x30-0x3F IRQs
1.562 +{
1.563 +__X86Vector00, __X86Vector01, __X86Vector02, __X86Vector03,
1.564 +__X86Vector04, __X86Vector05, __X86Vector06, __X86Vector07,
1.565 +__X86Vector08, __X86Vector09, __X86Vector0A, __X86Vector0B,
1.566 +__X86Vector0C, __X86Vector0D, __X86Vector0E, __X86Vector0F,
1.567 +__X86Vector10, __X86Vector11, __X86Vector12, __X86Vector13,
1.568 +__X86Vector14, __X86Vector15, __X86Vector16, __X86Vector17,
1.569 +__X86Vector18, __X86Vector19, __X86Vector1A, __X86Vector1B,
1.570 +__X86Vector1C, __X86Vector1D, __X86Vector1E, __X86Vector1F,
1.571 +__X86Vector20, __X86Vector21, NULL, NULL,	// vectors 0x22-0x2F unused
1.572 +NULL, NULL, NULL, NULL,
1.573 +NULL, NULL, NULL, NULL,
1.574 +NULL, NULL, NULL, NULL,
1.575 +__X86Vector30, __X86Vector31, __X86Vector32, __X86Vector33,
1.576 +__X86Vector34, __X86Vector35, __X86Vector36, __X86Vector37,
1.577 +__X86Vector38, __X86Vector39, __X86Vector3A, __X86Vector3B,
1.578 +__X86Vector3C, __X86Vector3D, __X86Vector3E, __X86Vector3F
1.579 +};
1.580 +
1.581 +EXPORT_C __NAKED__ TUint32 X86::IrqReturnAddress()
1.582 +	{
1.583 +	// Return the EIP the current (first-level) interrupt will resume at.
1.584 +	asm("mov eax, %0": :"i"(&X86_IrqStack[0]));
1.585 +	asm("mov eax, [eax + %0]": :"i"(IRQ_STACK_SIZE - 4)); // pointer to saved supervisor stack pointer
1.586 +	asm("mov eax, [eax+24]"); // return address from original interrupt
1.587 +	asm("ret");
1.588 +	}
1.588 +
1.589 +__NAKED__ TUint32 get_cr0()
1.590 +	{
1.591 +	asm("mov eax, cr0");	// return the CR0 control register
1.592 +	asm("ret");
1.593 +	}
1.594 +
1.595 +__NAKED__ TUint32 get_cr3()
1.596 +	{
1.597 +	asm("mov eax, cr3");	// return CR3 (page directory base); was "cr0" - copy-paste bug from get_cr0 above
1.598 +	asm("ret");
1.599 +	}
1.600 +
1.601 +__NAKED__ TUint32 get_esp()
1.602 +	{
1.603 +	asm("mov eax, esp");	// return the caller's stack pointer (points at our return address)
1.604 +	asm("ret");
1.605 +	}
1.606 +
1.607 +__NAKED__ void __lidt(SX86Des* /*aTable*/, TInt /*aLimit*/)
1.608 +	{
1.609 +	// Load the IDT register from (aTable, aLimit descriptors) and load the task register.
1.610 +	asm("mov eax, [esp+4]");	// eax = aTable (IDT base)
1.611 +	asm("mov ecx, [esp+8]");	// ecx = aLimit (descriptor count)
1.612 +	asm("shl ecx, 3");	// descriptors are 8 bytes each
1.613 +	asm("sub ecx, 1");	// IDT limit field = size in bytes - 1
1.614 +	asm("sub esp, 8");	// build the 6-byte pseudo-descriptor on the stack
1.615 +	asm("mov word ptr [esp], cx");
1.616 +	asm("mov dword ptr [esp+2], eax");
1.617 +	asm("lidt [esp]");
1.618 +	asm("add esp, 8");
1.619 +	asm("mov eax, 0x28");	// TSS selector
1.620 +	asm("ltr ax");
1.621 +	asm("ret");
1.622 +	}
1.622 +