// Copyright (c) 2007-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\nkern\x86\vectors.cia
//
//

#include <x86.h>
#include "vectors.h"

#ifdef __GCC32__
#define DECLARE_X86_INT(n)			GLDEF_C __NAKED__ void __X86Vector##n() { asm("push 0x"#n); asm("jmp %a0": :"i"(&__X86VectorIrq)); }
#define DECLARE_X86_EXC_NOERR(n)	GLDEF_C __NAKED__ void __X86Vector##n() { asm("push 0"); asm("push 0x"#n); asm("jmp %a0": : "i"(&__X86VectorExc)); }
#define DECLARE_X86_EXC_ERR(n)		GLDEF_C __NAKED__ void __X86Vector##n() { asm("push 0x"#n); asm("jmp %a0": : "i"(&__X86VectorExc)); }
#else
#define DECLARE_X86_INT(n)			GLDEF_C __NAKED__ void __X86Vector##n() { __asm push 0x##n __asm jmp __X86VectorIrq }
#define DECLARE_X86_EXC_NOERR(n)	GLDEF_C __NAKED__ void __X86Vector##n() { __asm push 0 __asm push 0x##n __asm jmp __X86VectorExc }
#define DECLARE_X86_EXC_ERR(n)		GLDEF_C __NAKED__ void __X86Vector##n() { __asm push 0x##n __asm jmp __X86VectorExc }
#endif
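
// For illustration only: under GCC, DECLARE_X86_INT(30) expands to roughly
//
//	GLDEF_C __NAKED__ void __X86Vector30()
//		{
//		asm("push 0x30");							// vector number
//		asm("jmp %a0": :"i"(&__X86VectorIrq));		// common IRQ preamble
//		}
//
// i.e. each stub pushes its vector number and branches to the shared
// handler. The exception stubs do the same but branch to __X86VectorExc,
// first pushing a dummy error code where the CPU does not supply one.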

const TLinAddr NKern_WaitForAnyRequest = (TLinAddr)&NKern::WaitForAnyRequest;
const TLinAddr NKern_LockSystem = (TLinAddr)&NKern::LockSystem;
const TLinAddr NKern_UnlockSystem = (TLinAddr)&NKern::UnlockSystem;
const TLinAddr NKern_Unlock = (TLinAddr)&NKern::Unlock;
const TLinAddr TScheduler_Reschedule = (TLinAddr)&TScheduler::Reschedule;

#ifdef __CHECK_LOCK_STATE__
/******************************************************************************
 * Check that the kernel is unlocked, no fast mutex is held and the thread
 * is not in a critical section when returning to user mode.
 ******************************************************************************/
extern "C" __NAKED__ void check_lock_state()
	{
	asm("push ecx ");
	asm("mov ecx, [%a0]" : : "i" (&TheScheduler.iCurrentThread));
	asm("cmp dword ptr [%a0], 0" : : "i" (&TheScheduler.iKernCSLocked));
	asm("jnz short bad_lock_state1 ");
	asm("cmp dword ptr [ecx+%0], 0" : : "i" _FOFF(NThreadBase, iHeldFastMutex));
	asm("jne short bad_lock_state2 ");
	asm("cmp dword ptr [ecx+%0], 0" : : "i" _FOFF(NThreadBase, iCsCount));
	asm("jne short bad_lock_state3 ");
	asm("pop ecx ");
	asm("ret ");
	asm("bad_lock_state1: ");			// kernel was locked
	asm("int 0xff ");
	asm("bad_lock_state2: ");			// a fast mutex was held
	asm("int 0xff ");
	asm("bad_lock_state3: ");			// thread was in a critical section
	asm("int 0xff ");
	}
#endif

/******************************************************************************
* Int 20h Handler - Fast Executive Calls
* Enter with:
*		Call number in EAX
*		Parameter in ECX if any
* On entry SS:ESP references the current thread's supervisor stack
* [ESP+0] = return EIP
* [ESP+4] = return CS
* [ESP+8] = return EFLAGS
* [ESP+12] = return ESP if privilege change occurred
* [ESP+16] = return SS if privilege change occurred
*******************************************************************************/
GLDEF_C __NAKED__ void __X86Vector20()
	{
	// Interrupts enabled on entry
	asm("cld");
	asm("test eax, eax");
	asm("je wait_for_any_request");
	asm("push ds");
	asm("push es");
	asm("push gs");
	asm("push ecx");
	asm("mov cx, ds");
	asm("mov dx, ss");
	asm("mov ds, dx");					// ds = kernel data segment
	asm("mov gs, cx");					// gs = caller's data segment
	asm("mov ecx, [%a0]": :"i"(&TheScheduler.iCurrentThread));
	asm("mov es, dx");					// es = kernel data segment
	asm("mov edx, [ecx+%0]" : : "i"_FOFF(NThreadBase,iFastExecTable));
	asm("cmp eax, [edx]");				// compare call number against table size
	asm("jae fast_exec_invalid");
	asm("cli");
	asm("call [edx+eax*4]");			// call fast exec function
	asm("add esp, 4");					// remove pushed parameter
	asm("fast_exec_exit:");
	asm("test dword ptr [esp+16], 3 ");	// returning to user mode?
	asm("jz short fast_exec_exit2 ");	// no - don't do lock check or user mode callbacks
#ifdef __CHECK_LOCK_STATE__
	asm("call %a0" : : "i" (&check_lock_state));
#endif
	asm("push eax");
#ifdef __GCC32__
	asm("mov ecx, [%a0]": : "i"(&TheScheduler.iCurrentThread));
	asm("push ecx");
	asm("call __ZN11NThreadBase21CallUserModeCallbacksEv ");
	asm("add esp,4");
#else
	TheScheduler.iCurrentThread->CallUserModeCallbacks();
#endif
	asm("pop eax");

	asm("fast_exec_exit2: ");
	asm("pop gs");
	asm("pop es");
	asm("pop ds");
	asm("iretd");

	asm("wait_for_any_request:");
	asm("push ds");
	asm("push es");
	asm("mov cx, ss");
	asm("mov ds, cx");
	asm("mov es, cx");
	asm("call %a0" : : "i" (NKern_WaitForAnyRequest));

	asm("test dword ptr [esp+12], 3 ");	// returning to user mode?
	asm("jz short wfar_exit2 ");		// no - don't do lock check or user mode callbacks
	asm("push eax");
	asm("cli");
#ifdef __GCC32__
	asm("mov ecx, [%a0]": : "i"(&TheScheduler.iCurrentThread));
	asm("push ecx");
	asm("call __ZN11NThreadBase21CallUserModeCallbacksEv");
	asm("add esp,4");
#else
	TheScheduler.iCurrentThread->CallUserModeCallbacks();
#endif
	asm("pop eax");
#ifdef __CHECK_LOCK_STATE__
	asm("call %a0" : : "i" (&check_lock_state));
#endif

	asm("wfar_exit2: ");
	asm("pop es");
	asm("pop ds");
	asm("iretd");

	asm("fast_exec_invalid:");
	asm("pop ecx");						// restore caller's ECX
	asm("push ebp");
	asm("push edi");
	asm("push esi");
	asm("push ebx");
	asm("push edx");
	asm("push ecx");
	asm("mov edi, [%a0]": :"i"(&TheScheduler.iCurrentThread));			// edi=TheCurrentThread
	asm("mov esi, [edi+%0]" : : "i"_FOFF(NThreadBase,iSlowExecTable));	// esi=slow exec table base
	asm("call [esi-8]");												// call invalid exec handler
	asm("pop ecx");
	asm("pop edx");
	asm("pop ebx");
	asm("pop esi");
	asm("pop edi");
	asm("pop ebp");
	asm("jmp fast_exec_exit");
	}
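
// Illustrative sketch (not part of the original source): a user-side stub
// for a fast exec call would look something like the following, matching
// the convention documented above (call number in EAX, single parameter in
// ECX, result back in EAX). The call number 1 and the guard macro
// __VECTORS_ILLUSTRATION__ are hypothetical.
#ifdef __VECTORS_ILLUSTRATION__
__NAKED__ TInt ExampleFastExecCall(TInt /*aParam*/)
	{
	asm("mov ecx, [esp+4]");	// parameter in ECX
	asm("mov eax, 1");			// fast exec call number in EAX (hypothetical)
	asm("int 0x20");			// enter __X86Vector20
	asm("ret");					// result in EAX
	}
#endif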

/******************************************************************************
* Int 21h Handler - Slow Executive Calls
* Enter with:
*		Call number in EAX
*		Parameters in ECX, EDX, EBX, ESI in that order
* On entry SS:ESP references the current thread's supervisor stack
* Must preserve EBX, EBP, ESI, EDI
* [ESP+0] = return EIP
* [ESP+4] = return CS
* [ESP+8] = return EFLAGS
* [ESP+12] = return ESP if privilege change occurred
* [ESP+16] = return SS if privilege change occurred
*******************************************************************************/
GLDEF_C __NAKED__ void __X86Vector21()
	{
	// Interrupts enabled on entry
	asm("sub esp, 32");								// reserve space for additional arguments
	asm("cld");
	asm("push ds");
	asm("push es");
	asm("push gs");
	asm("push ebp");
	asm("mov bp, ds");
	asm("mov gs, bp");								// gs = caller's data segment
	asm("mov bp, ss");
	asm("mov ds, bp");								// ds = kernel data segment
	asm("mov es, bp");								// es = kernel data segment
	asm("push edi");
	asm("mov edi, [%a0]": : "i"(&TheScheduler.iCurrentThread));	// edi=TheCurrentThread
	asm("push esi");
	asm("mov esi, [edi+%0]" : : "i"_FOFF(NThreadBase,iSlowExecTable)); // esi=slow exec table base
	asm("push ebx");
	asm("push edx");
	asm("push ecx");
	asm("lea ebp, [esi+eax*8]");					// ebp points to exec table entry
	asm("cmp eax, [esi-12]");						// compare call number against table size
	asm("jae slow_exec_invalid");
	asm("mov ebx, [ebp]");							// ebx=flags
	asm("test ebx, 0x1c000000");					// additional arguments required?
	asm("jz slow_exec_no_extra_args");
	asm("mov edx, [esp+8]");						// edx points to additional args (saved EBX)
	asm("lea eax, [esp+36]");						// address of copied additional arguments
	asm("mov [esp+8], eax");						// replace supplied address
	asm("mov ecx, ebx");
	asm("shr ecx, 26");
	asm("and cl, 7");								// ecx=number of additional arguments-1
	asm("test edx, edx");
	asm("jnz slow_exec_extra_args_present");		// if arg ptr not NULL, copy args
	asm("slow_exec_zero_args:");
	asm("mov [esp+ecx*4+36], edx");					// else zero args
	asm("dec ecx");
	asm("jns slow_exec_zero_args");
	asm("jmp slow_exec_no_extra_args");

	asm("slow_exec_extra_args_present:");
	asm("slow_exec_copy_args:");
	asm("mov eax, gs:[edx+ecx*4]");					// get argument from caller's address space
	asm("mov [esp+ecx*4+36], eax");					// copy it
	asm("dec ecx");
	asm("jns slow_exec_copy_args");

	asm("slow_exec_no_extra_args:");
	asm("test ebx, 0x80000000");					// test EClaim
	asm("jz slow_exec_no_claim");
	asm("call %a0" : : "i"(NKern_LockSystem));		// trashes eax, ecx, edx
	asm("slow_exec_no_claim:");
	asm("test ebx, 0x20000000");					// test EPreprocess
	asm("jz slow_exec_no_preprocess");
	asm("call [esi-4]");							// call preprocess handler - trashes eax, ecx, edx, edi
	asm("slow_exec_no_preprocess:");
	asm("call [ebp+4]");							// call exec function
	asm("test ebx, 0x40000000");					// test ERelease
	asm("jz slow_exec_no_release");
	asm("mov edi, eax");							// save return value in EDI
	asm("call %a0" : : "i"(NKern_UnlockSystem));	// trashes eax, ecx, edx
	asm("mov eax, edi");							// restore return value
	asm("slow_exec_no_release:");

	asm("slow_exec_exit:");
	asm("test dword ptr [esp+72], 3 ");				// returning to user mode?
	asm("jz short slow_exec_exit2 ");				// no - don't do lock check or user mode callbacks
#ifdef __CHECK_LOCK_STATE__
	asm("call %a0" : : "i" (&check_lock_state));
#endif
	asm("push eax");
	asm("cli");
#ifdef __GCC32__
	asm("mov ecx, [%a0]": : "i"(&TheScheduler.iCurrentThread));
	asm("push ecx");
	asm("call __ZN11NThreadBase21CallUserModeCallbacksEv");
	asm("add esp,4");
#else
	TheScheduler.iCurrentThread->CallUserModeCallbacks();
#endif
	asm("pop eax");

	asm("slow_exec_exit2: ");
	asm("pop ecx");
	asm("pop edx");
	asm("pop ebx");
	asm("pop esi");
	asm("pop edi");
	asm("pop ebp");
	asm("pop gs");
	asm("pop es");
	asm("pop ds");
	asm("add esp, 32");								// remove additional arguments
	asm("iretd");

	asm("slow_exec_invalid:");
	asm("call [esi-8]");							// call invalid exec handler
	asm("jmp slow_exec_exit");
	}
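
// Illustrative sketch (not part of the original source): a user-side stub
// for a slow exec call, matching the convention documented above (call
// number in EAX; parameters in ECX, EDX, EBX, ESI). The call number 42 and
// the guard macro are hypothetical.
#ifdef __VECTORS_ILLUSTRATION__
__NAKED__ TInt ExampleSlowExecCall(TInt /*a0*/, TInt /*a1*/, TInt /*a2*/, TInt /*a3*/)
	{
	asm("push ebx");			// EBX/ESI are callee-saved
	asm("push esi");
	asm("mov ecx, [esp+12]");	// first parameter
	asm("mov edx, [esp+16]");	// second parameter
	asm("mov ebx, [esp+20]");	// third parameter
	asm("mov esi, [esp+24]");	// fourth parameter
	asm("mov eax, 42");			// slow exec call number (hypothetical)
	asm("int 0x21");			// enter __X86Vector21
	asm("pop esi");
	asm("pop ebx");
	asm("ret");					// result in EAX
	}
#endif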

const TUint32 irq_start_trace_header = ((4<<BTrace::ESizeIndex) + (BTrace::ECpuUsage<<BTrace::ECategoryIndex*8) + (BTrace::EIrqStart<<BTrace::ESubCategoryIndex*8));
const TUint32 irq_end_trace_header   = ((4<<BTrace::ESizeIndex) + (BTrace::ECpuUsage<<BTrace::ECategoryIndex*8) + (BTrace::EIrqEnd<<BTrace::ESubCategoryIndex*8));
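// The two header words above pack byte-wide BTrace fields into one 32-bit
// word (reading the E*Index constants as byte positions within the word):
//	size		= 4			(header word only, no payload)
//	category	= BTrace::ECpuUsage
//	subcategory	= BTrace::EIrqStart or BTrace::EIrqEnd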

/******************************************************************************
* IRQ Preamble/Postamble Common Code
* On entry SS:ESP references the current thread's supervisor stack
* [ESP+0] = vector number
* [ESP+4] = return EIP
* [ESP+8] = return CS
* [ESP+12] = return EFLAGS
* [ESP+16] = return ESP if privilege change occurred
* [ESP+20] = return SS if privilege change occurred
*******************************************************************************/
__NAKED__ void __X86VectorIrq()
	{
	asm("push ds");
	asm("push es");
	asm("push eax");
	asm("mov ax, ss");
	asm("cld");
	asm("push ecx");
	asm("push edx");
	asm("mov ds, ax");
	asm("mov es, ax");
	asm("mov eax, esp");									// eax points to saved stuff
	asm("inc dword ptr [%a0]": : "i"(&X86_IrqNestCount));	// nest count starts at -1
	asm("jnz nested_irq_entry");							// already on the IRQ stack if nested
#ifdef __GCC32__
	asm("mov esp, %0" : : "i"(&X86_IrqStack));
	asm("add esp, %0" : : "i"(IRQ_STACK_SIZE));
#else
	_asm lea esp, X86_IrqStack[IRQ_STACK_SIZE]
#endif
	asm("push eax");										// save original stack pointer on IRQ stack
	asm("nested_irq_entry:");

#ifdef BTRACE_CPU_USAGE
	asm("cmp byte ptr [%a0], 0": : "i"(&TheScheduler.iCpuUsageFilter));
	asm("jz no_trace");
	asm("push eax");
	asm("push %0": :"i"(irq_start_trace_header));
	asm("call dword ptr [%a0]": : "i"(&TheScheduler.iBTraceHandler));
	asm("pop eax");
	asm("pop eax");
	asm("no_trace:");
#endif
	asm("call [%a0]": : "i"(&X86_IrqHandler));

	// Postamble. Interrupts disabled here.
	asm("xor eax, eax");
	asm("dec dword ptr [%a0]": : "i"(&X86_IrqNestCount));
	asm("jns nested_irq_exit");
	asm("cmp eax, [%a0]": : "i"(&TheScheduler.iKernCSLocked));
	asm("lea edx, %a0": : "i"(&TheScheduler.iRescheduleNeededFlag));
	asm("jnz irq_kernel_locked_exit");
	asm("cmp eax, [edx]");
	asm("jz irq_kernel_locked_exit");						// no reschedule needed
	asm("inc eax");
	asm("mov [%a0], eax": : "i"(&TheScheduler.iKernCSLocked));	// lock the kernel
	asm("pop eax");
	asm("mov esp, eax");									// restore original stack pointer
	asm("sti");
	asm("call %a0" : : "i"(TScheduler_Reschedule));
	asm("jmp irq_exit");

	asm("irq_kernel_locked_exit:");
	asm("pop eax");
	asm("mov esp, eax");									// restore original stack pointer

	asm("nested_irq_exit:");
#ifdef BTRACE_CPU_USAGE
	asm("cmp byte ptr [%a0], 0": : "i"(&TheScheduler.iCpuUsageFilter));
	asm("jz no_trace2");
	asm("push %0": : "i"(irq_end_trace_header));
	asm("call dword ptr [%a0]": : "i"(&TheScheduler.iBTraceHandler));
	asm("pop eax");
	asm("no_trace2:");
#endif
	asm("irq_exit:");
	asm("test dword ptr [esp+28], 3 ");	// check if we came from kernel mode
	asm("jz short irq_exit2 ");
#ifdef __CHECK_LOCK_STATE__
	asm("call %a0" : : "i" (&check_lock_state));
#endif
#ifdef __GCC32__
	asm("mov ecx, [%a0]": : "i"(&TheScheduler.iCurrentThread));
	asm("push ecx");
	asm("call __ZN11NThreadBase21CallUserModeCallbacksEv");
	asm("add esp,4");
#else
	TheScheduler.iCurrentThread->CallUserModeCallbacks();
#endif
	asm("irq_exit2:");
	asm("pop edx");
	asm("pop ecx");
	asm("pop eax");
	asm("pop es");
	asm("pop ds");
	asm("add esp, 4");					// remove vector number
	asm("iretd");
	}
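
// In C terms, the postamble above makes the following decision (a sketch
// only, assuming the semantics visible in this file: the nest count starts
// at -1 and TScheduler::Reschedule is entered with the kernel locked):
//
//	if (--X86_IrqNestCount < 0)						// leaving outermost IRQ
//		{
//		if (!TheScheduler.iKernCSLocked && TheScheduler.iRescheduleNeededFlag)
//			{
//			TheScheduler.iKernCSLocked = 1;			// lock kernel, then
//			TScheduler::Reschedule();				// reschedule with interrupts enabled
//			}
//		}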


/******************************************************************************
* General Exception Handler
* On entry SS:ESP references the current thread's supervisor stack
* [ESP+0] = vector number
* [ESP+4] = error code (filled with 0 for exceptions without error codes)
* [ESP+8] = return EIP
* [ESP+12] = return CS
* [ESP+16] = return EFLAGS
* [ESP+20] = return ESP if privilege change occurred
* [ESP+24] = return SS if privilege change occurred
*******************************************************************************/
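/******************************************************************************
* Layout of the exception info frame built at EBP by the handler below, as
* implied by the pushes it performs (byte offsets from EBP):
*	+0	ESP at fault	+4	SS		+8	fault address (CR2)
*	+12	EAX		+16	EDX		+20	ECX		+24	EBX
*	+28	ESI		+32	EDI		+36	EBP
*	+40	GS		+44	FS		+48	ES		+52	DS
*	+56	vector number	+60	error code
*	+64	EIP		+68	CS		+72	EFLAGS
*******************************************************************************/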
GLDEF_C __NAKED__ void __X86VectorExc()
	{
	asm("push ds");
	asm("push es");
	asm("push fs");
	asm("push gs");
	asm("cld");
	asm("push ebp");
	asm("mov bp, ds");
	asm("push edi");
	asm("mov gs, bp");			// gs = caller's data segment
	asm("mov bp, ss");
	asm("push esi");
	asm("push ebx");
	asm("push ecx");
	asm("push edx");
	asm("push eax");
	asm("mov eax, cr2");		// eax = page fault address
	asm("mov ds, bp");			// ds = kernel data segment
	asm("mov es, bp");
	asm("push eax");
	asm("sub esp, 8");			// reserve space for ESP and SS
	asm("mov ebp, esp");		// ebp points to exception info frame
	asm("mov edi, [%a0]": : "i"(&TheScheduler.iCurrentThread));
	asm("xor eax, eax");
	asm("mov ax, ss");
	asm("mov [ebp+4], eax");	// SS
	asm("mov eax, ebp");
	asm("add eax, 76");			// EAX = ESP at point of exception if ring 0
	asm("test dword ptr [ebp+68], 3");	// check if we came from kernel mode
	asm("jz ring0_exception");
	asm("mov byte ptr [edi+11], 1");
	asm("add eax, 8");			// EAX = ESP at point of exception if ring 3
	asm("ring0_exception:");
	asm("mov [ebp], eax");
	asm("cmp dword ptr [%a0], -1": : "i"(&X86_IrqNestCount));	// fatal if inside an ISR
	asm("jnz fatal_exception_irq");
	asm("cmp dword ptr [%a0], 0": : "i"(&TheScheduler.iKernCSLocked));	// fatal if kernel locked
	asm("jnz fatal_exception_locked");
	asm("cmp dword ptr [ebp+%0], 7": :"i"_FOFF(TX86ExcInfo,iExcId));	// check for device not available
	asm("jne not_fpu");
	asm("mov dword ptr [%a0], 1": :"i"(&TheScheduler.iKernCSLocked));	// lock the kernel
	asm("clts");				// clear CR0.TS
	asm("frstor [edi+%0]": :"i"_FOFF(NThread,iCoprocessorState));		// restore thread's FPU state
	asm("call %a0": :"i"(NKern_Unlock));
	asm("add esp, 12");			// remove esp, ss, fault address
	asm("jmp proceed");

	asm("not_fpu:");
	asm("mov eax, [edi+%0]" : : "i"_FOFF(NThreadBase,iHandlers));
	asm("push edi");			// pass current thread parameter
	asm("push ebp");			// pass frame address
	asm("call [eax+%0]" : : "i"_FOFF(SNThreadHandlers,iExceptionHandler));
	asm("add esp, 20");			// remove parameters, esp, ss, fault address

	asm("proceed:");
	asm("mov byte ptr [edi+11], 0 ");
	asm("test dword ptr [esp+56], 3 ");	// check if we came from kernel mode
	asm("jz short proceed2 ");
	asm("cli");
#ifdef __CHECK_LOCK_STATE__
	asm("call %a0" : : "i" (&check_lock_state));
#endif
#ifdef __GCC32__
	asm("mov ecx, [%a0]": : "i"(&TheScheduler.iCurrentThread));
	asm("push ecx");
	asm("call __ZN11NThreadBase21CallUserModeCallbacksEv");
	asm("add esp,4");
#else
	TheScheduler.iCurrentThread->CallUserModeCallbacks();
#endif
	asm("proceed2:");
	asm("pop eax");
	asm("pop edx");
	asm("pop ecx");
	asm("pop ebx");
	asm("pop esi");
	asm("pop edi");
	asm("pop ebp");
	asm("pop gs");
	asm("pop fs");
	asm("pop es");
	asm("pop ds");
	asm("add esp, 8");			// skip vector number and error code
	asm("iretd");

	asm("fatal_exception_irq:");
	asm("fatal_exception_locked:");
	asm("lea eax, %a0": :"i"(&TheScheduler));
	asm("lea eax, [eax+%0]": :"i"_FOFF(TScheduler,iMonitorExceptionHandler));
	asm("mov eax,[eax]");		// eax = monitor exception handler, if any

	asm("test eax, eax");
	asm("jnz monitor_exception");
	asm("push ebp");
	asm("call %a0": :"i"(&__X86ExcFault));	// doesn't return

	asm("monitor_exception:");
	asm("jmp eax");
	}


/******************************************************************************
* Exception Handlers
*******************************************************************************/

DECLARE_X86_EXC_NOERR(00)
DECLARE_X86_EXC_NOERR(01)
DECLARE_X86_EXC_NOERR(02)
DECLARE_X86_EXC_NOERR(03)
DECLARE_X86_EXC_NOERR(04)
DECLARE_X86_EXC_NOERR(05)
DECLARE_X86_EXC_NOERR(06)
DECLARE_X86_EXC_NOERR(07)
DECLARE_X86_EXC_ERR(08)
DECLARE_X86_EXC_NOERR(09)
DECLARE_X86_EXC_ERR(0A)
DECLARE_X86_EXC_ERR(0B)
DECLARE_X86_EXC_ERR(0C)
DECLARE_X86_EXC_ERR(0D)
DECLARE_X86_EXC_ERR(0E)
DECLARE_X86_EXC_NOERR(0F)
DECLARE_X86_EXC_NOERR(10)
DECLARE_X86_EXC_ERR(11)
DECLARE_X86_EXC_NOERR(12)
DECLARE_X86_EXC_NOERR(13)
DECLARE_X86_EXC_NOERR(14)
DECLARE_X86_EXC_NOERR(15)
DECLARE_X86_EXC_NOERR(16)
DECLARE_X86_EXC_NOERR(17)
DECLARE_X86_EXC_NOERR(18)
DECLARE_X86_EXC_NOERR(19)
DECLARE_X86_EXC_NOERR(1A)
DECLARE_X86_EXC_NOERR(1B)
DECLARE_X86_EXC_NOERR(1C)
DECLARE_X86_EXC_NOERR(1D)
DECLARE_X86_EXC_NOERR(1E)
DECLARE_X86_EXC_NOERR(1F)

/******************************************************************************
* Interrupt Handlers
*******************************************************************************/

DECLARE_X86_INT(30)
DECLARE_X86_INT(31)
DECLARE_X86_INT(32)
DECLARE_X86_INT(33)
DECLARE_X86_INT(34)
DECLARE_X86_INT(35)
DECLARE_X86_INT(36)
DECLARE_X86_INT(37)
DECLARE_X86_INT(38)
DECLARE_X86_INT(39)
DECLARE_X86_INT(3A)
DECLARE_X86_INT(3B)
DECLARE_X86_INT(3C)
DECLARE_X86_INT(3D)
DECLARE_X86_INT(3E)
DECLARE_X86_INT(3F)

/*const*/ PFV TheExcVectors[64]=
{
__X86Vector00,	__X86Vector01,	__X86Vector02,	__X86Vector03,
__X86Vector04,	__X86Vector05,	__X86Vector06,	__X86Vector07,
__X86Vector08,	__X86Vector09,	__X86Vector0A,	__X86Vector0B,
__X86Vector0C,	__X86Vector0D,	__X86Vector0E,	__X86Vector0F,
__X86Vector10,	__X86Vector11,	__X86Vector12,	__X86Vector13,
__X86Vector14,	__X86Vector15,	__X86Vector16,	__X86Vector17,
__X86Vector18,	__X86Vector19,	__X86Vector1A,	__X86Vector1B,
__X86Vector1C,	__X86Vector1D,	__X86Vector1E,	__X86Vector1F,
__X86Vector20,	__X86Vector21,	NULL,			NULL,			// vectors 22-2F unused
NULL,			NULL,			NULL,			NULL,
NULL,			NULL,			NULL,			NULL,
NULL,			NULL,			NULL,			NULL,
__X86Vector30,	__X86Vector31,	__X86Vector32,	__X86Vector33,
__X86Vector34,	__X86Vector35,	__X86Vector36,	__X86Vector37,
__X86Vector38,	__X86Vector39,	__X86Vector3A,	__X86Vector3B,
__X86Vector3C,	__X86Vector3D,	__X86Vector3E,	__X86Vector3F
};

EXPORT_C __NAKED__ TUint32 X86::IrqReturnAddress()
	{
	asm("mov eax, %0": :"i"(&X86_IrqStack[0]));
	asm("mov eax, [eax + %0]": :"i"(IRQ_STACK_SIZE - 4));	// pointer to saved supervisor stack pointer
	asm("mov eax, [eax+24]");								// return address from original interrupt
	asm("ret");
	}

__NAKED__ TUint32 get_cr0()
	{
	asm("mov eax, cr0");
	asm("ret");
	}

__NAKED__ TUint32 get_cr3()
	{
	asm("mov eax, cr3");
	asm("ret");
	}

__NAKED__ TUint32 get_esp()
	{
	asm("mov eax, esp");
	asm("ret");
	}

__NAKED__ void __lidt(SX86Des* /*aTable*/, TInt /*aLimit*/)
	{
	asm("mov eax, [esp+4]");			// eax = IDT base address
	asm("mov ecx, [esp+8]");			// ecx = number of IDT entries
	asm("shl ecx, 3");					// 8 bytes per descriptor
	asm("sub ecx, 1");					// limit = table size in bytes - 1
	asm("sub esp, 8");					// build 6-byte pseudo-descriptor on the stack
	asm("mov word ptr [esp], cx");		// 16-bit limit ...
	asm("mov dword ptr [esp+2], eax");	// ... followed by 32-bit base
	asm("lidt [esp]");					// load IDTR
	asm("add esp, 8");
	asm("mov eax, 0x28");				// TSS selector
	asm("ltr ax");						// load task register
	asm("ret");
	}