First public contribution.
// Copyright (c) 2007-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
// e32\nkern\x86\vectors.cia

#ifdef __GCC32__
#define DECLARE_X86_INT(n) GLDEF_C __NAKED__ void __X86Vector##n() { asm("push 0x"#n); asm("jmp %a0": :"i"(&__X86VectorIrq)); }
#define DECLARE_X86_EXC_NOERR(n) GLDEF_C __NAKED__ void __X86Vector##n() { asm("push 0"); asm("push 0x"#n); asm("jmp %a0": : "i"(&__X86VectorExc)); }
#define DECLARE_X86_EXC_ERR(n) GLDEF_C __NAKED__ void __X86Vector##n() { asm("push 0x"#n); asm("jmp %a0": : "i"(&__X86VectorExc)); }
#else
#define DECLARE_X86_INT(n) GLDEF_C __NAKED__ void __X86Vector##n() { __asm push 0x##n __asm jmp __X86VectorIrq }
#define DECLARE_X86_EXC_NOERR(n) GLDEF_C __NAKED__ void __X86Vector##n() { __asm push 0 __asm push 0x##n __asm jmp __X86VectorExc }
#define DECLARE_X86_EXC_ERR(n) GLDEF_C __NAKED__ void __X86Vector##n() { __asm push 0x##n __asm jmp __X86VectorExc }
#endif
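// Each DECLARE_* macro stamps out a tiny naked stub that pushes its vector
// number and tail-jumps into the common handler; exceptions without a CPU
// error code first push a dummy 0 so all handlers see a uniform frame.
// As a sketch, DECLARE_X86_INT(30) expands (GCC variant) roughly to:
//
//	GLDEF_C __NAKED__ void __X86Vector30()
//		{
//		asm("push 0x30");							// vector number
//		asm("jmp %a0" : : "i"(&__X86VectorIrq));	// tail-jump, no return here
//		}
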
const TLinAddr NKern_WaitForAnyRequest = (TLinAddr)&NKern::WaitForAnyRequest;
const TLinAddr NKern_LockSystem = (TLinAddr)&NKern::LockSystem;
const TLinAddr NKern_UnlockSystem = (TLinAddr)&NKern::UnlockSystem;
const TLinAddr NKern_Unlock = (TLinAddr)&NKern::Unlock;
const TLinAddr TScheduler_Reschedule = (TLinAddr)&TScheduler::Reschedule;

#ifdef __CHECK_LOCK_STATE__
/******************************************************************************
 * Check that the kernel is unlocked, no fast mutex is held and the thread
 * is not in a critical section when returning to user mode.
 ******************************************************************************/
extern "C" __NAKED__ void check_lock_state()
	asm("mov ecx, [%a0]" : : "i" (&TheScheduler.iCurrentThread));
	asm("cmp dword ptr [%a0], 0" : : "i" (&TheScheduler.iKernCSLocked));	// kernel must be unlocked
	asm("jnz short bad_lock_state1 ");
	asm("cmp dword ptr [ecx+%0], 0" : : "i" _FOFF(NThreadBase, iHeldFastMutex));	// no fast mutex may be held
	asm("jne short bad_lock_state2 ");
	asm("cmp dword ptr [ecx+%0], 0" : : "i" _FOFF(NThreadBase, iCsCount));	// thread must not be in a critical section
	asm("jne short bad_lock_state3 ");
	asm("bad_lock_state1: ");
	asm("bad_lock_state2: ");
	asm("bad_lock_state3: ");
#endif

/******************************************************************************
 * Int 20h Handler - Fast Executive Calls
 * Parameter in ECX if any
 * On entry SS:ESP references current thread's supervisor stack
 * [ESP+0] = return EIP
 * [ESP+4] = return CS
 * [ESP+8] = return EFLAGS
 * [ESP+12] = return ESP if privilege change occurred
 * [ESP+16] = return SS if privilege change occurred
 ******************************************************************************/
GLDEF_C __NAKED__ void __X86Vector20()
	// Interrupts enabled on entry
	asm("je wait_for_any_request");		// fast exec call 0 = WaitForAnyRequest
	asm("mov ecx, [%a0]": :"i"(&TheScheduler.iCurrentThread));
	asm("mov edx, [ecx+%0]" : : "i"_FOFF(NThreadBase,iFastExecTable));	// edx = fast exec table
	asm("cmp eax, [edx]");			// compare call number against table size
	asm("jae fast_exec_invalid");
	asm("call [edx+eax*4]");		// call the fast exec function
	asm("fast_exec_exit:");
	asm("test dword ptr [esp+16], 3 ");	// returning to user mode?
	asm("jz short fast_exec_exit2 ");	// no - don't do lock check or user mode callbacks
#ifdef __CHECK_LOCK_STATE__
	asm("call %a0" : : "i" (&check_lock_state));
#endif
	asm("mov ecx, [%a0]": : "i"(&TheScheduler.iCurrentThread));
#ifdef __GCC32__
	asm("call __ZN11NThreadBase21CallUserModeCallbacksEv ");
#else
	TheScheduler.iCurrentThread->CallUserModeCallbacks();
#endif
	asm("fast_exec_exit2: ");
	asm("wait_for_any_request:");
	asm("call %a0" : : "i" (NKern_WaitForAnyRequest));
	asm("test dword ptr [esp+12], 3 ");	// returning to user mode?
	asm("jz short wfar_exit2 ");		// no - don't do lock check or user mode callbacks
	asm("mov ecx, [%a0]": : "i"(&TheScheduler.iCurrentThread));
#ifdef __GCC32__
	asm("call __ZN11NThreadBase21CallUserModeCallbacksEv");
#else
	TheScheduler.iCurrentThread->CallUserModeCallbacks();
#endif
#ifdef __CHECK_LOCK_STATE__
	asm("call %a0" : : "i" (&check_lock_state));
#endif
	asm("fast_exec_invalid:");
	asm("mov edi, [%a0]": :"i"(&TheScheduler.iCurrentThread));	// edi=TheCurrentThread
	asm("mov esi, [edi+%0]" : : "i"_FOFF(NThreadBase,iSlowExecTable));	// esi=slow exec table base
	asm("call [esi-8]");			// call invalid exec handler
	asm("jmp fast_exec_exit");

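// A sketch of the INT 20h entry frame documented above, viewed as a struct
// (the name SFastExecFrame is hypothetical - the handler only works on raw
// ESP offsets):
//
//	struct SFastExecFrame
//		{
//		TLinAddr iReturnEip;	// [ESP+0]
//		TUint32 iReturnCs;		// [ESP+4]
//		TUint32 iReturnEflags;	// [ESP+8]
//		TLinAddr iReturnEsp;	// [ESP+12] only if privilege change occurred
//		TUint32 iReturnSs;		// [ESP+16] only if privilege change occurred
//		};
//
// The fast exec call number arrives in EAX: call 0 goes straight to
// NKern::WaitForAnyRequest(), anything else indexes iFastExecTable, whose
// first word is the number of entries.
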
/******************************************************************************
 * Int 21h Handler - Slow Executive Calls
 * Parameters in ECX, EDX, EBX, ESI in that order
 * On entry SS:ESP references current thread's supervisor stack
 * Must preserve EBX, EBP, ESI, EDI
 * [ESP+0] = return EIP
 * [ESP+4] = return CS
 * [ESP+8] = return EFLAGS
 * [ESP+12] = return ESP if privilege change occurred
 * [ESP+16] = return SS if privilege change occurred
 ******************************************************************************/
GLDEF_C __NAKED__ void __X86Vector21()
	// Interrupts enabled on entry
	asm("sub esp, 32");				// reserve space for additional arguments
	asm("mov edi, [%a0]": : "i"(&TheScheduler.iCurrentThread));	// edi=TheCurrentThread
	asm("mov esi, [edi+%0]" : : "i"_FOFF(NThreadBase,iSlowExecTable));	// esi=slow exec table base
	asm("lea ebp, [esi+eax*8]");	// ebp points to exec table entry
	asm("cmp eax, [esi-12]");		// compare call number against table size
	asm("jae slow_exec_invalid");
	asm("mov ebx, [ebp]");			// ebx=flags
	asm("test ebx, 0x1c000000");	// additional arguments required?
	asm("jz slow_exec_no_extra_args");
	asm("mov edx, [esp+8]");		// edx points to additional args
	asm("lea eax, [esp+36]");		// address of copied additional arguments
	asm("mov [esp+8], eax");		// replace supplied address
	asm("and cl, 7");				// ecx=number of additional arguments-1
	asm("test edx, edx");
	asm("jnz slow_exec_extra_args_present");	// if arg ptr not NULL, copy args
	asm("slow_exec_zero_args:");
	asm("mov [esp+ecx*4+36], edx");	// else zero args
	asm("dec ecx");
	asm("jns slow_exec_zero_args");
	asm("jmp slow_exec_no_extra_args");
	asm("slow_exec_extra_args_present:");
	asm("slow_exec_copy_args:");
	asm("mov eax, gs:[edx+ecx*4]");	// get argument
	asm("mov [esp+ecx*4+36], eax");	// copy it
	asm("dec ecx");
	asm("jns slow_exec_copy_args");
	asm("slow_exec_no_extra_args:");
	asm("test ebx, 0x80000000");	// test EClaim
	asm("jz slow_exec_no_claim");
	asm("call %a0" : : "i"(NKern_LockSystem));	// trashes eax, ecx, edx
	asm("slow_exec_no_claim:");
	asm("test ebx, 0x20000000");	// test EPreprocess
	asm("jz slow_exec_no_preprocess");
	asm("call [esi-4]");			// trashes eax, ecx, edx, edi
	asm("slow_exec_no_preprocess:");
	asm("call [ebp+4]");			// call exec function
	asm("test ebx, 0x40000000");	// test ERelease
	asm("jz slow_exec_no_release");
	asm("mov edi, eax");			// save return value in EDI
	asm("call %a0" : : "i"(NKern_UnlockSystem));	// trashes eax, ecx, edx
	asm("mov eax, edi");			// restore return value
	asm("slow_exec_no_release:");
	asm("slow_exec_exit:");
	asm("test dword ptr [esp+72], 3 ");	// returning to user mode?
	asm("jz short slow_exec_exit2 ");	// no - don't do lock check or user mode callbacks
#ifdef __CHECK_LOCK_STATE__
	asm("call %a0" : : "i" (&check_lock_state));
#endif
	asm("mov ecx, [%a0]": : "i"(&TheScheduler.iCurrentThread));
#ifdef __GCC32__
	asm("call __ZN11NThreadBase21CallUserModeCallbacksEv");
#else
	TheScheduler.iCurrentThread->CallUserModeCallbacks();
#endif
	asm("slow_exec_exit2: ");
	asm("add esp, 32");				// remove additional arguments
	asm("slow_exec_invalid:");
	asm("call [esi-8]");			// call invalid exec handler
	asm("jmp slow_exec_exit");

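// A sketch of the slow exec table layout implied by the code above (struct
// and constant names are hypothetical; the flag values are the masks tested
// above). EAX carries the call number, ESI the table base:
//
//	struct SSlowExecEntry
//		{
//		TUint32 iFlags;			// [ebp+0] claim/release/preprocess + extra-arg count
//		TLinAddr iFunction;		// [ebp+4] the exec function itself
//		};
//
//	// [esi-12] = number of entries
//	// [esi-8]  = invalid exec call handler
//	// [esi-4]  = preprocess handler
//	const TUint32 KFlagClaim      = 0x80000000u;	// lock the system before the call
//	const TUint32 KFlagRelease    = 0x40000000u;	// unlock the system after the call
//	const TUint32 KFlagPreprocess = 0x20000000u;	// run the preprocess handler first
//	const TUint32 KMaskExtraArgs  = 0x1c000000u;	// count of extra arguments to copy
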
const TUint32 irq_start_trace_header = ((4<<BTrace::ESizeIndex) + (BTrace::ECpuUsage<<BTrace::ECategoryIndex*8) + (BTrace::EIrqStart<<BTrace::ESubCategoryIndex*8));
const TUint32 irq_end_trace_header = ((4<<BTrace::ESizeIndex) + (BTrace::ECpuUsage<<BTrace::ECategoryIndex*8) + (BTrace::EIrqEnd<<BTrace::ESubCategoryIndex*8));

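// Each constant above packs a complete 4-byte BTrace record header into a
// single immediate - the record size (4: header only) in the size byte,
// BTrace::ECpuUsage in the category byte and EIrqStart/EIrqEnd in the
// subcategory byte - so the IRQ path can emit a trace record with a single
// PUSH before calling the trace handler.
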
/******************************************************************************
 * IRQ Preamble/Postamble Common Code
 * On entry SS:ESP references current thread's supervisor stack
 * [ESP+0] = vector number
 * [ESP+4] = return EIP
 * [ESP+8] = return CS
 * [ESP+12] = return EFLAGS
 * [ESP+16] = return ESP if privilege change occurred
 * [ESP+20] = return SS if privilege change occurred
 ******************************************************************************/
__NAKED__ void __X86VectorIrq()
	asm("mov eax, esp");			// eax points to saved stuff
	asm("inc dword ptr [%a0]": : "i"(&X86_IrqNestCount));	// nest count starts at -1
	asm("jnz nested_irq_entry");	// already on the IRQ stack if nested
#ifdef __GCC32__
	asm("mov esp, %0" : : "i"(&X86_IrqStack));	// switch to dedicated IRQ stack
	asm("add esp, %0" : : "i"(IRQ_STACK_SIZE));
#else
	_asm lea esp, X86_IrqStack[IRQ_STACK_SIZE]
#endif
	asm("nested_irq_entry:");
#ifdef BTRACE_CPU_USAGE
	asm("cmp byte ptr [%a0], 0": : "i"(&TheScheduler.iCpuUsageFilter));	// CPU usage tracing enabled?
	asm("push %0": :"i"(irq_start_trace_header));
	asm("call dword ptr [%a0]": : "i"(&TheScheduler.iBTraceHandler));
#endif
	asm("call [%a0]": : "i"(&X86_IrqHandler));	// dispatch the interrupt
	// Postamble. Interrupts disabled here.
	asm("dec dword ptr [%a0]": : "i"(&X86_IrqNestCount));
	asm("jns nested_irq_exit");		// still nested - skip reschedule check
	asm("cmp eax, [%a0]": : "i"(&TheScheduler.iKernCSLocked));	// kernel locked?
	asm("lea edx, %a0": : "i"(&TheScheduler.iRescheduleNeededFlag));
	asm("jnz irq_kernel_locked_exit");
	asm("cmp eax, [edx]");			// reschedule needed?
	asm("jz irq_kernel_locked_exit");
	asm("mov [%a0], eax": : "i"(&TheScheduler.iKernCSLocked));
	asm("call %a0" : : "i"(TScheduler_Reschedule));
	asm("irq_kernel_locked_exit:");
	asm("nested_irq_exit:");
#ifdef BTRACE_CPU_USAGE
	asm("cmp byte ptr [%a0], 0": : "i"(&TheScheduler.iCpuUsageFilter));
	asm("push %0": : "i"(irq_end_trace_header));
	asm("call dword ptr [%a0]": : "i"(&TheScheduler.iBTraceHandler));
#endif
	asm("test dword ptr [esp+28], 3 ");	// returning to user mode?
	asm("jz short irq_exit2 ");			// no - don't do lock check or user mode callbacks
#ifdef __CHECK_LOCK_STATE__
	asm("call %a0" : : "i" (&check_lock_state));
#endif
	asm("mov ecx, [%a0]": : "i"(&TheScheduler.iCurrentThread));
#ifdef __GCC32__
	asm("call __ZN11NThreadBase21CallUserModeCallbacksEv");
#else
	TheScheduler.iCurrentThread->CallUserModeCallbacks();
#endif

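// X86_IrqNestCount starts at -1, which is what lets a bare INC/DEC double as
// the nesting test. As pseudo-C (helper names hypothetical):
//
//	if (++X86_IrqNestCount == 0)		// -1 -> 0: outermost interrupt
//		SwitchToDedicatedIrqStack();
//	DispatchIrq();
//	if (--X86_IrqNestCount >= 0)
//		return;							// still nested - no reschedule check
//	// outermost exit: reschedule if kernel unlocked and a reschedule is pending
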
/******************************************************************************
 * General Exception Handler
 * On entry SS:ESP references current thread's supervisor stack
 * [ESP+0] = vector number
 * [ESP+4] = error code (filled with 0 for exceptions without error codes)
 * [ESP+8] = return EIP
 * [ESP+12] = return CS
 * [ESP+16] = return EFLAGS
 * [ESP+20] = return ESP if privilege change occurred
 * [ESP+24] = return SS if privilege change occurred
 ******************************************************************************/
GLDEF_C __NAKED__ void __X86VectorExc()
	asm("mov ebp, esp");			// ebp points to exception info frame
	asm("mov edi, [%a0]": : "i"(&TheScheduler.iCurrentThread));
	asm("mov [ebp+4], eax");		// SS at point of exception
	asm("add eax, 76");				// EAX = ESP at point of exception if ring 0
	asm("test dword ptr [ebp+68], 3");	// check if we came from kernel mode
	asm("jz ring0_exception");
	asm("mov byte ptr [edi+11], 1");	// user-mode exception: set flag byte in current thread
	asm("add eax, 8");				// EAX = ESP at point of exception if ring 3
	asm("ring0_exception:");
	asm("mov [ebp], eax");			// save ESP at point of exception
	asm("cmp dword ptr [%a0], -1": : "i"(&X86_IrqNestCount));	// fault inside an ISR?
	asm("jnz fatal_exception_irq");
	asm("cmp dword ptr [%a0], 0": : "i"(&TheScheduler.iKernCSLocked));	// fault with kernel locked?
	asm("jnz fatal_exception_locked");
	asm("cmp dword ptr [ebp+%0], 7": :"i"_FOFF(TX86ExcInfo,iExcId));	// check for device not available
	asm("mov dword ptr [%a0], 1": :"i"(&TheScheduler.iKernCSLocked));	// lock the kernel
	asm("frstor [edi+%0]": :"i"_FOFF(NThread,iCoprocessorState));	// restore this thread's FPU state
	asm("call %a0": :"i"(NKern_Unlock));
	asm("mov eax, [edi+%0]" : : "i"_FOFF(NThreadBase,iHandlers));	// eax = current thread's handler table
	asm("push edi");				// pass current thread parameter
	asm("push ebp");				// pass frame address
	asm("call [eax+%0]" : : "i"_FOFF(SNThreadHandlers,iExceptionHandler));
	asm("add esp, 20");				// remove parameters, esp, ss, fault address
	asm("mov byte ptr [edi+11], 0 ");	// clear the user-mode exception flag
	asm("test dword ptr [esp+56], 3 ");	// returning to user mode?
	asm("jz short proceed2 ");			// no - don't do lock check or user mode callbacks
#ifdef __CHECK_LOCK_STATE__
	asm("call %a0" : : "i" (&check_lock_state));
#endif
	asm("mov ecx, [%a0]": : "i"(&TheScheduler.iCurrentThread));
#ifdef __GCC32__
	asm("call __ZN11NThreadBase21CallUserModeCallbacksEv");
#else
	TheScheduler.iCurrentThread->CallUserModeCallbacks();
#endif
	asm("add esp, 8");				// skip vector number and error code
	asm("fatal_exception_irq:");
	asm("fatal_exception_locked:");
	asm("lea eax, %a0": :"i"(&TheScheduler));
	asm("lea eax, [eax+%0]": :"i"_FOFF(TScheduler,iMonitorExceptionHandler));
	asm("mov eax,[eax]");			// eax = monitor (crash debugger) exception handler
	asm("test eax, eax");
	asm("jnz monitor_exception");	// enter the monitor if one is installed
	asm("call %a0": :"i"(&__X86ExcFault));	// doesn't return
	asm("monitor_exception:");

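// The two PUSHes before the iExceptionHandler call above give the per-thread
// exception handler a cdecl signature along these lines (typedef name
// hypothetical; the real type lives in SNThreadHandlers):
//
//	typedef void (*TExcHandlerFn)(TX86ExcInfo* aInfo, NThread* aThread);
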
/******************************************************************************
 * Exception handlers
 ******************************************************************************/
DECLARE_X86_EXC_NOERR(00)		// divide error
DECLARE_X86_EXC_NOERR(01)		// debug exception
DECLARE_X86_EXC_NOERR(02)		// NMI
DECLARE_X86_EXC_NOERR(03)		// breakpoint
DECLARE_X86_EXC_NOERR(04)		// overflow (INTO)
DECLARE_X86_EXC_NOERR(05)		// BOUND range exceeded
DECLARE_X86_EXC_NOERR(06)		// invalid opcode
DECLARE_X86_EXC_NOERR(07)		// device not available
DECLARE_X86_EXC_ERR(08)			// double fault
DECLARE_X86_EXC_NOERR(09)		// coprocessor segment overrun
DECLARE_X86_EXC_ERR(0A)			// invalid TSS
DECLARE_X86_EXC_ERR(0B)			// segment not present
DECLARE_X86_EXC_ERR(0C)			// stack fault
DECLARE_X86_EXC_ERR(0D)			// general protection fault
DECLARE_X86_EXC_ERR(0E)			// page fault
DECLARE_X86_EXC_NOERR(0F)		// reserved
DECLARE_X86_EXC_NOERR(10)		// x87 FPU error
DECLARE_X86_EXC_ERR(11)			// alignment check
DECLARE_X86_EXC_NOERR(12)		// machine check
DECLARE_X86_EXC_NOERR(13)		// SIMD floating-point exception
DECLARE_X86_EXC_NOERR(14)		// reserved
DECLARE_X86_EXC_NOERR(15)		// reserved
DECLARE_X86_EXC_NOERR(16)		// reserved
DECLARE_X86_EXC_NOERR(17)		// reserved
DECLARE_X86_EXC_NOERR(18)		// reserved
DECLARE_X86_EXC_NOERR(19)		// reserved
DECLARE_X86_EXC_NOERR(1A)		// reserved
DECLARE_X86_EXC_NOERR(1B)		// reserved
DECLARE_X86_EXC_NOERR(1C)		// reserved
DECLARE_X86_EXC_NOERR(1D)		// reserved
DECLARE_X86_EXC_NOERR(1E)		// reserved
DECLARE_X86_EXC_NOERR(1F)		// reserved

/******************************************************************************
 * Interrupt/exception vector table
 ******************************************************************************/
/*const*/ PFV TheExcVectors[64]=
	{
	__X86Vector00, __X86Vector01, __X86Vector02, __X86Vector03,
	__X86Vector04, __X86Vector05, __X86Vector06, __X86Vector07,
	__X86Vector08, __X86Vector09, __X86Vector0A, __X86Vector0B,
	__X86Vector0C, __X86Vector0D, __X86Vector0E, __X86Vector0F,
	__X86Vector10, __X86Vector11, __X86Vector12, __X86Vector13,
	__X86Vector14, __X86Vector15, __X86Vector16, __X86Vector17,
	__X86Vector18, __X86Vector19, __X86Vector1A, __X86Vector1B,
	__X86Vector1C, __X86Vector1D, __X86Vector1E, __X86Vector1F,
	__X86Vector20, __X86Vector21, NULL, NULL,
	NULL, NULL, NULL, NULL,
	NULL, NULL, NULL, NULL,
	NULL, NULL, NULL, NULL,
	__X86Vector30, __X86Vector31, __X86Vector32, __X86Vector33,
	__X86Vector34, __X86Vector35, __X86Vector36, __X86Vector37,
	__X86Vector38, __X86Vector39, __X86Vector3A, __X86Vector3B,
	__X86Vector3C, __X86Vector3D, __X86Vector3E, __X86Vector3F
	};

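// Index n in TheExcVectors corresponds to CPU vector n: 0x00-0x1F are the
// exceptions declared above, 0x20/0x21 the fast/slow executive call handlers,
// and 0x30-0x3F the interrupt stubs; slots 0x22-0x2F are left unhooked (NULL).
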
EXPORT_C __NAKED__ TUint32 X86::IrqReturnAddress()
	asm("mov eax, %0": :"i"(&X86_IrqStack[0]));
	asm("mov eax, [eax + %0]": :"i"(IRQ_STACK_SIZE - 4));	// pointer to saved supervisor stack pointer
	asm("mov eax, [eax+24]");		// return address from original interrupt

__NAKED__ TUint32 get_cr0()

__NAKED__ TUint32 get_cr3()

__NAKED__ TUint32 get_esp()

__NAKED__ void __lidt(SX86Des* /*aTable*/, TInt /*aLimit*/)
	asm("mov eax, [esp+4]");		// eax = IDT base address (aTable)
	asm("mov ecx, [esp+8]");		// ecx = number of entries (aLimit)
	asm("mov word ptr [esp], cx");	// pseudo-descriptor: 16-bit limit
	asm("mov dword ptr [esp+2], eax");	// pseudo-descriptor: 32-bit base
	asm("mov eax, 0x28");
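
// LIDT takes a 6-byte pseudo-descriptor, which the code above assembles on
// the stack. As a sketch (hypothetical name; must be byte-packed):
//
//	struct SIdtr
//		{
//		TUint16 iLimit;		// size of the IDT in bytes, minus 1
//		TUint32 iBase;		// linear address of the first descriptor
//		};
//	// mov word ptr [esp], cx ; mov dword ptr [esp+2], eax ; lidt [esp]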