Update contrib.
// Copyright (c) 2007-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
// Initial Contributors:
// Nokia Corporation - initial contribution.
// e32\nkernsmp\x86\ncutilf.cia
#define __ASM_CALL(func) _asm call func
#elif defined(__GCC32__)
#define __ASM_CALL(func) asm("call _" #func);
#error Unknown x86 compiler
#if defined(__INCLUDE_SPIN_LOCK_CHECKS__)
#define SPIN_LOCK_ENTRY_CHECK() __ASM_CALL(spin_lock_entry_check)
#define SPIN_LOCK_MARK_ACQ() __ASM_CALL(spin_lock_mark_acq)
#define SPIN_UNLOCK_ENTRY_CHECK() __ASM_CALL(spin_unlock_entry_check)
#define RWSPIN_RLOCK_ENTRY_CHECK() __ASM_CALL(rwspin_rlock_entry_check)
#define RWSPIN_RLOCK_MARK_ACQ() __ASM_CALL(rwspin_rlock_mark_acq)
#define RWSPIN_RUNLOCK_ENTRY_CHECK() __ASM_CALL(rwspin_runlock_entry_check)
#define RWSPIN_WLOCK_ENTRY_CHECK() __ASM_CALL(rwspin_wlock_entry_check)
#define RWSPIN_WLOCK_MARK_ACQ() __ASM_CALL(rwspin_wlock_mark_acq)
#define RWSPIN_WUNLOCK_ENTRY_CHECK() __ASM_CALL(rwspin_wunlock_entry_check)
#define SPIN_LOCK_ENTRY_CHECK()
#define SPIN_LOCK_MARK_ACQ()
#define SPIN_UNLOCK_ENTRY_CHECK()
#define RWSPIN_RLOCK_ENTRY_CHECK()
#define RWSPIN_RLOCK_MARK_ACQ()
#define RWSPIN_RUNLOCK_ENTRY_CHECK()
#define RWSPIN_WLOCK_ENTRY_CHECK()
#define RWSPIN_WLOCK_MARK_ACQ()
#define RWSPIN_WUNLOCK_ENTRY_CHECK()
/******************************************************************************
 ******************************************************************************/
/** Returns a timestamp value which is consistent across CPUs.
EXPORT_C __NAKED__ TUint64 NKern::Timestamp()
	asm("cli "); // stop thread migration between reading APIC ID and thread pointer
	asm("mov ecx, ds:[%0]" : : "i" (X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ID));
	asm("mov ecx, [ecx*4+%0]" : : "i" (&SubSchedulerLookupTable));
	asm("jz short use_tsc_only ");
	asm("jnz short use_tsc_only ");
	asm("add eax, [ecx+80+%0]" : : "i" _FOFF(TSubScheduler, iExtras));
	asm("adc edx, [ecx+84+%0]" : : "i" _FOFF(TSubScheduler, iExtras));
	asm("use_tsc_only: ");
/** Get the current value of the CPU timestamp counter
EXPORT_C __NAKED__ TUint64 X86::Timestamp()
/******************************************************************************
 * [this+0] in count (byte)
 * [this+1] out count (byte)
 * [this+6] order (byte)
 * [this+7] holding CPU (byte)
 ******************************************************************************/
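/* Illustrative sketch (not part of the original source): a C++ rendering of the
 * lock-word layout assumed by the checking routines below. The field names and
 * the treatment of bytes 2-5 as unused are invented here purely to make the byte
 * offsets above easier to follow; the real TSpinLock class is declared elsewhere
 * in the nkern headers. Kept under #if 0 so it is never compiled.
 */
#if 0
struct TSpinLockLayoutSketch
	{
	volatile TUint8 iIn;		// [this+0] in (ticket) count, incremented on lock
	volatile TUint8 iOut;		// [this+1] out (serving) count, incremented on unlock
	TUint8 iUnused[4];			// [this+2..5] not described in this excerpt
	TUint8 iOrder;				// [this+6] lock order value used by the order checks
	TUint8 iHoldingCpu;			// [this+7] CPU number holding the lock, 0xFF if none
	};
#endif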
#if defined(__INCLUDE_SPIN_LOCK_CHECKS__)
extern "C" __NAKED__ void spin_lock_entry_check()
	/* ecx points to lock */
	asm("mov edx, ds:[%0]" : : "i"(X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ID));
	asm("mov edx, [edx*4+%0]" : : "i" (&SubSchedulerLookupTable));
	asm("cmp edx, 0 "); /* Skip checks if subschedulers not yet initialised */
	asm("je short slec_ok ");
	asm("test edx, 3 "); /* Skip checks if subscheduler for this CPU not yet initialised */
	asm("jnz short slec_ok ");
	asm("movzx ecx, word ptr [ecx+6] "); /* CL = order, CH = holding CPU */
	asm("cmp cl, 0x20 ");
	asm("jae short slec_preemption "); /* This lock requires preemption to be disabled */
	/* check interrupts disabled */
	asm("test dword ptr [esp], 0x200 "); /* Interrupts enabled? */
	asm("jz short slec_1 "); /* No - OK */
	asm("int 0xff "); /* Yes - die */
	asm("slec_preemption: ");
	asm("cmp cl, 0xff ");
	asm("je short slec_1 "); /* EOrderNone - don't check interrupts or preemption */
	asm("cmp dword ptr [edx+52+%0], 0" : : "i"_FOFF(TSubScheduler, iExtras));
	asm("jge short slec_preemption_die "); /* If called from ISR, die */
	asm("cmp dword ptr [edx+%0], 0" : : "i" _FOFF(TSubScheduler, iKernLockCount));
	asm("jnz short slec_1 "); /* Preemption disabled - OK */
	asm("slec_preemption_die: ");
	asm("int 0xff "); /* Preemption enabled - die */
	asm("lea eax, [edx+%0]" : : "i" _FOFF(TSubScheduler, iCpuNum));
	asm("cmp ch, [eax] ");
	asm("jnz short slec_2 "); /* Not already held by this CPU - OK */
	asm("int 0xff "); /* Already held by this CPU - die */
	asm("lea edx, [edx+%0]" : : "i" _FOFF(TSubScheduler, iSpinLockOrderCheck));
	asm("bsf eax, [edx] "); /* find LSB of low dword */
	asm("jnz short slec_3 "); /* skip if low dword nonzero */
	asm("bsf eax, [edx+4] "); /* else find LSB of high dword */
	asm("lea eax, [eax+32] "); /* add 32 to eax without changing flags */
	asm("jnz short slec_3 "); /* skip if high dword nonzero */
	asm("mov eax, 0x7f "); /* else set EAX = 0x7F */
	asm("cmp cl, al "); /* check order of this lock against lowest currently held order */
	asm("jl short slec_ok "); /* if this lock has lower order, OK - signed comparison so EOrderNone always works */
	asm("int 0xff "); /* ordering violation - die */
extern "C" __NAKED__ void spin_lock_mark_acq()
	/* ecx points to lock */
	asm("mov edx, ds:[%0]" : : "i"(X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ID));
	asm("mov edx, [edx*4+%0]" : : "i"(&SubSchedulerLookupTable));
	asm("cmp edx, 0 "); /* Skip checks if subschedulers not yet initialised */
	asm("je short slma_ok ");
	asm("test edx, 3 "); /* Skip checks if subscheduler for this CPU not yet initialised */
	asm("jnz short slma_ok ");
	asm("mov eax, [edx+%0]" : : "i" _FOFF(TSubScheduler, iCpuNum));
	asm("mov [ecx+7], al "); /* set byte 7 to holding CPU number */
	asm("movzx ecx, byte ptr [ecx+6] "); /* CL = order */
	asm("cmp ecx, 0x40 ");
	asm("jae short slma_ok "); /* if EOrderNone, done */
	asm("bts [edx+%0], ecx" : : "i" _FOFF(TSubScheduler, iSpinLockOrderCheck));
extern "C" __NAKED__ void spin_unlock_entry_check()
	/* ecx points to lock */
	asm("mov edx, ds:[%0]" : : "i"(X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ID));
	asm("mov edx, [edx*4+%0]" : : "i"(&SubSchedulerLookupTable));
	asm("cmp edx, 0 "); /* Skip checks if subschedulers not yet initialised */
	asm("je short suec_ok ");
	asm("test edx, 3 "); /* Skip checks if subscheduler for this CPU not yet initialised */
	asm("jnz short suec_ok ");
	asm("mov eax, [edx+%0]" : : "i" _FOFF(TSubScheduler, iCpuNum)); /* eax = current CPU number */
	asm("shl eax, 8 "); /* AL = 0, AH = current CPU number */
	asm("xor ax, [ecx+6] "); /* AL = order, AH = holding CPU ^ current CPU number */
	asm("cmp al, 0x20 ");
	asm("jae short suec_preemption "); /* This lock requires preemption to be disabled */
	/* check interrupts disabled */
	asm("test dword ptr [esp], 0x200 "); /* Interrupts enabled? */
	asm("jz short suec_1 "); /* No - OK */
	asm("int 0xff "); /* Yes - die */
	asm("suec_preemption: ");
	asm("cmp al, 0xff ");
	asm("je short suec_1 "); /* EOrderNone - don't check interrupts or preemption */
	asm("cmp dword ptr [edx+%0], 0" : : "i" _FOFF(TSubScheduler, iKernLockCount));
	asm("jnz short suec_1 "); /* Preemption disabled - OK */
	asm("int 0xff "); /* Preemption enabled - die */
	asm("cmp ah, 0 "); /* Check if holding CPU ^ current CPU number == 0 */
	asm("jz short suec_2 "); /* Already held by this CPU - OK */
	asm("int 0xff "); /* We don't hold lock - die */
	asm("mov byte ptr [ecx+7], 0xff "); /* reset holding CPU */
	asm("cmp eax, 0x40 "); /* EAX = lock order */
	asm("jae short suec_ok "); /* if EOrderNone, done */
	asm("btr [edx+%0], eax" : : "i" _FOFF(TSubScheduler, iSpinLockOrderCheck));
	asm("jc short suec_ok "); /* bit should have been set originally */
	asm("int 0xff "); /* if not, die - something must have got corrupted */
/******************************************************************************
 * Plain old spin lock
 * Fundamental algorithm:
 * lock() { old_in = in++; while(out!=old_in) __chill(); }
 * unlock() { ++out; }
 * [this+0] in count (byte)
 * [this+1] out count (byte)
 * [this+6] order value
 * [this+7] holding CPU number, 0xFF if none
 ******************************************************************************/
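/* Illustrative sketch (not part of the original source): a rough C++11 rendering
 * of the in/out "ticket" algorithm described above, with std::atomic standing in
 * for the LOCK-prefixed instructions and the __chill()/pause step omitted. The
 * class and member names are invented for this sketch; it is kept under #if 0
 * because standard <atomic> is not something this kernel-side file would use.
 */
#if 0
#include <atomic>

class TTicketSpinLockSketch
	{
public:
	void Lock()
		{
		// al = in++ : take the next ticket (the "lock xadd [ecx], al" below)
		TUint8 myTicket = iIn.fetch_add(1, std::memory_order_relaxed);
		// spin until the out count reaches our ticket
		while (iOut.load(std::memory_order_acquire) != myTicket)
			{ /* __chill() / X86_PAUSE would go here */ }
		}
	void Unlock()
		{
		// ++out : hand the lock to the next waiter (the "lock inc byte ptr [ecx+1]" below)
		iOut.fetch_add(1, std::memory_order_release);
		}
private:
	std::atomic<TUint8> iIn{0};		// [this+0] in count
	std::atomic<TUint8> iOut{0};	// [this+1] out count
	};
#endif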
__NAKED__ EXPORT_C void TSpinLock::LockIrq()
	SPIN_LOCK_ENTRY_CHECK()
	asm("lock xadd [ecx], al "); /* al = in++ */
	asm("sl_lockirq_loop: ");
	asm("cmp al, [ecx+1] "); /* compare al to out */
	asm("jnz short sl_lockirq_loop2 ");
	asm("lock add dword ptr [esp], 0 "); /* make sure subsequent accesses don't happen until lock acquired */
	asm("sl_lockirq_loop2: ");
	asm("jmp short sl_lockirq_loop ");
__NAKED__ EXPORT_C void TSpinLock::UnlockIrq()
	SPIN_UNLOCK_ENTRY_CHECK()
	asm("lock inc byte ptr [ecx+1] "); /* ++out */
extern "C" TBool __fastcall spin_lock_flash_irq(TSpinLock* a)
__NAKED__ EXPORT_C TBool TSpinLock::FlashIrq()
	asm("mov ax, [ecx] ");
	asm("and eax, 0xff ");
	asm("jne %a0" : : "i" (&spin_lock_flash_irq));
__NAKED__ EXPORT_C void TSpinLock::LockOnly()
	SPIN_LOCK_ENTRY_CHECK()
	asm("lock xadd [ecx], al "); /* al = in++ */
	asm("sl_lockonly_loop: ");
	asm("cmp al, [ecx+1] "); /* compare al to out */
	asm("jnz short sl_lockonly_loop2 ");
	asm("lock add dword ptr [esp], 0 "); /* make sure subsequent accesses don't happen until lock acquired */
	asm("sl_lockonly_loop2: ");
	asm("jmp short sl_lockonly_loop ");
__NAKED__ EXPORT_C void TSpinLock::UnlockOnly()
	SPIN_UNLOCK_ENTRY_CHECK()
	asm("lock inc byte ptr [ecx+1] "); /* ++out */
extern "C" TBool __fastcall spin_lock_flash_only(TSpinLock* a)
__NAKED__ EXPORT_C TBool TSpinLock::FlashOnly()
	asm("mov ax, [ecx] ");
	asm("and eax, 0xff ");
	asm("jne %a0" : : "i" (&spin_lock_flash_only));
__NAKED__ EXPORT_C TInt TSpinLock::LockIrqSave()
	SPIN_LOCK_ENTRY_CHECK()
	asm("lock xadd [ecx], al "); /* al = in++ */
	asm("sl_lockirqs_loop: ");
	asm("cmp al, [ecx+1] "); /* compare al to out */
	asm("jnz short sl_lockirqs_loop2 ");
	asm("lock add dword ptr [esp], 0 "); /* make sure subsequent accesses don't happen until lock acquired */
	asm("pop eax "); /* retrieve saved EFLAGS */
	asm("and eax, 0x200 "); /* return just interrupt mask bit */
	asm("sl_lockirqs_loop2: ");
	asm("jmp short sl_lockirqs_loop ");
__NAKED__ EXPORT_C void TSpinLock::UnlockIrqRestore(TInt)
	SPIN_UNLOCK_ENTRY_CHECK()
	asm("lock inc byte ptr [ecx+1] "); /* ++out */
	asm("test dword ptr [esp+4], 0x200 ");
	asm("jz short sl_unlockirqr_1 ");
	asm("sl_unlockirqr_1: ");
__NAKED__ EXPORT_C TBool TSpinLock::FlashIrqRestore(TInt)
	/* don't mess with stacked args, yet */
	asm("mov ax, [ecx] ");
	asm("and eax, 0xff ");
	asm("jne short sl_flashirqr_1 ");
	/* now we can remove stacked arg since we don't need it */
	asm("sl_flashirqr_1: ");
	asm("test dword ptr [esp+4], 0x200 ");
	asm("jnz short sl_flashirqr_2 ");
	asm("call %a0" : : "i" (&spin_lock_flash_only));
	asm("jmp short sl_flashirqr_3 ");
	asm("sl_flashirqr_2: ");
	asm("call %a0" : : "i" (&spin_lock_flash_irq));
	asm("sl_flashirqr_3: ");
extern "C" TBool __fastcall spin_lock_flash_preempt(TSpinLock* a)
	NKern::PreemptionPoint();
__NAKED__ EXPORT_C TBool TSpinLock::FlashPreempt()
	asm("mov ax, [ecx] ");
	asm("and eax, 0xff ");
	asm("jne %a0" : : "i" (&spin_lock_flash_preempt));
/******************************************************************************
 * Read/Write Spin lock
 * Structure ( (in.r,in.w) , (out.r,out.w) )
 * Fundamental algorithm:
 * lockr() { old_in = (in.r++,in.w); while(out.w!=old_in.w) __chill(); }
 * unlockr() { ++out.r; }
 * lockw() { old_in = (in.r,in.w++); while(out!=old_in) __chill(); }
 * unlockw() { ++out.w; }
 * [this+4] Bit mask of CPUs which hold read locks
 * [this+6] order value
 * [this+7] CPU number which holds write lock, 0xFF if none
 ******************************************************************************/
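/* Illustrative sketch (not part of the original source): a rough C++11 rendering
 * of the read/write ticket algorithm described above. The layout follows the
 * comments in the routines below (byte 0 = in.w, byte 1 = in.r, byte 2 = out.w,
 * byte 3 = out.r); std::atomic stands in for the LOCK-prefixed instructions, the
 * __chill()/pause step is omitted, and all names are invented for this sketch.
 */
#if 0
#include <atomic>

class TRWTicketSpinLockSketch
	{
public:
	void LockR()
		{
		// in.r++ while sampling in.w (the "lock xadd [ecx], ax" with ax=0x100 below)
		TUint16 oldIn = iIn.fetch_add(0x100, std::memory_order_relaxed);
		// readers only wait for earlier writers: spin until out.w == old in.w
		while (TUint8(iOut.load(std::memory_order_acquire)) != TUint8(oldIn))
			{ }
		}
	void UnlockR()
		{
		iOut.fetch_add(0x100, std::memory_order_release);	// ++out.r
		}
	void LockW()
		{
		// in.w++ with in.r unchanged (the "inc dl" / "lock cmpxchg [ecx], dx" loop below)
		TUint16 oldIn = iIn.load(std::memory_order_relaxed);
		TUint16 newIn;
		do	{
			newIn = TUint16((oldIn & 0xff00) | ((oldIn + 1) & 0xff));
			} while (!iIn.compare_exchange_weak(oldIn, newIn, std::memory_order_relaxed));
		// writers wait for all earlier readers and writers: spin until out == old in
		while (iOut.load(std::memory_order_acquire) != oldIn)
			{ }
		}
	void UnlockW()
		{
		// ++out.w with out.r unchanged (a cmpxchg loop in the routines below)
		TUint16 oldOut = iOut.load(std::memory_order_relaxed);
		TUint16 newOut;
		do	{
			newOut = TUint16((oldOut & 0xff00) | ((oldOut + 1) & 0xff));
			} while (!iOut.compare_exchange_weak(oldOut, newOut, std::memory_order_release));
		}
private:
	std::atomic<TUint16> iIn{0};	// byte 0 = in.w, byte 1 = in.r
	std::atomic<TUint16> iOut{0};	// byte 0 = out.w, byte 1 = out.r
	};
#endif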
#if defined(__INCLUDE_SPIN_LOCK_CHECKS__)
extern "C" __NAKED__ void rwspin_rlock_entry_check()
	/* ecx points to lock */
	asm("mov edx, ds:[%0]" : : "i"(X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ID));
	asm("mov edx, [edx*4+%0]" : : "i" (&SubSchedulerLookupTable));
	asm("cmp edx, 0 "); /* Skip checks if subschedulers not yet initialised */
	asm("je short rwrlec_ok ");
	asm("test edx, 3 "); /* Skip checks if subscheduler for this CPU not yet initialised */
	asm("jnz short rwrlec_ok ");
	asm("movzx ecx, word ptr [ecx+6] "); /* CL = order, CH = holding CPU for write lock */
	asm("cmp cl, 0x20 ");
	asm("jae short rwrlec_preemption "); /* This lock requires preemption to be disabled */
	/* check interrupts disabled */
	asm("test dword ptr [esp], 0x200 "); /* Interrupts enabled? */
	asm("jz short rwrlec_1 "); /* No - OK */
	asm("int 0xff "); /* Yes - die */
	asm("rwrlec_preemption: ");
	asm("cmp cl, 0xff ");
	asm("je short rwrlec_1 "); /* EOrderNone - don't check interrupts or preemption */
	asm("cmp dword ptr [edx+52+%0], 0" : : "i"_FOFF(TSubScheduler, iExtras));
	asm("jge short rwrlec_preemption_die "); /* If called from ISR, die */
	asm("cmp dword ptr [edx+%0], 0" : : "i" _FOFF(TSubScheduler, iKernLockCount));
	asm("jnz short rwrlec_1 "); /* Preemption disabled - OK */
	asm("rwrlec_preemption_die: ");
	asm("int 0xff "); /* Preemption enabled - die */
	asm("lea eax, [edx+%0]" : : "i" _FOFF(TSubScheduler, iCpuNum));
	asm("cmp ch, [eax] ");
	asm("jnz short rwrlec_2 "); /* Not already held by this CPU for write - OK */
	asm("int 0xff "); /* Already held by this CPU for write - die */
	asm("mov eax, [edx+%0]" : : "i" _FOFF(TSubScheduler, iCpuMask));
	asm("test al, [ecx+4] "); /* Test if already held by this CPU for read */
	asm("jz short rwrlec_3 ");
	asm("int 0xff "); /* if so, die */
	asm("lea edx, [edx+%0]" : : "i" _FOFF(TSubScheduler, iSpinLockOrderCheck));
	asm("bsf eax, [edx] "); /* find LSB of low dword */
	asm("jnz short rwrlec_3 "); /* skip if low dword nonzero */
	asm("bsf eax, [edx+4] "); /* else find LSB of high dword */
	asm("lea eax, [eax+32] "); /* add 32 to eax without changing flags */
	asm("jnz short rwrlec_4 "); /* skip if high dword nonzero */
	asm("mov eax, 0x7f "); /* else set EAX = 0x7F */
	asm("cmp cl, al "); /* check order of this lock against lowest currently held order */
	asm("jl short rwrlec_ok "); /* if this lock has lower order, OK - signed comparison so EOrderNone always works */
	asm("int 0xff "); /* ordering violation - die */
extern "C" __NAKED__ void rwspin_rlock_mark_acq()
	/* ecx points to lock */
	asm("mov edx, ds:[%0]" : : "i"(X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ID));
	asm("mov edx, [edx*4+%0]" : : "i"(&SubSchedulerLookupTable));
	asm("cmp edx, 0 "); /* Skip checks if subschedulers not yet initialised */
	asm("je short rwrlma_ok ");
	asm("test edx, 3 "); /* Skip checks if subscheduler for this CPU not yet initialised */
	asm("jnz short rwrlma_ok ");
	asm("mov eax, [edx+%0]" : : "i" _FOFF(TSubScheduler, iCpuMask));
	asm("lock or [ecx+4], al "); /* set bit in byte 4 corresponding to this CPU */
	asm("movzx ecx, byte ptr [ecx+6] "); /* CL = order */
	asm("cmp ecx, 0x40 ");
	asm("jae short rwrlma_ok "); /* if EOrderNone, done */
	asm("bts [edx+%0], ecx" : : "i" _FOFF(TSubScheduler, iSpinLockOrderCheck));
extern "C" __NAKED__ void rwspin_runlock_entry_check()
	/* ecx points to lock */
	asm("mov edx, ds:[%0]" : : "i"(X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ID));
	asm("mov edx, [edx*4+%0]" : : "i"(&SubSchedulerLookupTable));
	asm("cmp edx, 0 "); /* Skip checks if subschedulers not yet initialised */
	asm("je short rwruec_ok ");
	asm("test edx, 3 "); /* Skip checks if subscheduler for this CPU not yet initialised */
	asm("jnz short rwruec_ok ");
	asm("mov eax, [ecx+4] "); /* AL = R-mask, EAX byte 2 = order */
	asm("and eax, 0x00ffffff "); /* mask out W CPU */
	asm("cmp eax, 0x00200000 ");
	asm("jae short rwruec_preemption "); /* This lock requires preemption to be disabled */
	/* check interrupts disabled */
	asm("test dword ptr [esp], 0x200 "); /* Interrupts enabled? */
	asm("jz short rwruec_1 "); /* No - OK */
	asm("int 0xff "); /* Yes - die */
	asm("rwruec_preemption: ");
	asm("cmp eax, 0x00ff0000 ");
	asm("jae short rwruec_1 "); /* EOrderNone - don't check interrupts or preemption */
	asm("cmp dword ptr [edx+%0], 0" : : "i" _FOFF(TSubScheduler, iKernLockCount));
	asm("jnz short rwruec_1 "); /* Preemption disabled - OK */
	asm("int 0xff "); /* Preemption enabled - die */
	asm("mov ebx, [edx+%0]" : : "i" _FOFF(TSubScheduler, iCpuMask));
	asm("test al, bl "); /* Check if current CPU holds read lock */
	asm("jnz short rwruec_2 "); /* Already held by this CPU - OK */
	asm("int 0xff "); /* We don't hold lock - die */
	asm("lock and [ecx+4], bl "); /* clear bit in R-holding CPU mask */
	asm("shr eax, 16 "); /* EAX = lock order */
	asm("cmp eax, 0x40 ");
	asm("jae short rwruec_ok "); /* if EOrderNone, done */
	asm("btr [edx+%0], eax" : : "i" _FOFF(TSubScheduler, iSpinLockOrderCheck));
	asm("jc short rwruec_ok "); /* bit should have been set originally */
	asm("int 0xff "); /* if not, die - something must have got corrupted */
extern "C" __NAKED__ void rwspin_wlock_entry_check()
	/* ecx points to lock */
	asm("mov edx, ds:[%0]" : : "i"(X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ID));
	asm("mov edx, [edx*4+%0]" : : "i" (&SubSchedulerLookupTable));
	asm("cmp edx, 0 "); /* Skip checks if subschedulers not yet initialised */
	asm("je short rwwlec_ok ");
	asm("test edx, 3 "); /* Skip checks if subscheduler for this CPU not yet initialised */
	asm("jnz short rwwlec_ok ");
	asm("movzx ecx, word ptr [ecx+6] "); /* CL = order, CH = write lock holding CPU */
	asm("cmp cl, 0x20 ");
	asm("jae short rwwlec_preemption "); /* This lock requires preemption to be disabled */
	/* check interrupts disabled */
	asm("test dword ptr [esp], 0x200 "); /* Interrupts enabled? */
	asm("jz short rwwlec_1 "); /* No - OK */
	asm("int 0xff "); /* Yes - die */
	asm("rwwlec_preemption: ");
	asm("cmp cl, 0xff ");
	asm("je short rwwlec_1 "); /* EOrderNone - don't check interrupts or preemption */
	asm("cmp dword ptr [edx+52+%0], 0" : : "i"_FOFF(TSubScheduler, iExtras));
	asm("jge short rwwlec_preemption_die "); /* If called from ISR, die */
	asm("cmp dword ptr [edx+%0], 0" : : "i" _FOFF(TSubScheduler, iKernLockCount));
	asm("jnz short rwwlec_1 "); /* Preemption disabled - OK */
	asm("rwwlec_preemption_die: ");
	asm("int 0xff "); /* Preemption enabled - die */
	asm("lea eax, [edx+%0]" : : "i" _FOFF(TSubScheduler, iCpuNum));
	asm("cmp ch, [eax] ");
	asm("jnz short rwwlec_2 "); /* Not already held by this CPU for write - OK */
	asm("int 0xff "); /* Already held by this CPU for write - die */
	asm("mov eax, [edx+%0]" : : "i" _FOFF(TSubScheduler, iCpuMask));
	asm("test al, [ecx+4] "); /* Test if already held by this CPU for read */
	asm("jz short rwwlec_3 ");
	asm("int 0xff "); /* if so, die */
	asm("lea edx, [edx+%0]" : : "i" _FOFF(TSubScheduler, iSpinLockOrderCheck));
	asm("bsf eax, [edx] "); /* find LSB of low dword */
	asm("jnz short rwwlec_4 "); /* skip if low dword nonzero */
	asm("bsf eax, [edx+4] "); /* else find LSB of high dword */
	asm("lea eax, [eax+32] "); /* add 32 to eax without changing flags */
	asm("jnz short rwwlec_4 "); /* skip if high dword nonzero */
	asm("mov eax, 0x7f "); /* else set EAX = 0x7F */
	asm("cmp cl, al "); /* check order of this lock against lowest currently held order */
	asm("jl short rwwlec_ok "); /* if this lock has lower order, OK - signed comparison so EOrderNone always works */
	asm("int 0xff "); /* ordering violation - die */
extern "C" __NAKED__ void rwspin_wlock_mark_acq()
	/* ecx points to lock */
	asm("mov edx, ds:[%0]" : : "i"(X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ID));
	asm("mov edx, [edx*4+%0]" : : "i"(&SubSchedulerLookupTable));
	asm("cmp edx, 0 "); /* Skip checks if subschedulers not yet initialised */
	asm("je short rwwlma_ok ");
	asm("test edx, 3 "); /* Skip checks if subscheduler for this CPU not yet initialised */
	asm("jnz short rwwlma_ok ");
	asm("mov eax, [edx+%0]" : : "i" _FOFF(TSubScheduler, iCpuNum));
	asm("mov [ecx+7], al "); /* set byte 7 to holding CPU number */
	asm("movzx ecx, byte ptr [ecx+6] "); /* CL = order */
	asm("cmp ecx, 0x40 ");
	asm("jae short rwwlma_ok "); /* if EOrderNone, done */
	asm("bts [edx+%0], ecx" : : "i" _FOFF(TSubScheduler, iSpinLockOrderCheck));
extern "C" __NAKED__ void rwspin_wunlock_entry_check()
	/* ecx points to lock */
	asm("mov edx, ds:[%0]" : : "i"(X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ID));
	asm("mov edx, [edx*4+%0]" : : "i"(&SubSchedulerLookupTable));
	asm("cmp edx, 0 "); /* Skip checks if subschedulers not yet initialised */
	asm("je short rwwuec_ok ");
	asm("test edx, 3 "); /* Skip checks if subscheduler for this CPU not yet initialised */
	asm("jnz short rwwuec_ok ");
	asm("mov eax, [edx+%0]" : : "i" _FOFF(TSubScheduler, iCpuNum)); /* eax = current CPU number */
	asm("shl eax, 8 "); /* AL = 0, AH = current CPU number */
	asm("xor ax, [ecx+6] "); /* AL = order, AH = holding CPU ^ current CPU number */
	asm("cmp al, 0x20 ");
	asm("jae short rwwuec_preemption "); /* This lock requires preemption to be disabled */
	/* check interrupts disabled */
	asm("test dword ptr [esp], 0x200 "); /* Interrupts enabled? */
	asm("jz short rwwuec_1 "); /* No - OK */
	asm("int 0xff "); /* Yes - die */
	asm("rwwuec_preemption: ");
	asm("cmp al, 0xff ");
	asm("je short rwwuec_1 "); /* EOrderNone - don't check interrupts or preemption */
	asm("cmp dword ptr [edx+%0], 0" : : "i" _FOFF(TSubScheduler, iKernLockCount));
	asm("jnz short rwwuec_1 "); /* Preemption disabled - OK */
	asm("int 0xff "); /* Preemption enabled - die */
	asm("cmp ah, 0 "); /* Check if holding CPU ^ current CPU number == 0 */
	asm("jz short rwwuec_2 "); /* Already held by this CPU - OK */
	asm("int 0xff "); /* We don't hold lock - die */
	asm("mov byte ptr [ecx+7], 0xff "); /* reset holding CPU */
	asm("cmp eax, 0x40 "); /* EAX = lock order */
	asm("jae short rwwuec_ok "); /* if EOrderNone, done */
	asm("btr [edx+%0], eax" : : "i" _FOFF(TSubScheduler, iSpinLockOrderCheck));
	asm("jc short rwwuec_ok "); /* bit should have been set originally */
	asm("int 0xff "); /* if not, die - something must have got corrupted */
/*-----------------------------------------------------------------------------
 - Read locks disabling IRQ
 -----------------------------------------------------------------------------*/
__NAKED__ EXPORT_C void TRWSpinLock::LockIrqR()
	RWSPIN_RLOCK_ENTRY_CHECK()
	asm("mov ax, 0x100 ");
	asm("lock xadd [ecx], ax "); /* ah = in.r++, al = in.w */
	asm("rwl_rlockirq_loop: ");
	asm("cmp al, [ecx+2] "); /* compare al to out.w */
	asm("jnz short rwl_rlockirq_loop2 ");
	RWSPIN_RLOCK_MARK_ACQ()
	asm("lock add dword ptr [esp], 0 "); /* make sure subsequent accesses don't happen until lock acquired */
	asm("rwl_rlockirq_loop2: ");
	asm("jmp short rwl_rlockirq_loop ");
__NAKED__ EXPORT_C void TRWSpinLock::UnlockIrqR()
	RWSPIN_RUNLOCK_ENTRY_CHECK()
	asm("lock add word ptr [ecx+2], 0x100 "); /* ++out.r */
extern "C" TBool __fastcall rwspin_rlock_flash_irq(TRWSpinLock* a)
__NAKED__ EXPORT_C TBool TRWSpinLock::FlashIrqR()
	asm("mov eax, [ecx] "); /* al=in.w, ah=in.r, byte2=out.w, byte3=out.r */
	asm("mov edx, eax ");
	asm("shr edx, 16 "); /* dl=out.w */
	asm("xor eax, edx "); /* al = in.w ^ out.w = 0 if no writers waiting */
	asm("and eax, 0xff ");
	asm("jne %a0" : : "i" (&rwspin_rlock_flash_irq));
/*-----------------------------------------------------------------------------
 - Write locks disabling IRQ
 -----------------------------------------------------------------------------*/
__NAKED__ EXPORT_C void TRWSpinLock::LockIrqW()
	RWSPIN_WLOCK_ENTRY_CHECK()
	asm("mov ax, [ecx] "); /* ah = in.r, al = in.w */
	asm("rwl_wlockirq_loop3: ");
	asm("mov edx, eax ");
	asm("inc dl "); /* dh = in.r, dl = in.w+1 */
	asm("lock cmpxchg [ecx], dx "); /* attempt to update in.w */
	asm("jne short rwl_wlockirq_loop3 "); /* loop if failed */
	asm("rwl_wlockirq_loop: ");
	asm("cmp ax, [ecx+2] "); /* compare ax to (out.w,out.r) */
	asm("jnz short rwl_wlockirq_loop2 ");
	RWSPIN_WLOCK_MARK_ACQ()
	asm("lock add dword ptr [esp], 0 "); /* make sure subsequent accesses don't happen until lock acquired */
	asm("rwl_wlockirq_loop2: ");
	asm("jmp short rwl_wlockirq_loop ");
__NAKED__ EXPORT_C void TRWSpinLock::UnlockIrqW()
	RWSPIN_WUNLOCK_ENTRY_CHECK()
	asm("mov ax, [ecx+2] "); /* ah = out.r, al = out.w */
	asm("rwl_wunlockirq_loop: ");
	asm("mov edx, eax ");
	asm("inc dl "); /* dh = out.r, dl = out.w+1 */
	asm("lock cmpxchg [ecx+2], dx "); /* attempt to update out.w */
	asm("jne short rwl_wunlockirq_loop "); /* loop if failed */
extern "C" TBool __fastcall rwspin_wlock_flash_irq(TRWSpinLock* a)
__NAKED__ EXPORT_C TBool TRWSpinLock::FlashIrqW()
	asm("mov eax, [ecx] "); /* al=in.w, ah=in.r, byte2=out.w, byte3=out.r */
	asm("mov edx, eax ");
	asm("shr edx, 16 "); /* dl=out.w, dh=out.r */
	asm("inc dl "); /* dx==ax now means no-one else is waiting for lock */
	asm("xor eax, edx ");
	asm("and eax, 0xffff ");
	asm("jne %a0" : : "i" (&rwspin_wlock_flash_irq));
/*-----------------------------------------------------------------------------
 - Read locks leaving IRQ alone
 -----------------------------------------------------------------------------*/
__NAKED__ EXPORT_C void TRWSpinLock::LockOnlyR()
	RWSPIN_RLOCK_ENTRY_CHECK()
	asm("mov ax, 0x100 ");
	asm("lock xadd [ecx], ax "); /* ah = in.r++, al = in.w */
	asm("rwl_rlockonly_loop: ");
	asm("cmp al, [ecx+2] "); /* compare al to out.w */
	asm("jnz short rwl_rlockonly_loop2 ");
	RWSPIN_RLOCK_MARK_ACQ()
	asm("lock add dword ptr [esp], 0 "); /* make sure subsequent accesses don't happen until lock acquired */
	asm("rwl_rlockonly_loop2: ");
	asm("jmp short rwl_rlockonly_loop ");
__NAKED__ EXPORT_C void TRWSpinLock::UnlockOnlyR()
	RWSPIN_RUNLOCK_ENTRY_CHECK()
	asm("lock add word ptr [ecx+2], 0x100 "); /* ++out.r */
extern "C" TBool __fastcall rwspin_rlock_flash_only(TRWSpinLock* a)
__NAKED__ EXPORT_C TBool TRWSpinLock::FlashOnlyR()
	asm("mov eax, [ecx] "); /* al=in.w, ah=in.r, byte2=out.w, byte3=out.r */
	asm("mov edx, eax ");
	asm("shr edx, 16 "); /* dl=out.w */
	asm("xor eax, edx "); /* al = in.w ^ out.w = 0 if no writers waiting */
	asm("and eax, 0xff ");
	asm("jne %a0" : : "i" (&rwspin_rlock_flash_only));
/*-----------------------------------------------------------------------------
 - Write locks leaving IRQ alone
 -----------------------------------------------------------------------------*/
__NAKED__ EXPORT_C void TRWSpinLock::LockOnlyW()
	RWSPIN_WLOCK_ENTRY_CHECK()
	asm("mov ax, [ecx] "); /* ah = in.r, al = in.w */
	asm("rwl_wlockonly_loop3: ");
	asm("mov edx, eax ");
	asm("inc dl "); /* dh = in.r, dl = in.w+1 */
	asm("lock cmpxchg [ecx], dx "); /* attempt to update in.w */
	asm("jne short rwl_wlockonly_loop3 "); /* loop if failed */
	asm("rwl_wlockonly_loop: ");
	asm("cmp ax, [ecx+2] "); /* compare ax to (out.w,out.r) */
	asm("jnz short rwl_wlockonly_loop2 ");
	RWSPIN_WLOCK_MARK_ACQ()
	asm("lock add dword ptr [esp], 0 "); /* make sure subsequent accesses don't happen until lock acquired */
	asm("rwl_wlockonly_loop2: ");
	asm("jmp short rwl_wlockonly_loop ");
__NAKED__ EXPORT_C void TRWSpinLock::UnlockOnlyW()
	RWSPIN_WUNLOCK_ENTRY_CHECK()
	asm("mov ax, [ecx+2] "); /* ah = out.r, al = out.w */
	asm("rwl_wunlockonly_loop: ");
	asm("mov edx, eax ");
	asm("inc dl "); /* dh = out.r, dl = out.w+1 */
	asm("lock cmpxchg [ecx+2], dx "); /* attempt to update out.w */
	asm("jne short rwl_wunlockonly_loop "); /* loop if failed */
extern "C" TBool __fastcall rwspin_wlock_flash_only(TRWSpinLock* a)
__NAKED__ EXPORT_C TBool TRWSpinLock::FlashOnlyW()
	asm("mov eax, [ecx] "); /* al=in.w, ah=in.r, byte2=out.w, byte3=out.r */
	asm("mov edx, eax ");
	asm("shr edx, 16 "); /* dl=out.w, dh=out.r */
	asm("inc dl "); /* dx==ax now means no-one else is waiting for lock */
	asm("xor eax, edx ");
	asm("and eax, 0xffff ");
	asm("jne %a0" : : "i" (&rwspin_wlock_flash_only));
/*-----------------------------------------------------------------------------
 - Read locks disabling IRQ with save/restore IRQ state
 -----------------------------------------------------------------------------*/
__NAKED__ EXPORT_C TInt TRWSpinLock::LockIrqSaveR()
	RWSPIN_RLOCK_ENTRY_CHECK()
	asm("mov ax, 0x100 ");
	asm("lock xadd [ecx], ax "); /* ah = in.r++, al = in.w */
	asm("rwl_rlockirqs_loop: ");
	asm("cmp al, [ecx+2] "); /* compare al to out.w */
	asm("jnz short rwl_rlockirqs_loop2 ");
	RWSPIN_RLOCK_MARK_ACQ()
	asm("lock add dword ptr [esp], 0 "); /* make sure subsequent accesses don't happen until lock acquired */
	asm("pop eax "); /* retrieve saved EFLAGS */
	asm("and eax, 0x200 "); /* return just interrupt mask bit */
	asm("rwl_rlockirqs_loop2: ");
	asm("jmp short rwl_rlockirqs_loop ");
__NAKED__ EXPORT_C void TRWSpinLock::UnlockIrqRestoreR(TInt)
	RWSPIN_RUNLOCK_ENTRY_CHECK()
	asm("lock add word ptr [ecx+2], 0x100 "); /* ++out.r */
	asm("test dword ptr [esp+4], 0x200 ");
	asm("jz short rwl_runlockirqr_1 ");
	asm("rwl_runlockirqr_1: ");
__NAKED__ EXPORT_C TBool TRWSpinLock::FlashIrqRestoreR(TInt)
	/* don't mess with stacked args, yet */
	asm("mov eax, [ecx] "); /* al=in.w, ah=in.r, byte2=out.w, byte3=out.r */
	asm("mov edx, eax ");
	asm("shr edx, 16 "); /* dl=out.w */
	asm("xor eax, edx "); /* al = in.w ^ out.w = 0 if no writers waiting */
	asm("and eax, 0xff ");
	asm("jne short rwl_rflashirqr_1 ");
	/* now we can remove stacked arg since we don't need it */
	asm("rwl_rflashirqr_1: ");
	asm("test dword ptr [esp+4], 0x200 ");
	asm("jnz short rwl_rflashirqr_2 ");
	asm("call %a0" : : "i" (&rwspin_rlock_flash_only));
	asm("jmp short rwl_rflashirqr_3 ");
	asm("rwl_rflashirqr_2: ");
	asm("call %a0" : : "i" (&rwspin_rlock_flash_irq));
	asm("rwl_rflashirqr_3: ");
/*-----------------------------------------------------------------------------
 - Write locks disabling IRQ with save/restore IRQ state
 -----------------------------------------------------------------------------*/
__NAKED__ EXPORT_C TInt TRWSpinLock::LockIrqSaveW()
	RWSPIN_WLOCK_ENTRY_CHECK()
	asm("mov ax, [ecx] "); /* ah = in.r, al = in.w */
	asm("rwl_wlockirqs_loop3: ");
	asm("mov edx, eax ");
	asm("inc dl "); /* dh = in.r, dl = in.w+1 */
	asm("lock cmpxchg [ecx], dx "); /* attempt to update in.w */
	asm("jne short rwl_wlockirqs_loop3 "); /* loop if failed */
	asm("rwl_wlockirqs_loop: ");
	asm("cmp ax, [ecx+2] "); /* compare ax to (out.w,out.r) */
	asm("jnz short rwl_wlockirqs_loop2 ");
	RWSPIN_WLOCK_MARK_ACQ()
	asm("lock add dword ptr [esp], 0 "); /* make sure subsequent accesses don't happen until lock acquired */
	asm("pop eax "); /* retrieve saved EFLAGS */
	asm("and eax, 0x200 "); /* return just interrupt mask bit */
	asm("rwl_wlockirqs_loop2: ");
	asm("jmp short rwl_wlockirqs_loop ");
__NAKED__ EXPORT_C void TRWSpinLock::UnlockIrqRestoreW(TInt)
	RWSPIN_WUNLOCK_ENTRY_CHECK()
	asm("mov ax, [ecx+2] "); /* ah = out.r, al = out.w */
	asm("rwl_wunlockirqr_loop: ");
	asm("mov edx, eax ");
	asm("inc dl "); /* dh = out.r, dl = out.w+1 */
	asm("lock cmpxchg [ecx+2], dx "); /* attempt to update out.w */
	asm("jne short rwl_wunlockirqr_loop "); /* loop if failed */
	asm("test dword ptr [esp+4], 0x200 ");
	asm("jz short rwl_wunlockirqr_1 ");
	asm("rwl_wunlockirqr_1: ");
__NAKED__ EXPORT_C TBool TRWSpinLock::FlashIrqRestoreW(TInt)
	/* don't mess with stacked args, yet */
	asm("mov eax, [ecx] "); /* al=in.w, ah=in.r, byte2=out.w, byte3=out.r */
	asm("mov edx, eax ");
	asm("shr edx, 16 "); /* dl=out.w, dh=out.r */
	asm("inc dl "); /* dx==ax now means no-one else is waiting for lock */
	asm("xor eax, edx ");
	asm("and eax, 0xffff ");
	asm("jne short rwl_wflashirqr_1 ");
	/* now we can remove stacked arg since we don't need it */
	asm("rwl_wflashirqr_1: ");
	asm("test dword ptr [esp+4], 0x200 ");
	asm("jnz short rwl_wflashirqr_2 ");
	asm("call %a0" : : "i" (&rwspin_wlock_flash_only));
	asm("jmp short rwl_wflashirqr_3 ");
	asm("rwl_wflashirqr_2: ");
	asm("call %a0" : : "i" (&rwspin_wlock_flash_irq));
	asm("rwl_wflashirqr_3: ");
/*-----------------------------------------------------------------------------
 - Read lock flash allowing preemption
 -----------------------------------------------------------------------------*/
extern "C" TBool __fastcall rwspin_rlock_flash_preempt(TRWSpinLock* a)
	NKern::PreemptionPoint();
__NAKED__ EXPORT_C TBool TRWSpinLock::FlashPreemptR()
	asm("mov eax, [ecx] "); /* al=in.w, ah=in.r, byte2=out.w, byte3=out.r */
	asm("mov edx, eax ");
	asm("shr edx, 16 "); /* dl=out.w */
	asm("xor eax, edx "); /* al = in.w ^ out.w = 0 if no writers waiting */
	asm("and eax, 0xff ");
	asm("jne %a0" : : "i" (&rwspin_rlock_flash_preempt));
/*-----------------------------------------------------------------------------
 - Write lock flash allowing preemption
 -----------------------------------------------------------------------------*/
extern "C" TBool __fastcall rwspin_wlock_flash_preempt(TRWSpinLock* a)
	NKern::PreemptionPoint();
__NAKED__ EXPORT_C TBool TRWSpinLock::FlashPreemptW()
	asm("mov eax, [ecx] "); /* al=in.w, ah=in.r, byte2=out.w, byte3=out.r */
	asm("mov edx, eax ");
	asm("shr edx, 16 "); /* dl=out.w, dh=out.r */
	asm("inc dl "); /* dx==ax now means no-one else is waiting for lock */
	asm("xor eax, edx ");
	asm("and eax, 0xffff ");
	asm("jne %a0" : : "i" (&rwspin_wlock_flash_preempt));