os/kernelhwsrv/kernel/eka/nkernsmp/x86/ncutilf.cia
author sl
Tue, 10 Jun 2014 14:32:02 +0200
changeset 1 260cb5ec6c19
permissions -rw-r--r--
Update contrib.
// Copyright (c) 2007-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\nkernsmp\x86\ncutilf.cia
// 
//

#include <x86.h>
#include <apic.h>

#if defined(__VC32__)
#define	__ASM_CALL(func)	_asm call func
#elif defined(__GCC32__)
#define	__ASM_CALL(func)	asm("call _" #func);
#else
#error Unknown x86 compiler
#endif

#if defined(__INCLUDE_SPIN_LOCK_CHECKS__)
#define	SPIN_LOCK_ENTRY_CHECK()			__ASM_CALL(spin_lock_entry_check)
#define	SPIN_LOCK_MARK_ACQ()			__ASM_CALL(spin_lock_mark_acq)
#define	SPIN_UNLOCK_ENTRY_CHECK()		__ASM_CALL(spin_unlock_entry_check)

#define	RWSPIN_RLOCK_ENTRY_CHECK()		__ASM_CALL(rwspin_rlock_entry_check)
#define	RWSPIN_RLOCK_MARK_ACQ()			__ASM_CALL(rwspin_rlock_mark_acq)
#define	RWSPIN_RUNLOCK_ENTRY_CHECK()	__ASM_CALL(rwspin_runlock_entry_check)

#define	RWSPIN_WLOCK_ENTRY_CHECK()		__ASM_CALL(rwspin_wlock_entry_check)
#define	RWSPIN_WLOCK_MARK_ACQ()			__ASM_CALL(rwspin_wlock_mark_acq)
#define	RWSPIN_WUNLOCK_ENTRY_CHECK()	__ASM_CALL(rwspin_wunlock_entry_check)

#else
#define	SPIN_LOCK_ENTRY_CHECK()
#define	SPIN_LOCK_MARK_ACQ()
#define	SPIN_UNLOCK_ENTRY_CHECK()

#define	RWSPIN_RLOCK_ENTRY_CHECK()
#define	RWSPIN_RLOCK_MARK_ACQ()
#define	RWSPIN_RUNLOCK_ENTRY_CHECK()

#define	RWSPIN_WLOCK_ENTRY_CHECK()
#define	RWSPIN_WLOCK_MARK_ACQ()
#define	RWSPIN_WUNLOCK_ENTRY_CHECK()

#endif


/******************************************************************************
 * Timestamp
 ******************************************************************************/

/** Returns a timestamp value which is consistent across CPUs.

*/
EXPORT_C __NAKED__ TUint64 NKern::Timestamp()
	{
	asm("pushfd ");
	asm("cli ");		// stop thread migration between reading APIC ID and thread pointer
	asm("mov ecx, ds:[%0]" : : "i" (X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ID));
	asm("shr ecx, 24 ");
	asm("mov ecx, [ecx*4+%0]" : : "i" (&SubSchedulerLookupTable));
	asm("cmp ecx, 0 ");
	asm("jz short use_tsc_only ");
	asm("test cl, 3 ");
	asm("jnz short use_tsc_only ");
	asm("rdtsc ");
	asm("add eax, [ecx+80+%0]" : : "i" _FOFF(TSubScheduler, iExtras));
	asm("adc edx, [ecx+84+%0]" : : "i" _FOFF(TSubScheduler, iExtras));
	asm("popfd ");
	asm("ret ");

	asm("use_tsc_only: ");
	asm("rdtsc ");
	asm("popfd ");
	asm("ret ");
	}
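
/*
 * Added note (commentary, not part of the original source): conceptually the
 * routine above returns
 *
 *		Timestamp() = RDTSC value + a per-CPU 64-bit correction held in this
 *					  CPU's TSubScheduler iExtras area
 *
 * so that timestamps taken on different CPUs can be compared directly.  The
 * "cli" stops the thread migrating between reading the APIC ID and applying
 * that CPU's correction, and the code falls back to the raw TSC while the
 * sub-scheduler entry for this CPU is not yet initialised.  A minimal C++
 * sketch of the idea (read_tsc() and TscOffset[] are hypothetical names used
 * only for illustration):
 *
 *		TUint64 TimestampSketch(TInt aCpu)
 *			{
 *			TUint64 tsc = read_tsc();			// raw RDTSC value
 *			return tsc + TscOffset[aCpu];		// add this CPU's correction
 *			}
 */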

/** Get the current value of the CPU timestamp counter

*/
EXPORT_C __NAKED__ TUint64 X86::Timestamp()
	{
	asm("rdtsc ");
	asm("ret ");
	}



/******************************************************************************
 * Spin locks
 *
 * [this+0]		in count (byte)
 * [this+1]		out count (byte)
 * [this+6]		order (byte)
 * [this+7]		holding CPU (byte)
 ******************************************************************************/
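
/*
 * Added note (commentary, not part of the original source) on the checking
 * code below: when __INCLUDE_SPIN_LOCK_CHECKS__ is defined, every lock and
 * unlock is validated against the per-CPU TSubScheduler located via the APIC
 * ID.  As implemented by the routines that follow:
 *	- order byte < 0x20           : the lock may only be operated with interrupts disabled
 *	- order byte 0x20..0xFE       : the lock requires preemption to be disabled and must
 *	                                not be taken from an ISR
 *	- order byte 0xFF (EOrderNone): no interrupt, preemption or ordering checks
 *	- a CPU must not acquire a lock it already holds, and may only acquire a lock
 *	  whose order is strictly lower than the lowest order it already holds; held
 *	  orders below 0x40 are tracked in the per-CPU 64-bit bitmask iSpinLockOrderCheck
 * Any violation executes "int 0xff" to bring the system down at the point of error.
 */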

#if defined(__INCLUDE_SPIN_LOCK_CHECKS__)
extern "C" __NAKED__ void spin_lock_entry_check()
	{
	/* ecx points to lock */
	asm("push eax ");
	asm("push ecx ");
	asm("push edx ");
	asm("pushfd ");
	asm("cli ");
	asm("mov edx, ds:[%0]" : : "i"(X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ID));
	asm("shr edx, 24");
	asm("mov edx, [edx*4+%0]" : : "i" (&SubSchedulerLookupTable));
	asm("cmp edx, 0 ");						/* Skip checks if subschedulers not yet initialised */
	asm("je short slec_ok ");
	asm("test edx, 3 ");					/* Skip checks if subscheduler for this CPU not yet initialised */
	asm("jnz short slec_ok ");
	asm("movzx ecx, word ptr [ecx+6] ");	/* CL = order, CH = holding CPU */
	asm("cmp cl, 0x20 ");
	asm("jae short slec_preemption ");		/* This lock requires preemption to be disabled */

	/* check interrupts disabled */
	asm("test dword ptr [esp], 0x200 ");	/* Interrupts enabled? */
	asm("jz short slec_1 ");				/* No - OK */
	asm("int 0xff ");						/* Yes - die */

	asm("slec_preemption: ");
	asm("cmp cl, 0xff ");
	asm("je short slec_1 ");			/* EOrderNone - don't check interrupts or preemption */
	asm("cmp dword ptr [edx+52+%0], 0" : : "i"_FOFF(TSubScheduler, iExtras));
	asm("jge short slec_preemption_die ");	/* If called from ISR, die */
	asm("cmp dword ptr [edx+%0], 0" : : "i" _FOFF(TSubScheduler, iKernLockCount));
	asm("jnz short slec_1 ");			/* Preemption disabled - OK */
	asm("slec_preemption_die: ");
	asm("int 0xff ");					/* Preemption enabled - die */

	asm("slec_1: ");
	asm("lea eax, [edx+%0]" : : "i" _FOFF(TSubScheduler, iCpuNum));
	asm("cmp ch, [eax] ");
	asm("jnz short slec_2 ");			/* Not already held by this CPU - OK */
	asm("int 0xff ");					/* Already held by this CPU - die */

	asm("slec_2: ");
	asm("lea edx, [edx+%0]" : : "i" _FOFF(TSubScheduler, iSpinLockOrderCheck));
	asm("bsf eax, [edx] ");				/* find LSB of low dword */
	asm("jnz short slec_3 ");			/* skip if low dword nonzero */
	asm("bsf eax, [edx+4] ");			/* else find LSB of high dword */
	asm("lea eax, [eax+32] ");			/* add 32 to eax without changing flags */
	asm("jnz short slec_3 ");			/* skip if high dword nonzero */
	asm("mov eax, 0x7f ");				/* else set EAX = 0x7F */

	asm("slec_3: ");
	asm("cmp cl, al ");					/* check order of this lock against lowest currently held order */
	asm("jl short slec_ok ");			/* if this lock has lower order, OK - signed comparison so EOrderNone always works */
	asm("int 0xff ");					/* ordering violation - die */

	asm("slec_ok: ");
	asm("popfd ");
	asm("pop edx ");
	asm("pop ecx ");
	asm("pop eax ");
	asm("ret ");
	}

extern "C" __NAKED__ void spin_lock_mark_acq()
	{
	/* ecx points to lock */
	asm("push eax ");
	asm("push ecx ");
	asm("push edx ");
	asm("pushfd ");
	asm("cli ");
	asm("mov edx, ds:[%0]" : : "i"(X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ID));
	asm("shr edx, 24");
	asm("mov edx, [edx*4+%0]" : : "i"(&SubSchedulerLookupTable));
	asm("cmp edx, 0 ");						/* Skip checks if subschedulers not yet initialised */
	asm("je short slma_ok ");
	asm("test edx, 3 ");					/* Skip checks if subscheduler for this CPU not yet initialised */
	asm("jnz short slma_ok ");
	asm("mov eax, [edx+%0]" : : "i" _FOFF(TSubScheduler, iCpuNum));
	asm("mov [ecx+7], al ");				/* set byte 7 to holding CPU number */
	asm("movzx ecx, byte ptr [ecx+6] ");	/* CL = order */
	asm("cmp ecx, 0x40 ");
	asm("jae short slma_ok ");				/* if EOrderNone, done */
	asm("bts [edx+%0], ecx" : : "i" _FOFF(TSubScheduler, iSpinLockOrderCheck));

	asm("slma_ok: ");
	asm("popfd ");
	asm("pop edx ");
	asm("pop ecx ");
	asm("pop eax ");
	asm("ret ");
	}

extern "C" __NAKED__ void spin_unlock_entry_check()
	{
	/* ecx points to lock */
	asm("push eax ");
	asm("push ecx ");
	asm("push edx ");
	asm("pushfd ");
	asm("cli ");
	asm("mov edx, ds:[%0]" : : "i"(X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ID));
	asm("shr edx, 24");
	asm("mov edx, [edx*4+%0]" : : "i"(&SubSchedulerLookupTable));
	asm("cmp edx, 0 ");						/* Skip checks if subschedulers not yet initialised */
	asm("je short suec_ok ");
	asm("test edx, 3 ");					/* Skip checks if subscheduler for this CPU not yet initialised */
	asm("jnz short suec_ok ");
	asm("mov eax, [edx+%0]" : : "i" _FOFF(TSubScheduler, iCpuNum));		/* eax = current CPU number */
	asm("shl eax, 8 ");						/* AL = 0, AH = current CPU number */
	asm("xor ax, [ecx+6] ");				/* AL = order, AH = holding CPU ^ current CPU number */
	asm("cmp al, 0x20 ");
	asm("jae short suec_preemption ");		/* This lock requires preemption to be disabled */

	/* check interrupts disabled */
	asm("test dword ptr [esp], 0x200 ");	/* Interrupts enabled? */
	asm("jz short suec_1 ");				/* No - OK */
	asm("int 0xff ");						/* Yes - die */

	asm("suec_preemption: ");
	asm("cmp al, 0xff ");
	asm("je short suec_1 ");			/* EOrderNone - don't check interrupts or preemption */
	asm("cmp dword ptr [edx+%0], 0" : : "i" _FOFF(TSubScheduler, iKernLockCount));
	asm("jnz short suec_1 ");			/* Preemption disabled - OK */
	asm("int 0xff ");					/* Preemption enabled - die */

	asm("suec_1: ");
	asm("cmp ah, 0 ");					/* Check if holding CPU ^ current CPU number == 0 */
	asm("jz short suec_2 ");			/* Already held by this CPU - OK */
	asm("int 0xff ");					/* We don't hold lock - die */

	asm("suec_2: ");
	asm("mov byte ptr [ecx+7], 0xff ");	/* reset holding CPU */
	asm("cmp eax, 0x40 ");				/* EAX = lock order */
	asm("jae short suec_ok ");			/* if EOrderNone, done */
	asm("btr [edx+%0], eax" : : "i" _FOFF(TSubScheduler, iSpinLockOrderCheck));
	asm("jc short suec_ok ");			/* bit should have been set originally */
	asm("int 0xff ");					/* if not, die - something must have got corrupted */

	asm("suec_ok: ");
	asm("popfd ");
	asm("pop edx ");
	asm("pop ecx ");
	asm("pop eax ");
	asm("ret ");
	}
#endif


/******************************************************************************
 * Plain old spin lock
 *
 * Fundamental algorithm:
 *	lock()		{ old_in = in++; while(out!=old_in) __chill(); }
 *	unlock()	{ ++out; }
 *
 * [this+0]		in count (byte)
 * [this+1]		out count (byte)
 * [this+6]		order value
 * [this+7]		holding CPU number, 0xFF if none
 *
 ******************************************************************************/
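/*
 * Illustrative sketch (added commentary, not part of the original source): the
 * fundamental algorithm above is a byte-wide ticket lock.  The same idea in
 * self-contained standard C++ (std::atomic), with illustrative names - the
 * real implementation is the hand-written assembler that follows:
 *
 *		#include <atomic>
 *		#include <cstdint>
 *
 *		struct TicketLockSketch
 *			{
 *			std::atomic<uint8_t> iIn{0};		// [this+0] in count
 *			std::atomic<uint8_t> iOut{0};		// [this+1] out count
 *
 *			void Lock()
 *				{
 *				// take a ticket: my = in++ (the asm uses LOCK XADD)
 *				uint8_t my = iIn.fetch_add(1, std::memory_order_acquire);
 *				while (iOut.load(std::memory_order_acquire) != my)
 *					{ }							// spin - the asm executes PAUSE here
 *				}
 *			void Unlock()
 *				{
 *				// ++out hands the lock to the next ticket holder
 *				iOut.fetch_add(1, std::memory_order_release);
 *				}
 *			};
 */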
__NAKED__ EXPORT_C void TSpinLock::LockIrq()
	{
	THISCALL_PROLOG0()
	asm("cli ");
	SPIN_LOCK_ENTRY_CHECK()
	asm("mov al, 1 ");
	asm("lock xadd [ecx], al ");			/* al = in++ */
	asm("sl_lockirq_loop: ");
	asm("cmp al, [ecx+1] ");				/* compare al to out */
	asm("jnz short sl_lockirq_loop2 ");
	SPIN_LOCK_MARK_ACQ()
	asm("lock add dword ptr [esp], 0 ");	/* make sure subsequent accesses don't happen until lock acquired */
	THISCALL_EPILOG0()

	asm("sl_lockirq_loop2: ");
	X86_PAUSE
	asm("jmp short sl_lockirq_loop ");
	}

__NAKED__ EXPORT_C void TSpinLock::UnlockIrq()
	{
	THISCALL_PROLOG0()
	SPIN_UNLOCK_ENTRY_CHECK()
	asm("lock inc byte ptr [ecx+1] ");		/* ++out */
	asm("sti ");
	THISCALL_EPILOG0()
	}

extern "C" TBool __fastcall spin_lock_flash_irq(TSpinLock* a)
	{
	a->UnlockIrq();
	a->LockIrq();
	return TRUE;
	}

__NAKED__ EXPORT_C TBool TSpinLock::FlashIrq()
	{
	THISCALL_PROLOG0()
	asm("mov ax, [ecx] ");
	asm("inc ah ");
	asm("xor al, ah ");
	asm("and eax, 0xff ");
	asm("jne %a0" : : "i" (&spin_lock_flash_irq));
	THISCALL_EPILOG0()
	}
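
/*
 * Added note (commentary, not part of the original source): FlashIrq() above
 * checks whether any other CPU is queued on the lock.  With AL = in count and
 * AH = out count, "inc ah" followed by "xor al, ah" leaves AL == 0 exactly
 * when in == out+1, i.e. this CPU holds the only outstanding ticket.  Only if
 * someone else is waiting does it branch to spin_lock_flash_irq(), which
 * releases the lock (re-enabling interrupts), immediately re-acquires it and
 * returns TRUE; otherwise the function returns FALSE without releasing.
 */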

__NAKED__ EXPORT_C void TSpinLock::LockOnly()
	{
	THISCALL_PROLOG0()
	SPIN_LOCK_ENTRY_CHECK()
	asm("mov al, 1 ");
	asm("lock xadd [ecx], al ");			/* al = in++ */
	asm("sl_lockonly_loop: ");
	asm("cmp al, [ecx+1] ");				/* compare al to out */
	asm("jnz short sl_lockonly_loop2 ");
	SPIN_LOCK_MARK_ACQ()
	asm("lock add dword ptr [esp], 0 ");	/* make sure subsequent accesses don't happen until lock acquired */
	THISCALL_EPILOG0()

	asm("sl_lockonly_loop2: ");
	X86_PAUSE
	asm("jmp short sl_lockonly_loop ");
	}

__NAKED__ EXPORT_C void TSpinLock::UnlockOnly()
	{
	THISCALL_PROLOG0()
	SPIN_UNLOCK_ENTRY_CHECK()
	asm("lock inc byte ptr [ecx+1] ");		/* ++out */
	THISCALL_EPILOG0()
	}

extern "C" TBool __fastcall spin_lock_flash_only(TSpinLock* a)
	{
	a->UnlockOnly();
	a->LockOnly();
	return TRUE;
	}

__NAKED__ EXPORT_C TBool TSpinLock::FlashOnly()
	{
	THISCALL_PROLOG0()
	asm("mov ax, [ecx] ");
	asm("inc ah ");
	asm("xor al, ah ");
	asm("and eax, 0xff ");
	asm("jne %a0" : : "i" (&spin_lock_flash_only));
	THISCALL_EPILOG0()
	}

__NAKED__ EXPORT_C TInt TSpinLock::LockIrqSave()
	{
	THISCALL_PROLOG0()
	asm("pushfd ");
	asm("cli ");
	SPIN_LOCK_ENTRY_CHECK()
	asm("mov al, 1 ");
	asm("lock xadd [ecx], al ");			/* al = in++ */
	asm("sl_lockirqs_loop: ");
	asm("cmp al, [ecx+1] ");				/* compare al to out */
	asm("jnz short sl_lockirqs_loop2 ");
	SPIN_LOCK_MARK_ACQ()
	asm("lock add dword ptr [esp], 0 ");	/* make sure subsequent accesses don't happen until lock acquired */
	asm("pop eax ");						/* retrieve saved EFLAGS */
	asm("and eax, 0x200 ");					/* return just interrupt mask bit */
	THISCALL_EPILOG0()

	asm("sl_lockirqs_loop2: ");
	X86_PAUSE
	asm("jmp short sl_lockirqs_loop ");
	}

__NAKED__ EXPORT_C void TSpinLock::UnlockIrqRestore(TInt)
	{
	THISCALL_PROLOG1()
	SPIN_UNLOCK_ENTRY_CHECK()
	asm("lock inc byte ptr [ecx+1] ");		/* ++out */
	asm("test dword ptr [esp+4], 0x200 ");
	asm("jz short sl_unlockirqr_1 ");
	asm("sti ");
	asm("sl_unlockirqr_1: ");
	THISCALL_EPILOG1()
	}
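
/*
 * Usage sketch (added commentary, not part of the original source): LockIrqSave()
 * returns only the IF bit (0x200) of the caller's saved EFLAGS, and
 * UnlockIrqRestore() executes "sti" only if that bit was set, so the pair nests
 * correctly inside code that already runs with interrupts disabled:
 *
 *		TInt irq = aLock.LockIrqSave();		// disable interrupts, remember previous state
 *		// ... critical section ...
 *		aLock.UnlockIrqRestore(irq);		// re-enable interrupts only if previously enabled
 *
 * (aLock stands for any TSpinLock instance.)
 */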

__NAKED__ EXPORT_C TBool TSpinLock::FlashIrqRestore(TInt)
	{
	/* don't mess with stacked args, yet */
	THISCALL_PROLOG0()
	asm("mov ax, [ecx] ");
	asm("inc ah ");
	asm("xor al, ah ");
	asm("and eax, 0xff ");
	asm("jne short sl_flashirqr_1 ");

	/* now we can remove stacked arg since we don't need it */
	THISCALL_EPILOG1()

	asm("sl_flashirqr_1: ");
	THISCALL_PROLOG1()
	asm("test dword ptr [esp+4], 0x200 ");
	asm("jnz short sl_flashirqr_2 ");
	asm("call %a0" : : "i" (&spin_lock_flash_only));
	asm("jmp short sl_flashirqr_3 ");
	asm("sl_flashirqr_2: ");
	asm("call %a0" : : "i" (&spin_lock_flash_irq));
	asm("sl_flashirqr_3: ");
	THISCALL_EPILOG1()
	}

extern "C" TBool __fastcall spin_lock_flash_preempt(TSpinLock* a)
	{
	a->UnlockOnly();
	NKern::PreemptionPoint();
	a->LockOnly();
	return TRUE;
	}

__NAKED__ EXPORT_C TBool TSpinLock::FlashPreempt()
	{
	THISCALL_PROLOG0()
	asm("mov ax, [ecx] ");
	asm("inc ah ");
	asm("xor al, ah ");
	asm("and eax, 0xff ");
	asm("jne %a0" : : "i" (&spin_lock_flash_preempt));
	THISCALL_EPILOG0()
	}


/******************************************************************************
 * Read/Write Spin lock
 *
 * Structure ( (in.r,in.w) , (out.r,out.w) )
 * Fundamental algorithm:
 *	lockr()		{ old_in = (in.r++,in.w); while(out.w!=old_in.w) __chill(); }
 *	unlockr()	{ ++out.r; }
 *	lockw()		{ old_in = (in.r,in.w++); while(out!=old_in) __chill(); }
 *	unlockw()	{ ++out.w; }
 *
 * [this+0]		in.w
 * [this+1]		in.r
 * [this+2]		out.w
 * [this+3]		out.r
 * [this+4]		Bit mask of CPUs which hold read locks
 * [this+6]		order value
 * [this+7]		CPU number which holds write lock, 0xFF if none
 *
 ******************************************************************************/
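/*
 * Illustrative sketch (added commentary, not part of the original source): the
 * algorithm above is a ticket-based read/write lock - a reader waits only for
 * writers that took a ticket before it, while a writer waits for all earlier
 * readers and writers.  The same idea in self-contained standard C++
 * (std::atomic), with illustrative names - the real implementation is the
 * assembler below.  Each 16-bit word mirrors the byte layout shown above:
 *
 *		#include <atomic>
 *		#include <cstdint>
 *
 *		struct RWTicketLockSketch
 *			{
 *			std::atomic<uint16_t> iIn{0};		// low byte = in.w,  high byte = in.r
 *			std::atomic<uint16_t> iOut{0};		// low byte = out.w, high byte = out.r
 *
 *			void LockR()
 *				{
 *				// atomically capture in.w while doing in.r++ (the asm uses LOCK XADD 0x100)
 *				uint8_t w = uint8_t(iIn.fetch_add(0x100, std::memory_order_acquire));
 *				while (uint8_t(iOut.load(std::memory_order_acquire)) != w)
 *					{ }								// wait for earlier writers to finish
 *				}
 *			void UnlockR()
 *				{ iOut.fetch_add(0x100, std::memory_order_release); }	// ++out.r
 *
 *			void LockW()
 *				{
 *				// ++in.w within its byte, capturing the old (in.r,in.w) (the asm uses CMPXCHG)
 *				uint16_t old = iIn.load(std::memory_order_relaxed);
 *				while (!iIn.compare_exchange_weak(old,
 *							uint16_t((old & 0xff00u) | uint8_t(old + 1)),
 *							std::memory_order_acquire))
 *					{ }
 *				while (iOut.load(std::memory_order_acquire) != old)
 *					{ }								// wait for all earlier readers and writers
 *				}
 *			void UnlockW()
 *				{
 *				// ++out.w within its byte, leaving out.r untouched (the asm uses CMPXCHG)
 *				uint16_t old = iOut.load(std::memory_order_relaxed);
 *				while (!iOut.compare_exchange_weak(old,
 *							uint16_t((old & 0xff00u) | uint8_t(old + 1)),
 *							std::memory_order_release))
 *					{ }
 *				}
 *			};
 */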

#if defined(__INCLUDE_SPIN_LOCK_CHECKS__)
extern "C" __NAKED__ void rwspin_rlock_entry_check()
	{
	/* ecx points to lock */
	asm("push eax ");
	asm("push ecx ");
	asm("push edx ");
	asm("pushfd ");
	asm("cli ");
	asm("mov edx, ds:[%0]" : : "i"(X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ID));
	asm("shr edx, 24");
	asm("mov edx, [edx*4+%0]" : : "i" (&SubSchedulerLookupTable));
	asm("cmp edx, 0 ");						/* Skip checks if subschedulers not yet initialised */
	asm("je short rwrlec_ok ");
	asm("test edx, 3 ");					/* Skip checks if subscheduler for this CPU not yet initialised */
	asm("jnz short rwrlec_ok ");
	asm("movzx ecx, word ptr [ecx+6] ");	/* CL = order, CH = holding CPU for write lock */
	asm("cmp cl, 0x20 ");
	asm("jae short rwrlec_preemption ");		/* This lock requires preemption to be disabled */

	/* check interrupts disabled */
	asm("test dword ptr [esp], 0x200 ");	/* Interrupts enabled? */
	asm("jz short rwrlec_1 ");				/* No - OK */
	asm("int 0xff ");						/* Yes - die */

	asm("rwrlec_preemption: ");
	asm("cmp cl, 0xff ");
	asm("je short rwrlec_1 ");			/* EOrderNone - don't check interrupts or preemption */
	asm("cmp dword ptr [edx+52+%0], 0" : : "i"_FOFF(TSubScheduler, iExtras));
	asm("jge short rwrlec_preemption_die ");	/* If called from ISR, die */
	asm("cmp dword ptr [edx+%0], 0" : : "i" _FOFF(TSubScheduler, iKernLockCount));
	asm("jnz short rwrlec_1 ");			/* Preemption disabled - OK */
	asm("rwrlec_preemption_die: ");
	asm("int 0xff ");					/* Preemption enabled - die */

	asm("rwrlec_1: ");
	asm("lea eax, [edx+%0]" : : "i" _FOFF(TSubScheduler, iCpuNum));
	asm("cmp ch, [eax] ");
	asm("jnz short rwrlec_2 ");			/* Not already held by this CPU for write - OK */
	asm("int 0xff ");					/* Already held by this CPU for write - die */

	asm("rwrlec_2: ");
	asm("mov eax, [edx+%0]" : : "i" _FOFF(TSubScheduler, iCpuMask));
	asm("test al, [ecx+4] ");			/* Test if already held by this CPU for read */
	asm("jz short rwrlec_3 ");
	asm("int 0xff ");					/* if so, die */

	asm("rwrlec_3: ");
	asm("lea edx, [edx+%0]" : : "i" _FOFF(TSubScheduler, iSpinLockOrderCheck));
	asm("bsf eax, [edx] ");				/* find LSB of low dword */
	asm("jnz short rwrlec_4 ");			/* skip if low dword nonzero */
	asm("bsf eax, [edx+4] ");			/* else find LSB of high dword */
	asm("lea eax, [eax+32] ");			/* add 32 to eax without changing flags */
	asm("jnz short rwrlec_4 ");			/* skip if high dword nonzero */
	asm("mov eax, 0x7f ");				/* else set EAX = 0x7F */

	asm("rwrlec_4: ");
	asm("cmp cl, al ");					/* check order of this lock against lowest currently held order */
	asm("jl short rwrlec_ok ");			/* if this lock has lower order, OK - signed comparison so EOrderNone always works */
	asm("int 0xff ");					/* ordering violation - die */

	asm("rwrlec_ok: ");
	asm("popfd ");
	asm("pop edx ");
	asm("pop ecx ");
	asm("pop eax ");
	asm("ret ");
	}

extern "C" __NAKED__ void rwspin_rlock_mark_acq()
	{
	/* ecx points to lock */
	asm("push eax ");
	asm("push ecx ");
	asm("push edx ");
	asm("pushfd ");
	asm("cli ");
	asm("mov edx, ds:[%0]" : : "i"(X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ID));
	asm("shr edx, 24");
	asm("mov edx, [edx*4+%0]" : : "i"(&SubSchedulerLookupTable));
	asm("cmp edx, 0 ");						/* Skip checks if subschedulers not yet initialised */
	asm("je short rwrlma_ok ");
	asm("test edx, 3 ");					/* Skip checks if subscheduler for this CPU not yet initialised */
	asm("jnz short rwrlma_ok ");
	asm("mov eax, [edx+%0]" : : "i" _FOFF(TSubScheduler, iCpuMask));
	asm("lock or [ecx+4], al ");			/* set bit in byte 4 corresponding to this CPU */
	asm("movzx ecx, byte ptr [ecx+6] ");	/* CL = order */
	asm("cmp ecx, 0x40 ");
	asm("jae short rwrlma_ok ");			/* if EOrderNone, done */
	asm("bts [edx+%0], ecx" : : "i" _FOFF(TSubScheduler, iSpinLockOrderCheck));

	asm("rwrlma_ok: ");
	asm("popfd ");
	asm("pop edx ");
	asm("pop ecx ");
	asm("pop eax ");
	asm("ret ");
	}

extern "C" __NAKED__ void rwspin_runlock_entry_check()
	{
	/* ecx points to lock */
	asm("push eax ");
	asm("push ebx ");
	asm("push ecx ");
	asm("push edx ");
	asm("pushfd ");
	asm("cli ");
	asm("mov edx, ds:[%0]" : : "i"(X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ID));
	asm("shr edx, 24");
	asm("mov edx, [edx*4+%0]" : : "i"(&SubSchedulerLookupTable));
	asm("cmp edx, 0 ");						/* Skip checks if subschedulers not yet initialised */
	asm("je short rwruec_ok ");
	asm("test edx, 3 ");					/* Skip checks if subscheduler for this CPU not yet initialised */
	asm("jnz short rwruec_ok ");
	asm("mov eax, [ecx+4] ");				/* AL = R-mask, EAX byte 2 = order */
	asm("and eax, 0x00ffffff ");			/* mask out W CPU */
	asm("cmp eax, 0x00200000 ");
	asm("jae short rwruec_preemption ");	/* This lock requires preemption to be disabled */

	/* check interrupts disabled */
	asm("test dword ptr [esp], 0x200 ");	/* Interrupts enabled? */
	asm("jz short rwruec_1 ");				/* No - OK */
	asm("int 0xff ");						/* Yes - die */

	asm("rwruec_preemption: ");
	asm("cmp eax, 0x00ff0000 ");
	asm("jae short rwruec_1 ");			/* EOrderNone - don't check interrupts or preemption */
	asm("cmp dword ptr [edx+%0], 0" : : "i" _FOFF(TSubScheduler, iKernLockCount));
	asm("jnz short rwruec_1 ");			/* Preemption disabled - OK */
	asm("int 0xff ");					/* Preemption enabled - die */

	asm("rwruec_1: ");
	asm("mov ebx, [edx+%0]" : : "i" _FOFF(TSubScheduler, iCpuMask));
	asm("test al, bl ");				/* Check if current CPU holds read lock */
	asm("jnz short rwruec_2 ");			/* Already held by this CPU - OK */
	asm("int 0xff ");					/* We don't hold lock - die */

	asm("rwruec_2: ");
	asm("not bl ");
	asm("lock and [ecx+4], bl ");		/* clear bit in R-holding CPU mask */
	asm("shr eax, 16 ");				/* EAX = lock order */
	asm("cmp eax, 0x40 ");
	asm("jae short rwruec_ok ");		/* if EOrderNone, done */
	asm("btr [edx+%0], eax" : : "i" _FOFF(TSubScheduler, iSpinLockOrderCheck));
	asm("jc short rwruec_ok ");			/* bit should have been set originally */
	asm("int 0xff ");					/* if not, die - something must have got corrupted */

	asm("rwruec_ok: ");
	asm("popfd ");
	asm("pop edx ");
	asm("pop ecx ");
	asm("pop ebx ");
	asm("pop eax ");
	asm("ret ");
	}


extern "C" __NAKED__ void rwspin_wlock_entry_check()
	{
	/* ecx points to lock */
	asm("push eax ");
	asm("push ecx ");
	asm("push edx ");
	asm("pushfd ");
	asm("cli ");
	asm("mov edx, ds:[%0]" : : "i"(X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ID));
	asm("shr edx, 24");
	asm("mov edx, [edx*4+%0]" : : "i" (&SubSchedulerLookupTable));
	asm("cmp edx, 0 ");						/* Skip checks if subschedulers not yet initialised */
	asm("je short rwwlec_ok ");
	asm("test edx, 3 ");					/* Skip checks if subscheduler for this CPU not yet initialised */
	asm("jnz short rwwlec_ok ");
	asm("movzx ecx, word ptr [ecx+6] ");	/* CL = order, CH = write lock holding CPU */
	asm("cmp cl, 0x20 ");
	asm("jae short rwwlec_preemption ");	/* This lock requires preemption to be disabled */

	/* check interrupts disabled */
	asm("test dword ptr [esp], 0x200 ");	/* Interrupts enabled? */
	asm("jz short rwwlec_1 ");				/* No - OK */
	asm("int 0xff ");						/* Yes - die */

	asm("rwwlec_preemption: ");
	asm("cmp cl, 0xff ");
	asm("je short rwwlec_1 ");			/* EOrderNone - don't check interrupts or preemption */
	asm("cmp dword ptr [edx+52+%0], 0" : : "i"_FOFF(TSubScheduler, iExtras));
	asm("jge short rwwlec_preemption_die ");	/* If called from ISR, die */
	asm("cmp dword ptr [edx+%0], 0" : : "i" _FOFF(TSubScheduler, iKernLockCount));
	asm("jnz short rwwlec_1 ");			/* Preemption disabled - OK */
	asm("rwwlec_preemption_die: ");
	asm("int 0xff ");					/* Preemption enabled - die */

	asm("rwwlec_1: ");
	asm("lea eax, [edx+%0]" : : "i" _FOFF(TSubScheduler, iCpuNum));
	asm("cmp ch, [eax] ");
	asm("jnz short rwwlec_2 ");			/* Not already held by this CPU for write - OK */
	asm("int 0xff ");					/* Already held by this CPU for write - die */

	asm("rwwlec_2: ");
	asm("mov eax, [edx+%0]" : : "i" _FOFF(TSubScheduler, iCpuMask));
	asm("test al, [ecx+4] ");			/* Test if already held by this CPU for read */
	asm("jz short rwwlec_3 ");
	asm("int 0xff ");					/* if so, die */

	asm("rwwlec_3: ");
	asm("lea edx, [edx+%0]" : : "i" _FOFF(TSubScheduler, iSpinLockOrderCheck));
	asm("bsf eax, [edx] ");				/* find LSB of low dword */
	asm("jnz short rwwlec_4 ");			/* skip if low dword nonzero */
	asm("bsf eax, [edx+4] ");			/* else find LSB of high dword */
	asm("lea eax, [eax+32] ");			/* add 32 to eax without changing flags */
	asm("jnz short rwwlec_4 ");			/* skip if high dword nonzero */
	asm("mov eax, 0x7f ");				/* else set EAX = 0x7F */

	asm("rwwlec_4: ");
	asm("cmp cl, al ");					/* check order of this lock against lowest currently held order */
	asm("jl short rwwlec_ok ");			/* if this lock has lower order, OK - signed comparison so EOrderNone always works */
	asm("int 0xff ");					/* ordering violation - die */

	asm("rwwlec_ok: ");
	asm("popfd ");
	asm("pop edx ");
	asm("pop ecx ");
	asm("pop eax ");
	asm("ret ");
	}

extern "C" __NAKED__ void rwspin_wlock_mark_acq()
	{
	/* ecx points to lock */
	asm("push eax ");
	asm("push ecx ");
	asm("push edx ");
	asm("pushfd ");
	asm("cli ");
	asm("mov edx, ds:[%0]" : : "i"(X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ID));
	asm("shr edx, 24");
	asm("mov edx, [edx*4+%0]" : : "i"(&SubSchedulerLookupTable));
	asm("cmp edx, 0 ");						/* Skip checks if subschedulers not yet initialised */
	asm("je short rwwlma_ok ");
	asm("test edx, 3 ");					/* Skip checks if subscheduler for this CPU not yet initialised */
	asm("jnz short rwwlma_ok ");
	asm("mov eax, [edx+%0]" : : "i" _FOFF(TSubScheduler, iCpuNum));
	asm("mov [ecx+7], al ");				/* set byte 7 to holding CPU number */
	asm("movzx ecx, byte ptr [ecx+6] ");	/* CL = order */
	asm("cmp ecx, 0x40 ");
	asm("jae short rwwlma_ok ");				/* if EOrderNone, done */
	asm("bts [edx+%0], ecx" : : "i" _FOFF(TSubScheduler, iSpinLockOrderCheck));

	asm("rwwlma_ok: ");
	asm("popfd ");
	asm("pop edx ");
	asm("pop ecx ");
	asm("pop eax ");
	asm("ret ");
	}

extern "C" __NAKED__ void rwspin_wunlock_entry_check()
	{
	/* ecx points to lock */
	asm("push eax ");
	asm("push ecx ");
	asm("push edx ");
	asm("pushfd ");
	asm("cli ");
	asm("mov edx, ds:[%0]" : : "i"(X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ID));
	asm("shr edx, 24");
	asm("mov edx, [edx*4+%0]" : : "i"(&SubSchedulerLookupTable));
	asm("cmp edx, 0 ");						/* Skip checks if subschedulers not yet initialised */
	asm("je short rwwuec_ok ");
	asm("test edx, 3 ");					/* Skip checks if subscheduler for this CPU not yet initialised */
	asm("jnz short rwwuec_ok ");
	asm("mov eax, [edx+%0]" : : "i" _FOFF(TSubScheduler, iCpuNum));		/* eax = current CPU number */
	asm("shl eax, 8 ");						/* AL = 0, AH = current CPU number */
	asm("xor ax, [ecx+6] ");				/* AL = order, AH = holding CPU ^ current CPU number */
	asm("cmp al, 0x20 ");
	asm("jae short rwwuec_preemption ");		/* This lock requires preemption to be disabled */

	/* check interrupts disabled */
	asm("test dword ptr [esp], 0x200 ");	/* Interrupts enabled? */
	asm("jz short rwwuec_1 ");				/* No - OK */
	asm("int 0xff ");						/* Yes - die */

	asm("rwwuec_preemption: ");
	asm("cmp al, 0xff ");
	asm("je short rwwuec_1 ");			/* EOrderNone - don't check interrupts or preemption */
	asm("cmp dword ptr [edx+%0], 0" : : "i" _FOFF(TSubScheduler, iKernLockCount));
	asm("jnz short rwwuec_1 ");			/* Preemption disabled - OK */
	asm("int 0xff ");					/* Preemption enabled - die */

	asm("rwwuec_1: ");
	asm("cmp ah, 0 ");					/* Check if holding CPU ^ current CPU number == 0 */
	asm("jz short rwwuec_2 ");			/* Already held by this CPU - OK */
	asm("int 0xff ");					/* We don't hold lock - die */

	asm("rwwuec_2: ");
	asm("mov byte ptr [ecx+7], 0xff ");	/* reset holding CPU */
	asm("cmp eax, 0x40 ");				/* EAX = lock order */
	asm("jae short rwwuec_ok ");			/* if EOrderNone, done */
	asm("btr [edx+%0], eax" : : "i" _FOFF(TSubScheduler, iSpinLockOrderCheck));
	asm("jc short rwwuec_ok ");			/* bit should have been set originally */
	asm("int 0xff ");					/* if not, die - something must have got corrupted */

	asm("rwwuec_ok: ");
	asm("popfd ");
	asm("pop edx ");
	asm("pop ecx ");
	asm("pop eax ");
	asm("ret ");
	}
#endif


/*-----------------------------------------------------------------------------
 - Read locks disabling IRQ
 -----------------------------------------------------------------------------*/
__NAKED__ EXPORT_C void TRWSpinLock::LockIrqR()
	{
	THISCALL_PROLOG0()
	asm("cli ");
	RWSPIN_RLOCK_ENTRY_CHECK()
	asm("mov ax, 0x100 ");
	asm("lock xadd [ecx], ax ");			/* ah = in.r++, al = in.w */
	asm("rwl_rlockirq_loop: ");
	asm("cmp al, [ecx+2] ");				/* compare al to out.w */
	asm("jnz short rwl_rlockirq_loop2 ");
	RWSPIN_RLOCK_MARK_ACQ()
	asm("lock add dword ptr [esp], 0 ");	/* make sure subsequent accesses don't happen until lock acquired */
	THISCALL_EPILOG0()

	asm("rwl_rlockirq_loop2: ");
	X86_PAUSE
	asm("jmp short rwl_rlockirq_loop ");
	}

__NAKED__ EXPORT_C void TRWSpinLock::UnlockIrqR()
	{
	THISCALL_PROLOG0()
	RWSPIN_RUNLOCK_ENTRY_CHECK()
	asm("lock add word ptr [ecx+2], 0x100 ");	/* ++out.r */
	asm("sti ");
	THISCALL_EPILOG0()
	}

extern "C" TBool __fastcall rwspin_rlock_flash_irq(TRWSpinLock* a)
	{
	a->UnlockIrqR();
	a->LockIrqR();
	return TRUE;
	}

__NAKED__ EXPORT_C TBool TRWSpinLock::FlashIrqR()
	{
	THISCALL_PROLOG0()
	asm("mov eax, [ecx] ");		/* al=in.w, ah=in.r, byte2=out.w, byte3=out.r */
	asm("mov edx, eax ");
	asm("shr edx, 16 ");		/* dl=out.w */
	asm("xor eax, edx ");		/* al = in.w ^ out.w = 0 if no writers waiting */
	asm("and eax, 0xff ");
	asm("jne %a0" : : "i" (&rwspin_rlock_flash_irq));
	THISCALL_EPILOG0()
	}


/*-----------------------------------------------------------------------------
 - Write locks disabling IRQ
 -----------------------------------------------------------------------------*/
__NAKED__ EXPORT_C void TRWSpinLock::LockIrqW()
	{
	THISCALL_PROLOG0()
	asm("cli ");
	RWSPIN_WLOCK_ENTRY_CHECK()
	asm("mov ax, [ecx] ");					/* ah = in.r, al = in.w */
	asm("rwl_wlockirq_loop3: ");
	asm("mov edx, eax ");
	asm("inc dl ");							/* dh = in.r, dl = in.w+1 */
	asm("lock cmpxchg [ecx], dx ");			/* attempt to update in.w */
	asm("jne short rwl_wlockirq_loop3 ");	/* loop if failed */
	asm("rwl_wlockirq_loop: ");
	asm("cmp ax, [ecx+2] ");				/* compare ax to (out.w,out.r) */
	asm("jnz short rwl_wlockirq_loop2 ");
	RWSPIN_WLOCK_MARK_ACQ()
	asm("lock add dword ptr [esp], 0 ");	/* make sure subsequent accesses don't happen until lock acquired */
	THISCALL_EPILOG0()

	asm("rwl_wlockirq_loop2: ");
	X86_PAUSE
	asm("jmp short rwl_wlockirq_loop ");
	}

__NAKED__ EXPORT_C void TRWSpinLock::UnlockIrqW()
	{
	THISCALL_PROLOG0()
	RWSPIN_WUNLOCK_ENTRY_CHECK()
	asm("mov ax, [ecx+2] ");				/* ah = out.r, al = out.w */
	asm("rwl_wunlockirq_loop: ");
	asm("mov edx, eax ");
	asm("inc dl ");							/* dh = out.r, dl = out.w+1 */
	asm("lock cmpxchg [ecx+2], dx ");		/* attempt to update out.w */
	asm("jne short rwl_wunlockirq_loop ");	/* loop if failed */
	asm("sti ");
	THISCALL_EPILOG0()
	}

extern "C" TBool __fastcall rwspin_wlock_flash_irq(TRWSpinLock* a)
	{
	a->UnlockIrqW();
	a->LockIrqW();
	return TRUE;
	}

__NAKED__ EXPORT_C TBool TRWSpinLock::FlashIrqW()
	{
	THISCALL_PROLOG0()
	asm("mov eax, [ecx] ");		/* al=in.w, ah=in.r, byte2=out.w, byte3=out.r */
	asm("mov edx, eax ");
	asm("shr edx, 16 ");		/* dl=out.w, dh=out.r */
	asm("inc dl ");				/* dx==ax now means no-one else is waiting for lock */
	asm("xor eax, edx ");
	asm("and eax, 0xffff ");
	asm("jne %a0" : : "i" (&rwspin_wlock_flash_irq));
	THISCALL_EPILOG0()
	}



/*-----------------------------------------------------------------------------
 - Read locks leaving IRQ alone
 -----------------------------------------------------------------------------*/
__NAKED__ EXPORT_C void TRWSpinLock::LockOnlyR()
	{
	THISCALL_PROLOG0()
	RWSPIN_RLOCK_ENTRY_CHECK()
	asm("mov ax, 0x100 ");
	asm("lock xadd [ecx], ax ");			/* ah = in.r++, al = in.w */
	asm("rwl_rlockonly_loop: ");
	asm("cmp al, [ecx+2] ");				/* compare al to out.w */
	asm("jnz short rwl_rlockonly_loop2 ");
	RWSPIN_RLOCK_MARK_ACQ()
	asm("lock add dword ptr [esp], 0 ");	/* make sure subsequent accesses don't happen until lock acquired */
	THISCALL_EPILOG0()

	asm("rwl_rlockonly_loop2: ");
	X86_PAUSE
	asm("jmp short rwl_rlockonly_loop ");
	}

__NAKED__ EXPORT_C void TRWSpinLock::UnlockOnlyR()
	{
	THISCALL_PROLOG0()
	RWSPIN_RUNLOCK_ENTRY_CHECK()
	asm("lock add word ptr [ecx+2], 0x100 ");	/* ++out.r */
	THISCALL_EPILOG0()
	}

extern "C" TBool __fastcall rwspin_rlock_flash_only(TRWSpinLock* a)
	{
	a->UnlockOnlyR();
	a->LockOnlyR();
	return TRUE;
	}

__NAKED__ EXPORT_C TBool TRWSpinLock::FlashOnlyR()
	{
	THISCALL_PROLOG0()
	asm("mov eax, [ecx] ");		/* al=in.w, ah=in.r, byte2=out.w, byte3=out.r */
	asm("mov edx, eax ");
	asm("shr edx, 16 ");		/* dl=out.w */
	asm("xor eax, edx ");		/* al = in.w ^ out.w = 0 if no writers waiting */
	asm("and eax, 0xff ");
	asm("jne %a0" : : "i" (&rwspin_rlock_flash_only));
	THISCALL_EPILOG0()
	}


/*-----------------------------------------------------------------------------
 - Write locks leaving IRQ alone
 -----------------------------------------------------------------------------*/
__NAKED__ EXPORT_C void TRWSpinLock::LockOnlyW()
	{
	THISCALL_PROLOG0()
	RWSPIN_WLOCK_ENTRY_CHECK()
	asm("mov ax, [ecx] ");					/* ah = in.r, al = in.w */
	asm("rwl_wlockonly_loop3: ");
	asm("mov edx, eax ");
	asm("inc dl ");							/* dh = in.r, dl = in.w+1 */
	asm("lock cmpxchg [ecx], dx ");			/* attempt to update in.w */
	asm("jne short rwl_wlockonly_loop3 ");	/* loop if failed */
	asm("rwl_wlockonly_loop: ");
	asm("cmp ax, [ecx+2] ");				/* compare ax to (out.w,out.r) */
	asm("jnz short rwl_wlockonly_loop2 ");
	RWSPIN_WLOCK_MARK_ACQ()
	asm("lock add dword ptr [esp], 0 ");	/* make sure subsequent accesses don't happen until lock acquired */
	THISCALL_EPILOG0()

	asm("rwl_wlockonly_loop2: ");
	X86_PAUSE
	asm("jmp short rwl_wlockonly_loop ");
	}

__NAKED__ EXPORT_C void TRWSpinLock::UnlockOnlyW()
	{
	THISCALL_PROLOG0()
	RWSPIN_WUNLOCK_ENTRY_CHECK()
	asm("mov ax, [ecx+2] ");				/* ah = out.r, al = out.w */
	asm("rwl_wunlockonly_loop: ");
	asm("mov edx, eax ");
	asm("inc dl ");							/* dh = out.r, dl = out.w+1 */
	asm("lock cmpxchg [ecx+2], dx ");		/* attempt to update out.w */
	asm("jne short rwl_wunlockonly_loop ");	/* loop if failed */
	THISCALL_EPILOG0()
	}

extern "C" TBool __fastcall rwspin_wlock_flash_only(TRWSpinLock* a)
	{
	a->UnlockOnlyW();
	a->LockOnlyW();
	return TRUE;
	}

__NAKED__ EXPORT_C TBool TRWSpinLock::FlashOnlyW()
	{
	THISCALL_PROLOG0()
	asm("mov eax, [ecx] ");		/* al=in.w, ah=in.r, byte2=out.w, byte3=out.r */
	asm("mov edx, eax ");
	asm("shr edx, 16 ");		/* dl=out.w, dh=out.r */
	asm("inc dl ");				/* dx==ax now means no-one else is waiting for lock */
	asm("xor eax, edx ");
	asm("and eax, 0xffff ");
	asm("jne %a0" : : "i" (&rwspin_wlock_flash_only));
	THISCALL_EPILOG0()
	}



/*-----------------------------------------------------------------------------
 - Read locks disabling IRQ with save/restore IRQ state
 -----------------------------------------------------------------------------*/
__NAKED__ EXPORT_C TInt TRWSpinLock::LockIrqSaveR()
	{
	THISCALL_PROLOG0()
	asm("pushfd ");
	asm("cli ");
	RWSPIN_RLOCK_ENTRY_CHECK()
	asm("mov ax, 0x100 ");
	asm("lock xadd [ecx], ax ");			/* ah = in.r++, al = in.w */
	asm("rwl_rlockirqs_loop: ");
	asm("cmp al, [ecx+2] ");				/* compare al to out.w */
	asm("jnz short rwl_rlockirqs_loop2 ");
	RWSPIN_RLOCK_MARK_ACQ()
	asm("lock add dword ptr [esp], 0 ");	/* make sure subsequent accesses don't happen until lock acquired */
	asm("pop eax ");						/* retrieve saved EFLAGS */
	asm("and eax, 0x200 ");					/* return just interrupt mask bit */
	THISCALL_EPILOG0()

	asm("rwl_rlockirqs_loop2: ");
	X86_PAUSE
	asm("jmp short rwl_rlockirqs_loop ");
	}

__NAKED__ EXPORT_C void TRWSpinLock::UnlockIrqRestoreR(TInt)
	{
	THISCALL_PROLOG1()
	RWSPIN_RUNLOCK_ENTRY_CHECK()
	asm("lock add word ptr [ecx+2], 0x100 ");	/* ++out.r */
	asm("test dword ptr [esp+4], 0x200 ");
	asm("jz short rwl_runlockirqr_1 ");
	asm("sti ");
	asm("rwl_runlockirqr_1: ");
	THISCALL_EPILOG1()
	}

__NAKED__ EXPORT_C TBool TRWSpinLock::FlashIrqRestoreR(TInt)
	{
	/* don't mess with stacked args, yet */
	THISCALL_PROLOG0()
	asm("mov eax, [ecx] ");		/* al=in.w, ah=in.r, byte2=out.w, byte3=out.r */
	asm("mov edx, eax ");
	asm("shr edx, 16 ");		/* dl=out.w */
	asm("xor eax, edx ");		/* al = in.w ^ out.w = 0 if no writers waiting */
	asm("and eax, 0xff ");
	asm("jne short rwl_rflashirqr_1 ");

	/* now we can remove stacked arg since we don't need it */
	THISCALL_EPILOG1()

	asm("rwl_rflashirqr_1: ");
	THISCALL_PROLOG1()
	asm("test dword ptr [esp+4], 0x200 ");
	asm("jnz short rwl_rflashirqr_2 ");
	asm("call %a0" : : "i" (&rwspin_rlock_flash_only));
	asm("jmp short rwl_rflashirqr_3 ");
	asm("rwl_rflashirqr_2: ");
	asm("call %a0" : : "i" (&rwspin_rlock_flash_irq));
	asm("rwl_rflashirqr_3: ");
	THISCALL_EPILOG1()
	}


/*-----------------------------------------------------------------------------
 - Write locks disabling IRQ with save/restore IRQ state
 -----------------------------------------------------------------------------*/
__NAKED__ EXPORT_C TInt TRWSpinLock::LockIrqSaveW()
	{
	THISCALL_PROLOG0()
	asm("pushfd ");
	asm("cli ");
	RWSPIN_WLOCK_ENTRY_CHECK()
	asm("mov ax, [ecx] ");					/* ah = in.r, al = in.w */
	asm("rwl_wlockirqs_loop3: ");
	asm("mov edx, eax ");
	asm("inc dl ");							/* dh = in.r, dl = in.w+1 */
	asm("lock cmpxchg [ecx], dx ");			/* attempt to update in.w */
	asm("jne short rwl_wlockirqs_loop3 ");	/* loop if failed */
	asm("rwl_wlockirqs_loop: ");
	asm("cmp ax, [ecx+2] ");				/* compare ax to (out.w,out.r) */
	asm("jnz short rwl_wlockirqs_loop2 ");
	RWSPIN_WLOCK_MARK_ACQ()
	asm("lock add dword ptr [esp], 0 ");	/* make sure subsequent accesses don't happen until lock acquired */
	asm("pop eax ");						/* retrieve saved EFLAGS */
	asm("and eax, 0x200 ");					/* return just interrupt mask bit */
	THISCALL_EPILOG0()

	asm("rwl_wlockirqs_loop2: ");
	X86_PAUSE
	asm("jmp short rwl_wlockirqs_loop ");
	}

__NAKED__ EXPORT_C void TRWSpinLock::UnlockIrqRestoreW(TInt)
	{
	THISCALL_PROLOG1()
	RWSPIN_WUNLOCK_ENTRY_CHECK()
	asm("mov ax, [ecx+2] ");				/* ah = out.r, al = out.w */
	asm("rwl_wunlockirqr_loop: ");
	asm("mov edx, eax ");
	asm("inc dl ");							/* dh = out.r, dl = out.w+1 */
	asm("lock cmpxchg [ecx+2], dx ");		/* attempt to update out.w */
	asm("jne short rwl_wunlockirqr_loop ");	/* loop if failed */
	asm("test dword ptr [esp+4], 0x200 ");
	asm("jz short rwl_wunlockirqr_1 ");
	asm("sti ");
	asm("rwl_wunlockirqr_1: ");
	THISCALL_EPILOG1()
	}

__NAKED__ EXPORT_C TBool TRWSpinLock::FlashIrqRestoreW(TInt)
	{
	/* don't mess with stacked args, yet */
	THISCALL_PROLOG0()
	asm("mov eax, [ecx] ");		/* al=in.w, ah=in.r, byte2=out.w, byte3=out.r */
	asm("mov edx, eax ");
	asm("shr edx, 16 ");		/* dl=out.w, dh=out.r */
	asm("inc dl ");				/* dx==ax now means no-one else is waiting for lock */
	asm("xor eax, edx ");
	asm("and eax, 0xffff ");
	asm("jne short rwl_wflashirqr_1 ");

	/* now we can remove stacked arg since we don't need it */
	THISCALL_EPILOG1()

	asm("rwl_wflashirqr_1: ");
	THISCALL_PROLOG1()
	asm("test dword ptr [esp+4], 0x200 ");
	asm("jnz short rwl_wflashirqr_2 ");
	asm("call %a0" : : "i" (&rwspin_wlock_flash_only));
	asm("jmp short rwl_wflashirqr_3 ");
	asm("rwl_wflashirqr_2: ");
	asm("call %a0" : : "i" (&rwspin_wlock_flash_irq));
	asm("rwl_wflashirqr_3: ");
	THISCALL_EPILOG1()
	}


/*-----------------------------------------------------------------------------
 - Read lock flash allowing preemption
 -----------------------------------------------------------------------------*/
extern "C" TBool __fastcall rwspin_rlock_flash_preempt(TRWSpinLock* a)
	{
	a->UnlockOnlyR();
	NKern::PreemptionPoint();
	a->LockOnlyR();
	return TRUE;
	}

__NAKED__ EXPORT_C TBool TRWSpinLock::FlashPreemptR()
	{
	THISCALL_PROLOG0()
	asm("mov eax, [ecx] ");		/* al=in.w, ah=in.r, byte2=out.w, byte3=out.r */
	asm("mov edx, eax ");
	asm("shr edx, 16 ");		/* dl=out.w */
	asm("xor eax, edx ");		/* al = in.w ^ out.w = 0 if no writers waiting */
	asm("and eax, 0xff ");
	asm("jne %a0" : : "i" (&rwspin_rlock_flash_preempt));
	THISCALL_EPILOG0()
	}


/*-----------------------------------------------------------------------------
 - Write lock flash allowing preemption
 -----------------------------------------------------------------------------*/
extern "C" TBool __fastcall rwspin_wlock_flash_preempt(TRWSpinLock* a)
	{
	a->UnlockOnlyW();
	NKern::PreemptionPoint();
	a->LockOnlyW();
	return TRUE;
	}

__NAKED__ EXPORT_C TBool TRWSpinLock::FlashPreemptW()
	{
	THISCALL_PROLOG0()
	asm("mov eax, [ecx] ");		/* al=in.w, ah=in.r, byte2=out.w, byte3=out.r */
	asm("mov edx, eax ");
	asm("shr edx, 16 ");		/* dl=out.w, dh=out.r */
	asm("inc dl ");				/* dx==ax now means no-one else is waiting for lock */
	asm("xor eax, edx ");
	asm("and eax, 0xffff ");
	asm("jne %a0" : : "i" (&rwspin_wlock_flash_preempt));
	THISCALL_EPILOG0()
	}