os/kernelhwsrv/kernel/eka/include/x86hlp_gcc.inl
author sl
Tue, 10 Jun 2014 14:32:02 +0200
changeset 1 260cb5ec6c19
permissions -rw-r--r--
Update contrib.
sl@0
     1
// Copyright (c) 2007-2009 Nokia Corporation and/or its subsidiary(-ies).
sl@0
     2
// All rights reserved.
sl@0
     3
// This component and the accompanying materials are made available
sl@0
     4
// under the terms of the License "Eclipse Public License v1.0"
sl@0
     5
// which accompanies this distribution, and is available
sl@0
     6
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
sl@0
     7
//
sl@0
     8
// Initial Contributors:
sl@0
     9
// Nokia Corporation - initial contribution.
sl@0
    10
//
sl@0
    11
// Contributors:
sl@0
    12
//
sl@0
    13
// Description:
sl@0
    14
// e32\common\x86\x86hlp_gcc.inl
sl@0
    15
// If there are no exports then GCC 3.4.x does not generate a .reloc 
sl@0
    16
// section, without which rombuild can't relocate the .code section
sl@0
    17
// to its ROM address. Your ROM then goes boom early in the boot sequence.
sl@0
    18
// This unused export forces the PE to be generated with a .reloc section.
sl@0
    19
// 
sl@0
    20
//
sl@0
    21
sl@0
    22
// Dummy export: forces GCC to emit a .reloc section in the PE image
// (see the file header comment) so rombuild can relocate the .code section.
EXPORT_C void __ignore_this_export()
	{
	}
sl@0
    25
sl@0
    26
// Raise the x86 divide-error exception (INT 0). Jumped to by UDiv64 when
// the 64-bit divisor is zero, mimicking the fault a real DIV would raise.
static void DivisionByZero()
	{
	asm("int 0");
	}
sl@0
    30
sl@0
    31
extern "C" {

// Dynamic stack allocation helper emitted by GCC for alloca()/large locals.
// GCC passes the byte count in eax and expects no return value; the stack
// pointer is lowered in place while preserving the return address.
void __NAKED__ _alloca()
{
	// GCC passes the param in eax and expects no return value
	asm("pop ecx");			// ecx = return address
	asm("sub esp, eax");	// grow the stack by eax bytes
	asm("push ecx");		// replace return address on the new stack top
	asm("ret");
}
sl@0
    41
sl@0
    42
void __NAKED__ _allmul()
//
// Multiply two 64 bit integers returning a 64 bit (truncated) result
// On entry:
//		[esp+4], [esp+8] = arg 1
//		[esp+12], [esp+16] = arg 2
// Return result in edx:eax
// Plain 'ret' - the caller removes the arguments from the stack (cdecl)
//
	{
	asm("mov eax, [esp+4]");		// eax = low1
	asm("mul dword ptr [esp+16]");	// edx:eax = low1*high2
	asm("mov ecx, eax");			// keep low 32 bits of product (cross term)
	asm("mov eax, [esp+8]");		// eax = high1
	asm("mul dword ptr [esp+12]");	// edx:eax = high1*low2
	asm("add ecx, eax");			// accumulate low 32 bits of product
	asm("mov eax, [esp+4]");		// eax = low1
	asm("mul dword ptr [esp+12]");	// edx:eax = low1*low2
	asm("add edx, ecx");			// add cross terms to high 32 bits
	asm("ret");
	}
sl@0
    63
sl@0
    64
// Entry point for 64-bit division by zero: raise the divide-error exception.
void __NAKED__ udiv64_divby0()
	{
	asm("int 0");					// division by zero exception
	asm("ret");
	}
sl@0
    69
sl@0
    70
// Core unsigned 64/64 division routine shared by the __*di3 / _*dvrm helpers.
// Register calling convention (not C-callable):
//   dividend in edx:eax, divisor in edi:esi
//   quotient returned in ebx:eax, remainder in edi:edx
//   ecx, ebp, esi are also clobbered
// Algorithm: if the divisor fits in 32 bits, two chained DIVs suffice.
// Otherwise the divisor (and dividend) are shifted right so the divisor's
// msb is bit 31, one DIV gives an approximate quotient (at most one too
// large), and the remainder check corrects it.
__NAKED__ /*LOCAL_C*/ void UDiv64()
	{
	// unsigned divide edx:eax by edi:esi
	// quotient in ebx:eax, remainder in edi:edx
	// ecx, ebp, esi also modified
	asm("test edi, edi");
	asm("jnz short UDiv64a");			// branch if divisor >= 2^32
	asm("test esi, esi");
	asm("jz %a0": : "i"(&DivisionByZero)); // if divisor=0, branch to error routine
	// Divisor fits in 32 bits: long division with two 32-bit DIVs
	asm("mov ebx, eax");				// ebx=dividend low
	asm("mov eax, edx");				// eax=dividend high
	asm("xor edx, edx");				// edx=0
	asm("div esi");						// quotient high now in eax
	asm("xchg eax, ebx");				// quotient high in ebx, dividend low in eax
	asm("div esi");						// quotient now in ebx:eax, remainder in edi:edx
	asm("ret");
	asm("UDiv64e:");					// approx quotient would be 2^32
	asm("xor eax, eax");				// set result to 0xFFFFFFFF
	asm("dec eax");
	asm("jmp short UDiv64f");
	asm("UDiv64a:");					// divisor >= 2^32
	asm("js short UDiv64b");			// skip if divisor msb set
	asm("bsr ecx, edi");				// ecx=bit number of divisor msb - 32
	asm("inc cl");
	asm("push edi");					// save divisor high
	asm("push esi");					// save divisor low
	asm("shrd esi, edi, cl");			// shift divisor right so that msb is bit 31
	asm("mov ebx, edx");				// dividend into ebx:ebp
	asm("mov ebp, eax");
	asm("shrd eax, edx, cl");			// shift dividend right same number of bits
	asm("shr edx, cl");
	asm("cmp edx, esi");				// check if approx quotient will be 2^32
	asm("jae short UDiv64e");			// if so, true result must be 0xFFFFFFFF
	asm("div esi");						// approximate quotient now in eax
	asm("UDiv64f:");
	asm("mov ecx, eax");				// into ecx
	asm("mul edi");						// multiply approx. quotient by divisor high
	asm("mov esi, eax");				// ls dword into esi, ms into edi
	asm("mov edi, edx");
	asm("mov eax, ecx");				// approx. quotient into eax
	asm("mul dword ptr [esp]");			// multiply approx. quotient by divisor low
	asm("add edx, esi");				// edi:edx:eax now equals approx. quotient * divisor
	asm("adc edi, 0");
	asm("xor esi, esi");
	asm("sub ebp, eax");				// subtract dividend - approx. quotient *divisor
	asm("sbb ebx, edx");
	asm("sbb esi, edi");
	asm("jnc short UDiv64c");			// if no borrow, result OK
	asm("dec ecx");						// else result is one too big
	asm("add ebp, [esp]");				// and add divisor to get correct remainder
	asm("adc ebx, [esp+4]");
	asm("UDiv64c:");
	asm("mov eax, ecx");				// result into ebx:eax, remainder into edi:edx
	asm("mov edi, ebx");
	asm("mov edx, ebp");
	asm("xor ebx, ebx");
	asm("add esp, 8");					// remove temporary values from stack
	asm("ret");
	asm("UDiv64b:");					// divisor msb set: quotient is 0 or 1
	asm("mov ebx, 1");
	asm("sub eax, esi");				// subtract divisor from dividend
	asm("sbb edx, edi");
	asm("jnc short UDiv64d");			// if no borrow, result=1, remainder in edx:eax
	asm("add eax, esi");				// else add back
	asm("adc edx, edi");
	asm("dec ebx");						// and decrement quotient
	asm("UDiv64d:");
	asm("mov edi, edx");				// remainder into edi:edx
	asm("mov edx, eax");
	asm("mov eax, ebx");				// result in ebx:eax
	asm("xor ebx, ebx");
	asm("ret");
	}
sl@0
   143
sl@0
   144
__NAKED__ void _aulldvrm()
//
// Divide two 64 bit unsigned integers, returning a 64 bit result
// and a 64 bit remainder
//
// On entry:
//		[esp+4], [esp+8] = dividend
//		[esp+12], [esp+16] = divisor
//
// Return (dividend / divisor) in edx:eax
// Return (dividend % divisor) in ebx:ecx
//
// Caller removes arguments from stack (cdecl)
//
	{
	asm("push ebp");
	asm("push edi");
	asm("push esi");
	// Stack offsets below are +12 relative to the header comment
	// because of the three pushes above.
	asm("mov eax, [esp+16]");			// edx:eax = dividend
	asm("mov edx, [esp+20]");
	asm("mov esi, [esp+24]");			// edi:esi = divisor
	asm("mov edi, [esp+28]");
	asm("call %a0": : "i"(&UDiv64));	// quotient in ebx:eax, remainder in edi:edx
	asm("mov ecx, edx");				// remainder low -> ecx
	asm("mov edx, ebx");				// quotient high -> edx
	asm("mov ebx, edi");				// remainder high -> ebx
	asm("pop esi");
	asm("pop edi");
	asm("pop ebp");
	asm("ret");
	}
sl@0
   175
sl@0
   176
__NAKED__ void _alldvrm()
//
// Divide two 64 bit signed integers, returning a 64 bit result
// and a 64 bit remainder
//
// On entry:
//		[esp+4], [esp+8] = dividend
//		[esp+12], [esp+16] = divisor
//
// Return (dividend / divisor) in edx:eax
// Return (dividend % divisor) in ebx:ecx
//
// Caller removes arguments from stack (cdecl)
//
// Strategy: negate operands to make them non-negative, do an unsigned
// divide, then fix up the signs: quotient is negative iff operand signs
// differ, remainder takes the sign of the dividend.
//
	{
	asm("push ebp");
	asm("push edi");
	asm("push esi");
	// Stack offsets below are +12 relative to the header comment.
	asm("mov eax, [esp+16]");
	asm("mov edx, [esp+20]");
	asm("mov esi, [esp+24]");
	asm("mov edi, [esp+28]");
	asm("test edx, edx");
	asm("jns alldrvm_dividend_nonnegative");
	asm("neg edx");						// 64-bit negate of edx:eax
	asm("neg eax");
	asm("sbb edx, 0");
	asm("alldrvm_dividend_nonnegative:");
	asm("test edi, edi");
	asm("jns alldrvm_divisor_nonnegative");
	asm("neg edi");						// 64-bit negate of edi:esi
	asm("neg esi");
	asm("sbb edi, 0");
	asm("alldrvm_divisor_nonnegative:");
	asm("call %a0": : "i"(&UDiv64));	// quotient ebx:eax, remainder edi:edx
	asm("mov ebp, [esp+20]");			// original dividend high
	asm("mov ecx, edx");				// remainder low -> ecx
	asm("xor ebp, [esp+28]");			// sign flag: dividend^divisor (mov preserves flags)
	asm("mov edx, ebx");				// quotient high -> edx
	asm("mov ebx, edi");				// remainder high -> ebx
	asm("jns alldrvm_quotient_nonnegative");
	asm("neg edx");						// negate quotient edx:eax
	asm("neg eax");
	asm("sbb edx, 0");
	asm("alldrvm_quotient_nonnegative:");
	asm("cmp dword ptr [esp+20], 0");	// remainder sign follows the dividend
	asm("jns alldrvm_rem_nonnegative");
	asm("neg ebx");						// negate remainder ebx:ecx
	asm("neg ecx");
	asm("sbb ebx, 0");
	asm("alldrvm_rem_nonnegative:");
	asm("pop esi");
	asm("pop edi");
	asm("pop ebp");
	asm("ret");
	}
sl@0
   232
sl@0
   233
//__NAKED__ void _aulldiv()
// GCC runtime helper: unsigned 64-bit division (replaces MSVC's _aulldiv).
__NAKED__ void __udivdi3 ()
//
// Divide two 64 bit unsigned integers returning a 64 bit result
// On entry:
//		[esp+4], [esp+8] = dividend
//		[esp+12], [esp+16] = divisor
// Return result in edx:eax
// Caller removes arguments from stack (cdecl)
//
	{
	asm("push ebp");
	asm("push edi");
	asm("push esi");
	asm("push ebx");
	// Stack offsets below are +16 relative to the header comment
	// because of the four pushes above.
	asm("mov eax, [esp+20]");
	asm("mov edx, [esp+24]");
	asm("mov esi, [esp+28]");
	asm("mov edi, [esp+32]");
	asm("call %a0": : "i"(&UDiv64));	// quotient in ebx:eax
	asm("mov edx, ebx");				// quotient high -> edx
	asm("pop ebx");
	asm("pop esi");
	asm("pop edi");
	asm("pop ebp");
	asm("ret");
	}
sl@0
   260
sl@0
   261
sl@0
   262
// GCC runtime helper: signed 64-bit division.
__NAKED__ void __divdi3()

//
// Divide two 64 bit signed integers returning a 64 bit result
// On entry:
//		[esp+4], [esp+8] = dividend
//		[esp+12], [esp+16] = divisor
// Return result in edx:eax
// Caller removes arguments from stack (cdecl)
//
// Unsigned divide on the magnitudes; quotient is negated when the
// operand signs differ.
//
	{
	asm("push ebp");
	asm("push edi");
	asm("push esi");
	asm("push ebx");
	// Stack offsets below are +16 relative to the header comment.
	asm("mov eax, [esp+20]");
	asm("mov edx, [esp+24]");
	asm("mov esi, [esp+28]");
	asm("mov edi, [esp+32]");
	asm("test edx, edx");
	asm("jns divdi_dividend_nonnegative");
	asm("neg edx");						// 64-bit negate of edx:eax
	asm("neg eax");
	asm("sbb edx, 0");
	asm("divdi_dividend_nonnegative:");
	asm("test edi, edi");
	asm("jns divdi_divisor_nonnegative");
	asm("neg edi");						// 64-bit negate of edi:esi
	asm("neg esi");
	asm("sbb edi, 0");
	asm("divdi_divisor_nonnegative:");
	asm("call %a0": : "i"(&UDiv64));	// quotient ebx:eax
	asm("mov ecx, [esp+24]");			// original dividend high
	asm("mov edx, ebx");				// quotient high -> edx (mov preserves flags)
	asm("xor ecx, [esp+32]");			// sign flag: dividend^divisor
	asm("jns divdi_quotient_nonnegative");
	asm("neg edx");						// negate quotient edx:eax
	asm("neg eax");
	asm("sbb edx, 0");
	asm("divdi_quotient_nonnegative:");
	asm("pop ebx");
	asm("pop esi");
	asm("pop edi");
	asm("pop ebp");
	asm("ret");
	}
sl@0
   308
sl@0
   309
// GCC runtime helper: unsigned 64-bit modulo.
__NAKED__ void __umoddi3()
//
// Divide two 64 bit unsigned integers and return 64 bit remainder
// On entry:
//		[esp+4], [esp+8] = dividend
//		[esp+12], [esp+16] = divisor
// Return result in edx:eax
// Caller removes arguments from stack (cdecl)
//
	{
	asm("push ebp");
	asm("push edi");
	asm("push esi");
	asm("push ebx");
	// Stack offsets below are +16 relative to the header comment.
	asm("mov eax, [esp+20]");
	asm("mov edx, [esp+24]");
	asm("mov esi, [esp+28]");
	asm("mov edi, [esp+32]");
	asm("call %a0": : "i"(&UDiv64));	// remainder in edi:edx
	asm("mov eax, edx");				// remainder low -> eax
	asm("mov edx, edi");				// remainder high -> edx
	asm("pop ebx");
	asm("pop esi");
	asm("pop edi");
	asm("pop ebp");
	asm("ret");
	}
sl@0
   336
sl@0
   337
// GCC runtime helper: signed 64-bit modulo.
__NAKED__ void __moddi3()
//
// Divide two 64 bit signed integers and return 64 bit remainder
// On entry:
//		[esp+4], [esp+8] = dividend
//		[esp+12], [esp+16] = divisor
// Return result in edx:eax
// Caller removes arguments from stack (cdecl)
//
// Unsigned divide on the magnitudes; the remainder takes the sign
// of the dividend.
//
	{
	asm("push ebp");
	asm("push edi");
	asm("push esi");
	asm("push ebx");
	// Stack offsets below are +16 relative to the header comment.
	asm("mov eax, [esp+20]");
	asm("mov edx, [esp+24]");
	asm("mov esi, [esp+28]");
	asm("mov edi, [esp+32]");
	asm("test edx, edx");
	asm("jns dividend_nonnegative");
	asm("neg edx");						// 64-bit negate of edx:eax
	asm("neg eax");
	asm("sbb edx, 0");
	asm("dividend_nonnegative:");
	asm("test edi, edi");
	asm("jns divisor_nonnegative");
	asm("neg edi");						// 64-bit negate of edi:esi
	asm("neg esi");
	asm("sbb edi, 0");
	asm("divisor_nonnegative:");
	asm("call %a0": : "i"(&UDiv64));	// remainder in edi:edx
	asm("mov eax, edx");				// remainder low -> eax
	asm("mov edx, edi");				// remainder high -> edx
	asm("cmp dword ptr [esp+24], 0");	// was the dividend negative?
	asm("jns rem_nonnegative");
	asm("neg edx");						// negate remainder edx:eax
	asm("neg eax");
	asm("sbb edx, 0");
	asm("rem_nonnegative:");
	asm("pop ebx");
	asm("pop esi");
	asm("pop edi");
	asm("pop ebp");
	asm("ret");
	}
sl@0
   382
sl@0
   383
__NAKED__ void _allshr()
//
// Arithmetic shift right EDX:EAX by CL
//
// For counts >= 64 the result must be the sign extension of the value
// (all bits equal to the original sign bit).
//
	{
	asm("cmp cl, 64");
	asm("jae asr_count_ge_64");
	asm("cmp cl, 32");
	asm("jae asr_count_ge_32");
	asm("shrd eax, edx, cl");			// shift low dword, filling from high dword
	asm("sar edx, cl");					// arithmetic shift of high dword
	asm("ret");
	asm("asr_count_ge_32:");
	asm("sub cl, 32");
	asm("mov eax, edx");				// low dword = high dword, then shift residue
	asm("cdq");							// high dword = sign extension of eax
	asm("sar eax, cl");
	asm("ret");
	asm("asr_count_ge_64:");
	// BUG FIX: was 'sar edx, 32'. x86 masks shift counts to 5 bits, so a
	// 32-bit shift is a no-op and the routine returned edx:edx instead of
	// the sign extension. Shifting by 31 replicates the sign bit through
	// the whole register.
	asm("sar edx, 31");
	asm("mov eax, edx");
	asm("ret");
	}
sl@0
   406
sl@0
   407
__NAKED__ void _allshl()
//
// shift left EDX:EAX by CL
//
// Counts >= 64 yield zero. x86 masks shift counts to 5 bits, hence the
// explicit >=32 and >=64 cases.
//
	{
	asm("cmp cl, 64");
	asm("jae lsl_count_ge_64");
	asm("cmp cl, 32");
	asm("jae lsl_count_ge_32");
	asm("shld edx, eax, cl");			// shift high dword, filling from low dword
	asm("shl eax, cl");
	asm("ret");
	asm("lsl_count_ge_32:");
	asm("sub cl, 32");
	asm("mov edx, eax");				// high dword = low dword shifted by residue
	asm("xor eax, eax");				// low dword becomes zero
	asm("shl edx, cl");
	asm("ret");
	asm("lsl_count_ge_64:");
	asm("xor edx, edx");				// everything shifted out: result is zero
	asm("xor eax, eax");
	asm("ret");
	}
sl@0
   430
sl@0
   431
__NAKED__ void _aullshr()
//
// Logical shift right EDX:EAX by CL
//
// Counts >= 64 yield zero. x86 masks shift counts to 5 bits, hence the
// explicit >=32 and >=64 cases.
//
	{
	asm("cmp cl, 64");
	asm("jae lsr_count_ge_64");
	asm("cmp cl, 32");
	asm("jae lsr_count_ge_32");
	asm("shrd eax, edx, cl");			// shift low dword, filling from high dword
	asm("shr edx, cl");
	asm("ret");
	asm("lsr_count_ge_32:");
	asm("sub cl, 32");
	asm("mov eax, edx");				// low dword = high dword shifted by residue
	asm("xor edx, edx");				// high dword becomes zero
	asm("shr eax, cl");
	asm("ret");
	asm("lsr_count_ge_64:");
	asm("xor edx, edx");				// everything shifted out: result is zero
	asm("xor eax, eax");
	asm("ret");
	}

}