sl@0: // Copyright (c) 2007-2009 Nokia Corporation and/or its subsidiary(-ies). sl@0: // All rights reserved. sl@0: // This component and the accompanying materials are made available sl@0: // under the terms of the License "Eclipse Public License v1.0" sl@0: // which accompanies this distribution, and is available sl@0: // at the URL "http://www.eclipse.org/legal/epl-v10.html". sl@0: // sl@0: // Initial Contributors: sl@0: // Nokia Corporation - initial contribution. sl@0: // sl@0: // Contributors: sl@0: // sl@0: // Description: sl@0: // e32\common\x86\x86hlp_gcc.inl sl@0: // If there are no exports then GCC 3.4.x does not generate a .reloc sl@0: // section, without which rombuild can't relocate the .code section sl@0: // to its ROM address. Your ROM then goes boom early in the boot sequence. sl@0: // This unused export forces the PE to be generated with a .reloc section. sl@0: // sl@0: // sl@0: sl@0: EXPORT_C void __ignore_this_export() sl@0: { sl@0: } sl@0: sl@0: static void DivisionByZero() sl@0: { sl@0: asm("int 0"); sl@0: } sl@0: sl@0: extern "C" { sl@0: sl@0: void __NAKED__ _alloca() sl@0: { sl@0: // GCC passes the param in eax and expects no return value sl@0: asm("pop ecx"); sl@0: asm("sub esp, eax"); sl@0: asm("push ecx"); sl@0: asm("ret"); sl@0: } sl@0: sl@0: void __NAKED__ _allmul() sl@0: // sl@0: // Multiply two 64 bit integers returning a 64 bit result sl@0: // On entry: sl@0: // [esp+4], [esp+8] = arg 1 sl@0: // [esp+12], [esp+16] = arg 1 sl@0: // Return result in edx:eax sl@0: // Remove arguments from stack sl@0: // sl@0: { sl@0: asm("mov eax, [esp+4]"); // eax = low1 sl@0: asm("mul dword ptr [esp+16]"); // edx:eax = low1*high2 sl@0: asm("mov ecx, eax"); // keep low 32 bits of product sl@0: asm("mov eax, [esp+8]"); // eax = high1 sl@0: asm("mul dword ptr [esp+12]"); // edx:eax = high1*low2 sl@0: asm("add ecx, eax"); // accumulate low 32 bits of product sl@0: asm("mov eax, [esp+4]"); // eax = low1 sl@0: asm("mul dword ptr [esp+12]"); // edx:eax = low1*low2 sl@0: asm("add edx, ecx"); // add cross terms to high 32 bits sl@0: asm("ret"); sl@0: } sl@0: sl@0: void __NAKED__ udiv64_divby0() sl@0: { sl@0: asm("int 0"); // division by zero exception sl@0: asm("ret"); sl@0: } sl@0: sl@0: __NAKED__ /*LOCAL_C*/ void UDiv64() sl@0: { sl@0: // unsigned divide edx:eax by edi:esi sl@0: // quotient in ebx:eax, remainder in edi:edx sl@0: // ecx, ebp, esi also modified sl@0: asm("test edi, edi"); sl@0: asm("jnz short UDiv64a"); // branch if divisor >= 2^32 sl@0: asm("test esi, esi"); sl@0: asm("jz %a0": : "i"(&DivisionByZero)); // if divisor=0, branch to error routine sl@0: asm("mov ebx, eax"); // ebx=dividend low sl@0: asm("mov eax, edx"); // eax=dividend high sl@0: asm("xor edx, edx"); // edx=0 sl@0: asm("div esi"); // quotient high now in eax sl@0: asm("xchg eax, ebx"); // quotient high in ebx, dividend low in eax sl@0: asm("div esi"); // quotient now in ebx:eax, remainder in edi:edx sl@0: asm("ret"); sl@0: asm("UDiv64e:"); sl@0: asm("xor eax, eax"); // set result to 0xFFFFFFFF sl@0: asm("dec eax"); sl@0: asm("jmp short UDiv64f"); sl@0: asm("UDiv64a:"); sl@0: asm("js short UDiv64b"); // skip if divisor msb set sl@0: asm("bsr ecx, edi"); // ecx=bit number of divisor msb - 32 sl@0: asm("inc cl"); sl@0: asm("push edi"); // save divisor high sl@0: asm("push esi"); // save divisor low sl@0: asm("shrd esi, edi, cl"); // shift divisor right so that msb is bit 31 sl@0: asm("mov ebx, edx"); // dividend into ebx:ebp sl@0: asm("mov ebp, eax"); sl@0: asm("shrd eax, edx, cl"); // shift dividend right same number of bits sl@0: asm("shr edx, cl"); sl@0: asm("cmp edx, esi"); // check if approx quotient will be 2^32 sl@0: asm("jae short UDiv64e"); // if so, true result must be 0xFFFFFFFF sl@0: asm("div esi"); // approximate quotient now in eax sl@0: asm("UDiv64f:"); sl@0: asm("mov ecx, eax"); // into ecx sl@0: asm("mul edi"); // multiply approx. quotient by divisor high sl@0: asm("mov esi, eax"); // ls dword into esi, ms into edi sl@0: asm("mov edi, edx"); sl@0: asm("mov eax, ecx"); // approx. quotient into eax sl@0: asm("mul dword ptr [esp]"); // multiply approx. quotient by divisor low sl@0: asm("add edx, esi"); // edi:edx:eax now equals approx. quotient * divisor sl@0: asm("adc edi, 0"); sl@0: asm("xor esi, esi"); sl@0: asm("sub ebp, eax"); // subtract dividend - approx. quotient *divisor sl@0: asm("sbb ebx, edx"); sl@0: asm("sbb esi, edi"); sl@0: asm("jnc short UDiv64c"); // if no borrow, result OK sl@0: asm("dec ecx"); // else result is one too big sl@0: asm("add ebp, [esp]"); // and add divisor to get correct remainder sl@0: asm("adc ebx, [esp+4]"); sl@0: asm("UDiv64c:"); sl@0: asm("mov eax, ecx"); // result into ebx:eax, remainder into edi:edx sl@0: asm("mov edi, ebx"); sl@0: asm("mov edx, ebp"); sl@0: asm("xor ebx, ebx"); sl@0: asm("add esp, 8"); // remove temporary values from stack sl@0: asm("ret"); sl@0: asm("UDiv64b:"); sl@0: asm("mov ebx, 1"); sl@0: asm("sub eax, esi"); // subtract divisor from dividend sl@0: asm("sbb edx, edi"); sl@0: asm("jnc short UDiv64d"); // if no borrow, result=1, remainder in edx:eax sl@0: asm("add eax, esi"); // else add back sl@0: asm("adc edx, edi"); sl@0: asm("dec ebx"); // and decrement quotient sl@0: asm("UDiv64d:"); sl@0: asm("mov edi, edx"); // remainder into edi:edx sl@0: asm("mov edx, eax"); sl@0: asm("mov eax, ebx"); // result in ebx:eax sl@0: asm("xor ebx, ebx"); sl@0: asm("ret"); sl@0: } sl@0: sl@0: __NAKED__ void _aulldvrm() sl@0: // sl@0: // Divide two 64 bit unsigned integers, returning a 64 bit result sl@0: // and a 64 bit remainder sl@0: // sl@0: // On entry: sl@0: // [esp+4], [esp+8] = dividend sl@0: // [esp+12], [esp+16] = divisor sl@0: // sl@0: // Return (dividend / divisor) in edx:eax sl@0: // Return (dividend % divisor) in ebx:ecx sl@0: // sl@0: // Remove arguments from stack sl@0: // sl@0: { sl@0: asm("push ebp"); sl@0: asm("push edi"); sl@0: asm("push esi"); sl@0: asm("mov eax, [esp+16]"); sl@0: asm("mov edx, [esp+20]"); sl@0: asm("mov esi, [esp+24]"); sl@0: asm("mov edi, [esp+28]"); sl@0: asm("call %a0": : "i"(&UDiv64)); sl@0: asm("mov ecx, edx"); sl@0: asm("mov edx, ebx"); sl@0: asm("mov ebx, edi"); sl@0: asm("pop esi"); sl@0: asm("pop edi"); sl@0: asm("pop ebp"); sl@0: asm("ret"); sl@0: } sl@0: sl@0: __NAKED__ void _alldvrm() sl@0: // sl@0: // Divide two 64 bit signed integers, returning a 64 bit result sl@0: // and a 64 bit remainder sl@0: // sl@0: // On entry: sl@0: // [esp+4], [esp+8] = dividend sl@0: // [esp+12], [esp+16] = divisor sl@0: // sl@0: // Return (dividend / divisor) in edx:eax sl@0: // Return (dividend % divisor) in ebx:ecx sl@0: // sl@0: // Remove arguments from stack sl@0: // sl@0: { sl@0: asm("push ebp"); sl@0: asm("push edi"); sl@0: asm("push esi"); sl@0: asm("mov eax, [esp+16]"); sl@0: asm("mov edx, [esp+20]"); sl@0: asm("mov esi, [esp+24]"); sl@0: asm("mov edi, [esp+28]"); sl@0: asm("test edx, edx"); sl@0: asm("jns alldrvm_dividend_nonnegative"); sl@0: asm("neg edx"); sl@0: asm("neg eax"); sl@0: asm("sbb edx, 0"); sl@0: asm("alldrvm_dividend_nonnegative:"); sl@0: asm("test edi, edi"); sl@0: asm("jns alldrvm_divisor_nonnegative"); sl@0: asm("neg edi"); sl@0: asm("neg esi"); sl@0: asm("sbb edi, 0"); sl@0: asm("alldrvm_divisor_nonnegative:"); sl@0: asm("call %a0": : "i"(&UDiv64)); sl@0: asm("mov ebp, [esp+20]"); sl@0: asm("mov ecx, edx"); sl@0: asm("xor ebp, [esp+28]"); sl@0: asm("mov edx, ebx"); sl@0: asm("mov ebx, edi"); sl@0: asm("jns alldrvm_quotient_nonnegative"); sl@0: asm("neg edx"); sl@0: asm("neg eax"); sl@0: asm("sbb edx, 0"); sl@0: asm("alldrvm_quotient_nonnegative:"); sl@0: asm("cmp dword ptr [esp+20], 0"); sl@0: asm("jns alldrvm_rem_nonnegative"); sl@0: asm("neg ebx"); sl@0: asm("neg ecx"); sl@0: asm("sbb ebx, 0"); sl@0: asm("alldrvm_rem_nonnegative:"); sl@0: asm("pop esi"); sl@0: asm("pop edi"); sl@0: asm("pop ebp"); sl@0: asm("ret"); sl@0: } sl@0: sl@0: //__NAKED__ void _aulldiv() sl@0: __NAKED__ void __udivdi3 () sl@0: // sl@0: // Divide two 64 bit unsigned integers returning a 64 bit result sl@0: // On entry: sl@0: // [esp+4], [esp+8] = dividend sl@0: // [esp+12], [esp+16] = divisor sl@0: // Return result in edx:eax sl@0: // Remove arguments from stack sl@0: // sl@0: { sl@0: asm("push ebp"); sl@0: asm("push edi"); sl@0: asm("push esi"); sl@0: asm("push ebx"); sl@0: asm("mov eax, [esp+20]"); sl@0: asm("mov edx, [esp+24]"); sl@0: asm("mov esi, [esp+28]"); sl@0: asm("mov edi, [esp+32]"); sl@0: asm("call %a0": : "i"(&UDiv64)); sl@0: asm("mov edx, ebx"); sl@0: asm("pop ebx"); sl@0: asm("pop esi"); sl@0: asm("pop edi"); sl@0: asm("pop ebp"); sl@0: asm("ret"); sl@0: } sl@0: sl@0: sl@0: __NAKED__ void __divdi3() sl@0: sl@0: // sl@0: // Divide two 64 bit signed integers returning a 64 bit result sl@0: // On entry: sl@0: // [esp+4], [esp+8] = dividend sl@0: // [esp+12], [esp+16] = divisor sl@0: // Return result in edx:eax sl@0: // Remove arguments from stack sl@0: // sl@0: { sl@0: asm("push ebp"); sl@0: asm("push edi"); sl@0: asm("push esi"); sl@0: asm("push ebx"); sl@0: asm("mov eax, [esp+20]"); sl@0: asm("mov edx, [esp+24]"); sl@0: asm("mov esi, [esp+28]"); sl@0: asm("mov edi, [esp+32]"); sl@0: asm("test edx, edx"); sl@0: asm("jns divdi_dividend_nonnegative"); sl@0: asm("neg edx"); sl@0: asm("neg eax"); sl@0: asm("sbb edx, 0"); sl@0: asm("divdi_dividend_nonnegative:"); sl@0: asm("test edi, edi"); sl@0: asm("jns divdi_divisor_nonnegative"); sl@0: asm("neg edi"); sl@0: asm("neg esi"); sl@0: asm("sbb edi, 0"); sl@0: asm("divdi_divisor_nonnegative:"); sl@0: asm("call %a0": : "i"(&UDiv64)); sl@0: asm("mov ecx, [esp+24]"); sl@0: asm("mov edx, ebx"); sl@0: asm("xor ecx, [esp+32]"); sl@0: asm("jns divdi_quotient_nonnegative"); sl@0: asm("neg edx"); sl@0: asm("neg eax"); sl@0: asm("sbb edx, 0"); sl@0: asm("divdi_quotient_nonnegative:"); sl@0: asm("pop ebx"); sl@0: asm("pop esi"); sl@0: asm("pop edi"); sl@0: asm("pop ebp"); sl@0: asm("ret"); sl@0: } sl@0: sl@0: __NAKED__ void __umoddi3() sl@0: // sl@0: // Divide two 64 bit unsigned integers and return 64 bit remainder sl@0: // On entry: sl@0: // [esp+4], [esp+8] = dividend sl@0: // [esp+12], [esp+16] = divisor sl@0: // Return result in edx:eax sl@0: // Remove arguments from stack sl@0: // sl@0: { sl@0: asm("push ebp"); sl@0: asm("push edi"); sl@0: asm("push esi"); sl@0: asm("push ebx"); sl@0: asm("mov eax, [esp+20]"); sl@0: asm("mov edx, [esp+24]"); sl@0: asm("mov esi, [esp+28]"); sl@0: asm("mov edi, [esp+32]"); sl@0: asm("call %a0": : "i"(&UDiv64)); sl@0: asm("mov eax, edx"); sl@0: asm("mov edx, edi"); sl@0: asm("pop ebx"); sl@0: asm("pop esi"); sl@0: asm("pop edi"); sl@0: asm("pop ebp"); sl@0: asm("ret"); sl@0: } sl@0: sl@0: __NAKED__ void __moddi3() sl@0: // sl@0: // Divide two 64 bit signed integers and return 64 bit remainder sl@0: // On entry: sl@0: // [esp+4], [esp+8] = dividend sl@0: // [esp+12], [esp+16] = divisor sl@0: // Return result in edx:eax sl@0: // Remove arguments from stack sl@0: // sl@0: { sl@0: asm("push ebp"); sl@0: asm("push edi"); sl@0: asm("push esi"); sl@0: asm("push ebx"); sl@0: asm("mov eax, [esp+20]"); sl@0: asm("mov edx, [esp+24]"); sl@0: asm("mov esi, [esp+28]"); sl@0: asm("mov edi, [esp+32]"); sl@0: asm("test edx, edx"); sl@0: asm("jns dividend_nonnegative"); sl@0: asm("neg edx"); sl@0: asm("neg eax"); sl@0: asm("sbb edx, 0"); sl@0: asm("dividend_nonnegative:"); sl@0: asm("test edi, edi"); sl@0: asm("jns divisor_nonnegative"); sl@0: asm("neg edi"); sl@0: asm("neg esi"); sl@0: asm("sbb edi, 0"); sl@0: asm("divisor_nonnegative:"); sl@0: asm("call %a0": : "i"(&UDiv64)); sl@0: asm("mov eax, edx"); sl@0: asm("mov edx, edi"); sl@0: asm("cmp dword ptr [esp+24], 0"); sl@0: asm("jns rem_nonnegative"); sl@0: asm("neg edx"); sl@0: asm("neg eax"); sl@0: asm("sbb edx, 0"); sl@0: asm("rem_nonnegative:"); sl@0: asm("pop ebx"); sl@0: asm("pop esi"); sl@0: asm("pop edi"); sl@0: asm("pop ebp"); sl@0: asm("ret"); sl@0: } sl@0: sl@0: __NAKED__ void _allshr() sl@0: // sl@0: // Arithmetic shift right EDX:EAX by CL sl@0: // sl@0: { sl@0: asm("cmp cl, 64"); sl@0: asm("jae asr_count_ge_64"); sl@0: asm("cmp cl, 32"); sl@0: asm("jae asr_count_ge_32"); sl@0: asm("shrd eax, edx, cl"); sl@0: asm("sar edx, cl"); sl@0: asm("ret"); sl@0: asm("asr_count_ge_32:"); sl@0: asm("sub cl, 32"); sl@0: asm("mov eax, edx"); sl@0: asm("cdq"); sl@0: asm("sar eax, cl"); sl@0: asm("ret"); sl@0: asm("asr_count_ge_64:"); sl@0: asm("sar edx, 32"); sl@0: asm("mov eax, edx"); sl@0: asm("ret"); sl@0: } sl@0: sl@0: __NAKED__ void _allshl() sl@0: // sl@0: // shift left EDX:EAX by CL sl@0: // sl@0: { sl@0: asm("cmp cl, 64"); sl@0: asm("jae lsl_count_ge_64"); sl@0: asm("cmp cl, 32"); sl@0: asm("jae lsl_count_ge_32"); sl@0: asm("shld edx, eax, cl"); sl@0: asm("shl eax, cl"); sl@0: asm("ret"); sl@0: asm("lsl_count_ge_32:"); sl@0: asm("sub cl, 32"); sl@0: asm("mov edx, eax"); sl@0: asm("xor eax, eax"); sl@0: asm("shl edx, cl"); sl@0: asm("ret"); sl@0: asm("lsl_count_ge_64:"); sl@0: asm("xor edx, edx"); sl@0: asm("xor eax, eax"); sl@0: asm("ret"); sl@0: } sl@0: sl@0: __NAKED__ void _aullshr() sl@0: // sl@0: // Logical shift right EDX:EAX by CL sl@0: // sl@0: { sl@0: asm("cmp cl, 64"); sl@0: asm("jae lsr_count_ge_64"); sl@0: asm("cmp cl, 32"); sl@0: asm("jae lsr_count_ge_32"); sl@0: asm("shrd eax, edx, cl"); sl@0: asm("shr edx, cl"); sl@0: asm("ret"); sl@0: asm("lsr_count_ge_32:"); sl@0: asm("sub cl, 32"); sl@0: asm("mov eax, edx"); sl@0: asm("xor edx, edx"); sl@0: asm("shr eax, cl"); sl@0: asm("ret"); sl@0: asm("lsr_count_ge_64:"); sl@0: asm("xor edx, edx"); sl@0: asm("xor eax, eax"); sl@0: asm("ret"); sl@0: } sl@0: sl@0: }