os/security/crypto/weakcryptospi/source/bigint/vchelp.cpp
/*
* Copyright (c) 1995-2009 Nokia Corporation and/or its subsidiary(-ies).
* All rights reserved.
* This component and the accompanying materials are made available
* under the terms of the License "Eclipse Public License v1.0"
* which accompanies this distribution, and is available
* at the URL "http://www.eclipse.org/legal/epl-v10.html".
*
* Initial Contributors:
* Nokia Corporation - initial contribution.
*
* Contributors:
*
* Description:
* e32\nklib\x86\vchelp.cpp
*
*/

#ifndef __NAKED__
#define __NAKED__	__declspec(naked)
#endif

#include <e32def.h>

#pragma warning ( disable : 4414 )  // short jump to function converted to near

extern "C" {
__NAKED__ void _allmul()
//
// Multiply two 64 bit integers returning a 64 bit result
// On entry:
//		[esp+4], [esp+8] = arg 1
//		[esp+12], [esp+16] = arg 2
// Return result in edx:eax
// Remove arguments from stack
//
	{
	_asm mov eax, [esp+4]			// eax = low1
	_asm mul dword ptr [esp+16]		// edx:eax = low1*high2
	_asm mov ecx, eax				// keep low 32 bits of product
	_asm mov eax, [esp+8]			// eax = high1
	_asm mul dword ptr [esp+12]		// edx:eax = high1*low2
	_asm add ecx, eax				// accumulate low 32 bits of product
	_asm mov eax, [esp+4]			// eax = low1
	_asm mul dword ptr [esp+12]		// edx:eax = low1*low2
	_asm add edx, ecx				// add cross terms to high 32 bits
	_asm ret 16
	}
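
/*
 * Illustration (not part of the original file): the routine above builds a
 * 64x64->64 multiply out of three 32x32->64 MULs. A minimal C++ sketch of the
 * same identity, using standard types instead of register halves (hypothetical
 * helper name):
 *
 *	static unsigned long long Mul64Sketch(unsigned long long a, unsigned long long b)
 *		{
 *		unsigned int l1 = (unsigned int)a, h1 = (unsigned int)(a >> 32);
 *		unsigned int l2 = (unsigned int)b, h2 = (unsigned int)(b >> 32);
 *		// low1*low2 is the full-width base product; the two cross terms only
 *		// affect the high 32 bits, and high1*high2 overflows out of 64 bits.
 *		return (unsigned long long)l1 * l2 + ((unsigned long long)(l1 * h2 + h1 * l2) << 32);
 *		}
 */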

void udiv64_divby0()
	{
	_asm int 0						// division by zero exception
	_asm ret
	}

__NAKED__ void UDiv64()
	{
	// unsigned divide edx:eax by edi:esi
	// quotient in ebx:eax, remainder in edi:edx
	// ecx, ebp, esi also modified
	_asm test edi, edi
	_asm jnz short UDiv64a				// branch if divisor >= 2^32
	_asm test esi, esi
//	_ASM_j(z,DivisionByZero)			// if divisor=0, branch to error routine
	_asm jz udiv64_divby0
	_asm mov ebx, eax					// ebx=dividend low
	_asm mov eax, edx					// eax=dividend high
	_asm xor edx, edx					// edx=0
	_asm div esi						// quotient high now in eax
	_asm xchg eax, ebx					// quotient high in ebx, dividend low in eax
	_asm div esi						// quotient now in ebx:eax, remainder in edi:edx
	_asm ret
	UDiv64e:
	_asm xor eax, eax					// set result to 0xFFFFFFFF
	_asm dec eax
	_asm jmp short UDiv64f
	UDiv64a:
	_asm js short UDiv64b				// skip if divisor msb set
	_asm bsr ecx, edi					// ecx=bit number of divisor msb - 32
	_asm inc cl
	_asm push edi						// save divisor high
	_asm push esi						// save divisor low
	_asm shrd esi, edi, cl				// shift divisor right so that msb is bit 31
	_asm mov ebx, edx					// dividend into ebx:ebp
	_asm mov ebp, eax
	_asm shrd eax, edx, cl				// shift dividend right same number of bits
	_asm shr edx, cl
	_asm cmp edx, esi					// check if approx quotient will be 2^32
	_asm jae short UDiv64e				// if so, true result must be 0xFFFFFFFF
	_asm div esi						// approximate quotient now in eax
	UDiv64f:
	_asm mov ecx, eax					// into ecx
	_asm mul edi						// multiply approx. quotient by divisor high
	_asm mov esi, eax					// ls dword into esi, ms into edi
	_asm mov edi, edx
	_asm mov eax, ecx					// approx. quotient into eax
	_asm mul dword ptr [esp]			// multiply approx. quotient by divisor low
	_asm add edx, esi					// edi:edx:eax now equals approx. quotient * divisor
	_asm adc edi, 0
	_asm xor esi, esi
	_asm sub ebp, eax					// subtract approx. quotient * divisor from dividend
	_asm sbb ebx, edx
	_asm sbb esi, edi
	_asm jnc short UDiv64c				// if no borrow, result OK
	_asm dec ecx						// else result is one too big
	_asm add ebp, [esp]					// and add divisor to get correct remainder
	_asm adc ebx, [esp+4]
	UDiv64c:
	_asm mov eax, ecx					// result into ebx:eax, remainder into edi:edx
	_asm mov edi, ebx
	_asm mov edx, ebp
	_asm xor ebx, ebx
	_asm add esp, 8						// remove temporary values from stack
	_asm ret
	UDiv64b:
	_asm mov ebx, 1
	_asm sub eax, esi					// subtract divisor from dividend
	_asm sbb edx, edi
	_asm jnc short UDiv64d				// if no borrow, result=1, remainder in edx:eax
	_asm add eax, esi					// else add back
	_asm adc edx, edi
	_asm dec ebx						// and decrement quotient
	UDiv64d:
	_asm mov edi, edx					// remainder into edi:edx
	_asm mov edx, eax
	_asm mov eax, ebx					// result in ebx:eax
	_asm xor ebx, ebx
	_asm ret
	}
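
/*
 * Illustration (not part of the original file): a C++ sketch of the structure
 * of UDiv64, assuming native unsigned long long support (hypothetical helper
 * name). The first branch mirrors the pair of DIV ESI instructions; the second
 * stands in for the normalise-and-correct path, which exists only because the
 * CPU's DIV can divide at most a 64-bit value by a 32-bit one.
 *
 *	static unsigned long long UDiv64Sketch(unsigned long long n, unsigned long long d,
 *										   unsigned long long* rem)
 *		{
 *		if ((d >> 32) == 0)
 *			{
 *			// Divisor fits in 32 bits: two chained 32-bit divisions.
 *			unsigned int dl = (unsigned int)d;
 *			unsigned long long qh = (unsigned int)(n >> 32) / dl;
 *			unsigned int r        = (unsigned int)(n >> 32) % dl;
 *			unsigned long long lo = ((unsigned long long)r << 32) | (unsigned int)n;
 *			*rem = lo % dl;
 *			return (qh << 32) | (lo / dl);
 *			}
 *		// Divisor >= 2^32, so the quotient fits in 32 bits. The assembler
 *		// approximates it from the top bits and corrects it by at most one;
 *		// in C++ we can simply let the compiler perform the whole division.
 *		unsigned long long q = n / d;
 *		*rem = n - q * d;
 *		return q;
 *		}
 */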

__NAKED__ void _aulldiv()
//
// Divide two 64 bit unsigned integers returning a 64 bit result
// On entry:
//		[esp+4], [esp+8] = dividend
//		[esp+12], [esp+16] = divisor
// Return result in edx:eax
// Remove arguments from stack
//
	{
	_asm push ebp
	_asm push edi
	_asm push esi
	_asm push ebx
	_asm mov eax, [esp+20]				// edx:eax = dividend
	_asm mov edx, [esp+24]
	_asm mov esi, [esp+28]				// edi:esi = divisor
	_asm mov edi, [esp+32]
	_asm call UDiv64					// quotient in ebx:eax
	_asm mov edx, ebx					// return quotient in edx:eax
	_asm pop ebx
	_asm pop esi
	_asm pop edi
	_asm pop ebp
	_asm ret 16
	}
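
/*
 * Illustration (not part of the original file): on x86, MSVC emits a call to
 * _aulldiv for unsigned 64-bit division, roughly as follows (hypothetical
 * function name):
 *
 *	unsigned long long Quotient(unsigned long long a, unsigned long long b)
 *		{
 *		return a / b;	// divisor pushed, then dividend, then call _aulldiv;
 *		}				// result comes back in edx:eax, callee pops the arguments
 */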

__NAKED__ void _alldiv()
//
// Divide two 64 bit signed integers returning a 64 bit result
// On entry:
//		[esp+4], [esp+8] = dividend
//		[esp+12], [esp+16] = divisor
// Return result in edx:eax
// Remove arguments from stack
//
	{
	_asm push ebp
	_asm push edi
	_asm push esi
	_asm push ebx
	_asm mov eax, [esp+20]				// edx:eax = dividend
	_asm mov edx, [esp+24]
	_asm mov esi, [esp+28]				// edi:esi = divisor
	_asm mov edi, [esp+32]
	_asm test edx, edx
	_asm jns dividend_nonnegative
	_asm neg edx						// negate dividend if negative
	_asm neg eax
	_asm sbb edx, 0
	dividend_nonnegative:
	_asm test edi, edi
	_asm jns divisor_nonnegative
	_asm neg edi						// negate divisor if negative
	_asm neg esi
	_asm sbb edi, 0
	divisor_nonnegative:
	_asm call UDiv64					// unsigned quotient in ebx:eax
	_asm mov ecx, [esp+24]				// dividend high word (carries the sign)
	_asm mov edx, ebx
	_asm xor ecx, [esp+32]				// combine with divisor sign - negative if signs differ
	_asm jns quotient_nonnegative
	_asm neg edx						// signs differ, so negate the quotient
	_asm neg eax
	_asm sbb edx, 0
	quotient_nonnegative:
	_asm pop ebx
	_asm pop esi
	_asm pop edi
	_asm pop ebp
	_asm ret 16
	}
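
/*
 * Illustration (not part of the original file): the sign handling above is the
 * usual "divide the magnitudes, then fix the sign" scheme, which matches C++'s
 * truncation-toward-zero division. A minimal sketch (hypothetical helper name):
 *
 *	static long long SDivSketch(long long a, long long b)
 *		{
 *		unsigned long long ua = a < 0 ? 0ULL - (unsigned long long)a : (unsigned long long)a;
 *		unsigned long long ub = b < 0 ? 0ULL - (unsigned long long)b : (unsigned long long)b;
 *		unsigned long long uq = ua / ub;				// unsigned divide, as UDiv64 does
 *		return ((a < 0) != (b < 0)) ? -(long long)uq : (long long)uq;
 *		}
 *
 * For example, -7 / 2 == -3 under these rules (truncated, not floored).
 */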

__NAKED__ void _aullrem()
//
// Divide two 64 bit unsigned integers and return 64 bit remainder
// On entry:
//		[esp+4], [esp+8] = dividend
//		[esp+12], [esp+16] = divisor
// Return result in edx:eax
// Remove arguments from stack
//
	{
	_asm push ebp
	_asm push edi
	_asm push esi
	_asm push ebx
	_asm mov eax, [esp+20]				// edx:eax = dividend
	_asm mov edx, [esp+24]
	_asm mov esi, [esp+28]				// edi:esi = divisor
	_asm mov edi, [esp+32]
	_asm call UDiv64					// remainder in edi:edx
	_asm mov eax, edx					// return remainder in edx:eax
	_asm mov edx, edi
	_asm pop ebx
	_asm pop esi
	_asm pop edi
	_asm pop ebp
	_asm ret 16
	}
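
/*
 * Illustration (not part of the original file): _aullrem backs the unsigned
 * 64-bit '%' operator in the same way (hypothetical function name):
 *
 *	unsigned long long Remainder(unsigned long long a, unsigned long long b)
 *		{
 *		return a % b;	// compiles to a call to _aullrem, remainder in edx:eax
 *		}
 */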

__NAKED__ void _allrem()
//
// Divide two 64 bit signed integers and return 64 bit remainder
// On entry:
//		[esp+4], [esp+8] = dividend
//		[esp+12], [esp+16] = divisor
// Return result in edx:eax
// Remove arguments from stack
//
	{
	_asm push ebp
	_asm push edi
	_asm push esi
	_asm push ebx
	_asm mov eax, [esp+20]				// edx:eax = dividend
	_asm mov edx, [esp+24]
	_asm mov esi, [esp+28]				// edi:esi = divisor
	_asm mov edi, [esp+32]
	_asm test edx, edx
	_asm jns dividend_nonnegative
	_asm neg edx						// negate dividend if negative
	_asm neg eax
	_asm sbb edx, 0
	dividend_nonnegative:
	_asm test edi, edi
	_asm jns divisor_nonnegative
	_asm neg edi						// negate divisor if negative
	_asm neg esi
	_asm sbb edi, 0
	divisor_nonnegative:
	_asm call UDiv64					// remainder in edi:edx
	_asm mov eax, edx					// remainder into edx:eax
	_asm mov edx, edi
	_asm cmp dword ptr [esp+24], 0		// remainder takes the sign of the dividend
	_asm jns rem_nonnegative
	_asm neg edx
	_asm neg eax
	_asm sbb edx, 0
	rem_nonnegative:
	_asm pop ebx
	_asm pop esi
	_asm pop edi
	_asm pop ebp
	_asm ret 16
	}
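
/*
 * Illustration (not part of the original file): the remainder takes the sign of
 * the dividend, so that (a/b)*b + a%b == a always holds with the truncating
 * division above. A minimal sketch (hypothetical helper name):
 *
 *	static long long SRemSketch(long long a, long long b)
 *		{
 *		unsigned long long ua = a < 0 ? 0ULL - (unsigned long long)a : (unsigned long long)a;
 *		unsigned long long ub = b < 0 ? 0ULL - (unsigned long long)b : (unsigned long long)b;
 *		unsigned long long ur = ua % ub;				// unsigned remainder, as UDiv64 leaves it
 *		return a < 0 ? -(long long)ur : (long long)ur;
 *		}
 *
 * For example, -7 % 2 == -1, matching -7 / 2 == -3.
 */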

__NAKED__ void _allshr()
//
// Arithmetic shift right EDX:EAX by ECX
//
	{
	_asm cmp ecx, 64
	_asm jae asr_count_ge_64
	_asm cmp cl, 32
	_asm jae asr_count_ge_32
	_asm shrd eax, edx, cl				// counts 0-31: shift across the two halves
	_asm sar edx, cl
	_asm ret
	asr_count_ge_32:
	_asm sub cl, 32						// counts 32-63: low half comes from the high half
	_asm mov eax, edx
	_asm cdq							// sign-extend high half into edx
	_asm sar eax, cl
	_asm ret
	asr_count_ge_64:
	_asm sar edx, 31					// counts >= 64: fill both halves with the sign bit
	_asm mov eax, edx					// (x86 masks shift counts to 5 bits, so 31 rather than 32)
	_asm ret
	}
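
/*
 * Illustration (not part of the original file): the three paths above written
 * out in C++ for a shift count n (hypothetical helper name), assuming the
 * compiler's >> on signed values is arithmetic, as MSVC's is. C++ itself leaves
 * shifts of 64 or more undefined, so the sketch pins them explicitly:
 *
 *	static long long AsrSketch(long long x, unsigned int n)
 *		{
 *		if (n >= 64)
 *			return x >> 63;						// every bit becomes a copy of the sign bit
 *		if (n >= 32)
 *			return (x >> 32) >> (n - 32);		// low half comes from the sign-extended high half
 *		return x >> n;							// count 0-31: SHRD + SAR across the halves
 *		}
 */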

__NAKED__ void _allshl()
//
// Shift left EDX:EAX by ECX
//
	{
	_asm cmp ecx, 64
	_asm jae lsl_count_ge_64
	_asm cmp cl, 32
	_asm jae lsl_count_ge_32
	_asm shld edx, eax, cl				// counts 0-31: shift across the two halves
	_asm shl eax, cl
	_asm ret
	lsl_count_ge_32:
	_asm sub cl, 32						// counts 32-63: high half comes from the low half
	_asm mov edx, eax
	_asm xor eax, eax
	_asm shl edx, cl
	_asm ret
	lsl_count_ge_64:
	_asm xor edx, edx					// counts >= 64: result is zero
	_asm xor eax, eax
	_asm ret
	}
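
/*
 * Illustration (not part of the original file): the same case split for the
 * left shift, in C++ (hypothetical helper name):
 *
 *	static unsigned long long LslSketch(unsigned long long x, unsigned int n)
 *		{
 *		if (n >= 64)
 *			return 0;								// everything shifted out
 *		if (n >= 32)
 *			return (x & 0xFFFFFFFFu) << n;			// only the low half can survive
 *		return x << n;								// count 0-31: SHLD + SHL across the halves
 *		}
 */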

__NAKED__ void _aullshr()
//
// Logical shift right EDX:EAX by ECX
//
	{
	_asm cmp ecx, 64
	_asm jae lsr_count_ge_64
	_asm cmp cl, 32
	_asm jae lsr_count_ge_32
	_asm shrd eax, edx, cl				// counts 0-31: shift across the two halves
	_asm shr edx, cl
	_asm ret
	lsr_count_ge_32:
	_asm sub cl, 32						// counts 32-63: low half comes from the high half
	_asm mov eax, edx
	_asm xor edx, edx
	_asm shr eax, cl
	_asm ret
	lsr_count_ge_64:
	_asm xor edx, edx					// counts >= 64: result is zero
	_asm xor eax, eax
	_asm ret
	}
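
/*
 * Illustration (not part of the original file): the logical (zero-filling)
 * right shift, in C++ (hypothetical helper name):
 *
 *	static unsigned long long LsrSketch(unsigned long long x, unsigned int n)
 *		{
 *		if (n >= 64)
 *			return 0;							// everything shifted out
 *		if (n >= 32)
 *			return (x >> 32) >> (n - 32);		// low half comes from the high half, zero fill
 *		return x >> n;							// count 0-31: SHRD + SHR across the halves
 *		}
 */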
}