/* crypto/md32_common.h */
/* ====================================================================
 * Copyright (c) 1999-2002 The OpenSSL Project.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * 3. All advertising materials mentioning features or use of this
 *    software must display the following acknowledgment:
 *    "This product includes software developed by the OpenSSL Project
 *    for use in the OpenSSL Toolkit. (http://www.OpenSSL.org/)"
 *
 * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
 *    endorse or promote products derived from this software without
 *    prior written permission. For written permission, please contact
 *    licensing@OpenSSL.org.
 *
 * 5. Products derived from this software may not be called "OpenSSL"
 *    nor may "OpenSSL" appear in their names without prior written
 *    permission of the OpenSSL Project.
 *
 * 6. Redistributions of any form whatsoever must retain the following
 *    acknowledgment:
 *    "This product includes software developed by the OpenSSL Project
 *    for use in the OpenSSL Toolkit (http://www.OpenSSL.org/)"
 *
 * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
 * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE OpenSSL PROJECT OR
 * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 * ====================================================================
 *
 * This product includes cryptographic software written by Eric Young
 * (eay@cryptsoft.com).  This product includes software written by Tim
 * Hudson (tjh@cryptsoft.com).
 *
 */

/*
 * This is a generic 32 bit "collector" for message digest algorithms.
 * Whenever needed it collects the input character stream into chunks of
 * 32 bit values and invokes a block function that performs the actual
 * hash calculations.
 *
 * Porting guide.
 *
 * Obligatory macros:
 *
 * DATA_ORDER_IS_BIG_ENDIAN or DATA_ORDER_IS_LITTLE_ENDIAN
 *      this macro defines the byte order of the input stream.
 * HASH_CBLOCK
 *      size of a unit chunk HASH_BLOCK operates on.
 * HASH_LONG
 *      has to be at least 32 bits wide; if it's wider, then
 *      HASH_LONG_LOG2 *has to* be defined alongside.
 * HASH_CTX
 *      context structure that at least contains the following
 *      members:
 *              typedef struct {
 *                      ...
 *                      HASH_LONG       Nl,Nh;
 *                      HASH_LONG       data[HASH_LBLOCK];
 *                      unsigned int    num;
 *                      ...
 *                      } HASH_CTX;
 * HASH_UPDATE
 *      name of "Update" function, implemented here.
 * HASH_TRANSFORM
 *      name of "Transform" function, implemented here.
 * HASH_FINAL
 *      name of "Final" function, implemented here.
 * HASH_BLOCK_HOST_ORDER
 *      name of "block" function treating *aligned* input message
 *      in host byte order, implemented externally.
 * HASH_BLOCK_DATA_ORDER
 *      name of "block" function treating *unaligned* input message
 *      in original (data) byte order, implemented externally (it
 *      actually is optional if data and host are of the same
 *      "endianness").
 * HASH_MAKE_STRING
 *      macro converting context variables to the final hash value.
 *
 * Optional macros:
 *
 * B_ENDIAN or L_ENDIAN
 *      defines host byte order.
 * HASH_LONG_LOG2
 *      defaults to 2 if not stated otherwise.
 * HASH_LBLOCK
 *      assumed to be HASH_CBLOCK/4 if not stated otherwise.
 * HASH_BLOCK_DATA_ORDER_ALIGNED
 *      alternative "block" function capable of treating
 *      aligned input message in original (data) order,
 *      implemented externally.
 *
 * MD5 example:
 *
 *      #define DATA_ORDER_IS_LITTLE_ENDIAN
 *
 *      #define HASH_LONG               MD5_LONG
 *      #define HASH_LONG_LOG2          MD5_LONG_LOG2
 *      #define HASH_CTX                MD5_CTX
 *      #define HASH_CBLOCK             MD5_CBLOCK
 *      #define HASH_LBLOCK             MD5_LBLOCK
 *      #define HASH_UPDATE             MD5_Update
 *      #define HASH_TRANSFORM          MD5_Transform
 *      #define HASH_FINAL              MD5_Final
 *      #define HASH_BLOCK_HOST_ORDER   md5_block_host_order
 *      #define HASH_BLOCK_DATA_ORDER   md5_block_data_order
 *
 *                                      <appro@fy.chalmers.se>
 */
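/*
 * Illustrative sketch of how a digest source file consumes this header;
 * the FOO_* names below are hypothetical placeholders, assuming a
 * 64-byte block and big-endian input:
 *
 *      #define DATA_ORDER_IS_BIG_ENDIAN
 *
 *      #define HASH_LONG               unsigned int
 *      #define HASH_CTX                FOO_CTX
 *      #define HASH_CBLOCK             64
 *      #define HASH_UPDATE             FOO_Update
 *      #define HASH_TRANSFORM          FOO_Transform
 *      #define HASH_FINAL              FOO_Final
 *      #define HASH_BLOCK_HOST_ORDER   foo_block_host_order
 *      #define HASH_BLOCK_DATA_ORDER   foo_block_data_order
 *      #define HASH_MAKE_STRING(c,s)   do { ... } while (0)
 *
 *      #include "md32_common.h"
 *
 * The two block functions and an Init routine are then implemented by
 * the digest itself; Update/Transform/Final come from this file.
 */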

#if !defined(DATA_ORDER_IS_BIG_ENDIAN) && !defined(DATA_ORDER_IS_LITTLE_ENDIAN)
#error "DATA_ORDER must be defined!"
#endif

#ifndef HASH_CBLOCK
#error "HASH_CBLOCK must be defined!"
#endif
#ifndef HASH_LONG
#error "HASH_LONG must be defined!"
#endif
#ifndef HASH_CTX
#error "HASH_CTX must be defined!"
#endif

#ifndef HASH_UPDATE
#error "HASH_UPDATE must be defined!"
#endif
#ifndef HASH_TRANSFORM
#error "HASH_TRANSFORM must be defined!"
#endif
#ifndef HASH_FINAL
#error "HASH_FINAL must be defined!"
#endif

#ifndef HASH_BLOCK_HOST_ORDER
#error "HASH_BLOCK_HOST_ORDER must be defined!"
#endif

#if 0
/*
 * Moved below as it's required only if HASH_BLOCK_DATA_ORDER_ALIGNED
 * isn't defined.
 */
#ifndef HASH_BLOCK_DATA_ORDER
#error "HASH_BLOCK_DATA_ORDER must be defined!"
#endif
#endif

#ifndef HASH_LBLOCK
#define HASH_LBLOCK     (HASH_CBLOCK/4)
#endif

#ifndef HASH_LONG_LOG2
#define HASH_LONG_LOG2  2
#endif

/*
 * Engage compiler specific rotate intrinsic function if available.
 */
#undef ROTATE
#ifndef PEDANTIC
# if defined(_MSC_VER) || defined(__ICC)
#  define ROTATE(a,n)   _lrotl(a,n)
# elif defined(__MWERKS__)
#  if defined(__POWERPC__)
#   define ROTATE(a,n)  __rlwinm(a,n,0,31)
#  elif defined(__MC68K__)
    /* Motorola specific tweak. <appro@fy.chalmers.se> */
#   define ROTATE(a,n)  ( n<24 ? __rol(a,n) : __ror(a,32-n) )
#  else
#   define ROTATE(a,n)  __rol(a,n)
#  endif
# elif defined(__GNUC__) && __GNUC__>=2 && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM)
  /*
   * Some GNU C inline assembler templates. Note that these are
   * rotates by *constant* number of bits! But that's exactly
   * what we need here...
   *                                    <appro@fy.chalmers.se>
   */
#  if defined(__i386) || defined(__i386__) || defined(__x86_64) || defined(__x86_64__)
#   define ROTATE(a,n)  ({ register unsigned int ret;  \
                                asm (                   \
                                "roll %1,%0"            \
                                : "=r"(ret)             \
                                : "I"(n), "0"(a)        \
                                : "cc");                \
                           ret;                         \
                        })
#  elif defined(__powerpc) || defined(__ppc__) || defined(__powerpc64__)
#   define ROTATE(a,n)  ({ register unsigned int ret;  \
                                asm (                   \
                                "rlwinm %0,%1,%2,0,31"  \
                                : "=r"(ret)             \
                                : "r"(a), "I"(n));      \
                           ret;                         \
                        })
#  endif
# endif
#endif /* PEDANTIC */

#if HASH_LONG_LOG2==2 /* Engage only if sizeof(HASH_LONG)== 4 */
/* A nice byte order reversal from Wei Dai <weidai@eskimo.com> */
#ifdef ROTATE
/* 5 instructions with rotate instruction, else 9 */
#define REVERSE_FETCH32(a,l)    (                                       \
                l=*(const HASH_LONG *)(a),                              \
                ((ROTATE(l,8)&0x00FF00FF)|(ROTATE((l&0x00FF00FF),24)))  \
                                )
#else
/* 6 instructions with rotate instruction, else 8 */
#define REVERSE_FETCH32(a,l)    (                               \
                l=*(const HASH_LONG *)(a),                      \
                l=(((l>>8)&0x00FF00FF)|((l&0x00FF00FF)<<8)),    \
                ROTATE(l,16)                                    \
                                )
/*
 * Originally the middle line started with l=(((l&0xFF00FF00)>>8)|...
 * It's rewritten as above for two reasons:
 *      - RISCs aren't good at long constants and have to explicitly
 *        compose 'em with several (well, usually 2) instructions in a
 *        register before performing the actual operation and (as you
 *        already realized:-) having the same constant should inspire
 *        the compiler to permanently allocate the only register for it;
 *      - most modern CPUs have two ALUs, but usually only one has
 *        circuitry for shifts:-( this minor tweak inspires the compiler
 *        to schedule shift instructions in a better way...
 *
 *                              <appro@fy.chalmers.se>
 */
#endif
#endif

#ifndef ROTATE
#define ROTATE(a,n)     (((a)<<(n))|(((a)&0xffffffff)>>(32-(n))))
#endif
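/*
 * Worked example of the rotate semantics (a sanity check, not compiled
 * code): ROTATE rotates a 32-bit value left by n bits, so
 * ROTATE(0x12345678,8) yields 0x34567812 and ROTATE(0x80000001,1)
 * yields 0x00000003 -- the bits shifted out at the top re-enter at
 * the bottom.
 */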

/*
 * Make some obvious choices. E.g., HASH_BLOCK_DATA_ORDER_ALIGNED
 * and HASH_BLOCK_HOST_ORDER ought to be the same if input data
 * and host are of the same "endianness". It's possible to mask
 * this with a blank #define HASH_BLOCK_DATA_ORDER though...
 *
 *                              <appro@fy.chalmers.se>
 */
#if defined(B_ENDIAN)
# if defined(DATA_ORDER_IS_BIG_ENDIAN)
#  if !defined(HASH_BLOCK_DATA_ORDER_ALIGNED) && HASH_LONG_LOG2==2
#   define HASH_BLOCK_DATA_ORDER_ALIGNED        HASH_BLOCK_HOST_ORDER
#  endif
# endif
#elif defined(L_ENDIAN)
# if defined(DATA_ORDER_IS_LITTLE_ENDIAN)
#  if !defined(HASH_BLOCK_DATA_ORDER_ALIGNED) && HASH_LONG_LOG2==2
#   define HASH_BLOCK_DATA_ORDER_ALIGNED        HASH_BLOCK_HOST_ORDER
#  endif
# endif
#endif

#if !defined(HASH_BLOCK_DATA_ORDER_ALIGNED)
#ifndef HASH_BLOCK_DATA_ORDER
#error "HASH_BLOCK_DATA_ORDER must be defined!"
#endif
#endif

#if defined(DATA_ORDER_IS_BIG_ENDIAN)

#ifndef PEDANTIC
# if defined(__GNUC__) && __GNUC__>=2 && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM)
#  if ((defined(__i386) || defined(__i386__)) && !defined(I386_ONLY)) || \
      (defined(__x86_64) || defined(__x86_64__))
    /*
     * This gives ~30-40% performance improvement in SHA-256 compiled
     * with gcc [on P4]. Well, first macro to be frank. We can pull
     * this trick on x86* platforms only, because these CPUs can fetch
     * unaligned data without raising an exception.
     */
#   define HOST_c2l(c,l)        ({ unsigned int r=*((const unsigned int *)(c)); \
                                   asm ("bswapl %0":"=r"(r):"0"(r));    \
                                   (c)+=4; (l)=r;                       })
#   define HOST_l2c(l,c)        ({ unsigned int r=(l);                  \
                                   asm ("bswapl %0":"=r"(r):"0"(r));    \
                                   *((unsigned int *)(c))=r; (c)+=4; r; })
#  endif
# endif
#endif

#ifndef HOST_c2l
#define HOST_c2l(c,l)   (l =(((unsigned long)(*((c)++)))<<24),          \
                         l|=(((unsigned long)(*((c)++)))<<16),          \
                         l|=(((unsigned long)(*((c)++)))<< 8),          \
                         l|=(((unsigned long)(*((c)++)))    ),          \
                         l)
#endif
#define HOST_p_c2l(c,l,n)       {                                       \
                        switch (n) {                                    \
                        case 0: l =((unsigned long)(*((c)++)))<<24;     \
                        case 1: l|=((unsigned long)(*((c)++)))<<16;     \
                        case 2: l|=((unsigned long)(*((c)++)))<< 8;     \
                        case 3: l|=((unsigned long)(*((c)++)));         \
                                } }
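/*
 * The case labels in HOST_p_c2l above and HOST_p_c2l_p below fall
 * through deliberately: entering at 'case n' packs the remaining
 * bytes of a partially filled word.
 */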
#define HOST_p_c2l_p(c,l,sc,len) {                                      \
                        switch (sc) {                                   \
                        case 0: l =((unsigned long)(*((c)++)))<<24;     \
                                if (--len == 0) break;                  \
                        case 1: l|=((unsigned long)(*((c)++)))<<16;     \
                                if (--len == 0) break;                  \
                        case 2: l|=((unsigned long)(*((c)++)))<< 8;     \
                                } }
/* NOTE the pointer is not incremented at the end of this */
#define HOST_c2l_p(c,l,n)       {                                       \
                        l=0; (c)+=n;                                    \
                        switch (n) {                                    \
                        case 3: l =((unsigned long)(*(--(c))))<< 8;     \
                        case 2: l|=((unsigned long)(*(--(c))))<<16;     \
                        case 1: l|=((unsigned long)(*(--(c))))<<24;     \
                                } }
#ifndef HOST_l2c
#define HOST_l2c(l,c)   (*((c)++)=(unsigned char)(((l)>>24)&0xff),      \
                         *((c)++)=(unsigned char)(((l)>>16)&0xff),      \
                         *((c)++)=(unsigned char)(((l)>> 8)&0xff),      \
                         *((c)++)=(unsigned char)(((l)    )&0xff),      \
                         l)
#endif
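/*
 * Worked example for the big-endian codec above: given the input
 * bytes 0x01 0x02 0x03 0x04, HOST_c2l assembles l == 0x01020304 (the
 * first byte lands in the most significant position), and HOST_l2c
 * writes that value back out as the same byte sequence.
 */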

#elif defined(DATA_ORDER_IS_LITTLE_ENDIAN)

#if defined(__i386) || defined(__i386__) || defined(__x86_64) || defined(__x86_64__)
# ifndef B_ENDIAN
   /* See comment in DATA_ORDER_IS_BIG_ENDIAN section. */
#  define HOST_c2l(c,l) ((l)=*((const unsigned int *)(c)), (c)+=4, l)
#  define HOST_l2c(l,c) (*((unsigned int *)(c))=(l), (c)+=4, l)
# endif
#endif

#ifndef HOST_c2l
#define HOST_c2l(c,l)   (l =(((unsigned long)(*((c)++)))    ),          \
                         l|=(((unsigned long)(*((c)++)))<< 8),          \
                         l|=(((unsigned long)(*((c)++)))<<16),          \
                         l|=(((unsigned long)(*((c)++)))<<24),          \
                         l)
#endif
#define HOST_p_c2l(c,l,n)       {                                       \
                        switch (n) {                                    \
                        case 0: l =((unsigned long)(*((c)++)));         \
                        case 1: l|=((unsigned long)(*((c)++)))<< 8;     \
                        case 2: l|=((unsigned long)(*((c)++)))<<16;     \
                        case 3: l|=((unsigned long)(*((c)++)))<<24;     \
                                } }
#define HOST_p_c2l_p(c,l,sc,len) {                                      \
                        switch (sc) {                                   \
                        case 0: l =((unsigned long)(*((c)++)));         \
                                if (--len == 0) break;                  \
                        case 1: l|=((unsigned long)(*((c)++)))<< 8;     \
                                if (--len == 0) break;                  \
                        case 2: l|=((unsigned long)(*((c)++)))<<16;     \
                                } }
/* NOTE the pointer is not incremented at the end of this */
#define HOST_c2l_p(c,l,n)       {                                       \
                        l=0; (c)+=n;                                    \
                        switch (n) {                                    \
                        case 3: l =((unsigned long)(*(--(c))))<<16;     \
                        case 2: l|=((unsigned long)(*(--(c))))<< 8;     \
                        case 1: l|=((unsigned long)(*(--(c))));         \
                                } }
#ifndef HOST_l2c
#define HOST_l2c(l,c)   (*((c)++)=(unsigned char)(((l)    )&0xff),      \
                         *((c)++)=(unsigned char)(((l)>> 8)&0xff),      \
                         *((c)++)=(unsigned char)(((l)>>16)&0xff),      \
                         *((c)++)=(unsigned char)(((l)>>24)&0xff),      \
                         l)
#endif
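/*
 * Little-endian counterpart of the example above: the same input
 * bytes 0x01 0x02 0x03 0x04 assemble to l == 0x04030201, with the
 * first byte in the least significant position.
 */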

#endif

/*
 * Time for some action:-)
 */

EXPORT_C int HASH_UPDATE (HASH_CTX *c, const void *data_, size_t len)
	{
	const unsigned char *data=data_;
	register HASH_LONG * p;
	register HASH_LONG l;
	size_t sw,sc,ew,ec;

	if (len==0) return 1;

	l=(c->Nl+(((HASH_LONG)len)<<3))&0xffffffffUL;
	/* 95-05-24 eay Fixed a bug with the overflow handling, thanks to
	 * Wei Dai <weidai@eskimo.com> for pointing it out. */
	if (l < c->Nl) /* overflow */
		c->Nh++;
	c->Nh+=(len>>29);	/* might cause compiler warning on 16-bit */
	c->Nl=l;

	if (c->num != 0)
		{
		p=c->data;
		sw=c->num>>2;
		sc=c->num&0x03;

		if ((c->num+len) >= HASH_CBLOCK)
			{
			l=p[sw]; HOST_p_c2l(data,l,sc); p[sw++]=l;
			for (; sw<HASH_LBLOCK; sw++)
				{
				HOST_c2l(data,l); p[sw]=l;
				}
			HASH_BLOCK_HOST_ORDER (c,p,1);
			len-=(HASH_CBLOCK-c->num);
			c->num=0;
			/* drop through and do the rest */
			}
		else
			{
			c->num+=(unsigned int)len;
			if ((sc+len) < 4) /* ugly, add char's to a word */
				{
				l=p[sw]; HOST_p_c2l_p(data,l,sc,len); p[sw]=l;
				}
			else
				{
				ew=(c->num>>2);
				ec=(c->num&0x03);
				if (sc)
					l=p[sw];
				HOST_p_c2l(data,l,sc);
				p[sw++]=l;
				for (; sw < ew; sw++)
					{
					HOST_c2l(data,l); p[sw]=l;
					}
				if (ec)
					{
					HOST_c2l_p(data,l,ec); p[sw]=l;
					}
				}
			return 1;
			}
		}

	sw=len/HASH_CBLOCK;
	if (sw > 0)
		{
#if defined(HASH_BLOCK_DATA_ORDER_ALIGNED)
		/*
		 * Note that HASH_BLOCK_DATA_ORDER_ALIGNED gets defined
		 * only if sizeof(HASH_LONG)==4.
		 */
		if ((((size_t)data)%4) == 0)
			{
			/* data is properly aligned so that we can cast it: */
			HASH_BLOCK_DATA_ORDER_ALIGNED (c,(const HASH_LONG *)data,sw);
			sw*=HASH_CBLOCK;
			data+=sw;
			len-=sw;
			}
		else
#if !defined(HASH_BLOCK_DATA_ORDER)
			while (sw--)
				{
				memcpy (p=c->data,data,HASH_CBLOCK);
				HASH_BLOCK_DATA_ORDER_ALIGNED(c,p,1);
				data+=HASH_CBLOCK;
				len-=HASH_CBLOCK;
				}
#endif
#endif
#if defined(HASH_BLOCK_DATA_ORDER)
			{
			HASH_BLOCK_DATA_ORDER(c,data,sw);
			sw*=HASH_CBLOCK;
			data+=sw;
			len-=sw;
			}
#endif
		}

	if (len!=0)
		{
		p = c->data;
		c->num = len;
		ew=len>>2;	/* words to copy */
		ec=len&0x03;
		for (; ew; ew--,p++)
			{
			HOST_c2l(data,l); *p=l;
			}
		HOST_c2l_p(data,l,ec);
		*p=l;
		}
	return 1;
	}
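/*
 * Typical caller flow, an illustrative sketch using the MD5
 * instantiation from the porting guide above (MD5_Init itself is
 * implemented by the digest, not by this file). The collector buffers
 * partial blocks in c->data and flushes whole blocks to the block
 * function, so Update may be called with arbitrary lengths:
 *
 *      MD5_CTX ctx;
 *      unsigned char md[MD5_DIGEST_LENGTH];
 *      MD5_Init(&ctx);
 *      MD5_Update(&ctx, "ab", 2);      // this file's HASH_UPDATE
 *      MD5_Update(&ctx, "c", 1);
 *      MD5_Final(md, &ctx);            // this file's HASH_FINAL
 */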


EXPORT_C void HASH_TRANSFORM (HASH_CTX *c, const unsigned char *data)
	{
#if defined(HASH_BLOCK_DATA_ORDER_ALIGNED)
	if ((((size_t)data)%4) == 0)
		/* data is properly aligned so that we can cast it: */
		HASH_BLOCK_DATA_ORDER_ALIGNED (c,(const HASH_LONG *)data,1);
	else
#if !defined(HASH_BLOCK_DATA_ORDER)
		{
		memcpy (c->data,data,HASH_CBLOCK);
		HASH_BLOCK_DATA_ORDER_ALIGNED (c,c->data,1);
		}
#endif
#endif
#if defined(HASH_BLOCK_DATA_ORDER)
	HASH_BLOCK_DATA_ORDER (c,data,1);
#endif
	}


EXPORT_C int HASH_FINAL (unsigned char *md, HASH_CTX *c)
	{
	register HASH_LONG *p;
	register unsigned long l;
	register int i,j;
	static const unsigned char end[4]={0x80,0x00,0x00,0x00};
	const unsigned char *cp=end;

	/* c->num should definitely have room for at least one more byte. */
	p=c->data;
	i=c->num>>2;
	j=c->num&0x03;

#if 0
	/* purify often complains about the following line as an
	 * Uninitialized Memory Read. While this can be true, the
	 * following p_c2l macro will reset l when that case is true.
	 * This is because j&0x03 contains the number of 'valid' bytes
	 * already in p[i]. If and only if j&0x03 == 0, the UMR will
	 * occur but this is also the only time p_c2l will do
	 * l= *(cp++) instead of l|= *(cp++)
	 * Many thanks to Alex Tang <altitude@cic.net> for picking up
	 * this 'potential bug' */
#ifdef PURIFY
	if (j==0) p[i]=0; /* Yeah, but that's not the way to fix it:-) */
#endif
	l=p[i];
#else
	l = (j==0) ? 0 : p[i];
#endif
	HOST_p_c2l(cp,l,j); p[i++]=l; /* i is the next 'undefined word' */

	if (i>(HASH_LBLOCK-2)) /* save room for Nl and Nh */
		{
		if (i<HASH_LBLOCK) p[i]=0;
		HASH_BLOCK_HOST_ORDER (c,p,1);
		i=0;
		}
	for (; i<(HASH_LBLOCK-2); i++)
		p[i]=0;

#if defined(DATA_ORDER_IS_BIG_ENDIAN)
	p[HASH_LBLOCK-2]=c->Nh;
	p[HASH_LBLOCK-1]=c->Nl;
#elif defined(DATA_ORDER_IS_LITTLE_ENDIAN)
	p[HASH_LBLOCK-2]=c->Nl;
	p[HASH_LBLOCK-1]=c->Nh;
#endif
	HASH_BLOCK_HOST_ORDER (c,p,1);

#ifndef HASH_MAKE_STRING
#error "HASH_MAKE_STRING must be defined!"
#else
	HASH_MAKE_STRING(c,md);
#endif

	c->num=0;
	/* clear stuff, HASH_BLOCK may be leaving some stuff on the stack
	 * but I'm not worried :-)
	OPENSSL_cleanse((void *)c,sizeof(HASH_CTX));
	 */
	return 1;
	}
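/*
 * Worked padding example (standard Merkle-Damgard strengthening, as
 * performed above): for a 3-byte message and a 64-byte block, the
 * final block gets 0x80 appended at offset 3, zeros up to the last
 * two words, and then the 64-bit bit count (here 24) stored in
 * Nh:Nl order for big-endian data, Nl:Nh for little-endian.
 */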

#ifndef MD32_REG_T
#define MD32_REG_T long
/*
 * This comment was originally written for MD5, which is why it
 * discusses A-D. But it basically applies to all 32-bit digests,
 * which is why it was moved to a common header file.
 *
 * In case you wonder why A-D are declared as long and not
 * as MD5_LONG. Doing so results in a slight performance
 * boost on LP64 architectures. The catch is we don't
 * really care if 32 MSBs of a 64-bit register get polluted
 * with eventual overflows as we *save* only 32 LSBs in
 * *either* case. Now declaring 'em long excuses the compiler
 * from keeping 32 MSBs zeroed, resulting in a 13% performance
 * improvement under SPARC Solaris7/64 and 5% under AlphaLinux.
 * Well, to be honest it should say that this *prevents*
 * performance degradation.
 *                              <appro@fy.chalmers.se>
 * Apparently there are LP64 compilers that generate better
 * code if A-D are declared int. Most notably GCC-x86_64
 * generates better code.
 *                              <appro@fy.chalmers.se>
 */
#endif
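
/*
 * Illustrative sketch of the intended use: a block function would
 * declare its working variables with this type, e.g.
 *
 *      register unsigned MD32_REG_T A,B,C,D;
 *
 * relying on truncation to 32 bits happening only when the values are
 * stored back into the HASH_LONG fields of the context.
 */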