os/ossrv/ssl/libcrypto/src/crypto/md32_common.h
changeset 0 bde4ae8d615e
/* crypto/md32_common.h */
/* ====================================================================
 * Copyright (c) 1999-2002 The OpenSSL Project.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * 3. All advertising materials mentioning features or use of this
 *    software must display the following acknowledgment:
 *    "This product includes software developed by the OpenSSL Project
 *    for use in the OpenSSL Toolkit. (http://www.OpenSSL.org/)"
 *
 * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
 *    endorse or promote products derived from this software without
 *    prior written permission. For written permission, please contact
 *    licensing@OpenSSL.org.
 *
 * 5. Products derived from this software may not be called "OpenSSL"
 *    nor may "OpenSSL" appear in their names without prior written
 *    permission of the OpenSSL Project.
 *
 * 6. Redistributions of any form whatsoever must retain the following
 *    acknowledgment:
 *    "This product includes software developed by the OpenSSL Project
 *    for use in the OpenSSL Toolkit (http://www.OpenSSL.org/)"
 *
 * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
 * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE OpenSSL PROJECT OR
 * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 * ====================================================================
 *
 * This product includes cryptographic software written by Eric Young
 * (eay@cryptsoft.com).  This product includes software written by Tim
 * Hudson (tjh@cryptsoft.com).
 *
 */

/*
 * This is a generic 32-bit "collector" for message digest algorithms.
 * Whenever needed it collects the input character stream into chunks of
 * 32-bit values and invokes a block function that performs the actual
 * hash calculation.
 *
 * Porting guide.
 *
 * Obligatory macros:
 *
 * DATA_ORDER_IS_BIG_ENDIAN or DATA_ORDER_IS_LITTLE_ENDIAN
 *	this macro defines the byte order of the input stream.
 * HASH_CBLOCK
 *	size of a unit chunk HASH_BLOCK operates on.
 * HASH_LONG
 *	has to be at least 32 bits wide; if it's wider, then
 *	HASH_LONG_LOG2 *has to* be defined along with it.
 * HASH_CTX
 *	context structure that contains at least the following
 *	members:
 *		typedef struct {
 *			...
 *			HASH_LONG	Nl,Nh;
 *			HASH_LONG	data[HASH_LBLOCK];
 *			unsigned int	num;
 *			...
 *			} HASH_CTX;
 * HASH_UPDATE
 *	name of "Update" function, implemented here.
 * HASH_TRANSFORM
 *	name of "Transform" function, implemented here.
 * HASH_FINAL
 *	name of "Final" function, implemented here.
 * HASH_BLOCK_HOST_ORDER
 *	name of "block" function treating *aligned* input message
 *	in host byte order, implemented externally.
 * HASH_BLOCK_DATA_ORDER
 *	name of "block" function treating *unaligned* input message
 *	in original (data) byte order, implemented externally (it
 *	is actually optional if data and host are of the same
 *	"endianness").
 * HASH_MAKE_STRING
 *	macro converting context variables to the final hash value.
 *
 * Optional macros:
 *
 * B_ENDIAN or L_ENDIAN
 *	defines host byte order.
 * HASH_LONG_LOG2
 *	defaults to 2 if not stated otherwise.
 * HASH_LBLOCK
 *	assumed to be HASH_CBLOCK/4 if not stated otherwise.
 * HASH_BLOCK_DATA_ORDER_ALIGNED
 *	alternative "block" function capable of treating
 *	aligned input message in original (data) order,
 *	implemented externally.
 *
 * MD5 example:
 *
 *	#define DATA_ORDER_IS_LITTLE_ENDIAN
 *
 *	#define HASH_LONG		MD5_LONG
 *	#define HASH_LONG_LOG2		MD5_LONG_LOG2
 *	#define HASH_CTX		MD5_CTX
 *	#define HASH_CBLOCK		MD5_CBLOCK
 *	#define HASH_LBLOCK		MD5_LBLOCK
 *	#define HASH_UPDATE		MD5_Update
 *	#define HASH_TRANSFORM		MD5_Transform
 *	#define HASH_FINAL		MD5_Final
 *	#define HASH_BLOCK_HOST_ORDER	md5_block_host_order
 *	#define HASH_BLOCK_DATA_ORDER	md5_block_data_order
 *
 *					<appro@fy.chalmers.se>
 */
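/*
 * To make the expansion concrete: a digest source file defines the
 * macros above and then does #include "md32_common.h", at which point
 * the HASH_UPDATE, HASH_TRANSFORM and HASH_FINAL bodies below are
 * emitted under the chosen names; only the Init routine and the block
 * function(s) are left to implement by hand. (A sketch of what MD5's
 * HASH_MAKE_STRING looks like appears further down, next to the
 * macro's use.)
 */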

#if !defined(DATA_ORDER_IS_BIG_ENDIAN) && !defined(DATA_ORDER_IS_LITTLE_ENDIAN)
#error "DATA_ORDER must be defined!"
#endif

#ifndef HASH_CBLOCK
#error "HASH_CBLOCK must be defined!"
#endif
#ifndef HASH_LONG
#error "HASH_LONG must be defined!"
#endif
#ifndef HASH_CTX
#error "HASH_CTX must be defined!"
#endif

#ifndef HASH_UPDATE
#error "HASH_UPDATE must be defined!"
#endif
#ifndef HASH_TRANSFORM
#error "HASH_TRANSFORM must be defined!"
#endif
#ifndef HASH_FINAL
#error "HASH_FINAL must be defined!"
#endif

#ifndef HASH_BLOCK_HOST_ORDER
#error "HASH_BLOCK_HOST_ORDER must be defined!"
#endif

#if 0
/*
 * Moved below as it's required only if HASH_BLOCK_DATA_ORDER_ALIGNED
 * isn't defined.
 */
#ifndef HASH_BLOCK_DATA_ORDER
#error "HASH_BLOCK_DATA_ORDER must be defined!"
#endif
#endif

#ifndef HASH_LBLOCK
#define HASH_LBLOCK	(HASH_CBLOCK/4)
#endif

#ifndef HASH_LONG_LOG2
#define HASH_LONG_LOG2	2
#endif

/*
 * Engage compiler specific rotate intrinsic function if available.
 */
#undef ROTATE
#ifndef PEDANTIC
# if defined(_MSC_VER) || defined(__ICC)
#  define ROTATE(a,n)	_lrotl(a,n)
# elif defined(__MWERKS__)
#  if defined(__POWERPC__)
#   define ROTATE(a,n)	__rlwinm(a,n,0,31)
#  elif defined(__MC68K__)
    /* Motorola specific tweak. <appro@fy.chalmers.se> */
#   define ROTATE(a,n)	( n<24 ? __rol(a,n) : __ror(a,32-n) )
#  else
#   define ROTATE(a,n)	__rol(a,n)
#  endif
# elif defined(__GNUC__) && __GNUC__>=2 && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM)
  /*
   * Some GNU C inline assembler templates. Note that these are
   * rotates by *constant* number of bits! But that's exactly
   * what we need here...
   *					<appro@fy.chalmers.se>
   */
#  if defined(__i386) || defined(__i386__) || defined(__x86_64) || defined(__x86_64__)
#   define ROTATE(a,n)	({ register unsigned int ret;	\
				asm (			\
				"roll %1,%0"		\
				: "=r"(ret)		\
				: "I"(n), "0"(a)	\
				: "cc");		\
			   ret;				\
			})
#  elif defined(__powerpc) || defined(__ppc__) || defined(__powerpc64__)
#   define ROTATE(a,n)	({ register unsigned int ret;	\
				asm (			\
				"rlwinm %0,%1,%2,0,31"	\
				: "=r"(ret)		\
				: "r"(a), "I"(n));	\
			   ret;				\
			})
#  endif
# endif
#endif /* PEDANTIC */

#if HASH_LONG_LOG2==2	/* Engage only if sizeof(HASH_LONG)==4 */
/* A nice byte order reversal from Wei Dai <weidai@eskimo.com> */
#ifdef ROTATE
/* 5 instructions with rotate instruction, else 9 */
#define REVERSE_FETCH32(a,l)	(					\
		l=*(const HASH_LONG *)(a),				\
		((ROTATE(l,8)&0x00FF00FF)|(ROTATE((l&0x00FF00FF),24)))	\
				)
#else
/* 6 instructions with rotate instruction, else 8 */
#define REVERSE_FETCH32(a,l)	(				\
		l=*(const HASH_LONG *)(a),			\
		l=(((l>>8)&0x00FF00FF)|((l&0x00FF00FF)<<8)),	\
		ROTATE(l,16)					\
				)
/*
 * Originally the middle line started with l=(((l&0xFF00FF00)>>8)|...
 * It's rewritten as above for two reasons:
 *	- RISCs aren't good at long constants and have to explicitly
 *	  compose 'em with several (well, usually 2) instructions in a
 *	  register before performing the actual operation and (as you
 *	  already realized:-) having the same constant should inspire the
 *	  compiler to permanently allocate the only register for it;
 *	- most modern CPUs have two ALUs, but usually only one has
 *	  circuitry for shifts:-( this minor tweak inspires the compiler
 *	  to schedule shift instructions in a better way...
 *
 *				<appro@fy.chalmers.se>
 */
#endif
#endif

#ifndef ROTATE
#define ROTATE(a,n)     (((a)<<(n))|(((a)&0xffffffff)>>(32-(n))))
#endif
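/*
 * Worked example of the generic fallback: ROTATE(0x80000001,1) leaves
 * 0x00000003 in the 32 LSBs. The (a)&0xffffffff mask matters when
 * HASH_LONG is wider than 32 bits: without it, bits that drifted above
 * position 31 would be shifted back down into the result.
 */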

/*
 * Make some obvious choices. E.g., HASH_BLOCK_DATA_ORDER_ALIGNED
 * and HASH_BLOCK_HOST_ORDER ought to be the same if input data
 * and host are of the same "endianness". It's possible to mask
 * this with a blank #define HASH_BLOCK_DATA_ORDER though...
 *
 *				<appro@fy.chalmers.se>
 */
#if defined(B_ENDIAN)
#  if defined(DATA_ORDER_IS_BIG_ENDIAN)
#    if !defined(HASH_BLOCK_DATA_ORDER_ALIGNED) && HASH_LONG_LOG2==2
#      define HASH_BLOCK_DATA_ORDER_ALIGNED	HASH_BLOCK_HOST_ORDER
#    endif
#  endif
#elif defined(L_ENDIAN)
#  if defined(DATA_ORDER_IS_LITTLE_ENDIAN)
#    if !defined(HASH_BLOCK_DATA_ORDER_ALIGNED) && HASH_LONG_LOG2==2
#      define HASH_BLOCK_DATA_ORDER_ALIGNED	HASH_BLOCK_HOST_ORDER
#    endif
#  endif
#endif
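/*
 * Concretely: MD5 declares DATA_ORDER_IS_LITTLE_ENDIAN, so on an
 * L_ENDIAN host with a 4-byte HASH_LONG the aligned data-order routine
 * collapses to md5_block_host_order and no byte swapping is performed;
 * the same logic pairs SHA's big-endian data order with B_ENDIAN hosts.
 */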

#if !defined(HASH_BLOCK_DATA_ORDER_ALIGNED)
#ifndef HASH_BLOCK_DATA_ORDER
#error "HASH_BLOCK_DATA_ORDER must be defined!"
#endif
#endif

#if defined(DATA_ORDER_IS_BIG_ENDIAN)

#ifndef PEDANTIC
# if defined(__GNUC__) && __GNUC__>=2 && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM)
#  if ((defined(__i386) || defined(__i386__)) && !defined(I386_ONLY)) || \
      (defined(__x86_64) || defined(__x86_64__))
    /*
     * This gives ~30-40% performance improvement in SHA-256 compiled
     * with gcc [on P4]. Well, the first macro, to be frank. We can pull
     * this trick on x86* platforms only, because these CPUs can fetch
     * unaligned data without raising an exception.
     */
#   define HOST_c2l(c,l)	({ unsigned int r=*((const unsigned int *)(c));	\
				   asm ("bswapl %0":"=r"(r):"0"(r));	\
				   (c)+=4; (l)=r;			})
#   define HOST_l2c(l,c)	({ unsigned int r=(l);			\
				   asm ("bswapl %0":"=r"(r):"0"(r));	\
				   *((unsigned int *)(c))=r; (c)+=4; r;	})
#  endif
# endif
#endif

#ifndef HOST_c2l
#define HOST_c2l(c,l)	(l =(((unsigned long)(*((c)++)))<<24),		\
			 l|=(((unsigned long)(*((c)++)))<<16),		\
			 l|=(((unsigned long)(*((c)++)))<< 8),		\
			 l|=(((unsigned long)(*((c)++)))    ),		\
			 l)
#endif
#define HOST_p_c2l(c,l,n)	{					\
			switch (n) {					\
			case 0: l =((unsigned long)(*((c)++)))<<24;	\
			case 1: l|=((unsigned long)(*((c)++)))<<16;	\
			case 2: l|=((unsigned long)(*((c)++)))<< 8;	\
			case 3: l|=((unsigned long)(*((c)++)));		\
				} }
#define HOST_p_c2l_p(c,l,sc,len) {					\
			switch (sc) {					\
			case 0: l =((unsigned long)(*((c)++)))<<24;	\
				if (--len == 0) break;			\
			case 1: l|=((unsigned long)(*((c)++)))<<16;	\
				if (--len == 0) break;			\
			case 2: l|=((unsigned long)(*((c)++)))<< 8;	\
				} }
/* NOTE the pointer is not incremented at the end of this */
#define HOST_c2l_p(c,l,n)	{					\
			l=0; (c)+=n;					\
			switch (n) {					\
			case 3: l =((unsigned long)(*(--(c))))<< 8;	\
			case 2: l|=((unsigned long)(*(--(c))))<<16;	\
			case 1: l|=((unsigned long)(*(--(c))))<<24;	\
				} }
#ifndef HOST_l2c
#define HOST_l2c(l,c)	(*((c)++)=(unsigned char)(((l)>>24)&0xff),	\
			 *((c)++)=(unsigned char)(((l)>>16)&0xff),	\
			 *((c)++)=(unsigned char)(((l)>> 8)&0xff),	\
			 *((c)++)=(unsigned char)(((l)    )&0xff),	\
			 l)
#endif
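/*
 * Worked example for the macros above: given input bytes
 * {0x01,0x02,0x03,0x04}, HOST_c2l yields l==0x01020304 and advances c
 * by 4, while HOST_c2l_p(c,l,3) reads only three bytes, yields
 * l==0x01020300 and, per the NOTE, leaves c where it started (it steps
 * forward by n first and then walks back while reading).
 */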

#elif defined(DATA_ORDER_IS_LITTLE_ENDIAN)

#if defined(__i386) || defined(__i386__) || defined(__x86_64) || defined(__x86_64__)
# ifndef B_ENDIAN
   /* See comment in DATA_ORDER_IS_BIG_ENDIAN section. */
#  define HOST_c2l(c,l)	((l)=*((const unsigned int *)(c)), (c)+=4, l)
#  define HOST_l2c(l,c)	(*((unsigned int *)(c))=(l), (c)+=4, l)
# endif
#endif

#ifndef HOST_c2l
#define HOST_c2l(c,l)	(l =(((unsigned long)(*((c)++)))    ),		\
			 l|=(((unsigned long)(*((c)++)))<< 8),		\
			 l|=(((unsigned long)(*((c)++)))<<16),		\
			 l|=(((unsigned long)(*((c)++)))<<24),		\
			 l)
#endif
#define HOST_p_c2l(c,l,n)	{					\
			switch (n) {					\
			case 0: l =((unsigned long)(*((c)++)));		\
			case 1: l|=((unsigned long)(*((c)++)))<< 8;	\
			case 2: l|=((unsigned long)(*((c)++)))<<16;	\
			case 3: l|=((unsigned long)(*((c)++)))<<24;	\
				} }
#define HOST_p_c2l_p(c,l,sc,len) {					\
			switch (sc) {					\
			case 0: l =((unsigned long)(*((c)++)));		\
				if (--len == 0) break;			\
			case 1: l|=((unsigned long)(*((c)++)))<< 8;	\
				if (--len == 0) break;			\
			case 2: l|=((unsigned long)(*((c)++)))<<16;	\
				} }
/* NOTE the pointer is not incremented at the end of this */
#define HOST_c2l_p(c,l,n)	{					\
			l=0; (c)+=n;					\
			switch (n) {					\
			case 3: l =((unsigned long)(*(--(c))))<<16;	\
			case 2: l|=((unsigned long)(*(--(c))))<< 8;	\
			case 1: l|=((unsigned long)(*(--(c))));		\
				} }
#ifndef HOST_l2c
#define HOST_l2c(l,c)	(*((c)++)=(unsigned char)(((l)    )&0xff),	\
			 *((c)++)=(unsigned char)(((l)>> 8)&0xff),	\
			 *((c)++)=(unsigned char)(((l)>>16)&0xff),	\
			 *((c)++)=(unsigned char)(((l)>>24)&0xff),	\
			 l)
#endif
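/*
 * Mirror-image example: with the same input bytes {0x01,0x02,0x03,0x04},
 * the little-endian HOST_c2l above yields l==0x04030201, and HOST_l2c
 * stores a value back out lowest byte first.
 */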

#endif

/*
 * Time for some action:-)
 */

EXPORT_C int HASH_UPDATE (HASH_CTX *c, const void *data_, size_t len)
	{
	const unsigned char *data=data_;
	register HASH_LONG *p;
	register HASH_LONG l;
	size_t sw,sc,ew,ec;

	if (len==0) return 1;

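	/*
	 * Nh:Nl together form a 64-bit count of message bits. len<<3
	 * converts bytes to bits for the low word; len>>29 is the part
	 * of len*8 that belongs in the high word.
	 */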
	l=(c->Nl+(((HASH_LONG)len)<<3))&0xffffffffUL;
	/* 95-05-24 eay Fixed a bug with the overflow handling, thanks to
	 * Wei Dai <weidai@eskimo.com> for pointing it out. */
	if (l < c->Nl) /* overflow */
		c->Nh++;
	c->Nh+=(len>>29);	/* might cause compiler warning on 16-bit */
	c->Nl=l;

	if (c->num != 0)
		{
		p=c->data;
		sw=c->num>>2;
		sc=c->num&0x03;

		if ((c->num+len) >= HASH_CBLOCK)
			{
			l=p[sw]; HOST_p_c2l(data,l,sc); p[sw++]=l;
			for (; sw<HASH_LBLOCK; sw++)
				{
				HOST_c2l(data,l); p[sw]=l;
				}
			HASH_BLOCK_HOST_ORDER (c,p,1);
			len-=(HASH_CBLOCK-c->num);
			c->num=0;
			/* drop through and do the rest */
			}
		else
			{
			c->num+=(unsigned int)len;
			if ((sc+len) < 4) /* ugly, add char's to a word */
				{
				l=p[sw]; HOST_p_c2l_p(data,l,sc,len); p[sw]=l;
				}
			else
				{
				ew=(c->num>>2);
				ec=(c->num&0x03);
				if (sc)
					l=p[sw];
				HOST_p_c2l(data,l,sc);
				p[sw++]=l;
				for (; sw < ew; sw++)
					{
					HOST_c2l(data,l); p[sw]=l;
					}
				if (ec)
					{
					HOST_c2l_p(data,l,ec); p[sw]=l;
					}
				}
			return 1;
			}
		}

	sw=len/HASH_CBLOCK;
	if (sw > 0)
		{
#if defined(HASH_BLOCK_DATA_ORDER_ALIGNED)
		/*
		 * Note that HASH_BLOCK_DATA_ORDER_ALIGNED gets defined
		 * only if sizeof(HASH_LONG)==4.
		 */
		if ((((size_t)data)%4) == 0)
			{
			/* data is properly aligned so that we can cast it: */
			HASH_BLOCK_DATA_ORDER_ALIGNED (c,(const HASH_LONG *)data,sw);
			sw*=HASH_CBLOCK;
			data+=sw;
			len-=sw;
			}
		else
#if !defined(HASH_BLOCK_DATA_ORDER)
			while (sw--)
				{
				memcpy (p=c->data,data,HASH_CBLOCK);
				HASH_BLOCK_DATA_ORDER_ALIGNED(c,p,1);
				data+=HASH_CBLOCK;
				len-=HASH_CBLOCK;
				}
#endif
#endif
#if defined(HASH_BLOCK_DATA_ORDER)
			{
			HASH_BLOCK_DATA_ORDER(c,data,sw);
			sw*=HASH_CBLOCK;
			data+=sw;
			len-=sw;
			}
#endif
		}

	if (len!=0)
		{
		p = c->data;
		c->num = (unsigned int)len;
		ew=len>>2;	/* words to copy */
		ec=len&0x03;
		for (; ew; ew--,p++)
			{
			HOST_c2l(data,l); *p=l;
			}
		HOST_c2l_p(data,l,ec);
		*p=l;
		}
	return 1;
	}
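/*
 * Typical calling sequence for the functions generated here, shown
 * with MD5's names for concreteness (MD5_Init itself is implemented
 * elsewhere):
 *
 *	MD5_CTX ctx;
 *	unsigned char md[MD5_DIGEST_LENGTH];
 *
 *	MD5_Init (&ctx);
 *	MD5_Update (&ctx,part1,part1_len);
 *	MD5_Update (&ctx,part2,part2_len);
 *	MD5_Final (md,&ctx);
 *
 * Update may be called any number of times with arbitrary lengths;
 * partial blocks are buffered in c->data between calls.
 */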


EXPORT_C void HASH_TRANSFORM (HASH_CTX *c, const unsigned char *data)
	{
#if defined(HASH_BLOCK_DATA_ORDER_ALIGNED)
	if ((((size_t)data)%4) == 0)
		/* data is properly aligned so that we can cast it: */
		HASH_BLOCK_DATA_ORDER_ALIGNED (c,(const HASH_LONG *)data,1);
	else
#if !defined(HASH_BLOCK_DATA_ORDER)
		{
		memcpy (c->data,data,HASH_CBLOCK);
		HASH_BLOCK_DATA_ORDER_ALIGNED (c,c->data,1);
		}
#endif
#endif
#if defined(HASH_BLOCK_DATA_ORDER)
	HASH_BLOCK_DATA_ORDER (c,data,1);
#endif
	}


EXPORT_C int HASH_FINAL (unsigned char *md, HASH_CTX *c)
	{
	register HASH_LONG *p;
	register unsigned long l;
	register int i,j;
	static const unsigned char end[4]={0x80,0x00,0x00,0x00};
	const unsigned char *cp=end;

	/* c->num should definitely have room for at least one more byte. */
	p=c->data;
	i=c->num>>2;
	j=c->num&0x03;

#if 0
	/* purify often complains about the following line as an
	 * Uninitialized Memory Read.  While this can be true, the
	 * following p_c2l macro will reset l when that case is true.
	 * This is because j&0x03 contains the number of 'valid' bytes
	 * already in p[i].  If and only if j&0x03 == 0, the UMR will
	 * occur but this is also the only time p_c2l will do
	 * l= *(cp++) instead of l|= *(cp++)
	 * Many thanks to Alex Tang <altitude@cic.net> for picking up this
	 * 'potential bug' */
#ifdef PURIFY
	if (j==0) p[i]=0; /* Yeah, but that's not the way to fix it:-) */
#endif
	l=p[i];
#else
	l = (j==0) ? 0 : p[i];
#endif
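	/*
	 * Standard MD-strengthening padding from here on: append the 0x80
	 * byte from end[], zero-fill up to the last two words of the block,
	 * then store the 64-bit bit count. E.g. with 3 bytes buffered in a
	 * 64-byte block, 0x80 lands at offset 3, offsets 4..55 become zero
	 * and the bit count occupies offsets 56..63.
	 */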
	HOST_p_c2l(cp,l,j); p[i++]=l; /* i is the next 'undefined word' */

	if (i>(HASH_LBLOCK-2)) /* save room for Nl and Nh */
		{
		if (i<HASH_LBLOCK) p[i]=0;
		HASH_BLOCK_HOST_ORDER (c,p,1);
		i=0;
		}
	for (; i<(HASH_LBLOCK-2); i++)
		p[i]=0;

#if   defined(DATA_ORDER_IS_BIG_ENDIAN)
	p[HASH_LBLOCK-2]=c->Nh;
	p[HASH_LBLOCK-1]=c->Nl;
#elif defined(DATA_ORDER_IS_LITTLE_ENDIAN)
	p[HASH_LBLOCK-2]=c->Nl;
	p[HASH_LBLOCK-1]=c->Nh;
#endif
	HASH_BLOCK_HOST_ORDER (c,p,1);

#ifndef HASH_MAKE_STRING
#error "HASH_MAKE_STRING must be defined!"
#else
	HASH_MAKE_STRING(c,md);
#endif
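	/*
	 * For reference, MD5 defines HASH_MAKE_STRING along these lines in
	 * md5_locl.h (a sketch; A..D are the MD5_CTX state words):
	 *
	 *	#define HASH_MAKE_STRING(c,s)	do {	\
	 *		unsigned long ll;		\
	 *		ll=(c)->A; HOST_l2c(ll,(s));	\
	 *		ll=(c)->B; HOST_l2c(ll,(s));	\
	 *		ll=(c)->C; HOST_l2c(ll,(s));	\
	 *		ll=(c)->D; HOST_l2c(ll,(s));	\
	 *		} while (0)
	 */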

	c->num=0;
	/* clear stuff, HASH_BLOCK may be leaving some stuff on the stack
	 * but I'm not worried :-)
	OPENSSL_cleanse((void *)c,sizeof(HASH_CTX));
	 */
	return 1;
	}

#ifndef MD32_REG_T
#define MD32_REG_T long
/*
 * This comment was originally written for MD5, which is why it
 * discusses A-D. But it basically applies to all 32-bit digests,
 * which is why it was moved to a common header file.
 *
 * In case you wonder why A-D are declared as long and not
 * as MD5_LONG: doing so results in a slight performance
 * boost on LP64 architectures. The catch is we don't
 * really care if 32 MSBs of a 64-bit register get polluted
 * with eventual overflows as we *save* only 32 LSBs in
 * *either* case. Now declaring 'em long excuses the compiler
 * from keeping 32 MSBs zeroed, resulting in 13% performance
 * improvement under SPARC Solaris7/64 and 5% under AlphaLinux.
 * Well, to be honest it should say that this *prevents*
 * performance degradation.
 *				<appro@fy.chalmers.se>
 * Apparently there are LP64 compilers that generate better
 * code if A-D are declared int. Most notably GCC-x86_64
 * generates better code.
 *				<appro@fy.chalmers.se>
 */
#endif
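
/*
 * If benchmarking shows the default hurts on a particular LP64 target,
 * a build can simply override it, e.g. with -DMD32_REG_T=int on the
 * compiler command line; the #ifndef guard above exists for exactly
 * that purpose.
 */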