sl@0: /* sl@0: * Support for VIA PadLock Advanced Cryptography Engine (ACE) sl@0: * Written by Michal Ludvig sl@0: * http://www.logix.cz/michal sl@0: * sl@0: * Big thanks to Andy Polyakov for a help with optimization, sl@0: * assembler fixes, port to MS Windows and a lot of other sl@0: * valuable work on this engine! sl@0: */ sl@0: sl@0: /* ==================================================================== sl@0: * Copyright (c) 1999-2001 The OpenSSL Project. All rights reserved. sl@0: * sl@0: * Redistribution and use in source and binary forms, with or without sl@0: * modification, are permitted provided that the following conditions sl@0: * are met: sl@0: * sl@0: * 1. Redistributions of source code must retain the above copyright sl@0: * notice, this list of conditions and the following disclaimer. sl@0: * sl@0: * 2. Redistributions in binary form must reproduce the above copyright sl@0: * notice, this list of conditions and the following disclaimer in sl@0: * the documentation and/or other materials provided with the sl@0: * distribution. sl@0: * sl@0: * 3. All advertising materials mentioning features or use of this sl@0: * software must display the following acknowledgment: sl@0: * "This product includes software developed by the OpenSSL Project sl@0: * for use in the OpenSSL Toolkit. (http://www.OpenSSL.org/)" sl@0: * sl@0: * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to sl@0: * endorse or promote products derived from this software without sl@0: * prior written permission. For written permission, please contact sl@0: * licensing@OpenSSL.org. sl@0: * sl@0: * 5. Products derived from this software may not be called "OpenSSL" sl@0: * nor may "OpenSSL" appear in their names without prior written sl@0: * permission of the OpenSSL Project. sl@0: * sl@0: * 6. Redistributions of any form whatsoever must retain the following sl@0: * acknowledgment: sl@0: * "This product includes software developed by the OpenSSL Project sl@0: * for use in the OpenSSL Toolkit (http://www.OpenSSL.org/)" sl@0: * sl@0: * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY sl@0: * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE sl@0: * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR sl@0: * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR sl@0: * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, sl@0: * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT sl@0: * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; sl@0: * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) sl@0: * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, sl@0: * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) sl@0: * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED sl@0: * OF THE POSSIBILITY OF SUCH DAMAGE. sl@0: * ==================================================================== sl@0: * sl@0: * This product includes cryptographic software written by Eric Young sl@0: * (eay@cryptsoft.com). This product includes software written by Tim sl@0: * Hudson (tjh@cryptsoft.com). sl@0: * sl@0: */ sl@0: sl@0: sl@0: #include sl@0: #include sl@0: sl@0: #include sl@0: #include sl@0: #include sl@0: #include sl@0: #include sl@0: #ifndef OPENSSL_NO_AES sl@0: #include sl@0: #endif sl@0: #include sl@0: #include sl@0: sl@0: #ifndef OPENSSL_NO_HW sl@0: #ifndef OPENSSL_NO_HW_PADLOCK sl@0: sl@0: /* Attempt to have a single source for both 0.9.7 and 0.9.8 :-) */ sl@0: #if (OPENSSL_VERSION_NUMBER >= 0x00908000L) sl@0: # ifndef OPENSSL_NO_DYNAMIC_ENGINE sl@0: # define DYNAMIC_ENGINE sl@0: # endif sl@0: #elif (OPENSSL_VERSION_NUMBER >= 0x00907000L) sl@0: # ifdef ENGINE_DYNAMIC_SUPPORT sl@0: # define DYNAMIC_ENGINE sl@0: # endif sl@0: #else sl@0: # error "Only OpenSSL >= 0.9.7 is supported" sl@0: #endif sl@0: sl@0: /* VIA PadLock AES is available *ONLY* on some x86 CPUs. sl@0: Not only that it doesn't exist elsewhere, but it sl@0: even can't be compiled on other platforms! sl@0: sl@0: In addition, because of the heavy use of inline assembler, sl@0: compiler choice is limited to GCC and Microsoft C. */ sl@0: #undef COMPILE_HW_PADLOCK sl@0: #if !defined(I386_ONLY) && !defined(OPENSSL_NO_INLINE_ASM) sl@0: # if (defined(__GNUC__) && (defined(__i386__) || defined(__i386))) || \ sl@0: (defined(_MSC_VER) && defined(_M_IX86)) sl@0: # define COMPILE_HW_PADLOCK sl@0: static ENGINE *ENGINE_padlock (void); sl@0: # endif sl@0: #endif sl@0: sl@0: EXPORT_C void ENGINE_load_padlock (void) sl@0: { sl@0: /* On non-x86 CPUs it just returns. */ sl@0: #ifdef COMPILE_HW_PADLOCK sl@0: ENGINE *toadd = ENGINE_padlock (); sl@0: if (!toadd) return; sl@0: ENGINE_add (toadd); sl@0: ENGINE_free (toadd); sl@0: ERR_clear_error (); sl@0: #endif sl@0: } sl@0: sl@0: #ifdef COMPILE_HW_PADLOCK sl@0: /* We do these includes here to avoid header problems on platforms that sl@0: do not have the VIA padlock anyway... */ sl@0: #ifdef _MSC_VER sl@0: # include sl@0: # define alloca _alloca sl@0: #else sl@0: # include sl@0: #endif sl@0: sl@0: /* Function for ENGINE detection and control */ sl@0: static int padlock_available(void); sl@0: static int padlock_init(ENGINE *e); sl@0: sl@0: /* RNG Stuff */ sl@0: static RAND_METHOD padlock_rand; sl@0: sl@0: /* Cipher Stuff */ sl@0: #ifndef OPENSSL_NO_AES sl@0: static int padlock_ciphers(ENGINE *e, const EVP_CIPHER **cipher, const int **nids, int nid); sl@0: #endif sl@0: sl@0: /* Engine names */ sl@0: static const char *padlock_id = "padlock"; sl@0: static char padlock_name[100]; sl@0: sl@0: /* Available features */ sl@0: static int padlock_use_ace = 0; /* Advanced Cryptography Engine */ sl@0: static int padlock_use_rng = 0; /* Random Number Generator */ sl@0: #ifndef OPENSSL_NO_AES sl@0: static int padlock_aes_align_required = 1; sl@0: #endif sl@0: sl@0: /* ===== Engine "management" functions ===== */ sl@0: sl@0: /* Prepare the ENGINE structure for registration */ sl@0: static int sl@0: padlock_bind_helper(ENGINE *e) sl@0: { sl@0: /* Check available features */ sl@0: padlock_available(); sl@0: sl@0: #if 1 /* disable RNG for now, see commentary in vicinity of RNG code */ sl@0: padlock_use_rng=0; sl@0: #endif sl@0: sl@0: /* Generate a nice engine name with available features */ sl@0: BIO_snprintf(padlock_name, sizeof(padlock_name), sl@0: "VIA PadLock (%s, %s)", sl@0: padlock_use_rng ? "RNG" : "no-RNG", sl@0: padlock_use_ace ? "ACE" : "no-ACE"); sl@0: sl@0: /* Register everything or return with an error */ sl@0: if (!ENGINE_set_id(e, padlock_id) || sl@0: !ENGINE_set_name(e, padlock_name) || sl@0: sl@0: !ENGINE_set_init_function(e, padlock_init) || sl@0: #ifndef OPENSSL_NO_AES sl@0: (padlock_use_ace && !ENGINE_set_ciphers (e, padlock_ciphers)) || sl@0: #endif sl@0: (padlock_use_rng && !ENGINE_set_RAND (e, &padlock_rand))) { sl@0: return 0; sl@0: } sl@0: sl@0: /* Everything looks good */ sl@0: return 1; sl@0: } sl@0: sl@0: /* Constructor */ sl@0: static ENGINE * sl@0: ENGINE_padlock(void) sl@0: { sl@0: ENGINE *eng = ENGINE_new(); sl@0: sl@0: if (!eng) { sl@0: return NULL; sl@0: } sl@0: sl@0: if (!padlock_bind_helper(eng)) { sl@0: ENGINE_free(eng); sl@0: return NULL; sl@0: } sl@0: sl@0: return eng; sl@0: } sl@0: sl@0: /* Check availability of the engine */ sl@0: static int sl@0: padlock_init(ENGINE *e) sl@0: { sl@0: return (padlock_use_rng || padlock_use_ace); sl@0: } sl@0: sl@0: /* This stuff is needed if this ENGINE is being compiled into a self-contained sl@0: * shared-library. sl@0: */ sl@0: #ifdef DYNAMIC_ENGINE sl@0: static int sl@0: padlock_bind_fn(ENGINE *e, const char *id) sl@0: { sl@0: if (id && (strcmp(id, padlock_id) != 0)) { sl@0: return 0; sl@0: } sl@0: sl@0: if (!padlock_bind_helper(e)) { sl@0: return 0; sl@0: } sl@0: sl@0: return 1; sl@0: } sl@0: sl@0: IMPLEMENT_DYNAMIC_CHECK_FN (); sl@0: IMPLEMENT_DYNAMIC_BIND_FN (padlock_bind_fn); sl@0: #endif /* DYNAMIC_ENGINE */ sl@0: sl@0: /* ===== Here comes the "real" engine ===== */ sl@0: sl@0: #ifndef OPENSSL_NO_AES sl@0: /* Some AES-related constants */ sl@0: #define AES_BLOCK_SIZE 16 sl@0: #define AES_KEY_SIZE_128 16 sl@0: #define AES_KEY_SIZE_192 24 sl@0: #define AES_KEY_SIZE_256 32 sl@0: sl@0: /* Here we store the status information relevant to the sl@0: current context. */ sl@0: /* BIG FAT WARNING: sl@0: * Inline assembler in PADLOCK_XCRYPT_ASM() sl@0: * depends on the order of items in this structure. sl@0: * Don't blindly modify, reorder, etc! sl@0: */ sl@0: struct padlock_cipher_data sl@0: { sl@0: unsigned char iv[AES_BLOCK_SIZE]; /* Initialization vector */ sl@0: union { unsigned int pad[4]; sl@0: struct { sl@0: int rounds:4; sl@0: int dgst:1; /* n/a in C3 */ sl@0: int align:1; /* n/a in C3 */ sl@0: int ciphr:1; /* n/a in C3 */ sl@0: unsigned int keygen:1; sl@0: int interm:1; sl@0: unsigned int encdec:1; sl@0: int ksize:2; sl@0: } b; sl@0: } cword; /* Control word */ sl@0: AES_KEY ks; /* Encryption key */ sl@0: }; sl@0: sl@0: /* sl@0: * Essentially this variable belongs in thread local storage. sl@0: * Having this variable global on the other hand can only cause sl@0: * few bogus key reloads [if any at all on single-CPU system], sl@0: * so we accept the penatly... sl@0: */ sl@0: static volatile struct padlock_cipher_data *padlock_saved_context; sl@0: #endif sl@0: sl@0: /* sl@0: * ======================================================= sl@0: * Inline assembler section(s). sl@0: * ======================================================= sl@0: * Order of arguments is chosen to facilitate Windows port sl@0: * using __fastcall calling convention. If you wish to add sl@0: * more routines, keep in mind that first __fastcall sl@0: * argument is passed in %ecx and second - in %edx. sl@0: * ======================================================= sl@0: */ sl@0: #if defined(__GNUC__) && __GNUC__>=2 sl@0: /* sl@0: * As for excessive "push %ebx"/"pop %ebx" found all over. sl@0: * When generating position-independent code GCC won't let sl@0: * us use "b" in assembler templates nor even respect "ebx" sl@0: * in "clobber description." Therefore the trouble... sl@0: */ sl@0: sl@0: /* Helper function - check if a CPUID instruction sl@0: is available on this CPU */ sl@0: static int sl@0: padlock_insn_cpuid_available(void) sl@0: { sl@0: int result = -1; sl@0: sl@0: /* We're checking if the bit #21 of EFLAGS sl@0: can be toggled. If yes = CPUID is available. */ sl@0: asm volatile ( sl@0: "pushf\n" sl@0: "popl %%eax\n" sl@0: "xorl $0x200000, %%eax\n" sl@0: "movl %%eax, %%ecx\n" sl@0: "andl $0x200000, %%ecx\n" sl@0: "pushl %%eax\n" sl@0: "popf\n" sl@0: "pushf\n" sl@0: "popl %%eax\n" sl@0: "andl $0x200000, %%eax\n" sl@0: "xorl %%eax, %%ecx\n" sl@0: "movl %%ecx, %0\n" sl@0: : "=r" (result) : : "eax", "ecx"); sl@0: sl@0: return (result == 0); sl@0: } sl@0: sl@0: /* Load supported features of the CPU to see if sl@0: the PadLock is available. */ sl@0: static int sl@0: padlock_available(void) sl@0: { sl@0: char vendor_string[16]; sl@0: unsigned int eax, edx; sl@0: sl@0: /* First check if the CPUID instruction is available at all... */ sl@0: if (! padlock_insn_cpuid_available()) sl@0: return 0; sl@0: sl@0: /* Are we running on the Centaur (VIA) CPU? */ sl@0: eax = 0x00000000; sl@0: vendor_string[12] = 0; sl@0: asm volatile ( sl@0: "pushl %%ebx\n" sl@0: "cpuid\n" sl@0: "movl %%ebx,(%%edi)\n" sl@0: "movl %%edx,4(%%edi)\n" sl@0: "movl %%ecx,8(%%edi)\n" sl@0: "popl %%ebx" sl@0: : "+a"(eax) : "D"(vendor_string) : "ecx", "edx"); sl@0: if (strcmp(vendor_string, "CentaurHauls") != 0) sl@0: return 0; sl@0: sl@0: /* Check for Centaur Extended Feature Flags presence */ sl@0: eax = 0xC0000000; sl@0: asm volatile ("pushl %%ebx; cpuid; popl %%ebx" sl@0: : "+a"(eax) : : "ecx", "edx"); sl@0: if (eax < 0xC0000001) sl@0: return 0; sl@0: sl@0: /* Read the Centaur Extended Feature Flags */ sl@0: eax = 0xC0000001; sl@0: asm volatile ("pushl %%ebx; cpuid; popl %%ebx" sl@0: : "+a"(eax), "=d"(edx) : : "ecx"); sl@0: sl@0: /* Fill up some flags */ sl@0: padlock_use_ace = ((edx & (0x3<<6)) == (0x3<<6)); sl@0: padlock_use_rng = ((edx & (0x3<<2)) == (0x3<<2)); sl@0: sl@0: return padlock_use_ace + padlock_use_rng; sl@0: } sl@0: sl@0: #ifndef OPENSSL_NO_AES sl@0: /* Our own htonl()/ntohl() */ sl@0: static inline void sl@0: padlock_bswapl(AES_KEY *ks) sl@0: { sl@0: size_t i = sizeof(ks->rd_key)/sizeof(ks->rd_key[0]); sl@0: unsigned int *key = ks->rd_key; sl@0: sl@0: while (i--) { sl@0: asm volatile ("bswapl %0" : "+r"(*key)); sl@0: key++; sl@0: } sl@0: } sl@0: #endif sl@0: sl@0: /* Force key reload from memory to the CPU microcode. sl@0: Loading EFLAGS from the stack clears EFLAGS[30] sl@0: which does the trick. */ sl@0: static inline void sl@0: padlock_reload_key(void) sl@0: { sl@0: asm volatile ("pushfl; popfl"); sl@0: } sl@0: sl@0: #ifndef OPENSSL_NO_AES sl@0: /* sl@0: * This is heuristic key context tracing. At first one sl@0: * believes that one should use atomic swap instructions, sl@0: * but it's not actually necessary. Point is that if sl@0: * padlock_saved_context was changed by another thread sl@0: * after we've read it and before we compare it with cdata, sl@0: * our key *shall* be reloaded upon thread context switch sl@0: * and we are therefore set in either case... sl@0: */ sl@0: static inline void sl@0: padlock_verify_context(struct padlock_cipher_data *cdata) sl@0: { sl@0: asm volatile ( sl@0: "pushfl\n" sl@0: " btl $30,(%%esp)\n" sl@0: " jnc 1f\n" sl@0: " cmpl %2,%1\n" sl@0: " je 1f\n" sl@0: " popfl\n" sl@0: " subl $4,%%esp\n" sl@0: "1: addl $4,%%esp\n" sl@0: " movl %2,%0" sl@0: :"+m"(padlock_saved_context) sl@0: : "r"(padlock_saved_context), "r"(cdata) : "cc"); sl@0: } sl@0: sl@0: /* Template for padlock_xcrypt_* modes */ sl@0: /* BIG FAT WARNING: sl@0: * The offsets used with 'leal' instructions sl@0: * describe items of the 'padlock_cipher_data' sl@0: * structure. sl@0: */ sl@0: #define PADLOCK_XCRYPT_ASM(name,rep_xcrypt) \ sl@0: static inline void *name(size_t cnt, \ sl@0: struct padlock_cipher_data *cdata, \ sl@0: void *out, const void *inp) \ sl@0: { void *iv; \ sl@0: asm volatile ( "pushl %%ebx\n" \ sl@0: " leal 16(%0),%%edx\n" \ sl@0: " leal 32(%0),%%ebx\n" \ sl@0: rep_xcrypt "\n" \ sl@0: " popl %%ebx" \ sl@0: : "=a"(iv), "=c"(cnt), "=D"(out), "=S"(inp) \ sl@0: : "0"(cdata), "1"(cnt), "2"(out), "3"(inp) \ sl@0: : "edx", "cc", "memory"); \ sl@0: return iv; \ sl@0: } sl@0: sl@0: /* Generate all functions with appropriate opcodes */ sl@0: PADLOCK_XCRYPT_ASM(padlock_xcrypt_ecb, ".byte 0xf3,0x0f,0xa7,0xc8") /* rep xcryptecb */ sl@0: PADLOCK_XCRYPT_ASM(padlock_xcrypt_cbc, ".byte 0xf3,0x0f,0xa7,0xd0") /* rep xcryptcbc */ sl@0: PADLOCK_XCRYPT_ASM(padlock_xcrypt_cfb, ".byte 0xf3,0x0f,0xa7,0xe0") /* rep xcryptcfb */ sl@0: PADLOCK_XCRYPT_ASM(padlock_xcrypt_ofb, ".byte 0xf3,0x0f,0xa7,0xe8") /* rep xcryptofb */ sl@0: #endif sl@0: sl@0: /* The RNG call itself */ sl@0: static inline unsigned int sl@0: padlock_xstore(void *addr, unsigned int edx_in) sl@0: { sl@0: unsigned int eax_out; sl@0: sl@0: asm volatile (".byte 0x0f,0xa7,0xc0" /* xstore */ sl@0: : "=a"(eax_out),"=m"(*(unsigned *)addr) sl@0: : "D"(addr), "d" (edx_in) sl@0: ); sl@0: sl@0: return eax_out; sl@0: } sl@0: sl@0: /* Why not inline 'rep movsd'? I failed to find information on what sl@0: * value in Direction Flag one can expect and consequently have to sl@0: * apply "better-safe-than-sorry" approach and assume "undefined." sl@0: * I could explicitly clear it and restore the original value upon sl@0: * return from padlock_aes_cipher, but it's presumably too much sl@0: * trouble for too little gain... sl@0: * sl@0: * In case you wonder 'rep xcrypt*' instructions above are *not* sl@0: * affected by the Direction Flag and pointers advance toward sl@0: * larger addresses unconditionally. sl@0: */ sl@0: static inline unsigned char * sl@0: padlock_memcpy(void *dst,const void *src,size_t n) sl@0: { sl@0: long *d=dst; sl@0: const long *s=src; sl@0: sl@0: n /= sizeof(*d); sl@0: do { *d++ = *s++; } while (--n); sl@0: sl@0: return dst; sl@0: } sl@0: sl@0: #elif defined(_MSC_VER) sl@0: /* sl@0: * Unlike GCC these are real functions. In order to minimize impact sl@0: * on performance we adhere to __fastcall calling convention in sl@0: * order to get two first arguments passed through %ecx and %edx. sl@0: * Which kind of suits very well, as instructions in question use sl@0: * both %ecx and %edx as input:-) sl@0: */ sl@0: #define REP_XCRYPT(code) \ sl@0: _asm _emit 0xf3 \ sl@0: _asm _emit 0x0f _asm _emit 0xa7 \ sl@0: _asm _emit code sl@0: sl@0: /* BIG FAT WARNING: sl@0: * The offsets used with 'lea' instructions sl@0: * describe items of the 'padlock_cipher_data' sl@0: * structure. sl@0: */ sl@0: #define PADLOCK_XCRYPT_ASM(name,code) \ sl@0: static void * __fastcall \ sl@0: name (size_t cnt, void *cdata, \ sl@0: void *outp, const void *inp) \ sl@0: { _asm mov eax,edx \ sl@0: _asm lea edx,[eax+16] \ sl@0: _asm lea ebx,[eax+32] \ sl@0: _asm mov edi,outp \ sl@0: _asm mov esi,inp \ sl@0: REP_XCRYPT(code) \ sl@0: } sl@0: sl@0: PADLOCK_XCRYPT_ASM(padlock_xcrypt_ecb,0xc8) sl@0: PADLOCK_XCRYPT_ASM(padlock_xcrypt_cbc,0xd0) sl@0: PADLOCK_XCRYPT_ASM(padlock_xcrypt_cfb,0xe0) sl@0: PADLOCK_XCRYPT_ASM(padlock_xcrypt_ofb,0xe8) sl@0: sl@0: static int __fastcall sl@0: padlock_xstore(void *outp,unsigned int code) sl@0: { _asm mov edi,ecx sl@0: _asm _emit 0x0f _asm _emit 0xa7 _asm _emit 0xc0 sl@0: } sl@0: sl@0: static void __fastcall sl@0: padlock_reload_key(void) sl@0: { _asm pushfd _asm popfd } sl@0: sl@0: static void __fastcall sl@0: padlock_verify_context(void *cdata) sl@0: { _asm { sl@0: pushfd sl@0: bt DWORD PTR[esp],30 sl@0: jnc skip sl@0: cmp ecx,padlock_saved_context sl@0: je skip sl@0: popfd sl@0: sub esp,4 sl@0: skip: add esp,4 sl@0: mov padlock_saved_context,ecx sl@0: } sl@0: } sl@0: sl@0: static int sl@0: padlock_available(void) sl@0: { _asm { sl@0: pushfd sl@0: pop eax sl@0: mov ecx,eax sl@0: xor eax,1<<21 sl@0: push eax sl@0: popfd sl@0: pushfd sl@0: pop eax sl@0: xor eax,ecx sl@0: bt eax,21 sl@0: jnc noluck sl@0: mov eax,0 sl@0: cpuid sl@0: xor eax,eax sl@0: cmp ebx,'tneC' sl@0: jne noluck sl@0: cmp edx,'Hrua' sl@0: jne noluck sl@0: cmp ecx,'slua' sl@0: jne noluck sl@0: mov eax,0xC0000000 sl@0: cpuid sl@0: mov edx,eax sl@0: xor eax,eax sl@0: cmp edx,0xC0000001 sl@0: jb noluck sl@0: mov eax,0xC0000001 sl@0: cpuid sl@0: xor eax,eax sl@0: bt edx,6 sl@0: jnc skip_a sl@0: bt edx,7 sl@0: jnc skip_a sl@0: mov padlock_use_ace,1 sl@0: inc eax sl@0: skip_a: bt edx,2 sl@0: jnc skip_r sl@0: bt edx,3 sl@0: jnc skip_r sl@0: mov padlock_use_rng,1 sl@0: inc eax sl@0: skip_r: sl@0: noluck: sl@0: } sl@0: } sl@0: sl@0: static void __fastcall sl@0: padlock_bswapl(void *key) sl@0: { _asm { sl@0: pushfd sl@0: cld sl@0: mov esi,ecx sl@0: mov edi,ecx sl@0: mov ecx,60 sl@0: up: lodsd sl@0: bswap eax sl@0: stosd sl@0: loop up sl@0: popfd sl@0: } sl@0: } sl@0: sl@0: /* MS actually specifies status of Direction Flag and compiler even sl@0: * manages to compile following as 'rep movsd' all by itself... sl@0: */ sl@0: #define padlock_memcpy(o,i,n) ((unsigned char *)memcpy((o),(i),(n)&~3U)) sl@0: #endif sl@0: sl@0: /* ===== AES encryption/decryption ===== */ sl@0: #ifndef OPENSSL_NO_AES sl@0: sl@0: #if defined(NID_aes_128_cfb128) && ! defined (NID_aes_128_cfb) sl@0: #define NID_aes_128_cfb NID_aes_128_cfb128 sl@0: #endif sl@0: sl@0: #if defined(NID_aes_128_ofb128) && ! defined (NID_aes_128_ofb) sl@0: #define NID_aes_128_ofb NID_aes_128_ofb128 sl@0: #endif sl@0: sl@0: #if defined(NID_aes_192_cfb128) && ! defined (NID_aes_192_cfb) sl@0: #define NID_aes_192_cfb NID_aes_192_cfb128 sl@0: #endif sl@0: sl@0: #if defined(NID_aes_192_ofb128) && ! defined (NID_aes_192_ofb) sl@0: #define NID_aes_192_ofb NID_aes_192_ofb128 sl@0: #endif sl@0: sl@0: #if defined(NID_aes_256_cfb128) && ! defined (NID_aes_256_cfb) sl@0: #define NID_aes_256_cfb NID_aes_256_cfb128 sl@0: #endif sl@0: sl@0: #if defined(NID_aes_256_ofb128) && ! defined (NID_aes_256_ofb) sl@0: #define NID_aes_256_ofb NID_aes_256_ofb128 sl@0: #endif sl@0: sl@0: /* List of supported ciphers. */ sl@0: static int padlock_cipher_nids[] = { sl@0: NID_aes_128_ecb, sl@0: NID_aes_128_cbc, sl@0: NID_aes_128_cfb, sl@0: NID_aes_128_ofb, sl@0: sl@0: NID_aes_192_ecb, sl@0: NID_aes_192_cbc, sl@0: #if 0 sl@0: NID_aes_192_cfb, /* FIXME: AES192/256 CFB/OFB don't work. */ sl@0: NID_aes_192_ofb, sl@0: #endif sl@0: sl@0: NID_aes_256_ecb, sl@0: NID_aes_256_cbc, sl@0: #if 0 sl@0: NID_aes_256_cfb, sl@0: NID_aes_256_ofb, sl@0: #endif sl@0: }; sl@0: static int padlock_cipher_nids_num = (sizeof(padlock_cipher_nids)/ sl@0: sizeof(padlock_cipher_nids[0])); sl@0: sl@0: /* Function prototypes ... */ sl@0: static int padlock_aes_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key, sl@0: const unsigned char *iv, int enc); sl@0: static int padlock_aes_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out, sl@0: const unsigned char *in, size_t nbytes); sl@0: sl@0: #define NEAREST_ALIGNED(ptr) ( (unsigned char *)(ptr) + \ sl@0: ( (0x10 - ((size_t)(ptr) & 0x0F)) & 0x0F ) ) sl@0: #define ALIGNED_CIPHER_DATA(ctx) ((struct padlock_cipher_data *)\ sl@0: NEAREST_ALIGNED(ctx->cipher_data)) sl@0: sl@0: #define EVP_CIPHER_block_size_ECB AES_BLOCK_SIZE sl@0: #define EVP_CIPHER_block_size_CBC AES_BLOCK_SIZE sl@0: #define EVP_CIPHER_block_size_OFB 1 sl@0: #define EVP_CIPHER_block_size_CFB 1 sl@0: /* Declaring so many ciphers by hand would be a pain. sl@0: Instead introduce a bit of preprocessor magic :-) */ sl@0: #define DECLARE_AES_EVP(ksize,lmode,umode) \ sl@0: static const EVP_CIPHER padlock_aes_##ksize##_##lmode = { \ sl@0: NID_aes_##ksize##_##lmode, \ sl@0: EVP_CIPHER_block_size_##umode, \ sl@0: AES_KEY_SIZE_##ksize, \ sl@0: AES_BLOCK_SIZE, \ sl@0: 0 | EVP_CIPH_##umode##_MODE, \ sl@0: padlock_aes_init_key, \ sl@0: padlock_aes_cipher, \ sl@0: NULL, \ sl@0: sizeof(struct padlock_cipher_data) + 16, \ sl@0: EVP_CIPHER_set_asn1_iv, \ sl@0: EVP_CIPHER_get_asn1_iv, \ sl@0: NULL, \ sl@0: NULL \ sl@0: } sl@0: sl@0: DECLARE_AES_EVP(128,ecb,ECB); sl@0: DECLARE_AES_EVP(128,cbc,CBC); sl@0: DECLARE_AES_EVP(128,cfb,CFB); sl@0: DECLARE_AES_EVP(128,ofb,OFB); sl@0: sl@0: DECLARE_AES_EVP(192,ecb,ECB); sl@0: DECLARE_AES_EVP(192,cbc,CBC); sl@0: DECLARE_AES_EVP(192,cfb,CFB); sl@0: DECLARE_AES_EVP(192,ofb,OFB); sl@0: sl@0: DECLARE_AES_EVP(256,ecb,ECB); sl@0: DECLARE_AES_EVP(256,cbc,CBC); sl@0: DECLARE_AES_EVP(256,cfb,CFB); sl@0: DECLARE_AES_EVP(256,ofb,OFB); sl@0: sl@0: static int sl@0: padlock_ciphers (ENGINE *e, const EVP_CIPHER **cipher, const int **nids, int nid) sl@0: { sl@0: /* No specific cipher => return a list of supported nids ... */ sl@0: if (!cipher) { sl@0: *nids = padlock_cipher_nids; sl@0: return padlock_cipher_nids_num; sl@0: } sl@0: sl@0: /* ... or the requested "cipher" otherwise */ sl@0: switch (nid) { sl@0: case NID_aes_128_ecb: sl@0: *cipher = &padlock_aes_128_ecb; sl@0: break; sl@0: case NID_aes_128_cbc: sl@0: *cipher = &padlock_aes_128_cbc; sl@0: break; sl@0: case NID_aes_128_cfb: sl@0: *cipher = &padlock_aes_128_cfb; sl@0: break; sl@0: case NID_aes_128_ofb: sl@0: *cipher = &padlock_aes_128_ofb; sl@0: break; sl@0: sl@0: case NID_aes_192_ecb: sl@0: *cipher = &padlock_aes_192_ecb; sl@0: break; sl@0: case NID_aes_192_cbc: sl@0: *cipher = &padlock_aes_192_cbc; sl@0: break; sl@0: case NID_aes_192_cfb: sl@0: *cipher = &padlock_aes_192_cfb; sl@0: break; sl@0: case NID_aes_192_ofb: sl@0: *cipher = &padlock_aes_192_ofb; sl@0: break; sl@0: sl@0: case NID_aes_256_ecb: sl@0: *cipher = &padlock_aes_256_ecb; sl@0: break; sl@0: case NID_aes_256_cbc: sl@0: *cipher = &padlock_aes_256_cbc; sl@0: break; sl@0: case NID_aes_256_cfb: sl@0: *cipher = &padlock_aes_256_cfb; sl@0: break; sl@0: case NID_aes_256_ofb: sl@0: *cipher = &padlock_aes_256_ofb; sl@0: break; sl@0: sl@0: default: sl@0: /* Sorry, we don't support this NID */ sl@0: *cipher = NULL; sl@0: return 0; sl@0: } sl@0: sl@0: return 1; sl@0: } sl@0: sl@0: /* Prepare the encryption key for PadLock usage */ sl@0: static int sl@0: padlock_aes_init_key (EVP_CIPHER_CTX *ctx, const unsigned char *key, sl@0: const unsigned char *iv, int enc) sl@0: { sl@0: struct padlock_cipher_data *cdata; sl@0: int key_len = EVP_CIPHER_CTX_key_length(ctx) * 8; sl@0: sl@0: if (key==NULL) return 0; /* ERROR */ sl@0: sl@0: cdata = ALIGNED_CIPHER_DATA(ctx); sl@0: memset(cdata, 0, sizeof(struct padlock_cipher_data)); sl@0: sl@0: /* Prepare Control word. */ sl@0: if (EVP_CIPHER_CTX_mode(ctx) == EVP_CIPH_OFB_MODE) sl@0: cdata->cword.b.encdec = 0; sl@0: else sl@0: cdata->cword.b.encdec = (ctx->encrypt == 0); sl@0: cdata->cword.b.rounds = 10 + (key_len - 128) / 32; sl@0: cdata->cword.b.ksize = (key_len - 128) / 64; sl@0: sl@0: switch(key_len) { sl@0: case 128: sl@0: /* PadLock can generate an extended key for sl@0: AES128 in hardware */ sl@0: memcpy(cdata->ks.rd_key, key, AES_KEY_SIZE_128); sl@0: cdata->cword.b.keygen = 0; sl@0: break; sl@0: sl@0: case 192: sl@0: case 256: sl@0: /* Generate an extended AES key in software. sl@0: Needed for AES192/AES256 */ sl@0: /* Well, the above applies to Stepping 8 CPUs sl@0: and is listed as hardware errata. They most sl@0: likely will fix it at some point and then sl@0: a check for stepping would be due here. */ sl@0: if (EVP_CIPHER_CTX_mode(ctx) == EVP_CIPH_CFB_MODE || sl@0: EVP_CIPHER_CTX_mode(ctx) == EVP_CIPH_OFB_MODE || sl@0: enc) sl@0: AES_set_encrypt_key(key, key_len, &cdata->ks); sl@0: else sl@0: AES_set_decrypt_key(key, key_len, &cdata->ks); sl@0: #ifndef AES_ASM sl@0: /* OpenSSL C functions use byte-swapped extended key. */ sl@0: padlock_bswapl(&cdata->ks); sl@0: #endif sl@0: cdata->cword.b.keygen = 1; sl@0: break; sl@0: sl@0: default: sl@0: /* ERROR */ sl@0: return 0; sl@0: } sl@0: sl@0: /* sl@0: * This is done to cover for cases when user reuses the sl@0: * context for new key. The catch is that if we don't do sl@0: * this, padlock_eas_cipher might proceed with old key... sl@0: */ sl@0: padlock_reload_key (); sl@0: sl@0: return 1; sl@0: } sl@0: sl@0: /* sl@0: * Simplified version of padlock_aes_cipher() used when sl@0: * 1) both input and output buffers are at aligned addresses. sl@0: * or when sl@0: * 2) running on a newer CPU that doesn't require aligned buffers. sl@0: */ sl@0: static int sl@0: padlock_aes_cipher_omnivorous(EVP_CIPHER_CTX *ctx, unsigned char *out_arg, sl@0: const unsigned char *in_arg, size_t nbytes) sl@0: { sl@0: struct padlock_cipher_data *cdata; sl@0: void *iv; sl@0: sl@0: cdata = ALIGNED_CIPHER_DATA(ctx); sl@0: padlock_verify_context(cdata); sl@0: sl@0: switch (EVP_CIPHER_CTX_mode(ctx)) { sl@0: case EVP_CIPH_ECB_MODE: sl@0: padlock_xcrypt_ecb(nbytes/AES_BLOCK_SIZE, cdata, out_arg, in_arg); sl@0: break; sl@0: sl@0: case EVP_CIPH_CBC_MODE: sl@0: memcpy(cdata->iv, ctx->iv, AES_BLOCK_SIZE); sl@0: iv = padlock_xcrypt_cbc(nbytes/AES_BLOCK_SIZE, cdata, out_arg, in_arg); sl@0: memcpy(ctx->iv, iv, AES_BLOCK_SIZE); sl@0: break; sl@0: sl@0: case EVP_CIPH_CFB_MODE: sl@0: memcpy(cdata->iv, ctx->iv, AES_BLOCK_SIZE); sl@0: iv = padlock_xcrypt_cfb(nbytes/AES_BLOCK_SIZE, cdata, out_arg, in_arg); sl@0: memcpy(ctx->iv, iv, AES_BLOCK_SIZE); sl@0: break; sl@0: sl@0: case EVP_CIPH_OFB_MODE: sl@0: memcpy(cdata->iv, ctx->iv, AES_BLOCK_SIZE); sl@0: padlock_xcrypt_ofb(nbytes/AES_BLOCK_SIZE, cdata, out_arg, in_arg); sl@0: memcpy(ctx->iv, cdata->iv, AES_BLOCK_SIZE); sl@0: break; sl@0: sl@0: default: sl@0: return 0; sl@0: } sl@0: sl@0: memset(cdata->iv, 0, AES_BLOCK_SIZE); sl@0: sl@0: return 1; sl@0: } sl@0: sl@0: #ifndef PADLOCK_CHUNK sl@0: # define PADLOCK_CHUNK 512 /* Must be a power of 2 larger than 16 */ sl@0: #endif sl@0: #if PADLOCK_CHUNK<16 || PADLOCK_CHUNK&(PADLOCK_CHUNK-1) sl@0: # error "insane PADLOCK_CHUNK..." sl@0: #endif sl@0: sl@0: /* Re-align the arguments to 16-Bytes boundaries and run the sl@0: encryption function itself. This function is not AES-specific. */ sl@0: static int sl@0: padlock_aes_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out_arg, sl@0: const unsigned char *in_arg, size_t nbytes) sl@0: { sl@0: struct padlock_cipher_data *cdata; sl@0: const void *inp; sl@0: unsigned char *out; sl@0: void *iv; sl@0: int inp_misaligned, out_misaligned, realign_in_loop; sl@0: size_t chunk, allocated=0; sl@0: sl@0: /* ctx->num is maintained in byte-oriented modes, sl@0: such as CFB and OFB... */ sl@0: if ((chunk = ctx->num)) { /* borrow chunk variable */ sl@0: unsigned char *ivp=ctx->iv; sl@0: sl@0: switch (EVP_CIPHER_CTX_mode(ctx)) { sl@0: case EVP_CIPH_CFB_MODE: sl@0: if (chunk >= AES_BLOCK_SIZE) sl@0: return 0; /* bogus value */ sl@0: sl@0: if (ctx->encrypt) sl@0: while (chunknum = chunk%AES_BLOCK_SIZE; sl@0: break; sl@0: case EVP_CIPH_OFB_MODE: sl@0: if (chunk >= AES_BLOCK_SIZE) sl@0: return 0; /* bogus value */ sl@0: sl@0: while (chunknum = chunk%AES_BLOCK_SIZE; sl@0: break; sl@0: } sl@0: } sl@0: sl@0: if (nbytes == 0) sl@0: return 1; sl@0: #if 0 sl@0: if (nbytes % AES_BLOCK_SIZE) sl@0: return 0; /* are we expected to do tail processing? */ sl@0: #else sl@0: /* nbytes is always multiple of AES_BLOCK_SIZE in ECB and CBC sl@0: modes and arbitrary value in byte-oriented modes, such as sl@0: CFB and OFB... */ sl@0: #endif sl@0: sl@0: /* VIA promises CPUs that won't require alignment in the future. sl@0: For now padlock_aes_align_required is initialized to 1 and sl@0: the condition is never met... */ sl@0: /* C7 core is capable to manage unaligned input in non-ECB[!] sl@0: mode, but performance penalties appear to be approximately sl@0: same as for software alignment below or ~3x. They promise to sl@0: improve it in the future, but for now we can just as well sl@0: pretend that it can only handle aligned input... */ sl@0: if (!padlock_aes_align_required && (nbytes%AES_BLOCK_SIZE)==0) sl@0: return padlock_aes_cipher_omnivorous(ctx, out_arg, in_arg, nbytes); sl@0: sl@0: inp_misaligned = (((size_t)in_arg) & 0x0F); sl@0: out_misaligned = (((size_t)out_arg) & 0x0F); sl@0: sl@0: /* Note that even if output is aligned and input not, sl@0: * I still prefer to loop instead of copy the whole sl@0: * input and then encrypt in one stroke. This is done sl@0: * in order to improve L1 cache utilization... */ sl@0: realign_in_loop = out_misaligned|inp_misaligned; sl@0: sl@0: if (!realign_in_loop && (nbytes%AES_BLOCK_SIZE)==0) sl@0: return padlock_aes_cipher_omnivorous(ctx, out_arg, in_arg, nbytes); sl@0: sl@0: /* this takes one "if" out of the loops */ sl@0: chunk = nbytes; sl@0: chunk %= PADLOCK_CHUNK; sl@0: if (chunk==0) chunk = PADLOCK_CHUNK; sl@0: sl@0: if (out_misaligned) { sl@0: /* optmize for small input */ sl@0: allocated = (chunkiv, ctx->iv, AES_BLOCK_SIZE); sl@0: goto cbc_shortcut; sl@0: do { sl@0: if (iv != cdata->iv) sl@0: memcpy(cdata->iv, iv, AES_BLOCK_SIZE); sl@0: chunk = PADLOCK_CHUNK; sl@0: cbc_shortcut: /* optimize for small input */ sl@0: if (inp_misaligned) sl@0: inp = padlock_memcpy(out, in_arg, chunk); sl@0: else sl@0: inp = in_arg; sl@0: in_arg += chunk; sl@0: sl@0: iv = padlock_xcrypt_cbc(chunk/AES_BLOCK_SIZE, cdata, out, inp); sl@0: sl@0: if (out_misaligned) sl@0: out_arg = padlock_memcpy(out_arg, out, chunk) + chunk; sl@0: else sl@0: out = out_arg+=chunk; sl@0: sl@0: } while (nbytes -= chunk); sl@0: memcpy(ctx->iv, iv, AES_BLOCK_SIZE); sl@0: break; sl@0: sl@0: case EVP_CIPH_CFB_MODE: sl@0: memcpy (iv = cdata->iv, ctx->iv, AES_BLOCK_SIZE); sl@0: chunk &= ~(AES_BLOCK_SIZE-1); sl@0: if (chunk) goto cfb_shortcut; sl@0: else goto cfb_skiploop; sl@0: do { sl@0: if (iv != cdata->iv) sl@0: memcpy(cdata->iv, iv, AES_BLOCK_SIZE); sl@0: chunk = PADLOCK_CHUNK; sl@0: cfb_shortcut: /* optimize for small input */ sl@0: if (inp_misaligned) sl@0: inp = padlock_memcpy(out, in_arg, chunk); sl@0: else sl@0: inp = in_arg; sl@0: in_arg += chunk; sl@0: sl@0: iv = padlock_xcrypt_cfb(chunk/AES_BLOCK_SIZE, cdata, out, inp); sl@0: sl@0: if (out_misaligned) sl@0: out_arg = padlock_memcpy(out_arg, out, chunk) + chunk; sl@0: else sl@0: out = out_arg+=chunk; sl@0: sl@0: nbytes -= chunk; sl@0: } while (nbytes >= AES_BLOCK_SIZE); sl@0: sl@0: cfb_skiploop: sl@0: if (nbytes) { sl@0: unsigned char *ivp = cdata->iv; sl@0: sl@0: if (iv != ivp) { sl@0: memcpy(ivp, iv, AES_BLOCK_SIZE); sl@0: iv = ivp; sl@0: } sl@0: ctx->num = nbytes; sl@0: if (cdata->cword.b.encdec) { sl@0: cdata->cword.b.encdec=0; sl@0: padlock_reload_key(); sl@0: padlock_xcrypt_ecb(1,cdata,ivp,ivp); sl@0: cdata->cword.b.encdec=1; sl@0: padlock_reload_key(); sl@0: while(nbytes) { sl@0: unsigned char c = *(in_arg++); sl@0: *(out_arg++) = c ^ *ivp; sl@0: *(ivp++) = c, nbytes--; sl@0: } sl@0: } sl@0: else { padlock_reload_key(); sl@0: padlock_xcrypt_ecb(1,cdata,ivp,ivp); sl@0: padlock_reload_key(); sl@0: while (nbytes) { sl@0: *ivp = *(out_arg++) = *(in_arg++) ^ *ivp; sl@0: ivp++, nbytes--; sl@0: } sl@0: } sl@0: } sl@0: memcpy(ctx->iv, iv, AES_BLOCK_SIZE); sl@0: break; sl@0: sl@0: case EVP_CIPH_OFB_MODE: sl@0: memcpy(cdata->iv, ctx->iv, AES_BLOCK_SIZE); sl@0: chunk &= ~(AES_BLOCK_SIZE-1); sl@0: if (chunk) do { sl@0: if (inp_misaligned) sl@0: inp = padlock_memcpy(out, in_arg, chunk); sl@0: else sl@0: inp = in_arg; sl@0: in_arg += chunk; sl@0: sl@0: padlock_xcrypt_ofb(chunk/AES_BLOCK_SIZE, cdata, out, inp); sl@0: sl@0: if (out_misaligned) sl@0: out_arg = padlock_memcpy(out_arg, out, chunk) + chunk; sl@0: else sl@0: out = out_arg+=chunk; sl@0: sl@0: nbytes -= chunk; sl@0: chunk = PADLOCK_CHUNK; sl@0: } while (nbytes >= AES_BLOCK_SIZE); sl@0: sl@0: if (nbytes) { sl@0: unsigned char *ivp = cdata->iv; sl@0: sl@0: ctx->num = nbytes; sl@0: padlock_reload_key(); /* empirically found */ sl@0: padlock_xcrypt_ecb(1,cdata,ivp,ivp); sl@0: padlock_reload_key(); /* empirically found */ sl@0: while (nbytes) { sl@0: *(out_arg++) = *(in_arg++) ^ *ivp; sl@0: ivp++, nbytes--; sl@0: } sl@0: } sl@0: memcpy(ctx->iv, cdata->iv, AES_BLOCK_SIZE); sl@0: break; sl@0: sl@0: default: sl@0: return 0; sl@0: } sl@0: sl@0: /* Clean the realign buffer if it was used */ sl@0: if (out_misaligned) { sl@0: volatile unsigned long *p=(void *)out; sl@0: size_t n = allocated/sizeof(*p); sl@0: while (n--) *p++=0; sl@0: } sl@0: sl@0: memset(cdata->iv, 0, AES_BLOCK_SIZE); sl@0: sl@0: return 1; sl@0: } sl@0: sl@0: #endif /* OPENSSL_NO_AES */ sl@0: sl@0: /* ===== Random Number Generator ===== */ sl@0: /* sl@0: * This code is not engaged. The reason is that it does not comply sl@0: * with recommendations for VIA RNG usage for secure applications sl@0: * (posted at http://www.via.com.tw/en/viac3/c3.jsp) nor does it sl@0: * provide meaningful error control... sl@0: */ sl@0: /* Wrapper that provides an interface between the API and sl@0: the raw PadLock RNG */ sl@0: static int sl@0: padlock_rand_bytes(unsigned char *output, int count) sl@0: { sl@0: unsigned int eax, buf; sl@0: sl@0: while (count >= 8) { sl@0: eax = padlock_xstore(output, 0); sl@0: if (!(eax&(1<<6))) return 0; /* RNG disabled */ sl@0: /* this ---vv--- covers DC bias, Raw Bits and String Filter */ sl@0: if (eax&(0x1F<<10)) return 0; sl@0: if ((eax&0x1F)==0) continue; /* no data, retry... */ sl@0: if ((eax&0x1F)!=8) return 0; /* fatal failure... */ sl@0: output += 8; sl@0: count -= 8; sl@0: } sl@0: while (count > 0) { sl@0: eax = padlock_xstore(&buf, 3); sl@0: if (!(eax&(1<<6))) return 0; /* RNG disabled */ sl@0: /* this ---vv--- covers DC bias, Raw Bits and String Filter */ sl@0: if (eax&(0x1F<<10)) return 0; sl@0: if ((eax&0x1F)==0) continue; /* no data, retry... */ sl@0: if ((eax&0x1F)!=1) return 0; /* fatal failure... */ sl@0: *output++ = (unsigned char)buf; sl@0: count--; sl@0: } sl@0: *(volatile unsigned int *)&buf=0; sl@0: sl@0: return 1; sl@0: } sl@0: sl@0: /* Dummy but necessary function */ sl@0: static int sl@0: padlock_rand_status(void) sl@0: { sl@0: return 1; sl@0: } sl@0: sl@0: /* Prepare structure for registration */ sl@0: static RAND_METHOD padlock_rand = { sl@0: NULL, /* seed */ sl@0: padlock_rand_bytes, /* bytes */ sl@0: NULL, /* cleanup */ sl@0: NULL, /* add */ sl@0: padlock_rand_bytes, /* pseudorand */ sl@0: padlock_rand_status, /* rand status */ sl@0: }; sl@0: sl@0: #endif /* COMPILE_HW_PADLOCK */ sl@0: sl@0: #endif /* !OPENSSL_NO_HW_PADLOCK */ sl@0: #endif /* !OPENSSL_NO_HW */