File openssl-0.9.7d-padlock-engine.diff of Package compat-openssl097g
Index: crypto/engine/hw_padlock.c
===================================================================
--- /dev/null
+++ crypto/engine/hw_padlock.c
@@ -0,0 +1,734 @@
+/*
+ * Support for VIA PadLock Advanced Cryptography Engine (ACE)
+ * Written by Michal Ludvig <michal@logix.cz>
+ * http://www.logix.cz/michal
+ *
+ * Date: May 13, 2004
+ */
+
+/* ====================================================================
+ * Copyright (c) 1999-2001 The OpenSSL Project. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * 3. All advertising materials mentioning features or use of this
+ * software must display the following acknowledgment:
+ * "This product includes software developed by the OpenSSL Project
+ * for use in the OpenSSL Toolkit. (http://www.OpenSSL.org/)"
+ *
+ * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
+ * endorse or promote products derived from this software without
+ * prior written permission. For written permission, please contact
+ * licensing@OpenSSL.org.
+ *
+ * 5. Products derived from this software may not be called "OpenSSL"
+ * nor may "OpenSSL" appear in their names without prior written
+ * permission of the OpenSSL Project.
+ *
+ * 6. Redistributions of any form whatsoever must retain the following
+ * acknowledgment:
+ * "This product includes software developed by the OpenSSL Project
+ * for use in the OpenSSL Toolkit (http://www.OpenSSL.org/)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
+ * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR
+ * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ * ====================================================================
+ *
+ * This product includes cryptographic software written by Eric Young
+ * (eay@cryptsoft.com). This product includes software written by Tim
+ * Hudson (tjh@cryptsoft.com).
+ *
+ */
+
+
+#include <stdio.h>
+#include <string.h>
+#include <inttypes.h>
+
+#include <netinet/in.h> /* we need htonl() */
+
+#include <openssl/crypto.h>
+#include <openssl/dso.h>
+#include <openssl/engine.h>
+#include <openssl/evp.h>
+#include <openssl/aes.h>
+
+#ifndef OPENSSL_NO_HW
+#ifndef OPENSSL_NO_HW_PADLOCK
+
+/* Attempt to have a single source for both 0.9.7 and 0.9.8 :-) */
+/* DYNAMIC_ENGINE is defined only when the OpenSSL build actually
+   supports loadable engines; it gates the bind_fn glue further below. */
+#if (OPENSSL_VERSION_NUMBER >= 0x00908000L)
+# ifndef OPENSSL_NO_DYNAMIC_ENGINE
+#  define DYNAMIC_ENGINE
+# endif
+#elif (OPENSSL_VERSION_NUMBER >= 0x00907000L)
+# ifdef ENGINE_DYNAMIC_SUPPORT
+#  define DYNAMIC_ENGINE
+# endif
+#else
+# error "Only OpenSSL >= 0.9.7 is supported"
+#endif
+
+/* VIA PadLock AES is available *ONLY* on some x86 CPUs.
+   Not only is it absent elsewhere, this module cannot even
+   be compiled on other platforms!
+
+   In addition, because of the heavy use of inline assembler,
+   you must use GNU GCC for now. This is not a "technology
+   limitation"; I simply have not had a chance to test other
+   compilers, so I chose the safe way :-) */
+/* Build the real engine only for 32-bit x86 compiled with GCC;
+   everywhere else ENGINE_load_padlock() degenerates to a stub. */
+#if defined(__i386__) && !defined(__amd64__) && defined(__GNUC__)
+#define COMPILE_HW_PADLOCK
+#else
+#undef COMPILE_HW_PADLOCK
+#endif
+
+static ENGINE *ENGINE_padlock (void);
+
+/* Public entry point: construct the PadLock ENGINE and add it to the
+   global engine list.  On unsupported platforms this is a no-op. */
+void ENGINE_load_padlock (void)
+{
+/* On non-x86 CPUs it just returns. */
+#ifdef COMPILE_HW_PADLOCK
+	ENGINE *toadd = ENGINE_padlock ();
+	if (!toadd) return;
+	ENGINE_add (toadd);
+	/* ENGINE_add() takes its own structural reference; drop ours. */
+	ENGINE_free (toadd);
+	/* Discard any errors queued while probing/registering the engine. */
+	ERR_clear_error ();
+#endif
+}
+
+#ifdef COMPILE_HW_PADLOCK
+/* Function for ENGINE detection and control */
+static int padlock_available(void);
+static int padlock_init(ENGINE *e);
+
+/* RNG Stuff */
+static RAND_METHOD padlock_rand;
+
+/* Cipher Stuff */
+static int padlock_ciphers(ENGINE *e, const EVP_CIPHER **cipher, const int **nids, int nid);
+
+/* Engine names */
+static const char *padlock_id = "padlock";
+/* Filled in by padlock_bind_helper() with the detected feature set. */
+static char padlock_name[100];
+
+/* Available features; set once by padlock_available() at bind time. */
+static int padlock_use_ace = 0;	/* Advanced Cryptography Engine */
+static int padlock_use_rng = 0;	/* Random Number Generator */
+
+/* ===== Engine "management" functions ===== */
+/* Prepare the ENGINE structure for registration */
+static int
+padlock_bind_helper(ENGINE *e)
+{
+ /* Check available features */
+ padlock_available();
+
+ /* Generate a nice engine name with available features */
+ snprintf(padlock_name, sizeof(padlock_name), "VIA PadLock (%s, %s)",
+ padlock_use_rng ? "RNG" : "no-RNG",
+ padlock_use_ace ? "ACE" : "no-ACE");
+
+ /* Register everything or return with an error */
+ if (!ENGINE_set_id(e, padlock_id) ||
+ !ENGINE_set_name(e, padlock_name) ||
+
+ !ENGINE_set_init_function(e, padlock_init) ||
+
+ (padlock_use_ace && !ENGINE_set_ciphers (e, padlock_ciphers)) ||
+ (padlock_use_rng && !ENGINE_set_RAND (e, &padlock_rand))) {
+ return 0;
+ }
+
+ /* Everything looks good */
+ return 1;
+}
+
+/* Constructor: allocate a fresh ENGINE and populate it.
+   Returns NULL on allocation or registration failure (the half-built
+   engine is freed before returning). */
+static ENGINE *
+ENGINE_padlock(void)
+{
+	ENGINE *eng = ENGINE_new();
+
+	if (!eng) {
+		return NULL;
+	}
+
+	if (!padlock_bind_helper(eng)) {
+		ENGINE_free(eng);
+		return NULL;
+	}
+
+	return eng;
+}
+
+/* Helper function - check if a CPUID instruction
+   is available on this CPU.
+   Classic i386 probe: CPUID exists iff software can toggle bit #21
+   (the ID flag) of EFLAGS.  'result' ends up 0 when the toggled value
+   sticks, i.e. when CPUID is supported. */
+static int
+padlock_insn_cpuid_available(void)
+{
+	uint32_t result = -1;
+
+	/* TODO: handle the "red-zone" once this
+	   module is enabled on AMD64 */
+	/* We're checking if the bit #21 of EFLAGS
+	   can be toggled. If yes = CPUID is available. */
+	asm volatile (
+		"pushf\n"
+		"popl %%eax\n"
+		"xorl $0x200000, %%eax\n"
+		"movl %%eax, %%ecx\n"
+		"andl $0x200000, %%ecx\n"
+		"pushl %%eax\n"
+		"popf\n"
+		"pushf\n"
+		"popl %%eax\n"
+		"andl $0x200000, %%eax\n"
+		"xorl %%eax, %%ecx\n"
+		"movl %%ecx, %0\n"
+		: "=r" (result) : : "eax", "ecx");
+
+	return (result == 0);
+}
+
+/* Detect PadLock features via the Centaur extended CPUID leaves
+   (0xC0000000/0xC0000001) and record them in the padlock_use_* flags.
+   For each feature both bits of the pair must be set - presumably the
+   "present" and "enabled" bits per VIA's docs (TODO confirm).
+   %ebx is saved/restored around CPUID because it may be the PIC
+   register under -fPIC.  Returns nonzero iff anything usable exists. */
+static int
+padlock_available(void)
+{
+	uint32_t eax, edx;
+
+	/* First check if the CPUID instruction is available at all... */
+	if (! padlock_insn_cpuid_available())
+		return 0;
+
+	/* Check for Centaur Extended Feature Flags presence */
+	eax = 0xC0000000;
+	asm volatile ("pushl %%ebx; cpuid; popl %%ebx" : "+a"(eax) : : "ecx", "edx");
+	if (eax < 0xC0000001)
+		return 0;
+
+	/* Read the Centaur Extended Feature Flags */
+	eax = 0xC0000001;
+	asm volatile ("pushl %%ebx; cpuid; popl %%ebx" : "+a"(eax), "=d"(edx) : : "ecx");
+
+	/* Fill up some flags */
+	padlock_use_ace = ((edx & (0x3<<6)) == (0x3<<6));
+	padlock_use_rng = ((edx & (0x3<<2)) == (0x3<<2));
+
+	return padlock_use_ace + padlock_use_rng;
+}
+
+/* Check availability of the engine: ENGINE init callback.
+   Succeeds iff padlock_available() found at least one usable feature. */
+static int
+padlock_init(ENGINE *e)
+{
+	return (padlock_use_rng || padlock_use_ace);
+}
+
+/* This stuff is needed if this ENGINE is being compiled into a self-contained
+ * shared-library.
+ */
+#ifdef DYNAMIC_ENGINE
+/* Dynamic-load bind callback: reject any id other than "padlock",
+   then populate the caller-supplied ENGINE in place. */
+static int
+padlock_bind_fn(ENGINE *e, const char *id)
+{
+	if (id && (strcmp(id, padlock_id) != 0)) {
+		return 0;
+	}
+
+	if (!padlock_bind_helper(e))  {
+		return 0;
+	}
+
+	return 1;
+}
+
+IMPLEMENT_DYNAMIC_CHECK_FN ();
+IMPLEMENT_DYNAMIC_BIND_FN (padlock_bind_fn);
+#endif /* DYNAMIC_ENGINE */
+
+/* ===== Here comes the "real" engine ===== */
+
+/* Some OpenSSL versions name the 128-bit-feedback CFB/OFB NIDs with a
+   "128" suffix; alias them so the cipher table below compiles either way. */
+#if defined(NID_aes_128_cfb128) && ! defined (NID_aes_128_cfb)
+#define NID_aes_128_cfb	NID_aes_128_cfb128
+#endif
+
+#if defined(NID_aes_128_ofb128) && ! defined (NID_aes_128_ofb)
+#define NID_aes_128_ofb	NID_aes_128_ofb128
+#endif
+
+#if defined(NID_aes_192_cfb128) && ! defined (NID_aes_192_cfb)
+#define NID_aes_192_cfb	NID_aes_192_cfb128
+#endif
+
+#if defined(NID_aes_192_ofb128) && ! defined (NID_aes_192_ofb)
+#define NID_aes_192_ofb	NID_aes_192_ofb128
+#endif
+
+#if defined(NID_aes_256_cfb128) && ! defined (NID_aes_256_cfb)
+#define NID_aes_256_cfb	NID_aes_256_cfb128
+#endif
+
+#if defined(NID_aes_256_ofb128) && ! defined (NID_aes_256_ofb)
+#define NID_aes_256_ofb	NID_aes_256_ofb128
+#endif
+
+/* List of supported ciphers.
+   Returned verbatim by padlock_ciphers() when queried without a NID.
+   The disabled entries match the FIXME below: 192/256-bit CFB/OFB did
+   not work at the time of writing and stay off until that is resolved. */
+static int padlock_cipher_nids[] = {
+	NID_aes_128_ecb,
+	NID_aes_128_cbc,
+	NID_aes_128_cfb,
+	NID_aes_128_ofb,
+
+	NID_aes_192_ecb,
+	NID_aes_192_cbc,
+//	NID_aes_192_cfb,	/* FIXME: AES192/256 CFB/OFB don't work. */
+//	NID_aes_192_ofb,
+
+	NID_aes_256_ecb,
+	NID_aes_256_cbc,
+//	NID_aes_256_cfb,
+//	NID_aes_256_ofb,
+};
+static int padlock_cipher_nids_num = (sizeof(padlock_cipher_nids)/
+				      sizeof(padlock_cipher_nids[0]));
+
+/* Function prototypes ... */
+static int padlock_aes_init_key_128(EVP_CIPHER_CTX *ctx, const unsigned char *key,
+				const unsigned char *iv, int enc);
+static int padlock_aes_init_key_192(EVP_CIPHER_CTX *ctx, const unsigned char *key,
+				const unsigned char *iv, int enc);
+static int padlock_aes_init_key_256(EVP_CIPHER_CTX *ctx, const unsigned char *key,
+				const unsigned char *iv, int enc);
+static int padlock_aes_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
+			      const unsigned char *in, unsigned int inl);
+
+/* Some AES-related constants */
+#define AES_BLOCK_SIZE		16
+#define AES_KEY_SIZE_128	16
+#define AES_KEY_SIZE_192	24
+#define AES_KEY_SIZE_256	32
+
+/* Room for a full key schedule: 4 words per round key for every
+   possible round plus one (AES_MAXNR comes from <openssl/aes.h>). */
+#define AES_KEY_WORDS		(4 * (AES_MAXNR + 1))
+#define AES_KEY_BYTES		(AES_KEY_WORDS * 4)
+
+/* Here we store the plain key for AES128
+   and the extended key for AES192/AES256.
+   'extended' is nonzero when aes_key holds a software-expanded
+   schedule; it is copied into the control word's keygen bit later. */
+struct padlock_aes_key
+{
+	uint32_t aes_key[AES_KEY_WORDS];
+	int extended;
+};
+
+/* Declaring so many ciphers by hand would be a pain.
+   Instead introduce a bit of preprocessor magic :-)
+   Expands to a static EVP_CIPHER named padlock_aes_<ksize>_<lmode>
+   using the 0.9.7 EVP_CIPHER field layout: nid, block size, key length,
+   IV length, flags, init, do_cipher, cleanup, ctx_size, ASN.1 get/set,
+   ctrl, app_data.  All modes share padlock_aes_cipher() and differ only
+   in NID, key length and mode flag. */
+#define DECLARE_AES_EVP(ksize,lmode,umode)	\
+static const EVP_CIPHER padlock_aes_##ksize##_##lmode = {	\
+	NID_aes_##ksize##_##lmode,	\
+	AES_BLOCK_SIZE,	\
+	AES_KEY_SIZE_##ksize,	\
+	AES_BLOCK_SIZE,	\
+	0 | EVP_CIPH_##umode##_MODE,	\
+	padlock_aes_init_key_##ksize,	\
+	padlock_aes_cipher,	\
+	NULL,	\
+	sizeof(struct padlock_aes_key),	\
+	EVP_CIPHER_set_asn1_iv,	\
+	EVP_CIPHER_get_asn1_iv,	\
+	NULL,	\
+	NULL	\
+}
+
+DECLARE_AES_EVP(128,ecb,ECB);
+DECLARE_AES_EVP(128,cbc,CBC);
+DECLARE_AES_EVP(128,cfb,CFB);
+DECLARE_AES_EVP(128,ofb,OFB);
+
+DECLARE_AES_EVP(192,ecb,ECB);
+DECLARE_AES_EVP(192,cbc,CBC);
+DECLARE_AES_EVP(192,cfb,CFB);
+DECLARE_AES_EVP(192,ofb,OFB);
+
+DECLARE_AES_EVP(256,ecb,ECB);
+DECLARE_AES_EVP(256,cbc,CBC);
+DECLARE_AES_EVP(256,cfb,CFB);
+DECLARE_AES_EVP(256,ofb,OFB);
+
+/* ENGINE ciphers callback.
+   With cipher == NULL: report the supported-NID list and its length.
+   Otherwise: map 'nid' to the matching EVP_CIPHER; returns 1 on a hit,
+   0 (with *cipher = NULL) for unsupported NIDs.  Note the map includes
+   the 192/256 CFB/OFB ciphers even though they are absent from the
+   advertised NID list above. */
+static int
+padlock_ciphers (ENGINE *e, const EVP_CIPHER **cipher, const int **nids, int nid)
+{
+	/* No specific cipher => return a list of supported nids ... */
+	if (!cipher) {
+		*nids = padlock_cipher_nids;
+		return padlock_cipher_nids_num;
+	}
+
+	/* ... or the requested "cipher" otherwise */
+	switch (nid) {
+	case NID_aes_128_ecb:
+		*cipher = &padlock_aes_128_ecb;
+		break;
+	case NID_aes_128_cbc:
+		*cipher = &padlock_aes_128_cbc;
+		break;
+	case NID_aes_128_cfb:
+		*cipher = &padlock_aes_128_cfb;
+		break;
+	case NID_aes_128_ofb:
+		*cipher = &padlock_aes_128_ofb;
+		break;
+
+	case NID_aes_192_ecb:
+		*cipher = &padlock_aes_192_ecb;
+		break;
+	case NID_aes_192_cbc:
+		*cipher = &padlock_aes_192_cbc;
+		break;
+	case NID_aes_192_cfb:
+		*cipher = &padlock_aes_192_cfb;
+		break;
+	case NID_aes_192_ofb:
+		*cipher = &padlock_aes_192_ofb;
+		break;
+
+	case NID_aes_256_ecb:
+		*cipher = &padlock_aes_256_ecb;
+		break;
+	case NID_aes_256_cbc:
+		*cipher = &padlock_aes_256_cbc;
+		break;
+	case NID_aes_256_cfb:
+		*cipher = &padlock_aes_256_cfb;
+		break;
+	case NID_aes_256_ofb:
+		*cipher = &padlock_aes_256_ofb;
+		break;
+
+	default:
+		/* Sorry, we don't support this NID */
+		*cipher = NULL;
+		return 0;
+	}
+
+	return 1;
+}
+
+/* Generate an extended AES key in software. Needed for AES192/AES256.
+   key_len is in BITS (192 or 256), matching AES_set_*_key()'s contract.
+   The schedule is byte-swapped word by word because the hardware wants
+   the opposite endianness from OpenSSL's internal representation.
+   NOTE(review): the AES_set_encrypt_key/AES_set_decrypt_key return
+   values are ignored; key_len is caller-controlled here so this is
+   benign, but worth confirming.  Always returns 1. */
+static int
+padlock_aes_init_key (EVP_CIPHER_CTX *ctx, const unsigned char *key,
+		      const unsigned char *iv, int enc,
+		      int key_len)
+{
+	AES_KEY ks;
+	struct padlock_aes_key *tmp_aes_key = NULL;
+	int i;
+
+	tmp_aes_key = (struct padlock_aes_key *) (ctx->cipher_data);
+	memset(tmp_aes_key, 0, sizeof(struct padlock_aes_key));
+	if (key) {
+		if (enc)
+			AES_set_encrypt_key(key, key_len, &ks);
+		else
+			AES_set_decrypt_key(key, key_len, &ks);
+
+		/* OpenSSL internal functions use byte-swapped extended key. */
+		for (i = 0; i < AES_KEY_WORDS; i++)
+			tmp_aes_key->aes_key[i] = htonl(ks.rd_key[i]);
+
+		/* Mark the schedule as pre-expanded for the control word. */
+		tmp_aes_key->extended = 1;
+	}
+
+	return 1;
+}
+
+/* PadLock can generate an extended key for AES128 in hardware,
+   so only the raw 16-byte key is stored and 'extended' stays 0 -
+   the control word's keygen bit then tells the CPU to expand it. */
+static int
+padlock_aes_init_key_128(EVP_CIPHER_CTX *ctx, const unsigned char *key,
+			 const unsigned char *iv, int enc)
+{
+	struct padlock_aes_key *tmp_aes_key = NULL;
+
+	tmp_aes_key = (struct padlock_aes_key *) (ctx->cipher_data);
+	memset(tmp_aes_key, 0, sizeof(struct padlock_aes_key));
+	if (key) {
+		memcpy (tmp_aes_key->aes_key, key, AES_KEY_SIZE_128);
+		tmp_aes_key->extended = 0;
+	}
+
+	return 1;
+}
+
+/* AES-192 key setup: software key expansion, key length in bits. */
+static int
+padlock_aes_init_key_192(EVP_CIPHER_CTX *ctx, const unsigned char *key,
+			 const unsigned char *iv, int enc)
+{
+	return padlock_aes_init_key(ctx, key, iv, enc, 192);
+}
+
+/* AES-256 key setup: software key expansion, key length in bits. */
+static int
+padlock_aes_init_key_256(EVP_CIPHER_CTX *ctx, const unsigned char *key,
+			 const unsigned char *iv, int enc)
+{
+	return padlock_aes_init_key(ctx, key, iv, enc, 256);
+}
+
+/* Data for the VIA PadLock *must* be aligned on 16-Bytes boundaries!
+   Over-allocates by 'alignment' bytes and rounds the pointer up, so the
+   aligned pointer always has 'size' usable bytes.  *index receives the
+   raw malloc() result and is what the caller must free().
+   NOTE(review): the malloc() result is never checked - on allocation
+   failure this returns NULL and callers memcpy() into it; an OOM here
+   would crash.  Worth hardening. */
+static void *
+padlock_aligned_malloc(size_t size, size_t alignment, void **index)
+{
+	char *ptr;
+
+	ptr = malloc(size + alignment);
+	*index = ptr;
+	if (alignment > 1 && ((long)ptr & (alignment - 1))) {
+		ptr += alignment - ((long)ptr & (alignment - 1));
+	}
+
+	return ptr;
+}
+
+/* Control word.  The bitfields are meant to mirror the PadLock ACE
+   control-word layout (rounds, algorithm, key-generation, intermediate
+   result, encrypt/decrypt, key size) - see VIA's ACE docs.  Bitfield
+   ordering is compiler-dependent; this module is GCC-only by design,
+   which keeps the layout predictable. */
+union cword {
+	uint32_t cword[4];
+	struct {
+		int rounds:4;
+		int algo:3;
+		int keygen:1;
+		int interm:1;
+		int encdec:1;
+		int ksize:2;
+	} b;
+};
+
+/* Template for all modes.
+   Generates a wrapper around one "rep xcrypt*" instruction:
+   %esi=input, %edi=output, %ebx=key, %edx=control word, %ecx=block
+   count, %eax=IV pointer (loaded from *iv and stored back afterwards,
+   since the instruction may advance it).  "pushfl; popfl" looks like a
+   defensive EFLAGS reload before the operation - TODO confirm intent.
+   NOTE(review): %ebx (possibly the PIC register) is saved in the word
+   BELOW %esp; i386 has no red zone, so a signal handler arriving here
+   could clobber it - see the red-zone TODO above.
+   NOTE(review): %eax is declared only as the input operand "a"(key)
+   yet the asm overwrites it; GCC forbids modifying input operands, so
+   this relies on undefined constraint behavior. */
+#define PADLOCK_XCRYPT_ASM(name,opcode)	\
+static inline void name(uint8_t *input, uint8_t *output, uint8_t *key,	\
+			uint8_t **iv, void *control_word, uint32_t count)	\
+{	\
+	asm volatile ("pushfl; popfl\n"	\
+		      "movl %%ebx, -4(%%esp)\n"	\
+		      "movl %%eax, %%ebx\n"	\
+		      "movl %0, %%eax\n"	\
+		      "movl (%%eax), %%eax\n"	\
+		      opcode "\n"	\
+		      "movl %0, %%ebx\n"	\
+		      "movl %%eax, (%%ebx)\n"	\
+		      "movl -4(%%esp), %%ebx\n"	\
+		      : "+m"(iv), "+S"(input), "+D"(output)	\
+		      : "a"(key), "c"(count), "d"(control_word));	\
+}
+
+/* Generate all functions with appropriate opcodes */
+PADLOCK_XCRYPT_ASM(padlock_xcrypt_ecb, ".byte 0xf3,0x0f,0xa7,0xc8");	/* rep xcryptecb */
+PADLOCK_XCRYPT_ASM(padlock_xcrypt_cbc, ".byte 0xf3,0x0f,0xa7,0xd0");	/* rep xcryptcbc */
+PADLOCK_XCRYPT_ASM(padlock_xcrypt_cfb, ".byte 0xf3,0x0f,0xa7,0xe0");	/* rep xcryptcfb */
+PADLOCK_XCRYPT_ASM(padlock_xcrypt_ofb, ".byte 0xf3,0x0f,0xa7,0xe8");	/* rep xcryptofb */
+
+/* Re-align the arguments to 16-Bytes boundaries and run the
+   encryption function itself. This function is not AES-specific.
+   Strategy: copy the IV into a local 16-byte-aligned buffer (xcrypt
+   may modify it), bounce unaligned in/out buffers through a single
+   aligned heap allocation ('index' tracks the raw pointer to free;
+   in and out can legally share the buffer because xcrypt works in
+   place), dispatch on the EVP mode flag, then copy results and the
+   updated IV back to the caller.
+   NOTE(review): the variably-sized array member iv[2*blocksize] is a
+   GNU extension, consistent with this module being GCC-only.
+   NOTE(review): padlock_aligned_malloc() results are not checked for
+   NULL (see note there). */
+static inline void
+padlock_aligner(uint8_t *out_arg, const uint8_t *in_arg, uint8_t *iv_arg,
+		void *key, union cword *cword, size_t nbytes,
+		size_t blocksize, int mode)
+{
+	/* Don't blindly modify this structure - the items must
+	   be 16-Bytes aligned! */
+	struct padlock_xcrypt_data {
+		uint8_t iv[2*blocksize];	/* Initialization vector */
+	};
+
+	uint8_t *in, *out, *iv;
+	void *index = NULL;
+	char bigbuf[sizeof(struct padlock_xcrypt_data) + 16];
+	struct padlock_xcrypt_data *data;
+
+	memset(bigbuf, 0, sizeof (bigbuf));
+
+	/* Place 'data' at the first 16-Bytes aligned address in 'bigbuf' */
+	if (((long)bigbuf) & 0x0F)
+		data = (void*)(bigbuf + 16 - ((long)bigbuf & 0x0F));
+	else
+		data = (void*)bigbuf;
+
+	/* Always make a local copy of IV - xcrypt may change it! */
+	iv = data->iv;
+	if (iv_arg)
+		memcpy(iv, iv_arg, blocksize);
+
+	/* Align 'in_arg' */
+	if (((long)in_arg) & 0x0F) {
+		in = padlock_aligned_malloc(nbytes, 16, &index);
+		memcpy(in, in_arg, nbytes);
+	}
+	else
+		in = (uint8_t*)in_arg;
+
+	/* Align 'out_arg' */
+	if (((long)out_arg) & 0x0F) {
+		if (index)
+			out = in;	/* xcrypt can work "in place" */
+		else
+			out = padlock_aligned_malloc(nbytes, 16, &index);
+	}
+	else
+		out = out_arg;
+
+	/* Run xcrypt for a requested mode */
+	switch (mode) {
+	case EVP_CIPH_ECB_MODE:
+		padlock_xcrypt_ecb(in, out, key, &iv, cword, nbytes/blocksize);
+		break;
+
+	case EVP_CIPH_CBC_MODE:
+		padlock_xcrypt_cbc(in, out, key, &iv, cword, nbytes/blocksize);
+		break;
+
+	case EVP_CIPH_CFB_MODE:
+		padlock_xcrypt_cfb(in, out, key, &iv, cword, nbytes/blocksize);
+		break;
+
+	case EVP_CIPH_OFB_MODE:
+		padlock_xcrypt_ofb(in, out, key, &iv, cword, nbytes/blocksize);
+		break;
+
+	default:
+		break;
+	}
+
+	/* Copy the 16-Byte aligned output to the caller's buffer. */
+	if (out != out_arg)
+		memcpy(out_arg, out, nbytes);
+
+	/* Save modified IV */
+	memcpy (iv_arg, iv, blocksize);
+
+	/* One allocation serves both bounce buffers; free the raw pointer. */
+	if (index)
+		free(index);
+
+}
+
+/* Entry point specific for AES cipher (EVP do_cipher callback).
+   Builds a 16-byte-aligned control word on the stack:
+   - encdec is set when decrypting (ctx->encrypt == 0);
+   - rounds/ksize are derived from the key length in bytes
+     (16 -> 10 rounds, 24 -> 12, 32 -> 14);
+   - keygen mirrors 'extended': nonzero when software pre-expanded the
+     schedule (AES-192/256), zero when the CPU should expand the raw
+     AES-128 key itself.
+   The key is bounced through an aligned heap copy if needed, then the
+   mode-independent aligner does the real work.  Always returns 1. */
+static int
+padlock_aes_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
+		   const unsigned char *in, unsigned int in_len)
+{
+	char bigbuf[sizeof(union cword) + 16];
+	union cword *cword;
+	void *key_arg, *key, *iv, *index_key=NULL;
+	int mode = EVP_CIPHER_CTX_mode(ctx);
+	int key_len = EVP_CIPHER_CTX_key_length(ctx);
+	struct padlock_aes_key *aes_key = (struct padlock_aes_key *) (ctx->cipher_data);
+
+	/* Place 'data' at the first 16-Bytes aligned address in 'bigbuf'. */
+	if (((long)bigbuf) & 0x0F)
+		cword = (void*)(bigbuf + 16 - ((long)bigbuf & 0x0F));
+	else
+		cword = (void*)bigbuf;
+
+	/* Prepare Control word. */
+	memset (cword, 0, sizeof(union cword));
+	cword->b.encdec = (ctx->encrypt == 0);
+	cword->b.rounds = 10 + (key_len - 16) / 4;
+	cword->b.ksize = (key_len - 16) / 8;
+	cword->b.keygen = aes_key->extended;
+
+	/* The hardware needs the key 16-byte aligned as well. */
+	key_arg = aes_key->aes_key;
+	if ((long)key_arg & 0x0f) {
+		key = padlock_aligned_malloc(AES_KEY_BYTES, 16, &index_key);
+		memcpy (key, key_arg, AES_KEY_BYTES);
+	} else
+		key = key_arg;
+
+	iv = ctx->iv;
+
+	padlock_aligner(out, in, iv, key, cword,
+			in_len, AES_BLOCK_SIZE, mode);
+
+	if (index_key)
+		free(index_key);
+
+	return 1;
+}
+
+/* ===== Random Number Generator ===== */
+
+/* The RNG call itself.
+   Executes "rep xstore" with %edi = destination (advanced by the
+   instruction, hence the "+D" on the dereferenced pointer), %edx =
+   quality/control input and %ecx = count.  Returns the instruction's
+   %eax result - presumably an availability/status word per the PadLock
+   RNG docs (TODO confirm). */
+static inline uint32_t
+padlock_xstore(unsigned char **output_addr, uint32_t edx_in,
+	       uint32_t count)
+{
+	uint32_t eax_out;
+
+	asm volatile (".byte 0xf3,0x0f,0xa7,0xc0"	/* rep xstore */
+		      : "+D"(*output_addr), "=a"(eax_out)	/* output */
+		      : "d" (edx_in), "c"(count)	/* input */
+		      );
+
+	return eax_out;
+}
+
+/* Wrapper that provides an interface between the API and
+   the raw PadLock RNG.
+   Because xstore always stores at least 4 bytes, the tail of the
+   request is generated into a local 8-byte scratch buffer and only
+   the needed bytes are copied out, so the hardware never writes past
+   the caller's buffer.  The magic edx value 3 is taken from the
+   PadLock RNG docs (per the original comment).  Always returns 1 -
+   the xstore status result is not inspected. */
+static int
+padlock_rand_bytes(unsigned char *output, int count)
+{
+	unsigned char *pptr = output;
+	unsigned char buf[8];
+	int orig_count = count;
+
+	/* xstore always stores at least 4 bytes - we must avoid
+	   overwriting of innocent data! */
+	if (count > 4) {
+		/* 3 ... magic constant, see PadLock RNG docs */
+		padlock_xstore(&pptr, 3, count - 4);
+		count = 4;
+	}
+	if (count > 0) {
+		/* Generate the last <= 4 bytes into scratch space first. */
+		pptr = buf;
+		padlock_xstore(&pptr, 3, count);
+		memcpy(output + orig_count - count, buf, count);
+	}
+	return 1;
+}
+
+/* Dummy but necessary function: RAND_METHOD status callback.
+   Always reports the RNG as seeded/ready. */
+static int
+padlock_rand_status(void)
+{
+	return 1;
+}
+
+/* Prepare structure for registration.
+   Hardware entropy needs no seeding/adding, so those hooks are NULL;
+   pseudorand deliberately reuses the same hardware byte source. */
+static RAND_METHOD padlock_rand = {
+	NULL,			/* seed */
+	padlock_rand_bytes,	/* bytes */
+	NULL,			/* cleanup */
+	NULL,			/* add */
+	padlock_rand_bytes,	/* pseudorand */
+	padlock_rand_status,	/* rand status */
+};
+
+#endif /* COMPILE_HW_PADLOCK */
+
+#endif /* !OPENSSL_NO_HW_PADLOCK */
+#endif /* !OPENSSL_NO_HW */