ZTE's code, first commit

Change-Id: I9a04da59e459a9bc0d67f101f700d9d7dc8d681b
diff --git a/ap/lib/libssl/openssl-1.1.1o/crypto/aes/aes_cbc.c b/ap/lib/libssl/openssl-1.1.1o/crypto/aes/aes_cbc.c
new file mode 100644
index 0000000..342841f
--- /dev/null
+++ b/ap/lib/libssl/openssl-1.1.1o/crypto/aes/aes_cbc.c
@@ -0,0 +1,24 @@
+/*
+ * Copyright 2002-2016 The OpenSSL Project Authors. All Rights Reserved.
+ *
+ * Licensed under the OpenSSL license (the "License").  You may not use
+ * this file except in compliance with the License.  You can obtain a copy
+ * in the file LICENSE in the source distribution or at
+ * https://www.openssl.org/source/license.html
+ */
+
+#include <openssl/aes.h>
+#include <openssl/modes.h>
+
+void AES_cbc_encrypt(const unsigned char *in, unsigned char *out,
+                     size_t len, const AES_KEY *key,
+                     unsigned char *ivec, const int enc)
+{
+
+    if (enc)
+        CRYPTO_cbc128_encrypt(in, out, len, key, ivec,
+                              (block128_f) AES_encrypt);
+    else
+        CRYPTO_cbc128_decrypt(in, out, len, key, ivec,
+                              (block128_f) AES_decrypt);
+}
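+
+/*
+ * Editorial sketch (not part of the original file): a minimal CBC round
+ * trip through the API above, assuming an all-zero key/IV and a 32-byte
+ * buffer purely for illustration.  AES_cbc_encrypt updates ivec in
+ * place, so decryption needs a fresh copy of the original IV.
+ *
+ *     unsigned char key[16] = {0}, iv[16] = {0}, buf[32] = {0};
+ *     AES_KEY eks, dks;
+ *     AES_set_encrypt_key(key, 128, &eks);
+ *     AES_cbc_encrypt(buf, buf, sizeof(buf), &eks, iv, AES_ENCRYPT);
+ *     memset(iv, 0, sizeof(iv));            // restore the original IV
+ *     AES_set_decrypt_key(key, 128, &dks);
+ *     AES_cbc_encrypt(buf, buf, sizeof(buf), &dks, iv, AES_DECRYPT);
+ */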
diff --git a/ap/lib/libssl/openssl-1.1.1o/crypto/aes/aes_cfb.c b/ap/lib/libssl/openssl-1.1.1o/crypto/aes/aes_cfb.c
new file mode 100644
index 0000000..f010e3c
--- /dev/null
+++ b/ap/lib/libssl/openssl-1.1.1o/crypto/aes/aes_cfb.c
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2002-2016 The OpenSSL Project Authors. All Rights Reserved.
+ *
+ * Licensed under the OpenSSL license (the "License").  You may not use
+ * this file except in compliance with the License.  You can obtain a copy
+ * in the file LICENSE in the source distribution or at
+ * https://www.openssl.org/source/license.html
+ */
+
+#include <openssl/aes.h>
+#include <openssl/modes.h>
+
+/*
+ * The input and output are encrypted as though 128-bit CFB mode were
+ * being used.  The extra state information recording how much of the
+ * 128-bit block we have used is contained in *num.
+ */
+
+void AES_cfb128_encrypt(const unsigned char *in, unsigned char *out,
+                        size_t length, const AES_KEY *key,
+                        unsigned char *ivec, int *num, const int enc)
+{
+
+    CRYPTO_cfb128_encrypt(in, out, length, key, ivec, num, enc,
+                          (block128_f) AES_encrypt);
+}
+
+/* N.B. This expects the input to be packed, MS bit first */
+void AES_cfb1_encrypt(const unsigned char *in, unsigned char *out,
+                      size_t length, const AES_KEY *key,
+                      unsigned char *ivec, int *num, const int enc)
+{
+    CRYPTO_cfb128_1_encrypt(in, out, length, key, ivec, num, enc,
+                            (block128_f) AES_encrypt);
+}
+
+void AES_cfb8_encrypt(const unsigned char *in, unsigned char *out,
+                      size_t length, const AES_KEY *key,
+                      unsigned char *ivec, int *num, const int enc)
+{
+    CRYPTO_cfb128_8_encrypt(in, out, length, key, ivec, num, enc,
+                            (block128_f) AES_encrypt);
+}
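+
+/*
+ * Editorial note: CFB turns the block cipher into a stream cipher, so
+ * any byte length is valid and both directions run the block cipher
+ * forwards (only the XOR direction differs, selected by enc).  A minimal
+ * sketch, with a zero key/IV assumed purely for illustration:
+ *
+ *     unsigned char key[16] = {0}, iv[16] = {0}, buf[5] = "data";
+ *     AES_KEY ks;
+ *     int num = 0;
+ *     AES_set_encrypt_key(key, 128, &ks);  // encrypt schedule either way
+ *     AES_cfb128_encrypt(buf, buf, sizeof(buf), &ks, iv, &num, AES_ENCRYPT);
+ */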
diff --git a/ap/lib/libssl/openssl-1.1.1o/crypto/aes/aes_core.c b/ap/lib/libssl/openssl-1.1.1o/crypto/aes/aes_core.c
new file mode 100644
index 0000000..ad00c72
--- /dev/null
+++ b/ap/lib/libssl/openssl-1.1.1o/crypto/aes/aes_core.c
@@ -0,0 +1,1997 @@
+/*
+ * Copyright 2002-2020 The OpenSSL Project Authors. All Rights Reserved.
+ *
+ * Licensed under the OpenSSL license (the "License").  You may not use
+ * this file except in compliance with the License.  You can obtain a copy
+ * in the file LICENSE in the source distribution or at
+ * https://www.openssl.org/source/license.html
+ */
+
+/**
+ * rijndael-alg-fst.c
+ *
+ * @version 3.0 (December 2000)
+ *
+ * Optimised ANSI C code for the Rijndael cipher (now AES)
+ *
+ * @author Vincent Rijmen
+ * @author Antoon Bosselaers
+ * @author Paulo Barreto
+ *
+ * This code is hereby placed in the public domain.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ''AS IS'' AND ANY EXPRESS
+ * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* Note: rewritten a little bit to provide error control and an OpenSSL-
+   compatible API */
+
+#include <assert.h>
+
+#include <stdlib.h>
+#include <string.h>             /* memcpy() in the constant-time path */
+#include <openssl/crypto.h>
+#include <openssl/aes.h>
+#include "aes_local.h"
+
+#if defined(OPENSSL_AES_CONST_TIME) && !defined(AES_ASM)
+typedef union {
+    unsigned char b[8];
+    u32 w[2];
+    u64 d;
+} uni;
+
+/*
+ * Compute w := (w * x) mod (x^8 + x^4 + x^3 + x + 1)
+ * Therefore the name "xtime".
+ */
+static void XtimeWord(u32 *w)
+{
+    u32 a, b;
+
+    a = *w;
+    b = a & 0x80808080u;
+    a ^= b;
+    b -= b >> 7;
+    b &= 0x1B1B1B1Bu;
+    b ^= a << 1;
+    *w = b;
+}
+
+static void XtimeLong(u64 *w)
+{
+    u64 a, b;
+
+    a = *w;
+    b = a & 0x8080808080808080uLL;
+    a ^= b;
+    b -= b >> 7;
+    b &= 0x1B1B1B1B1B1B1B1BuLL;
+    b ^= a << 1;
+    *w = b;
+}
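+
+/*
+ * Editorial worked example: for the byte {57}, xtime gives
+ * 0x57 << 1 = 0xae (bit 7 was clear, so no reduction), and doubling
+ * again gives (0xae << 1) ^ 0x11b = 0x47; these are the {57}*{02} = {ae}
+ * and {ae}*{02} = {47} steps from FIPS-197.  XtimeWord/XtimeLong do this
+ * on four/eight byte lanes at once: b collects the carry bits,
+ * b - (b >> 7) masked with 0x1B.. expands each carry into the reduction
+ * constant 0x1B, and a << 1 cannot spill across lanes because the carry
+ * bits were already cleared from a.
+ */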
+
+/*
+ * This computes w := S * w^-1 + c, where c = {01100011}.
+ * Instead of using GF(2^8) mod (x^8+x^4+x^3+x+1) we do the inversion
+ * in GF(GF(GF(2^2)^2)^2) mod (X^2+X+8)
+ * and GF(GF(2^2)^2) mod (X^2+X+2)
+ * and GF(2^2) mod (X^2+X+1)
+ * The first part of the algorithm below transfers the coordinates
+ * {0x01,0x02,0x04,0x08,0x10,0x20,0x40,0x80} =>
+ * {1,Y,Y^2,Y^3,Y^4,Y^5,Y^6,Y^7} with Y=0x41:
+ * {0x01,0x41,0x66,0x6c,0x56,0x9a,0x58,0xc4}
+ * The last part undoes the coordinate transfer and the final affine
+ * transformation S:
+ * b[i] = b[i] + b[(i+4)%8] + b[(i+5)%8] + b[(i+6)%8] + b[(i+7)%8] + c[i]
+ * in one step.
+ * The multiplication in GF(2^2^2^2) is done in ordinary coords:
+ * A = (a0*1 + a1*x^4)
+ * B = (b0*1 + b1*x^4)
+ * AB = ((a0*b0 + 8*a1*b1)*1 + (a1*b0 + (a0+a1)*b1)*x^4)
+ * When A = (a0,a1) is given we want to solve AB = 1:
+ * (a) 1 = a0*b0 + 8*a1*b1
+ * (b) 0 = a1*b0 + (a0+a1)*b1
+ * => multiply (a) by a1 and (b) by a0
+ * (c) a1 = a1*a0*b0 + (8*a1*a1)*b1
+ * (d) 0 = a1*a0*b0 + (a0*a0+a1*a0)*b1
+ * => add (c) + (d)
+ * (e) a1 = (a0*a0 + a1*a0 + 8*a1*a1)*b1
+ * => therefore
+ * b1 = (a0*a0 + a1*a0 + 8*a1*a1)^-1 * a1
+ * => and adding (a1*b0) to (b) we get
+ * (f) a1*b0 = (a0+a1)*b1
+ * => therefore
+ * b0 = (a0*a0 + a1*a0 + 8*a1*a1)^-1 * (a0+a1)
+ * Note this formula also works for the case
+ * (a0+a1)*a0 + 8*a1*a1 = 0
+ * if the inverse element for 0^-1 is mapped to 0.
+ * Repeat the same for GF(2^2^2) and GF(2^2).
+ * We get the following algorithm:
+ * inv8(a0,a1):
+ *   x0 = a0^a1
+ *   [y0,y1] = mul4([x0,a1],[a0,a1]); (*)
+ *   y1 = mul4(8,y1);
+ *   t = inv4(y0^y1);
+ *   [b0,b1] = mul4([x0,a1],[t,t]); (*)
+ *   return [b0,b1];
+ * The non-linear multiplies (*) can be done in parallel at no extra cost.
+ */
+static void SubWord(u32 *w)
+{
+    u32 x, y, a1, a2, a3, a4, a5, a6;
+
+    x = *w;
+    y = ((x & 0xFEFEFEFEu) >> 1) | ((x & 0x01010101u) << 7);
+    x &= 0xDDDDDDDDu;
+    x ^= y & 0x57575757u;
+    y = ((y & 0xFEFEFEFEu) >> 1) | ((y & 0x01010101u) << 7);
+    x ^= y & 0x1C1C1C1Cu;
+    y = ((y & 0xFEFEFEFEu) >> 1) | ((y & 0x01010101u) << 7);
+    x ^= y & 0x4A4A4A4Au;
+    y = ((y & 0xFEFEFEFEu) >> 1) | ((y & 0x01010101u) << 7);
+    x ^= y & 0x42424242u;
+    y = ((y & 0xFEFEFEFEu) >> 1) | ((y & 0x01010101u) << 7);
+    x ^= y & 0x64646464u;
+    y = ((y & 0xFEFEFEFEu) >> 1) | ((y & 0x01010101u) << 7);
+    x ^= y & 0xE0E0E0E0u;
+    a1 = x;
+    a1 ^= (x & 0xF0F0F0F0u) >> 4;
+    a2 = ((x & 0xCCCCCCCCu) >> 2) | ((x & 0x33333333u) << 2);
+    a3 = x & a1;
+    a3 ^= (a3 & 0xAAAAAAAAu) >> 1;
+    a3 ^= (((x << 1) & a1) ^ ((a1 << 1) & x)) & 0xAAAAAAAAu;
+    a4 = a2 & a1;
+    a4 ^= (a4 & 0xAAAAAAAAu) >> 1;
+    a4 ^= (((a2 << 1) & a1) ^ ((a1 << 1) & a2)) & 0xAAAAAAAAu;
+    a5 = (a3 & 0xCCCCCCCCu) >> 2;
+    a3 ^= ((a4 << 2) ^ a4) & 0xCCCCCCCCu;
+    a4 = a5 & 0x22222222u;
+    a4 |= a4 >> 1;
+    a4 ^= (a5 << 1) & 0x22222222u;
+    a3 ^= a4;
+    a5 = a3 & 0xA0A0A0A0u;
+    a5 |= a5 >> 1;
+    a5 ^= (a3 << 1) & 0xA0A0A0A0u;
+    a4 = a5 & 0xC0C0C0C0u;
+    a6 = a4 >> 2;
+    a4 ^= (a5 << 2) & 0xC0C0C0C0u;
+    a5 = a6 & 0x20202020u;
+    a5 |= a5 >> 1;
+    a5 ^= (a6 << 1) & 0x20202020u;
+    a4 |= a5;
+    a3 ^= a4 >> 4;
+    a3 &= 0x0F0F0F0Fu;
+    a2 = a3;
+    a2 ^= (a3 & 0x0C0C0C0Cu) >> 2;
+    a4 = a3 & a2;
+    a4 ^= (a4 & 0x0A0A0A0Au) >> 1;
+    a4 ^= (((a3 << 1) & a2) ^ ((a2 << 1) & a3)) & 0x0A0A0A0Au;
+    a5 = a4 & 0x08080808u;
+    a5 |= a5 >> 1;
+    a5 ^= (a4 << 1) & 0x08080808u;
+    a4 ^= a5 >> 2;
+    a4 &= 0x03030303u;
+    a4 ^= (a4 & 0x02020202u) >> 1;
+    a4 |= a4 << 2;
+    a3 = a2 & a4;
+    a3 ^= (a3 & 0x0A0A0A0Au) >> 1;
+    a3 ^= (((a2 << 1) & a4) ^ ((a4 << 1) & a2)) & 0x0A0A0A0Au;
+    a3 |= a3 << 4;
+    a2 = ((a1 & 0xCCCCCCCCu) >> 2) | ((a1 & 0x33333333u) << 2);
+    x = a1 & a3;
+    x ^= (x & 0xAAAAAAAAu) >> 1;
+    x ^= (((a1 << 1) & a3) ^ ((a3 << 1) & a1)) & 0xAAAAAAAAu;
+    a4 = a2 & a3;
+    a4 ^= (a4 & 0xAAAAAAAAu) >> 1;
+    a4 ^= (((a2 << 1) & a3) ^ ((a3 << 1) & a2)) & 0xAAAAAAAAu;
+    a5 = (x & 0xCCCCCCCCu) >> 2;
+    x ^= ((a4 << 2) ^ a4) & 0xCCCCCCCCu;
+    a4 = a5 & 0x22222222u;
+    a4 |= a4 >> 1;
+    a4 ^= (a5 << 1) & 0x22222222u;
+    x ^= a4;
+    y = ((x & 0xFEFEFEFEu) >> 1) | ((x & 0x01010101u) << 7);
+    x &= 0x39393939u;
+    x ^= y & 0x3F3F3F3Fu;
+    y = ((y & 0xFCFCFCFCu) >> 2) | ((y & 0x03030303u) << 6);
+    x ^= y & 0x97979797u;
+    y = ((y & 0xFEFEFEFEu) >> 1) | ((y & 0x01010101u) << 7);
+    x ^= y & 0x9B9B9B9Bu;
+    y = ((y & 0xFEFEFEFEu) >> 1) | ((y & 0x01010101u) << 7);
+    x ^= y & 0x3C3C3C3Cu;
+    y = ((y & 0xFEFEFEFEu) >> 1) | ((y & 0x01010101u) << 7);
+    x ^= y & 0xDDDDDDDDu;
+    y = ((y & 0xFEFEFEFEu) >> 1) | ((y & 0x01010101u) << 7);
+    x ^= y & 0x72727272u;
+    x ^= 0x63636363u;
+    *w = x;
+}
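+
+/*
+ * Editorial sanity check: SubWord applies the AES S-box to each byte of
+ * the word independently.  For w = 0 every intermediate term above
+ * vanishes, leaving only the final affine constant, so:
+ *
+ *     u32 w = 0;
+ *     SubWord(&w);     // w == 0x63636363, i.e. S(0x00) = 0x63 per byte
+ */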
+
+static void SubLong(u64 *w)
+{
+    u64 x, y, a1, a2, a3, a4, a5, a6;
+
+    x = *w;
+    y = ((x & 0xFEFEFEFEFEFEFEFEuLL) >> 1) | ((x & 0x0101010101010101uLL) << 7);
+    x &= 0xDDDDDDDDDDDDDDDDuLL;
+    x ^= y & 0x5757575757575757uLL;
+    y = ((y & 0xFEFEFEFEFEFEFEFEuLL) >> 1) | ((y & 0x0101010101010101uLL) << 7);
+    x ^= y & 0x1C1C1C1C1C1C1C1CuLL;
+    y = ((y & 0xFEFEFEFEFEFEFEFEuLL) >> 1) | ((y & 0x0101010101010101uLL) << 7);
+    x ^= y & 0x4A4A4A4A4A4A4A4AuLL;
+    y = ((y & 0xFEFEFEFEFEFEFEFEuLL) >> 1) | ((y & 0x0101010101010101uLL) << 7);
+    x ^= y & 0x4242424242424242uLL;
+    y = ((y & 0xFEFEFEFEFEFEFEFEuLL) >> 1) | ((y & 0x0101010101010101uLL) << 7);
+    x ^= y & 0x6464646464646464uLL;
+    y = ((y & 0xFEFEFEFEFEFEFEFEuLL) >> 1) | ((y & 0x0101010101010101uLL) << 7);
+    x ^= y & 0xE0E0E0E0E0E0E0E0uLL;
+    a1 = x;
+    a1 ^= (x & 0xF0F0F0F0F0F0F0F0uLL) >> 4;
+    a2 = ((x & 0xCCCCCCCCCCCCCCCCuLL) >> 2) | ((x & 0x3333333333333333uLL) << 2);
+    a3 = x & a1;
+    a3 ^= (a3 & 0xAAAAAAAAAAAAAAAAuLL) >> 1;
+    a3 ^= (((x << 1) & a1) ^ ((a1 << 1) & x)) & 0xAAAAAAAAAAAAAAAAuLL;
+    a4 = a2 & a1;
+    a4 ^= (a4 & 0xAAAAAAAAAAAAAAAAuLL) >> 1;
+    a4 ^= (((a2 << 1) & a1) ^ ((a1 << 1) & a2)) & 0xAAAAAAAAAAAAAAAAuLL;
+    a5 = (a3 & 0xCCCCCCCCCCCCCCCCuLL) >> 2;
+    a3 ^= ((a4 << 2) ^ a4) & 0xCCCCCCCCCCCCCCCCuLL;
+    a4 = a5 & 0x2222222222222222uLL;
+    a4 |= a4 >> 1;
+    a4 ^= (a5 << 1) & 0x2222222222222222uLL;
+    a3 ^= a4;
+    a5 = a3 & 0xA0A0A0A0A0A0A0A0uLL;
+    a5 |= a5 >> 1;
+    a5 ^= (a3 << 1) & 0xA0A0A0A0A0A0A0A0uLL;
+    a4 = a5 & 0xC0C0C0C0C0C0C0C0uLL;
+    a6 = a4 >> 2;
+    a4 ^= (a5 << 2) & 0xC0C0C0C0C0C0C0C0uLL;
+    a5 = a6 & 0x2020202020202020uLL;
+    a5 |= a5 >> 1;
+    a5 ^= (a6 << 1) & 0x2020202020202020uLL;
+    a4 |= a5;
+    a3 ^= a4 >> 4;
+    a3 &= 0x0F0F0F0F0F0F0F0FuLL;
+    a2 = a3;
+    a2 ^= (a3 & 0x0C0C0C0C0C0C0C0CuLL) >> 2;
+    a4 = a3 & a2;
+    a4 ^= (a4 & 0x0A0A0A0A0A0A0A0AuLL) >> 1;
+    a4 ^= (((a3 << 1) & a2) ^ ((a2 << 1) & a3)) & 0x0A0A0A0A0A0A0A0AuLL;
+    a5 = a4 & 0x0808080808080808uLL;
+    a5 |= a5 >> 1;
+    a5 ^= (a4 << 1) & 0x0808080808080808uLL;
+    a4 ^= a5 >> 2;
+    a4 &= 0x0303030303030303uLL;
+    a4 ^= (a4 & 0x0202020202020202uLL) >> 1;
+    a4 |= a4 << 2;
+    a3 = a2 & a4;
+    a3 ^= (a3 & 0x0A0A0A0A0A0A0A0AuLL) >> 1;
+    a3 ^= (((a2 << 1) & a4) ^ ((a4 << 1) & a2)) & 0x0A0A0A0A0A0A0A0AuLL;
+    a3 |= a3 << 4;
+    a2 = ((a1 & 0xCCCCCCCCCCCCCCCCuLL) >> 2) | ((a1 & 0x3333333333333333uLL) << 2);
+    x = a1 & a3;
+    x ^= (x & 0xAAAAAAAAAAAAAAAAuLL) >> 1;
+    x ^= (((a1 << 1) & a3) ^ ((a3 << 1) & a1)) & 0xAAAAAAAAAAAAAAAAuLL;
+    a4 = a2 & a3;
+    a4 ^= (a4 & 0xAAAAAAAAAAAAAAAAuLL) >> 1;
+    a4 ^= (((a2 << 1) & a3) ^ ((a3 << 1) & a2)) & 0xAAAAAAAAAAAAAAAAuLL;
+    a5 = (x & 0xCCCCCCCCCCCCCCCCuLL) >> 2;
+    x ^= ((a4 << 2) ^ a4) & 0xCCCCCCCCCCCCCCCCuLL;
+    a4 = a5 & 0x2222222222222222uLL;
+    a4 |= a4 >> 1;
+    a4 ^= (a5 << 1) & 0x2222222222222222uLL;
+    x ^= a4;
+    y = ((x & 0xFEFEFEFEFEFEFEFEuLL) >> 1) | ((x & 0x0101010101010101uLL) << 7);
+    x &= 0x3939393939393939uLL;
+    x ^= y & 0x3F3F3F3F3F3F3F3FuLL;
+    y = ((y & 0xFCFCFCFCFCFCFCFCuLL) >> 2) | ((y & 0x0303030303030303uLL) << 6);
+    x ^= y & 0x9797979797979797uLL;
+    y = ((y & 0xFEFEFEFEFEFEFEFEuLL) >> 1) | ((y & 0x0101010101010101uLL) << 7);
+    x ^= y & 0x9B9B9B9B9B9B9B9BuLL;
+    y = ((y & 0xFEFEFEFEFEFEFEFEuLL) >> 1) | ((y & 0x0101010101010101uLL) << 7);
+    x ^= y & 0x3C3C3C3C3C3C3C3CuLL;
+    y = ((y & 0xFEFEFEFEFEFEFEFEuLL) >> 1) | ((y & 0x0101010101010101uLL) << 7);
+    x ^= y & 0xDDDDDDDDDDDDDDDDuLL;
+    y = ((y & 0xFEFEFEFEFEFEFEFEuLL) >> 1) | ((y & 0x0101010101010101uLL) << 7);
+    x ^= y & 0x7272727272727272uLL;
+    x ^= 0x6363636363636363uLL;
+    *w = x;
+}
+
+/*
+ * This computes w := (S^-1 * (w + c))^-1
+ */
+static void InvSubLong(u64 *w)
+{
+    u64 x, y, a1, a2, a3, a4, a5, a6;
+
+    x = *w;
+    x ^= 0x6363636363636363uLL;
+    y = ((x & 0xFEFEFEFEFEFEFEFEuLL) >> 1) | ((x & 0x0101010101010101uLL) << 7);
+    x &= 0xFDFDFDFDFDFDFDFDuLL;
+    x ^= y & 0x5E5E5E5E5E5E5E5EuLL;
+    y = ((y & 0xFEFEFEFEFEFEFEFEuLL) >> 1) | ((y & 0x0101010101010101uLL) << 7);
+    x ^= y & 0xF3F3F3F3F3F3F3F3uLL;
+    y = ((y & 0xFEFEFEFEFEFEFEFEuLL) >> 1) | ((y & 0x0101010101010101uLL) << 7);
+    x ^= y & 0xF5F5F5F5F5F5F5F5uLL;
+    y = ((y & 0xFEFEFEFEFEFEFEFEuLL) >> 1) | ((y & 0x0101010101010101uLL) << 7);
+    x ^= y & 0x7878787878787878uLL;
+    y = ((y & 0xFEFEFEFEFEFEFEFEuLL) >> 1) | ((y & 0x0101010101010101uLL) << 7);
+    x ^= y & 0x7777777777777777uLL;
+    y = ((y & 0xFEFEFEFEFEFEFEFEuLL) >> 1) | ((y & 0x0101010101010101uLL) << 7);
+    x ^= y & 0x1515151515151515uLL;
+    y = ((y & 0xFEFEFEFEFEFEFEFEuLL) >> 1) | ((y & 0x0101010101010101uLL) << 7);
+    x ^= y & 0xA5A5A5A5A5A5A5A5uLL;
+    a1 = x;
+    a1 ^= (x & 0xF0F0F0F0F0F0F0F0uLL) >> 4;
+    a2 = ((x & 0xCCCCCCCCCCCCCCCCuLL) >> 2) | ((x & 0x3333333333333333uLL) << 2);
+    a3 = x & a1;
+    a3 ^= (a3 & 0xAAAAAAAAAAAAAAAAuLL) >> 1;
+    a3 ^= (((x << 1) & a1) ^ ((a1 << 1) & x)) & 0xAAAAAAAAAAAAAAAAuLL;
+    a4 = a2 & a1;
+    a4 ^= (a4 & 0xAAAAAAAAAAAAAAAAuLL) >> 1;
+    a4 ^= (((a2 << 1) & a1) ^ ((a1 << 1) & a2)) & 0xAAAAAAAAAAAAAAAAuLL;
+    a5 = (a3 & 0xCCCCCCCCCCCCCCCCuLL) >> 2;
+    a3 ^= ((a4 << 2) ^ a4) & 0xCCCCCCCCCCCCCCCCuLL;
+    a4 = a5 & 0x2222222222222222uLL;
+    a4 |= a4 >> 1;
+    a4 ^= (a5 << 1) & 0x2222222222222222uLL;
+    a3 ^= a4;
+    a5 = a3 & 0xA0A0A0A0A0A0A0A0uLL;
+    a5 |= a5 >> 1;
+    a5 ^= (a3 << 1) & 0xA0A0A0A0A0A0A0A0uLL;
+    a4 = a5 & 0xC0C0C0C0C0C0C0C0uLL;
+    a6 = a4 >> 2;
+    a4 ^= (a5 << 2) & 0xC0C0C0C0C0C0C0C0uLL;
+    a5 = a6 & 0x2020202020202020uLL;
+    a5 |= a5 >> 1;
+    a5 ^= (a6 << 1) & 0x2020202020202020uLL;
+    a4 |= a5;
+    a3 ^= a4 >> 4;
+    a3 &= 0x0F0F0F0F0F0F0F0FuLL;
+    a2 = a3;
+    a2 ^= (a3 & 0x0C0C0C0C0C0C0C0CuLL) >> 2;
+    a4 = a3 & a2;
+    a4 ^= (a4 & 0x0A0A0A0A0A0A0A0AuLL) >> 1;
+    a4 ^= (((a3 << 1) & a2) ^ ((a2 << 1) & a3)) & 0x0A0A0A0A0A0A0A0AuLL;
+    a5 = a4 & 0x0808080808080808uLL;
+    a5 |= a5 >> 1;
+    a5 ^= (a4 << 1) & 0x0808080808080808uLL;
+    a4 ^= a5 >> 2;
+    a4 &= 0x0303030303030303uLL;
+    a4 ^= (a4 & 0x0202020202020202uLL) >> 1;
+    a4 |= a4 << 2;
+    a3 = a2 & a4;
+    a3 ^= (a3 & 0x0A0A0A0A0A0A0A0AuLL) >> 1;
+    a3 ^= (((a2 << 1) & a4) ^ ((a4 << 1) & a2)) & 0x0A0A0A0A0A0A0A0AuLL;
+    a3 |= a3 << 4;
+    a2 = ((a1 & 0xCCCCCCCCCCCCCCCCuLL) >> 2) | ((a1 & 0x3333333333333333uLL) << 2);
+    x = a1 & a3;
+    x ^= (x & 0xAAAAAAAAAAAAAAAAuLL) >> 1;
+    x ^= (((a1 << 1) & a3) ^ ((a3 << 1) & a1)) & 0xAAAAAAAAAAAAAAAAuLL;
+    a4 = a2 & a3;
+    a4 ^= (a4 & 0xAAAAAAAAAAAAAAAAuLL) >> 1;
+    a4 ^= (((a2 << 1) & a3) ^ ((a3 << 1) & a2)) & 0xAAAAAAAAAAAAAAAAuLL;
+    a5 = (x & 0xCCCCCCCCCCCCCCCCuLL) >> 2;
+    x ^= ((a4 << 2) ^ a4) & 0xCCCCCCCCCCCCCCCCuLL;
+    a4 = a5 & 0x2222222222222222uLL;
+    a4 |= a4 >> 1;
+    a4 ^= (a5 << 1) & 0x2222222222222222uLL;
+    x ^= a4;
+    y = ((x & 0xFEFEFEFEFEFEFEFEuLL) >> 1) | ((x & 0x0101010101010101uLL) << 7);
+    x &= 0xB5B5B5B5B5B5B5B5uLL;
+    x ^= y & 0x4040404040404040uLL;
+    y = ((y & 0xFEFEFEFEFEFEFEFEuLL) >> 1) | ((y & 0x0101010101010101uLL) << 7);
+    x ^= y & 0x8080808080808080uLL;
+    y = ((y & 0xFEFEFEFEFEFEFEFEuLL) >> 1) | ((y & 0x0101010101010101uLL) << 7);
+    x ^= y & 0x1616161616161616uLL;
+    y = ((y & 0xFEFEFEFEFEFEFEFEuLL) >> 1) | ((y & 0x0101010101010101uLL) << 7);
+    x ^= y & 0xEBEBEBEBEBEBEBEBuLL;
+    y = ((y & 0xFEFEFEFEFEFEFEFEuLL) >> 1) | ((y & 0x0101010101010101uLL) << 7);
+    x ^= y & 0x9797979797979797uLL;
+    y = ((y & 0xFEFEFEFEFEFEFEFEuLL) >> 1) | ((y & 0x0101010101010101uLL) << 7);
+    x ^= y & 0xFBFBFBFBFBFBFBFBuLL;
+    y = ((y & 0xFEFEFEFEFEFEFEFEuLL) >> 1) | ((y & 0x0101010101010101uLL) << 7);
+    x ^= y & 0x7D7D7D7D7D7D7D7DuLL;
+    *w = x;
+}
+
+static void ShiftRows(u64 *state)
+{
+    unsigned char s[4];
+    unsigned char *s0;
+    int r;
+
+    s0 = (unsigned char *)state;
+    for (r = 0; r < 4; r++) {
+        s[0] = s0[0*4 + r];
+        s[1] = s0[1*4 + r];
+        s[2] = s0[2*4 + r];
+        s[3] = s0[3*4 + r];
+        s0[0*4 + r] = s[(r+0) % 4];
+        s0[1*4 + r] = s[(r+1) % 4];
+        s0[2*4 + r] = s[(r+2) % 4];
+        s0[3*4 + r] = s[(r+3) % 4];
+    }
+}
+
+static void InvShiftRows(u64 *state)
+{
+    unsigned char s[4];
+    unsigned char *s0;
+    int r;
+
+    s0 = (unsigned char *)state;
+    for (r = 0; r < 4; r++) {
+        s[0] = s0[0*4 + r];
+        s[1] = s0[1*4 + r];
+        s[2] = s0[2*4 + r];
+        s[3] = s0[3*4 + r];
+        s0[0*4 + r] = s[(4-r) % 4];
+        s0[1*4 + r] = s[(5-r) % 4];
+        s0[2*4 + r] = s[(6-r) % 4];
+        s0[3*4 + r] = s[(7-r) % 4];
+    }
+}
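+
+/*
+ * Editorial note: the state is stored column-major, so s0[c*4 + r] is
+ * row r of column c, and the loop above rotates row r left by r
+ * positions, matching FIPS-197.  For r = 1, (s[0],s[1],s[2],s[3])
+ * becomes (s[1],s[2],s[3],s[0]); InvShiftRows rotates right by r to
+ * undo it.
+ */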
+
+static void MixColumns(u64 *state)
+{
+    uni s1;
+    uni s;
+    int c;
+
+    for (c = 0; c < 2; c++) {
+        s1.d = state[c];
+        s.d = s1.d;
+        s.d ^= ((s.d & 0xFFFF0000FFFF0000uLL) >> 16)
+               | ((s.d & 0x0000FFFF0000FFFFuLL) << 16);
+        s.d ^= ((s.d & 0xFF00FF00FF00FF00uLL) >> 8)
+               | ((s.d & 0x00FF00FF00FF00FFuLL) << 8);
+        s.d ^= s1.d;
+        XtimeLong(&s1.d);
+        s.d ^= s1.d;
+        s.b[0] ^= s1.b[1];
+        s.b[1] ^= s1.b[2];
+        s.b[2] ^= s1.b[3];
+        s.b[3] ^= s1.b[0];
+        s.b[4] ^= s1.b[5];
+        s.b[5] ^= s1.b[6];
+        s.b[6] ^= s1.b[7];
+        s.b[7] ^= s1.b[4];
+        state[c] = s.d;
+    }
+}
+
+static void InvMixColumns(u64 *state)
+{
+    uni s1;
+    uni s;
+    int c;
+
+    for (c = 0; c < 2; c++) {
+        s1.d = state[c];
+        s.d = s1.d;
+        s.d ^= ((s.d & 0xFFFF0000FFFF0000uLL) >> 16)
+               | ((s.d & 0x0000FFFF0000FFFFuLL) << 16);
+        s.d ^= ((s.d & 0xFF00FF00FF00FF00uLL) >> 8)
+               | ((s.d & 0x00FF00FF00FF00FFuLL) << 8);
+        s.d ^= s1.d;
+        XtimeLong(&s1.d);
+        s.d ^= s1.d;
+        s.b[0] ^= s1.b[1];
+        s.b[1] ^= s1.b[2];
+        s.b[2] ^= s1.b[3];
+        s.b[3] ^= s1.b[0];
+        s.b[4] ^= s1.b[5];
+        s.b[5] ^= s1.b[6];
+        s.b[6] ^= s1.b[7];
+        s.b[7] ^= s1.b[4];
+        XtimeLong(&s1.d);
+        s1.d ^= ((s1.d & 0xFFFF0000FFFF0000uLL) >> 16)
+                | ((s1.d & 0x0000FFFF0000FFFFuLL) << 16);
+        s.d ^= s1.d;
+        XtimeLong(&s1.d);
+        s1.d ^= ((s1.d & 0xFF00FF00FF00FF00uLL) >> 8)
+                | ((s1.d & 0x00FF00FF00FF00FFuLL) << 8);
+        s.d ^= s1.d;
+        state[c] = s.d;
+    }
+}
+
+static void AddRoundKey(u64 *state, const u64 *w)
+{
+    state[0] ^= w[0];
+    state[1] ^= w[1];
+}
+
+static void Cipher(const unsigned char *in, unsigned char *out,
+                   const u64 *w, int nr)
+{
+    u64 state[2];
+    int i;
+
+    memcpy(state, in, 16);
+
+    AddRoundKey(state, w);
+
+    for (i = 1; i < nr; i++) {
+        SubLong(&state[0]);
+        SubLong(&state[1]);
+        ShiftRows(state);
+        MixColumns(state);
+        AddRoundKey(state, w + i*2);
+    }
+
+    SubLong(&state[0]);
+    SubLong(&state[1]);
+    ShiftRows(state);
+    AddRoundKey(state, w + nr*2);
+
+    memcpy(out, state, 16);
+}
+
+static void InvCipher(const unsigned char *in, unsigned char *out,
+                      const u64 *w, int nr)
+{
+    u64 state[2];
+    int i;
+
+    memcpy(state, in, 16);
+
+    AddRoundKey(state, w + nr*2);
+
+    for (i = nr - 1; i > 0; i--) {
+        InvShiftRows(state);
+        InvSubLong(&state[0]);
+        InvSubLong(&state[1]);
+        AddRoundKey(state, w + i*2);
+        InvMixColumns(state);
+    }
+
+    InvShiftRows(state);
+    InvSubLong(&state[0]);
+    InvSubLong(&state[1]);
+    AddRoundKey(state, w);
+
+    memcpy(out, state, 16);
+}
+
+static void RotWord(u32 *x)
+{
+    unsigned char *w0;
+    unsigned char tmp;
+
+    w0 = (unsigned char *)x;
+    tmp = w0[0];
+    w0[0] = w0[1];
+    w0[1] = w0[2];
+    w0[2] = w0[3];
+    w0[3] = tmp;
+}
+
+static void KeyExpansion(const unsigned char *key, u64 *w,
+                         int nr, int nk)
+{
+    u32 rcon;
+    uni prev;
+    u32 temp;
+    int i, n;
+
+    memcpy(w, key, nk*4);
+    memcpy(&rcon, "\1\0\0\0", 4);
+    n = nk/2;
+    prev.d = w[n-1];
+    for (i = n; i < (nr+1)*2; i++) {
+        temp = prev.w[1];
+        if (i % n == 0) {
+            RotWord(&temp);
+            SubWord(&temp);
+            temp ^= rcon;
+            XtimeWord(&rcon);
+        } else if (nk > 6 && i % n == 2) {
+            SubWord(&temp);
+        }
+        prev.d = w[i-n];
+        prev.w[0] ^= temp;
+        prev.w[1] ^= prev.w[0];
+        w[i] = prev.d;
+    }
+}
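+
+/*
+ * Editorial note: rather than using a precomputed rcon table, this
+ * KeyExpansion derives the round constants on the fly.  rcon starts as
+ * the little-endian word {01,00,00,00} and XtimeWord steps it through
+ * 01, 02, 04, 08, 10, 20, 40, 80, 1b, 36, exactly the x^i powers in
+ * GF(2^8) that FIPS-197 prescribes.
+ */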
+
+/**
+ * Expand the cipher key into the encryption key schedule.
+ */
+int AES_set_encrypt_key(const unsigned char *userKey, const int bits,
+                        AES_KEY *key)
+{
+    u64 *rk;
+
+    if (!userKey || !key)
+        return -1;
+    if (bits != 128 && bits != 192 && bits != 256)
+        return -2;
+
+    rk = (u64*)key->rd_key;
+
+    if (bits == 128)
+        key->rounds = 10;
+    else if (bits == 192)
+        key->rounds = 12;
+    else
+        key->rounds = 14;
+
+    KeyExpansion(userKey, rk, key->rounds, bits/32);
+    return 0;
+}
+
+/**
+ * Expand the cipher key into the decryption key schedule.
+ */
+int AES_set_decrypt_key(const unsigned char *userKey, const int bits,
+                        AES_KEY *key)
+{
+    return AES_set_encrypt_key(userKey, bits, key);
+}
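+
+/*
+ * Editorial note: in this constant-time path the decryption schedule is
+ * identical to the encryption schedule, because InvCipher walks the
+ * round keys in reverse and applies InvMixColumns to the state itself.
+ * Contrast the table-based AES_set_decrypt_key further below, which
+ * must reverse the round keys and run them through InvMixColumn.
+ */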
+
+/*
+ * Encrypt a single block
+ * in and out can overlap
+ */
+void AES_encrypt(const unsigned char *in, unsigned char *out,
+                 const AES_KEY *key)
+{
+    const u64 *rk;
+
+    assert(in && out && key);
+    rk = (u64*)key->rd_key;
+
+    Cipher(in, out, rk, key->rounds);
+}
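+
+/*
+ * Editorial sketch of a known-answer test against the FIPS-197
+ * Appendix C.1 vector (AES-128): the key 000102...0e0f and plaintext
+ * 00112233...eeff must encrypt to 69c4e0d86a7b0430d8cdb78070b4c55a.
+ *
+ *     static const unsigned char k[16] = {
+ *         0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07,
+ *         0x08,0x09,0x0a,0x0b,0x0c,0x0d,0x0e,0x0f };
+ *     static const unsigned char p[16] = {
+ *         0x00,0x11,0x22,0x33,0x44,0x55,0x66,0x77,
+ *         0x88,0x99,0xaa,0xbb,0xcc,0xdd,0xee,0xff };
+ *     unsigned char c[16];
+ *     AES_KEY ks;
+ *     AES_set_encrypt_key(k, 128, &ks);
+ *     AES_encrypt(p, c, &ks);   // c[0] == 0x69 ... c[15] == 0x5a
+ */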
+
+/*
+ * Decrypt a single block
+ * in and out can overlap
+ */
+void AES_decrypt(const unsigned char *in, unsigned char *out,
+                 const AES_KEY *key)
+{
+    const u64 *rk;
+
+    assert(in && out && key);
+    rk = (u64*)key->rd_key;
+
+    InvCipher(in, out, rk, key->rounds);
+}
+#elif !defined(AES_ASM)
+/*-
+Te0[x] = S [x].[02, 01, 01, 03];
+Te1[x] = S [x].[03, 02, 01, 01];
+Te2[x] = S [x].[01, 03, 02, 01];
+Te3[x] = S [x].[01, 01, 03, 02];
+
+Td0[x] = Si[x].[0e, 09, 0d, 0b];
+Td1[x] = Si[x].[0b, 0e, 09, 0d];
+Td2[x] = Si[x].[0d, 0b, 0e, 09];
+Td3[x] = Si[x].[09, 0d, 0b, 0e];
+Td4[x] = Si[x].[01];
+*/
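+
+/*
+ * Editorial worked example of the scheme above: for x = 0x00 the S-box
+ * gives S[0x00] = 0x63; in GF(2^8), 02*63 = c6 and 03*63 = c6^63 = a5,
+ * so Te0[0x00] packs to 0xc66363a5, the first entry of the table below.
+ */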
+
+static const u32 Te0[256] = {
+    0xc66363a5U, 0xf87c7c84U, 0xee777799U, 0xf67b7b8dU,
+    0xfff2f20dU, 0xd66b6bbdU, 0xde6f6fb1U, 0x91c5c554U,
+    0x60303050U, 0x02010103U, 0xce6767a9U, 0x562b2b7dU,
+    0xe7fefe19U, 0xb5d7d762U, 0x4dababe6U, 0xec76769aU,
+    0x8fcaca45U, 0x1f82829dU, 0x89c9c940U, 0xfa7d7d87U,
+    0xeffafa15U, 0xb25959ebU, 0x8e4747c9U, 0xfbf0f00bU,
+    0x41adadecU, 0xb3d4d467U, 0x5fa2a2fdU, 0x45afafeaU,
+    0x239c9cbfU, 0x53a4a4f7U, 0xe4727296U, 0x9bc0c05bU,
+    0x75b7b7c2U, 0xe1fdfd1cU, 0x3d9393aeU, 0x4c26266aU,
+    0x6c36365aU, 0x7e3f3f41U, 0xf5f7f702U, 0x83cccc4fU,
+    0x6834345cU, 0x51a5a5f4U, 0xd1e5e534U, 0xf9f1f108U,
+    0xe2717193U, 0xabd8d873U, 0x62313153U, 0x2a15153fU,
+    0x0804040cU, 0x95c7c752U, 0x46232365U, 0x9dc3c35eU,
+    0x30181828U, 0x379696a1U, 0x0a05050fU, 0x2f9a9ab5U,
+    0x0e070709U, 0x24121236U, 0x1b80809bU, 0xdfe2e23dU,
+    0xcdebeb26U, 0x4e272769U, 0x7fb2b2cdU, 0xea75759fU,
+    0x1209091bU, 0x1d83839eU, 0x582c2c74U, 0x341a1a2eU,
+    0x361b1b2dU, 0xdc6e6eb2U, 0xb45a5aeeU, 0x5ba0a0fbU,
+    0xa45252f6U, 0x763b3b4dU, 0xb7d6d661U, 0x7db3b3ceU,
+    0x5229297bU, 0xdde3e33eU, 0x5e2f2f71U, 0x13848497U,
+    0xa65353f5U, 0xb9d1d168U, 0x00000000U, 0xc1eded2cU,
+    0x40202060U, 0xe3fcfc1fU, 0x79b1b1c8U, 0xb65b5bedU,
+    0xd46a6abeU, 0x8dcbcb46U, 0x67bebed9U, 0x7239394bU,
+    0x944a4adeU, 0x984c4cd4U, 0xb05858e8U, 0x85cfcf4aU,
+    0xbbd0d06bU, 0xc5efef2aU, 0x4faaaae5U, 0xedfbfb16U,
+    0x864343c5U, 0x9a4d4dd7U, 0x66333355U, 0x11858594U,
+    0x8a4545cfU, 0xe9f9f910U, 0x04020206U, 0xfe7f7f81U,
+    0xa05050f0U, 0x783c3c44U, 0x259f9fbaU, 0x4ba8a8e3U,
+    0xa25151f3U, 0x5da3a3feU, 0x804040c0U, 0x058f8f8aU,
+    0x3f9292adU, 0x219d9dbcU, 0x70383848U, 0xf1f5f504U,
+    0x63bcbcdfU, 0x77b6b6c1U, 0xafdada75U, 0x42212163U,
+    0x20101030U, 0xe5ffff1aU, 0xfdf3f30eU, 0xbfd2d26dU,
+    0x81cdcd4cU, 0x180c0c14U, 0x26131335U, 0xc3ecec2fU,
+    0xbe5f5fe1U, 0x359797a2U, 0x884444ccU, 0x2e171739U,
+    0x93c4c457U, 0x55a7a7f2U, 0xfc7e7e82U, 0x7a3d3d47U,
+    0xc86464acU, 0xba5d5de7U, 0x3219192bU, 0xe6737395U,
+    0xc06060a0U, 0x19818198U, 0x9e4f4fd1U, 0xa3dcdc7fU,
+    0x44222266U, 0x542a2a7eU, 0x3b9090abU, 0x0b888883U,
+    0x8c4646caU, 0xc7eeee29U, 0x6bb8b8d3U, 0x2814143cU,
+    0xa7dede79U, 0xbc5e5ee2U, 0x160b0b1dU, 0xaddbdb76U,
+    0xdbe0e03bU, 0x64323256U, 0x743a3a4eU, 0x140a0a1eU,
+    0x924949dbU, 0x0c06060aU, 0x4824246cU, 0xb85c5ce4U,
+    0x9fc2c25dU, 0xbdd3d36eU, 0x43acacefU, 0xc46262a6U,
+    0x399191a8U, 0x319595a4U, 0xd3e4e437U, 0xf279798bU,
+    0xd5e7e732U, 0x8bc8c843U, 0x6e373759U, 0xda6d6db7U,
+    0x018d8d8cU, 0xb1d5d564U, 0x9c4e4ed2U, 0x49a9a9e0U,
+    0xd86c6cb4U, 0xac5656faU, 0xf3f4f407U, 0xcfeaea25U,
+    0xca6565afU, 0xf47a7a8eU, 0x47aeaee9U, 0x10080818U,
+    0x6fbabad5U, 0xf0787888U, 0x4a25256fU, 0x5c2e2e72U,
+    0x381c1c24U, 0x57a6a6f1U, 0x73b4b4c7U, 0x97c6c651U,
+    0xcbe8e823U, 0xa1dddd7cU, 0xe874749cU, 0x3e1f1f21U,
+    0x964b4bddU, 0x61bdbddcU, 0x0d8b8b86U, 0x0f8a8a85U,
+    0xe0707090U, 0x7c3e3e42U, 0x71b5b5c4U, 0xcc6666aaU,
+    0x904848d8U, 0x06030305U, 0xf7f6f601U, 0x1c0e0e12U,
+    0xc26161a3U, 0x6a35355fU, 0xae5757f9U, 0x69b9b9d0U,
+    0x17868691U, 0x99c1c158U, 0x3a1d1d27U, 0x279e9eb9U,
+    0xd9e1e138U, 0xebf8f813U, 0x2b9898b3U, 0x22111133U,
+    0xd26969bbU, 0xa9d9d970U, 0x078e8e89U, 0x339494a7U,
+    0x2d9b9bb6U, 0x3c1e1e22U, 0x15878792U, 0xc9e9e920U,
+    0x87cece49U, 0xaa5555ffU, 0x50282878U, 0xa5dfdf7aU,
+    0x038c8c8fU, 0x59a1a1f8U, 0x09898980U, 0x1a0d0d17U,
+    0x65bfbfdaU, 0xd7e6e631U, 0x844242c6U, 0xd06868b8U,
+    0x824141c3U, 0x299999b0U, 0x5a2d2d77U, 0x1e0f0f11U,
+    0x7bb0b0cbU, 0xa85454fcU, 0x6dbbbbd6U, 0x2c16163aU,
+};
+static const u32 Te1[256] = {
+    0xa5c66363U, 0x84f87c7cU, 0x99ee7777U, 0x8df67b7bU,
+    0x0dfff2f2U, 0xbdd66b6bU, 0xb1de6f6fU, 0x5491c5c5U,
+    0x50603030U, 0x03020101U, 0xa9ce6767U, 0x7d562b2bU,
+    0x19e7fefeU, 0x62b5d7d7U, 0xe64dababU, 0x9aec7676U,
+    0x458fcacaU, 0x9d1f8282U, 0x4089c9c9U, 0x87fa7d7dU,
+    0x15effafaU, 0xebb25959U, 0xc98e4747U, 0x0bfbf0f0U,
+    0xec41adadU, 0x67b3d4d4U, 0xfd5fa2a2U, 0xea45afafU,
+    0xbf239c9cU, 0xf753a4a4U, 0x96e47272U, 0x5b9bc0c0U,
+    0xc275b7b7U, 0x1ce1fdfdU, 0xae3d9393U, 0x6a4c2626U,
+    0x5a6c3636U, 0x417e3f3fU, 0x02f5f7f7U, 0x4f83ccccU,
+    0x5c683434U, 0xf451a5a5U, 0x34d1e5e5U, 0x08f9f1f1U,
+    0x93e27171U, 0x73abd8d8U, 0x53623131U, 0x3f2a1515U,
+    0x0c080404U, 0x5295c7c7U, 0x65462323U, 0x5e9dc3c3U,
+    0x28301818U, 0xa1379696U, 0x0f0a0505U, 0xb52f9a9aU,
+    0x090e0707U, 0x36241212U, 0x9b1b8080U, 0x3ddfe2e2U,
+    0x26cdebebU, 0x694e2727U, 0xcd7fb2b2U, 0x9fea7575U,
+    0x1b120909U, 0x9e1d8383U, 0x74582c2cU, 0x2e341a1aU,
+    0x2d361b1bU, 0xb2dc6e6eU, 0xeeb45a5aU, 0xfb5ba0a0U,
+    0xf6a45252U, 0x4d763b3bU, 0x61b7d6d6U, 0xce7db3b3U,
+    0x7b522929U, 0x3edde3e3U, 0x715e2f2fU, 0x97138484U,
+    0xf5a65353U, 0x68b9d1d1U, 0x00000000U, 0x2cc1ededU,
+    0x60402020U, 0x1fe3fcfcU, 0xc879b1b1U, 0xedb65b5bU,
+    0xbed46a6aU, 0x468dcbcbU, 0xd967bebeU, 0x4b723939U,
+    0xde944a4aU, 0xd4984c4cU, 0xe8b05858U, 0x4a85cfcfU,
+    0x6bbbd0d0U, 0x2ac5efefU, 0xe54faaaaU, 0x16edfbfbU,
+    0xc5864343U, 0xd79a4d4dU, 0x55663333U, 0x94118585U,
+    0xcf8a4545U, 0x10e9f9f9U, 0x06040202U, 0x81fe7f7fU,
+    0xf0a05050U, 0x44783c3cU, 0xba259f9fU, 0xe34ba8a8U,
+    0xf3a25151U, 0xfe5da3a3U, 0xc0804040U, 0x8a058f8fU,
+    0xad3f9292U, 0xbc219d9dU, 0x48703838U, 0x04f1f5f5U,
+    0xdf63bcbcU, 0xc177b6b6U, 0x75afdadaU, 0x63422121U,
+    0x30201010U, 0x1ae5ffffU, 0x0efdf3f3U, 0x6dbfd2d2U,
+    0x4c81cdcdU, 0x14180c0cU, 0x35261313U, 0x2fc3ececU,
+    0xe1be5f5fU, 0xa2359797U, 0xcc884444U, 0x392e1717U,
+    0x5793c4c4U, 0xf255a7a7U, 0x82fc7e7eU, 0x477a3d3dU,
+    0xacc86464U, 0xe7ba5d5dU, 0x2b321919U, 0x95e67373U,
+    0xa0c06060U, 0x98198181U, 0xd19e4f4fU, 0x7fa3dcdcU,
+    0x66442222U, 0x7e542a2aU, 0xab3b9090U, 0x830b8888U,
+    0xca8c4646U, 0x29c7eeeeU, 0xd36bb8b8U, 0x3c281414U,
+    0x79a7dedeU, 0xe2bc5e5eU, 0x1d160b0bU, 0x76addbdbU,
+    0x3bdbe0e0U, 0x56643232U, 0x4e743a3aU, 0x1e140a0aU,
+    0xdb924949U, 0x0a0c0606U, 0x6c482424U, 0xe4b85c5cU,
+    0x5d9fc2c2U, 0x6ebdd3d3U, 0xef43acacU, 0xa6c46262U,
+    0xa8399191U, 0xa4319595U, 0x37d3e4e4U, 0x8bf27979U,
+    0x32d5e7e7U, 0x438bc8c8U, 0x596e3737U, 0xb7da6d6dU,
+    0x8c018d8dU, 0x64b1d5d5U, 0xd29c4e4eU, 0xe049a9a9U,
+    0xb4d86c6cU, 0xfaac5656U, 0x07f3f4f4U, 0x25cfeaeaU,
+    0xafca6565U, 0x8ef47a7aU, 0xe947aeaeU, 0x18100808U,
+    0xd56fbabaU, 0x88f07878U, 0x6f4a2525U, 0x725c2e2eU,
+    0x24381c1cU, 0xf157a6a6U, 0xc773b4b4U, 0x5197c6c6U,
+    0x23cbe8e8U, 0x7ca1ddddU, 0x9ce87474U, 0x213e1f1fU,
+    0xdd964b4bU, 0xdc61bdbdU, 0x860d8b8bU, 0x850f8a8aU,
+    0x90e07070U, 0x427c3e3eU, 0xc471b5b5U, 0xaacc6666U,
+    0xd8904848U, 0x05060303U, 0x01f7f6f6U, 0x121c0e0eU,
+    0xa3c26161U, 0x5f6a3535U, 0xf9ae5757U, 0xd069b9b9U,
+    0x91178686U, 0x5899c1c1U, 0x273a1d1dU, 0xb9279e9eU,
+    0x38d9e1e1U, 0x13ebf8f8U, 0xb32b9898U, 0x33221111U,
+    0xbbd26969U, 0x70a9d9d9U, 0x89078e8eU, 0xa7339494U,
+    0xb62d9b9bU, 0x223c1e1eU, 0x92158787U, 0x20c9e9e9U,
+    0x4987ceceU, 0xffaa5555U, 0x78502828U, 0x7aa5dfdfU,
+    0x8f038c8cU, 0xf859a1a1U, 0x80098989U, 0x171a0d0dU,
+    0xda65bfbfU, 0x31d7e6e6U, 0xc6844242U, 0xb8d06868U,
+    0xc3824141U, 0xb0299999U, 0x775a2d2dU, 0x111e0f0fU,
+    0xcb7bb0b0U, 0xfca85454U, 0xd66dbbbbU, 0x3a2c1616U,
+};
+static const u32 Te2[256] = {
+    0x63a5c663U, 0x7c84f87cU, 0x7799ee77U, 0x7b8df67bU,
+    0xf20dfff2U, 0x6bbdd66bU, 0x6fb1de6fU, 0xc55491c5U,
+    0x30506030U, 0x01030201U, 0x67a9ce67U, 0x2b7d562bU,
+    0xfe19e7feU, 0xd762b5d7U, 0xabe64dabU, 0x769aec76U,
+    0xca458fcaU, 0x829d1f82U, 0xc94089c9U, 0x7d87fa7dU,
+    0xfa15effaU, 0x59ebb259U, 0x47c98e47U, 0xf00bfbf0U,
+    0xadec41adU, 0xd467b3d4U, 0xa2fd5fa2U, 0xafea45afU,
+    0x9cbf239cU, 0xa4f753a4U, 0x7296e472U, 0xc05b9bc0U,
+    0xb7c275b7U, 0xfd1ce1fdU, 0x93ae3d93U, 0x266a4c26U,
+    0x365a6c36U, 0x3f417e3fU, 0xf702f5f7U, 0xcc4f83ccU,
+    0x345c6834U, 0xa5f451a5U, 0xe534d1e5U, 0xf108f9f1U,
+    0x7193e271U, 0xd873abd8U, 0x31536231U, 0x153f2a15U,
+    0x040c0804U, 0xc75295c7U, 0x23654623U, 0xc35e9dc3U,
+    0x18283018U, 0x96a13796U, 0x050f0a05U, 0x9ab52f9aU,
+    0x07090e07U, 0x12362412U, 0x809b1b80U, 0xe23ddfe2U,
+    0xeb26cdebU, 0x27694e27U, 0xb2cd7fb2U, 0x759fea75U,
+    0x091b1209U, 0x839e1d83U, 0x2c74582cU, 0x1a2e341aU,
+    0x1b2d361bU, 0x6eb2dc6eU, 0x5aeeb45aU, 0xa0fb5ba0U,
+    0x52f6a452U, 0x3b4d763bU, 0xd661b7d6U, 0xb3ce7db3U,
+    0x297b5229U, 0xe33edde3U, 0x2f715e2fU, 0x84971384U,
+    0x53f5a653U, 0xd168b9d1U, 0x00000000U, 0xed2cc1edU,
+    0x20604020U, 0xfc1fe3fcU, 0xb1c879b1U, 0x5bedb65bU,
+    0x6abed46aU, 0xcb468dcbU, 0xbed967beU, 0x394b7239U,
+    0x4ade944aU, 0x4cd4984cU, 0x58e8b058U, 0xcf4a85cfU,
+    0xd06bbbd0U, 0xef2ac5efU, 0xaae54faaU, 0xfb16edfbU,
+    0x43c58643U, 0x4dd79a4dU, 0x33556633U, 0x85941185U,
+    0x45cf8a45U, 0xf910e9f9U, 0x02060402U, 0x7f81fe7fU,
+    0x50f0a050U, 0x3c44783cU, 0x9fba259fU, 0xa8e34ba8U,
+    0x51f3a251U, 0xa3fe5da3U, 0x40c08040U, 0x8f8a058fU,
+    0x92ad3f92U, 0x9dbc219dU, 0x38487038U, 0xf504f1f5U,
+    0xbcdf63bcU, 0xb6c177b6U, 0xda75afdaU, 0x21634221U,
+    0x10302010U, 0xff1ae5ffU, 0xf30efdf3U, 0xd26dbfd2U,
+    0xcd4c81cdU, 0x0c14180cU, 0x13352613U, 0xec2fc3ecU,
+    0x5fe1be5fU, 0x97a23597U, 0x44cc8844U, 0x17392e17U,
+    0xc45793c4U, 0xa7f255a7U, 0x7e82fc7eU, 0x3d477a3dU,
+    0x64acc864U, 0x5de7ba5dU, 0x192b3219U, 0x7395e673U,
+    0x60a0c060U, 0x81981981U, 0x4fd19e4fU, 0xdc7fa3dcU,
+    0x22664422U, 0x2a7e542aU, 0x90ab3b90U, 0x88830b88U,
+    0x46ca8c46U, 0xee29c7eeU, 0xb8d36bb8U, 0x143c2814U,
+    0xde79a7deU, 0x5ee2bc5eU, 0x0b1d160bU, 0xdb76addbU,
+    0xe03bdbe0U, 0x32566432U, 0x3a4e743aU, 0x0a1e140aU,
+    0x49db9249U, 0x060a0c06U, 0x246c4824U, 0x5ce4b85cU,
+    0xc25d9fc2U, 0xd36ebdd3U, 0xacef43acU, 0x62a6c462U,
+    0x91a83991U, 0x95a43195U, 0xe437d3e4U, 0x798bf279U,
+    0xe732d5e7U, 0xc8438bc8U, 0x37596e37U, 0x6db7da6dU,
+    0x8d8c018dU, 0xd564b1d5U, 0x4ed29c4eU, 0xa9e049a9U,
+    0x6cb4d86cU, 0x56faac56U, 0xf407f3f4U, 0xea25cfeaU,
+    0x65afca65U, 0x7a8ef47aU, 0xaee947aeU, 0x08181008U,
+    0xbad56fbaU, 0x7888f078U, 0x256f4a25U, 0x2e725c2eU,
+    0x1c24381cU, 0xa6f157a6U, 0xb4c773b4U, 0xc65197c6U,
+    0xe823cbe8U, 0xdd7ca1ddU, 0x749ce874U, 0x1f213e1fU,
+    0x4bdd964bU, 0xbddc61bdU, 0x8b860d8bU, 0x8a850f8aU,
+    0x7090e070U, 0x3e427c3eU, 0xb5c471b5U, 0x66aacc66U,
+    0x48d89048U, 0x03050603U, 0xf601f7f6U, 0x0e121c0eU,
+    0x61a3c261U, 0x355f6a35U, 0x57f9ae57U, 0xb9d069b9U,
+    0x86911786U, 0xc15899c1U, 0x1d273a1dU, 0x9eb9279eU,
+    0xe138d9e1U, 0xf813ebf8U, 0x98b32b98U, 0x11332211U,
+    0x69bbd269U, 0xd970a9d9U, 0x8e89078eU, 0x94a73394U,
+    0x9bb62d9bU, 0x1e223c1eU, 0x87921587U, 0xe920c9e9U,
+    0xce4987ceU, 0x55ffaa55U, 0x28785028U, 0xdf7aa5dfU,
+    0x8c8f038cU, 0xa1f859a1U, 0x89800989U, 0x0d171a0dU,
+    0xbfda65bfU, 0xe631d7e6U, 0x42c68442U, 0x68b8d068U,
+    0x41c38241U, 0x99b02999U, 0x2d775a2dU, 0x0f111e0fU,
+    0xb0cb7bb0U, 0x54fca854U, 0xbbd66dbbU, 0x163a2c16U,
+};
+static const u32 Te3[256] = {
+    0x6363a5c6U, 0x7c7c84f8U, 0x777799eeU, 0x7b7b8df6U,
+    0xf2f20dffU, 0x6b6bbdd6U, 0x6f6fb1deU, 0xc5c55491U,
+    0x30305060U, 0x01010302U, 0x6767a9ceU, 0x2b2b7d56U,
+    0xfefe19e7U, 0xd7d762b5U, 0xababe64dU, 0x76769aecU,
+    0xcaca458fU, 0x82829d1fU, 0xc9c94089U, 0x7d7d87faU,
+    0xfafa15efU, 0x5959ebb2U, 0x4747c98eU, 0xf0f00bfbU,
+    0xadadec41U, 0xd4d467b3U, 0xa2a2fd5fU, 0xafafea45U,
+    0x9c9cbf23U, 0xa4a4f753U, 0x727296e4U, 0xc0c05b9bU,
+    0xb7b7c275U, 0xfdfd1ce1U, 0x9393ae3dU, 0x26266a4cU,
+    0x36365a6cU, 0x3f3f417eU, 0xf7f702f5U, 0xcccc4f83U,
+    0x34345c68U, 0xa5a5f451U, 0xe5e534d1U, 0xf1f108f9U,
+    0x717193e2U, 0xd8d873abU, 0x31315362U, 0x15153f2aU,
+    0x04040c08U, 0xc7c75295U, 0x23236546U, 0xc3c35e9dU,
+    0x18182830U, 0x9696a137U, 0x05050f0aU, 0x9a9ab52fU,
+    0x0707090eU, 0x12123624U, 0x80809b1bU, 0xe2e23ddfU,
+    0xebeb26cdU, 0x2727694eU, 0xb2b2cd7fU, 0x75759feaU,
+    0x09091b12U, 0x83839e1dU, 0x2c2c7458U, 0x1a1a2e34U,
+    0x1b1b2d36U, 0x6e6eb2dcU, 0x5a5aeeb4U, 0xa0a0fb5bU,
+    0x5252f6a4U, 0x3b3b4d76U, 0xd6d661b7U, 0xb3b3ce7dU,
+    0x29297b52U, 0xe3e33eddU, 0x2f2f715eU, 0x84849713U,
+    0x5353f5a6U, 0xd1d168b9U, 0x00000000U, 0xeded2cc1U,
+    0x20206040U, 0xfcfc1fe3U, 0xb1b1c879U, 0x5b5bedb6U,
+    0x6a6abed4U, 0xcbcb468dU, 0xbebed967U, 0x39394b72U,
+    0x4a4ade94U, 0x4c4cd498U, 0x5858e8b0U, 0xcfcf4a85U,
+    0xd0d06bbbU, 0xefef2ac5U, 0xaaaae54fU, 0xfbfb16edU,
+    0x4343c586U, 0x4d4dd79aU, 0x33335566U, 0x85859411U,
+    0x4545cf8aU, 0xf9f910e9U, 0x02020604U, 0x7f7f81feU,
+    0x5050f0a0U, 0x3c3c4478U, 0x9f9fba25U, 0xa8a8e34bU,
+    0x5151f3a2U, 0xa3a3fe5dU, 0x4040c080U, 0x8f8f8a05U,
+    0x9292ad3fU, 0x9d9dbc21U, 0x38384870U, 0xf5f504f1U,
+    0xbcbcdf63U, 0xb6b6c177U, 0xdada75afU, 0x21216342U,
+    0x10103020U, 0xffff1ae5U, 0xf3f30efdU, 0xd2d26dbfU,
+    0xcdcd4c81U, 0x0c0c1418U, 0x13133526U, 0xecec2fc3U,
+    0x5f5fe1beU, 0x9797a235U, 0x4444cc88U, 0x1717392eU,
+    0xc4c45793U, 0xa7a7f255U, 0x7e7e82fcU, 0x3d3d477aU,
+    0x6464acc8U, 0x5d5de7baU, 0x19192b32U, 0x737395e6U,
+    0x6060a0c0U, 0x81819819U, 0x4f4fd19eU, 0xdcdc7fa3U,
+    0x22226644U, 0x2a2a7e54U, 0x9090ab3bU, 0x8888830bU,
+    0x4646ca8cU, 0xeeee29c7U, 0xb8b8d36bU, 0x14143c28U,
+    0xdede79a7U, 0x5e5ee2bcU, 0x0b0b1d16U, 0xdbdb76adU,
+    0xe0e03bdbU, 0x32325664U, 0x3a3a4e74U, 0x0a0a1e14U,
+    0x4949db92U, 0x06060a0cU, 0x24246c48U, 0x5c5ce4b8U,
+    0xc2c25d9fU, 0xd3d36ebdU, 0xacacef43U, 0x6262a6c4U,
+    0x9191a839U, 0x9595a431U, 0xe4e437d3U, 0x79798bf2U,
+    0xe7e732d5U, 0xc8c8438bU, 0x3737596eU, 0x6d6db7daU,
+    0x8d8d8c01U, 0xd5d564b1U, 0x4e4ed29cU, 0xa9a9e049U,
+    0x6c6cb4d8U, 0x5656faacU, 0xf4f407f3U, 0xeaea25cfU,
+    0x6565afcaU, 0x7a7a8ef4U, 0xaeaee947U, 0x08081810U,
+    0xbabad56fU, 0x787888f0U, 0x25256f4aU, 0x2e2e725cU,
+    0x1c1c2438U, 0xa6a6f157U, 0xb4b4c773U, 0xc6c65197U,
+    0xe8e823cbU, 0xdddd7ca1U, 0x74749ce8U, 0x1f1f213eU,
+    0x4b4bdd96U, 0xbdbddc61U, 0x8b8b860dU, 0x8a8a850fU,
+    0x707090e0U, 0x3e3e427cU, 0xb5b5c471U, 0x6666aaccU,
+    0x4848d890U, 0x03030506U, 0xf6f601f7U, 0x0e0e121cU,
+    0x6161a3c2U, 0x35355f6aU, 0x5757f9aeU, 0xb9b9d069U,
+    0x86869117U, 0xc1c15899U, 0x1d1d273aU, 0x9e9eb927U,
+    0xe1e138d9U, 0xf8f813ebU, 0x9898b32bU, 0x11113322U,
+    0x6969bbd2U, 0xd9d970a9U, 0x8e8e8907U, 0x9494a733U,
+    0x9b9bb62dU, 0x1e1e223cU, 0x87879215U, 0xe9e920c9U,
+    0xcece4987U, 0x5555ffaaU, 0x28287850U, 0xdfdf7aa5U,
+    0x8c8c8f03U, 0xa1a1f859U, 0x89898009U, 0x0d0d171aU,
+    0xbfbfda65U, 0xe6e631d7U, 0x4242c684U, 0x6868b8d0U,
+    0x4141c382U, 0x9999b029U, 0x2d2d775aU, 0x0f0f111eU,
+    0xb0b0cb7bU, 0x5454fca8U, 0xbbbbd66dU, 0x16163a2cU,
+};
+
+static const u32 Td0[256] = {
+    0x51f4a750U, 0x7e416553U, 0x1a17a4c3U, 0x3a275e96U,
+    0x3bab6bcbU, 0x1f9d45f1U, 0xacfa58abU, 0x4be30393U,
+    0x2030fa55U, 0xad766df6U, 0x88cc7691U, 0xf5024c25U,
+    0x4fe5d7fcU, 0xc52acbd7U, 0x26354480U, 0xb562a38fU,
+    0xdeb15a49U, 0x25ba1b67U, 0x45ea0e98U, 0x5dfec0e1U,
+    0xc32f7502U, 0x814cf012U, 0x8d4697a3U, 0x6bd3f9c6U,
+    0x038f5fe7U, 0x15929c95U, 0xbf6d7aebU, 0x955259daU,
+    0xd4be832dU, 0x587421d3U, 0x49e06929U, 0x8ec9c844U,
+    0x75c2896aU, 0xf48e7978U, 0x99583e6bU, 0x27b971ddU,
+    0xbee14fb6U, 0xf088ad17U, 0xc920ac66U, 0x7dce3ab4U,
+    0x63df4a18U, 0xe51a3182U, 0x97513360U, 0x62537f45U,
+    0xb16477e0U, 0xbb6bae84U, 0xfe81a01cU, 0xf9082b94U,
+    0x70486858U, 0x8f45fd19U, 0x94de6c87U, 0x527bf8b7U,
+    0xab73d323U, 0x724b02e2U, 0xe31f8f57U, 0x6655ab2aU,
+    0xb2eb2807U, 0x2fb5c203U, 0x86c57b9aU, 0xd33708a5U,
+    0x302887f2U, 0x23bfa5b2U, 0x02036abaU, 0xed16825cU,
+    0x8acf1c2bU, 0xa779b492U, 0xf307f2f0U, 0x4e69e2a1U,
+    0x65daf4cdU, 0x0605bed5U, 0xd134621fU, 0xc4a6fe8aU,
+    0x342e539dU, 0xa2f355a0U, 0x058ae132U, 0xa4f6eb75U,
+    0x0b83ec39U, 0x4060efaaU, 0x5e719f06U, 0xbd6e1051U,
+    0x3e218af9U, 0x96dd063dU, 0xdd3e05aeU, 0x4de6bd46U,
+    0x91548db5U, 0x71c45d05U, 0x0406d46fU, 0x605015ffU,
+    0x1998fb24U, 0xd6bde997U, 0x894043ccU, 0x67d99e77U,
+    0xb0e842bdU, 0x07898b88U, 0xe7195b38U, 0x79c8eedbU,
+    0xa17c0a47U, 0x7c420fe9U, 0xf8841ec9U, 0x00000000U,
+    0x09808683U, 0x322bed48U, 0x1e1170acU, 0x6c5a724eU,
+    0xfd0efffbU, 0x0f853856U, 0x3daed51eU, 0x362d3927U,
+    0x0a0fd964U, 0x685ca621U, 0x9b5b54d1U, 0x24362e3aU,
+    0x0c0a67b1U, 0x9357e70fU, 0xb4ee96d2U, 0x1b9b919eU,
+    0x80c0c54fU, 0x61dc20a2U, 0x5a774b69U, 0x1c121a16U,
+    0xe293ba0aU, 0xc0a02ae5U, 0x3c22e043U, 0x121b171dU,
+    0x0e090d0bU, 0xf28bc7adU, 0x2db6a8b9U, 0x141ea9c8U,
+    0x57f11985U, 0xaf75074cU, 0xee99ddbbU, 0xa37f60fdU,
+    0xf701269fU, 0x5c72f5bcU, 0x44663bc5U, 0x5bfb7e34U,
+    0x8b432976U, 0xcb23c6dcU, 0xb6edfc68U, 0xb8e4f163U,
+    0xd731dccaU, 0x42638510U, 0x13972240U, 0x84c61120U,
+    0x854a247dU, 0xd2bb3df8U, 0xaef93211U, 0xc729a16dU,
+    0x1d9e2f4bU, 0xdcb230f3U, 0x0d8652ecU, 0x77c1e3d0U,
+    0x2bb3166cU, 0xa970b999U, 0x119448faU, 0x47e96422U,
+    0xa8fc8cc4U, 0xa0f03f1aU, 0x567d2cd8U, 0x223390efU,
+    0x87494ec7U, 0xd938d1c1U, 0x8ccaa2feU, 0x98d40b36U,
+    0xa6f581cfU, 0xa57ade28U, 0xdab78e26U, 0x3fadbfa4U,
+    0x2c3a9de4U, 0x5078920dU, 0x6a5fcc9bU, 0x547e4662U,
+    0xf68d13c2U, 0x90d8b8e8U, 0x2e39f75eU, 0x82c3aff5U,
+    0x9f5d80beU, 0x69d0937cU, 0x6fd52da9U, 0xcf2512b3U,
+    0xc8ac993bU, 0x10187da7U, 0xe89c636eU, 0xdb3bbb7bU,
+    0xcd267809U, 0x6e5918f4U, 0xec9ab701U, 0x834f9aa8U,
+    0xe6956e65U, 0xaaffe67eU, 0x21bccf08U, 0xef15e8e6U,
+    0xbae79bd9U, 0x4a6f36ceU, 0xea9f09d4U, 0x29b07cd6U,
+    0x31a4b2afU, 0x2a3f2331U, 0xc6a59430U, 0x35a266c0U,
+    0x744ebc37U, 0xfc82caa6U, 0xe090d0b0U, 0x33a7d815U,
+    0xf104984aU, 0x41ecdaf7U, 0x7fcd500eU, 0x1791f62fU,
+    0x764dd68dU, 0x43efb04dU, 0xccaa4d54U, 0xe49604dfU,
+    0x9ed1b5e3U, 0x4c6a881bU, 0xc12c1fb8U, 0x4665517fU,
+    0x9d5eea04U, 0x018c355dU, 0xfa877473U, 0xfb0b412eU,
+    0xb3671d5aU, 0x92dbd252U, 0xe9105633U, 0x6dd64713U,
+    0x9ad7618cU, 0x37a10c7aU, 0x59f8148eU, 0xeb133c89U,
+    0xcea927eeU, 0xb761c935U, 0xe11ce5edU, 0x7a47b13cU,
+    0x9cd2df59U, 0x55f2733fU, 0x1814ce79U, 0x73c737bfU,
+    0x53f7cdeaU, 0x5ffdaa5bU, 0xdf3d6f14U, 0x7844db86U,
+    0xcaaff381U, 0xb968c43eU, 0x3824342cU, 0xc2a3405fU,
+    0x161dc372U, 0xbce2250cU, 0x283c498bU, 0xff0d9541U,
+    0x39a80171U, 0x080cb3deU, 0xd8b4e49cU, 0x6456c190U,
+    0x7bcb8461U, 0xd532b670U, 0x486c5c74U, 0xd0b85742U,
+};
+static const u32 Td1[256] = {
+    0x5051f4a7U, 0x537e4165U, 0xc31a17a4U, 0x963a275eU,
+    0xcb3bab6bU, 0xf11f9d45U, 0xabacfa58U, 0x934be303U,
+    0x552030faU, 0xf6ad766dU, 0x9188cc76U, 0x25f5024cU,
+    0xfc4fe5d7U, 0xd7c52acbU, 0x80263544U, 0x8fb562a3U,
+    0x49deb15aU, 0x6725ba1bU, 0x9845ea0eU, 0xe15dfec0U,
+    0x02c32f75U, 0x12814cf0U, 0xa38d4697U, 0xc66bd3f9U,
+    0xe7038f5fU, 0x9515929cU, 0xebbf6d7aU, 0xda955259U,
+    0x2dd4be83U, 0xd3587421U, 0x2949e069U, 0x448ec9c8U,
+    0x6a75c289U, 0x78f48e79U, 0x6b99583eU, 0xdd27b971U,
+    0xb6bee14fU, 0x17f088adU, 0x66c920acU, 0xb47dce3aU,
+    0x1863df4aU, 0x82e51a31U, 0x60975133U, 0x4562537fU,
+    0xe0b16477U, 0x84bb6baeU, 0x1cfe81a0U, 0x94f9082bU,
+    0x58704868U, 0x198f45fdU, 0x8794de6cU, 0xb7527bf8U,
+    0x23ab73d3U, 0xe2724b02U, 0x57e31f8fU, 0x2a6655abU,
+    0x07b2eb28U, 0x032fb5c2U, 0x9a86c57bU, 0xa5d33708U,
+    0xf2302887U, 0xb223bfa5U, 0xba02036aU, 0x5ced1682U,
+    0x2b8acf1cU, 0x92a779b4U, 0xf0f307f2U, 0xa14e69e2U,
+    0xcd65daf4U, 0xd50605beU, 0x1fd13462U, 0x8ac4a6feU,
+    0x9d342e53U, 0xa0a2f355U, 0x32058ae1U, 0x75a4f6ebU,
+    0x390b83ecU, 0xaa4060efU, 0x065e719fU, 0x51bd6e10U,
+    0xf93e218aU, 0x3d96dd06U, 0xaedd3e05U, 0x464de6bdU,
+    0xb591548dU, 0x0571c45dU, 0x6f0406d4U, 0xff605015U,
+    0x241998fbU, 0x97d6bde9U, 0xcc894043U, 0x7767d99eU,
+    0xbdb0e842U, 0x8807898bU, 0x38e7195bU, 0xdb79c8eeU,
+    0x47a17c0aU, 0xe97c420fU, 0xc9f8841eU, 0x00000000U,
+    0x83098086U, 0x48322bedU, 0xac1e1170U, 0x4e6c5a72U,
+    0xfbfd0effU, 0x560f8538U, 0x1e3daed5U, 0x27362d39U,
+    0x640a0fd9U, 0x21685ca6U, 0xd19b5b54U, 0x3a24362eU,
+    0xb10c0a67U, 0x0f9357e7U, 0xd2b4ee96U, 0x9e1b9b91U,
+    0x4f80c0c5U, 0xa261dc20U, 0x695a774bU, 0x161c121aU,
+    0x0ae293baU, 0xe5c0a02aU, 0x433c22e0U, 0x1d121b17U,
+    0x0b0e090dU, 0xadf28bc7U, 0xb92db6a8U, 0xc8141ea9U,
+    0x8557f119U, 0x4caf7507U, 0xbbee99ddU, 0xfda37f60U,
+    0x9ff70126U, 0xbc5c72f5U, 0xc544663bU, 0x345bfb7eU,
+    0x768b4329U, 0xdccb23c6U, 0x68b6edfcU, 0x63b8e4f1U,
+    0xcad731dcU, 0x10426385U, 0x40139722U, 0x2084c611U,
+    0x7d854a24U, 0xf8d2bb3dU, 0x11aef932U, 0x6dc729a1U,
+    0x4b1d9e2fU, 0xf3dcb230U, 0xec0d8652U, 0xd077c1e3U,
+    0x6c2bb316U, 0x99a970b9U, 0xfa119448U, 0x2247e964U,
+    0xc4a8fc8cU, 0x1aa0f03fU, 0xd8567d2cU, 0xef223390U,
+    0xc787494eU, 0xc1d938d1U, 0xfe8ccaa2U, 0x3698d40bU,
+    0xcfa6f581U, 0x28a57adeU, 0x26dab78eU, 0xa43fadbfU,
+    0xe42c3a9dU, 0x0d507892U, 0x9b6a5fccU, 0x62547e46U,
+    0xc2f68d13U, 0xe890d8b8U, 0x5e2e39f7U, 0xf582c3afU,
+    0xbe9f5d80U, 0x7c69d093U, 0xa96fd52dU, 0xb3cf2512U,
+    0x3bc8ac99U, 0xa710187dU, 0x6ee89c63U, 0x7bdb3bbbU,
+    0x09cd2678U, 0xf46e5918U, 0x01ec9ab7U, 0xa8834f9aU,
+    0x65e6956eU, 0x7eaaffe6U, 0x0821bccfU, 0xe6ef15e8U,
+    0xd9bae79bU, 0xce4a6f36U, 0xd4ea9f09U, 0xd629b07cU,
+    0xaf31a4b2U, 0x312a3f23U, 0x30c6a594U, 0xc035a266U,
+    0x37744ebcU, 0xa6fc82caU, 0xb0e090d0U, 0x1533a7d8U,
+    0x4af10498U, 0xf741ecdaU, 0x0e7fcd50U, 0x2f1791f6U,
+    0x8d764dd6U, 0x4d43efb0U, 0x54ccaa4dU, 0xdfe49604U,
+    0xe39ed1b5U, 0x1b4c6a88U, 0xb8c12c1fU, 0x7f466551U,
+    0x049d5eeaU, 0x5d018c35U, 0x73fa8774U, 0x2efb0b41U,
+    0x5ab3671dU, 0x5292dbd2U, 0x33e91056U, 0x136dd647U,
+    0x8c9ad761U, 0x7a37a10cU, 0x8e59f814U, 0x89eb133cU,
+    0xeecea927U, 0x35b761c9U, 0xede11ce5U, 0x3c7a47b1U,
+    0x599cd2dfU, 0x3f55f273U, 0x791814ceU, 0xbf73c737U,
+    0xea53f7cdU, 0x5b5ffdaaU, 0x14df3d6fU, 0x867844dbU,
+    0x81caaff3U, 0x3eb968c4U, 0x2c382434U, 0x5fc2a340U,
+    0x72161dc3U, 0x0cbce225U, 0x8b283c49U, 0x41ff0d95U,
+    0x7139a801U, 0xde080cb3U, 0x9cd8b4e4U, 0x906456c1U,
+    0x617bcb84U, 0x70d532b6U, 0x74486c5cU, 0x42d0b857U,
+};
+static const u32 Td2[256] = {
+    0xa75051f4U, 0x65537e41U, 0xa4c31a17U, 0x5e963a27U,
+    0x6bcb3babU, 0x45f11f9dU, 0x58abacfaU, 0x03934be3U,
+    0xfa552030U, 0x6df6ad76U, 0x769188ccU, 0x4c25f502U,
+    0xd7fc4fe5U, 0xcbd7c52aU, 0x44802635U, 0xa38fb562U,
+    0x5a49deb1U, 0x1b6725baU, 0x0e9845eaU, 0xc0e15dfeU,
+    0x7502c32fU, 0xf012814cU, 0x97a38d46U, 0xf9c66bd3U,
+    0x5fe7038fU, 0x9c951592U, 0x7aebbf6dU, 0x59da9552U,
+    0x832dd4beU, 0x21d35874U, 0x692949e0U, 0xc8448ec9U,
+    0x896a75c2U, 0x7978f48eU, 0x3e6b9958U, 0x71dd27b9U,
+    0x4fb6bee1U, 0xad17f088U, 0xac66c920U, 0x3ab47dceU,
+    0x4a1863dfU, 0x3182e51aU, 0x33609751U, 0x7f456253U,
+    0x77e0b164U, 0xae84bb6bU, 0xa01cfe81U, 0x2b94f908U,
+    0x68587048U, 0xfd198f45U, 0x6c8794deU, 0xf8b7527bU,
+    0xd323ab73U, 0x02e2724bU, 0x8f57e31fU, 0xab2a6655U,
+    0x2807b2ebU, 0xc2032fb5U, 0x7b9a86c5U, 0x08a5d337U,
+    0x87f23028U, 0xa5b223bfU, 0x6aba0203U, 0x825ced16U,
+    0x1c2b8acfU, 0xb492a779U, 0xf2f0f307U, 0xe2a14e69U,
+    0xf4cd65daU, 0xbed50605U, 0x621fd134U, 0xfe8ac4a6U,
+    0x539d342eU, 0x55a0a2f3U, 0xe132058aU, 0xeb75a4f6U,
+    0xec390b83U, 0xefaa4060U, 0x9f065e71U, 0x1051bd6eU,
+    0x8af93e21U, 0x063d96ddU, 0x05aedd3eU, 0xbd464de6U,
+    0x8db59154U, 0x5d0571c4U, 0xd46f0406U, 0x15ff6050U,
+    0xfb241998U, 0xe997d6bdU, 0x43cc8940U, 0x9e7767d9U,
+    0x42bdb0e8U, 0x8b880789U, 0x5b38e719U, 0xeedb79c8U,
+    0x0a47a17cU, 0x0fe97c42U, 0x1ec9f884U, 0x00000000U,
+    0x86830980U, 0xed48322bU, 0x70ac1e11U, 0x724e6c5aU,
+    0xfffbfd0eU, 0x38560f85U, 0xd51e3daeU, 0x3927362dU,
+    0xd9640a0fU, 0xa621685cU, 0x54d19b5bU, 0x2e3a2436U,
+    0x67b10c0aU, 0xe70f9357U, 0x96d2b4eeU, 0x919e1b9bU,
+    0xc54f80c0U, 0x20a261dcU, 0x4b695a77U, 0x1a161c12U,
+    0xba0ae293U, 0x2ae5c0a0U, 0xe0433c22U, 0x171d121bU,
+    0x0d0b0e09U, 0xc7adf28bU, 0xa8b92db6U, 0xa9c8141eU,
+    0x198557f1U, 0x074caf75U, 0xddbbee99U, 0x60fda37fU,
+    0x269ff701U, 0xf5bc5c72U, 0x3bc54466U, 0x7e345bfbU,
+    0x29768b43U, 0xc6dccb23U, 0xfc68b6edU, 0xf163b8e4U,
+    0xdccad731U, 0x85104263U, 0x22401397U, 0x112084c6U,
+    0x247d854aU, 0x3df8d2bbU, 0x3211aef9U, 0xa16dc729U,
+    0x2f4b1d9eU, 0x30f3dcb2U, 0x52ec0d86U, 0xe3d077c1U,
+    0x166c2bb3U, 0xb999a970U, 0x48fa1194U, 0x642247e9U,
+    0x8cc4a8fcU, 0x3f1aa0f0U, 0x2cd8567dU, 0x90ef2233U,
+    0x4ec78749U, 0xd1c1d938U, 0xa2fe8ccaU, 0x0b3698d4U,
+    0x81cfa6f5U, 0xde28a57aU, 0x8e26dab7U, 0xbfa43fadU,
+    0x9de42c3aU, 0x920d5078U, 0xcc9b6a5fU, 0x4662547eU,
+    0x13c2f68dU, 0xb8e890d8U, 0xf75e2e39U, 0xaff582c3U,
+    0x80be9f5dU, 0x937c69d0U, 0x2da96fd5U, 0x12b3cf25U,
+    0x993bc8acU, 0x7da71018U, 0x636ee89cU, 0xbb7bdb3bU,
+    0x7809cd26U, 0x18f46e59U, 0xb701ec9aU, 0x9aa8834fU,
+    0x6e65e695U, 0xe67eaaffU, 0xcf0821bcU, 0xe8e6ef15U,
+    0x9bd9bae7U, 0x36ce4a6fU, 0x09d4ea9fU, 0x7cd629b0U,
+    0xb2af31a4U, 0x23312a3fU, 0x9430c6a5U, 0x66c035a2U,
+    0xbc37744eU, 0xcaa6fc82U, 0xd0b0e090U, 0xd81533a7U,
+    0x984af104U, 0xdaf741ecU, 0x500e7fcdU, 0xf62f1791U,
+    0xd68d764dU, 0xb04d43efU, 0x4d54ccaaU, 0x04dfe496U,
+    0xb5e39ed1U, 0x881b4c6aU, 0x1fb8c12cU, 0x517f4665U,
+    0xea049d5eU, 0x355d018cU, 0x7473fa87U, 0x412efb0bU,
+    0x1d5ab367U, 0xd25292dbU, 0x5633e910U, 0x47136dd6U,
+    0x618c9ad7U, 0x0c7a37a1U, 0x148e59f8U, 0x3c89eb13U,
+    0x27eecea9U, 0xc935b761U, 0xe5ede11cU, 0xb13c7a47U,
+    0xdf599cd2U, 0x733f55f2U, 0xce791814U, 0x37bf73c7U,
+    0xcdea53f7U, 0xaa5b5ffdU, 0x6f14df3dU, 0xdb867844U,
+    0xf381caafU, 0xc43eb968U, 0x342c3824U, 0x405fc2a3U,
+    0xc372161dU, 0x250cbce2U, 0x498b283cU, 0x9541ff0dU,
+    0x017139a8U, 0xb3de080cU, 0xe49cd8b4U, 0xc1906456U,
+    0x84617bcbU, 0xb670d532U, 0x5c74486cU, 0x5742d0b8U,
+};
+static const u32 Td3[256] = {
+    0xf4a75051U, 0x4165537eU, 0x17a4c31aU, 0x275e963aU,
+    0xab6bcb3bU, 0x9d45f11fU, 0xfa58abacU, 0xe303934bU,
+    0x30fa5520U, 0x766df6adU, 0xcc769188U, 0x024c25f5U,
+    0xe5d7fc4fU, 0x2acbd7c5U, 0x35448026U, 0x62a38fb5U,
+    0xb15a49deU, 0xba1b6725U, 0xea0e9845U, 0xfec0e15dU,
+    0x2f7502c3U, 0x4cf01281U, 0x4697a38dU, 0xd3f9c66bU,
+    0x8f5fe703U, 0x929c9515U, 0x6d7aebbfU, 0x5259da95U,
+    0xbe832dd4U, 0x7421d358U, 0xe0692949U, 0xc9c8448eU,
+    0xc2896a75U, 0x8e7978f4U, 0x583e6b99U, 0xb971dd27U,
+    0xe14fb6beU, 0x88ad17f0U, 0x20ac66c9U, 0xce3ab47dU,
+    0xdf4a1863U, 0x1a3182e5U, 0x51336097U, 0x537f4562U,
+    0x6477e0b1U, 0x6bae84bbU, 0x81a01cfeU, 0x082b94f9U,
+    0x48685870U, 0x45fd198fU, 0xde6c8794U, 0x7bf8b752U,
+    0x73d323abU, 0x4b02e272U, 0x1f8f57e3U, 0x55ab2a66U,
+    0xeb2807b2U, 0xb5c2032fU, 0xc57b9a86U, 0x3708a5d3U,
+    0x2887f230U, 0xbfa5b223U, 0x036aba02U, 0x16825cedU,
+    0xcf1c2b8aU, 0x79b492a7U, 0x07f2f0f3U, 0x69e2a14eU,
+    0xdaf4cd65U, 0x05bed506U, 0x34621fd1U, 0xa6fe8ac4U,
+    0x2e539d34U, 0xf355a0a2U, 0x8ae13205U, 0xf6eb75a4U,
+    0x83ec390bU, 0x60efaa40U, 0x719f065eU, 0x6e1051bdU,
+    0x218af93eU, 0xdd063d96U, 0x3e05aeddU, 0xe6bd464dU,
+    0x548db591U, 0xc45d0571U, 0x06d46f04U, 0x5015ff60U,
+    0x98fb2419U, 0xbde997d6U, 0x4043cc89U, 0xd99e7767U,
+    0xe842bdb0U, 0x898b8807U, 0x195b38e7U, 0xc8eedb79U,
+    0x7c0a47a1U, 0x420fe97cU, 0x841ec9f8U, 0x00000000U,
+    0x80868309U, 0x2bed4832U, 0x1170ac1eU, 0x5a724e6cU,
+    0x0efffbfdU, 0x8538560fU, 0xaed51e3dU, 0x2d392736U,
+    0x0fd9640aU, 0x5ca62168U, 0x5b54d19bU, 0x362e3a24U,
+    0x0a67b10cU, 0x57e70f93U, 0xee96d2b4U, 0x9b919e1bU,
+    0xc0c54f80U, 0xdc20a261U, 0x774b695aU, 0x121a161cU,
+    0x93ba0ae2U, 0xa02ae5c0U, 0x22e0433cU, 0x1b171d12U,
+    0x090d0b0eU, 0x8bc7adf2U, 0xb6a8b92dU, 0x1ea9c814U,
+    0xf1198557U, 0x75074cafU, 0x99ddbbeeU, 0x7f60fda3U,
+    0x01269ff7U, 0x72f5bc5cU, 0x663bc544U, 0xfb7e345bU,
+    0x4329768bU, 0x23c6dccbU, 0xedfc68b6U, 0xe4f163b8U,
+    0x31dccad7U, 0x63851042U, 0x97224013U, 0xc6112084U,
+    0x4a247d85U, 0xbb3df8d2U, 0xf93211aeU, 0x29a16dc7U,
+    0x9e2f4b1dU, 0xb230f3dcU, 0x8652ec0dU, 0xc1e3d077U,
+    0xb3166c2bU, 0x70b999a9U, 0x9448fa11U, 0xe9642247U,
+    0xfc8cc4a8U, 0xf03f1aa0U, 0x7d2cd856U, 0x3390ef22U,
+    0x494ec787U, 0x38d1c1d9U, 0xcaa2fe8cU, 0xd40b3698U,
+    0xf581cfa6U, 0x7ade28a5U, 0xb78e26daU, 0xadbfa43fU,
+    0x3a9de42cU, 0x78920d50U, 0x5fcc9b6aU, 0x7e466254U,
+    0x8d13c2f6U, 0xd8b8e890U, 0x39f75e2eU, 0xc3aff582U,
+    0x5d80be9fU, 0xd0937c69U, 0xd52da96fU, 0x2512b3cfU,
+    0xac993bc8U, 0x187da710U, 0x9c636ee8U, 0x3bbb7bdbU,
+    0x267809cdU, 0x5918f46eU, 0x9ab701ecU, 0x4f9aa883U,
+    0x956e65e6U, 0xffe67eaaU, 0xbccf0821U, 0x15e8e6efU,
+    0xe79bd9baU, 0x6f36ce4aU, 0x9f09d4eaU, 0xb07cd629U,
+    0xa4b2af31U, 0x3f23312aU, 0xa59430c6U, 0xa266c035U,
+    0x4ebc3774U, 0x82caa6fcU, 0x90d0b0e0U, 0xa7d81533U,
+    0x04984af1U, 0xecdaf741U, 0xcd500e7fU, 0x91f62f17U,
+    0x4dd68d76U, 0xefb04d43U, 0xaa4d54ccU, 0x9604dfe4U,
+    0xd1b5e39eU, 0x6a881b4cU, 0x2c1fb8c1U, 0x65517f46U,
+    0x5eea049dU, 0x8c355d01U, 0x877473faU, 0x0b412efbU,
+    0x671d5ab3U, 0xdbd25292U, 0x105633e9U, 0xd647136dU,
+    0xd7618c9aU, 0xa10c7a37U, 0xf8148e59U, 0x133c89ebU,
+    0xa927eeceU, 0x61c935b7U, 0x1ce5ede1U, 0x47b13c7aU,
+    0xd2df599cU, 0xf2733f55U, 0x14ce7918U, 0xc737bf73U,
+    0xf7cdea53U, 0xfdaa5b5fU, 0x3d6f14dfU, 0x44db8678U,
+    0xaff381caU, 0x68c43eb9U, 0x24342c38U, 0xa3405fc2U,
+    0x1dc37216U, 0xe2250cbcU, 0x3c498b28U, 0x0d9541ffU,
+    0xa8017139U, 0x0cb3de08U, 0xb4e49cd8U, 0x56c19064U,
+    0xcb84617bU, 0x32b670d5U, 0x6c5c7448U, 0xb85742d0U,
+};
+static const u8 Td4[256] = {
+    0x52U, 0x09U, 0x6aU, 0xd5U, 0x30U, 0x36U, 0xa5U, 0x38U,
+    0xbfU, 0x40U, 0xa3U, 0x9eU, 0x81U, 0xf3U, 0xd7U, 0xfbU,
+    0x7cU, 0xe3U, 0x39U, 0x82U, 0x9bU, 0x2fU, 0xffU, 0x87U,
+    0x34U, 0x8eU, 0x43U, 0x44U, 0xc4U, 0xdeU, 0xe9U, 0xcbU,
+    0x54U, 0x7bU, 0x94U, 0x32U, 0xa6U, 0xc2U, 0x23U, 0x3dU,
+    0xeeU, 0x4cU, 0x95U, 0x0bU, 0x42U, 0xfaU, 0xc3U, 0x4eU,
+    0x08U, 0x2eU, 0xa1U, 0x66U, 0x28U, 0xd9U, 0x24U, 0xb2U,
+    0x76U, 0x5bU, 0xa2U, 0x49U, 0x6dU, 0x8bU, 0xd1U, 0x25U,
+    0x72U, 0xf8U, 0xf6U, 0x64U, 0x86U, 0x68U, 0x98U, 0x16U,
+    0xd4U, 0xa4U, 0x5cU, 0xccU, 0x5dU, 0x65U, 0xb6U, 0x92U,
+    0x6cU, 0x70U, 0x48U, 0x50U, 0xfdU, 0xedU, 0xb9U, 0xdaU,
+    0x5eU, 0x15U, 0x46U, 0x57U, 0xa7U, 0x8dU, 0x9dU, 0x84U,
+    0x90U, 0xd8U, 0xabU, 0x00U, 0x8cU, 0xbcU, 0xd3U, 0x0aU,
+    0xf7U, 0xe4U, 0x58U, 0x05U, 0xb8U, 0xb3U, 0x45U, 0x06U,
+    0xd0U, 0x2cU, 0x1eU, 0x8fU, 0xcaU, 0x3fU, 0x0fU, 0x02U,
+    0xc1U, 0xafU, 0xbdU, 0x03U, 0x01U, 0x13U, 0x8aU, 0x6bU,
+    0x3aU, 0x91U, 0x11U, 0x41U, 0x4fU, 0x67U, 0xdcU, 0xeaU,
+    0x97U, 0xf2U, 0xcfU, 0xceU, 0xf0U, 0xb4U, 0xe6U, 0x73U,
+    0x96U, 0xacU, 0x74U, 0x22U, 0xe7U, 0xadU, 0x35U, 0x85U,
+    0xe2U, 0xf9U, 0x37U, 0xe8U, 0x1cU, 0x75U, 0xdfU, 0x6eU,
+    0x47U, 0xf1U, 0x1aU, 0x71U, 0x1dU, 0x29U, 0xc5U, 0x89U,
+    0x6fU, 0xb7U, 0x62U, 0x0eU, 0xaaU, 0x18U, 0xbeU, 0x1bU,
+    0xfcU, 0x56U, 0x3eU, 0x4bU, 0xc6U, 0xd2U, 0x79U, 0x20U,
+    0x9aU, 0xdbU, 0xc0U, 0xfeU, 0x78U, 0xcdU, 0x5aU, 0xf4U,
+    0x1fU, 0xddU, 0xa8U, 0x33U, 0x88U, 0x07U, 0xc7U, 0x31U,
+    0xb1U, 0x12U, 0x10U, 0x59U, 0x27U, 0x80U, 0xecU, 0x5fU,
+    0x60U, 0x51U, 0x7fU, 0xa9U, 0x19U, 0xb5U, 0x4aU, 0x0dU,
+    0x2dU, 0xe5U, 0x7aU, 0x9fU, 0x93U, 0xc9U, 0x9cU, 0xefU,
+    0xa0U, 0xe0U, 0x3bU, 0x4dU, 0xaeU, 0x2aU, 0xf5U, 0xb0U,
+    0xc8U, 0xebU, 0xbbU, 0x3cU, 0x83U, 0x53U, 0x99U, 0x61U,
+    0x17U, 0x2bU, 0x04U, 0x7eU, 0xbaU, 0x77U, 0xd6U, 0x26U,
+    0xe1U, 0x69U, 0x14U, 0x63U, 0x55U, 0x21U, 0x0cU, 0x7dU,
+};
+static const u32 rcon[] = {
+    0x01000000, 0x02000000, 0x04000000, 0x08000000,
+    0x10000000, 0x20000000, 0x40000000, 0x80000000,
+    0x1B000000, 0x36000000, /* for 128-bit blocks, Rijndael never uses more than 10 rcon values */
+};
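+
+/*
+ * Editorial note: rcon[i] holds the GF(2^8) power x^i (01, 02, 04, ...,
+ * 80, 1b, 36) in the most significant byte, matching the big-endian
+ * GETU32 word convention used below; the constant-time path above
+ * generates the same sequence incrementally with XtimeWord.
+ */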
+
+/**
+ * Expand the cipher key into the encryption key schedule.
+ */
+int AES_set_encrypt_key(const unsigned char *userKey, const int bits,
+                        AES_KEY *key)
+{
+
+    u32 *rk;
+    int i = 0;
+    u32 temp;
+
+    if (!userKey || !key)
+        return -1;
+    if (bits != 128 && bits != 192 && bits != 256)
+        return -2;
+
+    rk = key->rd_key;
+
+    if (bits == 128)
+        key->rounds = 10;
+    else if (bits == 192)
+        key->rounds = 12;
+    else
+        key->rounds = 14;
+
+    rk[0] = GETU32(userKey     );
+    rk[1] = GETU32(userKey +  4);
+    rk[2] = GETU32(userKey +  8);
+    rk[3] = GETU32(userKey + 12);
+    if (bits == 128) {
+        while (1) {
+            temp  = rk[3];
+            rk[4] = rk[0] ^
+                (Te2[(temp >> 16) & 0xff] & 0xff000000) ^
+                (Te3[(temp >>  8) & 0xff] & 0x00ff0000) ^
+                (Te0[(temp      ) & 0xff] & 0x0000ff00) ^
+                (Te1[(temp >> 24)       ] & 0x000000ff) ^
+                rcon[i];
+            rk[5] = rk[1] ^ rk[4];
+            rk[6] = rk[2] ^ rk[5];
+            rk[7] = rk[3] ^ rk[6];
+            if (++i == 10) {
+                return 0;
+            }
+            rk += 4;
+        }
+    }
+    rk[4] = GETU32(userKey + 16);
+    rk[5] = GETU32(userKey + 20);
+    if (bits == 192) {
+        while (1) {
+            temp = rk[ 5];
+            rk[ 6] = rk[ 0] ^
+                (Te2[(temp >> 16) & 0xff] & 0xff000000) ^
+                (Te3[(temp >>  8) & 0xff] & 0x00ff0000) ^
+                (Te0[(temp      ) & 0xff] & 0x0000ff00) ^
+                (Te1[(temp >> 24)       ] & 0x000000ff) ^
+                rcon[i];
+            rk[ 7] = rk[ 1] ^ rk[ 6];
+            rk[ 8] = rk[ 2] ^ rk[ 7];
+            rk[ 9] = rk[ 3] ^ rk[ 8];
+            if (++i == 8) {
+                return 0;
+            }
+            rk[10] = rk[ 4] ^ rk[ 9];
+            rk[11] = rk[ 5] ^ rk[10];
+            rk += 6;
+        }
+    }
+    rk[6] = GETU32(userKey + 24);
+    rk[7] = GETU32(userKey + 28);
+    if (bits == 256) {
+        while (1) {
+            temp = rk[ 7];
+            rk[ 8] = rk[ 0] ^
+                (Te2[(temp >> 16) & 0xff] & 0xff000000) ^
+                (Te3[(temp >>  8) & 0xff] & 0x00ff0000) ^
+                (Te0[(temp      ) & 0xff] & 0x0000ff00) ^
+                (Te1[(temp >> 24)       ] & 0x000000ff) ^
+                rcon[i];
+            rk[ 9] = rk[ 1] ^ rk[ 8];
+            rk[10] = rk[ 2] ^ rk[ 9];
+            rk[11] = rk[ 3] ^ rk[10];
+            if (++i == 7) {
+                return 0;
+            }
+            temp = rk[11];
+            rk[12] = rk[ 4] ^
+                (Te2[(temp >> 24)       ] & 0xff000000) ^
+                (Te3[(temp >> 16) & 0xff] & 0x00ff0000) ^
+                (Te0[(temp >>  8) & 0xff] & 0x0000ff00) ^
+                (Te1[(temp      ) & 0xff] & 0x000000ff);
+            rk[13] = rk[ 5] ^ rk[12];
+            rk[14] = rk[ 6] ^ rk[13];
+            rk[15] = rk[ 7] ^ rk[14];
+
+            rk += 8;
+        }
+    }
+    return 0;
+}
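+
+/*
+ * Note: in the loops above the masked Te-table reads only pick S-box
+ * bytes out of the round tables (e.g. Te1[x] & 0xff == S[x]), so the
+ * first word computed each iteration is exactly FIPS-197's
+ * SubWord(RotWord(temp)) ^ rcon[i], without needing a separate
+ * 256-byte S-box table.
+ */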
+
+/**
+ * Expand the cipher key into the decryption key schedule.
+ */
+int AES_set_decrypt_key(const unsigned char *userKey, const int bits,
+                        AES_KEY *key)
+{
+
+    u32 *rk;
+    int i, j, status;
+    u32 temp;
+
+    /* first, start with an encryption schedule */
+    status = AES_set_encrypt_key(userKey, bits, key);
+    if (status < 0)
+        return status;
+
+    rk = key->rd_key;
+
+    /* invert the order of the round keys: */
+    for (i = 0, j = 4*(key->rounds); i < j; i += 4, j -= 4) {
+        temp = rk[i    ]; rk[i    ] = rk[j    ]; rk[j    ] = temp;
+        temp = rk[i + 1]; rk[i + 1] = rk[j + 1]; rk[j + 1] = temp;
+        temp = rk[i + 2]; rk[i + 2] = rk[j + 2]; rk[j + 2] = temp;
+        temp = rk[i + 3]; rk[i + 3] = rk[j + 3]; rk[j + 3] = temp;
+    }
+    /* apply the inverse MixColumn transform to all round keys but the first and the last: */
+    for (i = 1; i < (key->rounds); i++) {
+        rk += 4;
+        rk[0] =
+            Td0[Te1[(rk[0] >> 24)       ] & 0xff] ^
+            Td1[Te1[(rk[0] >> 16) & 0xff] & 0xff] ^
+            Td2[Te1[(rk[0] >>  8) & 0xff] & 0xff] ^
+            Td3[Te1[(rk[0]      ) & 0xff] & 0xff];
+        rk[1] =
+            Td0[Te1[(rk[1] >> 24)       ] & 0xff] ^
+            Td1[Te1[(rk[1] >> 16) & 0xff] & 0xff] ^
+            Td2[Te1[(rk[1] >>  8) & 0xff] & 0xff] ^
+            Td3[Te1[(rk[1]      ) & 0xff] & 0xff];
+        rk[2] =
+            Td0[Te1[(rk[2] >> 24)       ] & 0xff] ^
+            Td1[Te1[(rk[2] >> 16) & 0xff] & 0xff] ^
+            Td2[Te1[(rk[2] >>  8) & 0xff] & 0xff] ^
+            Td3[Te1[(rk[2]      ) & 0xff] & 0xff];
+        rk[3] =
+            Td0[Te1[(rk[3] >> 24)       ] & 0xff] ^
+            Td1[Te1[(rk[3] >> 16) & 0xff] & 0xff] ^
+            Td2[Te1[(rk[3] >>  8) & 0xff] & 0xff] ^
+            Td3[Te1[(rk[3]      ) & 0xff] & 0xff];
+    }
+    return 0;
+}
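+
+/*
+ * Note: in the loop above, Te1[w] & 0xff recovers S[w] for each
+ * round-key byte, and the Td tables are built from the inverse S-box,
+ * so Td0[Te1[w] & 0xff] collapses to InvMixColumns applied directly to
+ * the round key. The AES_ASM branch below computes the same transform
+ * explicitly with a tp1/tp2/tp4/tp8 xtime chain.
+ */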
+
+/*
+ * Encrypt a single block
+ * in and out can overlap
+ */
+void AES_encrypt(const unsigned char *in, unsigned char *out,
+                 const AES_KEY *key) {
+
+    const u32 *rk;
+    u32 s0, s1, s2, s3, t0, t1, t2, t3;
+#ifndef FULL_UNROLL
+    int r;
+#endif /* ?FULL_UNROLL */
+
+    assert(in && out && key);
+    rk = key->rd_key;
+
+    /*
+     * map byte array block to cipher state
+     * and add initial round key:
+     */
+    s0 = GETU32(in     ) ^ rk[0];
+    s1 = GETU32(in +  4) ^ rk[1];
+    s2 = GETU32(in +  8) ^ rk[2];
+    s3 = GETU32(in + 12) ^ rk[3];
+#ifdef FULL_UNROLL
+    /* round 1: */
+    t0 = Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >>  8) & 0xff] ^ Te3[s3 & 0xff] ^ rk[ 4];
+    t1 = Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >>  8) & 0xff] ^ Te3[s0 & 0xff] ^ rk[ 5];
+    t2 = Te0[s2 >> 24] ^ Te1[(s3 >> 16) & 0xff] ^ Te2[(s0 >>  8) & 0xff] ^ Te3[s1 & 0xff] ^ rk[ 6];
+    t3 = Te0[s3 >> 24] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >>  8) & 0xff] ^ Te3[s2 & 0xff] ^ rk[ 7];
+    /* round 2: */
+    s0 = Te0[t0 >> 24] ^ Te1[(t1 >> 16) & 0xff] ^ Te2[(t2 >>  8) & 0xff] ^ Te3[t3 & 0xff] ^ rk[ 8];
+    s1 = Te0[t1 >> 24] ^ Te1[(t2 >> 16) & 0xff] ^ Te2[(t3 >>  8) & 0xff] ^ Te3[t0 & 0xff] ^ rk[ 9];
+    s2 = Te0[t2 >> 24] ^ Te1[(t3 >> 16) & 0xff] ^ Te2[(t0 >>  8) & 0xff] ^ Te3[t1 & 0xff] ^ rk[10];
+    s3 = Te0[t3 >> 24] ^ Te1[(t0 >> 16) & 0xff] ^ Te2[(t1 >>  8) & 0xff] ^ Te3[t2 & 0xff] ^ rk[11];
+    /* round 3: */
+    t0 = Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >>  8) & 0xff] ^ Te3[s3 & 0xff] ^ rk[12];
+    t1 = Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >>  8) & 0xff] ^ Te3[s0 & 0xff] ^ rk[13];
+    t2 = Te0[s2 >> 24] ^ Te1[(s3 >> 16) & 0xff] ^ Te2[(s0 >>  8) & 0xff] ^ Te3[s1 & 0xff] ^ rk[14];
+    t3 = Te0[s3 >> 24] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >>  8) & 0xff] ^ Te3[s2 & 0xff] ^ rk[15];
+    /* round 4: */
+    s0 = Te0[t0 >> 24] ^ Te1[(t1 >> 16) & 0xff] ^ Te2[(t2 >>  8) & 0xff] ^ Te3[t3 & 0xff] ^ rk[16];
+    s1 = Te0[t1 >> 24] ^ Te1[(t2 >> 16) & 0xff] ^ Te2[(t3 >>  8) & 0xff] ^ Te3[t0 & 0xff] ^ rk[17];
+    s2 = Te0[t2 >> 24] ^ Te1[(t3 >> 16) & 0xff] ^ Te2[(t0 >>  8) & 0xff] ^ Te3[t1 & 0xff] ^ rk[18];
+    s3 = Te0[t3 >> 24] ^ Te1[(t0 >> 16) & 0xff] ^ Te2[(t1 >>  8) & 0xff] ^ Te3[t2 & 0xff] ^ rk[19];
+    /* round 5: */
+    t0 = Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >>  8) & 0xff] ^ Te3[s3 & 0xff] ^ rk[20];
+    t1 = Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >>  8) & 0xff] ^ Te3[s0 & 0xff] ^ rk[21];
+    t2 = Te0[s2 >> 24] ^ Te1[(s3 >> 16) & 0xff] ^ Te2[(s0 >>  8) & 0xff] ^ Te3[s1 & 0xff] ^ rk[22];
+    t3 = Te0[s3 >> 24] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >>  8) & 0xff] ^ Te3[s2 & 0xff] ^ rk[23];
+    /* round 6: */
+    s0 = Te0[t0 >> 24] ^ Te1[(t1 >> 16) & 0xff] ^ Te2[(t2 >>  8) & 0xff] ^ Te3[t3 & 0xff] ^ rk[24];
+    s1 = Te0[t1 >> 24] ^ Te1[(t2 >> 16) & 0xff] ^ Te2[(t3 >>  8) & 0xff] ^ Te3[t0 & 0xff] ^ rk[25];
+    s2 = Te0[t2 >> 24] ^ Te1[(t3 >> 16) & 0xff] ^ Te2[(t0 >>  8) & 0xff] ^ Te3[t1 & 0xff] ^ rk[26];
+    s3 = Te0[t3 >> 24] ^ Te1[(t0 >> 16) & 0xff] ^ Te2[(t1 >>  8) & 0xff] ^ Te3[t2 & 0xff] ^ rk[27];
+    /* round 7: */
+    t0 = Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >>  8) & 0xff] ^ Te3[s3 & 0xff] ^ rk[28];
+    t1 = Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >>  8) & 0xff] ^ Te3[s0 & 0xff] ^ rk[29];
+    t2 = Te0[s2 >> 24] ^ Te1[(s3 >> 16) & 0xff] ^ Te2[(s0 >>  8) & 0xff] ^ Te3[s1 & 0xff] ^ rk[30];
+    t3 = Te0[s3 >> 24] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >>  8) & 0xff] ^ Te3[s2 & 0xff] ^ rk[31];
+    /* round 8: */
+    s0 = Te0[t0 >> 24] ^ Te1[(t1 >> 16) & 0xff] ^ Te2[(t2 >>  8) & 0xff] ^ Te3[t3 & 0xff] ^ rk[32];
+    s1 = Te0[t1 >> 24] ^ Te1[(t2 >> 16) & 0xff] ^ Te2[(t3 >>  8) & 0xff] ^ Te3[t0 & 0xff] ^ rk[33];
+    s2 = Te0[t2 >> 24] ^ Te1[(t3 >> 16) & 0xff] ^ Te2[(t0 >>  8) & 0xff] ^ Te3[t1 & 0xff] ^ rk[34];
+    s3 = Te0[t3 >> 24] ^ Te1[(t0 >> 16) & 0xff] ^ Te2[(t1 >>  8) & 0xff] ^ Te3[t2 & 0xff] ^ rk[35];
+    /* round 9: */
+    t0 = Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >>  8) & 0xff] ^ Te3[s3 & 0xff] ^ rk[36];
+    t1 = Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >>  8) & 0xff] ^ Te3[s0 & 0xff] ^ rk[37];
+    t2 = Te0[s2 >> 24] ^ Te1[(s3 >> 16) & 0xff] ^ Te2[(s0 >>  8) & 0xff] ^ Te3[s1 & 0xff] ^ rk[38];
+    t3 = Te0[s3 >> 24] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >>  8) & 0xff] ^ Te3[s2 & 0xff] ^ rk[39];
+    if (key->rounds > 10) {
+        /* round 10: */
+        s0 = Te0[t0 >> 24] ^ Te1[(t1 >> 16) & 0xff] ^ Te2[(t2 >>  8) & 0xff] ^ Te3[t3 & 0xff] ^ rk[40];
+        s1 = Te0[t1 >> 24] ^ Te1[(t2 >> 16) & 0xff] ^ Te2[(t3 >>  8) & 0xff] ^ Te3[t0 & 0xff] ^ rk[41];
+        s2 = Te0[t2 >> 24] ^ Te1[(t3 >> 16) & 0xff] ^ Te2[(t0 >>  8) & 0xff] ^ Te3[t1 & 0xff] ^ rk[42];
+        s3 = Te0[t3 >> 24] ^ Te1[(t0 >> 16) & 0xff] ^ Te2[(t1 >>  8) & 0xff] ^ Te3[t2 & 0xff] ^ rk[43];
+        /* round 11: */
+        t0 = Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >>  8) & 0xff] ^ Te3[s3 & 0xff] ^ rk[44];
+        t1 = Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >>  8) & 0xff] ^ Te3[s0 & 0xff] ^ rk[45];
+        t2 = Te0[s2 >> 24] ^ Te1[(s3 >> 16) & 0xff] ^ Te2[(s0 >>  8) & 0xff] ^ Te3[s1 & 0xff] ^ rk[46];
+        t3 = Te0[s3 >> 24] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >>  8) & 0xff] ^ Te3[s2 & 0xff] ^ rk[47];
+        if (key->rounds > 12) {
+            /* round 12: */
+            s0 = Te0[t0 >> 24] ^ Te1[(t1 >> 16) & 0xff] ^ Te2[(t2 >>  8) & 0xff] ^ Te3[t3 & 0xff] ^ rk[48];
+            s1 = Te0[t1 >> 24] ^ Te1[(t2 >> 16) & 0xff] ^ Te2[(t3 >>  8) & 0xff] ^ Te3[t0 & 0xff] ^ rk[49];
+            s2 = Te0[t2 >> 24] ^ Te1[(t3 >> 16) & 0xff] ^ Te2[(t0 >>  8) & 0xff] ^ Te3[t1 & 0xff] ^ rk[50];
+            s3 = Te0[t3 >> 24] ^ Te1[(t0 >> 16) & 0xff] ^ Te2[(t1 >>  8) & 0xff] ^ Te3[t2 & 0xff] ^ rk[51];
+            /* round 13: */
+            t0 = Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >>  8) & 0xff] ^ Te3[s3 & 0xff] ^ rk[52];
+            t1 = Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >>  8) & 0xff] ^ Te3[s0 & 0xff] ^ rk[53];
+            t2 = Te0[s2 >> 24] ^ Te1[(s3 >> 16) & 0xff] ^ Te2[(s0 >>  8) & 0xff] ^ Te3[s1 & 0xff] ^ rk[54];
+            t3 = Te0[s3 >> 24] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >>  8) & 0xff] ^ Te3[s2 & 0xff] ^ rk[55];
+        }
+    }
+    rk += key->rounds << 2;
+#else  /* !FULL_UNROLL */
+    /*
+     * Nr - 1 full rounds:
+     */
+    r = key->rounds >> 1;
+    for (;;) {
+        t0 =
+            Te0[(s0 >> 24)       ] ^
+            Te1[(s1 >> 16) & 0xff] ^
+            Te2[(s2 >>  8) & 0xff] ^
+            Te3[(s3      ) & 0xff] ^
+            rk[4];
+        t1 =
+            Te0[(s1 >> 24)       ] ^
+            Te1[(s2 >> 16) & 0xff] ^
+            Te2[(s3 >>  8) & 0xff] ^
+            Te3[(s0      ) & 0xff] ^
+            rk[5];
+        t2 =
+            Te0[(s2 >> 24)       ] ^
+            Te1[(s3 >> 16) & 0xff] ^
+            Te2[(s0 >>  8) & 0xff] ^
+            Te3[(s1      ) & 0xff] ^
+            rk[6];
+        t3 =
+            Te0[(s3 >> 24)       ] ^
+            Te1[(s0 >> 16) & 0xff] ^
+            Te2[(s1 >>  8) & 0xff] ^
+            Te3[(s2      ) & 0xff] ^
+            rk[7];
+
+        rk += 8;
+        if (--r == 0) {
+            break;
+        }
+
+        s0 =
+            Te0[(t0 >> 24)       ] ^
+            Te1[(t1 >> 16) & 0xff] ^
+            Te2[(t2 >>  8) & 0xff] ^
+            Te3[(t3      ) & 0xff] ^
+            rk[0];
+        s1 =
+            Te0[(t1 >> 24)       ] ^
+            Te1[(t2 >> 16) & 0xff] ^
+            Te2[(t3 >>  8) & 0xff] ^
+            Te3[(t0      ) & 0xff] ^
+            rk[1];
+        s2 =
+            Te0[(t2 >> 24)       ] ^
+            Te1[(t3 >> 16) & 0xff] ^
+            Te2[(t0 >>  8) & 0xff] ^
+            Te3[(t1      ) & 0xff] ^
+            rk[2];
+        s3 =
+            Te0[(t3 >> 24)       ] ^
+            Te1[(t0 >> 16) & 0xff] ^
+            Te2[(t1 >>  8) & 0xff] ^
+            Te3[(t2      ) & 0xff] ^
+            rk[3];
+    }
+#endif /* ?FULL_UNROLL */
+    /*
+     * apply last round and
+     * map cipher state to byte array block:
+     */
+    s0 =
+        (Te2[(t0 >> 24)       ] & 0xff000000) ^
+        (Te3[(t1 >> 16) & 0xff] & 0x00ff0000) ^
+        (Te0[(t2 >>  8) & 0xff] & 0x0000ff00) ^
+        (Te1[(t3      ) & 0xff] & 0x000000ff) ^
+        rk[0];
+    PUTU32(out     , s0);
+    s1 =
+        (Te2[(t1 >> 24)       ] & 0xff000000) ^
+        (Te3[(t2 >> 16) & 0xff] & 0x00ff0000) ^
+        (Te0[(t3 >>  8) & 0xff] & 0x0000ff00) ^
+        (Te1[(t0      ) & 0xff] & 0x000000ff) ^
+        rk[1];
+    PUTU32(out +  4, s1);
+    s2 =
+        (Te2[(t2 >> 24)       ] & 0xff000000) ^
+        (Te3[(t3 >> 16) & 0xff] & 0x00ff0000) ^
+        (Te0[(t0 >>  8) & 0xff] & 0x0000ff00) ^
+        (Te1[(t1      ) & 0xff] & 0x000000ff) ^
+        rk[2];
+    PUTU32(out +  8, s2);
+    s3 =
+        (Te2[(t3 >> 24)       ] & 0xff000000) ^
+        (Te3[(t0 >> 16) & 0xff] & 0x00ff0000) ^
+        (Te0[(t1 >>  8) & 0xff] & 0x0000ff00) ^
+        (Te1[(t2      ) & 0xff] & 0x000000ff) ^
+        rk[3];
+    PUTU32(out + 12, s3);
+}
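+
+/*
+ * Note: the masked Te reads in the final round above select single
+ * S-box bytes from each table, i.e. the last round applies SubBytes,
+ * ShiftRows and AddRoundKey but, per the AES spec, no MixColumns.
+ */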
+
+/*
+ * Decrypt a single block
+ * in and out can overlap
+ */
+void AES_decrypt(const unsigned char *in, unsigned char *out,
+                 const AES_KEY *key)
+{
+
+    const u32 *rk;
+    u32 s0, s1, s2, s3, t0, t1, t2, t3;
+#ifndef FULL_UNROLL
+    int r;
+#endif /* ?FULL_UNROLL */
+
+    assert(in && out && key);
+    rk = key->rd_key;
+
+    /*
+     * map byte array block to cipher state
+     * and add initial round key:
+     */
+    s0 = GETU32(in     ) ^ rk[0];
+    s1 = GETU32(in +  4) ^ rk[1];
+    s2 = GETU32(in +  8) ^ rk[2];
+    s3 = GETU32(in + 12) ^ rk[3];
+#ifdef FULL_UNROLL
+    /* round 1: */
+    t0 = Td0[s0 >> 24] ^ Td1[(s3 >> 16) & 0xff] ^ Td2[(s2 >>  8) & 0xff] ^ Td3[s1 & 0xff] ^ rk[ 4];
+    t1 = Td0[s1 >> 24] ^ Td1[(s0 >> 16) & 0xff] ^ Td2[(s3 >>  8) & 0xff] ^ Td3[s2 & 0xff] ^ rk[ 5];
+    t2 = Td0[s2 >> 24] ^ Td1[(s1 >> 16) & 0xff] ^ Td2[(s0 >>  8) & 0xff] ^ Td3[s3 & 0xff] ^ rk[ 6];
+    t3 = Td0[s3 >> 24] ^ Td1[(s2 >> 16) & 0xff] ^ Td2[(s1 >>  8) & 0xff] ^ Td3[s0 & 0xff] ^ rk[ 7];
+    /* round 2: */
+    s0 = Td0[t0 >> 24] ^ Td1[(t3 >> 16) & 0xff] ^ Td2[(t2 >>  8) & 0xff] ^ Td3[t1 & 0xff] ^ rk[ 8];
+    s1 = Td0[t1 >> 24] ^ Td1[(t0 >> 16) & 0xff] ^ Td2[(t3 >>  8) & 0xff] ^ Td3[t2 & 0xff] ^ rk[ 9];
+    s2 = Td0[t2 >> 24] ^ Td1[(t1 >> 16) & 0xff] ^ Td2[(t0 >>  8) & 0xff] ^ Td3[t3 & 0xff] ^ rk[10];
+    s3 = Td0[t3 >> 24] ^ Td1[(t2 >> 16) & 0xff] ^ Td2[(t1 >>  8) & 0xff] ^ Td3[t0 & 0xff] ^ rk[11];
+    /* round 3: */
+    t0 = Td0[s0 >> 24] ^ Td1[(s3 >> 16) & 0xff] ^ Td2[(s2 >>  8) & 0xff] ^ Td3[s1 & 0xff] ^ rk[12];
+    t1 = Td0[s1 >> 24] ^ Td1[(s0 >> 16) & 0xff] ^ Td2[(s3 >>  8) & 0xff] ^ Td3[s2 & 0xff] ^ rk[13];
+    t2 = Td0[s2 >> 24] ^ Td1[(s1 >> 16) & 0xff] ^ Td2[(s0 >>  8) & 0xff] ^ Td3[s3 & 0xff] ^ rk[14];
+    t3 = Td0[s3 >> 24] ^ Td1[(s2 >> 16) & 0xff] ^ Td2[(s1 >>  8) & 0xff] ^ Td3[s0 & 0xff] ^ rk[15];
+    /* round 4: */
+    s0 = Td0[t0 >> 24] ^ Td1[(t3 >> 16) & 0xff] ^ Td2[(t2 >>  8) & 0xff] ^ Td3[t1 & 0xff] ^ rk[16];
+    s1 = Td0[t1 >> 24] ^ Td1[(t0 >> 16) & 0xff] ^ Td2[(t3 >>  8) & 0xff] ^ Td3[t2 & 0xff] ^ rk[17];
+    s2 = Td0[t2 >> 24] ^ Td1[(t1 >> 16) & 0xff] ^ Td2[(t0 >>  8) & 0xff] ^ Td3[t3 & 0xff] ^ rk[18];
+    s3 = Td0[t3 >> 24] ^ Td1[(t2 >> 16) & 0xff] ^ Td2[(t1 >>  8) & 0xff] ^ Td3[t0 & 0xff] ^ rk[19];
+    /* round 5: */
+    t0 = Td0[s0 >> 24] ^ Td1[(s3 >> 16) & 0xff] ^ Td2[(s2 >>  8) & 0xff] ^ Td3[s1 & 0xff] ^ rk[20];
+    t1 = Td0[s1 >> 24] ^ Td1[(s0 >> 16) & 0xff] ^ Td2[(s3 >>  8) & 0xff] ^ Td3[s2 & 0xff] ^ rk[21];
+    t2 = Td0[s2 >> 24] ^ Td1[(s1 >> 16) & 0xff] ^ Td2[(s0 >>  8) & 0xff] ^ Td3[s3 & 0xff] ^ rk[22];
+    t3 = Td0[s3 >> 24] ^ Td1[(s2 >> 16) & 0xff] ^ Td2[(s1 >>  8) & 0xff] ^ Td3[s0 & 0xff] ^ rk[23];
+    /* round 6: */
+    s0 = Td0[t0 >> 24] ^ Td1[(t3 >> 16) & 0xff] ^ Td2[(t2 >>  8) & 0xff] ^ Td3[t1 & 0xff] ^ rk[24];
+    s1 = Td0[t1 >> 24] ^ Td1[(t0 >> 16) & 0xff] ^ Td2[(t3 >>  8) & 0xff] ^ Td3[t2 & 0xff] ^ rk[25];
+    s2 = Td0[t2 >> 24] ^ Td1[(t1 >> 16) & 0xff] ^ Td2[(t0 >>  8) & 0xff] ^ Td3[t3 & 0xff] ^ rk[26];
+    s3 = Td0[t3 >> 24] ^ Td1[(t2 >> 16) & 0xff] ^ Td2[(t1 >>  8) & 0xff] ^ Td3[t0 & 0xff] ^ rk[27];
+    /* round 7: */
+    t0 = Td0[s0 >> 24] ^ Td1[(s3 >> 16) & 0xff] ^ Td2[(s2 >>  8) & 0xff] ^ Td3[s1 & 0xff] ^ rk[28];
+    t1 = Td0[s1 >> 24] ^ Td1[(s0 >> 16) & 0xff] ^ Td2[(s3 >>  8) & 0xff] ^ Td3[s2 & 0xff] ^ rk[29];
+    t2 = Td0[s2 >> 24] ^ Td1[(s1 >> 16) & 0xff] ^ Td2[(s0 >>  8) & 0xff] ^ Td3[s3 & 0xff] ^ rk[30];
+    t3 = Td0[s3 >> 24] ^ Td1[(s2 >> 16) & 0xff] ^ Td2[(s1 >>  8) & 0xff] ^ Td3[s0 & 0xff] ^ rk[31];
+    /* round 8: */
+    s0 = Td0[t0 >> 24] ^ Td1[(t3 >> 16) & 0xff] ^ Td2[(t2 >>  8) & 0xff] ^ Td3[t1 & 0xff] ^ rk[32];
+    s1 = Td0[t1 >> 24] ^ Td1[(t0 >> 16) & 0xff] ^ Td2[(t3 >>  8) & 0xff] ^ Td3[t2 & 0xff] ^ rk[33];
+    s2 = Td0[t2 >> 24] ^ Td1[(t1 >> 16) & 0xff] ^ Td2[(t0 >>  8) & 0xff] ^ Td3[t3 & 0xff] ^ rk[34];
+    s3 = Td0[t3 >> 24] ^ Td1[(t2 >> 16) & 0xff] ^ Td2[(t1 >>  8) & 0xff] ^ Td3[t0 & 0xff] ^ rk[35];
+    /* round 9: */
+    t0 = Td0[s0 >> 24] ^ Td1[(s3 >> 16) & 0xff] ^ Td2[(s2 >>  8) & 0xff] ^ Td3[s1 & 0xff] ^ rk[36];
+    t1 = Td0[s1 >> 24] ^ Td1[(s0 >> 16) & 0xff] ^ Td2[(s3 >>  8) & 0xff] ^ Td3[s2 & 0xff] ^ rk[37];
+    t2 = Td0[s2 >> 24] ^ Td1[(s1 >> 16) & 0xff] ^ Td2[(s0 >>  8) & 0xff] ^ Td3[s3 & 0xff] ^ rk[38];
+    t3 = Td0[s3 >> 24] ^ Td1[(s2 >> 16) & 0xff] ^ Td2[(s1 >>  8) & 0xff] ^ Td3[s0 & 0xff] ^ rk[39];
+    if (key->rounds > 10) {
+        /* round 10: */
+        s0 = Td0[t0 >> 24] ^ Td1[(t3 >> 16) & 0xff] ^ Td2[(t2 >>  8) & 0xff] ^ Td3[t1 & 0xff] ^ rk[40];
+        s1 = Td0[t1 >> 24] ^ Td1[(t0 >> 16) & 0xff] ^ Td2[(t3 >>  8) & 0xff] ^ Td3[t2 & 0xff] ^ rk[41];
+        s2 = Td0[t2 >> 24] ^ Td1[(t1 >> 16) & 0xff] ^ Td2[(t0 >>  8) & 0xff] ^ Td3[t3 & 0xff] ^ rk[42];
+        s3 = Td0[t3 >> 24] ^ Td1[(t2 >> 16) & 0xff] ^ Td2[(t1 >>  8) & 0xff] ^ Td3[t0 & 0xff] ^ rk[43];
+        /* round 11: */
+        t0 = Td0[s0 >> 24] ^ Td1[(s3 >> 16) & 0xff] ^ Td2[(s2 >>  8) & 0xff] ^ Td3[s1 & 0xff] ^ rk[44];
+        t1 = Td0[s1 >> 24] ^ Td1[(s0 >> 16) & 0xff] ^ Td2[(s3 >>  8) & 0xff] ^ Td3[s2 & 0xff] ^ rk[45];
+        t2 = Td0[s2 >> 24] ^ Td1[(s1 >> 16) & 0xff] ^ Td2[(s0 >>  8) & 0xff] ^ Td3[s3 & 0xff] ^ rk[46];
+        t3 = Td0[s3 >> 24] ^ Td1[(s2 >> 16) & 0xff] ^ Td2[(s1 >>  8) & 0xff] ^ Td3[s0 & 0xff] ^ rk[47];
+        if (key->rounds > 12) {
+            /* round 12: */
+            s0 = Td0[t0 >> 24] ^ Td1[(t3 >> 16) & 0xff] ^ Td2[(t2 >>  8) & 0xff] ^ Td3[t1 & 0xff] ^ rk[48];
+            s1 = Td0[t1 >> 24] ^ Td1[(t0 >> 16) & 0xff] ^ Td2[(t3 >>  8) & 0xff] ^ Td3[t2 & 0xff] ^ rk[49];
+            s2 = Td0[t2 >> 24] ^ Td1[(t1 >> 16) & 0xff] ^ Td2[(t0 >>  8) & 0xff] ^ Td3[t3 & 0xff] ^ rk[50];
+            s3 = Td0[t3 >> 24] ^ Td1[(t2 >> 16) & 0xff] ^ Td2[(t1 >>  8) & 0xff] ^ Td3[t0 & 0xff] ^ rk[51];
+            /* round 13: */
+            t0 = Td0[s0 >> 24] ^ Td1[(s3 >> 16) & 0xff] ^ Td2[(s2 >>  8) & 0xff] ^ Td3[s1 & 0xff] ^ rk[52];
+            t1 = Td0[s1 >> 24] ^ Td1[(s0 >> 16) & 0xff] ^ Td2[(s3 >>  8) & 0xff] ^ Td3[s2 & 0xff] ^ rk[53];
+            t2 = Td0[s2 >> 24] ^ Td1[(s1 >> 16) & 0xff] ^ Td2[(s0 >>  8) & 0xff] ^ Td3[s3 & 0xff] ^ rk[54];
+            t3 = Td0[s3 >> 24] ^ Td1[(s2 >> 16) & 0xff] ^ Td2[(s1 >>  8) & 0xff] ^ Td3[s0 & 0xff] ^ rk[55];
+        }
+    }
+    rk += key->rounds << 2;
+#else  /* !FULL_UNROLL */
+    /*
+     * Nr - 1 full rounds:
+     */
+    r = key->rounds >> 1;
+    for (;;) {
+        t0 =
+            Td0[(s0 >> 24)       ] ^
+            Td1[(s3 >> 16) & 0xff] ^
+            Td2[(s2 >>  8) & 0xff] ^
+            Td3[(s1      ) & 0xff] ^
+            rk[4];
+        t1 =
+            Td0[(s1 >> 24)       ] ^
+            Td1[(s0 >> 16) & 0xff] ^
+            Td2[(s3 >>  8) & 0xff] ^
+            Td3[(s2      ) & 0xff] ^
+            rk[5];
+        t2 =
+            Td0[(s2 >> 24)       ] ^
+            Td1[(s1 >> 16) & 0xff] ^
+            Td2[(s0 >>  8) & 0xff] ^
+            Td3[(s3      ) & 0xff] ^
+            rk[6];
+        t3 =
+            Td0[(s3 >> 24)       ] ^
+            Td1[(s2 >> 16) & 0xff] ^
+            Td2[(s1 >>  8) & 0xff] ^
+            Td3[(s0      ) & 0xff] ^
+            rk[7];
+
+        rk += 8;
+        if (--r == 0) {
+            break;
+        }
+
+        s0 =
+            Td0[(t0 >> 24)       ] ^
+            Td1[(t3 >> 16) & 0xff] ^
+            Td2[(t2 >>  8) & 0xff] ^
+            Td3[(t1      ) & 0xff] ^
+            rk[0];
+        s1 =
+            Td0[(t1 >> 24)       ] ^
+            Td1[(t0 >> 16) & 0xff] ^
+            Td2[(t3 >>  8) & 0xff] ^
+            Td3[(t2      ) & 0xff] ^
+            rk[1];
+        s2 =
+            Td0[(t2 >> 24)       ] ^
+            Td1[(t1 >> 16) & 0xff] ^
+            Td2[(t0 >>  8) & 0xff] ^
+            Td3[(t3      ) & 0xff] ^
+            rk[2];
+        s3 =
+            Td0[(t3 >> 24)       ] ^
+            Td1[(t2 >> 16) & 0xff] ^
+            Td2[(t1 >>  8) & 0xff] ^
+            Td3[(t0      ) & 0xff] ^
+            rk[3];
+    }
+#endif /* ?FULL_UNROLL */
+    /*
+     * apply last round and
+     * map cipher state to byte array block:
+     */
+    s0 =
+        ((u32)Td4[(t0 >> 24)       ] << 24) ^
+        ((u32)Td4[(t3 >> 16) & 0xff] << 16) ^
+        ((u32)Td4[(t2 >>  8) & 0xff] <<  8) ^
+        ((u32)Td4[(t1      ) & 0xff])       ^
+        rk[0];
+    PUTU32(out     , s0);
+    s1 =
+        ((u32)Td4[(t1 >> 24)       ] << 24) ^
+        ((u32)Td4[(t0 >> 16) & 0xff] << 16) ^
+        ((u32)Td4[(t3 >>  8) & 0xff] <<  8) ^
+        ((u32)Td4[(t2      ) & 0xff])       ^
+        rk[1];
+    PUTU32(out +  4, s1);
+    s2 =
+        ((u32)Td4[(t2 >> 24)       ] << 24) ^
+        ((u32)Td4[(t1 >> 16) & 0xff] << 16) ^
+        ((u32)Td4[(t0 >>  8) & 0xff] <<  8) ^
+        ((u32)Td4[(t3      ) & 0xff])       ^
+        rk[2];
+    PUTU32(out +  8, s2);
+    s3 =
+        ((u32)Td4[(t3 >> 24)       ] << 24) ^
+        ((u32)Td4[(t2 >> 16) & 0xff] << 16) ^
+        ((u32)Td4[(t1 >>  8) & 0xff] <<  8) ^
+        ((u32)Td4[(t0      ) & 0xff])       ^
+        rk[3];
+    PUTU32(out + 12, s3);
+}
+
+#else /* AES_ASM */
+
+static const u8 Te4[256] = {
+    0x63U, 0x7cU, 0x77U, 0x7bU, 0xf2U, 0x6bU, 0x6fU, 0xc5U,
+    0x30U, 0x01U, 0x67U, 0x2bU, 0xfeU, 0xd7U, 0xabU, 0x76U,
+    0xcaU, 0x82U, 0xc9U, 0x7dU, 0xfaU, 0x59U, 0x47U, 0xf0U,
+    0xadU, 0xd4U, 0xa2U, 0xafU, 0x9cU, 0xa4U, 0x72U, 0xc0U,
+    0xb7U, 0xfdU, 0x93U, 0x26U, 0x36U, 0x3fU, 0xf7U, 0xccU,
+    0x34U, 0xa5U, 0xe5U, 0xf1U, 0x71U, 0xd8U, 0x31U, 0x15U,
+    0x04U, 0xc7U, 0x23U, 0xc3U, 0x18U, 0x96U, 0x05U, 0x9aU,
+    0x07U, 0x12U, 0x80U, 0xe2U, 0xebU, 0x27U, 0xb2U, 0x75U,
+    0x09U, 0x83U, 0x2cU, 0x1aU, 0x1bU, 0x6eU, 0x5aU, 0xa0U,
+    0x52U, 0x3bU, 0xd6U, 0xb3U, 0x29U, 0xe3U, 0x2fU, 0x84U,
+    0x53U, 0xd1U, 0x00U, 0xedU, 0x20U, 0xfcU, 0xb1U, 0x5bU,
+    0x6aU, 0xcbU, 0xbeU, 0x39U, 0x4aU, 0x4cU, 0x58U, 0xcfU,
+    0xd0U, 0xefU, 0xaaU, 0xfbU, 0x43U, 0x4dU, 0x33U, 0x85U,
+    0x45U, 0xf9U, 0x02U, 0x7fU, 0x50U, 0x3cU, 0x9fU, 0xa8U,
+    0x51U, 0xa3U, 0x40U, 0x8fU, 0x92U, 0x9dU, 0x38U, 0xf5U,
+    0xbcU, 0xb6U, 0xdaU, 0x21U, 0x10U, 0xffU, 0xf3U, 0xd2U,
+    0xcdU, 0x0cU, 0x13U, 0xecU, 0x5fU, 0x97U, 0x44U, 0x17U,
+    0xc4U, 0xa7U, 0x7eU, 0x3dU, 0x64U, 0x5dU, 0x19U, 0x73U,
+    0x60U, 0x81U, 0x4fU, 0xdcU, 0x22U, 0x2aU, 0x90U, 0x88U,
+    0x46U, 0xeeU, 0xb8U, 0x14U, 0xdeU, 0x5eU, 0x0bU, 0xdbU,
+    0xe0U, 0x32U, 0x3aU, 0x0aU, 0x49U, 0x06U, 0x24U, 0x5cU,
+    0xc2U, 0xd3U, 0xacU, 0x62U, 0x91U, 0x95U, 0xe4U, 0x79U,
+    0xe7U, 0xc8U, 0x37U, 0x6dU, 0x8dU, 0xd5U, 0x4eU, 0xa9U,
+    0x6cU, 0x56U, 0xf4U, 0xeaU, 0x65U, 0x7aU, 0xaeU, 0x08U,
+    0xbaU, 0x78U, 0x25U, 0x2eU, 0x1cU, 0xa6U, 0xb4U, 0xc6U,
+    0xe8U, 0xddU, 0x74U, 0x1fU, 0x4bU, 0xbdU, 0x8bU, 0x8aU,
+    0x70U, 0x3eU, 0xb5U, 0x66U, 0x48U, 0x03U, 0xf6U, 0x0eU,
+    0x61U, 0x35U, 0x57U, 0xb9U, 0x86U, 0xc1U, 0x1dU, 0x9eU,
+    0xe1U, 0xf8U, 0x98U, 0x11U, 0x69U, 0xd9U, 0x8eU, 0x94U,
+    0x9bU, 0x1eU, 0x87U, 0xe9U, 0xceU, 0x55U, 0x28U, 0xdfU,
+    0x8cU, 0xa1U, 0x89U, 0x0dU, 0xbfU, 0xe6U, 0x42U, 0x68U,
+    0x41U, 0x99U, 0x2dU, 0x0fU, 0xb0U, 0x54U, 0xbbU, 0x16U
+};
+static const u32 rcon[] = {
+    0x01000000, 0x02000000, 0x04000000, 0x08000000,
+    0x10000000, 0x20000000, 0x40000000, 0x80000000,
+    0x1B000000, 0x36000000, /* for 128-bit blocks, Rijndael never uses more than 10 rcon values */
+};
+
+/**
+ * Expand the cipher key into the encryption key schedule.
+ */
+int AES_set_encrypt_key(const unsigned char *userKey, const int bits,
+                        AES_KEY *key)
+{
+    u32 *rk;
+    int i = 0;
+    u32 temp;
+
+    if (!userKey || !key)
+        return -1;
+    if (bits != 128 && bits != 192 && bits != 256)
+        return -2;
+
+    rk = key->rd_key;
+
+    if (bits == 128)
+        key->rounds = 10;
+    else if (bits == 192)
+        key->rounds = 12;
+    else
+        key->rounds = 14;
+
+    rk[0] = GETU32(userKey     );
+    rk[1] = GETU32(userKey +  4);
+    rk[2] = GETU32(userKey +  8);
+    rk[3] = GETU32(userKey + 12);
+    if (bits == 128) {
+        while (1) {
+            temp  = rk[3];
+            rk[4] = rk[0] ^
+                ((u32)Te4[(temp >> 16) & 0xff] << 24) ^
+                ((u32)Te4[(temp >>  8) & 0xff] << 16) ^
+                ((u32)Te4[(temp      ) & 0xff] << 8) ^
+                ((u32)Te4[(temp >> 24)       ]) ^
+                rcon[i];
+            rk[5] = rk[1] ^ rk[4];
+            rk[6] = rk[2] ^ rk[5];
+            rk[7] = rk[3] ^ rk[6];
+            if (++i == 10) {
+                return 0;
+            }
+            rk += 4;
+        }
+    }
+    rk[4] = GETU32(userKey + 16);
+    rk[5] = GETU32(userKey + 20);
+    if (bits == 192) {
+        while (1) {
+            temp = rk[ 5];
+            rk[ 6] = rk[ 0] ^
+                ((u32)Te4[(temp >> 16) & 0xff] << 24) ^
+                ((u32)Te4[(temp >>  8) & 0xff] << 16) ^
+                ((u32)Te4[(temp      ) & 0xff] << 8) ^
+                ((u32)Te4[(temp >> 24)       ]) ^
+                rcon[i];
+            rk[ 7] = rk[ 1] ^ rk[ 6];
+            rk[ 8] = rk[ 2] ^ rk[ 7];
+            rk[ 9] = rk[ 3] ^ rk[ 8];
+            if (++i == 8) {
+                return 0;
+            }
+            rk[10] = rk[ 4] ^ rk[ 9];
+            rk[11] = rk[ 5] ^ rk[10];
+            rk += 6;
+        }
+    }
+    rk[6] = GETU32(userKey + 24);
+    rk[7] = GETU32(userKey + 28);
+    if (bits == 256) {
+        while (1) {
+            temp = rk[ 7];
+            rk[ 8] = rk[ 0] ^
+                ((u32)Te4[(temp >> 16) & 0xff] << 24) ^
+                ((u32)Te4[(temp >>  8) & 0xff] << 16) ^
+                ((u32)Te4[(temp      ) & 0xff] << 8) ^
+                ((u32)Te4[(temp >> 24)       ]) ^
+                rcon[i];
+            rk[ 9] = rk[ 1] ^ rk[ 8];
+            rk[10] = rk[ 2] ^ rk[ 9];
+            rk[11] = rk[ 3] ^ rk[10];
+            if (++i == 7) {
+                return 0;
+            }
+            temp = rk[11];
+            rk[12] = rk[ 4] ^
+                ((u32)Te4[(temp >> 24)       ] << 24) ^
+                ((u32)Te4[(temp >> 16) & 0xff] << 16) ^
+                ((u32)Te4[(temp >>  8) & 0xff] << 8) ^
+                ((u32)Te4[(temp      ) & 0xff]);
+            rk[13] = rk[ 5] ^ rk[12];
+            rk[14] = rk[ 6] ^ rk[13];
+            rk[15] = rk[ 7] ^ rk[14];
+
+            rk += 8;
+        }
+    }
+    return 0;
+}
+
+/**
+ * Expand the cipher key into the decryption key schedule.
+ */
+int AES_set_decrypt_key(const unsigned char *userKey, const int bits,
+                        AES_KEY *key)
+{
+
+    u32 *rk;
+    int i, j, status;
+    u32 temp;
+
+    /* first, start with an encryption schedule */
+    status = AES_set_encrypt_key(userKey, bits, key);
+    if (status < 0)
+        return status;
+
+    rk = key->rd_key;
+
+    /* invert the order of the round keys: */
+    for (i = 0, j = 4*(key->rounds); i < j; i += 4, j -= 4) {
+        temp = rk[i    ]; rk[i    ] = rk[j    ]; rk[j    ] = temp;
+        temp = rk[i + 1]; rk[i + 1] = rk[j + 1]; rk[j + 1] = temp;
+        temp = rk[i + 2]; rk[i + 2] = rk[j + 2]; rk[j + 2] = temp;
+        temp = rk[i + 3]; rk[i + 3] = rk[j + 3]; rk[j + 3] = temp;
+    }
+    /* apply the inverse MixColumn transform to all round keys but the first and the last: */
+    for (i = 1; i < (key->rounds); i++) {
+        rk += 4;
+        for (j = 0; j < 4; j++) {
+            u32 tp1, tp2, tp4, tp8, tp9, tpb, tpd, tpe, m;
+
+            tp1 = rk[j];
+            m = tp1 & 0x80808080;
+            tp2 = ((tp1 & 0x7f7f7f7f) << 1) ^
+                ((m - (m >> 7)) & 0x1b1b1b1b);
+            m = tp2 & 0x80808080;
+            tp4 = ((tp2 & 0x7f7f7f7f) << 1) ^
+                ((m - (m >> 7)) & 0x1b1b1b1b);
+            m = tp4 & 0x80808080;
+            tp8 = ((tp4 & 0x7f7f7f7f) << 1) ^
+                ((m - (m >> 7)) & 0x1b1b1b1b);
+            tp9 = tp8 ^ tp1;
+            tpb = tp9 ^ tp2;
+            tpd = tp9 ^ tp4;
+            tpe = tp8 ^ tp4 ^ tp2;
+#if defined(ROTATE)
+            rk[j] = tpe ^ ROTATE(tpd,16) ^
+                ROTATE(tp9,24) ^ ROTATE(tpb,8);
+#else
+            rk[j] = tpe ^ (tpd >> 16) ^ (tpd << 16) ^
+                (tp9 >> 8) ^ (tp9 << 24) ^
+                (tpb >> 24) ^ (tpb << 8);
+#endif
+        }
+    }
+    return 0;
+}
+
+#endif /* AES_ASM */
diff --git a/ap/lib/libssl/openssl-1.1.1o/crypto/aes/aes_ecb.c b/ap/lib/libssl/openssl-1.1.1o/crypto/aes/aes_ecb.c
new file mode 100644
index 0000000..4fa360c
--- /dev/null
+++ b/ap/lib/libssl/openssl-1.1.1o/crypto/aes/aes_ecb.c
@@ -0,0 +1,26 @@
+/*
+ * Copyright 2002-2016 The OpenSSL Project Authors. All Rights Reserved.
+ *
+ * Licensed under the OpenSSL license (the "License").  You may not use
+ * this file except in compliance with the License.  You can obtain a copy
+ * in the file LICENSE in the source distribution or at
+ * https://www.openssl.org/source/license.html
+ */
+
+#include <assert.h>
+
+#include <openssl/aes.h>
+#include "aes_local.h"
+
+void AES_ecb_encrypt(const unsigned char *in, unsigned char *out,
+                     const AES_KEY *key, const int enc)
+{
+
+    assert(in && out && key);
+    assert((AES_ENCRYPT == enc) || (AES_DECRYPT == enc));
+
+    if (AES_ENCRYPT == enc)
+        AES_encrypt(in, out, key);
+    else
+        AES_decrypt(in, out, key);
+}
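
For reference, a minimal round-trip through this entry point (a sketch, not part of the diff; the key and plaintext below are arbitrary demo values, and the program links against this libcrypto). Note that AES_ecb_encrypt processes exactly one 16-byte block per call; streaming and padding live in the higher-level EVP interface.

#include <assert.h>
#include <string.h>
#include <openssl/aes.h>

int main(void)
{
    static const unsigned char demo_key[16] = "0123456789abcde"; /* 15 chars + NUL */
    unsigned char pt[AES_BLOCK_SIZE] = "single block!!";
    unsigned char ct[AES_BLOCK_SIZE], back[AES_BLOCK_SIZE];
    AES_KEY enc, dec;

    AES_set_encrypt_key(demo_key, 128, &enc);
    AES_set_decrypt_key(demo_key, 128, &dec);

    AES_ecb_encrypt(pt, ct, &enc, AES_ENCRYPT);   /* one 16-byte block */
    AES_ecb_encrypt(ct, back, &dec, AES_DECRYPT);
    assert(memcmp(pt, back, AES_BLOCK_SIZE) == 0);
    return 0;
}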
diff --git a/ap/lib/libssl/openssl-1.1.1o/crypto/aes/aes_ige.c b/ap/lib/libssl/openssl-1.1.1o/crypto/aes/aes_ige.c
new file mode 100644
index 0000000..804b3a7
--- /dev/null
+++ b/ap/lib/libssl/openssl-1.1.1o/crypto/aes/aes_ige.c
@@ -0,0 +1,288 @@
+/*
+ * Copyright 2006-2020 The OpenSSL Project Authors. All Rights Reserved.
+ *
+ * Licensed under the OpenSSL license (the "License").  You may not use
+ * this file except in compliance with the License.  You can obtain a copy
+ * in the file LICENSE in the source distribution or at
+ * https://www.openssl.org/source/license.html
+ */
+
+#include "internal/cryptlib.h"
+
+#include <openssl/aes.h>
+#include "aes_local.h"
+
+/* XXX: probably some better way to do this */
+#if defined(__i386__) || defined(__x86_64__)
+# define UNALIGNED_MEMOPS_ARE_FAST 1
+#else
+# define UNALIGNED_MEMOPS_ARE_FAST 0
+#endif
+
+#define N_WORDS (AES_BLOCK_SIZE / sizeof(unsigned long))
+typedef struct {
+    unsigned long data[N_WORDS];
+#if defined(__GNUC__) && UNALIGNED_MEMOPS_ARE_FAST
+} aes_block_t __attribute((__aligned__(1)));
+#else
+} aes_block_t;
+#endif
+
+#if UNALIGNED_MEMOPS_ARE_FAST
+# define load_block(d, s)        (d) = *(const aes_block_t *)(s)
+# define store_block(d, s)       *(aes_block_t *)(d) = (s)
+#else
+# define load_block(d, s)        memcpy((d).data, (s), AES_BLOCK_SIZE)
+# define store_block(d, s)       memcpy((d), (s).data, AES_BLOCK_SIZE)
+#endif
+
+/* N.B. The IV for this mode is _twice_ the block size */
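+/*
+ * IGE chains both previous blocks:
+ *     c_i = E_K(p_i ^ c_{i-1}) ^ p_{i-1}
+ * with c_0 taken from the first half of ivec and p_0 from the second,
+ * which is why the IV is two blocks wide.
+ */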
+
+void AES_ige_encrypt(const unsigned char *in, unsigned char *out,
+                     size_t length, const AES_KEY *key,
+                     unsigned char *ivec, const int enc)
+{
+    size_t n;
+    size_t len = length;
+
+    if (length == 0)
+        return;
+
+    OPENSSL_assert(in && out && key && ivec);
+    OPENSSL_assert((AES_ENCRYPT == enc) || (AES_DECRYPT == enc));
+    OPENSSL_assert((length % AES_BLOCK_SIZE) == 0);
+
+    len = length / AES_BLOCK_SIZE;
+
+    if (AES_ENCRYPT == enc) {
+        if (in != out &&
+            (UNALIGNED_MEMOPS_ARE_FAST
+             || ((size_t)in | (size_t)out | (size_t)ivec) % sizeof(long) ==
+             0)) {
+            aes_block_t *ivp = (aes_block_t *) ivec;
+            aes_block_t *iv2p = (aes_block_t *) (ivec + AES_BLOCK_SIZE);
+
+            while (len) {
+                aes_block_t *inp = (aes_block_t *) in;
+                aes_block_t *outp = (aes_block_t *) out;
+
+                for (n = 0; n < N_WORDS; ++n)
+                    outp->data[n] = inp->data[n] ^ ivp->data[n];
+                AES_encrypt((unsigned char *)outp->data,
+                            (unsigned char *)outp->data, key);
+                for (n = 0; n < N_WORDS; ++n)
+                    outp->data[n] ^= iv2p->data[n];
+                ivp = outp;
+                iv2p = inp;
+                --len;
+                in += AES_BLOCK_SIZE;
+                out += AES_BLOCK_SIZE;
+            }
+            memcpy(ivec, ivp->data, AES_BLOCK_SIZE);
+            memcpy(ivec + AES_BLOCK_SIZE, iv2p->data, AES_BLOCK_SIZE);
+        } else {
+            aes_block_t tmp, tmp2;
+            aes_block_t iv;
+            aes_block_t iv2;
+
+            load_block(iv, ivec);
+            load_block(iv2, ivec + AES_BLOCK_SIZE);
+
+            while (len) {
+                load_block(tmp, in);
+                for (n = 0; n < N_WORDS; ++n)
+                    tmp2.data[n] = tmp.data[n] ^ iv.data[n];
+                AES_encrypt((unsigned char *)tmp2.data,
+                            (unsigned char *)tmp2.data, key);
+                for (n = 0; n < N_WORDS; ++n)
+                    tmp2.data[n] ^= iv2.data[n];
+                store_block(out, tmp2);
+                iv = tmp2;
+                iv2 = tmp;
+                --len;
+                in += AES_BLOCK_SIZE;
+                out += AES_BLOCK_SIZE;
+            }
+            memcpy(ivec, iv.data, AES_BLOCK_SIZE);
+            memcpy(ivec + AES_BLOCK_SIZE, iv2.data, AES_BLOCK_SIZE);
+        }
+    } else {
+        if (in != out &&
+            (UNALIGNED_MEMOPS_ARE_FAST
+             || ((size_t)in | (size_t)out | (size_t)ivec) % sizeof(long) ==
+             0)) {
+            aes_block_t *ivp = (aes_block_t *) ivec;
+            aes_block_t *iv2p = (aes_block_t *) (ivec + AES_BLOCK_SIZE);
+
+            while (len) {
+                aes_block_t tmp;
+                aes_block_t *inp = (aes_block_t *) in;
+                aes_block_t *outp = (aes_block_t *) out;
+
+                for (n = 0; n < N_WORDS; ++n)
+                    tmp.data[n] = inp->data[n] ^ iv2p->data[n];
+                AES_decrypt((unsigned char *)tmp.data,
+                            (unsigned char *)outp->data, key);
+                for (n = 0; n < N_WORDS; ++n)
+                    outp->data[n] ^= ivp->data[n];
+                ivp = inp;
+                iv2p = outp;
+                --len;
+                in += AES_BLOCK_SIZE;
+                out += AES_BLOCK_SIZE;
+            }
+            memcpy(ivec, ivp->data, AES_BLOCK_SIZE);
+            memcpy(ivec + AES_BLOCK_SIZE, iv2p->data, AES_BLOCK_SIZE);
+        } else {
+            aes_block_t tmp, tmp2;
+            aes_block_t iv;
+            aes_block_t iv2;
+
+            load_block(iv, ivec);
+            load_block(iv2, ivec + AES_BLOCK_SIZE);
+
+            while (len) {
+                load_block(tmp, in);
+                tmp2 = tmp;
+                for (n = 0; n < N_WORDS; ++n)
+                    tmp.data[n] ^= iv2.data[n];
+                AES_decrypt((unsigned char *)tmp.data,
+                            (unsigned char *)tmp.data, key);
+                for (n = 0; n < N_WORDS; ++n)
+                    tmp.data[n] ^= iv.data[n];
+                store_block(out, tmp);
+                iv = tmp2;
+                iv2 = tmp;
+                --len;
+                in += AES_BLOCK_SIZE;
+                out += AES_BLOCK_SIZE;
+            }
+            memcpy(ivec, iv.data, AES_BLOCK_SIZE);
+            memcpy(ivec + AES_BLOCK_SIZE, iv2.data, AES_BLOCK_SIZE);
+        }
+    }
+}
+
+/*
+ * Note that it's effectively impossible to do bi-IGE in anything other
+ * than a single pass, so no provision is made for chaining.
+ */
+
+/* N.B. The IV for this mode is _four times_ the block size */
+
+void AES_bi_ige_encrypt(const unsigned char *in, unsigned char *out,
+                        size_t length, const AES_KEY *key,
+                        const AES_KEY *key2, const unsigned char *ivec,
+                        const int enc)
+{
+    size_t n;
+    size_t len = length;
+    unsigned char tmp[AES_BLOCK_SIZE];
+    unsigned char tmp2[AES_BLOCK_SIZE];
+    unsigned char tmp3[AES_BLOCK_SIZE];
+    unsigned char prev[AES_BLOCK_SIZE];
+    const unsigned char *iv;
+    const unsigned char *iv2;
+
+    OPENSSL_assert(in && out && key && ivec);
+    OPENSSL_assert((AES_ENCRYPT == enc) || (AES_DECRYPT == enc));
+    OPENSSL_assert((length % AES_BLOCK_SIZE) == 0);
+
+    if (AES_ENCRYPT == enc) {
+        /*
+         * XXX: Do a separate case for when in != out (strictly should check
+         * for overlap, too)
+         */
+
+        /* First the forward pass */
+        iv = ivec;
+        iv2 = ivec + AES_BLOCK_SIZE;
+        while (len >= AES_BLOCK_SIZE) {
+            for (n = 0; n < AES_BLOCK_SIZE; ++n)
+                out[n] = in[n] ^ iv[n];
+            AES_encrypt(out, out, key);
+            for (n = 0; n < AES_BLOCK_SIZE; ++n)
+                out[n] ^= iv2[n];
+            iv = out;
+            memcpy(prev, in, AES_BLOCK_SIZE);
+            iv2 = prev;
+            len -= AES_BLOCK_SIZE;
+            in += AES_BLOCK_SIZE;
+            out += AES_BLOCK_SIZE;
+        }
+
+        /* And now backwards */
+        iv = ivec + AES_BLOCK_SIZE * 2;
+        iv2 = ivec + AES_BLOCK_SIZE * 3;
+        len = length;
+        while (len >= AES_BLOCK_SIZE) {
+            out -= AES_BLOCK_SIZE;
+            /*
+             * XXX: reduce copies by alternating between buffers
+             */
+            memcpy(tmp, out, AES_BLOCK_SIZE);
+            for (n = 0; n < AES_BLOCK_SIZE; ++n)
+                out[n] ^= iv[n];
+            /*
+             * hexdump(stdout, "out ^ iv", out, AES_BLOCK_SIZE);
+             */
+            AES_encrypt(out, out, key);
+            /*
+             * hexdump(stdout,"enc", out, AES_BLOCK_SIZE);
+             */
+            /*
+             * hexdump(stdout,"iv2", iv2, AES_BLOCK_SIZE);
+             */
+            for (n = 0; n < AES_BLOCK_SIZE; ++n)
+                out[n] ^= iv2[n];
+            /*
+             * hexdump(stdout,"out", out, AES_BLOCK_SIZE);
+             */
+            iv = out;
+            memcpy(prev, tmp, AES_BLOCK_SIZE);
+            iv2 = prev;
+            len -= AES_BLOCK_SIZE;
+        }
+    } else {
+        /* First backwards */
+        iv = ivec + AES_BLOCK_SIZE * 2;
+        iv2 = ivec + AES_BLOCK_SIZE * 3;
+        in += length;
+        out += length;
+        while (len >= AES_BLOCK_SIZE) {
+            in -= AES_BLOCK_SIZE;
+            out -= AES_BLOCK_SIZE;
+            memcpy(tmp, in, AES_BLOCK_SIZE);
+            memcpy(tmp2, in, AES_BLOCK_SIZE);
+            for (n = 0; n < AES_BLOCK_SIZE; ++n)
+                tmp[n] ^= iv2[n];
+            AES_decrypt(tmp, out, key);
+            for (n = 0; n < AES_BLOCK_SIZE; ++n)
+                out[n] ^= iv[n];
+            memcpy(tmp3, tmp2, AES_BLOCK_SIZE);
+            iv = tmp3;
+            iv2 = out;
+            len -= AES_BLOCK_SIZE;
+        }
+
+        /* And now forwards */
+        iv = ivec;
+        iv2 = ivec + AES_BLOCK_SIZE;
+        len = length;
+        while (len >= AES_BLOCK_SIZE) {
+            memcpy(tmp, out, AES_BLOCK_SIZE);
+            memcpy(tmp2, out, AES_BLOCK_SIZE);
+            for (n = 0; n < AES_BLOCK_SIZE; ++n)
+                tmp[n] ^= iv2[n];
+            AES_decrypt(tmp, out, key);
+            for (n = 0; n < AES_BLOCK_SIZE; ++n)
+                out[n] ^= iv[n];
+            memcpy(tmp3, tmp2, AES_BLOCK_SIZE);
+            iv = tmp3;
+            iv2 = out;
+            len -= AES_BLOCK_SIZE;
+            in += AES_BLOCK_SIZE;
+            out += AES_BLOCK_SIZE;
+        }
+    }
+}
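
A usage sketch for the IGE entry point (not part of the diff; the key is a placeholder value). The IV buffer is two blocks wide and is updated in place, so consecutive calls continue the stream; decryption needs the decryption key schedule, since AES_ige_encrypt calls AES_decrypt internally when enc is AES_DECRYPT. The length must be a multiple of the block size.

#include <string.h>
#include <openssl/aes.h>

void demo_ige(void)
{
    static const unsigned char demo_key[16] = { 0 };  /* placeholder key */
    unsigned char iv[2 * AES_BLOCK_SIZE] = { 0 };     /* c0 || p0        */
    unsigned char buf[4 * AES_BLOCK_SIZE] = { 0 };    /* multiple of 16  */
    AES_KEY ks;

    AES_set_encrypt_key(demo_key, 128, &ks);
    /* encrypt in place; iv[] is advanced past the last block */
    AES_ige_encrypt(buf, buf, sizeof(buf), &ks, iv, AES_ENCRYPT);

    /* to decrypt, restore the original IV and use the decryption schedule */
    memset(iv, 0, sizeof(iv));
    AES_set_decrypt_key(demo_key, 128, &ks);
    AES_ige_encrypt(buf, buf, sizeof(buf), &ks, iv, AES_DECRYPT);
}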
diff --git a/ap/lib/libssl/openssl-1.1.1o/crypto/aes/aes_local.h b/ap/lib/libssl/openssl-1.1.1o/crypto/aes/aes_local.h
new file mode 100644
index 0000000..a9c0059
--- /dev/null
+++ b/ap/lib/libssl/openssl-1.1.1o/crypto/aes/aes_local.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2002-2020 The OpenSSL Project Authors. All Rights Reserved.
+ *
+ * Licensed under the OpenSSL license (the "License").  You may not use
+ * this file except in compliance with the License.  You can obtain a copy
+ * in the file LICENSE in the source distribution or at
+ * https://www.openssl.org/source/license.html
+ */
+
+#ifndef OSSL_CRYPTO_AES_LOCAL_H
+# define OSSL_CRYPTO_AES_LOCAL_H
+
+# include <openssl/e_os2.h>
+# include <stdio.h>
+# include <stdlib.h>
+# include <string.h>
+
+# if defined(_MSC_VER) && (defined(_M_IX86) || defined(_M_AMD64) || defined(_M_X64))
+#  define SWAP(x) (_lrotl(x, 8) & 0x00ff00ff | _lrotr(x, 8) & 0xff00ff00)
+#  define GETU32(p) SWAP(*((u32 *)(p)))
+#  define PUTU32(ct, st) { *((u32 *)(ct)) = SWAP((st)); }
+# else
+#  define GETU32(pt) (((u32)(pt)[0] << 24) ^ ((u32)(pt)[1] << 16) ^ ((u32)(pt)[2] <<  8) ^ ((u32)(pt)[3]))
+#  define PUTU32(ct, st) { (ct)[0] = (u8)((st) >> 24); (ct)[1] = (u8)((st) >> 16); (ct)[2] = (u8)((st) >>  8); (ct)[3] = (u8)(st); }
+# endif
+
+typedef unsigned long long u64;
+# ifdef AES_LONG
+typedef unsigned long u32;
+# else
+typedef unsigned int u32;
+# endif
+typedef unsigned short u16;
+typedef unsigned char u8;
+
+# define MAXKC   (256/32)
+# define MAXKB   (256/8)
+# define MAXNR   14
+
+/* This controls loop-unrolling in aes_core.c */
+# undef FULL_UNROLL
+
+#endif                          /* !OSSL_CRYPTO_AES_LOCAL_H */
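
The generic GETU32/PUTU32 pair above reads and writes 32-bit words in big-endian byte order regardless of host endianness; the MSC variant reaches the same result with rotate-based byte swapping. A standalone check of the generic path (a sketch, not part of the diff; the macros are copied from the header):

#include <assert.h>

typedef unsigned int u32;
typedef unsigned char u8;
#define GETU32(pt) (((u32)(pt)[0] << 24) ^ ((u32)(pt)[1] << 16) ^ \
                    ((u32)(pt)[2] <<  8) ^ ((u32)(pt)[3]))
#define PUTU32(ct, st) { (ct)[0] = (u8)((st) >> 24); (ct)[1] = (u8)((st) >> 16); \
                         (ct)[2] = (u8)((st) >>  8); (ct)[3] = (u8)(st); }

int main(void)
{
    const u8 in[4] = { 0x01, 0x02, 0x03, 0x04 };
    u8 out[4];
    u32 w = GETU32(in);
    assert(w == 0x01020304U);                /* big-endian load on any host */
    PUTU32(out, w);
    assert(out[0] == 0x01 && out[3] == 0x04); /* round-trips byte-for-byte  */
    return 0;
}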
diff --git a/ap/lib/libssl/openssl-1.1.1o/crypto/aes/aes_misc.c b/ap/lib/libssl/openssl-1.1.1o/crypto/aes/aes_misc.c
new file mode 100644
index 0000000..e0edc72
--- /dev/null
+++ b/ap/lib/libssl/openssl-1.1.1o/crypto/aes/aes_misc.c
@@ -0,0 +1,21 @@
+/*
+ * Copyright 2002-2016 The OpenSSL Project Authors. All Rights Reserved.
+ *
+ * Licensed under the OpenSSL license (the "License").  You may not use
+ * this file except in compliance with the License.  You can obtain a copy
+ * in the file LICENSE in the source distribution or at
+ * https://www.openssl.org/source/license.html
+ */
+
+#include <openssl/opensslv.h>
+#include <openssl/aes.h>
+#include "aes_local.h"
+
+const char *AES_options(void)
+{
+#ifdef FULL_UNROLL
+    return "aes(full)";
+#else
+    return "aes(partial)";
+#endif
+}
diff --git a/ap/lib/libssl/openssl-1.1.1o/crypto/aes/aes_ofb.c b/ap/lib/libssl/openssl-1.1.1o/crypto/aes/aes_ofb.c
new file mode 100644
index 0000000..215b538
--- /dev/null
+++ b/ap/lib/libssl/openssl-1.1.1o/crypto/aes/aes_ofb.c
@@ -0,0 +1,19 @@
+/*
+ * Copyright 2002-2016 The OpenSSL Project Authors. All Rights Reserved.
+ *
+ * Licensed under the OpenSSL license (the "License").  You may not use
+ * this file except in compliance with the License.  You can obtain a copy
+ * in the file LICENSE in the source distribution or at
+ * https://www.openssl.org/source/license.html
+ */
+
+#include <openssl/aes.h>
+#include <openssl/modes.h>
+
+void AES_ofb128_encrypt(const unsigned char *in, unsigned char *out,
+                        size_t length, const AES_KEY *key,
+                        unsigned char *ivec, int *num)
+{
+    CRYPTO_ofb128_encrypt(in, out, length, key, ivec, num,
+                          (block128_f) AES_encrypt);
+}
diff --git a/ap/lib/libssl/openssl-1.1.1o/crypto/aes/aes_wrap.c b/ap/lib/libssl/openssl-1.1.1o/crypto/aes/aes_wrap.c
new file mode 100644
index 0000000..cae0b21
--- /dev/null
+++ b/ap/lib/libssl/openssl-1.1.1o/crypto/aes/aes_wrap.c
@@ -0,0 +1,27 @@
+/*
+ * Copyright 2008-2016 The OpenSSL Project Authors. All Rights Reserved.
+ *
+ * Licensed under the OpenSSL license (the "License").  You may not use
+ * this file except in compliance with the License.  You can obtain a copy
+ * in the file LICENSE in the source distribution or at
+ * https://www.openssl.org/source/license.html
+ */
+
+#include "internal/cryptlib.h"
+#include <openssl/aes.h>
+#include <openssl/modes.h>
+
+int AES_wrap_key(AES_KEY *key, const unsigned char *iv,
+                 unsigned char *out,
+                 const unsigned char *in, unsigned int inlen)
+{
+    return CRYPTO_128_wrap(key, iv, out, in, inlen, (block128_f) AES_encrypt);
+}
+
+int AES_unwrap_key(AES_KEY *key, const unsigned char *iv,
+                   unsigned char *out,
+                   const unsigned char *in, unsigned int inlen)
+{
+    return CRYPTO_128_unwrap(key, iv, out, in, inlen,
+                             (block128_f) AES_decrypt);
+}
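
A sketch of the RFC 3394 wrap/unwrap pair (not part of the diff; the KEK bytes are a placeholder). Wrapping n bytes yields n + 8; passing a NULL iv selects the default integrity-check value of 0xA6 repeated eight times, and both calls return 0 on failure:

#include <assert.h>
#include <openssl/aes.h>

int main(void)
{
    static const unsigned char kek_bytes[16] = { 0 }; /* placeholder KEK */
    unsigned char cek[16] = { 1, 2, 3 };              /* key to wrap     */
    unsigned char wrapped[16 + 8], unwrapped[16];
    AES_KEY wk, uk;
    int n;

    AES_set_encrypt_key(kek_bytes, 128, &wk);
    n = AES_wrap_key(&wk, NULL, wrapped, cek, sizeof(cek));
    assert(n == (int)sizeof(cek) + 8);

    AES_set_decrypt_key(kek_bytes, 128, &uk);
    n = AES_unwrap_key(&uk, NULL, unwrapped, wrapped, sizeof(wrapped));
    assert(n == (int)sizeof(cek));
    return 0;
}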
diff --git a/ap/lib/libssl/openssl-1.1.1o/crypto/aes/aes_x86core.c b/ap/lib/libssl/openssl-1.1.1o/crypto/aes/aes_x86core.c
new file mode 100644
index 0000000..50b53ab
--- /dev/null
+++ b/ap/lib/libssl/openssl-1.1.1o/crypto/aes/aes_x86core.c
@@ -0,0 +1,1074 @@
+/*
+ * Copyright 2006-2016 The OpenSSL Project Authors. All Rights Reserved.
+ *
+ * Licensed under the OpenSSL license (the "License").  You may not use
+ * this file except in compliance with the License.  You can obtain a copy
+ * in the file LICENSE in the source distribution or at
+ * https://www.openssl.org/source/license.html
+ */
+
+/*
+ * This is an experimental x86[_64] derivative. It assumes little-endian
+ * byte order and expects the CPU to sustain unaligned memory references.
+ * It is used as a playground for cache-timing attack mitigations and
+ * serves as the reference C implementation for x86[_64] as well as some
+ * other assembly modules.
+ */
+
+/**
+ * rijndael-alg-fst.c
+ *
+ * @version 3.0 (December 2000)
+ *
+ * Optimised ANSI C code for the Rijndael cipher (now AES)
+ *
+ * @author Vincent Rijmen
+ * @author Antoon Bosselaers
+ * @author Paulo Barreto
+ *
+ * This code is hereby placed in the public domain.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ''AS IS'' AND ANY EXPRESS
+ * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+#include <assert.h>
+
+#include <stdlib.h>
+#include <openssl/aes.h>
+#include "aes_local.h"
+
+/*
+ * These two parameters control which table, 256-byte or 2KB, is
+ * referenced in the outer and inner rounds, respectively.
+ */
+#define AES_COMPACT_IN_OUTER_ROUNDS
+#ifdef  AES_COMPACT_IN_OUTER_ROUNDS
+/* AES_COMPACT_IN_OUTER_ROUNDS costs ~30% in performance, while
+ * adding AES_COMPACT_IN_INNER_ROUNDS reduces the benchmark result
+ * *further* by a factor of ~2. */
+# undef  AES_COMPACT_IN_INNER_ROUNDS
+#endif
+
+#if 1
+static void prefetch256(const void *table)
+{
+    volatile unsigned long *t=(void *)table,ret;
+    unsigned long sum;
+    int i;
+
+    /* 32 bytes is the smallest common cache-line size */
+    for (sum=0,i=0;i<256/sizeof(t[0]);i+=32/sizeof(t[0]))   sum ^= t[i];
+
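+    /* ret is volatile, so this dummy store keeps the reads above alive */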
+    ret = sum;
+}
+#else
+# define prefetch256(t)
+#endif
+
+#undef GETU32
+#define GETU32(p) (*((u32*)(p)))
+
+#if (defined(_WIN32) || defined(_WIN64)) && !defined(__MINGW32__)
+typedef unsigned __int64 u64;
+#define U64(C)  C##UI64
+#elif defined(__arch64__)
+typedef unsigned long u64;
+#define U64(C)  C##UL
+#else
+typedef unsigned long long u64;
+#define U64(C)  C##ULL
+#endif
+
+#undef ROTATE
+#if defined(_MSC_VER)
+# define ROTATE(a,n)    _lrotl(a,n)
+#elif defined(__ICC)
+# define ROTATE(a,n)    _rotl(a,n)
+#elif defined(__GNUC__) && __GNUC__>=2
+# if defined(__i386) || defined(__i386__) || defined(__x86_64) || defined(__x86_64__)
+#   define ROTATE(a,n)  ({ register unsigned int ret;   \
+                asm (           \
+                "roll %1,%0"        \
+                : "=r"(ret)     \
+                : "I"(n), "0"(a)    \
+                : "cc");        \
+               ret;             \
+            })
+# endif
+#endif
+/*-
+Te [x] = S [x].[02, 01, 01, 03, 02, 01, 01, 03];
+Te0[x] = S [x].[02, 01, 01, 03];
+Te1[x] = S [x].[03, 02, 01, 01];
+Te2[x] = S [x].[01, 03, 02, 01];
+Te3[x] = S [x].[01, 01, 03, 02];
+*/
+#define Te0 (u32)((u64*)((u8*)Te+0))
+#define Te1 (u32)((u64*)((u8*)Te+3))
+#define Te2 (u32)((u64*)((u8*)Te+2))
+#define Te3 (u32)((u64*)((u8*)Te+1))
+/*-
+Td [x] = Si[x].[0e, 09, 0d, 0b, 0e, 09, 0d, 0b];
+Td0[x] = Si[x].[0e, 09, 0d, 0b];
+Td1[x] = Si[x].[0b, 0e, 09, 0d];
+Td2[x] = Si[x].[0d, 0b, 0e, 09];
+Td3[x] = Si[x].[09, 0d, 0b, 0e];
+Td4[x] = Si[x].[01];
+*/
+#define Td0 (u32)((u64*)((u8*)Td+0))
+#define Td1 (u32)((u64*)((u8*)Td+3))
+#define Td2 (u32)((u64*)((u8*)Td+2))
+#define Td3 (u32)((u64*)((u8*)Td+1))
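+
+/*
+ * Each 64-bit entry below stores its 32-bit column twice, so a u64
+ * load taken k bytes into an entry and truncated to u32 yields the
+ * column rotated right by 8*k bits on this (little-endian,
+ * unaligned-tolerant) target: for Te[0] = 0xa56363c6a56363c6, a 4-byte
+ * read at offset 1 gives 0xc6a56363. The macros above index the u64
+ * table first and then truncate, since [] binds tighter than the cast,
+ * giving four "virtual" 32-bit tables from one 2KB array.
+ */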
+
+static const u64 Te[256] = {
+    U64(0xa56363c6a56363c6), U64(0x847c7cf8847c7cf8),
+    U64(0x997777ee997777ee), U64(0x8d7b7bf68d7b7bf6),
+    U64(0x0df2f2ff0df2f2ff), U64(0xbd6b6bd6bd6b6bd6),
+    U64(0xb16f6fdeb16f6fde), U64(0x54c5c59154c5c591),
+    U64(0x5030306050303060), U64(0x0301010203010102),
+    U64(0xa96767cea96767ce), U64(0x7d2b2b567d2b2b56),
+    U64(0x19fefee719fefee7), U64(0x62d7d7b562d7d7b5),
+    U64(0xe6abab4de6abab4d), U64(0x9a7676ec9a7676ec),
+    U64(0x45caca8f45caca8f), U64(0x9d82821f9d82821f),
+    U64(0x40c9c98940c9c989), U64(0x877d7dfa877d7dfa),
+    U64(0x15fafaef15fafaef), U64(0xeb5959b2eb5959b2),
+    U64(0xc947478ec947478e), U64(0x0bf0f0fb0bf0f0fb),
+    U64(0xecadad41ecadad41), U64(0x67d4d4b367d4d4b3),
+    U64(0xfda2a25ffda2a25f), U64(0xeaafaf45eaafaf45),
+    U64(0xbf9c9c23bf9c9c23), U64(0xf7a4a453f7a4a453),
+    U64(0x967272e4967272e4), U64(0x5bc0c09b5bc0c09b),
+    U64(0xc2b7b775c2b7b775), U64(0x1cfdfde11cfdfde1),
+    U64(0xae93933dae93933d), U64(0x6a26264c6a26264c),
+    U64(0x5a36366c5a36366c), U64(0x413f3f7e413f3f7e),
+    U64(0x02f7f7f502f7f7f5), U64(0x4fcccc834fcccc83),
+    U64(0x5c3434685c343468), U64(0xf4a5a551f4a5a551),
+    U64(0x34e5e5d134e5e5d1), U64(0x08f1f1f908f1f1f9),
+    U64(0x937171e2937171e2), U64(0x73d8d8ab73d8d8ab),
+    U64(0x5331316253313162), U64(0x3f15152a3f15152a),
+    U64(0x0c0404080c040408), U64(0x52c7c79552c7c795),
+    U64(0x6523234665232346), U64(0x5ec3c39d5ec3c39d),
+    U64(0x2818183028181830), U64(0xa1969637a1969637),
+    U64(0x0f05050a0f05050a), U64(0xb59a9a2fb59a9a2f),
+    U64(0x0907070e0907070e), U64(0x3612122436121224),
+    U64(0x9b80801b9b80801b), U64(0x3de2e2df3de2e2df),
+    U64(0x26ebebcd26ebebcd), U64(0x6927274e6927274e),
+    U64(0xcdb2b27fcdb2b27f), U64(0x9f7575ea9f7575ea),
+    U64(0x1b0909121b090912), U64(0x9e83831d9e83831d),
+    U64(0x742c2c58742c2c58), U64(0x2e1a1a342e1a1a34),
+    U64(0x2d1b1b362d1b1b36), U64(0xb26e6edcb26e6edc),
+    U64(0xee5a5ab4ee5a5ab4), U64(0xfba0a05bfba0a05b),
+    U64(0xf65252a4f65252a4), U64(0x4d3b3b764d3b3b76),
+    U64(0x61d6d6b761d6d6b7), U64(0xceb3b37dceb3b37d),
+    U64(0x7b2929527b292952), U64(0x3ee3e3dd3ee3e3dd),
+    U64(0x712f2f5e712f2f5e), U64(0x9784841397848413),
+    U64(0xf55353a6f55353a6), U64(0x68d1d1b968d1d1b9),
+    U64(0x0000000000000000), U64(0x2cededc12cededc1),
+    U64(0x6020204060202040), U64(0x1ffcfce31ffcfce3),
+    U64(0xc8b1b179c8b1b179), U64(0xed5b5bb6ed5b5bb6),
+    U64(0xbe6a6ad4be6a6ad4), U64(0x46cbcb8d46cbcb8d),
+    U64(0xd9bebe67d9bebe67), U64(0x4b3939724b393972),
+    U64(0xde4a4a94de4a4a94), U64(0xd44c4c98d44c4c98),
+    U64(0xe85858b0e85858b0), U64(0x4acfcf854acfcf85),
+    U64(0x6bd0d0bb6bd0d0bb), U64(0x2aefefc52aefefc5),
+    U64(0xe5aaaa4fe5aaaa4f), U64(0x16fbfbed16fbfbed),
+    U64(0xc5434386c5434386), U64(0xd74d4d9ad74d4d9a),
+    U64(0x5533336655333366), U64(0x9485851194858511),
+    U64(0xcf45458acf45458a), U64(0x10f9f9e910f9f9e9),
+    U64(0x0602020406020204), U64(0x817f7ffe817f7ffe),
+    U64(0xf05050a0f05050a0), U64(0x443c3c78443c3c78),
+    U64(0xba9f9f25ba9f9f25), U64(0xe3a8a84be3a8a84b),
+    U64(0xf35151a2f35151a2), U64(0xfea3a35dfea3a35d),
+    U64(0xc0404080c0404080), U64(0x8a8f8f058a8f8f05),
+    U64(0xad92923fad92923f), U64(0xbc9d9d21bc9d9d21),
+    U64(0x4838387048383870), U64(0x04f5f5f104f5f5f1),
+    U64(0xdfbcbc63dfbcbc63), U64(0xc1b6b677c1b6b677),
+    U64(0x75dadaaf75dadaaf), U64(0x6321214263212142),
+    U64(0x3010102030101020), U64(0x1affffe51affffe5),
+    U64(0x0ef3f3fd0ef3f3fd), U64(0x6dd2d2bf6dd2d2bf),
+    U64(0x4ccdcd814ccdcd81), U64(0x140c0c18140c0c18),
+    U64(0x3513132635131326), U64(0x2fececc32fececc3),
+    U64(0xe15f5fbee15f5fbe), U64(0xa2979735a2979735),
+    U64(0xcc444488cc444488), U64(0x3917172e3917172e),
+    U64(0x57c4c49357c4c493), U64(0xf2a7a755f2a7a755),
+    U64(0x827e7efc827e7efc), U64(0x473d3d7a473d3d7a),
+    U64(0xac6464c8ac6464c8), U64(0xe75d5dbae75d5dba),
+    U64(0x2b1919322b191932), U64(0x957373e6957373e6),
+    U64(0xa06060c0a06060c0), U64(0x9881811998818119),
+    U64(0xd14f4f9ed14f4f9e), U64(0x7fdcdca37fdcdca3),
+    U64(0x6622224466222244), U64(0x7e2a2a547e2a2a54),
+    U64(0xab90903bab90903b), U64(0x8388880b8388880b),
+    U64(0xca46468cca46468c), U64(0x29eeeec729eeeec7),
+    U64(0xd3b8b86bd3b8b86b), U64(0x3c1414283c141428),
+    U64(0x79dedea779dedea7), U64(0xe25e5ebce25e5ebc),
+    U64(0x1d0b0b161d0b0b16), U64(0x76dbdbad76dbdbad),
+    U64(0x3be0e0db3be0e0db), U64(0x5632326456323264),
+    U64(0x4e3a3a744e3a3a74), U64(0x1e0a0a141e0a0a14),
+    U64(0xdb494992db494992), U64(0x0a06060c0a06060c),
+    U64(0x6c2424486c242448), U64(0xe45c5cb8e45c5cb8),
+    U64(0x5dc2c29f5dc2c29f), U64(0x6ed3d3bd6ed3d3bd),
+    U64(0xefacac43efacac43), U64(0xa66262c4a66262c4),
+    U64(0xa8919139a8919139), U64(0xa4959531a4959531),
+    U64(0x37e4e4d337e4e4d3), U64(0x8b7979f28b7979f2),
+    U64(0x32e7e7d532e7e7d5), U64(0x43c8c88b43c8c88b),
+    U64(0x5937376e5937376e), U64(0xb76d6ddab76d6dda),
+    U64(0x8c8d8d018c8d8d01), U64(0x64d5d5b164d5d5b1),
+    U64(0xd24e4e9cd24e4e9c), U64(0xe0a9a949e0a9a949),
+    U64(0xb46c6cd8b46c6cd8), U64(0xfa5656acfa5656ac),
+    U64(0x07f4f4f307f4f4f3), U64(0x25eaeacf25eaeacf),
+    U64(0xaf6565caaf6565ca), U64(0x8e7a7af48e7a7af4),
+    U64(0xe9aeae47e9aeae47), U64(0x1808081018080810),
+    U64(0xd5baba6fd5baba6f), U64(0x887878f0887878f0),
+    U64(0x6f25254a6f25254a), U64(0x722e2e5c722e2e5c),
+    U64(0x241c1c38241c1c38), U64(0xf1a6a657f1a6a657),
+    U64(0xc7b4b473c7b4b473), U64(0x51c6c69751c6c697),
+    U64(0x23e8e8cb23e8e8cb), U64(0x7cdddda17cdddda1),
+    U64(0x9c7474e89c7474e8), U64(0x211f1f3e211f1f3e),
+    U64(0xdd4b4b96dd4b4b96), U64(0xdcbdbd61dcbdbd61),
+    U64(0x868b8b0d868b8b0d), U64(0x858a8a0f858a8a0f),
+    U64(0x907070e0907070e0), U64(0x423e3e7c423e3e7c),
+    U64(0xc4b5b571c4b5b571), U64(0xaa6666ccaa6666cc),
+    U64(0xd8484890d8484890), U64(0x0503030605030306),
+    U64(0x01f6f6f701f6f6f7), U64(0x120e0e1c120e0e1c),
+    U64(0xa36161c2a36161c2), U64(0x5f35356a5f35356a),
+    U64(0xf95757aef95757ae), U64(0xd0b9b969d0b9b969),
+    U64(0x9186861791868617), U64(0x58c1c19958c1c199),
+    U64(0x271d1d3a271d1d3a), U64(0xb99e9e27b99e9e27),
+    U64(0x38e1e1d938e1e1d9), U64(0x13f8f8eb13f8f8eb),
+    U64(0xb398982bb398982b), U64(0x3311112233111122),
+    U64(0xbb6969d2bb6969d2), U64(0x70d9d9a970d9d9a9),
+    U64(0x898e8e07898e8e07), U64(0xa7949433a7949433),
+    U64(0xb69b9b2db69b9b2d), U64(0x221e1e3c221e1e3c),
+    U64(0x9287871592878715), U64(0x20e9e9c920e9e9c9),
+    U64(0x49cece8749cece87), U64(0xff5555aaff5555aa),
+    U64(0x7828285078282850), U64(0x7adfdfa57adfdfa5),
+    U64(0x8f8c8c038f8c8c03), U64(0xf8a1a159f8a1a159),
+    U64(0x8089890980898909), U64(0x170d0d1a170d0d1a),
+    U64(0xdabfbf65dabfbf65), U64(0x31e6e6d731e6e6d7),
+    U64(0xc6424284c6424284), U64(0xb86868d0b86868d0),
+    U64(0xc3414182c3414182), U64(0xb0999929b0999929),
+    U64(0x772d2d5a772d2d5a), U64(0x110f0f1e110f0f1e),
+    U64(0xcbb0b07bcbb0b07b), U64(0xfc5454a8fc5454a8),
+    U64(0xd6bbbb6dd6bbbb6d), U64(0x3a16162c3a16162c)
+};
+
+static const u8 Te4[256] = {
+    0x63U, 0x7cU, 0x77U, 0x7bU, 0xf2U, 0x6bU, 0x6fU, 0xc5U,
+    0x30U, 0x01U, 0x67U, 0x2bU, 0xfeU, 0xd7U, 0xabU, 0x76U,
+    0xcaU, 0x82U, 0xc9U, 0x7dU, 0xfaU, 0x59U, 0x47U, 0xf0U,
+    0xadU, 0xd4U, 0xa2U, 0xafU, 0x9cU, 0xa4U, 0x72U, 0xc0U,
+    0xb7U, 0xfdU, 0x93U, 0x26U, 0x36U, 0x3fU, 0xf7U, 0xccU,
+    0x34U, 0xa5U, 0xe5U, 0xf1U, 0x71U, 0xd8U, 0x31U, 0x15U,
+    0x04U, 0xc7U, 0x23U, 0xc3U, 0x18U, 0x96U, 0x05U, 0x9aU,
+    0x07U, 0x12U, 0x80U, 0xe2U, 0xebU, 0x27U, 0xb2U, 0x75U,
+    0x09U, 0x83U, 0x2cU, 0x1aU, 0x1bU, 0x6eU, 0x5aU, 0xa0U,
+    0x52U, 0x3bU, 0xd6U, 0xb3U, 0x29U, 0xe3U, 0x2fU, 0x84U,
+    0x53U, 0xd1U, 0x00U, 0xedU, 0x20U, 0xfcU, 0xb1U, 0x5bU,
+    0x6aU, 0xcbU, 0xbeU, 0x39U, 0x4aU, 0x4cU, 0x58U, 0xcfU,
+    0xd0U, 0xefU, 0xaaU, 0xfbU, 0x43U, 0x4dU, 0x33U, 0x85U,
+    0x45U, 0xf9U, 0x02U, 0x7fU, 0x50U, 0x3cU, 0x9fU, 0xa8U,
+    0x51U, 0xa3U, 0x40U, 0x8fU, 0x92U, 0x9dU, 0x38U, 0xf5U,
+    0xbcU, 0xb6U, 0xdaU, 0x21U, 0x10U, 0xffU, 0xf3U, 0xd2U,
+    0xcdU, 0x0cU, 0x13U, 0xecU, 0x5fU, 0x97U, 0x44U, 0x17U,
+    0xc4U, 0xa7U, 0x7eU, 0x3dU, 0x64U, 0x5dU, 0x19U, 0x73U,
+    0x60U, 0x81U, 0x4fU, 0xdcU, 0x22U, 0x2aU, 0x90U, 0x88U,
+    0x46U, 0xeeU, 0xb8U, 0x14U, 0xdeU, 0x5eU, 0x0bU, 0xdbU,
+    0xe0U, 0x32U, 0x3aU, 0x0aU, 0x49U, 0x06U, 0x24U, 0x5cU,
+    0xc2U, 0xd3U, 0xacU, 0x62U, 0x91U, 0x95U, 0xe4U, 0x79U,
+    0xe7U, 0xc8U, 0x37U, 0x6dU, 0x8dU, 0xd5U, 0x4eU, 0xa9U,
+    0x6cU, 0x56U, 0xf4U, 0xeaU, 0x65U, 0x7aU, 0xaeU, 0x08U,
+    0xbaU, 0x78U, 0x25U, 0x2eU, 0x1cU, 0xa6U, 0xb4U, 0xc6U,
+    0xe8U, 0xddU, 0x74U, 0x1fU, 0x4bU, 0xbdU, 0x8bU, 0x8aU,
+    0x70U, 0x3eU, 0xb5U, 0x66U, 0x48U, 0x03U, 0xf6U, 0x0eU,
+    0x61U, 0x35U, 0x57U, 0xb9U, 0x86U, 0xc1U, 0x1dU, 0x9eU,
+    0xe1U, 0xf8U, 0x98U, 0x11U, 0x69U, 0xd9U, 0x8eU, 0x94U,
+    0x9bU, 0x1eU, 0x87U, 0xe9U, 0xceU, 0x55U, 0x28U, 0xdfU,
+    0x8cU, 0xa1U, 0x89U, 0x0dU, 0xbfU, 0xe6U, 0x42U, 0x68U,
+    0x41U, 0x99U, 0x2dU, 0x0fU, 0xb0U, 0x54U, 0xbbU, 0x16U
+};
+
+static const u64 Td[256] = {
+    U64(0x50a7f45150a7f451), U64(0x5365417e5365417e),
+    U64(0xc3a4171ac3a4171a), U64(0x965e273a965e273a),
+    U64(0xcb6bab3bcb6bab3b), U64(0xf1459d1ff1459d1f),
+    U64(0xab58faacab58faac), U64(0x9303e34b9303e34b),
+    U64(0x55fa302055fa3020), U64(0xf66d76adf66d76ad),
+    U64(0x9176cc889176cc88), U64(0x254c02f5254c02f5),
+    U64(0xfcd7e54ffcd7e54f), U64(0xd7cb2ac5d7cb2ac5),
+    U64(0x8044352680443526), U64(0x8fa362b58fa362b5),
+    U64(0x495ab1de495ab1de), U64(0x671bba25671bba25),
+    U64(0x980eea45980eea45), U64(0xe1c0fe5de1c0fe5d),
+    U64(0x02752fc302752fc3), U64(0x12f04c8112f04c81),
+    U64(0xa397468da397468d), U64(0xc6f9d36bc6f9d36b),
+    U64(0xe75f8f03e75f8f03), U64(0x959c9215959c9215),
+    U64(0xeb7a6dbfeb7a6dbf), U64(0xda595295da595295),
+    U64(0x2d83bed42d83bed4), U64(0xd3217458d3217458),
+    U64(0x2969e0492969e049), U64(0x44c8c98e44c8c98e),
+    U64(0x6a89c2756a89c275), U64(0x78798ef478798ef4),
+    U64(0x6b3e58996b3e5899), U64(0xdd71b927dd71b927),
+    U64(0xb64fe1beb64fe1be), U64(0x17ad88f017ad88f0),
+    U64(0x66ac20c966ac20c9), U64(0xb43ace7db43ace7d),
+    U64(0x184adf63184adf63), U64(0x82311ae582311ae5),
+    U64(0x6033519760335197), U64(0x457f5362457f5362),
+    U64(0xe07764b1e07764b1), U64(0x84ae6bbb84ae6bbb),
+    U64(0x1ca081fe1ca081fe), U64(0x942b08f9942b08f9),
+    U64(0x5868487058684870), U64(0x19fd458f19fd458f),
+    U64(0x876cde94876cde94), U64(0xb7f87b52b7f87b52),
+    U64(0x23d373ab23d373ab), U64(0xe2024b72e2024b72),
+    U64(0x578f1fe3578f1fe3), U64(0x2aab55662aab5566),
+    U64(0x0728ebb20728ebb2), U64(0x03c2b52f03c2b52f),
+    U64(0x9a7bc5869a7bc586), U64(0xa50837d3a50837d3),
+    U64(0xf2872830f2872830), U64(0xb2a5bf23b2a5bf23),
+    U64(0xba6a0302ba6a0302), U64(0x5c8216ed5c8216ed),
+    U64(0x2b1ccf8a2b1ccf8a), U64(0x92b479a792b479a7),
+    U64(0xf0f207f3f0f207f3), U64(0xa1e2694ea1e2694e),
+    U64(0xcdf4da65cdf4da65), U64(0xd5be0506d5be0506),
+    U64(0x1f6234d11f6234d1), U64(0x8afea6c48afea6c4),
+    U64(0x9d532e349d532e34), U64(0xa055f3a2a055f3a2),
+    U64(0x32e18a0532e18a05), U64(0x75ebf6a475ebf6a4),
+    U64(0x39ec830b39ec830b), U64(0xaaef6040aaef6040),
+    U64(0x069f715e069f715e), U64(0x51106ebd51106ebd),
+    U64(0xf98a213ef98a213e), U64(0x3d06dd963d06dd96),
+    U64(0xae053eddae053edd), U64(0x46bde64d46bde64d),
+    U64(0xb58d5491b58d5491), U64(0x055dc471055dc471),
+    U64(0x6fd406046fd40604), U64(0xff155060ff155060),
+    U64(0x24fb981924fb9819), U64(0x97e9bdd697e9bdd6),
+    U64(0xcc434089cc434089), U64(0x779ed967779ed967),
+    U64(0xbd42e8b0bd42e8b0), U64(0x888b8907888b8907),
+    U64(0x385b19e7385b19e7), U64(0xdbeec879dbeec879),
+    U64(0x470a7ca1470a7ca1), U64(0xe90f427ce90f427c),
+    U64(0xc91e84f8c91e84f8), U64(0x0000000000000000),
+    U64(0x8386800983868009), U64(0x48ed2b3248ed2b32),
+    U64(0xac70111eac70111e), U64(0x4e725a6c4e725a6c),
+    U64(0xfbff0efdfbff0efd), U64(0x5638850f5638850f),
+    U64(0x1ed5ae3d1ed5ae3d), U64(0x27392d3627392d36),
+    U64(0x64d90f0a64d90f0a), U64(0x21a65c6821a65c68),
+    U64(0xd1545b9bd1545b9b), U64(0x3a2e36243a2e3624),
+    U64(0xb1670a0cb1670a0c), U64(0x0fe757930fe75793),
+    U64(0xd296eeb4d296eeb4), U64(0x9e919b1b9e919b1b),
+    U64(0x4fc5c0804fc5c080), U64(0xa220dc61a220dc61),
+    U64(0x694b775a694b775a), U64(0x161a121c161a121c),
+    U64(0x0aba93e20aba93e2), U64(0xe52aa0c0e52aa0c0),
+    U64(0x43e0223c43e0223c), U64(0x1d171b121d171b12),
+    U64(0x0b0d090e0b0d090e), U64(0xadc78bf2adc78bf2),
+    U64(0xb9a8b62db9a8b62d), U64(0xc8a91e14c8a91e14),
+    U64(0x8519f1578519f157), U64(0x4c0775af4c0775af),
+    U64(0xbbdd99eebbdd99ee), U64(0xfd607fa3fd607fa3),
+    U64(0x9f2601f79f2601f7), U64(0xbcf5725cbcf5725c),
+    U64(0xc53b6644c53b6644), U64(0x347efb5b347efb5b),
+    U64(0x7629438b7629438b), U64(0xdcc623cbdcc623cb),
+    U64(0x68fcedb668fcedb6), U64(0x63f1e4b863f1e4b8),
+    U64(0xcadc31d7cadc31d7), U64(0x1085634210856342),
+    U64(0x4022971340229713), U64(0x2011c6842011c684),
+    U64(0x7d244a857d244a85), U64(0xf83dbbd2f83dbbd2),
+    U64(0x1132f9ae1132f9ae), U64(0x6da129c76da129c7),
+    U64(0x4b2f9e1d4b2f9e1d), U64(0xf330b2dcf330b2dc),
+    U64(0xec52860dec52860d), U64(0xd0e3c177d0e3c177),
+    U64(0x6c16b32b6c16b32b), U64(0x99b970a999b970a9),
+    U64(0xfa489411fa489411), U64(0x2264e9472264e947),
+    U64(0xc48cfca8c48cfca8), U64(0x1a3ff0a01a3ff0a0),
+    U64(0xd82c7d56d82c7d56), U64(0xef903322ef903322),
+    U64(0xc74e4987c74e4987), U64(0xc1d138d9c1d138d9),
+    U64(0xfea2ca8cfea2ca8c), U64(0x360bd498360bd498),
+    U64(0xcf81f5a6cf81f5a6), U64(0x28de7aa528de7aa5),
+    U64(0x268eb7da268eb7da), U64(0xa4bfad3fa4bfad3f),
+    U64(0xe49d3a2ce49d3a2c), U64(0x0d9278500d927850),
+    U64(0x9bcc5f6a9bcc5f6a), U64(0x62467e5462467e54),
+    U64(0xc2138df6c2138df6), U64(0xe8b8d890e8b8d890),
+    U64(0x5ef7392e5ef7392e), U64(0xf5afc382f5afc382),
+    U64(0xbe805d9fbe805d9f), U64(0x7c93d0697c93d069),
+    U64(0xa92dd56fa92dd56f), U64(0xb31225cfb31225cf),
+    U64(0x3b99acc83b99acc8), U64(0xa77d1810a77d1810),
+    U64(0x6e639ce86e639ce8), U64(0x7bbb3bdb7bbb3bdb),
+    U64(0x097826cd097826cd), U64(0xf418596ef418596e),
+    U64(0x01b79aec01b79aec), U64(0xa89a4f83a89a4f83),
+    U64(0x656e95e6656e95e6), U64(0x7ee6ffaa7ee6ffaa),
+    U64(0x08cfbc2108cfbc21), U64(0xe6e815efe6e815ef),
+    U64(0xd99be7bad99be7ba), U64(0xce366f4ace366f4a),
+    U64(0xd4099fead4099fea), U64(0xd67cb029d67cb029),
+    U64(0xafb2a431afb2a431), U64(0x31233f2a31233f2a),
+    U64(0x3094a5c63094a5c6), U64(0xc066a235c066a235),
+    U64(0x37bc4e7437bc4e74), U64(0xa6ca82fca6ca82fc),
+    U64(0xb0d090e0b0d090e0), U64(0x15d8a73315d8a733),
+    U64(0x4a9804f14a9804f1), U64(0xf7daec41f7daec41),
+    U64(0x0e50cd7f0e50cd7f), U64(0x2ff691172ff69117),
+    U64(0x8dd64d768dd64d76), U64(0x4db0ef434db0ef43),
+    U64(0x544daacc544daacc), U64(0xdf0496e4df0496e4),
+    U64(0xe3b5d19ee3b5d19e), U64(0x1b886a4c1b886a4c),
+    U64(0xb81f2cc1b81f2cc1), U64(0x7f5165467f516546),
+    U64(0x04ea5e9d04ea5e9d), U64(0x5d358c015d358c01),
+    U64(0x737487fa737487fa), U64(0x2e410bfb2e410bfb),
+    U64(0x5a1d67b35a1d67b3), U64(0x52d2db9252d2db92),
+    U64(0x335610e9335610e9), U64(0x1347d66d1347d66d),
+    U64(0x8c61d79a8c61d79a), U64(0x7a0ca1377a0ca137),
+    U64(0x8e14f8598e14f859), U64(0x893c13eb893c13eb),
+    U64(0xee27a9ceee27a9ce), U64(0x35c961b735c961b7),
+    U64(0xede51ce1ede51ce1), U64(0x3cb1477a3cb1477a),
+    U64(0x59dfd29c59dfd29c), U64(0x3f73f2553f73f255),
+    U64(0x79ce141879ce1418), U64(0xbf37c773bf37c773),
+    U64(0xeacdf753eacdf753), U64(0x5baafd5f5baafd5f),
+    U64(0x146f3ddf146f3ddf), U64(0x86db447886db4478),
+    U64(0x81f3afca81f3afca), U64(0x3ec468b93ec468b9),
+    U64(0x2c3424382c342438), U64(0x5f40a3c25f40a3c2),
+    U64(0x72c31d1672c31d16), U64(0x0c25e2bc0c25e2bc),
+    U64(0x8b493c288b493c28), U64(0x41950dff41950dff),
+    U64(0x7101a8397101a839), U64(0xdeb30c08deb30c08),
+    U64(0x9ce4b4d89ce4b4d8), U64(0x90c1566490c15664),
+    U64(0x6184cb7b6184cb7b), U64(0x70b632d570b632d5),
+    U64(0x745c6c48745c6c48), U64(0x4257b8d04257b8d0)
+};
+
+static const u8 Td4[256] = {
+    0x52U, 0x09U, 0x6aU, 0xd5U, 0x30U, 0x36U, 0xa5U, 0x38U,
+    0xbfU, 0x40U, 0xa3U, 0x9eU, 0x81U, 0xf3U, 0xd7U, 0xfbU,
+    0x7cU, 0xe3U, 0x39U, 0x82U, 0x9bU, 0x2fU, 0xffU, 0x87U,
+    0x34U, 0x8eU, 0x43U, 0x44U, 0xc4U, 0xdeU, 0xe9U, 0xcbU,
+    0x54U, 0x7bU, 0x94U, 0x32U, 0xa6U, 0xc2U, 0x23U, 0x3dU,
+    0xeeU, 0x4cU, 0x95U, 0x0bU, 0x42U, 0xfaU, 0xc3U, 0x4eU,
+    0x08U, 0x2eU, 0xa1U, 0x66U, 0x28U, 0xd9U, 0x24U, 0xb2U,
+    0x76U, 0x5bU, 0xa2U, 0x49U, 0x6dU, 0x8bU, 0xd1U, 0x25U,
+    0x72U, 0xf8U, 0xf6U, 0x64U, 0x86U, 0x68U, 0x98U, 0x16U,
+    0xd4U, 0xa4U, 0x5cU, 0xccU, 0x5dU, 0x65U, 0xb6U, 0x92U,
+    0x6cU, 0x70U, 0x48U, 0x50U, 0xfdU, 0xedU, 0xb9U, 0xdaU,
+    0x5eU, 0x15U, 0x46U, 0x57U, 0xa7U, 0x8dU, 0x9dU, 0x84U,
+    0x90U, 0xd8U, 0xabU, 0x00U, 0x8cU, 0xbcU, 0xd3U, 0x0aU,
+    0xf7U, 0xe4U, 0x58U, 0x05U, 0xb8U, 0xb3U, 0x45U, 0x06U,
+    0xd0U, 0x2cU, 0x1eU, 0x8fU, 0xcaU, 0x3fU, 0x0fU, 0x02U,
+    0xc1U, 0xafU, 0xbdU, 0x03U, 0x01U, 0x13U, 0x8aU, 0x6bU,
+    0x3aU, 0x91U, 0x11U, 0x41U, 0x4fU, 0x67U, 0xdcU, 0xeaU,
+    0x97U, 0xf2U, 0xcfU, 0xceU, 0xf0U, 0xb4U, 0xe6U, 0x73U,
+    0x96U, 0xacU, 0x74U, 0x22U, 0xe7U, 0xadU, 0x35U, 0x85U,
+    0xe2U, 0xf9U, 0x37U, 0xe8U, 0x1cU, 0x75U, 0xdfU, 0x6eU,
+    0x47U, 0xf1U, 0x1aU, 0x71U, 0x1dU, 0x29U, 0xc5U, 0x89U,
+    0x6fU, 0xb7U, 0x62U, 0x0eU, 0xaaU, 0x18U, 0xbeU, 0x1bU,
+    0xfcU, 0x56U, 0x3eU, 0x4bU, 0xc6U, 0xd2U, 0x79U, 0x20U,
+    0x9aU, 0xdbU, 0xc0U, 0xfeU, 0x78U, 0xcdU, 0x5aU, 0xf4U,
+    0x1fU, 0xddU, 0xa8U, 0x33U, 0x88U, 0x07U, 0xc7U, 0x31U,
+    0xb1U, 0x12U, 0x10U, 0x59U, 0x27U, 0x80U, 0xecU, 0x5fU,
+    0x60U, 0x51U, 0x7fU, 0xa9U, 0x19U, 0xb5U, 0x4aU, 0x0dU,
+    0x2dU, 0xe5U, 0x7aU, 0x9fU, 0x93U, 0xc9U, 0x9cU, 0xefU,
+    0xa0U, 0xe0U, 0x3bU, 0x4dU, 0xaeU, 0x2aU, 0xf5U, 0xb0U,
+    0xc8U, 0xebU, 0xbbU, 0x3cU, 0x83U, 0x53U, 0x99U, 0x61U,
+    0x17U, 0x2bU, 0x04U, 0x7eU, 0xbaU, 0x77U, 0xd6U, 0x26U,
+    0xe1U, 0x69U, 0x14U, 0x63U, 0x55U, 0x21U, 0x0cU, 0x7dU
+};
+
+static const u32 rcon[] = {
+    0x00000001U, 0x00000002U, 0x00000004U, 0x00000008U,
+    0x00000010U, 0x00000020U, 0x00000040U, 0x00000080U,
+    0x0000001bU, 0x00000036U, /* for 128-bit blocks, Rijndael never uses more than 10 rcon values */
+};
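+
+/*
+ * Illustrative sketch, not part of the upstream file: each rcon[i] is
+ * {02}^i in GF(2^8) with the AES reduction polynomial x^8+x^4+x^3+x+1
+ * (0x11b), kept in the low byte to match the byte order used by the key
+ * schedule below.  A hypothetical generator, compiled out:
+ */
+#if 0
+static void rcon_sketch(u32 out[10])
+{
+    unsigned int r = 1;
+    int i;
+
+    for (i = 0; i < 10; i++) {
+        out[i] = (u32)r;        /* 0x01, 0x02, 0x04, ..., 0x1b, 0x36 */
+        r <<= 1;                /* multiply by x */
+        if (r & 0x100)
+            r ^= 0x11b;         /* reduce modulo the AES polynomial */
+    }
+}
+#endif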
+
+/**
+ * Expand the cipher key into the encryption key schedule.
+ */
+int AES_set_encrypt_key(const unsigned char *userKey, const int bits,
+                        AES_KEY *key)
+{
+
+    u32 *rk;
+    int i = 0;
+    u32 temp;
+
+    if (!userKey || !key)
+        return -1;
+    if (bits != 128 && bits != 192 && bits != 256)
+        return -2;
+
+    rk = key->rd_key;
+
+    if (bits == 128)
+        key->rounds = 10;
+    else if (bits == 192)
+        key->rounds = 12;
+    else
+        key->rounds = 14;
+
+    rk[0] = GETU32(userKey     );
+    rk[1] = GETU32(userKey +  4);
+    rk[2] = GETU32(userKey +  8);
+    rk[3] = GETU32(userKey + 12);
+    if (bits == 128) {
+        while (1) {
+            temp  = rk[3];
+            rk[4] = rk[0] ^
+                ((u32)Te4[(temp >>  8) & 0xff]      ) ^
+                ((u32)Te4[(temp >> 16) & 0xff] <<  8) ^
+                ((u32)Te4[(temp >> 24)       ] << 16) ^
+                ((u32)Te4[(temp      ) & 0xff] << 24) ^
+                rcon[i];
+            rk[5] = rk[1] ^ rk[4];
+            rk[6] = rk[2] ^ rk[5];
+            rk[7] = rk[3] ^ rk[6];
+            if (++i == 10) {
+                return 0;
+            }
+            rk += 4;
+        }
+    }
+    rk[4] = GETU32(userKey + 16);
+    rk[5] = GETU32(userKey + 20);
+    if (bits == 192) {
+        while (1) {
+            temp = rk[ 5];
+            rk[ 6] = rk[ 0] ^
+                ((u32)Te4[(temp >>  8) & 0xff]      ) ^
+                ((u32)Te4[(temp >> 16) & 0xff] <<  8) ^
+                ((u32)Te4[(temp >> 24)       ] << 16) ^
+                ((u32)Te4[(temp      ) & 0xff] << 24) ^
+                rcon[i];
+            rk[ 7] = rk[ 1] ^ rk[ 6];
+            rk[ 8] = rk[ 2] ^ rk[ 7];
+            rk[ 9] = rk[ 3] ^ rk[ 8];
+            if (++i == 8) {
+                return 0;
+            }
+            rk[10] = rk[ 4] ^ rk[ 9];
+            rk[11] = rk[ 5] ^ rk[10];
+            rk += 6;
+        }
+    }
+    rk[6] = GETU32(userKey + 24);
+    rk[7] = GETU32(userKey + 28);
+    if (bits == 256) {
+        while (1) {
+            temp = rk[ 7];
+            rk[ 8] = rk[ 0] ^
+                ((u32)Te4[(temp >>  8) & 0xff]      ) ^
+                ((u32)Te4[(temp >> 16) & 0xff] <<  8) ^
+                ((u32)Te4[(temp >> 24)       ] << 16) ^
+                ((u32)Te4[(temp      ) & 0xff] << 24) ^
+                rcon[i];
+            rk[ 9] = rk[ 1] ^ rk[ 8];
+            rk[10] = rk[ 2] ^ rk[ 9];
+            rk[11] = rk[ 3] ^ rk[10];
+            if (++i == 7) {
+                return 0;
+            }
+            temp = rk[11];
+            rk[12] = rk[ 4] ^
+                ((u32)Te4[(temp      ) & 0xff]      ) ^
+                ((u32)Te4[(temp >>  8) & 0xff] <<  8) ^
+                ((u32)Te4[(temp >> 16) & 0xff] << 16) ^
+                ((u32)Te4[(temp >> 24)       ] << 24);
+            rk[13] = rk[ 5] ^ rk[12];
+            rk[14] = rk[ 6] ^ rk[13];
+            rk[15] = rk[ 7] ^ rk[14];
+
+            rk += 8;
+        }
+    }
+    return 0;
+}
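+
+/*
+ * Illustrative usage sketch, not part of the upstream file: a 128-bit key
+ * expands to key->rounds == 10, i.e. 4*(10+1) round-key words.  The
+ * all-zero key bytes are a hypothetical example.  Compiled out:
+ */
+#if 0
+static int expand_key_sketch(void)
+{
+    static const unsigned char k[16] = { 0 };
+    AES_KEY ks;
+
+    return AES_set_encrypt_key(k, 128, &ks);    /* 0 on success */
+}
+#endif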
+
+/**
+ * Expand the cipher key into the decryption key schedule.
+ */
+int AES_set_decrypt_key(const unsigned char *userKey, const int bits,
+                        AES_KEY *key)
+{
+
+    u32 *rk;
+    int i, j, status;
+    u32 temp;
+
+    /* first, start with an encryption schedule */
+    status = AES_set_encrypt_key(userKey, bits, key);
+    if (status < 0)
+        return status;
+
+    rk = key->rd_key;
+
+    /* invert the order of the round keys: */
+    for (i = 0, j = 4*(key->rounds); i < j; i += 4, j -= 4) {
+        temp = rk[i    ]; rk[i    ] = rk[j    ]; rk[j    ] = temp;
+        temp = rk[i + 1]; rk[i + 1] = rk[j + 1]; rk[j + 1] = temp;
+        temp = rk[i + 2]; rk[i + 2] = rk[j + 2]; rk[j + 2] = temp;
+        temp = rk[i + 3]; rk[i + 3] = rk[j + 3]; rk[j + 3] = temp;
+    }
+    /* apply the inverse MixColumn transform to all round keys but the first and the last: */
+    for (i = 1; i < (key->rounds); i++) {
+        rk += 4;
+#if 1
+        for (j = 0; j < 4; j++) {
+            u32 tp1, tp2, tp4, tp8, tp9, tpb, tpd, tpe, m;
+
+            tp1 = rk[j];
+            m = tp1 & 0x80808080;
+            tp2 = ((tp1 & 0x7f7f7f7f) << 1) ^
+                ((m - (m >> 7)) & 0x1b1b1b1b);
+            m = tp2 & 0x80808080;
+            tp4 = ((tp2 & 0x7f7f7f7f) << 1) ^
+                ((m - (m >> 7)) & 0x1b1b1b1b);
+            m = tp4 & 0x80808080;
+            tp8 = ((tp4 & 0x7f7f7f7f) << 1) ^
+                ((m - (m >> 7)) & 0x1b1b1b1b);
+            tp9 = tp8 ^ tp1;
+            tpb = tp9 ^ tp2;
+            tpd = tp9 ^ tp4;
+            tpe = tp8 ^ tp4 ^ tp2;
+#if defined(ROTATE)
+            rk[j] = tpe ^ ROTATE(tpd,16) ^
+                ROTATE(tp9,8) ^ ROTATE(tpb,24);
+#else
+            rk[j] = tpe ^ (tpd >> 16) ^ (tpd << 16) ^
+                (tp9 >> 24) ^ (tp9 << 8) ^
+                (tpb >> 8) ^ (tpb << 24);
+#endif
+        }
+#else
+        rk[0] =
+            Td0[Te2[(rk[0]      ) & 0xff] & 0xff] ^
+            Td1[Te2[(rk[0] >>  8) & 0xff] & 0xff] ^
+            Td2[Te2[(rk[0] >> 16) & 0xff] & 0xff] ^
+            Td3[Te2[(rk[0] >> 24)       ] & 0xff];
+        rk[1] =
+            Td0[Te2[(rk[1]      ) & 0xff] & 0xff] ^
+            Td1[Te2[(rk[1] >>  8) & 0xff] & 0xff] ^
+            Td2[Te2[(rk[1] >> 16) & 0xff] & 0xff] ^
+            Td3[Te2[(rk[1] >> 24)       ] & 0xff];
+        rk[2] =
+            Td0[Te2[(rk[2]      ) & 0xff] & 0xff] ^
+            Td1[Te2[(rk[2] >>  8) & 0xff] & 0xff] ^
+            Td2[Te2[(rk[2] >> 16) & 0xff] & 0xff] ^
+            Td3[Te2[(rk[2] >> 24)       ] & 0xff];
+        rk[3] =
+            Td0[Te2[(rk[3]      ) & 0xff] & 0xff] ^
+            Td1[Te2[(rk[3] >>  8) & 0xff] & 0xff] ^
+            Td2[Te2[(rk[3] >> 16) & 0xff] & 0xff] ^
+            Td3[Te2[(rk[3] >> 24)       ] & 0xff];
+#endif
+    }
+    return 0;
+}
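+
+/*
+ * Illustrative sketch, not part of the upstream file: the expression
+ * ((tp & 0x7f7f7f7f) << 1) ^ ((m - (m >> 7)) & 0x1b1b1b1b) used above
+ * doubles four GF(2^8) elements at once, one per byte of the word; per
+ * byte, m is 0x80 or 0x00, so (m - (m >> 7)) & 0x1b is 0x1b or 0x00.
+ * The single-byte equivalent ("xtime"), compiled out:
+ */
+#if 0
+static unsigned char xtime_sketch(unsigned char x)
+{
+    return (unsigned char)((x << 1) ^ ((x & 0x80) ? 0x1b : 0x00));
+}
+#endif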
+
+/*
+ * Encrypt a single block
+ * in and out can overlap
+ */
+void AES_encrypt(const unsigned char *in, unsigned char *out,
+                 const AES_KEY *key)
+{
+
+    const u32 *rk;
+    u32 s0, s1, s2, s3, t[4];
+    int r;
+
+    assert(in && out && key);
+    rk = key->rd_key;
+
+    /*
+     * map byte array block to cipher state
+     * and add initial round key:
+     */
+    s0 = GETU32(in     ) ^ rk[0];
+    s1 = GETU32(in +  4) ^ rk[1];
+    s2 = GETU32(in +  8) ^ rk[2];
+    s3 = GETU32(in + 12) ^ rk[3];
+
+#if defined(AES_COMPACT_IN_OUTER_ROUNDS)
+    prefetch256(Te4);
+
+    t[0] = (u32)Te4[(s0      ) & 0xff]       ^
+           (u32)Te4[(s1 >>  8) & 0xff] <<  8 ^
+           (u32)Te4[(s2 >> 16) & 0xff] << 16 ^
+           (u32)Te4[(s3 >> 24)       ] << 24;
+    t[1] = (u32)Te4[(s1      ) & 0xff]       ^
+           (u32)Te4[(s2 >>  8) & 0xff] <<  8 ^
+           (u32)Te4[(s3 >> 16) & 0xff] << 16 ^
+           (u32)Te4[(s0 >> 24)       ] << 24;
+    t[2] = (u32)Te4[(s2      ) & 0xff]       ^
+           (u32)Te4[(s3 >>  8) & 0xff] <<  8 ^
+           (u32)Te4[(s0 >> 16) & 0xff] << 16 ^
+           (u32)Te4[(s1 >> 24)       ] << 24;
+    t[3] = (u32)Te4[(s3      ) & 0xff]       ^
+           (u32)Te4[(s0 >>  8) & 0xff] <<  8 ^
+           (u32)Te4[(s1 >> 16) & 0xff] << 16 ^
+           (u32)Te4[(s2 >> 24)       ] << 24;
+
+    /* now do the linear transform using words */
+    {   int i;
+        u32 r0, r1, r2;
+
+        for (i = 0; i < 4; i++) {
+            r0 = t[i];
+            r1 = r0 & 0x80808080;
+            r2 = ((r0 & 0x7f7f7f7f) << 1) ^
+                ((r1 - (r1 >> 7)) & 0x1b1b1b1b);
+#if defined(ROTATE)
+            t[i] = r2 ^ ROTATE(r2,24) ^ ROTATE(r0,24) ^
+                ROTATE(r0,16) ^ ROTATE(r0,8);
+#else
+            t[i] = r2 ^ ((r2 ^ r0) << 24) ^ ((r2 ^ r0) >> 8) ^
+                (r0 << 16) ^ (r0 >> 16) ^
+                (r0 << 8) ^ (r0 >> 24);
+#endif
+            t[i] ^= rk[4+i];
+        }
+    }
+#else
+    t[0] =  Te0[(s0      ) & 0xff] ^
+        Te1[(s1 >>  8) & 0xff] ^
+        Te2[(s2 >> 16) & 0xff] ^
+        Te3[(s3 >> 24)       ] ^
+        rk[4];
+    t[1] =  Te0[(s1      ) & 0xff] ^
+        Te1[(s2 >>  8) & 0xff] ^
+        Te2[(s3 >> 16) & 0xff] ^
+        Te3[(s0 >> 24)       ] ^
+        rk[5];
+    t[2] =  Te0[(s2      ) & 0xff] ^
+        Te1[(s3 >>  8) & 0xff] ^
+        Te2[(s0 >> 16) & 0xff] ^
+        Te3[(s1 >> 24)       ] ^
+        rk[6];
+    t[3] =  Te0[(s3      ) & 0xff] ^
+        Te1[(s0 >>  8) & 0xff] ^
+        Te2[(s1 >> 16) & 0xff] ^
+        Te3[(s2 >> 24)       ] ^
+        rk[7];
+#endif
+    s0 = t[0]; s1 = t[1]; s2 = t[2]; s3 = t[3];
+
+    /*
+     * Nr - 2 full rounds:
+     */
+    for (rk+=8,r=key->rounds-2; r>0; rk+=4,r--) {
+#if defined(AES_COMPACT_IN_INNER_ROUNDS)
+        t[0] = (u32)Te4[(s0      ) & 0xff]       ^
+               (u32)Te4[(s1 >>  8) & 0xff] <<  8 ^
+               (u32)Te4[(s2 >> 16) & 0xff] << 16 ^
+               (u32)Te4[(s3 >> 24)       ] << 24;
+        t[1] = (u32)Te4[(s1      ) & 0xff]       ^
+               (u32)Te4[(s2 >>  8) & 0xff] <<  8 ^
+               (u32)Te4[(s3 >> 16) & 0xff] << 16 ^
+               (u32)Te4[(s0 >> 24)       ] << 24;
+        t[2] = (u32)Te4[(s2      ) & 0xff]       ^
+               (u32)Te4[(s3 >>  8) & 0xff] <<  8 ^
+               (u32)Te4[(s0 >> 16) & 0xff] << 16 ^
+               (u32)Te4[(s1 >> 24)       ] << 24;
+        t[3] = (u32)Te4[(s3      ) & 0xff]       ^
+               (u32)Te4[(s0 >>  8) & 0xff] <<  8 ^
+               (u32)Te4[(s1 >> 16) & 0xff] << 16 ^
+               (u32)Te4[(s2 >> 24)       ] << 24;
+
+        /* now do the linear transform using words */
+        {
+            int i;
+            u32 r0, r1, r2;
+
+            for (i = 0; i < 4; i++) {
+                r0 = t[i];
+                r1 = r0 & 0x80808080;
+                r2 = ((r0 & 0x7f7f7f7f) << 1) ^
+                    ((r1 - (r1 >> 7)) & 0x1b1b1b1b);
+#if defined(ROTATE)
+                t[i] = r2 ^ ROTATE(r2,24) ^ ROTATE(r0,24) ^
+                    ROTATE(r0,16) ^ ROTATE(r0,8);
+#else
+                t[i] = r2 ^ ((r2 ^ r0) << 24) ^ ((r2 ^ r0) >> 8) ^
+                    (r0 << 16) ^ (r0 >> 16) ^
+                    (r0 << 8) ^ (r0 >> 24);
+#endif
+                t[i] ^= rk[i];
+            }
+        }
+#else
+        t[0] =  Te0[(s0      ) & 0xff] ^
+            Te1[(s1 >>  8) & 0xff] ^
+            Te2[(s2 >> 16) & 0xff] ^
+            Te3[(s3 >> 24)       ] ^
+            rk[0];
+        t[1] =  Te0[(s1      ) & 0xff] ^
+            Te1[(s2 >>  8) & 0xff] ^
+            Te2[(s3 >> 16) & 0xff] ^
+            Te3[(s0 >> 24)       ] ^
+            rk[1];
+        t[2] =  Te0[(s2      ) & 0xff] ^
+            Te1[(s3 >>  8) & 0xff] ^
+            Te2[(s0 >> 16) & 0xff] ^
+            Te3[(s1 >> 24)       ] ^
+            rk[2];
+        t[3] =  Te0[(s3      ) & 0xff] ^
+            Te1[(s0 >>  8) & 0xff] ^
+            Te2[(s1 >> 16) & 0xff] ^
+            Te3[(s2 >> 24)       ] ^
+            rk[3];
+#endif
+        s0 = t[0]; s1 = t[1]; s2 = t[2]; s3 = t[3];
+    }
+    /*
+     * apply last round and
+     * map cipher state to byte array block:
+     */
+#if defined(AES_COMPACT_IN_OUTER_ROUNDS)
+    prefetch256(Te4);
+
+    *(u32*)(out+0) =
+           (u32)Te4[(s0      ) & 0xff]       ^
+           (u32)Te4[(s1 >>  8) & 0xff] <<  8 ^
+           (u32)Te4[(s2 >> 16) & 0xff] << 16 ^
+           (u32)Te4[(s3 >> 24)       ] << 24 ^
+        rk[0];
+    *(u32*)(out+4) =
+           (u32)Te4[(s1      ) & 0xff]       ^
+           (u32)Te4[(s2 >>  8) & 0xff] <<  8 ^
+           (u32)Te4[(s3 >> 16) & 0xff] << 16 ^
+           (u32)Te4[(s0 >> 24)       ] << 24 ^
+        rk[1];
+    *(u32*)(out+8) =
+           (u32)Te4[(s2      ) & 0xff]       ^
+           (u32)Te4[(s3 >>  8) & 0xff] <<  8 ^
+           (u32)Te4[(s0 >> 16) & 0xff] << 16 ^
+           (u32)Te4[(s1 >> 24)       ] << 24 ^
+        rk[2];
+    *(u32*)(out+12) =
+           (u32)Te4[(s3      ) & 0xff]       ^
+           (u32)Te4[(s0 >>  8) & 0xff] <<  8 ^
+           (u32)Te4[(s1 >> 16) & 0xff] << 16 ^
+           (u32)Te4[(s2 >> 24)       ] << 24 ^
+        rk[3];
+#else
+    *(u32*)(out+0) =
+        (Te2[(s0      ) & 0xff] & 0x000000ffU) ^
+        (Te3[(s1 >>  8) & 0xff] & 0x0000ff00U) ^
+        (Te0[(s2 >> 16) & 0xff] & 0x00ff0000U) ^
+        (Te1[(s3 >> 24)       ] & 0xff000000U) ^
+        rk[0];
+    *(u32*)(out+4) =
+        (Te2[(s1      ) & 0xff] & 0x000000ffU) ^
+        (Te3[(s2 >>  8) & 0xff] & 0x0000ff00U) ^
+        (Te0[(s3 >> 16) & 0xff] & 0x00ff0000U) ^
+        (Te1[(s0 >> 24)       ] & 0xff000000U) ^
+        rk[1];
+    *(u32*)(out+8) =
+        (Te2[(s2      ) & 0xff] & 0x000000ffU) ^
+        (Te3[(s3 >>  8) & 0xff] & 0x0000ff00U) ^
+        (Te0[(s0 >> 16) & 0xff] & 0x00ff0000U) ^
+        (Te1[(s1 >> 24)       ] & 0xff000000U) ^
+        rk[2];
+    *(u32*)(out+12) =
+        (Te2[(s3      ) & 0xff] & 0x000000ffU) ^
+        (Te3[(s0 >>  8) & 0xff] & 0x0000ff00U) ^
+        (Te0[(s1 >> 16) & 0xff] & 0x00ff0000U) ^
+        (Te1[(s2 >> 24)       ] & 0xff000000U) ^
+        rk[3];
+#endif
+}
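+
+/*
+ * Illustrative usage sketch, not part of the upstream file: encrypting one
+ * 16-byte block in place with an already expanded key.  For bulk data a
+ * chaining mode such as AES_cbc_encrypt should be used instead of raw
+ * single-block calls.  Compiled out:
+ */
+#if 0
+static void encrypt_block_sketch(const AES_KEY *ks, unsigned char buf[16])
+{
+    AES_encrypt(buf, buf, ks);  /* in and out may overlap, as noted above */
+}
+#endif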
+
+/*
+ * Decrypt a single block
+ * in and out can overlap
+ */
+void AES_decrypt(const unsigned char *in, unsigned char *out,
+                 const AES_KEY *key)
+{
+
+    const u32 *rk;
+    u32 s0, s1, s2, s3, t[4];
+    int r;
+
+    assert(in && out && key);
+    rk = key->rd_key;
+
+    /*
+     * map byte array block to cipher state
+     * and add initial round key:
+     */
+    s0 = GETU32(in     ) ^ rk[0];
+    s1 = GETU32(in +  4) ^ rk[1];
+    s2 = GETU32(in +  8) ^ rk[2];
+    s3 = GETU32(in + 12) ^ rk[3];
+
+#if defined(AES_COMPACT_IN_OUTER_ROUNDS)
+    prefetch256(Td4);
+
+    t[0] = (u32)Td4[(s0      ) & 0xff]       ^
+           (u32)Td4[(s3 >>  8) & 0xff] <<  8 ^
+           (u32)Td4[(s2 >> 16) & 0xff] << 16 ^
+           (u32)Td4[(s1 >> 24)       ] << 24;
+    t[1] = (u32)Td4[(s1      ) & 0xff]       ^
+           (u32)Td4[(s0 >>  8) & 0xff] <<  8 ^
+           (u32)Td4[(s3 >> 16) & 0xff] << 16 ^
+           (u32)Td4[(s2 >> 24)       ] << 24;
+    t[2] = (u32)Td4[(s2      ) & 0xff]       ^
+           (u32)Td4[(s1 >>  8) & 0xff] <<  8 ^
+           (u32)Td4[(s0 >> 16) & 0xff] << 16 ^
+           (u32)Td4[(s3 >> 24)       ] << 24;
+    t[3] = (u32)Td4[(s3      ) & 0xff]       ^
+           (u32)Td4[(s2 >>  8) & 0xff] <<  8 ^
+           (u32)Td4[(s1 >> 16) & 0xff] << 16 ^
+           (u32)Td4[(s0 >> 24)       ] << 24;
+
+    /* now do the linear transform using words */
+    {
+        int i;
+        u32 tp1, tp2, tp4, tp8, tp9, tpb, tpd, tpe, m;
+
+        for (i = 0; i < 4; i++) {
+            tp1 = t[i];
+            m = tp1 & 0x80808080;
+            tp2 = ((tp1 & 0x7f7f7f7f) << 1) ^
+                ((m - (m >> 7)) & 0x1b1b1b1b);
+            m = tp2 & 0x80808080;
+            tp4 = ((tp2 & 0x7f7f7f7f) << 1) ^
+                ((m - (m >> 7)) & 0x1b1b1b1b);
+            m = tp4 & 0x80808080;
+            tp8 = ((tp4 & 0x7f7f7f7f) << 1) ^
+                ((m - (m >> 7)) & 0x1b1b1b1b);
+            tp9 = tp8 ^ tp1;
+            tpb = tp9 ^ tp2;
+            tpd = tp9 ^ tp4;
+            tpe = tp8 ^ tp4 ^ tp2;
+#if defined(ROTATE)
+            t[i] = tpe ^ ROTATE(tpd,16) ^
+                ROTATE(tp9,8) ^ ROTATE(tpb,24);
+#else
+            t[i] = tpe ^ (tpd >> 16) ^ (tpd << 16) ^
+                (tp9 >> 24) ^ (tp9 << 8) ^
+                (tpb >> 8) ^ (tpb << 24);
+#endif
+            t[i] ^= rk[4+i];
+        }
+    }
+#else
+    t[0] =  Td0[(s0      ) & 0xff] ^
+        Td1[(s3 >>  8) & 0xff] ^
+        Td2[(s2 >> 16) & 0xff] ^
+        Td3[(s1 >> 24)       ] ^
+        rk[4];
+    t[1] =  Td0[(s1      ) & 0xff] ^
+        Td1[(s0 >>  8) & 0xff] ^
+        Td2[(s3 >> 16) & 0xff] ^
+        Td3[(s2 >> 24)       ] ^
+        rk[5];
+    t[2] =  Td0[(s2      ) & 0xff] ^
+        Td1[(s1 >>  8) & 0xff] ^
+        Td2[(s0 >> 16) & 0xff] ^
+        Td3[(s3 >> 24)       ] ^
+        rk[6];
+    t[3] =  Td0[(s3      ) & 0xff] ^
+        Td1[(s2 >>  8) & 0xff] ^
+        Td2[(s1 >> 16) & 0xff] ^
+        Td3[(s0 >> 24)       ] ^
+        rk[7];
+#endif
+    s0 = t[0]; s1 = t[1]; s2 = t[2]; s3 = t[3];
+
+    /*
+     * Nr - 2 full rounds:
+     */
+    for (rk+=8,r=key->rounds-2; r>0; rk+=4,r--) {
+#if defined(AES_COMPACT_IN_INNER_ROUNDS)
+        t[0] = (u32)Td4[(s0      ) & 0xff]       ^
+               (u32)Td4[(s3 >>  8) & 0xff] <<  8 ^
+               (u32)Td4[(s2 >> 16) & 0xff] << 16 ^
+               (u32)Td4[(s1 >> 24)       ] << 24;
+        t[1] = (u32)Td4[(s1      ) & 0xff]       ^
+               (u32)Td4[(s0 >>  8) & 0xff] <<  8 ^
+               (u32)Td4[(s3 >> 16) & 0xff] << 16 ^
+               (u32)Td4[(s2 >> 24)       ] << 24;
+        t[2] = (u32)Td4[(s2      ) & 0xff]       ^
+               (u32)Td4[(s1 >>  8) & 0xff] <<  8 ^
+               (u32)Td4[(s0 >> 16) & 0xff] << 16 ^
+               (u32)Td4[(s3 >> 24)       ] << 24;
+        t[3] = (u32)Td4[(s3      ) & 0xff]       ^
+               (u32)Td4[(s2 >>  8) & 0xff] <<  8 ^
+               (u32)Td4[(s1 >> 16) & 0xff] << 16 ^
+               (u32)Td4[(s0 >> 24)       ] << 24;
+
+        /* now do the linear transform using words */
+        {
+            int i;
+            u32 tp1, tp2, tp4, tp8, tp9, tpb, tpd, tpe, m;
+
+            for (i = 0; i < 4; i++) {
+                tp1 = t[i];
+                m = tp1 & 0x80808080;
+                tp2 = ((tp1 & 0x7f7f7f7f) << 1) ^
+                    ((m - (m >> 7)) & 0x1b1b1b1b);
+                m = tp2 & 0x80808080;
+                tp4 = ((tp2 & 0x7f7f7f7f) << 1) ^
+                    ((m - (m >> 7)) & 0x1b1b1b1b);
+                m = tp4 & 0x80808080;
+                tp8 = ((tp4 & 0x7f7f7f7f) << 1) ^
+                    ((m - (m >> 7)) & 0x1b1b1b1b);
+                tp9 = tp8 ^ tp1;
+                tpb = tp9 ^ tp2;
+                tpd = tp9 ^ tp4;
+                tpe = tp8 ^ tp4 ^ tp2;
+#if defined(ROTATE)
+                t[i] = tpe ^ ROTATE(tpd,16) ^
+                    ROTATE(tp9,8) ^ ROTATE(tpb,24);
+#else
+                t[i] = tpe ^ (tpd >> 16) ^ (tpd << 16) ^
+                    (tp9 >> 24) ^ (tp9 << 8) ^
+                    (tpb >> 8) ^ (tpb << 24);
+#endif
+                t[i] ^= rk[i];
+            }
+        }
+#else
+        t[0] =  Td0[(s0      ) & 0xff] ^
+            Td1[(s3 >>  8) & 0xff] ^
+            Td2[(s2 >> 16) & 0xff] ^
+            Td3[(s1 >> 24)       ] ^
+            rk[0];
+        t[1] =  Td0[(s1      ) & 0xff] ^
+            Td1[(s0 >>  8) & 0xff] ^
+            Td2[(s3 >> 16) & 0xff] ^
+            Td3[(s2 >> 24)       ] ^
+            rk[1];
+        t[2] =  Td0[(s2      ) & 0xff] ^
+            Td1[(s1 >>  8) & 0xff] ^
+            Td2[(s0 >> 16) & 0xff] ^
+            Td3[(s3 >> 24)       ] ^
+            rk[2];
+        t[3] =  Td0[(s3      ) & 0xff] ^
+            Td1[(s2 >>  8) & 0xff] ^
+            Td2[(s1 >> 16) & 0xff] ^
+            Td3[(s0 >> 24)       ] ^
+            rk[3];
+#endif
+        s0 = t[0]; s1 = t[1]; s2 = t[2]; s3 = t[3];
+    }
+    /*
+     * apply last round and
+     * map cipher state to byte array block:
+     */
+    prefetch256(Td4);
+
+    *(u32*)(out+0) =
+        ((u32)Td4[(s0      ) & 0xff])    ^
+        ((u32)Td4[(s3 >>  8) & 0xff] <<  8) ^
+        ((u32)Td4[(s2 >> 16) & 0xff] << 16) ^
+        ((u32)Td4[(s1 >> 24)       ] << 24) ^
+        rk[0];
+    *(u32*)(out+4) =
+        ((u32)Td4[(s1      ) & 0xff])     ^
+        ((u32)Td4[(s0 >>  8) & 0xff] <<  8) ^
+        ((u32)Td4[(s3 >> 16) & 0xff] << 16) ^
+        ((u32)Td4[(s2 >> 24)       ] << 24) ^
+        rk[1];
+    *(u32*)(out+8) =
+        ((u32)Td4[(s2      ) & 0xff])     ^
+        ((u32)Td4[(s1 >>  8) & 0xff] <<  8) ^
+        ((u32)Td4[(s0 >> 16) & 0xff] << 16) ^
+        ((u32)Td4[(s3 >> 24)       ] << 24) ^
+        rk[2];
+    *(u32*)(out+12) =
+        ((u32)Td4[(s3      ) & 0xff])     ^
+        ((u32)Td4[(s2 >>  8) & 0xff] <<  8) ^
+        ((u32)Td4[(s1 >> 16) & 0xff] << 16) ^
+        ((u32)Td4[(s0 >> 24)       ] << 24) ^
+        rk[3];
+}
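+
+/*
+ * Illustrative round-trip sketch, not part of the upstream file: with
+ * encrypt and decrypt schedules derived from the same key bytes,
+ * AES_decrypt inverts AES_encrypt block for block.  Compiled out:
+ */
+#if 0
+static int roundtrip_sketch(const unsigned char key[16],
+                            const unsigned char in[16],
+                            unsigned char out[16])
+{
+    AES_KEY enc, dec;
+
+    if (AES_set_encrypt_key(key, 128, &enc) != 0)
+        return -1;
+    if (AES_set_decrypt_key(key, 128, &dec) != 0)
+        return -1;
+    AES_encrypt(in, out, &enc);
+    AES_decrypt(out, out, &dec);    /* out now holds in again */
+    return 0;
+}
+#endif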
diff --git a/ap/lib/libssl/openssl-1.1.1o/crypto/aes/asm/aes-armv4.pl b/ap/lib/libssl/openssl-1.1.1o/crypto/aes/asm/aes-armv4.pl
new file mode 100644
index 0000000..1112eef
--- /dev/null
+++ b/ap/lib/libssl/openssl-1.1.1o/crypto/aes/asm/aes-armv4.pl
@@ -0,0 +1,1245 @@
+#! /usr/bin/env perl
+# Copyright 2007-2020 The OpenSSL Project Authors. All Rights Reserved.
+#
+# Licensed under the OpenSSL license (the "License").  You may not use
+# this file except in compliance with the License.  You can obtain a copy
+# in the file LICENSE in the source distribution or at
+# https://www.openssl.org/source/license.html
+
+
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+
+# AES for ARMv4
+
+# January 2007.
+#
+# The code uses a single 1KB S-box and is >2 times faster than code
+# generated by gcc-3.4.1. This is thanks to a unique feature of the
+# ARMv4 ISA, which allows a logical or arithmetic operation to be merged
+# with a shift or rotate in one instruction, emitting the combined result
+# every cycle. The module is endian-neutral. The performance is ~42
+# cycles/byte with a 128-bit key [on a single-issue XScale PXA250 core].
+
+# May 2007.
+#
+# AES_set_[en|de]crypt_key is added.
+
+# July 2010.
+#
+# Rescheduling for the dual-issue pipeline resulted in a 12% improvement
+# on the Cortex-A8 core, at ~25 cycles per byte processed with a 128-bit
+# key.
+
+# February 2011.
+#
+# Profiler-assisted and platform-specific optimization resulted in a 16%
+# improvement on the Cortex-A8 core, at ~21.5 cycles per byte.
+
+$flavour = shift;
+if ($flavour=~/\w[\w\-]*\.\w+$/) { $output=$flavour; undef $flavour; }
+else { while (($output=shift) && ($output!~/\w[\w\-]*\.\w+$/)) {} }
+
+if ($flavour && $flavour ne "void") {
+    $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+    ( $xlate="${dir}arm-xlate.pl" and -f $xlate ) or
+    ( $xlate="${dir}../../perlasm/arm-xlate.pl" and -f $xlate) or
+    die "can't locate arm-xlate.pl";
+
+    open STDOUT,"| \"$^X\" $xlate $flavour $output";
+} else {
+    open STDOUT,">$output";
+}
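+
+# Illustrative invocation sketch, not part of the upstream script: with a
+# flavour argument the generated code is piped through arm-xlate.pl, e.g.
+# (flavour name assumed):
+#
+#   perl aes-armv4.pl linux32 aes-armv4.S
+#
+# whereas a single file argument writes raw assembly to that file directly.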
+
+$s0="r0";
+$s1="r1";
+$s2="r2";
+$s3="r3";
+$t1="r4";
+$t2="r5";
+$t3="r6";
+$i1="r7";
+$i2="r8";
+$i3="r9";
+
+$tbl="r10";
+$key="r11";
+$rounds="r12";
+
+$code=<<___;
+#ifndef __KERNEL__
+# include "arm_arch.h"
+#else
+# define __ARM_ARCH__ __LINUX_ARM_ARCH__
+#endif
+
+.text
+#if defined(__thumb2__) && !defined(__APPLE__)
+.syntax	unified
+.thumb
+#else
+.code	32
+#undef __thumb2__
+#endif
+
+.type	AES_Te,%object
+.align	5
+AES_Te:
+.word	0xc66363a5, 0xf87c7c84, 0xee777799, 0xf67b7b8d
+.word	0xfff2f20d, 0xd66b6bbd, 0xde6f6fb1, 0x91c5c554
+.word	0x60303050, 0x02010103, 0xce6767a9, 0x562b2b7d
+.word	0xe7fefe19, 0xb5d7d762, 0x4dababe6, 0xec76769a
+.word	0x8fcaca45, 0x1f82829d, 0x89c9c940, 0xfa7d7d87
+.word	0xeffafa15, 0xb25959eb, 0x8e4747c9, 0xfbf0f00b
+.word	0x41adadec, 0xb3d4d467, 0x5fa2a2fd, 0x45afafea
+.word	0x239c9cbf, 0x53a4a4f7, 0xe4727296, 0x9bc0c05b
+.word	0x75b7b7c2, 0xe1fdfd1c, 0x3d9393ae, 0x4c26266a
+.word	0x6c36365a, 0x7e3f3f41, 0xf5f7f702, 0x83cccc4f
+.word	0x6834345c, 0x51a5a5f4, 0xd1e5e534, 0xf9f1f108
+.word	0xe2717193, 0xabd8d873, 0x62313153, 0x2a15153f
+.word	0x0804040c, 0x95c7c752, 0x46232365, 0x9dc3c35e
+.word	0x30181828, 0x379696a1, 0x0a05050f, 0x2f9a9ab5
+.word	0x0e070709, 0x24121236, 0x1b80809b, 0xdfe2e23d
+.word	0xcdebeb26, 0x4e272769, 0x7fb2b2cd, 0xea75759f
+.word	0x1209091b, 0x1d83839e, 0x582c2c74, 0x341a1a2e
+.word	0x361b1b2d, 0xdc6e6eb2, 0xb45a5aee, 0x5ba0a0fb
+.word	0xa45252f6, 0x763b3b4d, 0xb7d6d661, 0x7db3b3ce
+.word	0x5229297b, 0xdde3e33e, 0x5e2f2f71, 0x13848497
+.word	0xa65353f5, 0xb9d1d168, 0x00000000, 0xc1eded2c
+.word	0x40202060, 0xe3fcfc1f, 0x79b1b1c8, 0xb65b5bed
+.word	0xd46a6abe, 0x8dcbcb46, 0x67bebed9, 0x7239394b
+.word	0x944a4ade, 0x984c4cd4, 0xb05858e8, 0x85cfcf4a
+.word	0xbbd0d06b, 0xc5efef2a, 0x4faaaae5, 0xedfbfb16
+.word	0x864343c5, 0x9a4d4dd7, 0x66333355, 0x11858594
+.word	0x8a4545cf, 0xe9f9f910, 0x04020206, 0xfe7f7f81
+.word	0xa05050f0, 0x783c3c44, 0x259f9fba, 0x4ba8a8e3
+.word	0xa25151f3, 0x5da3a3fe, 0x804040c0, 0x058f8f8a
+.word	0x3f9292ad, 0x219d9dbc, 0x70383848, 0xf1f5f504
+.word	0x63bcbcdf, 0x77b6b6c1, 0xafdada75, 0x42212163
+.word	0x20101030, 0xe5ffff1a, 0xfdf3f30e, 0xbfd2d26d
+.word	0x81cdcd4c, 0x180c0c14, 0x26131335, 0xc3ecec2f
+.word	0xbe5f5fe1, 0x359797a2, 0x884444cc, 0x2e171739
+.word	0x93c4c457, 0x55a7a7f2, 0xfc7e7e82, 0x7a3d3d47
+.word	0xc86464ac, 0xba5d5de7, 0x3219192b, 0xe6737395
+.word	0xc06060a0, 0x19818198, 0x9e4f4fd1, 0xa3dcdc7f
+.word	0x44222266, 0x542a2a7e, 0x3b9090ab, 0x0b888883
+.word	0x8c4646ca, 0xc7eeee29, 0x6bb8b8d3, 0x2814143c
+.word	0xa7dede79, 0xbc5e5ee2, 0x160b0b1d, 0xaddbdb76
+.word	0xdbe0e03b, 0x64323256, 0x743a3a4e, 0x140a0a1e
+.word	0x924949db, 0x0c06060a, 0x4824246c, 0xb85c5ce4
+.word	0x9fc2c25d, 0xbdd3d36e, 0x43acacef, 0xc46262a6
+.word	0x399191a8, 0x319595a4, 0xd3e4e437, 0xf279798b
+.word	0xd5e7e732, 0x8bc8c843, 0x6e373759, 0xda6d6db7
+.word	0x018d8d8c, 0xb1d5d564, 0x9c4e4ed2, 0x49a9a9e0
+.word	0xd86c6cb4, 0xac5656fa, 0xf3f4f407, 0xcfeaea25
+.word	0xca6565af, 0xf47a7a8e, 0x47aeaee9, 0x10080818
+.word	0x6fbabad5, 0xf0787888, 0x4a25256f, 0x5c2e2e72
+.word	0x381c1c24, 0x57a6a6f1, 0x73b4b4c7, 0x97c6c651
+.word	0xcbe8e823, 0xa1dddd7c, 0xe874749c, 0x3e1f1f21
+.word	0x964b4bdd, 0x61bdbddc, 0x0d8b8b86, 0x0f8a8a85
+.word	0xe0707090, 0x7c3e3e42, 0x71b5b5c4, 0xcc6666aa
+.word	0x904848d8, 0x06030305, 0xf7f6f601, 0x1c0e0e12
+.word	0xc26161a3, 0x6a35355f, 0xae5757f9, 0x69b9b9d0
+.word	0x17868691, 0x99c1c158, 0x3a1d1d27, 0x279e9eb9
+.word	0xd9e1e138, 0xebf8f813, 0x2b9898b3, 0x22111133
+.word	0xd26969bb, 0xa9d9d970, 0x078e8e89, 0x339494a7
+.word	0x2d9b9bb6, 0x3c1e1e22, 0x15878792, 0xc9e9e920
+.word	0x87cece49, 0xaa5555ff, 0x50282878, 0xa5dfdf7a
+.word	0x038c8c8f, 0x59a1a1f8, 0x09898980, 0x1a0d0d17
+.word	0x65bfbfda, 0xd7e6e631, 0x844242c6, 0xd06868b8
+.word	0x824141c3, 0x299999b0, 0x5a2d2d77, 0x1e0f0f11
+.word	0x7bb0b0cb, 0xa85454fc, 0x6dbbbbd6, 0x2c16163a
+@ Te4[256]
+.byte	0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5
+.byte	0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76
+.byte	0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0
+.byte	0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0
+.byte	0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc
+.byte	0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15
+.byte	0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a
+.byte	0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75
+.byte	0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0
+.byte	0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84
+.byte	0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b
+.byte	0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf
+.byte	0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85
+.byte	0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8
+.byte	0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5
+.byte	0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2
+.byte	0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17
+.byte	0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73
+.byte	0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88
+.byte	0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb
+.byte	0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c
+.byte	0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79
+.byte	0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9
+.byte	0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08
+.byte	0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6
+.byte	0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a
+.byte	0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e
+.byte	0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e
+.byte	0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94
+.byte	0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf
+.byte	0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68
+.byte	0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16
+@ rcon[]
+.word	0x01000000, 0x02000000, 0x04000000, 0x08000000
+.word	0x10000000, 0x20000000, 0x40000000, 0x80000000
+.word	0x1B000000, 0x36000000, 0, 0, 0, 0, 0, 0
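+@ rcon[i] = {02}^i sits in the most significant byte here: the module keeps
+@ state words in big-endian byte order (cf. the rev instructions used on
+@ little-endian targets below), so the constants are mirrored relative to
+@ the C table in aes_core.c; the trailing zero words are unused padding.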
+.size	AES_Te,.-AES_Te
+
+@ void AES_encrypt(const unsigned char *in, unsigned char *out,
+@ 		 const AES_KEY *key) {
+.global AES_encrypt
+.type   AES_encrypt,%function
+.align	5
+AES_encrypt:
+#ifndef	__thumb2__
+	sub	r3,pc,#8		@ AES_encrypt
+#else
+	adr	r3,.
+#endif
+	stmdb   sp!,{r1,r4-r12,lr}
+#if defined(__thumb2__) || defined(__APPLE__)
+	adr	$tbl,AES_Te
+#else
+	sub	$tbl,r3,#AES_encrypt-AES_Te	@ Te
+#endif
+	mov	$rounds,r0		@ inp
+	mov	$key,r2
+#if __ARM_ARCH__<7
+	ldrb	$s0,[$rounds,#3]	@ load input data in endian-neutral
+	ldrb	$t1,[$rounds,#2]	@ manner...
+	ldrb	$t2,[$rounds,#1]
+	ldrb	$t3,[$rounds,#0]
+	orr	$s0,$s0,$t1,lsl#8
+	ldrb	$s1,[$rounds,#7]
+	orr	$s0,$s0,$t2,lsl#16
+	ldrb	$t1,[$rounds,#6]
+	orr	$s0,$s0,$t3,lsl#24
+	ldrb	$t2,[$rounds,#5]
+	ldrb	$t3,[$rounds,#4]
+	orr	$s1,$s1,$t1,lsl#8
+	ldrb	$s2,[$rounds,#11]
+	orr	$s1,$s1,$t2,lsl#16
+	ldrb	$t1,[$rounds,#10]
+	orr	$s1,$s1,$t3,lsl#24
+	ldrb	$t2,[$rounds,#9]
+	ldrb	$t3,[$rounds,#8]
+	orr	$s2,$s2,$t1,lsl#8
+	ldrb	$s3,[$rounds,#15]
+	orr	$s2,$s2,$t2,lsl#16
+	ldrb	$t1,[$rounds,#14]
+	orr	$s2,$s2,$t3,lsl#24
+	ldrb	$t2,[$rounds,#13]
+	ldrb	$t3,[$rounds,#12]
+	orr	$s3,$s3,$t1,lsl#8
+	orr	$s3,$s3,$t2,lsl#16
+	orr	$s3,$s3,$t3,lsl#24
+#else
+	ldr	$s0,[$rounds,#0]
+	ldr	$s1,[$rounds,#4]
+	ldr	$s2,[$rounds,#8]
+	ldr	$s3,[$rounds,#12]
+#ifdef __ARMEL__
+	rev	$s0,$s0
+	rev	$s1,$s1
+	rev	$s2,$s2
+	rev	$s3,$s3
+#endif
+#endif
+	bl	_armv4_AES_encrypt
+
+	ldr	$rounds,[sp],#4		@ pop out
+#if __ARM_ARCH__>=7
+#ifdef __ARMEL__
+	rev	$s0,$s0
+	rev	$s1,$s1
+	rev	$s2,$s2
+	rev	$s3,$s3
+#endif
+	str	$s0,[$rounds,#0]
+	str	$s1,[$rounds,#4]
+	str	$s2,[$rounds,#8]
+	str	$s3,[$rounds,#12]
+#else
+	mov	$t1,$s0,lsr#24		@ write output in endian-neutral
+	mov	$t2,$s0,lsr#16		@ manner...
+	mov	$t3,$s0,lsr#8
+	strb	$t1,[$rounds,#0]
+	strb	$t2,[$rounds,#1]
+	mov	$t1,$s1,lsr#24
+	strb	$t3,[$rounds,#2]
+	mov	$t2,$s1,lsr#16
+	strb	$s0,[$rounds,#3]
+	mov	$t3,$s1,lsr#8
+	strb	$t1,[$rounds,#4]
+	strb	$t2,[$rounds,#5]
+	mov	$t1,$s2,lsr#24
+	strb	$t3,[$rounds,#6]
+	mov	$t2,$s2,lsr#16
+	strb	$s1,[$rounds,#7]
+	mov	$t3,$s2,lsr#8
+	strb	$t1,[$rounds,#8]
+	strb	$t2,[$rounds,#9]
+	mov	$t1,$s3,lsr#24
+	strb	$t3,[$rounds,#10]
+	mov	$t2,$s3,lsr#16
+	strb	$s2,[$rounds,#11]
+	mov	$t3,$s3,lsr#8
+	strb	$t1,[$rounds,#12]
+	strb	$t2,[$rounds,#13]
+	strb	$t3,[$rounds,#14]
+	strb	$s3,[$rounds,#15]
+#endif
+#if __ARM_ARCH__>=5
+	ldmia	sp!,{r4-r12,pc}
+#else
+	ldmia   sp!,{r4-r12,lr}
+	tst	lr,#1
+	moveq	pc,lr			@ be binary compatible with V4, yet
+	bx	lr			@ interoperable with Thumb ISA:-)
+#endif
+.size	AES_encrypt,.-AES_encrypt
+
+.type   _armv4_AES_encrypt,%function
+.align	2
+_armv4_AES_encrypt:
+	str	lr,[sp,#-4]!		@ push lr
+	ldmia	$key!,{$t1-$i1}
+	eor	$s0,$s0,$t1
+	ldr	$rounds,[$key,#240-16]
+	eor	$s1,$s1,$t2
+	eor	$s2,$s2,$t3
+	eor	$s3,$s3,$i1
+	sub	$rounds,$rounds,#1
+	mov	lr,#255
+
+	and	$i1,lr,$s0
+	and	$i2,lr,$s0,lsr#8
+	and	$i3,lr,$s0,lsr#16
+	mov	$s0,$s0,lsr#24
+.Lenc_loop:
+	ldr	$t1,[$tbl,$i1,lsl#2]	@ Te3[s0>>0]
+	and	$i1,lr,$s1,lsr#16	@ i0
+	ldr	$t2,[$tbl,$i2,lsl#2]	@ Te2[s0>>8]
+	and	$i2,lr,$s1
+	ldr	$t3,[$tbl,$i3,lsl#2]	@ Te1[s0>>16]
+	and	$i3,lr,$s1,lsr#8
+	ldr	$s0,[$tbl,$s0,lsl#2]	@ Te0[s0>>24]
+	mov	$s1,$s1,lsr#24
+
+	ldr	$i1,[$tbl,$i1,lsl#2]	@ Te1[s1>>16]
+	ldr	$i2,[$tbl,$i2,lsl#2]	@ Te3[s1>>0]
+	ldr	$i3,[$tbl,$i3,lsl#2]	@ Te2[s1>>8]
+	eor	$s0,$s0,$i1,ror#8
+	ldr	$s1,[$tbl,$s1,lsl#2]	@ Te0[s1>>24]
+	and	$i1,lr,$s2,lsr#8	@ i0
+	eor	$t2,$t2,$i2,ror#8
+	and	$i2,lr,$s2,lsr#16	@ i1
+	eor	$t3,$t3,$i3,ror#8
+	and	$i3,lr,$s2
+	ldr	$i1,[$tbl,$i1,lsl#2]	@ Te2[s2>>8]
+	eor	$s1,$s1,$t1,ror#24
+	ldr	$i2,[$tbl,$i2,lsl#2]	@ Te1[s2>>16]
+	mov	$s2,$s2,lsr#24
+
+	ldr	$i3,[$tbl,$i3,lsl#2]	@ Te3[s2>>0]
+	eor	$s0,$s0,$i1,ror#16
+	ldr	$s2,[$tbl,$s2,lsl#2]	@ Te0[s2>>24]
+	and	$i1,lr,$s3		@ i0
+	eor	$s1,$s1,$i2,ror#8
+	and	$i2,lr,$s3,lsr#8	@ i1
+	eor	$t3,$t3,$i3,ror#16
+	and	$i3,lr,$s3,lsr#16	@ i2
+	ldr	$i1,[$tbl,$i1,lsl#2]	@ Te3[s3>>0]
+	eor	$s2,$s2,$t2,ror#16
+	ldr	$i2,[$tbl,$i2,lsl#2]	@ Te2[s3>>8]
+	mov	$s3,$s3,lsr#24
+
+	ldr	$i3,[$tbl,$i3,lsl#2]	@ Te1[s3>>16]
+	eor	$s0,$s0,$i1,ror#24
+	ldr	$i1,[$key],#16
+	eor	$s1,$s1,$i2,ror#16
+	ldr	$s3,[$tbl,$s3,lsl#2]	@ Te0[s3>>24]
+	eor	$s2,$s2,$i3,ror#8
+	ldr	$t1,[$key,#-12]
+	eor	$s3,$s3,$t3,ror#8
+
+	ldr	$t2,[$key,#-8]
+	eor	$s0,$s0,$i1
+	ldr	$t3,[$key,#-4]
+	and	$i1,lr,$s0
+	eor	$s1,$s1,$t1
+	and	$i2,lr,$s0,lsr#8
+	eor	$s2,$s2,$t2
+	and	$i3,lr,$s0,lsr#16
+	eor	$s3,$s3,$t3
+	mov	$s0,$s0,lsr#24
+
+	subs	$rounds,$rounds,#1
+	bne	.Lenc_loop
+
+	add	$tbl,$tbl,#2
+
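+	@ The "add #2" above exploits the Te word layout (2S,S,S,3S): the
+	@ byte at offset 4*i+2 equals the plain S-box value S[i] on either
+	@ endianness, so the ldrb lookups below become Te4-style S-box
+	@ fetches straight from the word table.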
+	ldrb	$t1,[$tbl,$i1,lsl#2]	@ Te4[s0>>0]
+	and	$i1,lr,$s1,lsr#16	@ i0
+	ldrb	$t2,[$tbl,$i2,lsl#2]	@ Te4[s0>>8]
+	and	$i2,lr,$s1
+	ldrb	$t3,[$tbl,$i3,lsl#2]	@ Te4[s0>>16]
+	and	$i3,lr,$s1,lsr#8
+	ldrb	$s0,[$tbl,$s0,lsl#2]	@ Te4[s0>>24]
+	mov	$s1,$s1,lsr#24
+
+	ldrb	$i1,[$tbl,$i1,lsl#2]	@ Te4[s1>>16]
+	ldrb	$i2,[$tbl,$i2,lsl#2]	@ Te4[s1>>0]
+	ldrb	$i3,[$tbl,$i3,lsl#2]	@ Te4[s1>>8]
+	eor	$s0,$i1,$s0,lsl#8
+	ldrb	$s1,[$tbl,$s1,lsl#2]	@ Te4[s1>>24]
+	and	$i1,lr,$s2,lsr#8	@ i0
+	eor	$t2,$i2,$t2,lsl#8
+	and	$i2,lr,$s2,lsr#16	@ i1
+	eor	$t3,$i3,$t3,lsl#8
+	and	$i3,lr,$s2
+	ldrb	$i1,[$tbl,$i1,lsl#2]	@ Te4[s2>>8]
+	eor	$s1,$t1,$s1,lsl#24
+	ldrb	$i2,[$tbl,$i2,lsl#2]	@ Te4[s2>>16]
+	mov	$s2,$s2,lsr#24
+
+	ldrb	$i3,[$tbl,$i3,lsl#2]	@ Te4[s2>>0]
+	eor	$s0,$i1,$s0,lsl#8
+	ldrb	$s2,[$tbl,$s2,lsl#2]	@ Te4[s2>>24]
+	and	$i1,lr,$s3		@ i0
+	eor	$s1,$s1,$i2,lsl#16
+	and	$i2,lr,$s3,lsr#8	@ i1
+	eor	$t3,$i3,$t3,lsl#8
+	and	$i3,lr,$s3,lsr#16	@ i2
+	ldrb	$i1,[$tbl,$i1,lsl#2]	@ Te4[s3>>0]
+	eor	$s2,$t2,$s2,lsl#24
+	ldrb	$i2,[$tbl,$i2,lsl#2]	@ Te4[s3>>8]
+	mov	$s3,$s3,lsr#24
+
+	ldrb	$i3,[$tbl,$i3,lsl#2]	@ Te4[s3>>16]
+	eor	$s0,$i1,$s0,lsl#8
+	ldr	$i1,[$key,#0]
+	ldrb	$s3,[$tbl,$s3,lsl#2]	@ Te4[s3>>24]
+	eor	$s1,$s1,$i2,lsl#8
+	ldr	$t1,[$key,#4]
+	eor	$s2,$s2,$i3,lsl#16
+	ldr	$t2,[$key,#8]
+	eor	$s3,$t3,$s3,lsl#24
+	ldr	$t3,[$key,#12]
+
+	eor	$s0,$s0,$i1
+	eor	$s1,$s1,$t1
+	eor	$s2,$s2,$t2
+	eor	$s3,$s3,$t3
+
+	sub	$tbl,$tbl,#2
+	ldr	pc,[sp],#4		@ pop and return
+.size	_armv4_AES_encrypt,.-_armv4_AES_encrypt
+
+.global AES_set_encrypt_key
+.type   AES_set_encrypt_key,%function
+.align	5
+AES_set_encrypt_key:
+_armv4_AES_set_encrypt_key:
+#ifndef	__thumb2__
+	sub	r3,pc,#8		@ AES_set_encrypt_key
+#else
+	adr	r3,.
+#endif
+	teq	r0,#0
+#ifdef	__thumb2__
+	itt	eq			@ Thumb2 thing, sanity check in ARM
+#endif
+	moveq	r0,#-1
+	beq	.Labrt
+	teq	r2,#0
+#ifdef	__thumb2__
+	itt	eq			@ Thumb2 thing, sanity check in ARM
+#endif
+	moveq	r0,#-1
+	beq	.Labrt
+
+	teq	r1,#128
+	beq	.Lok
+	teq	r1,#192
+	beq	.Lok
+	teq	r1,#256
+#ifdef	__thumb2__
+	itt	ne			@ Thumb2 thing, sanity check in ARM
+#endif
+	movne	r0,#-1
+	bne	.Labrt
+
+.Lok:	stmdb   sp!,{r4-r12,lr}
+	mov	$rounds,r0		@ inp
+	mov	lr,r1			@ bits
+	mov	$key,r2			@ key
+
+#if defined(__thumb2__) || defined(__APPLE__)
+	adr	$tbl,AES_Te+1024				@ Te4
+#else
+	sub	$tbl,r3,#_armv4_AES_set_encrypt_key-AES_Te-1024	@ Te4
+#endif
+
+#if __ARM_ARCH__<7
+	ldrb	$s0,[$rounds,#3]	@ load input data in endian-neutral
+	ldrb	$t1,[$rounds,#2]	@ manner...
+	ldrb	$t2,[$rounds,#1]
+	ldrb	$t3,[$rounds,#0]
+	orr	$s0,$s0,$t1,lsl#8
+	ldrb	$s1,[$rounds,#7]
+	orr	$s0,$s0,$t2,lsl#16
+	ldrb	$t1,[$rounds,#6]
+	orr	$s0,$s0,$t3,lsl#24
+	ldrb	$t2,[$rounds,#5]
+	ldrb	$t3,[$rounds,#4]
+	orr	$s1,$s1,$t1,lsl#8
+	ldrb	$s2,[$rounds,#11]
+	orr	$s1,$s1,$t2,lsl#16
+	ldrb	$t1,[$rounds,#10]
+	orr	$s1,$s1,$t3,lsl#24
+	ldrb	$t2,[$rounds,#9]
+	ldrb	$t3,[$rounds,#8]
+	orr	$s2,$s2,$t1,lsl#8
+	ldrb	$s3,[$rounds,#15]
+	orr	$s2,$s2,$t2,lsl#16
+	ldrb	$t1,[$rounds,#14]
+	orr	$s2,$s2,$t3,lsl#24
+	ldrb	$t2,[$rounds,#13]
+	ldrb	$t3,[$rounds,#12]
+	orr	$s3,$s3,$t1,lsl#8
+	str	$s0,[$key],#16
+	orr	$s3,$s3,$t2,lsl#16
+	str	$s1,[$key,#-12]
+	orr	$s3,$s3,$t3,lsl#24
+	str	$s2,[$key,#-8]
+	str	$s3,[$key,#-4]
+#else
+	ldr	$s0,[$rounds,#0]
+	ldr	$s1,[$rounds,#4]
+	ldr	$s2,[$rounds,#8]
+	ldr	$s3,[$rounds,#12]
+#ifdef __ARMEL__
+	rev	$s0,$s0
+	rev	$s1,$s1
+	rev	$s2,$s2
+	rev	$s3,$s3
+#endif
+	str	$s0,[$key],#16
+	str	$s1,[$key,#-12]
+	str	$s2,[$key,#-8]
+	str	$s3,[$key,#-4]
+#endif
+
+	teq	lr,#128
+	bne	.Lnot128
+	mov	$rounds,#10
+	str	$rounds,[$key,#240-16]
+	add	$t3,$tbl,#256			@ rcon
+	mov	lr,#255
+
+.L128_loop:
+	and	$t2,lr,$s3,lsr#24
+	and	$i1,lr,$s3,lsr#16
+	ldrb	$t2,[$tbl,$t2]
+	and	$i2,lr,$s3,lsr#8
+	ldrb	$i1,[$tbl,$i1]
+	and	$i3,lr,$s3
+	ldrb	$i2,[$tbl,$i2]
+	orr	$t2,$t2,$i1,lsl#24
+	ldrb	$i3,[$tbl,$i3]
+	orr	$t2,$t2,$i2,lsl#16
+	ldr	$t1,[$t3],#4			@ rcon[i++]
+	orr	$t2,$t2,$i3,lsl#8
+	eor	$t2,$t2,$t1
+	eor	$s0,$s0,$t2			@ rk[4]=rk[0]^...
+	eor	$s1,$s1,$s0			@ rk[5]=rk[1]^rk[4]
+	str	$s0,[$key],#16
+	eor	$s2,$s2,$s1			@ rk[6]=rk[2]^rk[5]
+	str	$s1,[$key,#-12]
+	eor	$s3,$s3,$s2			@ rk[7]=rk[3]^rk[6]
+	str	$s2,[$key,#-8]
+	subs	$rounds,$rounds,#1
+	str	$s3,[$key,#-4]
+	bne	.L128_loop
+	sub	r2,$key,#176
+	b	.Ldone
+
+.Lnot128:
+#if __ARM_ARCH__<7
+	ldrb	$i2,[$rounds,#19]
+	ldrb	$t1,[$rounds,#18]
+	ldrb	$t2,[$rounds,#17]
+	ldrb	$t3,[$rounds,#16]
+	orr	$i2,$i2,$t1,lsl#8
+	ldrb	$i3,[$rounds,#23]
+	orr	$i2,$i2,$t2,lsl#16
+	ldrb	$t1,[$rounds,#22]
+	orr	$i2,$i2,$t3,lsl#24
+	ldrb	$t2,[$rounds,#21]
+	ldrb	$t3,[$rounds,#20]
+	orr	$i3,$i3,$t1,lsl#8
+	orr	$i3,$i3,$t2,lsl#16
+	str	$i2,[$key],#8
+	orr	$i3,$i3,$t3,lsl#24
+	str	$i3,[$key,#-4]
+#else
+	ldr	$i2,[$rounds,#16]
+	ldr	$i3,[$rounds,#20]
+#ifdef __ARMEL__
+	rev	$i2,$i2
+	rev	$i3,$i3
+#endif
+	str	$i2,[$key],#8
+	str	$i3,[$key,#-4]
+#endif
+
+	teq	lr,#192
+	bne	.Lnot192
+	mov	$rounds,#12
+	str	$rounds,[$key,#240-24]
+	add	$t3,$tbl,#256			@ rcon
+	mov	lr,#255
+	mov	$rounds,#8
+
+.L192_loop:
+	and	$t2,lr,$i3,lsr#24
+	and	$i1,lr,$i3,lsr#16
+	ldrb	$t2,[$tbl,$t2]
+	and	$i2,lr,$i3,lsr#8
+	ldrb	$i1,[$tbl,$i1]
+	and	$i3,lr,$i3
+	ldrb	$i2,[$tbl,$i2]
+	orr	$t2,$t2,$i1,lsl#24
+	ldrb	$i3,[$tbl,$i3]
+	orr	$t2,$t2,$i2,lsl#16
+	ldr	$t1,[$t3],#4			@ rcon[i++]
+	orr	$t2,$t2,$i3,lsl#8
+	eor	$i3,$t2,$t1
+	eor	$s0,$s0,$i3			@ rk[6]=rk[0]^...
+	eor	$s1,$s1,$s0			@ rk[7]=rk[1]^rk[6]
+	str	$s0,[$key],#24
+	eor	$s2,$s2,$s1			@ rk[8]=rk[2]^rk[7]
+	str	$s1,[$key,#-20]
+	eor	$s3,$s3,$s2			@ rk[9]=rk[3]^rk[8]
+	str	$s2,[$key,#-16]
+	subs	$rounds,$rounds,#1
+	str	$s3,[$key,#-12]
+#ifdef	__thumb2__
+	itt	eq				@ Thumb2 thing, sanity check in ARM
+#endif
+	subeq	r2,$key,#216
+	beq	.Ldone
+
+	ldr	$i1,[$key,#-32]
+	ldr	$i2,[$key,#-28]
+	eor	$i1,$i1,$s3			@ rk[10]=rk[4]^rk[9]
+	eor	$i3,$i2,$i1			@ rk[11]=rk[5]^rk[10]
+	str	$i1,[$key,#-8]
+	str	$i3,[$key,#-4]
+	b	.L192_loop
+
+.Lnot192:
+#if __ARM_ARCH__<7
+	ldrb	$i2,[$rounds,#27]
+	ldrb	$t1,[$rounds,#26]
+	ldrb	$t2,[$rounds,#25]
+	ldrb	$t3,[$rounds,#24]
+	orr	$i2,$i2,$t1,lsl#8
+	ldrb	$i3,[$rounds,#31]
+	orr	$i2,$i2,$t2,lsl#16
+	ldrb	$t1,[$rounds,#30]
+	orr	$i2,$i2,$t3,lsl#24
+	ldrb	$t2,[$rounds,#29]
+	ldrb	$t3,[$rounds,#28]
+	orr	$i3,$i3,$t1,lsl#8
+	orr	$i3,$i3,$t2,lsl#16
+	str	$i2,[$key],#8
+	orr	$i3,$i3,$t3,lsl#24
+	str	$i3,[$key,#-4]
+#else
+	ldr	$i2,[$rounds,#24]
+	ldr	$i3,[$rounds,#28]
+#ifdef __ARMEL__
+	rev	$i2,$i2
+	rev	$i3,$i3
+#endif
+	str	$i2,[$key],#8
+	str	$i3,[$key,#-4]
+#endif
+
+	mov	$rounds,#14
+	str	$rounds,[$key,#240-32]
+	add	$t3,$tbl,#256			@ rcon
+	mov	lr,#255
+	mov	$rounds,#7
+
+.L256_loop:
+	and	$t2,lr,$i3,lsr#24
+	and	$i1,lr,$i3,lsr#16
+	ldrb	$t2,[$tbl,$t2]
+	and	$i2,lr,$i3,lsr#8
+	ldrb	$i1,[$tbl,$i1]
+	and	$i3,lr,$i3
+	ldrb	$i2,[$tbl,$i2]
+	orr	$t2,$t2,$i1,lsl#24
+	ldrb	$i3,[$tbl,$i3]
+	orr	$t2,$t2,$i2,lsl#16
+	ldr	$t1,[$t3],#4			@ rcon[i++]
+	orr	$t2,$t2,$i3,lsl#8
+	eor	$i3,$t2,$t1
+	eor	$s0,$s0,$i3			@ rk[8]=rk[0]^...
+	eor	$s1,$s1,$s0			@ rk[9]=rk[1]^rk[8]
+	str	$s0,[$key],#32
+	eor	$s2,$s2,$s1			@ rk[10]=rk[2]^rk[9]
+	str	$s1,[$key,#-28]
+	eor	$s3,$s3,$s2			@ rk[11]=rk[3]^rk[10]
+	str	$s2,[$key,#-24]
+	subs	$rounds,$rounds,#1
+	str	$s3,[$key,#-20]
+#ifdef	__thumb2__
+	itt	eq				@ Thumb2 thing, sanity check in ARM
+#endif
+	subeq	r2,$key,#256
+	beq	.Ldone
+
+	and	$t2,lr,$s3
+	and	$i1,lr,$s3,lsr#8
+	ldrb	$t2,[$tbl,$t2]
+	and	$i2,lr,$s3,lsr#16
+	ldrb	$i1,[$tbl,$i1]
+	and	$i3,lr,$s3,lsr#24
+	ldrb	$i2,[$tbl,$i2]
+	orr	$t2,$t2,$i1,lsl#8
+	ldrb	$i3,[$tbl,$i3]
+	orr	$t2,$t2,$i2,lsl#16
+	ldr	$t1,[$key,#-48]
+	orr	$t2,$t2,$i3,lsl#24
+
+	ldr	$i1,[$key,#-44]
+	ldr	$i2,[$key,#-40]
+	eor	$t1,$t1,$t2			@ rk[12]=rk[4]^...
+	ldr	$i3,[$key,#-36]
+	eor	$i1,$i1,$t1			@ rk[13]=rk[5]^rk[12]
+	str	$t1,[$key,#-16]
+	eor	$i2,$i2,$i1			@ rk[14]=rk[6]^rk[13]
+	str	$i1,[$key,#-12]
+	eor	$i3,$i3,$i2			@ rk[15]=rk[7]^rk[14]
+	str	$i2,[$key,#-8]
+	str	$i3,[$key,#-4]
+	b	.L256_loop
+
+.align	2
+.Ldone:	mov	r0,#0
+	ldmia   sp!,{r4-r12,lr}
+.Labrt:
+#if __ARM_ARCH__>=5
+	ret				@ bx lr
+#else
+	tst	lr,#1
+	moveq	pc,lr			@ be binary compatible with V4, yet
+	bx	lr			@ interoperable with Thumb ISA:-)
+#endif
+.size	AES_set_encrypt_key,.-AES_set_encrypt_key
+
+.global AES_set_decrypt_key
+.type   AES_set_decrypt_key,%function
+.align	5
+AES_set_decrypt_key:
+	str	lr,[sp,#-4]!            @ push lr
+	bl	_armv4_AES_set_encrypt_key
+	teq	r0,#0
+	ldr	lr,[sp],#4              @ pop lr
+	bne	.Labrt
+
+	mov	r0,r2			@ AES_set_encrypt_key preserves r2,
+	mov	r1,r2			@ which is AES_KEY *key
+	b	_armv4_AES_set_enc2dec_key
+.size	AES_set_decrypt_key,.-AES_set_decrypt_key
+
+@ void AES_set_enc2dec_key(const AES_KEY *inp,AES_KEY *out)
+.global	AES_set_enc2dec_key
+.type	AES_set_enc2dec_key,%function
+.align	5
+AES_set_enc2dec_key:
+_armv4_AES_set_enc2dec_key:
+	stmdb   sp!,{r4-r12,lr}
+
+	ldr	$rounds,[r0,#240]
+	mov	$i1,r0			@ input
+	add	$i2,r0,$rounds,lsl#4
+	mov	$key,r1			@ output
+	add	$tbl,r1,$rounds,lsl#4
+	str	$rounds,[r1,#240]
+
+.Linv:	ldr	$s0,[$i1],#16
+	ldr	$s1,[$i1,#-12]
+	ldr	$s2,[$i1,#-8]
+	ldr	$s3,[$i1,#-4]
+	ldr	$t1,[$i2],#-16
+	ldr	$t2,[$i2,#16+4]
+	ldr	$t3,[$i2,#16+8]
+	ldr	$i3,[$i2,#16+12]
+	str	$s0,[$tbl],#-16
+	str	$s1,[$tbl,#16+4]
+	str	$s2,[$tbl,#16+8]
+	str	$s3,[$tbl,#16+12]
+	str	$t1,[$key],#16
+	str	$t2,[$key,#-12]
+	str	$t3,[$key,#-8]
+	str	$i3,[$key,#-4]
+	teq	$i1,$i2
+	bne	.Linv
+
+	ldr	$s0,[$i1]
+	ldr	$s1,[$i1,#4]
+	ldr	$s2,[$i1,#8]
+	ldr	$s3,[$i1,#12]
+	str	$s0,[$key]
+	str	$s1,[$key,#4]
+	str	$s2,[$key,#8]
+	str	$s3,[$key,#12]
+	sub	$key,$key,$rounds,lsl#3
+___
+$mask80=$i1;
+$mask1b=$i2;
+$mask7f=$i3;
+$code.=<<___;
+	ldr	$s0,[$key,#16]!		@ prefetch tp1
+	mov	$mask80,#0x80
+	mov	$mask1b,#0x1b
+	orr	$mask80,$mask80,#0x8000
+	orr	$mask1b,$mask1b,#0x1b00
+	orr	$mask80,$mask80,$mask80,lsl#16
+	orr	$mask1b,$mask1b,$mask1b,lsl#16
+	sub	$rounds,$rounds,#1
+	mvn	$mask7f,$mask80
+	mov	$rounds,$rounds,lsl#2	@ (rounds-1)*4
+
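+@ The mask arithmetic below mirrors the word-wise InvMixColumns applied to
+@ the round keys in aes_core.c: per byte the 0x80808080 term is 0x80 or
+@ 0x00, so (m - (m >> 7)) & 0x1b1b1b1b selects the 0x1b ("xtime") reduction
+@ for four bytes at once, building tp2, tp4, tp8 and then tp9/tpb/tpd/tpe.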
+.Lmix:	and	$t1,$s0,$mask80
+	and	$s1,$s0,$mask7f
+	sub	$t1,$t1,$t1,lsr#7
+	and	$t1,$t1,$mask1b
+	eor	$s1,$t1,$s1,lsl#1	@ tp2
+
+	and	$t1,$s1,$mask80
+	and	$s2,$s1,$mask7f
+	sub	$t1,$t1,$t1,lsr#7
+	and	$t1,$t1,$mask1b
+	eor	$s2,$t1,$s2,lsl#1	@ tp4
+
+	and	$t1,$s2,$mask80
+	and	$s3,$s2,$mask7f
+	sub	$t1,$t1,$t1,lsr#7
+	and	$t1,$t1,$mask1b
+	eor	$s3,$t1,$s3,lsl#1	@ tp8
+
+	eor	$t1,$s1,$s2
+	eor	$t2,$s0,$s3		@ tp9
+	eor	$t1,$t1,$s3		@ tpe
+	eor	$t1,$t1,$s1,ror#24
+	eor	$t1,$t1,$t2,ror#24	@ ^= ROTATE(tpb=tp9^tp2,8)
+	eor	$t1,$t1,$s2,ror#16
+	eor	$t1,$t1,$t2,ror#16	@ ^= ROTATE(tpd=tp9^tp4,16)
+	eor	$t1,$t1,$t2,ror#8	@ ^= ROTATE(tp9,24)
+
+	ldr	$s0,[$key,#4]		@ prefetch tp1
+	str	$t1,[$key],#4
+	subs	$rounds,$rounds,#1
+	bne	.Lmix
+
+	mov	r0,#0
+#if __ARM_ARCH__>=5
+	ldmia	sp!,{r4-r12,pc}
+#else
+	ldmia   sp!,{r4-r12,lr}
+	tst	lr,#1
+	moveq	pc,lr			@ be binary compatible with V4, yet
+	bx	lr			@ interoperable with Thumb ISA:-)
+#endif
+.size	AES_set_enc2dec_key,.-AES_set_enc2dec_key
+
+.type	AES_Td,%object
+.align	5
+AES_Td:
+.word	0x51f4a750, 0x7e416553, 0x1a17a4c3, 0x3a275e96
+.word	0x3bab6bcb, 0x1f9d45f1, 0xacfa58ab, 0x4be30393
+.word	0x2030fa55, 0xad766df6, 0x88cc7691, 0xf5024c25
+.word	0x4fe5d7fc, 0xc52acbd7, 0x26354480, 0xb562a38f
+.word	0xdeb15a49, 0x25ba1b67, 0x45ea0e98, 0x5dfec0e1
+.word	0xc32f7502, 0x814cf012, 0x8d4697a3, 0x6bd3f9c6
+.word	0x038f5fe7, 0x15929c95, 0xbf6d7aeb, 0x955259da
+.word	0xd4be832d, 0x587421d3, 0x49e06929, 0x8ec9c844
+.word	0x75c2896a, 0xf48e7978, 0x99583e6b, 0x27b971dd
+.word	0xbee14fb6, 0xf088ad17, 0xc920ac66, 0x7dce3ab4
+.word	0x63df4a18, 0xe51a3182, 0x97513360, 0x62537f45
+.word	0xb16477e0, 0xbb6bae84, 0xfe81a01c, 0xf9082b94
+.word	0x70486858, 0x8f45fd19, 0x94de6c87, 0x527bf8b7
+.word	0xab73d323, 0x724b02e2, 0xe31f8f57, 0x6655ab2a
+.word	0xb2eb2807, 0x2fb5c203, 0x86c57b9a, 0xd33708a5
+.word	0x302887f2, 0x23bfa5b2, 0x02036aba, 0xed16825c
+.word	0x8acf1c2b, 0xa779b492, 0xf307f2f0, 0x4e69e2a1
+.word	0x65daf4cd, 0x0605bed5, 0xd134621f, 0xc4a6fe8a
+.word	0x342e539d, 0xa2f355a0, 0x058ae132, 0xa4f6eb75
+.word	0x0b83ec39, 0x4060efaa, 0x5e719f06, 0xbd6e1051
+.word	0x3e218af9, 0x96dd063d, 0xdd3e05ae, 0x4de6bd46
+.word	0x91548db5, 0x71c45d05, 0x0406d46f, 0x605015ff
+.word	0x1998fb24, 0xd6bde997, 0x894043cc, 0x67d99e77
+.word	0xb0e842bd, 0x07898b88, 0xe7195b38, 0x79c8eedb
+.word	0xa17c0a47, 0x7c420fe9, 0xf8841ec9, 0x00000000
+.word	0x09808683, 0x322bed48, 0x1e1170ac, 0x6c5a724e
+.word	0xfd0efffb, 0x0f853856, 0x3daed51e, 0x362d3927
+.word	0x0a0fd964, 0x685ca621, 0x9b5b54d1, 0x24362e3a
+.word	0x0c0a67b1, 0x9357e70f, 0xb4ee96d2, 0x1b9b919e
+.word	0x80c0c54f, 0x61dc20a2, 0x5a774b69, 0x1c121a16
+.word	0xe293ba0a, 0xc0a02ae5, 0x3c22e043, 0x121b171d
+.word	0x0e090d0b, 0xf28bc7ad, 0x2db6a8b9, 0x141ea9c8
+.word	0x57f11985, 0xaf75074c, 0xee99ddbb, 0xa37f60fd
+.word	0xf701269f, 0x5c72f5bc, 0x44663bc5, 0x5bfb7e34
+.word	0x8b432976, 0xcb23c6dc, 0xb6edfc68, 0xb8e4f163
+.word	0xd731dcca, 0x42638510, 0x13972240, 0x84c61120
+.word	0x854a247d, 0xd2bb3df8, 0xaef93211, 0xc729a16d
+.word	0x1d9e2f4b, 0xdcb230f3, 0x0d8652ec, 0x77c1e3d0
+.word	0x2bb3166c, 0xa970b999, 0x119448fa, 0x47e96422
+.word	0xa8fc8cc4, 0xa0f03f1a, 0x567d2cd8, 0x223390ef
+.word	0x87494ec7, 0xd938d1c1, 0x8ccaa2fe, 0x98d40b36
+.word	0xa6f581cf, 0xa57ade28, 0xdab78e26, 0x3fadbfa4
+.word	0x2c3a9de4, 0x5078920d, 0x6a5fcc9b, 0x547e4662
+.word	0xf68d13c2, 0x90d8b8e8, 0x2e39f75e, 0x82c3aff5
+.word	0x9f5d80be, 0x69d0937c, 0x6fd52da9, 0xcf2512b3
+.word	0xc8ac993b, 0x10187da7, 0xe89c636e, 0xdb3bbb7b
+.word	0xcd267809, 0x6e5918f4, 0xec9ab701, 0x834f9aa8
+.word	0xe6956e65, 0xaaffe67e, 0x21bccf08, 0xef15e8e6
+.word	0xbae79bd9, 0x4a6f36ce, 0xea9f09d4, 0x29b07cd6
+.word	0x31a4b2af, 0x2a3f2331, 0xc6a59430, 0x35a266c0
+.word	0x744ebc37, 0xfc82caa6, 0xe090d0b0, 0x33a7d815
+.word	0xf104984a, 0x41ecdaf7, 0x7fcd500e, 0x1791f62f
+.word	0x764dd68d, 0x43efb04d, 0xccaa4d54, 0xe49604df
+.word	0x9ed1b5e3, 0x4c6a881b, 0xc12c1fb8, 0x4665517f
+.word	0x9d5eea04, 0x018c355d, 0xfa877473, 0xfb0b412e
+.word	0xb3671d5a, 0x92dbd252, 0xe9105633, 0x6dd64713
+.word	0x9ad7618c, 0x37a10c7a, 0x59f8148e, 0xeb133c89
+.word	0xcea927ee, 0xb761c935, 0xe11ce5ed, 0x7a47b13c
+.word	0x9cd2df59, 0x55f2733f, 0x1814ce79, 0x73c737bf
+.word	0x53f7cdea, 0x5ffdaa5b, 0xdf3d6f14, 0x7844db86
+.word	0xcaaff381, 0xb968c43e, 0x3824342c, 0xc2a3405f
+.word	0x161dc372, 0xbce2250c, 0x283c498b, 0xff0d9541
+.word	0x39a80171, 0x080cb3de, 0xd8b4e49c, 0x6456c190
+.word	0x7bcb8461, 0xd532b670, 0x486c5c74, 0xd0b85742
+@ Td4[256]
+.byte	0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38
+.byte	0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb
+.byte	0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87
+.byte	0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb
+.byte	0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d
+.byte	0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e
+.byte	0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2
+.byte	0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25
+.byte	0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16
+.byte	0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92
+.byte	0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda
+.byte	0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84
+.byte	0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a
+.byte	0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06
+.byte	0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02
+.byte	0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b
+.byte	0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea
+.byte	0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73
+.byte	0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85
+.byte	0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e
+.byte	0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89
+.byte	0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b
+.byte	0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20
+.byte	0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4
+.byte	0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31
+.byte	0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f
+.byte	0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d
+.byte	0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef
+.byte	0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0
+.byte	0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61
+.byte	0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26
+.byte	0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d
+.size	AES_Td,.-AES_Td
+
+@ void AES_decrypt(const unsigned char *in, unsigned char *out,
+@ 		 const AES_KEY *key) {
+.global AES_decrypt
+.type   AES_decrypt,%function
+.align	5
+AES_decrypt:
+#ifndef	__thumb2__
+	sub	r3,pc,#8		@ AES_decrypt
+#else
+	adr	r3,.
+#endif
+	stmdb   sp!,{r1,r4-r12,lr}
+#if defined(__thumb2__) || defined(__APPLE__)
+	adr	$tbl,AES_Td
+#else
+	sub	$tbl,r3,#AES_decrypt-AES_Td	@ Td
+#endif
+	mov	$rounds,r0		@ inp
+	mov	$key,r2
+#if __ARM_ARCH__<7
+	ldrb	$s0,[$rounds,#3]	@ load input data in endian-neutral
+	ldrb	$t1,[$rounds,#2]	@ manner...
+	ldrb	$t2,[$rounds,#1]
+	ldrb	$t3,[$rounds,#0]
+	orr	$s0,$s0,$t1,lsl#8
+	ldrb	$s1,[$rounds,#7]
+	orr	$s0,$s0,$t2,lsl#16
+	ldrb	$t1,[$rounds,#6]
+	orr	$s0,$s0,$t3,lsl#24
+	ldrb	$t2,[$rounds,#5]
+	ldrb	$t3,[$rounds,#4]
+	orr	$s1,$s1,$t1,lsl#8
+	ldrb	$s2,[$rounds,#11]
+	orr	$s1,$s1,$t2,lsl#16
+	ldrb	$t1,[$rounds,#10]
+	orr	$s1,$s1,$t3,lsl#24
+	ldrb	$t2,[$rounds,#9]
+	ldrb	$t3,[$rounds,#8]
+	orr	$s2,$s2,$t1,lsl#8
+	ldrb	$s3,[$rounds,#15]
+	orr	$s2,$s2,$t2,lsl#16
+	ldrb	$t1,[$rounds,#14]
+	orr	$s2,$s2,$t3,lsl#24
+	ldrb	$t2,[$rounds,#13]
+	ldrb	$t3,[$rounds,#12]
+	orr	$s3,$s3,$t1,lsl#8
+	orr	$s3,$s3,$t2,lsl#16
+	orr	$s3,$s3,$t3,lsl#24
+#else
+	ldr	$s0,[$rounds,#0]
+	ldr	$s1,[$rounds,#4]
+	ldr	$s2,[$rounds,#8]
+	ldr	$s3,[$rounds,#12]
+#ifdef __ARMEL__
+	rev	$s0,$s0
+	rev	$s1,$s1
+	rev	$s2,$s2
+	rev	$s3,$s3
+#endif
+#endif
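+@ On pre-ARMv7 the byte loads above assemble each state word big-endian
+@ regardless of CPU endianness; in C terms (sketch):
+@	s0 = in[0]<<24 | in[1]<<16 | in[2]<<8 | in[3];	/* likewise s1..s3 */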
+	bl	_armv4_AES_decrypt
+
+	ldr	$rounds,[sp],#4		@ pop out
+#if __ARM_ARCH__>=7
+#ifdef __ARMEL__
+	rev	$s0,$s0
+	rev	$s1,$s1
+	rev	$s2,$s2
+	rev	$s3,$s3
+#endif
+	str	$s0,[$rounds,#0]
+	str	$s1,[$rounds,#4]
+	str	$s2,[$rounds,#8]
+	str	$s3,[$rounds,#12]
+#else
+	mov	$t1,$s0,lsr#24		@ write output in endian-neutral
+	mov	$t2,$s0,lsr#16		@ manner...
+	mov	$t3,$s0,lsr#8
+	strb	$t1,[$rounds,#0]
+	strb	$t2,[$rounds,#1]
+	mov	$t1,$s1,lsr#24
+	strb	$t3,[$rounds,#2]
+	mov	$t2,$s1,lsr#16
+	strb	$s0,[$rounds,#3]
+	mov	$t3,$s1,lsr#8
+	strb	$t1,[$rounds,#4]
+	strb	$t2,[$rounds,#5]
+	mov	$t1,$s2,lsr#24
+	strb	$t3,[$rounds,#6]
+	mov	$t2,$s2,lsr#16
+	strb	$s1,[$rounds,#7]
+	mov	$t3,$s2,lsr#8
+	strb	$t1,[$rounds,#8]
+	strb	$t2,[$rounds,#9]
+	mov	$t1,$s3,lsr#24
+	strb	$t3,[$rounds,#10]
+	mov	$t2,$s3,lsr#16
+	strb	$s2,[$rounds,#11]
+	mov	$t3,$s3,lsr#8
+	strb	$t1,[$rounds,#12]
+	strb	$t2,[$rounds,#13]
+	strb	$t3,[$rounds,#14]
+	strb	$s3,[$rounds,#15]
+#endif
+#if __ARM_ARCH__>=5
+	ldmia	sp!,{r4-r12,pc}
+#else
+	ldmia   sp!,{r4-r12,lr}
+	tst	lr,#1
+	moveq	pc,lr			@ be binary compatible with V4, yet
+	bx	lr			@ interoperable with Thumb ISA:-)
+#endif
+.size	AES_decrypt,.-AES_decrypt
+
+.type   _armv4_AES_decrypt,%function
+.align	2
+_armv4_AES_decrypt:
+	str	lr,[sp,#-4]!		@ push lr
+	ldmia	$key!,{$t1-$i1}
+	eor	$s0,$s0,$t1
+	ldr	$rounds,[$key,#240-16]
+	eor	$s1,$s1,$t2
+	eor	$s2,$s2,$t3
+	eor	$s3,$s3,$i1
+	sub	$rounds,$rounds,#1
+	mov	lr,#255
+
+	and	$i1,lr,$s0,lsr#16
+	and	$i2,lr,$s0,lsr#8
+	and	$i3,lr,$s0
+	mov	$s0,$s0,lsr#24
+.Ldec_loop:
+	ldr	$t1,[$tbl,$i1,lsl#2]	@ Td1[s0>>16]
+	and	$i1,lr,$s1		@ i0
+	ldr	$t2,[$tbl,$i2,lsl#2]	@ Td2[s0>>8]
+	and	$i2,lr,$s1,lsr#16
+	ldr	$t3,[$tbl,$i3,lsl#2]	@ Td3[s0>>0]
+	and	$i3,lr,$s1,lsr#8
+	ldr	$s0,[$tbl,$s0,lsl#2]	@ Td0[s0>>24]
+	mov	$s1,$s1,lsr#24
+
+	ldr	$i1,[$tbl,$i1,lsl#2]	@ Td3[s1>>0]
+	ldr	$i2,[$tbl,$i2,lsl#2]	@ Td1[s1>>16]
+	ldr	$i3,[$tbl,$i3,lsl#2]	@ Td2[s1>>8]
+	eor	$s0,$s0,$i1,ror#24
+	ldr	$s1,[$tbl,$s1,lsl#2]	@ Td0[s1>>24]
+	and	$i1,lr,$s2,lsr#8	@ i0
+	eor	$t2,$i2,$t2,ror#8
+	and	$i2,lr,$s2		@ i1
+	eor	$t3,$i3,$t3,ror#8
+	and	$i3,lr,$s2,lsr#16
+	ldr	$i1,[$tbl,$i1,lsl#2]	@ Td2[s2>>8]
+	eor	$s1,$s1,$t1,ror#8
+	ldr	$i2,[$tbl,$i2,lsl#2]	@ Td3[s2>>0]
+	mov	$s2,$s2,lsr#24
+
+	ldr	$i3,[$tbl,$i3,lsl#2]	@ Td1[s2>>16]
+	eor	$s0,$s0,$i1,ror#16
+	ldr	$s2,[$tbl,$s2,lsl#2]	@ Td0[s2>>24]
+	and	$i1,lr,$s3,lsr#16	@ i0
+	eor	$s1,$s1,$i2,ror#24
+	and	$i2,lr,$s3,lsr#8	@ i1
+	eor	$t3,$i3,$t3,ror#8
+	and	$i3,lr,$s3		@ i2
+	ldr	$i1,[$tbl,$i1,lsl#2]	@ Td1[s3>>16]
+	eor	$s2,$s2,$t2,ror#8
+	ldr	$i2,[$tbl,$i2,lsl#2]	@ Td2[s3>>8]
+	mov	$s3,$s3,lsr#24
+
+	ldr	$i3,[$tbl,$i3,lsl#2]	@ Td3[s3>>0]
+	eor	$s0,$s0,$i1,ror#8
+	ldr	$i1,[$key],#16
+	eor	$s1,$s1,$i2,ror#16
+	ldr	$s3,[$tbl,$s3,lsl#2]	@ Td0[s3>>24]
+	eor	$s2,$s2,$i3,ror#24
+
+	ldr	$t1,[$key,#-12]
+	eor	$s0,$s0,$i1
+	ldr	$t2,[$key,#-8]
+	eor	$s3,$s3,$t3,ror#8
+	ldr	$t3,[$key,#-4]
+	and	$i1,lr,$s0,lsr#16
+	eor	$s1,$s1,$t1
+	and	$i2,lr,$s0,lsr#8
+	eor	$s2,$s2,$t2
+	and	$i3,lr,$s0
+	eor	$s3,$s3,$t3
+	mov	$s0,$s0,lsr#24
+
+	subs	$rounds,$rounds,#1
+	bne	.Ldec_loop
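+
+@ In aes_core.c notation, each pass of .Ldec_loop above computes (sketch;
+@ Td1..Td3 are byte rotations of Td0, served from the single AES_Td table
+@ through the ror displacements on the eor instructions):
+@	t0 = Td0[s0>>24] ^ Td1[(s3>>16)&0xff] ^ Td2[(s2>>8)&0xff]
+@	   ^ Td3[s1&0xff] ^ rk[i];	/* and symmetrically for t1..t3 */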
+
+	add	$tbl,$tbl,#1024
+
+	ldr	$t2,[$tbl,#0]		@ prefetch Td4
+	ldr	$t3,[$tbl,#32]
+	ldr	$t1,[$tbl,#64]
+	ldr	$t2,[$tbl,#96]
+	ldr	$t3,[$tbl,#128]
+	ldr	$t1,[$tbl,#160]
+	ldr	$t2,[$tbl,#192]
+	ldr	$t3,[$tbl,#224]
+
+	ldrb	$s0,[$tbl,$s0]		@ Td4[s0>>24]
+	ldrb	$t1,[$tbl,$i1]		@ Td4[s0>>16]
+	and	$i1,lr,$s1		@ i0
+	ldrb	$t2,[$tbl,$i2]		@ Td4[s0>>8]
+	and	$i2,lr,$s1,lsr#16
+	ldrb	$t3,[$tbl,$i3]		@ Td4[s0>>0]
+	and	$i3,lr,$s1,lsr#8
+
+	add	$s1,$tbl,$s1,lsr#24
+	ldrb	$i1,[$tbl,$i1]		@ Td4[s1>>0]
+	ldrb	$s1,[$s1]		@ Td4[s1>>24]
+	ldrb	$i2,[$tbl,$i2]		@ Td4[s1>>16]
+	eor	$s0,$i1,$s0,lsl#24
+	ldrb	$i3,[$tbl,$i3]		@ Td4[s1>>8]
+	eor	$s1,$t1,$s1,lsl#8
+	and	$i1,lr,$s2,lsr#8	@ i0
+	eor	$t2,$t2,$i2,lsl#8
+	and	$i2,lr,$s2		@ i1
+	ldrb	$i1,[$tbl,$i1]		@ Td4[s2>>8]
+	eor	$t3,$t3,$i3,lsl#8
+	ldrb	$i2,[$tbl,$i2]		@ Td4[s2>>0]
+	and	$i3,lr,$s2,lsr#16
+
+	add	$s2,$tbl,$s2,lsr#24
+	ldrb	$s2,[$s2]		@ Td4[s2>>24]
+	eor	$s0,$s0,$i1,lsl#8
+	ldrb	$i3,[$tbl,$i3]		@ Td4[s2>>16]
+	eor	$s1,$i2,$s1,lsl#16
+	and	$i1,lr,$s3,lsr#16	@ i0
+	eor	$s2,$t2,$s2,lsl#16
+	and	$i2,lr,$s3,lsr#8	@ i1
+	ldrb	$i1,[$tbl,$i1]		@ Td4[s3>>16]
+	eor	$t3,$t3,$i3,lsl#16
+	ldrb	$i2,[$tbl,$i2]		@ Td4[s3>>8]
+	and	$i3,lr,$s3		@ i2
+
+	add	$s3,$tbl,$s3,lsr#24
+	ldrb	$i3,[$tbl,$i3]		@ Td4[s3>>0]
+	ldrb	$s3,[$s3]		@ Td4[s3>>24]
+	eor	$s0,$s0,$i1,lsl#16
+	ldr	$i1,[$key,#0]
+	eor	$s1,$s1,$i2,lsl#8
+	ldr	$t1,[$key,#4]
+	eor	$s2,$i3,$s2,lsl#8
+	ldr	$t2,[$key,#8]
+	eor	$s3,$t3,$s3,lsl#24
+	ldr	$t3,[$key,#12]
+
+	eor	$s0,$s0,$i1
+	eor	$s1,$s1,$t1
+	eor	$s2,$s2,$t2
+	eor	$s3,$s3,$t3
+
+	sub	$tbl,$tbl,#1024
+	ldr	pc,[sp],#4		@ pop and return
+.size	_armv4_AES_decrypt,.-_armv4_AES_decrypt
+.asciz	"AES for ARMv4, CRYPTOGAMS by <appro\@openssl.org>"
+.align	2
+___
+
+$code =~ s/\bbx\s+lr\b/.word\t0xe12fff1e/gm;	# make it possible to compile with -march=armv4
+$code =~ s/\bret\b/bx\tlr/gm;
+
+open SELF,$0;
+while(<SELF>) {
+	next if (/^#!/);
+	last if (!s/^#/@/ and !/^$/);
+	print;
+}
+close SELF;
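+
+# The loop above copies this script's own leading comment block into the
+# generated output, rewriting Perl '#' comments as assembler '@' comments
+# and stopping at the first line that is neither a comment nor blank.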
+
+print $code;
+close STDOUT or die "error closing STDOUT: $!";	# enforce flush
diff --git a/ap/lib/libssl/openssl-1.1.1o/crypto/aes/asm/aes-c64xplus.pl b/ap/lib/libssl/openssl-1.1.1o/crypto/aes/asm/aes-c64xplus.pl
new file mode 100644
index 0000000..cad3fcd
--- /dev/null
+++ b/ap/lib/libssl/openssl-1.1.1o/crypto/aes/asm/aes-c64xplus.pl
@@ -0,0 +1,1382 @@
+#! /usr/bin/env perl
+# Copyright 2012-2020 The OpenSSL Project Authors. All Rights Reserved.
+#
+# Licensed under the OpenSSL license (the "License").  You may not use
+# this file except in compliance with the License.  You can obtain a copy
+# in the file LICENSE in the source distribution or at
+# https://www.openssl.org/source/license.html
+
+#
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+#
+# [Endian-neutral] AES for C64x+.
+#
+# Even though SPLOOPs are scheduled for 13 cycles, and thus the expected
+# performance is ~8.5 cycles per byte processed with a 128-bit key,
+# measured performance turned out to be ~10 cycles per byte. The
+# discrepancy must be caused by limitations of L1D memory banking(*);
+# see TI publication SPRU871 for further details. If it's any
+# consolation, it's still ~20% faster than TI's linear assembly module.
+# Compared to aes_core.c compiled with cl6x 6.0 with the -mv6400+ -o2
+# options, this code is 3.75x faster and almost 3x smaller (tables
+# included).
+#
+# (*)	This means that there might be a subtle correlation between data
+#	and timing, and one can wonder whether it can be ... attacked:-(
+#	On the other hand, this also means that *if* one chooses to
+#	implement the *4*-T-table variant [instead of, or in addition
+#	to, the 1 T-table in this implementation], then one ought to
+#	*interleave* the tables. Even though it complicates addressing,
+#	references to interleaved tables would be guaranteed not to
+#	clash. I reckon it should be possible to break the 8-cycles-
+#	per-byte "barrier," i.e. improve by ~20%, naturally at the
+#	cost of 8x increased pressure on L1D: 8x because you'd have
+#	to interleave both the Te and Td tables...
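+#
+# For reference, a C sketch of the single-T-table round this module
+# schedules, in aes_core.c's big-endian notation (ROTR is a 32-bit right
+# rotation; Te1..Te3 are byte rotations of Te0, which is why one table
+# plus in-register ROTL rotations suffices):
+#
+#	t0 = Te0[ s0>>24        ]
+#	   ^ ROTR(Te0[(s1>>16)&0xff],  8)	/* Te1[x] = ROTR(Te0[x],8) */
+#	   ^ ROTR(Te0[(s2>> 8)&0xff], 16)	/* Te2 */
+#	   ^ ROTR(Te0[ s3     &0xff], 24)	/* Te3 */
+#	   ^ rk[0];		/* and symmetrically for t1..t3 */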
+
+while (($output=shift) && ($output!~/\w[\w\-]*\.\w+$/)) {}
+open STDOUT,">$output";
+
+($TEA,$TEB)=("A5","B5");
+($KPA,$KPB)=("A3","B1");
+@K=("A6","B6","A7","B7");
+@s=("A8","B8","A9","B9");
+@Te0=@Td0=("A16","B16","A17","B17");
+@Te1=@Td1=("A18","B18","A19","B19");
+@Te2=@Td2=("A20","B20","A21","B21");
+@Te3=@Td3=("A22","B22","A23","B23");
+
+$code=<<___;
+	.text
+
+	.if	.ASSEMBLER_VERSION<7000000
+	.asg	0,__TI_EABI__
+	.endif
+	.if	__TI_EABI__
+	.nocmp
+	.asg	AES_encrypt,_AES_encrypt
+	.asg	AES_decrypt,_AES_decrypt
+	.asg	AES_set_encrypt_key,_AES_set_encrypt_key
+	.asg	AES_set_decrypt_key,_AES_set_decrypt_key
+	.asg	AES_ctr32_encrypt,_AES_ctr32_encrypt
+	.endif
+
+	.asg	B3,RA
+	.asg	A4,INP
+	.asg	B4,OUT
+	.asg	A6,KEY
+	.asg	A4,RET
+	.asg	B15,SP
+
+	.eval	24,EXT0
+	.eval	16,EXT1
+	.eval	8,EXT2
+	.eval	0,EXT3
+	.eval	8,TBL1
+	.eval	16,TBL2
+	.eval	24,TBL3
+
+	.if	.BIG_ENDIAN
+	.eval	24-EXT0,EXT0
+	.eval	24-EXT1,EXT1
+	.eval	24-EXT2,EXT2
+	.eval	24-EXT3,EXT3
+	.eval	32-TBL1,TBL1
+	.eval	32-TBL2,TBL2
+	.eval	32-TBL3,TBL3
+	.endif
+
+	.global	_AES_encrypt
+_AES_encrypt:
+	.asmfunc
+	MVK	1,B2
+__encrypt:
+	.if	__TI_EABI__
+   [B2]	LDNDW	*INP++,A9:A8			; load input
+||	MVKL	\$PCR_OFFSET(AES_Te,__encrypt),$TEA
+||	ADDKPC	__encrypt,B0
+   [B2]	LDNDW	*INP++,B9:B8
+||	MVKH	\$PCR_OFFSET(AES_Te,__encrypt),$TEA
+||	ADD	0,KEY,$KPA
+||	ADD	4,KEY,$KPB
+	.else
+   [B2]	LDNDW	*INP++,A9:A8			; load input
+||	MVKL	(AES_Te-__encrypt),$TEA
+||	ADDKPC	__encrypt,B0
+   [B2]	LDNDW	*INP++,B9:B8
+||	MVKH	(AES_Te-__encrypt),$TEA
+||	ADD	0,KEY,$KPA
+||	ADD	4,KEY,$KPB
+	.endif
+	LDW	*$KPA++[2],$Te0[0]		; zero round key
+||	LDW	*$KPB++[2],$Te0[1]
+||	MVK	60,A0
+||	ADD	B0,$TEA,$TEA			; AES_Te
+	LDW	*KEY[A0],B0			; rounds
+||	MVK	1024,A0				; sizeof(AES_Te)
+	LDW	*$KPA++[2],$Te0[2]
+||	LDW	*$KPB++[2],$Te0[3]
+||	MV	$TEA,$TEB
+	NOP
+	.if	.BIG_ENDIAN
+	MV	A9,$s[0]
+||	MV	A8,$s[1]
+||	MV	B9,$s[2]
+||	MV	B8,$s[3]
+	.else
+	MV	A8,$s[0]
+||	MV	A9,$s[1]
+||	MV	B8,$s[2]
+||	MV	B9,$s[3]
+	.endif
+	XOR	$Te0[0],$s[0],$s[0]
+||	XOR	$Te0[1],$s[1],$s[1]
+||	LDW	*$KPA++[2],$K[0]		; 1st round key
+||	LDW	*$KPB++[2],$K[1]
+	SUB	B0,2,B0
+
+	SPLOOPD	13
+||	MVC	B0,ILC
+||	LDW	*$KPA++[2],$K[2]
+||	LDW	*$KPB++[2],$K[3]
+;;====================================================================
+	EXTU	$s[1],EXT1,24,$Te1[1]
+||	EXTU	$s[0],EXT3,24,$Te3[0]
+	LDW	*${TEB}[$Te1[1]],$Te1[1]	; Te1[s1>>8],	t0
+||	LDW	*${TEA}[$Te3[0]],$Te3[0]	; Te3[s0>>24],	t1
+||	XOR	$s[2],$Te0[2],$s[2]		; modulo-scheduled
+||	XOR	$s[3],$Te0[3],$s[3]		; modulo-scheduled
+||	EXTU	$s[1],EXT3,24,$Te3[1]
+||	EXTU	$s[0],EXT1,24,$Te1[0]
+	LDW	*${TEB}[$Te3[1]],$Te3[1]	; Te3[s1>>24],	t2
+||	LDW	*${TEA}[$Te1[0]],$Te1[0]	; Te1[s0>>8],	t3
+||	EXTU	$s[2],EXT2,24,$Te2[2]
+||	EXTU	$s[3],EXT2,24,$Te2[3]
+	LDW	*${TEA}[$Te2[2]],$Te2[2]	; Te2[s2>>16],	t0
+||	LDW	*${TEB}[$Te2[3]],$Te2[3]	; Te2[s3>>16],	t1
+||	EXTU	$s[3],EXT3,24,$Te3[3]
+||	EXTU	$s[2],EXT1,24,$Te1[2]
+	LDW	*${TEB}[$Te3[3]],$Te3[3]	; Te3[s3>>24],	t0
+||	LDW	*${TEA}[$Te1[2]],$Te1[2]	; Te1[s2>>8],	t1
+||	EXTU	$s[0],EXT2,24,$Te2[0]
+||	EXTU	$s[1],EXT2,24,$Te2[1]
+	LDW	*${TEA}[$Te2[0]],$Te2[0]	; Te2[s0>>16],	t2
+||	LDW	*${TEB}[$Te2[1]],$Te2[1]	; Te2[s1>>16],	t3
+||	EXTU	$s[3],EXT1,24,$Te1[3]
+||	EXTU	$s[2],EXT3,24,$Te3[2]
+	LDW	*${TEB}[$Te1[3]],$Te1[3]	; Te1[s3>>8],	t2
+||	LDW	*${TEA}[$Te3[2]],$Te3[2]	; Te3[s2>>24],	t3
+||	ROTL	$Te1[1],TBL1,$Te3[0]		; t0
+||	ROTL	$Te3[0],TBL3,$Te1[1]		; t1
+||	EXTU	$s[0],EXT0,24,$Te0[0]
+||	EXTU	$s[1],EXT0,24,$Te0[1]
+	LDW	*${TEA}[$Te0[0]],$Te0[0]	; Te0[s0],	t0
+||	LDW	*${TEB}[$Te0[1]],$Te0[1]	; Te0[s1],	t1
+||	ROTL	$Te3[1],TBL3,$Te1[0]		; t2
+||	ROTL	$Te1[0],TBL1,$Te3[1]		; t3
+||	EXTU	$s[2],EXT0,24,$Te0[2]
+||	EXTU	$s[3],EXT0,24,$Te0[3]
+	LDW	*${TEA}[$Te0[2]],$Te0[2]	; Te0[s2],	t2
+||	LDW	*${TEB}[$Te0[3]],$Te0[3]	; Te0[s3],	t3
+||	ROTL	$Te2[2],TBL2,$Te2[2]		; t0
+||	ROTL	$Te2[3],TBL2,$Te2[3]		; t1
+||	XOR	$K[0],$Te3[0],$s[0]
+||	XOR	$K[1],$Te1[1],$s[1]
+	ROTL	$Te3[3],TBL3,$Te1[2]		; t0
+||	ROTL	$Te1[2],TBL1,$Te3[3]		; t1
+||	XOR	$K[2],$Te1[0],$s[2]
+||	XOR	$K[3],$Te3[1],$s[3]
+||	LDW	*$KPA++[2],$K[0]		; next round key
+||	LDW	*$KPB++[2],$K[1]
+	ROTL	$Te2[0],TBL2,$Te2[0]		; t2
+||	ROTL	$Te2[1],TBL2,$Te2[1]		; t3
+||	XOR	$s[0],$Te2[2],$s[0]
+||	XOR	$s[1],$Te2[3],$s[1]
+||	LDW	*$KPA++[2],$K[2]
+||	LDW	*$KPB++[2],$K[3]
+	ROTL	$Te1[3],TBL1,$Te3[2]		; t2
+||	ROTL	$Te3[2],TBL3,$Te1[3]		; t3
+||	XOR	$s[0],$Te1[2],$s[0]
+||	XOR	$s[1],$Te3[3],$s[1]
+	XOR	$s[2],$Te2[0],$s[2]
+||	XOR	$s[3],$Te2[1],$s[3]
+||	XOR	$s[0],$Te0[0],$s[0]
+||	XOR	$s[1],$Te0[1],$s[1]
+	SPKERNEL
+||	XOR.L	$s[2],$Te3[2],$s[2]
+||	XOR.L	$s[3],$Te1[3],$s[3]
+;;====================================================================
+	ADD.D	${TEA},A0,${TEA}		; point to Te4
+||	ADD.D	${TEB},A0,${TEB}
+||	EXTU	$s[1],EXT1,24,$Te1[1]
+||	EXTU	$s[0],EXT3,24,$Te3[0]
+	LDBU	*${TEB}[$Te1[1]],$Te1[1]	; Te1[s1>>8],	t0
+||	LDBU	*${TEA}[$Te3[0]],$Te3[0]	; Te3[s0>>24],	t1
+||	XOR	$s[2],$Te0[2],$s[2]		; modulo-scheduled
+||	XOR	$s[3],$Te0[3],$s[3]		; modulo-scheduled
+||	EXTU	$s[0],EXT0,24,$Te0[0]
+||	EXTU	$s[1],EXT0,24,$Te0[1]
+	LDBU	*${TEA}[$Te0[0]],$Te0[0]	; Te0[s0],	t0
+||	LDBU	*${TEB}[$Te0[1]],$Te0[1]	; Te0[s1],	t1
+||	EXTU	$s[3],EXT3,24,$Te3[3]
+||	EXTU	$s[2],EXT1,24,$Te1[2]
+	LDBU	*${TEB}[$Te3[3]],$Te3[3]	; Te3[s3>>24],	t0
+||	LDBU	*${TEA}[$Te1[2]],$Te1[2]	; Te1[s2>>8],	t1
+||	EXTU	$s[2],EXT2,24,$Te2[2]
+||	EXTU	$s[3],EXT2,24,$Te2[3]
+	LDBU	*${TEA}[$Te2[2]],$Te2[2]	; Te2[s2>>16],	t0
+||	LDBU	*${TEB}[$Te2[3]],$Te2[3]	; Te2[s3>>16],	t1
+||	EXTU	$s[1],EXT3,24,$Te3[1]
+||	EXTU	$s[0],EXT1,24,$Te1[0]
+	LDBU	*${TEB}[$Te3[1]],$Te3[1]	; Te3[s1>>24],	t2
+||	LDBU	*${TEA}[$Te1[0]],$Te1[0]	; Te1[s0>>8],	t3
+||	EXTU	$s[3],EXT1,24,$Te1[3]
+||	EXTU	$s[2],EXT3,24,$Te3[2]
+	LDBU	*${TEB}[$Te1[3]],$Te1[3]	; Te1[s3>>8],	t2
+||	LDBU	*${TEA}[$Te3[2]],$Te3[2]	; Te3[s2>>24],	t3
+||	EXTU	$s[2],EXT0,24,$Te0[2]
+||	EXTU	$s[3],EXT0,24,$Te0[3]
+	LDBU	*${TEA}[$Te0[2]],$Te0[2]	; Te0[s2],	t2
+||	LDBU	*${TEB}[$Te0[3]],$Te0[3]	; Te0[s3],	t3
+||	EXTU	$s[0],EXT2,24,$Te2[0]
+||	EXTU	$s[1],EXT2,24,$Te2[1]
+	LDBU	*${TEA}[$Te2[0]],$Te2[0]	; Te2[s0>>16],	t2
+||	LDBU	*${TEB}[$Te2[1]],$Te2[1]	; Te2[s1>>16],	t3
+
+	.if	.BIG_ENDIAN
+	PACK2	$Te0[0],$Te1[1],$Te0[0]
+||	PACK2	$Te0[1],$Te1[2],$Te0[1]
+	PACK2	$Te2[2],$Te3[3],$Te2[2]
+||	PACK2	$Te2[3],$Te3[0],$Te2[3]
+	PACKL4	$Te0[0],$Te2[2],$Te0[0]
+||	PACKL4	$Te0[1],$Te2[3],$Te0[1]
+	XOR	$K[0],$Te0[0],$Te0[0]		; s[0]
+||	XOR	$K[1],$Te0[1],$Te0[1]		; s[1]
+
+	PACK2	$Te0[2],$Te1[3],$Te0[2]
+||	PACK2	$Te0[3],$Te1[0],$Te0[3]
+	PACK2	$Te2[0],$Te3[1],$Te2[0]
+||	PACK2	$Te2[1],$Te3[2],$Te2[1]
+||	BNOP	RA
+	PACKL4	$Te0[2],$Te2[0],$Te0[2]
+||	PACKL4	$Te0[3],$Te2[1],$Te0[3]
+	XOR	$K[2],$Te0[2],$Te0[2]		; s[2]
+||	XOR	$K[3],$Te0[3],$Te0[3]		; s[3]
+
+	MV	$Te0[0],A9
+||	MV	$Te0[1],A8
+	MV	$Te0[2],B9
+||	MV	$Te0[3],B8
+|| [B2]	STNDW	A9:A8,*OUT++
+   [B2]	STNDW	B9:B8,*OUT++
+	.else
+	PACK2	$Te1[1],$Te0[0],$Te1[1]
+||	PACK2	$Te1[2],$Te0[1],$Te1[2]
+	PACK2	$Te3[3],$Te2[2],$Te3[3]
+||	PACK2	$Te3[0],$Te2[3],$Te3[0]
+	PACKL4	$Te3[3],$Te1[1],$Te1[1]
+||	PACKL4	$Te3[0],$Te1[2],$Te1[2]
+	XOR	$K[0],$Te1[1],$Te1[1]		; s[0]
+||	XOR	$K[1],$Te1[2],$Te1[2]		; s[1]
+
+	PACK2	$Te1[3],$Te0[2],$Te1[3]
+||	PACK2	$Te1[0],$Te0[3],$Te1[0]
+	PACK2	$Te3[1],$Te2[0],$Te3[1]
+||	PACK2	$Te3[2],$Te2[1],$Te3[2]
+||	BNOP	RA
+	PACKL4	$Te3[1],$Te1[3],$Te1[3]
+||	PACKL4	$Te3[2],$Te1[0],$Te1[0]
+	XOR	$K[2],$Te1[3],$Te1[3]		; s[2]
+||	XOR	$K[3],$Te1[0],$Te1[0]		; s[3]
+
+	MV	$Te1[1],A8
+||	MV	$Te1[2],A9
+	MV	$Te1[3],B8
+||	MV	$Te1[0],B9
+|| [B2]	STNDW	A9:A8,*OUT++
+   [B2]	STNDW	B9:B8,*OUT++
+	.endif
+	.endasmfunc
+
+	.global	_AES_decrypt
+_AES_decrypt:
+	.asmfunc
+	MVK	1,B2
+__decrypt:
+	.if	__TI_EABI__
+   [B2]	LDNDW	*INP++,A9:A8			; load input
+||	MVKL	\$PCR_OFFSET(AES_Td,__decrypt),$TEA
+||	ADDKPC	__decrypt,B0
+   [B2]	LDNDW	*INP++,B9:B8
+||	MVKH	\$PCR_OFFSET(AES_Td,__decrypt),$TEA
+||	ADD	0,KEY,$KPA
+||	ADD	4,KEY,$KPB
+	.else
+   [B2]	LDNDW	*INP++,A9:A8			; load input
+||	MVKL	(AES_Td-__decrypt),$TEA
+||	ADDKPC	__decrypt,B0
+   [B2]	LDNDW	*INP++,B9:B8
+||	MVKH	(AES_Td-__decrypt),$TEA
+||	ADD	0,KEY,$KPA
+||	ADD	4,KEY,$KPB
+	.endif
+	LDW	*$KPA++[2],$Td0[0]		; zero round key
+||	LDW	*$KPB++[2],$Td0[1]
+||	MVK	60,A0
+||	ADD	B0,$TEA,$TEA			; AES_Td
+	LDW	*KEY[A0],B0			; rounds
+||	MVK	1024,A0				; sizeof(AES_Td)
+	LDW	*$KPA++[2],$Td0[2]
+||	LDW	*$KPB++[2],$Td0[3]
+||	MV	$TEA,$TEB
+	NOP
+	.if	.BIG_ENDIAN
+	MV	A9,$s[0]
+||	MV	A8,$s[1]
+||	MV	B9,$s[2]
+||	MV	B8,$s[3]
+	.else
+	MV	A8,$s[0]
+||	MV	A9,$s[1]
+||	MV	B8,$s[2]
+||	MV	B9,$s[3]
+	.endif
+	XOR	$Td0[0],$s[0],$s[0]
+||	XOR	$Td0[1],$s[1],$s[1]
+||	LDW	*$KPA++[2],$K[0]		; 1st round key
+||	LDW	*$KPB++[2],$K[1]
+	SUB	B0,2,B0
+
+	SPLOOPD	13
+||	MVC	B0,ILC
+||	LDW	*$KPA++[2],$K[2]
+||	LDW	*$KPB++[2],$K[3]
+;;====================================================================
+	EXTU	$s[1],EXT3,24,$Td3[1]
+||	EXTU	$s[0],EXT1,24,$Td1[0]
+	LDW	*${TEB}[$Td3[1]],$Td3[1]	; Td3[s1>>24],	t0
+||	LDW	*${TEA}[$Td1[0]],$Td1[0]	; Td1[s0>>8],	t1
+||	XOR	$s[2],$Td0[2],$s[2]		; modulo-scheduled
+||	XOR	$s[3],$Td0[3],$s[3]		; modulo-scheduled
+||	EXTU	$s[1],EXT1,24,$Td1[1]
+||	EXTU	$s[0],EXT3,24,$Td3[0]
+	LDW	*${TEB}[$Td1[1]],$Td1[1]	; Td1[s1>>8],	t2
+||	LDW	*${TEA}[$Td3[0]],$Td3[0]	; Td3[s0>>24],	t3
+||	EXTU	$s[2],EXT2,24,$Td2[2]
+||	EXTU	$s[3],EXT2,24,$Td2[3]
+	LDW	*${TEA}[$Td2[2]],$Td2[2]	; Td2[s2>>16],	t0
+||	LDW	*${TEB}[$Td2[3]],$Td2[3]	; Td2[s3>>16],	t1
+||	EXTU	$s[3],EXT1,24,$Td1[3]
+||	EXTU	$s[2],EXT3,24,$Td3[2]
+	LDW	*${TEB}[$Td1[3]],$Td1[3]	; Td1[s3>>8],	t0
+||	LDW	*${TEA}[$Td3[2]],$Td3[2]	; Td3[s2>>24],	t1
+||	EXTU	$s[0],EXT2,24,$Td2[0]
+||	EXTU	$s[1],EXT2,24,$Td2[1]
+	LDW	*${TEA}[$Td2[0]],$Td2[0]	; Td2[s0>>16],	t2
+||	LDW	*${TEB}[$Td2[1]],$Td2[1]	; Td2[s1>>16],	t3
+||	EXTU	$s[3],EXT3,24,$Td3[3]
+||	EXTU	$s[2],EXT1,24,$Td1[2]
+	LDW	*${TEB}[$Td3[3]],$Td3[3]	; Td3[s3>>24],	t2
+||	LDW	*${TEA}[$Td1[2]],$Td1[2]	; Td1[s2>>8],	t3
+||	ROTL	$Td3[1],TBL3,$Td1[0]		; t0
+||	ROTL	$Td1[0],TBL1,$Td3[1]		; t1
+||	EXTU	$s[0],EXT0,24,$Td0[0]
+||	EXTU	$s[1],EXT0,24,$Td0[1]
+	LDW	*${TEA}[$Td0[0]],$Td0[0]	; Td0[s0],	t0
+||	LDW	*${TEB}[$Td0[1]],$Td0[1]	; Td0[s1],	t1
+||	ROTL	$Td1[1],TBL1,$Td3[0]		; t2
+||	ROTL	$Td3[0],TBL3,$Td1[1]		; t3
+||	EXTU	$s[2],EXT0,24,$Td0[2]
+||	EXTU	$s[3],EXT0,24,$Td0[3]
+	LDW	*${TEA}[$Td0[2]],$Td0[2]	; Td0[s2],	t2
+||	LDW	*${TEB}[$Td0[3]],$Td0[3]	; Td0[s3],	t3
+||	ROTL	$Td2[2],TBL2,$Td2[2]		; t0
+||	ROTL	$Td2[3],TBL2,$Td2[3]		; t1
+||	XOR	$K[0],$Td1[0],$s[0]
+||	XOR	$K[1],$Td3[1],$s[1]
+	ROTL	$Td1[3],TBL1,$Td3[2]		; t0
+||	ROTL	$Td3[2],TBL3,$Td1[3]		; t1
+||	XOR	$K[2],$Td3[0],$s[2]
+||	XOR	$K[3],$Td1[1],$s[3]
+||	LDW	*$KPA++[2],$K[0]		; next round key
+||	LDW	*$KPB++[2],$K[1]
+	ROTL	$Td2[0],TBL2,$Td2[0]		; t2
+||	ROTL	$Td2[1],TBL2,$Td2[1]		; t3
+||	XOR	$s[0],$Td2[2],$s[0]
+||	XOR	$s[1],$Td2[3],$s[1]
+||	LDW	*$KPA++[2],$K[2]
+||	LDW	*$KPB++[2],$K[3]
+	ROTL	$Td3[3],TBL3,$Td1[2]		; t2
+||	ROTL	$Td1[2],TBL1,$Td3[3]		; t3
+||	XOR	$s[0],$Td3[2],$s[0]
+||	XOR	$s[1],$Td1[3],$s[1]
+	XOR	$s[2],$Td2[0],$s[2]
+||	XOR	$s[3],$Td2[1],$s[3]
+||	XOR	$s[0],$Td0[0],$s[0]
+||	XOR	$s[1],$Td0[1],$s[1]
+	SPKERNEL
+||	XOR.L	$s[2],$Td1[2],$s[2]
+||	XOR.L	$s[3],$Td3[3],$s[3]
+;;====================================================================
+	ADD.D	${TEA},A0,${TEA}		; point to Td4
+||	ADD.D	${TEB},A0,${TEB}
+||	EXTU	$s[1],EXT3,24,$Td3[1]
+||	EXTU	$s[0],EXT1,24,$Td1[0]
+	LDBU	*${TEB}[$Td3[1]],$Td3[1]	; Td3[s1>>24],	t0
+||	LDBU	*${TEA}[$Td1[0]],$Td1[0]	; Td1[s0>>8],	t1
+||	XOR	$s[2],$Td0[2],$s[2]		; modulo-scheduled
+||	XOR	$s[3],$Td0[3],$s[3]		; modulo-scheduled
+||	EXTU	$s[0],EXT0,24,$Td0[0]
+||	EXTU	$s[1],EXT0,24,$Td0[1]
+	LDBU	*${TEA}[$Td0[0]],$Td0[0]	; Td0[s0],	t0
+||	LDBU	*${TEB}[$Td0[1]],$Td0[1]	; Td0[s1],	t1
+||	EXTU	$s[2],EXT2,24,$Td2[2]
+||	EXTU	$s[3],EXT2,24,$Td2[3]
+	LDBU	*${TEA}[$Td2[2]],$Td2[2]	; Td2[s2>>16],	t0
+||	LDBU	*${TEB}[$Td2[3]],$Td2[3]	; Td2[s3>>16],	t1
+||	EXTU	$s[3],EXT1,24,$Td1[3]
+||	EXTU	$s[2],EXT3,24,$Td3[2]
+	LDBU	*${TEB}[$Td1[3]],$Td1[3]	; Td1[s3>>8],	t0
+||	LDBU	*${TEA}[$Td3[2]],$Td3[2]	; Td3[s2>>24],	t1
+||	EXTU	$s[1],EXT1,24,$Td1[1]
+||	EXTU	$s[0],EXT3,24,$Td3[0]
+	LDBU	*${TEB}[$Td1[1]],$Td1[1]	; Td1[s1>>8],	t2
+||	LDBU	*${TEA}[$Td3[0]],$Td3[0]	; Td3[s0>>24],	t3
+||	EXTU	$s[0],EXT2,24,$Td2[0]
+||	EXTU	$s[1],EXT2,24,$Td2[1]
+	LDBU	*${TEA}[$Td2[0]],$Td2[0]	; Td2[s0>>16],	t2
+||	LDBU	*${TEB}[$Td2[1]],$Td2[1]	; Td2[s1>>16],	t3
+||	EXTU	$s[3],EXT3,24,$Td3[3]
+||	EXTU	$s[2],EXT1,24,$Td1[2]
+	LDBU	*${TEB}[$Td3[3]],$Td3[3]	; Td3[s3>>24],	t2
+||	LDBU	*${TEA}[$Td1[2]],$Td1[2]	; Td1[s2>>8],	t3
+||	EXTU	$s[2],EXT0,24,$Td0[2]
+||	EXTU	$s[3],EXT0,24,$Td0[3]
+	LDBU	*${TEA}[$Td0[2]],$Td0[2]	; Td0[s2],	t2
+||	LDBU	*${TEB}[$Td0[3]],$Td0[3]	; Td0[s3],	t3
+
+	.if	.BIG_ENDIAN
+	PACK2	$Td0[0],$Td1[3],$Td0[0]
+||	PACK2	$Td0[1],$Td1[0],$Td0[1]
+	PACK2	$Td2[2],$Td3[1],$Td2[2]
+||	PACK2	$Td2[3],$Td3[2],$Td2[3]
+	PACKL4	$Td0[0],$Td2[2],$Td0[0]
+||	PACKL4	$Td0[1],$Td2[3],$Td0[1]
+	XOR	$K[0],$Td0[0],$Td0[0]		; s[0]
+||	XOR	$K[1],$Td0[1],$Td0[1]		; s[1]
+
+	PACK2	$Td0[2],$Td1[1],$Td0[2]
+||	PACK2	$Td0[3],$Td1[2],$Td0[3]
+	PACK2	$Td2[0],$Td3[3],$Td2[0]
+||	PACK2	$Td2[1],$Td3[0],$Td2[1]
+||	BNOP	RA
+	PACKL4	$Td0[2],$Td2[0],$Td0[2]
+||	PACKL4	$Td0[3],$Td2[1],$Td0[3]
+	XOR	$K[2],$Td0[2],$Td0[2]		; s[2]
+||	XOR	$K[3],$Td0[3],$Td0[3]		; s[3]
+
+	MV	$Td0[0],A9
+||	MV	$Td0[1],A8
+	MV	$Td0[2],B9
+||	MV	$Td0[3],B8
+|| [B2]	STNDW	A9:A8,*OUT++
+   [B2]	STNDW	B9:B8,*OUT++
+	.else
+	PACK2	$Td1[3],$Td0[0],$Td1[3]
+||	PACK2	$Td1[0],$Td0[1],$Td1[0]
+	PACK2	$Td3[1],$Td2[2],$Td3[1]
+||	PACK2	$Td3[2],$Td2[3],$Td3[2]
+	PACKL4	$Td3[1],$Td1[3],$Td1[3]
+||	PACKL4	$Td3[2],$Td1[0],$Td1[0]
+	XOR	$K[0],$Td1[3],$Td1[3]		; s[0]
+||	XOR	$K[1],$Td1[0],$Td1[0]		; s[1]
+
+	PACK2	$Td1[1],$Td0[2],$Td1[1]
+||	PACK2	$Td1[2],$Td0[3],$Td1[2]
+	PACK2	$Td3[3],$Td2[0],$Td3[3]
+||	PACK2	$Td3[0],$Td2[1],$Td3[0]
+||	BNOP	RA
+	PACKL4	$Td3[3],$Td1[1],$Td1[1]
+||	PACKL4	$Td3[0],$Td1[2],$Td1[2]
+	XOR	$K[2],$Td1[1],$Td1[1]		; s[2]
+||	XOR	$K[3],$Td1[2],$Td1[2]		; s[3]
+
+	MV	$Td1[3],A8
+||	MV	$Td1[0],A9
+	MV	$Td1[1],B8
+||	MV	$Td1[2],B9
+|| [B2]	STNDW	A9:A8,*OUT++
+   [B2]	STNDW	B9:B8,*OUT++
+	.endif
+	.endasmfunc
+___
+{
+my @K=(@K,@s);			# extended key
+my @Te4=map("B$_",(16..19));
+
+my @Kx9=@Te0;			# used in AES_set_decrypt_key
+my @KxB=@Te1;
+my @KxD=@Te2;
+my @KxE=@Te3;
+
+$code.=<<___;
+	.asg	OUT,BITS
+
+	.global	_AES_set_encrypt_key
+_AES_set_encrypt_key:
+__set_encrypt_key:
+	.asmfunc
+	MV	INP,A0
+||	SHRU	BITS,5,BITS			; 128-192-256 -> 4-6-8
+||	MV	KEY,A1
+  [!A0]	B	RA
+||[!A0]	MVK	-1,RET
+||[!A0]	MVK	1,A1				; only one B RA
+  [!A1]	B	RA
+||[!A1]	MVK	-1,RET
+||[!A1]	MVK	0,A0
+||	MVK	0,B0
+||	MVK	0,A1
+   [A0]	LDNDW	*INP++,A9:A8
+|| [A0]	CMPEQ	4,BITS,B0
+|| [A0]	CMPLT	3,BITS,A1
+   [B0]	B	key128?
+|| [A1]	LDNDW	*INP++,B9:B8
+|| [A0]	CMPEQ	6,BITS,B0
+|| [A0]	CMPLT	5,BITS,A1
+   [B0]	B	key192?
+|| [A1]	LDNDW	*INP++,B17:B16
+|| [A0]	CMPEQ	8,BITS,B0
+|| [A0]	CMPLT	7,BITS,A1
+   [B0]	B	key256?
+|| [A1]	LDNDW	*INP++,B19:B18
+
+	.if	__TI_EABI__
+   [A0]	ADD	0,KEY,$KPA
+|| [A0]	ADD	4,KEY,$KPB
+|| [A0]	MVKL	\$PCR_OFFSET(AES_Te4,__set_encrypt_key),$TEA
+|| [A0]	ADDKPC	__set_encrypt_key,B6
+   [A0]	MVKH	\$PCR_OFFSET(AES_Te4,__set_encrypt_key),$TEA
+   [A0]	ADD	B6,$TEA,$TEA			; AES_Te4
+	.else
+   [A0]	ADD	0,KEY,$KPA
+|| [A0]	ADD	4,KEY,$KPB
+|| [A0]	MVKL	(AES_Te4-__set_encrypt_key),$TEA
+|| [A0]	ADDKPC	__set_encrypt_key,B6
+   [A0]	MVKH	(AES_Te4-__set_encrypt_key),$TEA
+   [A0]	ADD	B6,$TEA,$TEA			; AES_Te4
+	.endif
+	NOP
+	NOP
+
+	BNOP	RA,5
+||	MVK	-2,RET				; unknown bit length
+||	MVK	0,B0				; redundant
+;;====================================================================
+;;====================================================================
+key128?:
+	.if	.BIG_ENDIAN
+	MV	A9,$K[0]
+||	MV	A8,$K[1]
+||	MV	B9,$Te4[2]
+||	MV	B8,$K[3]
+	.else
+	MV	A8,$K[0]
+||	MV	A9,$K[1]
+||	MV	B8,$Te4[2]
+||	MV	B9,$K[3]
+	.endif
+
+	MVK	256,A0
+||	MVK	9,B0
+
+	SPLOOPD	14
+||	MVC	B0,ILC
+||	MV	$TEA,$TEB
+||	ADD	$TEA,A0,A30			; rcon
+;;====================================================================
+	LDW	*A30++[1],A31			; rcon[i]
+||	MV	$Te4[2],$K[2]
+||	EXTU	$K[3],EXT1,24,$Te4[0]
+	LDBU	*${TEB}[$Te4[0]],$Te4[0]
+||	MV	$K[3],A0
+||	EXTU	$K[3],EXT2,24,$Te4[1]
+	LDBU	*${TEB}[$Te4[1]],$Te4[1]
+||	EXTU	A0,EXT3,24,A0
+||	EXTU	$K[3],EXT0,24,$Te4[3]
+	.if	.BIG_ENDIAN
+	LDBU	*${TEA}[A0],$Te4[3]
+||	LDBU	*${TEB}[$Te4[3]],A0
+	.else
+	LDBU	*${TEA}[A0],A0
+||	LDBU	*${TEB}[$Te4[3]],$Te4[3]
+	.endif
+
+	STW	$K[0],*$KPA++[2]
+||	STW	$K[1],*$KPB++[2]
+	STW	$K[2],*$KPA++[2]
+||	STW	$K[3],*$KPB++[2]
+
+	XOR	A31,$K[0],$K[0]			; ^=rcon[i]
+	.if	.BIG_ENDIAN
+	PACK2	$Te4[0],$Te4[1],$Te4[1]
+	PACK2	$Te4[3],A0,$Te4[3]
+	PACKL4	$Te4[1],$Te4[3],$Te4[3]
+	.else
+	PACK2	$Te4[1],$Te4[0],$Te4[1]
+	PACK2	$Te4[3],A0,$Te4[3]
+	PACKL4	$Te4[3],$Te4[1],$Te4[3]
+	.endif
+	XOR	$Te4[3],$K[0],$Te4[0]		; K[0]
+	XOR	$Te4[0],$K[1],$K[1]		; K[1]
+	MV	$Te4[0],$K[0]
+||	XOR	$K[1],$K[2],$Te4[2]		; K[2]
+	XOR	$Te4[2],$K[3],$K[3]		; K[3]
+	SPKERNEL
+;;====================================================================
+	BNOP	RA
+	MV	$Te4[2],$K[2]
+||	STW	$K[0],*$KPA++[2]
+||	STW	$K[1],*$KPB++[2]
+	STW	$K[2],*$KPA++[2]
+||	STW	$K[3],*$KPB++[2]
+	MVK	10,B0				; rounds
+	STW	B0,*++${KPB}[15]
+	MVK	0,RET
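+;  In C terms, each key128? iteration above performs one step of the
+;  standard AES-128 key expansion (sketch; SubWord is the AES_Te4 S-box
+;  lookup per byte, RotWord an 8-bit byte rotation of the word):
+;	K[0] ^= SubWord(RotWord(K[3])) ^ rcon[i];
+;	K[1] ^= K[0]; K[2] ^= K[1]; K[3] ^= K[2];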
+;;====================================================================
+;;====================================================================
+key192?:
+	.if	.BIG_ENDIAN
+	MV	A9,$K[0]
+||	MV	A8,$K[1]
+||	MV	B9,$K[2]
+||	MV	B8,$K[3]
+	MV	B17,$Te4[2]
+||	MV	B16,$K[5]
+	.else
+	MV	A8,$K[0]
+||	MV	A9,$K[1]
+||	MV	B8,$K[2]
+||	MV	B9,$K[3]
+	MV	B16,$Te4[2]
+||	MV	B17,$K[5]
+	.endif
+
+	MVK	256,A0
+||	MVK	6,B0
+	MV	$TEA,$TEB
+||	ADD	$TEA,A0,A30			; rcon
+;;====================================================================
+loop192?:
+	LDW	*A30++[1],A31			; rcon[i]
+||	MV	$Te4[2],$K[4]
+||	EXTU	$K[5],EXT1,24,$Te4[0]
+	LDBU	*${TEB}[$Te4[0]],$Te4[0]
+||	MV	$K[5],A0
+||	EXTU	$K[5],EXT2,24,$Te4[1]
+	LDBU	*${TEB}[$Te4[1]],$Te4[1]
+||	EXTU	A0,EXT3,24,A0
+||	EXTU	$K[5],EXT0,24,$Te4[3]
+	.if	.BIG_ENDIAN
+	LDBU	*${TEA}[A0],$Te4[3]
+||	LDBU	*${TEB}[$Te4[3]],A0
+	.else
+	LDBU	*${TEA}[A0],A0
+||	LDBU	*${TEB}[$Te4[3]],$Te4[3]
+	.endif
+
+	STW	$K[0],*$KPA++[2]
+||	STW	$K[1],*$KPB++[2]
+	STW	$K[2],*$KPA++[2]
+||	STW	$K[3],*$KPB++[2]
+	STW	$K[4],*$KPA++[2]
+||	STW	$K[5],*$KPB++[2]
+
+	XOR	A31,$K[0],$K[0]			; ^=rcon[i]
+	.if	.BIG_ENDIAN
+	PACK2	$Te4[0],$Te4[1],$Te4[1]
+||	PACK2	$Te4[3],A0,$Te4[3]
+	PACKL4	$Te4[1],$Te4[3],$Te4[3]
+	.else
+	PACK2	$Te4[1],$Te4[0],$Te4[1]
+||	PACK2	$Te4[3],A0,$Te4[3]
+	PACKL4	$Te4[3],$Te4[1],$Te4[3]
+	.endif
+	BDEC	loop192?,B0
+||	XOR	$Te4[3],$K[0],$Te4[0]		; K[0]
+	XOR	$Te4[0],$K[1],$K[1]		; K[1]
+	MV	$Te4[0],$K[0]
+||	XOR	$K[1],$K[2],$Te4[2]		; K[2]
+	XOR	$Te4[2],$K[3],$K[3]		; K[3]
+	MV	$Te4[2],$K[2]
+||	XOR	$K[3],$K[4],$Te4[2]		; K[4]
+	XOR	$Te4[2],$K[5],$K[5]		; K[5]
+;;====================================================================
+	BNOP	RA
+	STW	$K[0],*$KPA++[2]
+||	STW	$K[1],*$KPB++[2]
+	STW	$K[2],*$KPA++[2]
+||	STW	$K[3],*$KPB++[2]
+	MVK	12,B0				; rounds
+	STW	B0,*++${KPB}[7]
+	MVK	0,RET
+;;====================================================================
+;;====================================================================
+key256?:
+	.if	.BIG_ENDIAN
+	MV	A9,$K[0]
+||	MV	A8,$K[1]
+||	MV	B9,$K[2]
+||	MV	B8,$K[3]
+	MV	B17,$K[4]
+||	MV	B16,$K[5]
+||	MV	B19,$Te4[2]
+||	MV	B18,$K[7]
+	.else
+	MV	A8,$K[0]
+||	MV	A9,$K[1]
+||	MV	B8,$K[2]
+||	MV	B9,$K[3]
+	MV	B16,$K[4]
+||	MV	B17,$K[5]
+||	MV	B18,$Te4[2]
+||	MV	B19,$K[7]
+	.endif
+
+	MVK	256,A0
+||	MVK	6,B0
+	MV	$TEA,$TEB
+||	ADD	$TEA,A0,A30			; rcon
+;;====================================================================
+loop256?:
+	LDW	*A30++[1],A31			; rcon[i]
+||	MV	$Te4[2],$K[6]
+||	EXTU	$K[7],EXT1,24,$Te4[0]
+	LDBU	*${TEB}[$Te4[0]],$Te4[0]
+||	MV	$K[7],A0
+||	EXTU	$K[7],EXT2,24,$Te4[1]
+	LDBU	*${TEB}[$Te4[1]],$Te4[1]
+||	EXTU	A0,EXT3,24,A0
+||	EXTU	$K[7],EXT0,24,$Te4[3]
+	.if	.BIG_ENDIAN
+	LDBU	*${TEA}[A0],$Te4[3]
+||	LDBU	*${TEB}[$Te4[3]],A0
+	.else
+	LDBU	*${TEA}[A0],A0
+||	LDBU	*${TEB}[$Te4[3]],$Te4[3]
+	.endif
+
+	STW	$K[0],*$KPA++[2]
+||	STW	$K[1],*$KPB++[2]
+	STW	$K[2],*$KPA++[2]
+||	STW	$K[3],*$KPB++[2]
+	STW	$K[4],*$KPA++[2]
+||	STW	$K[5],*$KPB++[2]
+	STW	$K[6],*$KPA++[2]
+||	STW	$K[7],*$KPB++[2]
+||	XOR	A31,$K[0],$K[0]			; ^=rcon[i]
+	.if	.BIG_ENDIAN
+	PACK2	$Te4[0],$Te4[1],$Te4[1]
+||	PACK2	$Te4[3],A0,$Te4[3]
+	PACKL4	$Te4[1],$Te4[3],$Te4[3]
+||[!B0]	B	done256?
+	.else
+	PACK2	$Te4[1],$Te4[0],$Te4[1]
+||	PACK2	$Te4[3],A0,$Te4[3]
+	PACKL4	$Te4[3],$Te4[1],$Te4[3]
+||[!B0]	B	done256?
+	.endif
+	XOR	$Te4[3],$K[0],$Te4[0]		; K[0]
+	XOR	$Te4[0],$K[1],$K[1]		; K[1]
+	MV	$Te4[0],$K[0]
+||	XOR	$K[1],$K[2],$Te4[2]		; K[2]
+	XOR	$Te4[2],$K[3],$K[3]		; K[3]
+
+	MV	$Te4[2],$K[2]
+|| [B0]	EXTU	$K[3],EXT0,24,$Te4[0]
+|| [B0]	SUB	B0,1,B0
+	LDBU	*${TEB}[$Te4[0]],$Te4[0]
+||	MV	$K[3],A0
+||	EXTU	$K[3],EXT1,24,$Te4[1]
+	LDBU	*${TEB}[$Te4[1]],$Te4[1]
+||	EXTU	A0,EXT2,24,A0
+||	EXTU	$K[3],EXT3,24,$Te4[3]
+
+	.if	.BIG_ENDIAN
+	LDBU	*${TEA}[A0],$Te4[3]
+||	LDBU	*${TEB}[$Te4[3]],A0
+	NOP	3
+	PACK2	$Te4[0],$Te4[1],$Te4[1]
+	PACK2	$Te4[3],A0,$Te4[3]
+||	B	loop256?
+	PACKL4	$Te4[1],$Te4[3],$Te4[3]
+	.else
+	LDBU	*${TEA}[A0],A0
+||	LDBU	*${TEB}[$Te4[3]],$Te4[3]
+	NOP	3
+	PACK2	$Te4[1],$Te4[0],$Te4[1]
+	PACK2	$Te4[3],A0,$Te4[3]
+||	B	loop256?
+	PACKL4	$Te4[3],$Te4[1],$Te4[3]
+	.endif
+
+	XOR	$Te4[3],$K[4],$Te4[0]		; K[4]
+	XOR	$Te4[0],$K[5],$K[5]		; K[5]
+	MV	$Te4[0],$K[4]
+||	XOR	$K[5],$K[6],$Te4[2]		; K[6]
+	XOR	$Te4[2],$K[7],$K[7]		; K[7]
+;;====================================================================
+done256?:
+	BNOP	RA
+	STW	$K[0],*$KPA++[2]
+||	STW	$K[1],*$KPB++[2]
+	STW	$K[2],*$KPA++[2]
+||	STW	$K[3],*$KPB++[2]
+	MVK	14,B0				; rounds
+	STW	B0,*--${KPB}[1]
+	MVK	0,RET
+	.endasmfunc
+
+	.global	_AES_set_decrypt_key
+_AES_set_decrypt_key:
+	.asmfunc
+	B	__set_encrypt_key		; guarantee local call
+	MV	KEY,B30				; B30 is not modified
+	MV	RA, B31				; B31 is not modified
+	ADDKPC	ret?,RA,2
+ret?:						; B0 holds rounds or zero
+  [!B0]	BNOP	B31				; return if zero
+   [B0]	SHL	B0,4,A0				; offset to last round key
+   [B0]	SHRU	B0,1,B1
+   [B0]	SUB	B1,1,B1
+   [B0]	MVK	0x0000001B,B3			; AES polynomial
+   [B0]	MVKH	0x07000000,B3
+
+	SPLOOPD	9				; flip round keys
+||	MVC	B1,ILC
+||	MV	B30,$KPA
+||	ADD	B30,A0,$KPB
+||	MVK	16,A0				; sizeof(round key)
+;;====================================================================
+	LDW	*${KPA}[0],A16
+||	LDW	*${KPB}[0],B16
+	LDW	*${KPA}[1],A17
+||	LDW	*${KPB}[1],B17
+	LDW	*${KPA}[2],A18
+||	LDW	*${KPB}[2],B18
+	LDW	*${KPA}[3],A19
+||	ADD	$KPA,A0,$KPA
+||	LDW	*${KPB}[3],B19
+||	SUB	$KPB,A0,$KPB
+	NOP
+	STW	B16,*${KPA}[-4]
+||	STW	A16,*${KPB}[4]
+	STW	B17,*${KPA}[-3]
+||	STW	A17,*${KPB}[5]
+	STW	B18,*${KPA}[-2]
+||	STW	A18,*${KPB}[6]
+	STW	B19,*${KPA}[-1]
+||	STW	A19,*${KPB}[7]
+	SPKERNEL
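+;  The flip loop above swaps round key i with round key rounds-i in place,
+;  i.e. in C terms (sketch):
+;	for (i = 0, j = rounds; i < j; i++, j--)
+;		swap the 16-byte entries rk[i] and rk[j];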
+;;====================================================================
+	SUB	B0,1,B0				; skip last round
+||	ADD	B30,A0,$KPA			; skip first round
+||	ADD	B30,A0,$KPB
+||	MVC	GFPGFR,B30			; save GFPGFR
+	LDW	*${KPA}[0],$K[0]
+||	LDW	*${KPB}[1],$K[1]
+||	MVC	B3,GFPGFR
+	LDW	*${KPA}[2],$K[2]
+||	LDW	*${KPB}[3],$K[3]
+	MVK	0x00000909,A24
+||	MVK	0x00000B0B,B24
+	MVKH	0x09090000,A24
+||	MVKH	0x0B0B0000,B24
+	MVC	B0,ILC
+||	SUB	B0,1,B0
+
+	GMPY4	$K[0],A24,$Kx9[0]		; ·0x09
+||	GMPY4	$K[1],A24,$Kx9[1]
+||	MVK	0x00000D0D,A25
+||	MVK	0x00000E0E,B25
+	GMPY4	$K[2],A24,$Kx9[2]
+||	GMPY4	$K[3],A24,$Kx9[3]
+||	MVKH	0x0D0D0000,A25
+||	MVKH	0x0E0E0000,B25
+
+	GMPY4	$K[0],B24,$KxB[0]		; ·0x0B
+||	GMPY4	$K[1],B24,$KxB[1]
+	GMPY4	$K[2],B24,$KxB[2]
+||	GMPY4	$K[3],B24,$KxB[3]
+
+	SPLOOP	11				; InvMixColumns
+;;====================================================================
+	GMPY4	$K[0],A25,$KxD[0]		; ·0x0D
+||	GMPY4	$K[1],A25,$KxD[1]
+||	SWAP2	$Kx9[0],$Kx9[0]			; rotate by 16
+||	SWAP2	$Kx9[1],$Kx9[1]
+||	MV	$K[0],$s[0]			; this or DINT
+||	MV	$K[1],$s[1]
+|| [B0]	LDW	*${KPA}[4],$K[0]
+|| [B0]	LDW	*${KPB}[5],$K[1]
+	GMPY4	$K[2],A25,$KxD[2]
+||	GMPY4	$K[3],A25,$KxD[3]
+||	SWAP2	$Kx9[2],$Kx9[2]
+||	SWAP2	$Kx9[3],$Kx9[3]
+||	MV	$K[2],$s[2]
+||	MV	$K[3],$s[3]
+|| [B0]	LDW	*${KPA}[6],$K[2]
+|| [B0]	LDW	*${KPB}[7],$K[3]
+
+	GMPY4	$s[0],B25,$KxE[0]		; ·0x0E
+||	GMPY4	$s[1],B25,$KxE[1]
+||	XOR	$Kx9[0],$KxB[0],$KxB[0]
+||	XOR	$Kx9[1],$KxB[1],$KxB[1]
+	GMPY4	$s[2],B25,$KxE[2]
+||	GMPY4	$s[3],B25,$KxE[3]
+||	XOR	$Kx9[2],$KxB[2],$KxB[2]
+||	XOR	$Kx9[3],$KxB[3],$KxB[3]
+
+	ROTL	$KxB[0],TBL3,$KxB[0]
+||	ROTL	$KxB[1],TBL3,$KxB[1]
+||	SWAP2	$KxD[0],$KxD[0]			; rotate by 16
+||	SWAP2	$KxD[1],$KxD[1]
+	ROTL	$KxB[2],TBL3,$KxB[2]
+||	ROTL	$KxB[3],TBL3,$KxB[3]
+||	SWAP2	$KxD[2],$KxD[2]
+||	SWAP2	$KxD[3],$KxD[3]
+
+	XOR	$KxE[0],$KxD[0],$KxE[0]
+||	XOR	$KxE[1],$KxD[1],$KxE[1]
+|| [B0]	GMPY4	$K[0],A24,$Kx9[0]		; ·0x09
+|| [B0]	GMPY4	$K[1],A24,$Kx9[1]
+||	ADDAW	$KPA,4,$KPA
+	XOR	$KxE[2],$KxD[2],$KxE[2]
+||	XOR	$KxE[3],$KxD[3],$KxE[3]
+|| [B0]	GMPY4	$K[2],A24,$Kx9[2]
+|| [B0]	GMPY4	$K[3],A24,$Kx9[3]
+||	ADDAW	$KPB,4,$KPB
+
+	XOR	$KxB[0],$KxE[0],$KxE[0]
+||	XOR	$KxB[1],$KxE[1],$KxE[1]
+|| [B0]	GMPY4	$K[0],B24,$KxB[0]		; ·0x0B
+|| [B0]	GMPY4	$K[1],B24,$KxB[1]
+	XOR	$KxB[2],$KxE[2],$KxE[2]
+||	XOR	$KxB[3],$KxE[3],$KxE[3]
+|| [B0]	GMPY4	$K[2],B24,$KxB[2]
+|| [B0]	GMPY4	$K[3],B24,$KxB[3]
+||	STW	$KxE[0],*${KPA}[-4]
+||	STW	$KxE[1],*${KPB}[-3]
+	STW	$KxE[2],*${KPA}[-2]
+||	STW	$KxE[3],*${KPB}[-1]
+|| [B0]	SUB	B0,1,B0
+	SPKERNEL
+;;====================================================================
+	BNOP	B31,3
+	MVC	B30,GFPGFR			; restore GFPGFR(*)
+	MVK	0,RET
+	.endasmfunc
+___
+# (*)	Even though the ABI doesn't specify GFPGFR as non-volatile,
+#	there are code samples out there that *assume* its default value.
+}
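+
+# GMPY4 multiplies four byte pairs at once in GF(2^8) under the polynomial
+# loaded into GFPGFR (0x11b for AES); a scalar C model of one such byte
+# multiply, for illustration:
+#
+#	uint8_t gf_mul(uint8_t a, uint8_t b) {
+#		uint8_t r = 0;
+#		while (b) {
+#			if (b & 1) r ^= a;
+#			a = (uint8_t)((a << 1) ^ ((a & 0x80) ? 0x1b : 0));
+#			b >>= 1;
+#		}
+#		return r;
+#	}
+#
+# The InvMixColumns pass above applies it with the constants 0x09, 0x0b,
+# 0x0d and 0x0e, replicated per byte in A24/B24/A25/B25.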
+{
+my ($inp,$out,$blocks,$key,$ivp)=("A4","B4","A6","B6","A8");
+$code.=<<___;
+	.global	_AES_ctr32_encrypt
+_AES_ctr32_encrypt:
+	.asmfunc
+	LDNDW	*${ivp}[0],A31:A30	; load counter value
+||	MV	$blocks,A2		; reassign $blocks
+||	DMV	RA,$key,B27:B26		; reassign RA and $key
+	LDNDW	*${ivp}[1],B31:B30
+||	MVK	0,B2			; don't let __encrypt load input
+||	MVK	0,A1			; and postpone writing output
+	.if	.BIG_ENDIAN
+	NOP
+	.else
+	NOP	4
+	SWAP2	B31,B31			; keep least significant 32 bits
+	SWAP4	B31,B31			; in host byte order
+	.endif
+ctr32_loop?:
+   [A2]	BNOP	__encrypt
+|| [A1]	XOR	A29,A9,A9		; input^Ek(counter)
+|| [A1]	XOR	A28,A8,A8
+|| [A2]	LDNDW	*INP++,A29:A28		; load input
+  [!A2]	BNOP	B27			; return
+|| [A1]	XOR	B29,B9,B9
+|| [A1]	XOR	B28,B8,B8
+|| [A2]	LDNDW	*INP++,B29:B28
+	.if	.BIG_ENDIAN
+   [A1]	STNDW	A9:A8,*OUT++		; save output
+|| [A2]	DMV	A31,A30,A9:A8		; pass counter value to __encrypt
+   [A1]	STNDW	B9:B8,*OUT++
+|| [A2]	DMV	B31,B30,B9:B8
+|| [A2]	ADD	B30,1,B30		; counter++
+	.else
+   [A1]	STNDW	A9:A8,*OUT++		; save output
+|| [A2]	DMV	A31,A30,A9:A8
+|| [A2]	SWAP2	B31,B0
+|| [A2]	ADD	B31,1,B31		; counter++
+   [A1]	STNDW	B9:B8,*OUT++
+|| [A2]	MV	B30,B8
+|| [A2]	SWAP4	B0,B9
+	.endif
+   [A2]	ADDKPC	ctr32_loop?,RA		; return to ctr32_loop?
+|| [A2]	MV	B26,KEY			; pass $key
+|| [A2]	SUB	A2,1,A2			; $blocks--
+||[!A1]	MVK	1,A1
+	NOP
+	NOP
+	.endasmfunc
+___
+}
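+
+# In C terms, the ctr32 loop above amounts to (sketch; the low 32 bits of
+# the 16-byte counter are incremented as a big-endian value, which is what
+# the SWAP2/SWAP4 pair maintains on little-endian parts):
+#
+#	while (blocks--) {
+#		unsigned char ek[16];
+#		AES_encrypt(ivec, ek, key);
+#		for (int i = 0; i < 16; i++) *out++ = *in++ ^ ek[i];
+#		for (int i = 15; i >= 12; i--)
+#			if (++ivec[i]) break;	/* counter++ */
+#	}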
+# Tables are kept in endian-neutral manner
+$code.=<<___;
+	.if	__TI_EABI__
+	.sect	".text:aes_asm.const"
+	.else
+	.sect	".const:aes_asm"
+	.endif
+	.align	128
+AES_Te:
+	.byte	0xc6,0x63,0x63,0xa5,	0xf8,0x7c,0x7c,0x84
+	.byte	0xee,0x77,0x77,0x99,	0xf6,0x7b,0x7b,0x8d
+	.byte	0xff,0xf2,0xf2,0x0d,	0xd6,0x6b,0x6b,0xbd
+	.byte	0xde,0x6f,0x6f,0xb1,	0x91,0xc5,0xc5,0x54
+	.byte	0x60,0x30,0x30,0x50,	0x02,0x01,0x01,0x03
+	.byte	0xce,0x67,0x67,0xa9,	0x56,0x2b,0x2b,0x7d
+	.byte	0xe7,0xfe,0xfe,0x19,	0xb5,0xd7,0xd7,0x62
+	.byte	0x4d,0xab,0xab,0xe6,	0xec,0x76,0x76,0x9a
+	.byte	0x8f,0xca,0xca,0x45,	0x1f,0x82,0x82,0x9d
+	.byte	0x89,0xc9,0xc9,0x40,	0xfa,0x7d,0x7d,0x87
+	.byte	0xef,0xfa,0xfa,0x15,	0xb2,0x59,0x59,0xeb
+	.byte	0x8e,0x47,0x47,0xc9,	0xfb,0xf0,0xf0,0x0b
+	.byte	0x41,0xad,0xad,0xec,	0xb3,0xd4,0xd4,0x67
+	.byte	0x5f,0xa2,0xa2,0xfd,	0x45,0xaf,0xaf,0xea
+	.byte	0x23,0x9c,0x9c,0xbf,	0x53,0xa4,0xa4,0xf7
+	.byte	0xe4,0x72,0x72,0x96,	0x9b,0xc0,0xc0,0x5b
+	.byte	0x75,0xb7,0xb7,0xc2,	0xe1,0xfd,0xfd,0x1c
+	.byte	0x3d,0x93,0x93,0xae,	0x4c,0x26,0x26,0x6a
+	.byte	0x6c,0x36,0x36,0x5a,	0x7e,0x3f,0x3f,0x41
+	.byte	0xf5,0xf7,0xf7,0x02,	0x83,0xcc,0xcc,0x4f
+	.byte	0x68,0x34,0x34,0x5c,	0x51,0xa5,0xa5,0xf4
+	.byte	0xd1,0xe5,0xe5,0x34,	0xf9,0xf1,0xf1,0x08
+	.byte	0xe2,0x71,0x71,0x93,	0xab,0xd8,0xd8,0x73
+	.byte	0x62,0x31,0x31,0x53,	0x2a,0x15,0x15,0x3f
+	.byte	0x08,0x04,0x04,0x0c,	0x95,0xc7,0xc7,0x52
+	.byte	0x46,0x23,0x23,0x65,	0x9d,0xc3,0xc3,0x5e
+	.byte	0x30,0x18,0x18,0x28,	0x37,0x96,0x96,0xa1
+	.byte	0x0a,0x05,0x05,0x0f,	0x2f,0x9a,0x9a,0xb5
+	.byte	0x0e,0x07,0x07,0x09,	0x24,0x12,0x12,0x36
+	.byte	0x1b,0x80,0x80,0x9b,	0xdf,0xe2,0xe2,0x3d
+	.byte	0xcd,0xeb,0xeb,0x26,	0x4e,0x27,0x27,0x69
+	.byte	0x7f,0xb2,0xb2,0xcd,	0xea,0x75,0x75,0x9f
+	.byte	0x12,0x09,0x09,0x1b,	0x1d,0x83,0x83,0x9e
+	.byte	0x58,0x2c,0x2c,0x74,	0x34,0x1a,0x1a,0x2e
+	.byte	0x36,0x1b,0x1b,0x2d,	0xdc,0x6e,0x6e,0xb2
+	.byte	0xb4,0x5a,0x5a,0xee,	0x5b,0xa0,0xa0,0xfb
+	.byte	0xa4,0x52,0x52,0xf6,	0x76,0x3b,0x3b,0x4d
+	.byte	0xb7,0xd6,0xd6,0x61,	0x7d,0xb3,0xb3,0xce
+	.byte	0x52,0x29,0x29,0x7b,	0xdd,0xe3,0xe3,0x3e
+	.byte	0x5e,0x2f,0x2f,0x71,	0x13,0x84,0x84,0x97
+	.byte	0xa6,0x53,0x53,0xf5,	0xb9,0xd1,0xd1,0x68
+	.byte	0x00,0x00,0x00,0x00,	0xc1,0xed,0xed,0x2c
+	.byte	0x40,0x20,0x20,0x60,	0xe3,0xfc,0xfc,0x1f
+	.byte	0x79,0xb1,0xb1,0xc8,	0xb6,0x5b,0x5b,0xed
+	.byte	0xd4,0x6a,0x6a,0xbe,	0x8d,0xcb,0xcb,0x46
+	.byte	0x67,0xbe,0xbe,0xd9,	0x72,0x39,0x39,0x4b
+	.byte	0x94,0x4a,0x4a,0xde,	0x98,0x4c,0x4c,0xd4
+	.byte	0xb0,0x58,0x58,0xe8,	0x85,0xcf,0xcf,0x4a
+	.byte	0xbb,0xd0,0xd0,0x6b,	0xc5,0xef,0xef,0x2a
+	.byte	0x4f,0xaa,0xaa,0xe5,	0xed,0xfb,0xfb,0x16
+	.byte	0x86,0x43,0x43,0xc5,	0x9a,0x4d,0x4d,0xd7
+	.byte	0x66,0x33,0x33,0x55,	0x11,0x85,0x85,0x94
+	.byte	0x8a,0x45,0x45,0xcf,	0xe9,0xf9,0xf9,0x10
+	.byte	0x04,0x02,0x02,0x06,	0xfe,0x7f,0x7f,0x81
+	.byte	0xa0,0x50,0x50,0xf0,	0x78,0x3c,0x3c,0x44
+	.byte	0x25,0x9f,0x9f,0xba,	0x4b,0xa8,0xa8,0xe3
+	.byte	0xa2,0x51,0x51,0xf3,	0x5d,0xa3,0xa3,0xfe
+	.byte	0x80,0x40,0x40,0xc0,	0x05,0x8f,0x8f,0x8a
+	.byte	0x3f,0x92,0x92,0xad,	0x21,0x9d,0x9d,0xbc
+	.byte	0x70,0x38,0x38,0x48,	0xf1,0xf5,0xf5,0x04
+	.byte	0x63,0xbc,0xbc,0xdf,	0x77,0xb6,0xb6,0xc1
+	.byte	0xaf,0xda,0xda,0x75,	0x42,0x21,0x21,0x63
+	.byte	0x20,0x10,0x10,0x30,	0xe5,0xff,0xff,0x1a
+	.byte	0xfd,0xf3,0xf3,0x0e,	0xbf,0xd2,0xd2,0x6d
+	.byte	0x81,0xcd,0xcd,0x4c,	0x18,0x0c,0x0c,0x14
+	.byte	0x26,0x13,0x13,0x35,	0xc3,0xec,0xec,0x2f
+	.byte	0xbe,0x5f,0x5f,0xe1,	0x35,0x97,0x97,0xa2
+	.byte	0x88,0x44,0x44,0xcc,	0x2e,0x17,0x17,0x39
+	.byte	0x93,0xc4,0xc4,0x57,	0x55,0xa7,0xa7,0xf2
+	.byte	0xfc,0x7e,0x7e,0x82,	0x7a,0x3d,0x3d,0x47
+	.byte	0xc8,0x64,0x64,0xac,	0xba,0x5d,0x5d,0xe7
+	.byte	0x32,0x19,0x19,0x2b,	0xe6,0x73,0x73,0x95
+	.byte	0xc0,0x60,0x60,0xa0,	0x19,0x81,0x81,0x98
+	.byte	0x9e,0x4f,0x4f,0xd1,	0xa3,0xdc,0xdc,0x7f
+	.byte	0x44,0x22,0x22,0x66,	0x54,0x2a,0x2a,0x7e
+	.byte	0x3b,0x90,0x90,0xab,	0x0b,0x88,0x88,0x83
+	.byte	0x8c,0x46,0x46,0xca,	0xc7,0xee,0xee,0x29
+	.byte	0x6b,0xb8,0xb8,0xd3,	0x28,0x14,0x14,0x3c
+	.byte	0xa7,0xde,0xde,0x79,	0xbc,0x5e,0x5e,0xe2
+	.byte	0x16,0x0b,0x0b,0x1d,	0xad,0xdb,0xdb,0x76
+	.byte	0xdb,0xe0,0xe0,0x3b,	0x64,0x32,0x32,0x56
+	.byte	0x74,0x3a,0x3a,0x4e,	0x14,0x0a,0x0a,0x1e
+	.byte	0x92,0x49,0x49,0xdb,	0x0c,0x06,0x06,0x0a
+	.byte	0x48,0x24,0x24,0x6c,	0xb8,0x5c,0x5c,0xe4
+	.byte	0x9f,0xc2,0xc2,0x5d,	0xbd,0xd3,0xd3,0x6e
+	.byte	0x43,0xac,0xac,0xef,	0xc4,0x62,0x62,0xa6
+	.byte	0x39,0x91,0x91,0xa8,	0x31,0x95,0x95,0xa4
+	.byte	0xd3,0xe4,0xe4,0x37,	0xf2,0x79,0x79,0x8b
+	.byte	0xd5,0xe7,0xe7,0x32,	0x8b,0xc8,0xc8,0x43
+	.byte	0x6e,0x37,0x37,0x59,	0xda,0x6d,0x6d,0xb7
+	.byte	0x01,0x8d,0x8d,0x8c,	0xb1,0xd5,0xd5,0x64
+	.byte	0x9c,0x4e,0x4e,0xd2,	0x49,0xa9,0xa9,0xe0
+	.byte	0xd8,0x6c,0x6c,0xb4,	0xac,0x56,0x56,0xfa
+	.byte	0xf3,0xf4,0xf4,0x07,	0xcf,0xea,0xea,0x25
+	.byte	0xca,0x65,0x65,0xaf,	0xf4,0x7a,0x7a,0x8e
+	.byte	0x47,0xae,0xae,0xe9,	0x10,0x08,0x08,0x18
+	.byte	0x6f,0xba,0xba,0xd5,	0xf0,0x78,0x78,0x88
+	.byte	0x4a,0x25,0x25,0x6f,	0x5c,0x2e,0x2e,0x72
+	.byte	0x38,0x1c,0x1c,0x24,	0x57,0xa6,0xa6,0xf1
+	.byte	0x73,0xb4,0xb4,0xc7,	0x97,0xc6,0xc6,0x51
+	.byte	0xcb,0xe8,0xe8,0x23,	0xa1,0xdd,0xdd,0x7c
+	.byte	0xe8,0x74,0x74,0x9c,	0x3e,0x1f,0x1f,0x21
+	.byte	0x96,0x4b,0x4b,0xdd,	0x61,0xbd,0xbd,0xdc
+	.byte	0x0d,0x8b,0x8b,0x86,	0x0f,0x8a,0x8a,0x85
+	.byte	0xe0,0x70,0x70,0x90,	0x7c,0x3e,0x3e,0x42
+	.byte	0x71,0xb5,0xb5,0xc4,	0xcc,0x66,0x66,0xaa
+	.byte	0x90,0x48,0x48,0xd8,	0x06,0x03,0x03,0x05
+	.byte	0xf7,0xf6,0xf6,0x01,	0x1c,0x0e,0x0e,0x12
+	.byte	0xc2,0x61,0x61,0xa3,	0x6a,0x35,0x35,0x5f
+	.byte	0xae,0x57,0x57,0xf9,	0x69,0xb9,0xb9,0xd0
+	.byte	0x17,0x86,0x86,0x91,	0x99,0xc1,0xc1,0x58
+	.byte	0x3a,0x1d,0x1d,0x27,	0x27,0x9e,0x9e,0xb9
+	.byte	0xd9,0xe1,0xe1,0x38,	0xeb,0xf8,0xf8,0x13
+	.byte	0x2b,0x98,0x98,0xb3,	0x22,0x11,0x11,0x33
+	.byte	0xd2,0x69,0x69,0xbb,	0xa9,0xd9,0xd9,0x70
+	.byte	0x07,0x8e,0x8e,0x89,	0x33,0x94,0x94,0xa7
+	.byte	0x2d,0x9b,0x9b,0xb6,	0x3c,0x1e,0x1e,0x22
+	.byte	0x15,0x87,0x87,0x92,	0xc9,0xe9,0xe9,0x20
+	.byte	0x87,0xce,0xce,0x49,	0xaa,0x55,0x55,0xff
+	.byte	0x50,0x28,0x28,0x78,	0xa5,0xdf,0xdf,0x7a
+	.byte	0x03,0x8c,0x8c,0x8f,	0x59,0xa1,0xa1,0xf8
+	.byte	0x09,0x89,0x89,0x80,	0x1a,0x0d,0x0d,0x17
+	.byte	0x65,0xbf,0xbf,0xda,	0xd7,0xe6,0xe6,0x31
+	.byte	0x84,0x42,0x42,0xc6,	0xd0,0x68,0x68,0xb8
+	.byte	0x82,0x41,0x41,0xc3,	0x29,0x99,0x99,0xb0
+	.byte	0x5a,0x2d,0x2d,0x77,	0x1e,0x0f,0x0f,0x11
+	.byte	0x7b,0xb0,0xb0,0xcb,	0xa8,0x54,0x54,0xfc
+	.byte	0x6d,0xbb,0xbb,0xd6,	0x2c,0x16,0x16,0x3a
+AES_Te4:
+	.byte	0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5
+	.byte	0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76
+	.byte	0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0
+	.byte	0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0
+	.byte	0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc
+	.byte	0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15
+	.byte	0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a
+	.byte	0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75
+	.byte	0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0
+	.byte	0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84
+	.byte	0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b
+	.byte	0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf
+	.byte	0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85
+	.byte	0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8
+	.byte	0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5
+	.byte	0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2
+	.byte	0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17
+	.byte	0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73
+	.byte	0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88
+	.byte	0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb
+	.byte	0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c
+	.byte	0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79
+	.byte	0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9
+	.byte	0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08
+	.byte	0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6
+	.byte	0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a
+	.byte	0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e
+	.byte	0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e
+	.byte	0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94
+	.byte	0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf
+	.byte	0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68
+	.byte	0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16
+rcon:
+	.byte	0x01,0x00,0x00,0x00,	0x02,0x00,0x00,0x00
+	.byte	0x04,0x00,0x00,0x00,	0x08,0x00,0x00,0x00
+	.byte	0x10,0x00,0x00,0x00,	0x20,0x00,0x00,0x00
+	.byte	0x40,0x00,0x00,0x00,	0x80,0x00,0x00,0x00
+	.byte	0x1B,0x00,0x00,0x00,	0x36,0x00,0x00,0x00
+	.align	128
+AES_Td:
+	.byte	0x51,0xf4,0xa7,0x50,	0x7e,0x41,0x65,0x53
+	.byte	0x1a,0x17,0xa4,0xc3,	0x3a,0x27,0x5e,0x96
+	.byte	0x3b,0xab,0x6b,0xcb,	0x1f,0x9d,0x45,0xf1
+	.byte	0xac,0xfa,0x58,0xab,	0x4b,0xe3,0x03,0x93
+	.byte	0x20,0x30,0xfa,0x55,	0xad,0x76,0x6d,0xf6
+	.byte	0x88,0xcc,0x76,0x91,	0xf5,0x02,0x4c,0x25
+	.byte	0x4f,0xe5,0xd7,0xfc,	0xc5,0x2a,0xcb,0xd7
+	.byte	0x26,0x35,0x44,0x80,	0xb5,0x62,0xa3,0x8f
+	.byte	0xde,0xb1,0x5a,0x49,	0x25,0xba,0x1b,0x67
+	.byte	0x45,0xea,0x0e,0x98,	0x5d,0xfe,0xc0,0xe1
+	.byte	0xc3,0x2f,0x75,0x02,	0x81,0x4c,0xf0,0x12
+	.byte	0x8d,0x46,0x97,0xa3,	0x6b,0xd3,0xf9,0xc6
+	.byte	0x03,0x8f,0x5f,0xe7,	0x15,0x92,0x9c,0x95
+	.byte	0xbf,0x6d,0x7a,0xeb,	0x95,0x52,0x59,0xda
+	.byte	0xd4,0xbe,0x83,0x2d,	0x58,0x74,0x21,0xd3
+	.byte	0x49,0xe0,0x69,0x29,	0x8e,0xc9,0xc8,0x44
+	.byte	0x75,0xc2,0x89,0x6a,	0xf4,0x8e,0x79,0x78
+	.byte	0x99,0x58,0x3e,0x6b,	0x27,0xb9,0x71,0xdd
+	.byte	0xbe,0xe1,0x4f,0xb6,	0xf0,0x88,0xad,0x17
+	.byte	0xc9,0x20,0xac,0x66,	0x7d,0xce,0x3a,0xb4
+	.byte	0x63,0xdf,0x4a,0x18,	0xe5,0x1a,0x31,0x82
+	.byte	0x97,0x51,0x33,0x60,	0x62,0x53,0x7f,0x45
+	.byte	0xb1,0x64,0x77,0xe0,	0xbb,0x6b,0xae,0x84
+	.byte	0xfe,0x81,0xa0,0x1c,	0xf9,0x08,0x2b,0x94
+	.byte	0x70,0x48,0x68,0x58,	0x8f,0x45,0xfd,0x19
+	.byte	0x94,0xde,0x6c,0x87,	0x52,0x7b,0xf8,0xb7
+	.byte	0xab,0x73,0xd3,0x23,	0x72,0x4b,0x02,0xe2
+	.byte	0xe3,0x1f,0x8f,0x57,	0x66,0x55,0xab,0x2a
+	.byte	0xb2,0xeb,0x28,0x07,	0x2f,0xb5,0xc2,0x03
+	.byte	0x86,0xc5,0x7b,0x9a,	0xd3,0x37,0x08,0xa5
+	.byte	0x30,0x28,0x87,0xf2,	0x23,0xbf,0xa5,0xb2
+	.byte	0x02,0x03,0x6a,0xba,	0xed,0x16,0x82,0x5c
+	.byte	0x8a,0xcf,0x1c,0x2b,	0xa7,0x79,0xb4,0x92
+	.byte	0xf3,0x07,0xf2,0xf0,	0x4e,0x69,0xe2,0xa1
+	.byte	0x65,0xda,0xf4,0xcd,	0x06,0x05,0xbe,0xd5
+	.byte	0xd1,0x34,0x62,0x1f,	0xc4,0xa6,0xfe,0x8a
+	.byte	0x34,0x2e,0x53,0x9d,	0xa2,0xf3,0x55,0xa0
+	.byte	0x05,0x8a,0xe1,0x32,	0xa4,0xf6,0xeb,0x75
+	.byte	0x0b,0x83,0xec,0x39,	0x40,0x60,0xef,0xaa
+	.byte	0x5e,0x71,0x9f,0x06,	0xbd,0x6e,0x10,0x51
+	.byte	0x3e,0x21,0x8a,0xf9,	0x96,0xdd,0x06,0x3d
+	.byte	0xdd,0x3e,0x05,0xae,	0x4d,0xe6,0xbd,0x46
+	.byte	0x91,0x54,0x8d,0xb5,	0x71,0xc4,0x5d,0x05
+	.byte	0x04,0x06,0xd4,0x6f,	0x60,0x50,0x15,0xff
+	.byte	0x19,0x98,0xfb,0x24,	0xd6,0xbd,0xe9,0x97
+	.byte	0x89,0x40,0x43,0xcc,	0x67,0xd9,0x9e,0x77
+	.byte	0xb0,0xe8,0x42,0xbd,	0x07,0x89,0x8b,0x88
+	.byte	0xe7,0x19,0x5b,0x38,	0x79,0xc8,0xee,0xdb
+	.byte	0xa1,0x7c,0x0a,0x47,	0x7c,0x42,0x0f,0xe9
+	.byte	0xf8,0x84,0x1e,0xc9,	0x00,0x00,0x00,0x00
+	.byte	0x09,0x80,0x86,0x83,	0x32,0x2b,0xed,0x48
+	.byte	0x1e,0x11,0x70,0xac,	0x6c,0x5a,0x72,0x4e
+	.byte	0xfd,0x0e,0xff,0xfb,	0x0f,0x85,0x38,0x56
+	.byte	0x3d,0xae,0xd5,0x1e,	0x36,0x2d,0x39,0x27
+	.byte	0x0a,0x0f,0xd9,0x64,	0x68,0x5c,0xa6,0x21
+	.byte	0x9b,0x5b,0x54,0xd1,	0x24,0x36,0x2e,0x3a
+	.byte	0x0c,0x0a,0x67,0xb1,	0x93,0x57,0xe7,0x0f
+	.byte	0xb4,0xee,0x96,0xd2,	0x1b,0x9b,0x91,0x9e
+	.byte	0x80,0xc0,0xc5,0x4f,	0x61,0xdc,0x20,0xa2
+	.byte	0x5a,0x77,0x4b,0x69,	0x1c,0x12,0x1a,0x16
+	.byte	0xe2,0x93,0xba,0x0a,	0xc0,0xa0,0x2a,0xe5
+	.byte	0x3c,0x22,0xe0,0x43,	0x12,0x1b,0x17,0x1d
+	.byte	0x0e,0x09,0x0d,0x0b,	0xf2,0x8b,0xc7,0xad
+	.byte	0x2d,0xb6,0xa8,0xb9,	0x14,0x1e,0xa9,0xc8
+	.byte	0x57,0xf1,0x19,0x85,	0xaf,0x75,0x07,0x4c
+	.byte	0xee,0x99,0xdd,0xbb,	0xa3,0x7f,0x60,0xfd
+	.byte	0xf7,0x01,0x26,0x9f,	0x5c,0x72,0xf5,0xbc
+	.byte	0x44,0x66,0x3b,0xc5,	0x5b,0xfb,0x7e,0x34
+	.byte	0x8b,0x43,0x29,0x76,	0xcb,0x23,0xc6,0xdc
+	.byte	0xb6,0xed,0xfc,0x68,	0xb8,0xe4,0xf1,0x63
+	.byte	0xd7,0x31,0xdc,0xca,	0x42,0x63,0x85,0x10
+	.byte	0x13,0x97,0x22,0x40,	0x84,0xc6,0x11,0x20
+	.byte	0x85,0x4a,0x24,0x7d,	0xd2,0xbb,0x3d,0xf8
+	.byte	0xae,0xf9,0x32,0x11,	0xc7,0x29,0xa1,0x6d
+	.byte	0x1d,0x9e,0x2f,0x4b,	0xdc,0xb2,0x30,0xf3
+	.byte	0x0d,0x86,0x52,0xec,	0x77,0xc1,0xe3,0xd0
+	.byte	0x2b,0xb3,0x16,0x6c,	0xa9,0x70,0xb9,0x99
+	.byte	0x11,0x94,0x48,0xfa,	0x47,0xe9,0x64,0x22
+	.byte	0xa8,0xfc,0x8c,0xc4,	0xa0,0xf0,0x3f,0x1a
+	.byte	0x56,0x7d,0x2c,0xd8,	0x22,0x33,0x90,0xef
+	.byte	0x87,0x49,0x4e,0xc7,	0xd9,0x38,0xd1,0xc1
+	.byte	0x8c,0xca,0xa2,0xfe,	0x98,0xd4,0x0b,0x36
+	.byte	0xa6,0xf5,0x81,0xcf,	0xa5,0x7a,0xde,0x28
+	.byte	0xda,0xb7,0x8e,0x26,	0x3f,0xad,0xbf,0xa4
+	.byte	0x2c,0x3a,0x9d,0xe4,	0x50,0x78,0x92,0x0d
+	.byte	0x6a,0x5f,0xcc,0x9b,	0x54,0x7e,0x46,0x62
+	.byte	0xf6,0x8d,0x13,0xc2,	0x90,0xd8,0xb8,0xe8
+	.byte	0x2e,0x39,0xf7,0x5e,	0x82,0xc3,0xaf,0xf5
+	.byte	0x9f,0x5d,0x80,0xbe,	0x69,0xd0,0x93,0x7c
+	.byte	0x6f,0xd5,0x2d,0xa9,	0xcf,0x25,0x12,0xb3
+	.byte	0xc8,0xac,0x99,0x3b,	0x10,0x18,0x7d,0xa7
+	.byte	0xe8,0x9c,0x63,0x6e,	0xdb,0x3b,0xbb,0x7b
+	.byte	0xcd,0x26,0x78,0x09,	0x6e,0x59,0x18,0xf4
+	.byte	0xec,0x9a,0xb7,0x01,	0x83,0x4f,0x9a,0xa8
+	.byte	0xe6,0x95,0x6e,0x65,	0xaa,0xff,0xe6,0x7e
+	.byte	0x21,0xbc,0xcf,0x08,	0xef,0x15,0xe8,0xe6
+	.byte	0xba,0xe7,0x9b,0xd9,	0x4a,0x6f,0x36,0xce
+	.byte	0xea,0x9f,0x09,0xd4,	0x29,0xb0,0x7c,0xd6
+	.byte	0x31,0xa4,0xb2,0xaf,	0x2a,0x3f,0x23,0x31
+	.byte	0xc6,0xa5,0x94,0x30,	0x35,0xa2,0x66,0xc0
+	.byte	0x74,0x4e,0xbc,0x37,	0xfc,0x82,0xca,0xa6
+	.byte	0xe0,0x90,0xd0,0xb0,	0x33,0xa7,0xd8,0x15
+	.byte	0xf1,0x04,0x98,0x4a,	0x41,0xec,0xda,0xf7
+	.byte	0x7f,0xcd,0x50,0x0e,	0x17,0x91,0xf6,0x2f
+	.byte	0x76,0x4d,0xd6,0x8d,	0x43,0xef,0xb0,0x4d
+	.byte	0xcc,0xaa,0x4d,0x54,	0xe4,0x96,0x04,0xdf
+	.byte	0x9e,0xd1,0xb5,0xe3,	0x4c,0x6a,0x88,0x1b
+	.byte	0xc1,0x2c,0x1f,0xb8,	0x46,0x65,0x51,0x7f
+	.byte	0x9d,0x5e,0xea,0x04,	0x01,0x8c,0x35,0x5d
+	.byte	0xfa,0x87,0x74,0x73,	0xfb,0x0b,0x41,0x2e
+	.byte	0xb3,0x67,0x1d,0x5a,	0x92,0xdb,0xd2,0x52
+	.byte	0xe9,0x10,0x56,0x33,	0x6d,0xd6,0x47,0x13
+	.byte	0x9a,0xd7,0x61,0x8c,	0x37,0xa1,0x0c,0x7a
+	.byte	0x59,0xf8,0x14,0x8e,	0xeb,0x13,0x3c,0x89
+	.byte	0xce,0xa9,0x27,0xee,	0xb7,0x61,0xc9,0x35
+	.byte	0xe1,0x1c,0xe5,0xed,	0x7a,0x47,0xb1,0x3c
+	.byte	0x9c,0xd2,0xdf,0x59,	0x55,0xf2,0x73,0x3f
+	.byte	0x18,0x14,0xce,0x79,	0x73,0xc7,0x37,0xbf
+	.byte	0x53,0xf7,0xcd,0xea,	0x5f,0xfd,0xaa,0x5b
+	.byte	0xdf,0x3d,0x6f,0x14,	0x78,0x44,0xdb,0x86
+	.byte	0xca,0xaf,0xf3,0x81,	0xb9,0x68,0xc4,0x3e
+	.byte	0x38,0x24,0x34,0x2c,	0xc2,0xa3,0x40,0x5f
+	.byte	0x16,0x1d,0xc3,0x72,	0xbc,0xe2,0x25,0x0c
+	.byte	0x28,0x3c,0x49,0x8b,	0xff,0x0d,0x95,0x41
+	.byte	0x39,0xa8,0x01,0x71,	0x08,0x0c,0xb3,0xde
+	.byte	0xd8,0xb4,0xe4,0x9c,	0x64,0x56,0xc1,0x90
+	.byte	0x7b,0xcb,0x84,0x61,	0xd5,0x32,0xb6,0x70
+	.byte	0x48,0x6c,0x5c,0x74,	0xd0,0xb8,0x57,0x42
+AES_Td4:
+	.byte	0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38
+	.byte	0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb
+	.byte	0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87
+	.byte	0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb
+	.byte	0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d
+	.byte	0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e
+	.byte	0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2
+	.byte	0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25
+	.byte	0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16
+	.byte	0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92
+	.byte	0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda
+	.byte	0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84
+	.byte	0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a
+	.byte	0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06
+	.byte	0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02
+	.byte	0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b
+	.byte	0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea
+	.byte	0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73
+	.byte	0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85
+	.byte	0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e
+	.byte	0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89
+	.byte	0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b
+	.byte	0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20
+	.byte	0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4
+	.byte	0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31
+	.byte	0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f
+	.byte	0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d
+	.byte	0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef
+	.byte	0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0
+	.byte	0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61
+	.byte	0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26
+	.byte	0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d
+	.cstring "AES for C64x+, CRYPTOGAMS by <appro\@openssl.org>"
+	.align	4
+___
+
+print $code;
+close STDOUT or die "error closing STDOUT: $!";
diff --git a/ap/lib/libssl/openssl-1.1.1o/crypto/aes/asm/aes-ia64.S b/ap/lib/libssl/openssl-1.1.1o/crypto/aes/asm/aes-ia64.S
new file mode 100644
index 0000000..03f79b7
--- /dev/null
+++ b/ap/lib/libssl/openssl-1.1.1o/crypto/aes/asm/aes-ia64.S
@@ -0,0 +1,1130 @@
+// Copyright 2004-2016 The OpenSSL Project Authors. All Rights Reserved.
+//
+// Licensed under the OpenSSL license (the "License").  You may not use
+// this file except in compliance with the License.  You can obtain a copy
+// in the file LICENSE in the source distribution or at
+// https://www.openssl.org/source/license.html
+//
+// ====================================================================
+// Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+// project. Rights for redistribution and usage in source and binary
+// forms are granted according to the OpenSSL license.
+// ====================================================================
+//
+// What's wrong with compiler-generated code? The compiler never uses
+// variable 'shr', which is pairable with the 'extr'/'dep' instructions.
+// It also uses 'zxt', which is an I-type instruction but can be replaced
+// with 'and', which in turn can be assigned to an M-port [there are
+// twice as many M-ports as I-ports on Itanium 2]. By sacrificing a few
+// registers for small constants (255, 24 and 16) to be used with the
+// 'shr' and 'and' instructions I can achieve better ILP, Instruction
+// Level Parallelism, and performance. This code outperforms GCC 3.3
+// generated code by over a factor of 2 (two), GCC 3.4 by 70% and
+// HP C by 40%. Measured in the best-case scenario, i.e. aligned
+// big-endian input, ECB timing on Itanium 2 is (18 + 13*rounds)
+// ticks per block, or 9.25 CPU cycles per byte for a 128-bit key.
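+//
+// In C terms, the extraction pattern described above uses shifts and
+// masks against preloaded constants instead of dedicated extract or
+// zero-extend instructions. A minimal sketch, assuming a 32-bit state
+// word s (name hypothetical, not taken from this file):
+//
+//	uint32_t b3 =  s >> 24;          /* 'shr' with preloaded 24   */
+//	uint32_t b2 = (s >> 16) & 0xff;  /* 'shr' plus 'and' with 255 */
+//	uint32_t b1 = (s >>  8) & 0xff;  /* replaces 'extr'           */
+//	uint32_t b0 =  s        & 0xff;  /* 'and' can go to an M-port */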
+
+// Version 1.2 mitigates the hazard of cache-timing attacks by
+// a) compressing the S-boxes from 8KB to 2KB+256B, b) scheduling
+// references to the S-boxes for L2 cache latency, c) prefetching
+// T[ed]4 prior to the last round. As a result, performance dropped to
+// (26 + 15*rounds) ticks per block, or 11 cycles per byte processed
+// with a 128-bit key. This is ~16% deterioration. For reference, the
+// Itanium 2 L1 cache has a 64-byte line size and the L2 a 128-byte one...
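+//
+// The 256B T[ed]4 tables hold plain (inverse) S-box bytes, so the last
+// round reduces to four 1-byte lookups per output word, followed by the
+// round-key XOR. A hedged C sketch of one final-round output column
+// (table and state names illustrative):
+//
+//	uint32_t w = ((uint32_t)Te4[(s0 >> 24) & 0xff] << 24)
+//	           | ((uint32_t)Te4[(s1 >> 16) & 0xff] << 16)
+//	           | ((uint32_t)Te4[(s2 >>  8) & 0xff] <<  8)
+//	           |  (uint32_t)Te4[ s3        & 0xff];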
+
+.ident	"aes-ia64.S, version 1.2"
+.ident	"IA-64 ISA artwork by Andy Polyakov <appro@openssl.org>"
+.explicit
+.text
+
+rk0=r8;     rk1=r9;
+
+pfssave=r2;
+lcsave=r10;
+prsave=r3;
+maskff=r11;
+twenty4=r14;
+sixteen=r15;
+
+te00=r16;   te11=r17;   te22=r18;   te33=r19;
+te01=r20;   te12=r21;   te23=r22;   te30=r23;
+te02=r24;   te13=r25;   te20=r26;   te31=r27;
+te03=r28;   te10=r29;   te21=r30;   te32=r31;
+
+// these are rotating...
+t0=r32;     s0=r33;
+t1=r34;     s1=r35;
+t2=r36;     s2=r37;
+t3=r38;     s3=r39;
+
+te0=r40;    te1=r41;    te2=r42;    te3=r43;
+
+#if defined(_HPUX_SOURCE) && !defined(_LP64)
+# define ADDP	addp4
+#else
+# define ADDP	add
+#endif
+
+// Offsets from Te0
+#define TE0	0
+#define TE2	2
+#if defined(_HPUX_SOURCE) || defined(B_ENDIAN)
+#define TE1	3
+#define TE3	1
+#else
+#define TE1	1
+#define TE3	3
+#endif
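+
+// Why byte offsets within an entry work: AES_Te below stores each
+// 32-bit word twice per 8-byte entry (note the doubled 'data4' pairs),
+// so a 4-byte load at byte offset k inside an entry yields that word
+// rotated by 8*k bits; the rotation direction depends on endianness,
+// hence the TE1/TE3 swap above. A hedged C sketch of such a read
+// (function name hypothetical):
+//
+//	#include <stdint.h>
+//	#include <string.h>
+//
+//	static uint32_t rotated_view(const uint8_t entry[8], unsigned k)
+//	{
+//	    uint32_t v;                       /* k in 0..3 */
+//	    memcpy(&v, entry + k, sizeof(v)); /* unaligned 4-byte read */
+//	    return v;     /* the entry's word rotated by 8*k bits */
+//	}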
+
+// This implies that AES_KEY comprises 32-bit key schedule elements
+// even on LP64 platforms.
+#ifndef	KSZ
+# define KSZ	4
+# define LDKEY	ld4
+#endif
+
+.proc	_ia64_AES_encrypt#
+// Input:	rk0-rk1
+//		te0
+//		te3	as AES_KEY->rounds!!!
+//		s0-s3
+//		maskff,twenty4,sixteen
+// Output:	r16,r20,r24,r28 as s0-s3
+// Clobber:	r16-r31,rk0-rk1,r32-r43
+.align	32
+_ia64_AES_encrypt:
+	.prologue
+	.altrp	b6
+	.body
+{ .mmi;	alloc	r16=ar.pfs,12,0,0,8
+	LDKEY	t0=[rk0],2*KSZ
+	mov	pr.rot=1<<16	}
+{ .mmi;	LDKEY	t1=[rk1],2*KSZ
+	add	te1=TE1,te0
+	add	te3=-3,te3	};;
+{ .mib;	LDKEY	t2=[rk0],2*KSZ
+	mov	ar.ec=2		}
+{ .mib;	LDKEY	t3=[rk1],2*KSZ
+	add	te2=TE2,te0
+	brp.loop.imp	.Le_top,.Le_end-16	};;
+
+{ .mmi;	xor	s0=s0,t0
+	xor	s1=s1,t1
+	mov	ar.lc=te3	}
+{ .mmi;	xor	s2=s2,t2
+	xor	s3=s3,t3
+	add	te3=TE3,te0	};;
+
+.align	32
+.Le_top:
+{ .mmi;	(p0)	LDKEY	t0=[rk0],2*KSZ		// 0/0:rk[0]
+	(p0)	and	te33=s3,maskff		// 0/0:s3&0xff
+	(p0)	extr.u	te22=s2,8,8	}	// 0/0:s2>>8&0xff
+{ .mmi; (p0)	LDKEY	t1=[rk1],2*KSZ		// 0/1:rk[1]
+	(p0)	and	te30=s0,maskff		// 0/1:s0&0xff
+	(p0)	shr.u	te00=s0,twenty4	};;	// 0/0:s0>>24
+{ .mmi;	(p0)	LDKEY	t2=[rk0],2*KSZ		// 1/2:rk[2]
+	(p0)	shladd	te33=te33,3,te3		// 1/0:te0+s0>>24
+	(p0)	extr.u	te23=s3,8,8	}	// 1/1:s3>>8&0xff
+{ .mmi;	(p0)	LDKEY	t3=[rk1],2*KSZ		// 1/3:rk[3]
+	(p0)	shladd	te30=te30,3,te3		// 1/1:te3+s0
+	(p0)	shr.u	te01=s1,twenty4	};;	// 1/1:s1>>24
+{ .mmi;	(p0)	ld4	te33=[te33]		// 2/0:te3[s3&0xff]
+	(p0)	shladd	te22=te22,3,te2		// 2/0:te2+s2>>8&0xff
+	(p0)	extr.u	te20=s0,8,8	}	// 2/2:s0>>8&0xff
+{ .mmi;	(p0)	ld4	te30=[te30]		// 2/1:te3[s0]
+	(p0)	shladd	te23=te23,3,te2		// 2/1:te2+s3>>8
+	(p0)	shr.u	te02=s2,twenty4	};;	// 2/2:s2>>24
+{ .mmi;	(p0)	ld4	te22=[te22]		// 3/0:te2[s2>>8]
+	(p0)	shladd	te20=te20,3,te2		// 3/2:te2+s0>>8
+	(p0)	extr.u	te21=s1,8,8	}	// 3/3:s1>>8&0xff
+{ .mmi;	(p0)	ld4	te23=[te23]		// 3/1:te2[s3>>8]
+	(p0)	shladd	te00=te00,3,te0		// 3/0:te0+s0>>24
+	(p0)	shr.u	te03=s3,twenty4	};;	// 3/3:s3>>24
+{ .mmi;	(p0)	ld4	te20=[te20]		// 4/2:te2[s0>>8]
+	(p0)	shladd	te21=te21,3,te2		// 4/3:te3+s2
+	(p0)	extr.u	te11=s1,16,8	}	// 4/0:s1>>16&0xff
+{ .mmi;	(p0)	ld4	te00=[te00]		// 4/0:te0[s0>>24]
+	(p0)	shladd	te01=te01,3,te0		// 4/1:te0+s1>>24
+	(p0)	shr.u	te13=s3,sixteen	};;	// 4/2:s3>>16
+{ .mmi;	(p0)	ld4	te21=[te21]		// 5/3:te2[s1>>8]
+	(p0)	shladd	te11=te11,3,te1		// 5/0:te1+s1>>16
+	(p0)	extr.u	te12=s2,16,8	}	// 5/1:s2>>16&0xff
+{ .mmi;	(p0)	ld4	te01=[te01]		// 5/1:te0[s1>>24]
+	(p0)	shladd	te02=te02,3,te0		// 5/2:te0+s2>>24
+	(p0)	and	te31=s1,maskff	};;	// 5/2:s1&0xff
+{ .mmi;	(p0)	ld4	te11=[te11]		// 6/0:te1[s1>>16]
+	(p0)	shladd	te12=te12,3,te1		// 6/1:te1+s2>>16
+	(p0)	extr.u	te10=s0,16,8	}	// 6/3:s0>>16&0xff
+{ .mmi;	(p0)	ld4	te02=[te02]		// 6/2:te0[s2>>24]
+	(p0)	shladd	te03=te03,3,te0		// 6/3:te1+s0>>16
+	(p0)	and	te32=s2,maskff	};;	// 6/3:s2&0xff
+
+{ .mmi;	(p0)	ld4	te12=[te12]		// 7/1:te1[s2>>16]
+	(p0)	shladd	te31=te31,3,te3		// 7/2:te3+s1&0xff
+	(p0)	and	te13=te13,maskff}	// 7/2:s3>>16&0xff
+{ .mmi;	(p0)	ld4	te03=[te03]		// 7/3:te0[s3>>24]
+	(p0)	shladd	te32=te32,3,te3		// 7/3:te3+s2
+	(p0)	xor	t0=t0,te33	};;	// 7/0:
+{ .mmi;	(p0)	ld4	te31=[te31]		// 8/2:te3[s1]
+	(p0)	shladd	te13=te13,3,te1		// 8/2:te1+s3>>16
+	(p0)	xor	t0=t0,te22	}	// 8/0:
+{ .mmi;	(p0)	ld4	te32=[te32]		// 8/3:te3[s2]
+	(p0)	shladd	te10=te10,3,te1		// 8/3:te1+s0>>16
+	(p0)	xor	t1=t1,te30	};;	// 8/1:
+{ .mmi;	(p0)	ld4	te13=[te13]		// 9/2:te1[s3>>16]
+	(p0)	ld4	te10=[te10]		// 9/3:te1[s0>>16]
+	(p0)	xor	t0=t0,te00	};;	// 9/0:		!L2 scheduling
+{ .mmi;	(p0)	xor	t1=t1,te23		// 10[9]/1:	
+	(p0)	xor	t2=t2,te20		// 10[9]/2:
+	(p0)	xor	t3=t3,te21	};;	// 10[9]/3:
+{ .mmi;	(p0)	xor	t0=t0,te11		// 11[10]/0:done!
+	(p0)	xor	t1=t1,te01		// 11[10]/1:
+	(p0)	xor	t2=t2,te02	};;	// 11[10]/2:	!L2 scheduling
+{ .mmi;	(p0)	xor	t3=t3,te03		// 12[10]/3:
+	(p16)	cmp.eq	p0,p17=r0,r0 	};;	// 12[10]/clear (p17)
+{ .mmi;	(p0)	xor	t1=t1,te12		// 13[11]/1:done!
+	(p0)	xor	t2=t2,te31		// 13[11]/2:
+	(p0)	xor	t3=t3,te32	}	// 13[11]/3:
+{ .mmi;	(p17)	add	te0=2048,te0		// 13[11]/
+	(p17)	add	te1=2048+64-TE1,te1};;	// 13[11]/
+{ .mib;	(p0)	xor	t2=t2,te13		// 14[12]/2:done!
+	(p17)	add	te2=2048+128-TE2,te2}	// 14[12]/
+{ .mib;	(p0)	xor	t3=t3,te10		// 14[12]/3:done!
+	(p17)	add	te3=2048+192-TE3,te3	// 14[12]/
+	br.ctop.sptk	.Le_top		};;
+.Le_end:
+
+
+{ .mmi;	ld8	te12=[te0]		// prefetch Te4
+	ld8	te31=[te1]	}
+{ .mmi;	ld8	te10=[te2]
+	ld8	te32=[te3]	}
+
+{ .mmi;	LDKEY	t0=[rk0],2*KSZ		// 0/0:rk[0]
+	and	te33=s3,maskff		// 0/0:s3&0xff
+	extr.u	te22=s2,8,8	}	// 0/0:s2>>8&0xff
+{ .mmi; LDKEY	t1=[rk1],2*KSZ		// 0/1:rk[1]
+	and	te30=s0,maskff		// 0/1:s0&0xff
+	shr.u	te00=s0,twenty4	};;	// 0/0:s0>>24
+{ .mmi;	LDKEY	t2=[rk0],2*KSZ		// 1/2:rk[2]
+	add	te33=te33,te0		// 1/0:te0+s0>>24
+	extr.u	te23=s3,8,8	}	// 1/1:s3>>8&0xff
+{ .mmi;	LDKEY	t3=[rk1],2*KSZ		// 1/3:rk[3]
+	add	te30=te30,te0		// 1/1:te0+s0
+	shr.u	te01=s1,twenty4	};;	// 1/1:s1>>24
+{ .mmi;	ld1	te33=[te33]		// 2/0:te0[s3&0xff]
+	add	te22=te22,te0		// 2/0:te0+s2>>8&0xff
+	extr.u	te20=s0,8,8	}	// 2/2:s0>>8&0xff
+{ .mmi;	ld1	te30=[te30]		// 2/1:te0[s0]
+	add	te23=te23,te0		// 2/1:te0+s3>>8
+	shr.u	te02=s2,twenty4	};;	// 2/2:s2>>24
+{ .mmi;	ld1	te22=[te22]		// 3/0:te0[s2>>8]
+	add	te20=te20,te0		// 3/2:te0+s0>>8
+	extr.u	te21=s1,8,8	}	// 3/3:s1>>8&0xff
+{ .mmi;	ld1	te23=[te23]		// 3/1:te0[s3>>8]
+	add	te00=te00,te0		// 3/0:te0+s0>>24
+	shr.u	te03=s3,twenty4	};;	// 3/3:s3>>24
+{ .mmi;	ld1	te20=[te20]		// 4/2:te0[s0>>8]
+	add	te21=te21,te0		// 4/3:te0+s2
+	extr.u	te11=s1,16,8	}	// 4/0:s1>>16&0xff
+{ .mmi;	ld1	te00=[te00]		// 4/0:te0[s0>>24]
+	add	te01=te01,te0		// 4/1:te0+s1>>24
+	shr.u	te13=s3,sixteen	};;	// 4/2:s3>>16
+{ .mmi;	ld1	te21=[te21]		// 5/3:te0[s1>>8]
+	add	te11=te11,te0		// 5/0:te0+s1>>16
+	extr.u	te12=s2,16,8	}	// 5/1:s2>>16&0xff
+{ .mmi;	ld1	te01=[te01]		// 5/1:te0[s1>>24]
+	add	te02=te02,te0		// 5/2:te0+s2>>24
+	and	te31=s1,maskff	};;	// 5/2:s1&0xff
+{ .mmi;	ld1	te11=[te11]		// 6/0:te0[s1>>16]
+	add	te12=te12,te0		// 6/1:te0+s2>>16
+	extr.u	te10=s0,16,8	}	// 6/3:s0>>16&0xff
+{ .mmi;	ld1	te02=[te02]		// 6/2:te0[s2>>24]
+	add	te03=te03,te0		// 6/3:te0+s0>>16
+	and	te32=s2,maskff	};;	// 6/3:s2&0xff
+
+{ .mmi;	ld1	te12=[te12]		// 7/1:te0[s2>>16]
+	add	te31=te31,te0		// 7/2:te0+s1&0xff
+	dep	te33=te22,te33,8,8}	// 7/0:
+{ .mmi;	ld1	te03=[te03]		// 7/3:te0[s3>>24]
+	add	te32=te32,te0		// 7/3:te0+s2
+	and	te13=te13,maskff};;	// 7/2:s3>>16&0xff
+{ .mmi;	ld1	te31=[te31]		// 8/2:te0[s1]
+	add	te13=te13,te0		// 8/2:te0+s3>>16
+	dep	te30=te23,te30,8,8}	// 8/1:
+{ .mmi;	ld1	te32=[te32]		// 8/3:te0[s2]
+	add	te10=te10,te0		// 8/3:te0+s0>>16
+	shl	te00=te00,twenty4};;	// 8/0:
+{ .mii;	ld1	te13=[te13]		// 9/2:te0[s3>>16]
+	dep	te33=te11,te33,16,8	// 9/0:
+	shl	te01=te01,twenty4};;	// 9/1:
+{ .mii;	ld1	te10=[te10]		// 10/3:te0[s0>>16]
+	dep	te31=te20,te31,8,8	// 10/2:
+	shl	te02=te02,twenty4};;	// 10/2:
+{ .mii;	xor	t0=t0,te33		// 11/0:
+	dep	te32=te21,te32,8,8	// 11/3:
+	shl	te12=te12,sixteen};;	// 11/1:
+{ .mii;	xor	r16=t0,te00		// 12/0:done!
+	dep	te31=te13,te31,16,8	// 12/2:
+	shl	te03=te03,twenty4};;	// 12/3:
+{ .mmi;	xor	t1=t1,te01		// 13/1:
+	xor	t2=t2,te02		// 13/2:
+	dep	te32=te10,te32,16,8};;	// 13/3:
+{ .mmi;	xor	t1=t1,te30		// 14/1:
+	xor	r24=t2,te31		// 14/2:done!
+	xor	t3=t3,te32	};;	// 14/3:
+{ .mib;	xor	r20=t1,te12		// 15/1:done!
+	xor	r28=t3,te03		// 15/3:done!
+	br.ret.sptk	b6	};;
+.endp	_ia64_AES_encrypt#
+
+// void AES_encrypt (const void *in,void *out,const AES_KEY *key);
+.global	AES_encrypt#
+.proc	AES_encrypt#
+.align	32
+AES_encrypt:
+	.prologue
+	.save	ar.pfs,pfssave
+{ .mmi;	alloc	pfssave=ar.pfs,3,1,12,0
+	and	out0=3,in0
+	mov	r3=ip			}
+{ .mmi;	ADDP	in0=0,in0
+	mov	loc0=psr.um
+	ADDP	out11=KSZ*60,in2	};;	// &AES_KEY->rounds
+
+{ .mmi;	ld4	out11=[out11]			// AES_KEY->rounds
+	add	out8=(AES_Te#-AES_encrypt#),r3	// Te0
+	.save	pr,prsave
+	mov	prsave=pr		}
+{ .mmi;	rum	1<<3				// clear um.ac
+	.save	ar.lc,lcsave
+	mov	lcsave=ar.lc		};;
+
+	.body
+#if defined(_HPUX_SOURCE)	// HPUX is big-endian, cut 15+15 cycles...
+{ .mib; cmp.ne	p6,p0=out0,r0
+	add	out0=4,in0
+(p6)	br.dpnt.many	.Le_i_unaligned	};;
+
+{ .mmi;	ld4	out1=[in0],8		// s0
+	and	out9=3,in1
+	mov	twenty4=24		}
+{ .mmi;	ld4	out3=[out0],8		// s1
+	ADDP	rk0=0,in2
+	mov	sixteen=16		};;
+{ .mmi;	ld4	out5=[in0]		// s2
+	cmp.ne	p6,p0=out9,r0
+	mov	maskff=0xff		}
+{ .mmb;	ld4	out7=[out0]		// s3
+	ADDP	rk1=KSZ,in2
+	br.call.sptk.many	b6=_ia64_AES_encrypt	};;
+
+{ .mib;	ADDP	in0=4,in1
+	ADDP	in1=0,in1
+(p6)	br.spnt	.Le_o_unaligned		};;
+
+{ .mii;	mov	psr.um=loc0
+	mov	ar.pfs=pfssave
+	mov	ar.lc=lcsave		};;
+{ .mmi;	st4	[in1]=r16,8		// s0
+	st4	[in0]=r20,8		// s1
+	mov	pr=prsave,0x1ffff	};;
+{ .mmb;	st4	[in1]=r24		// s2
+	st4	[in0]=r28		// s3
+	br.ret.sptk.many	b0	};;
+#endif
+
+.align	32
+.Le_i_unaligned:
+{ .mmi;	add	out0=1,in0
+	add	out2=2,in0
+	add	out4=3,in0	};;
+{ .mmi;	ld1	r16=[in0],4
+	ld1	r17=[out0],4	}//;;
+{ .mmi;	ld1	r18=[out2],4
+	ld1	out1=[out4],4	};;	// s0
+{ .mmi;	ld1	r20=[in0],4
+	ld1	r21=[out0],4	}//;;
+{ .mmi;	ld1	r22=[out2],4
+	ld1	out3=[out4],4	};;	// s1
+{ .mmi;	ld1	r24=[in0],4
+	ld1	r25=[out0],4	}//;;
+{ .mmi;	ld1	r26=[out2],4
+	ld1	out5=[out4],4	};;	// s2
+{ .mmi;	ld1	r28=[in0]
+	ld1	r29=[out0]	}//;;
+{ .mmi;	ld1	r30=[out2]
+	ld1	out7=[out4]	};;	// s3
+
+{ .mii;
+	dep	out1=r16,out1,24,8	//;;
+	dep	out3=r20,out3,24,8	}//;;
+{ .mii;	ADDP	rk0=0,in2
+	dep	out5=r24,out5,24,8	//;;
+	dep	out7=r28,out7,24,8	};;
+{ .mii;	ADDP	rk1=KSZ,in2
+	dep	out1=r17,out1,16,8	//;;
+	dep	out3=r21,out3,16,8	}//;;
+{ .mii;	mov	twenty4=24
+	dep	out5=r25,out5,16,8	//;;
+	dep	out7=r29,out7,16,8	};;
+{ .mii;	mov	sixteen=16
+	dep	out1=r18,out1,8,8	//;;
+	dep	out3=r22,out3,8,8	}//;;
+{ .mii;	mov	maskff=0xff
+	dep	out5=r26,out5,8,8	//;;
+	dep	out7=r30,out7,8,8	};;
+
+{ .mib;	br.call.sptk.many	b6=_ia64_AES_encrypt	};;
+
+.Le_o_unaligned:
+{ .mii;	ADDP	out0=0,in1
+	extr.u	r17=r16,8,8			// s0
+	shr.u	r19=r16,twenty4		}//;;
+{ .mii;	ADDP	out1=1,in1
+	extr.u	r18=r16,16,8
+	shr.u	r23=r20,twenty4		}//;;	// s1
+{ .mii;	ADDP	out2=2,in1
+	extr.u	r21=r20,8,8
+	shr.u	r22=r20,sixteen		}//;;
+{ .mii;	ADDP	out3=3,in1
+	extr.u	r25=r24,8,8			// s2
+	shr.u	r27=r24,twenty4		};;
+{ .mii;	st1	[out3]=r16,4
+	extr.u	r26=r24,16,8
+	shr.u	r31=r28,twenty4		}//;;	// s3
+{ .mii;	st1	[out2]=r17,4
+	extr.u	r29=r28,8,8
+	shr.u	r30=r28,sixteen		}//;;
+
+{ .mmi;	st1	[out1]=r18,4
+	st1	[out0]=r19,4		};;
+{ .mmi;	st1	[out3]=r20,4
+	st1	[out2]=r21,4		}//;;
+{ .mmi;	st1	[out1]=r22,4
+	st1	[out0]=r23,4		};;
+{ .mmi;	st1	[out3]=r24,4
+	st1	[out2]=r25,4
+	mov	pr=prsave,0x1ffff	}//;;
+{ .mmi;	st1	[out1]=r26,4
+	st1	[out0]=r27,4
+	mov	ar.pfs=pfssave		};;
+{ .mmi;	st1	[out3]=r28
+	st1	[out2]=r29
+	mov	ar.lc=lcsave		}//;;
+{ .mmi;	st1	[out1]=r30
+	st1	[out0]=r31		}
+{ .mfb;	mov	psr.um=loc0			// restore user mask
+	br.ret.sptk.many	b0	};;
+.endp	AES_encrypt#
+
+// *AES_decrypt are autogenerated by the following script:
+#if 0
+#!/usr/bin/env perl
+print "// *AES_decrypt are autogenerated by the following script:\n#if 0\n";
+open(PROG,'<'.$0); while(<PROG>) { print; } close(PROG);
+print "#endif\n";
+while(<>) {
+	$process=1	if (/\.proc\s+_ia64_AES_encrypt/);
+	next		if (!$process);
+
+	#s/te00=s0/td00=s0/;	s/te00/td00/g;
+	s/te11=s1/td13=s3/;	s/te11/td13/g;
+	#s/te22=s2/td22=s2/;	s/te22/td22/g;
+	s/te33=s3/td31=s1/;	s/te33/td31/g;
+
+	#s/te01=s1/td01=s1/;	s/te01/td01/g;
+	s/te12=s2/td10=s0/;	s/te12/td10/g;
+	#s/te23=s3/td23=s3/;	s/te23/td23/g;
+	s/te30=s0/td32=s2/;	s/te30/td32/g;
+
+	#s/te02=s2/td02=s2/;	s/te02/td02/g;
+	s/te13=s3/td11=s1/;	s/te13/td11/g;
+	#s/te20=s0/td20=s0/;	s/te20/td20/g;
+	s/te31=s1/td33=s3/;	s/te31/td33/g;
+
+	#s/te03=s3/td03=s3/;	s/te03/td03/g;
+	s/te10=s0/td12=s2/;	s/te10/td12/g;
+	#s/te21=s1/td21=s1/;	s/te21/td21/g;
+	s/te32=s2/td30=s0/;	s/te32/td30/g;
+
+	s/td/te/g;
+
+	s/AES_encrypt/AES_decrypt/g;
+	s/\.Le_/.Ld_/g;
+	s/AES_Te#/AES_Td#/g;
+
+	print;
+
+	exit		if (/\.endp\s+AES_decrypt/);
+}
+#endif
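+// The #if 0 block above is meant to be self-applying: saved as a
+// standalone script and fed this file on stdin, it reprints itself,
+// then rewrites everything from _ia64_AES_encrypt onward with the
+// te->td operand renames, the .Le_->.Ld_ label changes and the
+// AES_Te->AES_Td table switch. A plausible invocation (file names
+// hypothetical):
+//
+//	perl regen-decrypt.pl <aes-ia64.S >aes-ia64-decrypt.S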
+.proc	_ia64_AES_decrypt#
+// Input:	rk0-rk1
+//		te0
+//		te3	as AES_KEY->rounds!!!
+//		s0-s3
+//		maskff,twenty4,sixteen
+// Output:	r16,r20,r24,r28 as s0-s3
+// Clobber:	r16-r31,rk0-rk1,r32-r43
+.align	32
+_ia64_AES_decrypt:
+	.prologue
+	.altrp	b6
+	.body
+{ .mmi;	alloc	r16=ar.pfs,12,0,0,8
+	LDKEY	t0=[rk0],2*KSZ
+	mov	pr.rot=1<<16	}
+{ .mmi;	LDKEY	t1=[rk1],2*KSZ
+	add	te1=TE1,te0
+	add	te3=-3,te3	};;
+{ .mib;	LDKEY	t2=[rk0],2*KSZ
+	mov	ar.ec=2		}
+{ .mib;	LDKEY	t3=[rk1],2*KSZ
+	add	te2=TE2,te0
+	brp.loop.imp	.Ld_top,.Ld_end-16	};;
+
+{ .mmi;	xor	s0=s0,t0
+	xor	s1=s1,t1
+	mov	ar.lc=te3	}
+{ .mmi;	xor	s2=s2,t2
+	xor	s3=s3,t3
+	add	te3=TE3,te0	};;
+
+.align	32
+.Ld_top:
+{ .mmi;	(p0)	LDKEY	t0=[rk0],2*KSZ		// 0/0:rk[0]
+	(p0)	and	te31=s1,maskff		// 0/0:s3&0xff
+	(p0)	extr.u	te22=s2,8,8	}	// 0/0:s2>>8&0xff
+{ .mmi; (p0)	LDKEY	t1=[rk1],2*KSZ		// 0/1:rk[1]
+	(p0)	and	te32=s2,maskff		// 0/1:s0&0xff
+	(p0)	shr.u	te00=s0,twenty4	};;	// 0/0:s0>>24
+{ .mmi;	(p0)	LDKEY	t2=[rk0],2*KSZ		// 1/2:rk[2]
+	(p0)	shladd	te31=te31,3,te3		// 1/0:te0+s0>>24
+	(p0)	extr.u	te23=s3,8,8	}	// 1/1:s3>>8&0xff
+{ .mmi;	(p0)	LDKEY	t3=[rk1],2*KSZ		// 1/3:rk[3]
+	(p0)	shladd	te32=te32,3,te3		// 1/1:te3+s0
+	(p0)	shr.u	te01=s1,twenty4	};;	// 1/1:s1>>24
+{ .mmi;	(p0)	ld4	te31=[te31]		// 2/0:te3[s3&0xff]
+	(p0)	shladd	te22=te22,3,te2		// 2/0:te2+s2>>8&0xff
+	(p0)	extr.u	te20=s0,8,8	}	// 2/2:s0>>8&0xff
+{ .mmi;	(p0)	ld4	te32=[te32]		// 2/1:te3[s0]
+	(p0)	shladd	te23=te23,3,te2		// 2/1:te2+s3>>8
+	(p0)	shr.u	te02=s2,twenty4	};;	// 2/2:s2>>24
+{ .mmi;	(p0)	ld4	te22=[te22]		// 3/0:te2[s2>>8]
+	(p0)	shladd	te20=te20,3,te2		// 3/2:te2+s0>>8
+	(p0)	extr.u	te21=s1,8,8	}	// 3/3:s1>>8&0xff
+{ .mmi;	(p0)	ld4	te23=[te23]		// 3/1:te2[s3>>8]
+	(p0)	shladd	te00=te00,3,te0		// 3/0:te0+s0>>24
+	(p0)	shr.u	te03=s3,twenty4	};;	// 3/3:s3>>24
+{ .mmi;	(p0)	ld4	te20=[te20]		// 4/2:te2[s0>>8]
+	(p0)	shladd	te21=te21,3,te2		// 4/3:te3+s2
+	(p0)	extr.u	te13=s3,16,8	}	// 4/0:s1>>16&0xff
+{ .mmi;	(p0)	ld4	te00=[te00]		// 4/0:te0[s0>>24]
+	(p0)	shladd	te01=te01,3,te0		// 4/1:te0+s1>>24
+	(p0)	shr.u	te11=s1,sixteen	};;	// 4/2:s3>>16
+{ .mmi;	(p0)	ld4	te21=[te21]		// 5/3:te2[s1>>8]
+	(p0)	shladd	te13=te13,3,te1		// 5/0:te1+s1>>16
+	(p0)	extr.u	te10=s0,16,8	}	// 5/1:s2>>16&0xff
+{ .mmi;	(p0)	ld4	te01=[te01]		// 5/1:te0[s1>>24]
+	(p0)	shladd	te02=te02,3,te0		// 5/2:te0+s2>>24
+	(p0)	and	te33=s3,maskff	};;	// 5/2:s1&0xff
+{ .mmi;	(p0)	ld4	te13=[te13]		// 6/0:te1[s1>>16]
+	(p0)	shladd	te10=te10,3,te1		// 6/1:te1+s2>>16
+	(p0)	extr.u	te12=s2,16,8	}	// 6/3:s0>>16&0xff
+{ .mmi;	(p0)	ld4	te02=[te02]		// 6/2:te0[s2>>24]
+	(p0)	shladd	te03=te03,3,te0		// 6/3:te1+s0>>16
+	(p0)	and	te30=s0,maskff	};;	// 6/3:s2&0xff
+
+{ .mmi;	(p0)	ld4	te10=[te10]		// 7/1:te1[s2>>16]
+	(p0)	shladd	te33=te33,3,te3		// 7/2:te3+s1&0xff
+	(p0)	and	te11=te11,maskff}	// 7/2:s3>>16&0xff
+{ .mmi;	(p0)	ld4	te03=[te03]		// 7/3:te0[s3>>24]
+	(p0)	shladd	te30=te30,3,te3		// 7/3:te3+s2
+	(p0)	xor	t0=t0,te31	};;	// 7/0:
+{ .mmi;	(p0)	ld4	te33=[te33]		// 8/2:te3[s1]
+	(p0)	shladd	te11=te11,3,te1		// 8/2:te1+s3>>16
+	(p0)	xor	t0=t0,te22	}	// 8/0:
+{ .mmi;	(p0)	ld4	te30=[te30]		// 8/3:te3[s2]
+	(p0)	shladd	te12=te12,3,te1		// 8/3:te1+s0>>16
+	(p0)	xor	t1=t1,te32	};;	// 8/1:
+{ .mmi;	(p0)	ld4	te11=[te11]		// 9/2:te1[s3>>16]
+	(p0)	ld4	te12=[te12]		// 9/3:te1[s0>>16]
+	(p0)	xor	t0=t0,te00	};;	// 9/0:		!L2 scheduling
+{ .mmi;	(p0)	xor	t1=t1,te23		// 10[9]/1:	
+	(p0)	xor	t2=t2,te20		// 10[9]/2:
+	(p0)	xor	t3=t3,te21	};;	// 10[9]/3:
+{ .mmi;	(p0)	xor	t0=t0,te13		// 11[10]/0:done!
+	(p0)	xor	t1=t1,te01		// 11[10]/1:
+	(p0)	xor	t2=t2,te02	};;	// 11[10]/2:	!L2 scheduling
+{ .mmi;	(p0)	xor	t3=t3,te03		// 12[10]/3:
+	(p16)	cmp.eq	p0,p17=r0,r0 	};;	// 12[10]/clear (p17)
+{ .mmi;	(p0)	xor	t1=t1,te10		// 13[11]/1:done!
+	(p0)	xor	t2=t2,te33		// 13[11]/2:
+	(p0)	xor	t3=t3,te30	}	// 13[11]/3:
+{ .mmi;	(p17)	add	te0=2048,te0		// 13[11]/
+	(p17)	add	te1=2048+64-TE1,te1};;	// 13[11]/
+{ .mib;	(p0)	xor	t2=t2,te11		// 14[12]/2:done!
+	(p17)	add	te2=2048+128-TE2,te2}	// 14[12]/
+{ .mib;	(p0)	xor	t3=t3,te12		// 14[12]/3:done!
+	(p17)	add	te3=2048+192-TE3,te3	// 14[12]/
+	br.ctop.sptk	.Ld_top		};;
+.Ld_end:
+
+
+{ .mmi;	ld8	te10=[te0]		// prefetch Td4
+	ld8	te33=[te1]	}
+{ .mmi;	ld8	te12=[te2]
+	ld8	te30=[te3]	}
+
+{ .mmi;	LDKEY	t0=[rk0],2*KSZ		// 0/0:rk[0]
+	and	te31=s1,maskff		// 0/0:s3&0xff
+	extr.u	te22=s2,8,8	}	// 0/0:s2>>8&0xff
+{ .mmi; LDKEY	t1=[rk1],2*KSZ		// 0/1:rk[1]
+	and	te32=s2,maskff		// 0/1:s0&0xff
+	shr.u	te00=s0,twenty4	};;	// 0/0:s0>>24
+{ .mmi;	LDKEY	t2=[rk0],2*KSZ		// 1/2:rk[2]
+	add	te31=te31,te0		// 1/0:te0+s0>>24
+	extr.u	te23=s3,8,8	}	// 1/1:s3>>8&0xff
+{ .mmi;	LDKEY	t3=[rk1],2*KSZ		// 1/3:rk[3]
+	add	te32=te32,te0		// 1/1:te0+s0
+	shr.u	te01=s1,twenty4	};;	// 1/1:s1>>24
+{ .mmi;	ld1	te31=[te31]		// 2/0:te0[s3&0xff]
+	add	te22=te22,te0		// 2/0:te0+s2>>8&0xff
+	extr.u	te20=s0,8,8	}	// 2/2:s0>>8&0xff
+{ .mmi;	ld1	te32=[te32]		// 2/1:te0[s0]
+	add	te23=te23,te0		// 2/1:te0+s3>>8
+	shr.u	te02=s2,twenty4	};;	// 2/2:s2>>24
+{ .mmi;	ld1	te22=[te22]		// 3/0:te0[s2>>8]
+	add	te20=te20,te0		// 3/2:te0+s0>>8
+	extr.u	te21=s1,8,8	}	// 3/3:s1>>8&0xff
+{ .mmi;	ld1	te23=[te23]		// 3/1:te0[s3>>8]
+	add	te00=te00,te0		// 3/0:te0+s0>>24
+	shr.u	te03=s3,twenty4	};;	// 3/3:s3>>24
+{ .mmi;	ld1	te20=[te20]		// 4/2:te0[s0>>8]
+	add	te21=te21,te0		// 4/3:te0+s2
+	extr.u	te13=s3,16,8	}	// 4/0:s1>>16&0xff
+{ .mmi;	ld1	te00=[te00]		// 4/0:te0[s0>>24]
+	add	te01=te01,te0		// 4/1:te0+s1>>24
+	shr.u	te11=s1,sixteen	};;	// 4/2:s3>>16
+{ .mmi;	ld1	te21=[te21]		// 5/3:te0[s1>>8]
+	add	te13=te13,te0		// 5/0:te0+s1>>16
+	extr.u	te10=s0,16,8	}	// 5/1:s2>>16&0xff
+{ .mmi;	ld1	te01=[te01]		// 5/1:te0[s1>>24]
+	add	te02=te02,te0		// 5/2:te0+s2>>24
+	and	te33=s3,maskff	};;	// 5/2:s1&0xff
+{ .mmi;	ld1	te13=[te13]		// 6/0:te0[s1>>16]
+	add	te10=te10,te0		// 6/1:te0+s2>>16
+	extr.u	te12=s2,16,8	}	// 6/3:s0>>16&0xff
+{ .mmi;	ld1	te02=[te02]		// 6/2:te0[s2>>24]
+	add	te03=te03,te0		// 6/3:te0+s0>>16
+	and	te30=s0,maskff	};;	// 6/3:s2&0xff
+
+{ .mmi;	ld1	te10=[te10]		// 7/1:te0[s2>>16]
+	add	te33=te33,te0		// 7/2:te0+s1&0xff
+	dep	te31=te22,te31,8,8}	// 7/0:
+{ .mmi;	ld1	te03=[te03]		// 7/3:te0[s3>>24]
+	add	te30=te30,te0		// 7/3:te0+s2
+	and	te11=te11,maskff};;	// 7/2:s3>>16&0xff
+{ .mmi;	ld1	te33=[te33]		// 8/2:te0[s1]
+	add	te11=te11,te0		// 8/2:te0+s3>>16
+	dep	te32=te23,te32,8,8}	// 8/1:
+{ .mmi;	ld1	te30=[te30]		// 8/3:te0[s2]
+	add	te12=te12,te0		// 8/3:te0+s0>>16
+	shl	te00=te00,twenty4};;	// 8/0:
+{ .mii;	ld1	te11=[te11]		// 9/2:te0[s3>>16]
+	dep	te31=te13,te31,16,8	// 9/0:
+	shl	te01=te01,twenty4};;	// 9/1:
+{ .mii;	ld1	te12=[te12]		// 10/3:te0[s0>>16]
+	dep	te33=te20,te33,8,8	// 10/2:
+	shl	te02=te02,twenty4};;	// 10/2:
+{ .mii;	xor	t0=t0,te31		// 11/0:
+	dep	te30=te21,te30,8,8	// 11/3:
+	shl	te10=te10,sixteen};;	// 11/1:
+{ .mii;	xor	r16=t0,te00		// 12/0:done!
+	dep	te33=te11,te33,16,8	// 12/2:
+	shl	te03=te03,twenty4};;	// 12/3:
+{ .mmi;	xor	t1=t1,te01		// 13/1:
+	xor	t2=t2,te02		// 13/2:
+	dep	te30=te12,te30,16,8};;	// 13/3:
+{ .mmi;	xor	t1=t1,te32		// 14/1:
+	xor	r24=t2,te33		// 14/2:done!
+	xor	t3=t3,te30	};;	// 14/3:
+{ .mib;	xor	r20=t1,te10		// 15/1:done!
+	xor	r28=t3,te03		// 15/3:done!
+	br.ret.sptk	b6	};;
+.endp	_ia64_AES_decrypt#
+
+// void AES_decrypt (const void *in,void *out,const AES_KEY *key);
+.global	AES_decrypt#
+.proc	AES_decrypt#
+.align	32
+AES_decrypt:
+	.prologue
+	.save	ar.pfs,pfssave
+{ .mmi;	alloc	pfssave=ar.pfs,3,1,12,0
+	and	out0=3,in0
+	mov	r3=ip			}
+{ .mmi;	ADDP	in0=0,in0
+	mov	loc0=psr.um
+	ADDP	out11=KSZ*60,in2	};;	// &AES_KEY->rounds
+
+{ .mmi;	ld4	out11=[out11]			// AES_KEY->rounds
+	add	out8=(AES_Td#-AES_decrypt#),r3	// Td0
+	.save	pr,prsave
+	mov	prsave=pr		}
+{ .mmi;	rum	1<<3				// clear um.ac
+	.save	ar.lc,lcsave
+	mov	lcsave=ar.lc		};;
+
+	.body
+#if defined(_HPUX_SOURCE)	// HPUX is big-endian, cut 15+15 cycles...
+{ .mib; cmp.ne	p6,p0=out0,r0
+	add	out0=4,in0
+(p6)	br.dpnt.many	.Ld_i_unaligned	};;
+
+{ .mmi;	ld4	out1=[in0],8		// s0
+	and	out9=3,in1
+	mov	twenty4=24		}
+{ .mmi;	ld4	out3=[out0],8		// s1
+	ADDP	rk0=0,in2
+	mov	sixteen=16		};;
+{ .mmi;	ld4	out5=[in0]		// s2
+	cmp.ne	p6,p0=out9,r0
+	mov	maskff=0xff		}
+{ .mmb;	ld4	out7=[out0]		// s3
+	ADDP	rk1=KSZ,in2
+	br.call.sptk.many	b6=_ia64_AES_decrypt	};;
+
+{ .mib;	ADDP	in0=4,in1
+	ADDP	in1=0,in1
+(p6)	br.spnt	.Ld_o_unaligned		};;
+
+{ .mii;	mov	psr.um=loc0
+	mov	ar.pfs=pfssave
+	mov	ar.lc=lcsave		};;
+{ .mmi;	st4	[in1]=r16,8		// s0
+	st4	[in0]=r20,8		// s1
+	mov	pr=prsave,0x1ffff	};;
+{ .mmb;	st4	[in1]=r24		// s2
+	st4	[in0]=r28		// s3
+	br.ret.sptk.many	b0	};;
+#endif
+
+.align	32
+.Ld_i_unaligned:
+{ .mmi;	add	out0=1,in0
+	add	out2=2,in0
+	add	out4=3,in0	};;
+{ .mmi;	ld1	r16=[in0],4
+	ld1	r17=[out0],4	}//;;
+{ .mmi;	ld1	r18=[out2],4
+	ld1	out1=[out4],4	};;	// s0
+{ .mmi;	ld1	r20=[in0],4
+	ld1	r21=[out0],4	}//;;
+{ .mmi;	ld1	r22=[out2],4
+	ld1	out3=[out4],4	};;	// s1
+{ .mmi;	ld1	r24=[in0],4
+	ld1	r25=[out0],4	}//;;
+{ .mmi;	ld1	r26=[out2],4
+	ld1	out5=[out4],4	};;	// s2
+{ .mmi;	ld1	r28=[in0]
+	ld1	r29=[out0]	}//;;
+{ .mmi;	ld1	r30=[out2]
+	ld1	out7=[out4]	};;	// s3
+
+{ .mii;
+	dep	out1=r16,out1,24,8	//;;
+	dep	out3=r20,out3,24,8	}//;;
+{ .mii;	ADDP	rk0=0,in2
+	dep	out5=r24,out5,24,8	//;;
+	dep	out7=r28,out7,24,8	};;
+{ .mii;	ADDP	rk1=KSZ,in2
+	dep	out1=r17,out1,16,8	//;;
+	dep	out3=r21,out3,16,8	}//;;
+{ .mii;	mov	twenty4=24
+	dep	out5=r25,out5,16,8	//;;
+	dep	out7=r29,out7,16,8	};;
+{ .mii;	mov	sixteen=16
+	dep	out1=r18,out1,8,8	//;;
+	dep	out3=r22,out3,8,8	}//;;
+{ .mii;	mov	maskff=0xff
+	dep	out5=r26,out5,8,8	//;;
+	dep	out7=r30,out7,8,8	};;
+
+{ .mib;	br.call.sptk.many	b6=_ia64_AES_decrypt	};;
+
+.Ld_o_unaligned:
+{ .mii;	ADDP	out0=0,in1
+	extr.u	r17=r16,8,8			// s0
+	shr.u	r19=r16,twenty4		}//;;
+{ .mii;	ADDP	out1=1,in1
+	extr.u	r18=r16,16,8
+	shr.u	r23=r20,twenty4		}//;;	// s1
+{ .mii;	ADDP	out2=2,in1
+	extr.u	r21=r20,8,8
+	shr.u	r22=r20,sixteen		}//;;
+{ .mii;	ADDP	out3=3,in1
+	extr.u	r25=r24,8,8			// s2
+	shr.u	r27=r24,twenty4		};;
+{ .mii;	st1	[out3]=r16,4
+	extr.u	r26=r24,16,8
+	shr.u	r31=r28,twenty4		}//;;	// s3
+{ .mii;	st1	[out2]=r17,4
+	extr.u	r29=r28,8,8
+	shr.u	r30=r28,sixteen		}//;;
+
+{ .mmi;	st1	[out1]=r18,4
+	st1	[out0]=r19,4		};;
+{ .mmi;	st1	[out3]=r20,4
+	st1	[out2]=r21,4		}//;;
+{ .mmi;	st1	[out1]=r22,4
+	st1	[out0]=r23,4		};;
+{ .mmi;	st1	[out3]=r24,4
+	st1	[out2]=r25,4
+	mov	pr=prsave,0x1ffff	}//;;
+{ .mmi;	st1	[out1]=r26,4
+	st1	[out0]=r27,4
+	mov	ar.pfs=pfssave		};;
+{ .mmi;	st1	[out3]=r28
+	st1	[out2]=r29
+	mov	ar.lc=lcsave		}//;;
+{ .mmi;	st1	[out1]=r30
+	st1	[out0]=r31		}
+{ .mfb;	mov	psr.um=loc0			// restore user mask
+	br.ret.sptk.many	b0	};;
+.endp	AES_decrypt#
+
+// leave it in .text segment...
+.align	64
+.global	AES_Te#
+.type	AES_Te#,@object
+AES_Te:	data4	0xc66363a5,0xc66363a5, 0xf87c7c84,0xf87c7c84
+	data4	0xee777799,0xee777799, 0xf67b7b8d,0xf67b7b8d
+	data4	0xfff2f20d,0xfff2f20d, 0xd66b6bbd,0xd66b6bbd
+	data4	0xde6f6fb1,0xde6f6fb1, 0x91c5c554,0x91c5c554
+	data4	0x60303050,0x60303050, 0x02010103,0x02010103
+	data4	0xce6767a9,0xce6767a9, 0x562b2b7d,0x562b2b7d
+	data4	0xe7fefe19,0xe7fefe19, 0xb5d7d762,0xb5d7d762
+	data4	0x4dababe6,0x4dababe6, 0xec76769a,0xec76769a
+	data4	0x8fcaca45,0x8fcaca45, 0x1f82829d,0x1f82829d
+	data4	0x89c9c940,0x89c9c940, 0xfa7d7d87,0xfa7d7d87
+	data4	0xeffafa15,0xeffafa15, 0xb25959eb,0xb25959eb
+	data4	0x8e4747c9,0x8e4747c9, 0xfbf0f00b,0xfbf0f00b
+	data4	0x41adadec,0x41adadec, 0xb3d4d467,0xb3d4d467
+	data4	0x5fa2a2fd,0x5fa2a2fd, 0x45afafea,0x45afafea
+	data4	0x239c9cbf,0x239c9cbf, 0x53a4a4f7,0x53a4a4f7
+	data4	0xe4727296,0xe4727296, 0x9bc0c05b,0x9bc0c05b
+	data4	0x75b7b7c2,0x75b7b7c2, 0xe1fdfd1c,0xe1fdfd1c
+	data4	0x3d9393ae,0x3d9393ae, 0x4c26266a,0x4c26266a
+	data4	0x6c36365a,0x6c36365a, 0x7e3f3f41,0x7e3f3f41
+	data4	0xf5f7f702,0xf5f7f702, 0x83cccc4f,0x83cccc4f
+	data4	0x6834345c,0x6834345c, 0x51a5a5f4,0x51a5a5f4
+	data4	0xd1e5e534,0xd1e5e534, 0xf9f1f108,0xf9f1f108
+	data4	0xe2717193,0xe2717193, 0xabd8d873,0xabd8d873
+	data4	0x62313153,0x62313153, 0x2a15153f,0x2a15153f
+	data4	0x0804040c,0x0804040c, 0x95c7c752,0x95c7c752
+	data4	0x46232365,0x46232365, 0x9dc3c35e,0x9dc3c35e
+	data4	0x30181828,0x30181828, 0x379696a1,0x379696a1
+	data4	0x0a05050f,0x0a05050f, 0x2f9a9ab5,0x2f9a9ab5
+	data4	0x0e070709,0x0e070709, 0x24121236,0x24121236
+	data4	0x1b80809b,0x1b80809b, 0xdfe2e23d,0xdfe2e23d
+	data4	0xcdebeb26,0xcdebeb26, 0x4e272769,0x4e272769
+	data4	0x7fb2b2cd,0x7fb2b2cd, 0xea75759f,0xea75759f
+	data4	0x1209091b,0x1209091b, 0x1d83839e,0x1d83839e
+	data4	0x582c2c74,0x582c2c74, 0x341a1a2e,0x341a1a2e
+	data4	0x361b1b2d,0x361b1b2d, 0xdc6e6eb2,0xdc6e6eb2
+	data4	0xb45a5aee,0xb45a5aee, 0x5ba0a0fb,0x5ba0a0fb
+	data4	0xa45252f6,0xa45252f6, 0x763b3b4d,0x763b3b4d
+	data4	0xb7d6d661,0xb7d6d661, 0x7db3b3ce,0x7db3b3ce
+	data4	0x5229297b,0x5229297b, 0xdde3e33e,0xdde3e33e
+	data4	0x5e2f2f71,0x5e2f2f71, 0x13848497,0x13848497
+	data4	0xa65353f5,0xa65353f5, 0xb9d1d168,0xb9d1d168
+	data4	0x00000000,0x00000000, 0xc1eded2c,0xc1eded2c
+	data4	0x40202060,0x40202060, 0xe3fcfc1f,0xe3fcfc1f
+	data4	0x79b1b1c8,0x79b1b1c8, 0xb65b5bed,0xb65b5bed
+	data4	0xd46a6abe,0xd46a6abe, 0x8dcbcb46,0x8dcbcb46
+	data4	0x67bebed9,0x67bebed9, 0x7239394b,0x7239394b
+	data4	0x944a4ade,0x944a4ade, 0x984c4cd4,0x984c4cd4
+	data4	0xb05858e8,0xb05858e8, 0x85cfcf4a,0x85cfcf4a
+	data4	0xbbd0d06b,0xbbd0d06b, 0xc5efef2a,0xc5efef2a
+	data4	0x4faaaae5,0x4faaaae5, 0xedfbfb16,0xedfbfb16
+	data4	0x864343c5,0x864343c5, 0x9a4d4dd7,0x9a4d4dd7
+	data4	0x66333355,0x66333355, 0x11858594,0x11858594
+	data4	0x8a4545cf,0x8a4545cf, 0xe9f9f910,0xe9f9f910
+	data4	0x04020206,0x04020206, 0xfe7f7f81,0xfe7f7f81
+	data4	0xa05050f0,0xa05050f0, 0x783c3c44,0x783c3c44
+	data4	0x259f9fba,0x259f9fba, 0x4ba8a8e3,0x4ba8a8e3
+	data4	0xa25151f3,0xa25151f3, 0x5da3a3fe,0x5da3a3fe
+	data4	0x804040c0,0x804040c0, 0x058f8f8a,0x058f8f8a
+	data4	0x3f9292ad,0x3f9292ad, 0x219d9dbc,0x219d9dbc
+	data4	0x70383848,0x70383848, 0xf1f5f504,0xf1f5f504
+	data4	0x63bcbcdf,0x63bcbcdf, 0x77b6b6c1,0x77b6b6c1
+	data4	0xafdada75,0xafdada75, 0x42212163,0x42212163
+	data4	0x20101030,0x20101030, 0xe5ffff1a,0xe5ffff1a
+	data4	0xfdf3f30e,0xfdf3f30e, 0xbfd2d26d,0xbfd2d26d
+	data4	0x81cdcd4c,0x81cdcd4c, 0x180c0c14,0x180c0c14
+	data4	0x26131335,0x26131335, 0xc3ecec2f,0xc3ecec2f
+	data4	0xbe5f5fe1,0xbe5f5fe1, 0x359797a2,0x359797a2
+	data4	0x884444cc,0x884444cc, 0x2e171739,0x2e171739
+	data4	0x93c4c457,0x93c4c457, 0x55a7a7f2,0x55a7a7f2
+	data4	0xfc7e7e82,0xfc7e7e82, 0x7a3d3d47,0x7a3d3d47
+	data4	0xc86464ac,0xc86464ac, 0xba5d5de7,0xba5d5de7
+	data4	0x3219192b,0x3219192b, 0xe6737395,0xe6737395
+	data4	0xc06060a0,0xc06060a0, 0x19818198,0x19818198
+	data4	0x9e4f4fd1,0x9e4f4fd1, 0xa3dcdc7f,0xa3dcdc7f
+	data4	0x44222266,0x44222266, 0x542a2a7e,0x542a2a7e
+	data4	0x3b9090ab,0x3b9090ab, 0x0b888883,0x0b888883
+	data4	0x8c4646ca,0x8c4646ca, 0xc7eeee29,0xc7eeee29
+	data4	0x6bb8b8d3,0x6bb8b8d3, 0x2814143c,0x2814143c
+	data4	0xa7dede79,0xa7dede79, 0xbc5e5ee2,0xbc5e5ee2
+	data4	0x160b0b1d,0x160b0b1d, 0xaddbdb76,0xaddbdb76
+	data4	0xdbe0e03b,0xdbe0e03b, 0x64323256,0x64323256
+	data4	0x743a3a4e,0x743a3a4e, 0x140a0a1e,0x140a0a1e
+	data4	0x924949db,0x924949db, 0x0c06060a,0x0c06060a
+	data4	0x4824246c,0x4824246c, 0xb85c5ce4,0xb85c5ce4
+	data4	0x9fc2c25d,0x9fc2c25d, 0xbdd3d36e,0xbdd3d36e
+	data4	0x43acacef,0x43acacef, 0xc46262a6,0xc46262a6
+	data4	0x399191a8,0x399191a8, 0x319595a4,0x319595a4
+	data4	0xd3e4e437,0xd3e4e437, 0xf279798b,0xf279798b
+	data4	0xd5e7e732,0xd5e7e732, 0x8bc8c843,0x8bc8c843
+	data4	0x6e373759,0x6e373759, 0xda6d6db7,0xda6d6db7
+	data4	0x018d8d8c,0x018d8d8c, 0xb1d5d564,0xb1d5d564
+	data4	0x9c4e4ed2,0x9c4e4ed2, 0x49a9a9e0,0x49a9a9e0
+	data4	0xd86c6cb4,0xd86c6cb4, 0xac5656fa,0xac5656fa
+	data4	0xf3f4f407,0xf3f4f407, 0xcfeaea25,0xcfeaea25
+	data4	0xca6565af,0xca6565af, 0xf47a7a8e,0xf47a7a8e
+	data4	0x47aeaee9,0x47aeaee9, 0x10080818,0x10080818
+	data4	0x6fbabad5,0x6fbabad5, 0xf0787888,0xf0787888
+	data4	0x4a25256f,0x4a25256f, 0x5c2e2e72,0x5c2e2e72
+	data4	0x381c1c24,0x381c1c24, 0x57a6a6f1,0x57a6a6f1
+	data4	0x73b4b4c7,0x73b4b4c7, 0x97c6c651,0x97c6c651
+	data4	0xcbe8e823,0xcbe8e823, 0xa1dddd7c,0xa1dddd7c
+	data4	0xe874749c,0xe874749c, 0x3e1f1f21,0x3e1f1f21
+	data4	0x964b4bdd,0x964b4bdd, 0x61bdbddc,0x61bdbddc
+	data4	0x0d8b8b86,0x0d8b8b86, 0x0f8a8a85,0x0f8a8a85
+	data4	0xe0707090,0xe0707090, 0x7c3e3e42,0x7c3e3e42
+	data4	0x71b5b5c4,0x71b5b5c4, 0xcc6666aa,0xcc6666aa
+	data4	0x904848d8,0x904848d8, 0x06030305,0x06030305
+	data4	0xf7f6f601,0xf7f6f601, 0x1c0e0e12,0x1c0e0e12
+	data4	0xc26161a3,0xc26161a3, 0x6a35355f,0x6a35355f
+	data4	0xae5757f9,0xae5757f9, 0x69b9b9d0,0x69b9b9d0
+	data4	0x17868691,0x17868691, 0x99c1c158,0x99c1c158
+	data4	0x3a1d1d27,0x3a1d1d27, 0x279e9eb9,0x279e9eb9
+	data4	0xd9e1e138,0xd9e1e138, 0xebf8f813,0xebf8f813
+	data4	0x2b9898b3,0x2b9898b3, 0x22111133,0x22111133
+	data4	0xd26969bb,0xd26969bb, 0xa9d9d970,0xa9d9d970
+	data4	0x078e8e89,0x078e8e89, 0x339494a7,0x339494a7
+	data4	0x2d9b9bb6,0x2d9b9bb6, 0x3c1e1e22,0x3c1e1e22
+	data4	0x15878792,0x15878792, 0xc9e9e920,0xc9e9e920
+	data4	0x87cece49,0x87cece49, 0xaa5555ff,0xaa5555ff
+	data4	0x50282878,0x50282878, 0xa5dfdf7a,0xa5dfdf7a
+	data4	0x038c8c8f,0x038c8c8f, 0x59a1a1f8,0x59a1a1f8
+	data4	0x09898980,0x09898980, 0x1a0d0d17,0x1a0d0d17
+	data4	0x65bfbfda,0x65bfbfda, 0xd7e6e631,0xd7e6e631
+	data4	0x844242c6,0x844242c6, 0xd06868b8,0xd06868b8
+	data4	0x824141c3,0x824141c3, 0x299999b0,0x299999b0
+	data4	0x5a2d2d77,0x5a2d2d77, 0x1e0f0f11,0x1e0f0f11
+	data4	0x7bb0b0cb,0x7bb0b0cb, 0xa85454fc,0xa85454fc
+	data4	0x6dbbbbd6,0x6dbbbbd6, 0x2c16163a,0x2c16163a
+// Te4:
+	data1	0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5
+	data1	0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76
+	data1	0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0
+	data1	0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0
+	data1	0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc
+	data1	0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15
+	data1	0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a
+	data1	0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75
+	data1	0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0
+	data1	0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84
+	data1	0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b
+	data1	0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf
+	data1	0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85
+	data1	0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8
+	data1	0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5
+	data1	0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2
+	data1	0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17
+	data1	0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73
+	data1	0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88
+	data1	0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb
+	data1	0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c
+	data1	0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79
+	data1	0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9
+	data1	0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08
+	data1	0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6
+	data1	0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a
+	data1	0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e
+	data1	0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e
+	data1	0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94
+	data1	0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf
+	data1	0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68
+	data1	0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16
+.size	AES_Te#,2048+256	// HP-UX assembler fails to ".-AES_Te#"
+
+.align	64
+.global	AES_Td#
+.type	AES_Td#,@object
+AES_Td:	data4	0x51f4a750,0x51f4a750, 0x7e416553,0x7e416553
+	data4	0x1a17a4c3,0x1a17a4c3, 0x3a275e96,0x3a275e96
+	data4	0x3bab6bcb,0x3bab6bcb, 0x1f9d45f1,0x1f9d45f1
+	data4	0xacfa58ab,0xacfa58ab, 0x4be30393,0x4be30393
+	data4	0x2030fa55,0x2030fa55, 0xad766df6,0xad766df6
+	data4	0x88cc7691,0x88cc7691, 0xf5024c25,0xf5024c25
+	data4	0x4fe5d7fc,0x4fe5d7fc, 0xc52acbd7,0xc52acbd7
+	data4	0x26354480,0x26354480, 0xb562a38f,0xb562a38f
+	data4	0xdeb15a49,0xdeb15a49, 0x25ba1b67,0x25ba1b67
+	data4	0x45ea0e98,0x45ea0e98, 0x5dfec0e1,0x5dfec0e1
+	data4	0xc32f7502,0xc32f7502, 0x814cf012,0x814cf012
+	data4	0x8d4697a3,0x8d4697a3, 0x6bd3f9c6,0x6bd3f9c6
+	data4	0x038f5fe7,0x038f5fe7, 0x15929c95,0x15929c95
+	data4	0xbf6d7aeb,0xbf6d7aeb, 0x955259da,0x955259da
+	data4	0xd4be832d,0xd4be832d, 0x587421d3,0x587421d3
+	data4	0x49e06929,0x49e06929, 0x8ec9c844,0x8ec9c844
+	data4	0x75c2896a,0x75c2896a, 0xf48e7978,0xf48e7978
+	data4	0x99583e6b,0x99583e6b, 0x27b971dd,0x27b971dd
+	data4	0xbee14fb6,0xbee14fb6, 0xf088ad17,0xf088ad17
+	data4	0xc920ac66,0xc920ac66, 0x7dce3ab4,0x7dce3ab4
+	data4	0x63df4a18,0x63df4a18, 0xe51a3182,0xe51a3182
+	data4	0x97513360,0x97513360, 0x62537f45,0x62537f45
+	data4	0xb16477e0,0xb16477e0, 0xbb6bae84,0xbb6bae84
+	data4	0xfe81a01c,0xfe81a01c, 0xf9082b94,0xf9082b94
+	data4	0x70486858,0x70486858, 0x8f45fd19,0x8f45fd19
+	data4	0x94de6c87,0x94de6c87, 0x527bf8b7,0x527bf8b7
+	data4	0xab73d323,0xab73d323, 0x724b02e2,0x724b02e2
+	data4	0xe31f8f57,0xe31f8f57, 0x6655ab2a,0x6655ab2a
+	data4	0xb2eb2807,0xb2eb2807, 0x2fb5c203,0x2fb5c203
+	data4	0x86c57b9a,0x86c57b9a, 0xd33708a5,0xd33708a5
+	data4	0x302887f2,0x302887f2, 0x23bfa5b2,0x23bfa5b2
+	data4	0x02036aba,0x02036aba, 0xed16825c,0xed16825c
+	data4	0x8acf1c2b,0x8acf1c2b, 0xa779b492,0xa779b492
+	data4	0xf307f2f0,0xf307f2f0, 0x4e69e2a1,0x4e69e2a1
+	data4	0x65daf4cd,0x65daf4cd, 0x0605bed5,0x0605bed5
+	data4	0xd134621f,0xd134621f, 0xc4a6fe8a,0xc4a6fe8a
+	data4	0x342e539d,0x342e539d, 0xa2f355a0,0xa2f355a0
+	data4	0x058ae132,0x058ae132, 0xa4f6eb75,0xa4f6eb75
+	data4	0x0b83ec39,0x0b83ec39, 0x4060efaa,0x4060efaa
+	data4	0x5e719f06,0x5e719f06, 0xbd6e1051,0xbd6e1051
+	data4	0x3e218af9,0x3e218af9, 0x96dd063d,0x96dd063d
+	data4	0xdd3e05ae,0xdd3e05ae, 0x4de6bd46,0x4de6bd46
+	data4	0x91548db5,0x91548db5, 0x71c45d05,0x71c45d05
+	data4	0x0406d46f,0x0406d46f, 0x605015ff,0x605015ff
+	data4	0x1998fb24,0x1998fb24, 0xd6bde997,0xd6bde997
+	data4	0x894043cc,0x894043cc, 0x67d99e77,0x67d99e77
+	data4	0xb0e842bd,0xb0e842bd, 0x07898b88,0x07898b88
+	data4	0xe7195b38,0xe7195b38, 0x79c8eedb,0x79c8eedb
+	data4	0xa17c0a47,0xa17c0a47, 0x7c420fe9,0x7c420fe9
+	data4	0xf8841ec9,0xf8841ec9, 0x00000000,0x00000000
+	data4	0x09808683,0x09808683, 0x322bed48,0x322bed48
+	data4	0x1e1170ac,0x1e1170ac, 0x6c5a724e,0x6c5a724e
+	data4	0xfd0efffb,0xfd0efffb, 0x0f853856,0x0f853856
+	data4	0x3daed51e,0x3daed51e, 0x362d3927,0x362d3927
+	data4	0x0a0fd964,0x0a0fd964, 0x685ca621,0x685ca621
+	data4	0x9b5b54d1,0x9b5b54d1, 0x24362e3a,0x24362e3a
+	data4	0x0c0a67b1,0x0c0a67b1, 0x9357e70f,0x9357e70f
+	data4	0xb4ee96d2,0xb4ee96d2, 0x1b9b919e,0x1b9b919e
+	data4	0x80c0c54f,0x80c0c54f, 0x61dc20a2,0x61dc20a2
+	data4	0x5a774b69,0x5a774b69, 0x1c121a16,0x1c121a16
+	data4	0xe293ba0a,0xe293ba0a, 0xc0a02ae5,0xc0a02ae5
+	data4	0x3c22e043,0x3c22e043, 0x121b171d,0x121b171d
+	data4	0x0e090d0b,0x0e090d0b, 0xf28bc7ad,0xf28bc7ad
+	data4	0x2db6a8b9,0x2db6a8b9, 0x141ea9c8,0x141ea9c8
+	data4	0x57f11985,0x57f11985, 0xaf75074c,0xaf75074c
+	data4	0xee99ddbb,0xee99ddbb, 0xa37f60fd,0xa37f60fd
+	data4	0xf701269f,0xf701269f, 0x5c72f5bc,0x5c72f5bc
+	data4	0x44663bc5,0x44663bc5, 0x5bfb7e34,0x5bfb7e34
+	data4	0x8b432976,0x8b432976, 0xcb23c6dc,0xcb23c6dc
+	data4	0xb6edfc68,0xb6edfc68, 0xb8e4f163,0xb8e4f163
+	data4	0xd731dcca,0xd731dcca, 0x42638510,0x42638510
+	data4	0x13972240,0x13972240, 0x84c61120,0x84c61120
+	data4	0x854a247d,0x854a247d, 0xd2bb3df8,0xd2bb3df8
+	data4	0xaef93211,0xaef93211, 0xc729a16d,0xc729a16d
+	data4	0x1d9e2f4b,0x1d9e2f4b, 0xdcb230f3,0xdcb230f3
+	data4	0x0d8652ec,0x0d8652ec, 0x77c1e3d0,0x77c1e3d0
+	data4	0x2bb3166c,0x2bb3166c, 0xa970b999,0xa970b999
+	data4	0x119448fa,0x119448fa, 0x47e96422,0x47e96422
+	data4	0xa8fc8cc4,0xa8fc8cc4, 0xa0f03f1a,0xa0f03f1a
+	data4	0x567d2cd8,0x567d2cd8, 0x223390ef,0x223390ef
+	data4	0x87494ec7,0x87494ec7, 0xd938d1c1,0xd938d1c1
+	data4	0x8ccaa2fe,0x8ccaa2fe, 0x98d40b36,0x98d40b36
+	data4	0xa6f581cf,0xa6f581cf, 0xa57ade28,0xa57ade28
+	data4	0xdab78e26,0xdab78e26, 0x3fadbfa4,0x3fadbfa4
+	data4	0x2c3a9de4,0x2c3a9de4, 0x5078920d,0x5078920d
+	data4	0x6a5fcc9b,0x6a5fcc9b, 0x547e4662,0x547e4662
+	data4	0xf68d13c2,0xf68d13c2, 0x90d8b8e8,0x90d8b8e8
+	data4	0x2e39f75e,0x2e39f75e, 0x82c3aff5,0x82c3aff5
+	data4	0x9f5d80be,0x9f5d80be, 0x69d0937c,0x69d0937c
+	data4	0x6fd52da9,0x6fd52da9, 0xcf2512b3,0xcf2512b3
+	data4	0xc8ac993b,0xc8ac993b, 0x10187da7,0x10187da7
+	data4	0xe89c636e,0xe89c636e, 0xdb3bbb7b,0xdb3bbb7b
+	data4	0xcd267809,0xcd267809, 0x6e5918f4,0x6e5918f4
+	data4	0xec9ab701,0xec9ab701, 0x834f9aa8,0x834f9aa8
+	data4	0xe6956e65,0xe6956e65, 0xaaffe67e,0xaaffe67e
+	data4	0x21bccf08,0x21bccf08, 0xef15e8e6,0xef15e8e6
+	data4	0xbae79bd9,0xbae79bd9, 0x4a6f36ce,0x4a6f36ce
+	data4	0xea9f09d4,0xea9f09d4, 0x29b07cd6,0x29b07cd6
+	data4	0x31a4b2af,0x31a4b2af, 0x2a3f2331,0x2a3f2331
+	data4	0xc6a59430,0xc6a59430, 0x35a266c0,0x35a266c0
+	data4	0x744ebc37,0x744ebc37, 0xfc82caa6,0xfc82caa6
+	data4	0xe090d0b0,0xe090d0b0, 0x33a7d815,0x33a7d815
+	data4	0xf104984a,0xf104984a, 0x41ecdaf7,0x41ecdaf7
+	data4	0x7fcd500e,0x7fcd500e, 0x1791f62f,0x1791f62f
+	data4	0x764dd68d,0x764dd68d, 0x43efb04d,0x43efb04d
+	data4	0xccaa4d54,0xccaa4d54, 0xe49604df,0xe49604df
+	data4	0x9ed1b5e3,0x9ed1b5e3, 0x4c6a881b,0x4c6a881b
+	data4	0xc12c1fb8,0xc12c1fb8, 0x4665517f,0x4665517f
+	data4	0x9d5eea04,0x9d5eea04, 0x018c355d,0x018c355d
+	data4	0xfa877473,0xfa877473, 0xfb0b412e,0xfb0b412e
+	data4	0xb3671d5a,0xb3671d5a, 0x92dbd252,0x92dbd252
+	data4	0xe9105633,0xe9105633, 0x6dd64713,0x6dd64713
+	data4	0x9ad7618c,0x9ad7618c, 0x37a10c7a,0x37a10c7a
+	data4	0x59f8148e,0x59f8148e, 0xeb133c89,0xeb133c89
+	data4	0xcea927ee,0xcea927ee, 0xb761c935,0xb761c935
+	data4	0xe11ce5ed,0xe11ce5ed, 0x7a47b13c,0x7a47b13c
+	data4	0x9cd2df59,0x9cd2df59, 0x55f2733f,0x55f2733f
+	data4	0x1814ce79,0x1814ce79, 0x73c737bf,0x73c737bf
+	data4	0x53f7cdea,0x53f7cdea, 0x5ffdaa5b,0x5ffdaa5b
+	data4	0xdf3d6f14,0xdf3d6f14, 0x7844db86,0x7844db86
+	data4	0xcaaff381,0xcaaff381, 0xb968c43e,0xb968c43e
+	data4	0x3824342c,0x3824342c, 0xc2a3405f,0xc2a3405f
+	data4	0x161dc372,0x161dc372, 0xbce2250c,0xbce2250c
+	data4	0x283c498b,0x283c498b, 0xff0d9541,0xff0d9541
+	data4	0x39a80171,0x39a80171, 0x080cb3de,0x080cb3de
+	data4	0xd8b4e49c,0xd8b4e49c, 0x6456c190,0x6456c190
+	data4	0x7bcb8461,0x7bcb8461, 0xd532b670,0xd532b670
+	data4	0x486c5c74,0x486c5c74, 0xd0b85742,0xd0b85742
+// Td4:
+	data1	0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38
+	data1	0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb
+	data1	0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87
+	data1	0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb
+	data1	0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d
+	data1	0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e
+	data1	0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2
+	data1	0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25
+	data1	0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16
+	data1	0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92
+	data1	0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda
+	data1	0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84
+	data1	0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a
+	data1	0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06
+	data1	0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02
+	data1	0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b
+	data1	0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea
+	data1	0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73
+	data1	0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85
+	data1	0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e
+	data1	0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89
+	data1	0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b
+	data1	0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20
+	data1	0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4
+	data1	0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31
+	data1	0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f
+	data1	0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d
+	data1	0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef
+	data1	0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0
+	data1	0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61
+	data1	0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26
+	data1	0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d
+.size	AES_Td#,2048+256	// HP-UX assembler fails to ".-AES_Td#"
diff --git a/ap/lib/libssl/openssl-1.1.1o/crypto/aes/asm/aes-mips.pl b/ap/lib/libssl/openssl-1.1.1o/crypto/aes/asm/aes-mips.pl
new file mode 100644
index 0000000..b5601e9
--- /dev/null
+++ b/ap/lib/libssl/openssl-1.1.1o/crypto/aes/asm/aes-mips.pl
@@ -0,0 +1,2170 @@
+#! /usr/bin/env perl
+# Copyright 2010-2020 The OpenSSL Project Authors. All Rights Reserved.
+#
+# Licensed under the OpenSSL license (the "License").  You may not use
+# this file except in compliance with the License.  You can obtain a copy
+# in the file LICENSE in the source distribution or at
+# https://www.openssl.org/source/license.html
+
+
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+
+# AES for MIPS
+
+# October 2010
+#
+# The code uses a 1K[+256B] S-box and on a single-issue core [such as
+# the R5000] spends ~68 cycles per byte processed with a 128-bit key.
+# This is ~16% faster than gcc-generated code, which is not very
+# impressive. But recall that the compressed S-box requires extra
+# processing, namely additional rotations. Rotations are implemented
+# with lwl/lwr pairs, which are normally used for loading unaligned
+# data. Another cool thing about this module is its endian neutrality,
+# which means that it processes data without ever changing byte order...
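+#
+# In C terms each rotated-table lookup is a plain load followed by a
+# rotate; the lwl/lwr pairing (and the MIPS32R2 'rotr' path below)
+# folds the rotate into the load. A minimal sketch, assuming the 1KB
+# table holds the base Te words (names hypothetical):
+#
+#	static inline uint32_t rotr32(uint32_t x, unsigned n)
+#	{
+#	    return (x >> n) | (x << (32 - n));   /* n = 8, 16 or 24 */
+#	}
+#	/* t0 = rotr32(Te[(s1 >> 16) & 0xff], 8);   -- a Te1 lookup */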
+
+# September 2012
+#
+# Add MIPS32R2 (~10% fewer instructions) and SmartMIPS ASE (a further
+# ~25% fewer instructions) code paths. Note that there is no run-time
+# switch; instead, the code path is chosen at pre-processing time:
+# pass -mips32r2 and/or -msmartmips.
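+#
+# A plausible build sequence under that assumption (compiler flags
+# illustrative, not taken from the OpenSSL build system):
+#
+#	perl aes-mips.pl o32 aes-mips.S
+#	cc -mips32r2 -c aes-mips.S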
+
+######################################################################
+# There are a number of MIPS ABIs in use; O32 and N32/64 are the most
+# widely used. Then there is a new contender: NUBI. It appears that if
+# one picks the latter, it's possible to arrange code in an ABI-neutral
+# manner. Therefore let's stick to the NUBI register layout:
+#
+($zero,$at,$t0,$t1,$t2)=map("\$$_",(0..2,24,25));
+($a0,$a1,$a2,$a3,$a4,$a5,$a6,$a7)=map("\$$_",(4..11));
+($s0,$s1,$s2,$s3,$s4,$s5,$s6,$s7,$s8,$s9,$s10,$s11)=map("\$$_",(12..23));
+($gp,$tp,$sp,$fp,$ra)=map("\$$_",(3,28..31));
+#
+# The return value is placed in $a0. The following coding rules
+# facilitate interoperability:
+#
+# - never ever touch $tp, "thread pointer", former $gp;
+# - copy return value to $t0, former $v0 [or to $a0 if you're adapting
+#   old code];
+# - on O32 populate $a4-$a7 with 'lw $aN,4*N($sp)' if necessary;
+#
+# For reference, here is the register layout for the N32/64 MIPS ABIs:
+#
+# ($zero,$at,$v0,$v1)=map("\$$_",(0..3));
+# ($a0,$a1,$a2,$a3,$a4,$a5,$a6,$a7)=map("\$$_",(4..11));
+# ($t0,$t1,$t2,$t3,$t8,$t9)=map("\$$_",(12..15,24,25));
+# ($s0,$s1,$s2,$s3,$s4,$s5,$s6,$s7)=map("\$$_",(16..23));
+# ($gp,$sp,$fp,$ra)=map("\$$_",(28..31));
+#
+$flavour = shift || "o32"; # supported flavours are o32,n32,64,nubi32,nubi64
+
+if ($flavour =~ /64|n32/i) {
+	$PTR_LA="dla";
+	$PTR_ADD="daddu";	# incidentally works even on n32
+	$PTR_SUB="dsubu";	# incidentally works even on n32
+	$PTR_INS="dins";
+	$REG_S="sd";
+	$REG_L="ld";
+	$PTR_SLL="dsll";	# incidentally works even on n32
+	$SZREG=8;
+} else {
+	$PTR_LA="la";
+	$PTR_ADD="addu";
+	$PTR_SUB="subu";
+	$PTR_INS="ins";
+	$REG_S="sw";
+	$REG_L="lw";
+	$PTR_SLL="sll";
+	$SZREG=4;
+}
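+# Under this mapping, e.g., '$PTR_ADD $key0,$key,16' emits
+# 'daddu $key0,$key,16' on the 64-bit flavours and 'addu $key0,$key,16'
+# on the 32-bit ones, so the code below stays word-size agnostic.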
+$pf = ($flavour =~ /nubi/i) ? $t0 : $t2;
+#
+# <appro@openssl.org>
+#
+######################################################################
+
+$big_endian=(`echo MIPSEB | $ENV{CC} -E -`=~/MIPSEB/)?0:1 if ($ENV{CC});
+
+for (@ARGV) {	$output=$_ if (/\w[\w\-]*\.\w+$/);	}
+open STDOUT,">$output";
+
+if (!defined($big_endian))
+{    $big_endian=(unpack('L',pack('N',1))==1);   }
+
+while (($output=shift) && ($output!~/\w[\w\-]*\.\w+$/)) {}
+open STDOUT,">$output";
+
+my ($MSB,$LSB)=(0,3);	# automatically converted to little-endian
+
+$code.=<<___;
+#include "mips_arch.h"
+
+.text
+#if !defined(__mips_eabi) && (!defined(__vxworks) || defined(__pic__))
+.option	pic2
+#endif
+.set	noat
+___
+
+{{{
+my $FRAMESIZE=16*$SZREG;
+my $SAVED_REGS_MASK = ($flavour =~ /nubi/i) ? "0xc0fff008" : "0xc0ff0000";
+
+my ($inp,$out,$key,$Tbl,$s0,$s1,$s2,$s3)=($a0,$a1,$a2,$a3,$a4,$a5,$a6,$a7);
+my ($i0,$i1,$i2,$i3)=($at,$t0,$t1,$t2);
+my ($t0,$t1,$t2,$t3,$t4,$t5,$t6,$t7,$t8,$t9,$t10,$t11) = map("\$$_",(12..23));
+my ($key0,$cnt)=($gp,$fp);
+
+# Instruction ordering is "stolen" from the output of the MIPSpro
+# assembler invoked with -mips3 -O3 arguments...
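+#
+# Per round, the loop below computes the standard table form of AES.
+# A hedged C sketch of the first output word, reusing rotr32 from the
+# sketch above (all names illustrative):
+#
+#	s0_new = rotr32(Te[(s1 >> 16) & 0xff],  8)   /* Te1 */
+#	       ^ rotr32(Te[(s2 >>  8) & 0xff], 16)   /* Te2 */
+#	       ^ rotr32(Te[ s3        & 0xff], 24)   /* Te3 */
+#	       ^        Te[(s0 >> 24) & 0xff]        /* Te0 */
+#	       ^ rk[0];                              /* round key */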
+$code.=<<___;
+.align	5
+.ent	_mips_AES_encrypt
+_mips_AES_encrypt:
+	.frame	$sp,0,$ra
+	.set	reorder
+	lw	$t0,0($key)
+	lw	$t1,4($key)
+	lw	$t2,8($key)
+	lw	$t3,12($key)
+	lw	$cnt,240($key)
+	$PTR_ADD $key0,$key,16
+
+	xor	$s0,$t0
+	xor	$s1,$t1
+	xor	$s2,$t2
+	xor	$s3,$t3
+
+	subu	$cnt,1
+#if defined(__mips_smartmips)
+	ext	$i0,$s1,16,8
+.Loop_enc:
+	ext	$i1,$s2,16,8
+	ext	$i2,$s3,16,8
+	ext	$i3,$s0,16,8
+	lwxs	$t0,$i0($Tbl)		# Te1[s1>>16]
+	ext	$i0,$s2,8,8
+	lwxs	$t1,$i1($Tbl)		# Te1[s2>>16]
+	ext	$i1,$s3,8,8
+	lwxs	$t2,$i2($Tbl)		# Te1[s3>>16]
+	ext	$i2,$s0,8,8
+	lwxs	$t3,$i3($Tbl)		# Te1[s0>>16]
+	ext	$i3,$s1,8,8
+
+	lwxs	$t4,$i0($Tbl)		# Te2[s2>>8]
+	ext	$i0,$s3,0,8
+	lwxs	$t5,$i1($Tbl)		# Te2[s3>>8]
+	ext	$i1,$s0,0,8
+	lwxs	$t6,$i2($Tbl)		# Te2[s0>>8]
+	ext	$i2,$s1,0,8
+	lwxs	$t7,$i3($Tbl)		# Te2[s1>>8]
+	ext	$i3,$s2,0,8
+
+	lwxs	$t8,$i0($Tbl)		# Te3[s3]
+	ext	$i0,$s0,24,8
+	lwxs	$t9,$i1($Tbl)		# Te3[s0]
+	ext	$i1,$s1,24,8
+	lwxs	$t10,$i2($Tbl)		# Te3[s1]
+	ext	$i2,$s2,24,8
+	lwxs	$t11,$i3($Tbl)		# Te3[s2]
+	ext	$i3,$s3,24,8
+
+	rotr	$t0,$t0,8
+	rotr	$t1,$t1,8
+	rotr	$t2,$t2,8
+	rotr	$t3,$t3,8
+
+	rotr	$t4,$t4,16
+	rotr	$t5,$t5,16
+	rotr	$t6,$t6,16
+	rotr	$t7,$t7,16
+
+	xor	$t0,$t4
+	lwxs	$t4,$i0($Tbl)		# Te0[s0>>24]
+	xor	$t1,$t5
+	lwxs	$t5,$i1($Tbl)		# Te0[s1>>24]
+	xor	$t2,$t6
+	lwxs	$t6,$i2($Tbl)		# Te0[s2>>24]
+	xor	$t3,$t7
+	lwxs	$t7,$i3($Tbl)		# Te0[s3>>24]
+
+	rotr	$t8,$t8,24
+	lw	$s0,0($key0)
+	rotr	$t9,$t9,24
+	lw	$s1,4($key0)
+	rotr	$t10,$t10,24
+	lw	$s2,8($key0)
+	rotr	$t11,$t11,24
+	lw	$s3,12($key0)
+
+	xor	$t0,$t8
+	xor	$t1,$t9
+	xor	$t2,$t10
+	xor	$t3,$t11
+
+	xor	$t0,$t4
+	xor	$t1,$t5
+	xor	$t2,$t6
+	xor	$t3,$t7
+
+	subu	$cnt,1
+	$PTR_ADD $key0,16
+	xor	$s0,$t0
+	xor	$s1,$t1
+	xor	$s2,$t2
+	xor	$s3,$t3
+	.set	noreorder
+	bnez	$cnt,.Loop_enc
+	ext	$i0,$s1,16,8
+
+	_xtr	$i0,$s1,16-2
+#else
+	_xtr	$i0,$s1,16-2
+.Loop_enc:
+	_xtr	$i1,$s2,16-2
+	_xtr	$i2,$s3,16-2
+	_xtr	$i3,$s0,16-2
+	and	$i0,0x3fc
+	and	$i1,0x3fc
+	and	$i2,0x3fc
+	and	$i3,0x3fc
+	$PTR_ADD $i0,$Tbl
+	$PTR_ADD $i1,$Tbl
+	$PTR_ADD $i2,$Tbl
+	$PTR_ADD $i3,$Tbl
+#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2)
+	lw	$t0,0($i0)		# Te1[s1>>16]
+	_xtr	$i0,$s2,8-2
+	lw	$t1,0($i1)		# Te1[s2>>16]
+	_xtr	$i1,$s3,8-2
+	lw	$t2,0($i2)		# Te1[s3>>16]
+	_xtr	$i2,$s0,8-2
+	lw	$t3,0($i3)		# Te1[s0>>16]
+	_xtr	$i3,$s1,8-2
+#else
+	lwl	$t0,3($i0)		# Te1[s1>>16]
+	lwl	$t1,3($i1)		# Te1[s2>>16]
+	lwl	$t2,3($i2)		# Te1[s3>>16]
+	lwl	$t3,3($i3)		# Te1[s0>>16]
+	lwr	$t0,2($i0)		# Te1[s1>>16]
+	_xtr	$i0,$s2,8-2
+	lwr	$t1,2($i1)		# Te1[s2>>16]
+	_xtr	$i1,$s3,8-2
+	lwr	$t2,2($i2)		# Te1[s3>>16]
+	_xtr	$i2,$s0,8-2
+	lwr	$t3,2($i3)		# Te1[s0>>16]
+	_xtr	$i3,$s1,8-2
+#endif
+	and	$i0,0x3fc
+	and	$i1,0x3fc
+	and	$i2,0x3fc
+	and	$i3,0x3fc
+	$PTR_ADD $i0,$Tbl
+	$PTR_ADD $i1,$Tbl
+	$PTR_ADD $i2,$Tbl
+	$PTR_ADD $i3,$Tbl
+#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2)
+	rotr	$t0,$t0,8
+	rotr	$t1,$t1,8
+	rotr	$t2,$t2,8
+	rotr	$t3,$t3,8
+# if defined(_MIPSEL)
+	lw	$t4,0($i0)		# Te2[s2>>8]
+	_xtr	$i0,$s3,0-2
+	lw	$t5,0($i1)		# Te2[s3>>8]
+	_xtr	$i1,$s0,0-2
+	lw	$t6,0($i2)		# Te2[s0>>8]
+	_xtr	$i2,$s1,0-2
+	lw	$t7,0($i3)		# Te2[s1>>8]
+	_xtr	$i3,$s2,0-2
+
+	and	$i0,0x3fc
+	and	$i1,0x3fc
+	and	$i2,0x3fc
+	and	$i3,0x3fc
+	$PTR_ADD $i0,$Tbl
+	$PTR_ADD $i1,$Tbl
+	$PTR_ADD $i2,$Tbl
+	$PTR_ADD $i3,$Tbl
+	lw	$t8,0($i0)		# Te3[s3]
+	$PTR_INS $i0,$s0,2,8
+	lw	$t9,0($i1)		# Te3[s0]
+	$PTR_INS $i1,$s1,2,8
+	lw	$t10,0($i2)		# Te3[s1]
+	$PTR_INS $i2,$s2,2,8
+	lw	$t11,0($i3)		# Te3[s2]
+	$PTR_INS $i3,$s3,2,8
+# else
+	lw	$t4,0($i0)		# Te2[s2>>8]
+	$PTR_INS $i0,$s3,2,8
+	lw	$t5,0($i1)		# Te2[s3>>8]
+	$PTR_INS $i1,$s0,2,8
+	lw	$t6,0($i2)		# Te2[s0>>8]
+	$PTR_INS $i2,$s1,2,8
+	lw	$t7,0($i3)		# Te2[s1>>8]
+	$PTR_INS $i3,$s2,2,8
+
+	lw	$t8,0($i0)		# Te3[s3]
+	_xtr	$i0,$s0,24-2
+	lw	$t9,0($i1)		# Te3[s0]
+	_xtr	$i1,$s1,24-2
+	lw	$t10,0($i2)		# Te3[s1]
+	_xtr	$i2,$s2,24-2
+	lw	$t11,0($i3)		# Te3[s2]
+	_xtr	$i3,$s3,24-2
+
+	and	$i0,0x3fc
+	and	$i1,0x3fc
+	and	$i2,0x3fc
+	and	$i3,0x3fc
+	$PTR_ADD $i0,$Tbl
+	$PTR_ADD $i1,$Tbl
+	$PTR_ADD $i2,$Tbl
+	$PTR_ADD $i3,$Tbl
+# endif
+	rotr	$t4,$t4,16
+	rotr	$t5,$t5,16
+	rotr	$t6,$t6,16
+	rotr	$t7,$t7,16
+
+	rotr	$t8,$t8,24
+	rotr	$t9,$t9,24
+	rotr	$t10,$t10,24
+	rotr	$t11,$t11,24
+#else
+	lwl	$t4,2($i0)		# Te2[s2>>8]
+	lwl	$t5,2($i1)		# Te2[s3>>8]
+	lwl	$t6,2($i2)		# Te2[s0>>8]
+	lwl	$t7,2($i3)		# Te2[s1>>8]
+	lwr	$t4,1($i0)		# Te2[s2>>8]
+	_xtr	$i0,$s3,0-2
+	lwr	$t5,1($i1)		# Te2[s3>>8]
+	_xtr	$i1,$s0,0-2
+	lwr	$t6,1($i2)		# Te2[s0>>8]
+	_xtr	$i2,$s1,0-2
+	lwr	$t7,1($i3)		# Te2[s1>>8]
+	_xtr	$i3,$s2,0-2
+
+	and	$i0,0x3fc
+	and	$i1,0x3fc
+	and	$i2,0x3fc
+	and	$i3,0x3fc
+	$PTR_ADD $i0,$Tbl
+	$PTR_ADD $i1,$Tbl
+	$PTR_ADD $i2,$Tbl
+	$PTR_ADD $i3,$Tbl
+	lwl	$t8,1($i0)		# Te3[s3]
+	lwl	$t9,1($i1)		# Te3[s0]
+	lwl	$t10,1($i2)		# Te3[s1]
+	lwl	$t11,1($i3)		# Te3[s2]
+	lwr	$t8,0($i0)		# Te3[s3]
+	_xtr	$i0,$s0,24-2
+	lwr	$t9,0($i1)		# Te3[s0]
+	_xtr	$i1,$s1,24-2
+	lwr	$t10,0($i2)		# Te3[s1]
+	_xtr	$i2,$s2,24-2
+	lwr	$t11,0($i3)		# Te3[s2]
+	_xtr	$i3,$s3,24-2
+
+	and	$i0,0x3fc
+	and	$i1,0x3fc
+	and	$i2,0x3fc
+	and	$i3,0x3fc
+	$PTR_ADD $i0,$Tbl
+	$PTR_ADD $i1,$Tbl
+	$PTR_ADD $i2,$Tbl
+	$PTR_ADD $i3,$Tbl
+#endif
+	xor	$t0,$t4
+	lw	$t4,0($i0)		# Te0[s0>>24]
+	xor	$t1,$t5
+	lw	$t5,0($i1)		# Te0[s1>>24]
+	xor	$t2,$t6
+	lw	$t6,0($i2)		# Te0[s2>>24]
+	xor	$t3,$t7
+	lw	$t7,0($i3)		# Te0[s3>>24]
+
+	xor	$t0,$t8
+	lw	$s0,0($key0)
+	xor	$t1,$t9
+	lw	$s1,4($key0)
+	xor	$t2,$t10
+	lw	$s2,8($key0)
+	xor	$t3,$t11
+	lw	$s3,12($key0)
+
+	xor	$t0,$t4
+	xor	$t1,$t5
+	xor	$t2,$t6
+	xor	$t3,$t7
+
+	subu	$cnt,1
+	$PTR_ADD $key0,16
+	xor	$s0,$t0
+	xor	$s1,$t1
+	xor	$s2,$t2
+	xor	$s3,$t3
+	.set	noreorder
+	bnez	$cnt,.Loop_enc
+	_xtr	$i0,$s1,16-2
+#endif
+
+	.set	reorder
+	_xtr	$i1,$s2,16-2
+	_xtr	$i2,$s3,16-2
+	_xtr	$i3,$s0,16-2
+	and	$i0,0x3fc
+	and	$i1,0x3fc
+	and	$i2,0x3fc
+	and	$i3,0x3fc
+	$PTR_ADD $i0,$Tbl
+	$PTR_ADD $i1,$Tbl
+	$PTR_ADD $i2,$Tbl
+	$PTR_ADD $i3,$Tbl
+	lbu	$t0,2($i0)		# Te4[s1>>16]
+	_xtr	$i0,$s2,8-2
+	lbu	$t1,2($i1)		# Te4[s2>>16]
+	_xtr	$i1,$s3,8-2
+	lbu	$t2,2($i2)		# Te4[s3>>16]
+	_xtr	$i2,$s0,8-2
+	lbu	$t3,2($i3)		# Te4[s0>>16]
+	_xtr	$i3,$s1,8-2
+
+	and	$i0,0x3fc
+	and	$i1,0x3fc
+	and	$i2,0x3fc
+	and	$i3,0x3fc
+	$PTR_ADD $i0,$Tbl
+	$PTR_ADD $i1,$Tbl
+	$PTR_ADD $i2,$Tbl
+	$PTR_ADD $i3,$Tbl
+#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2)
+# if defined(_MIPSEL)
+	lbu	$t4,2($i0)		# Te4[s2>>8]
+	$PTR_INS $i0,$s0,2,8
+	lbu	$t5,2($i1)		# Te4[s3>>8]
+	$PTR_INS $i1,$s1,2,8
+	lbu	$t6,2($i2)		# Te4[s0>>8]
+	$PTR_INS $i2,$s2,2,8
+	lbu	$t7,2($i3)		# Te4[s1>>8]
+	$PTR_INS $i3,$s3,2,8
+
+	lbu	$t8,2($i0)		# Te4[s0>>24]
+	_xtr	$i0,$s3,0-2
+	lbu	$t9,2($i1)		# Te4[s1>>24]
+	_xtr	$i1,$s0,0-2
+	lbu	$t10,2($i2)		# Te4[s2>>24]
+	_xtr	$i2,$s1,0-2
+	lbu	$t11,2($i3)		# Te4[s3>>24]
+	_xtr	$i3,$s2,0-2
+
+	and	$i0,0x3fc
+	and	$i1,0x3fc
+	and	$i2,0x3fc
+	and	$i3,0x3fc
+	$PTR_ADD $i0,$Tbl
+	$PTR_ADD $i1,$Tbl
+	$PTR_ADD $i2,$Tbl
+	$PTR_ADD $i3,$Tbl
+# else
+	lbu	$t4,2($i0)		# Te4[s2>>8]
+	_xtr	$i0,$s0,24-2
+	lbu	$t5,2($i1)		# Te4[s3>>8]
+	_xtr	$i1,$s1,24-2
+	lbu	$t6,2($i2)		# Te4[s0>>8]
+	_xtr	$i2,$s2,24-2
+	lbu	$t7,2($i3)		# Te4[s1>>8]
+	_xtr	$i3,$s3,24-2
+
+	and	$i0,0x3fc
+	and	$i1,0x3fc
+	and	$i2,0x3fc
+	and	$i3,0x3fc
+	$PTR_ADD $i0,$Tbl
+	$PTR_ADD $i1,$Tbl
+	$PTR_ADD $i2,$Tbl
+	$PTR_ADD $i3,$Tbl
+	lbu	$t8,2($i0)		# Te4[s0>>24]
+	$PTR_INS $i0,$s3,2,8
+	lbu	$t9,2($i1)		# Te4[s1>>24]
+	$PTR_INS $i1,$s0,2,8
+	lbu	$t10,2($i2)		# Te4[s2>>24]
+	$PTR_INS $i2,$s1,2,8
+	lbu	$t11,2($i3)		# Te4[s3>>24]
+	$PTR_INS $i3,$s2,2,8
+# endif
+	_ins	$t0,16
+	_ins	$t1,16
+	_ins	$t2,16
+	_ins	$t3,16
+
+	_ins2	$t0,$t4,8
+	lbu	$t4,2($i0)		# Te4[s3]
+	_ins2	$t1,$t5,8
+	lbu	$t5,2($i1)		# Te4[s0]
+	_ins2	$t2,$t6,8
+	lbu	$t6,2($i2)		# Te4[s1]
+	_ins2	$t3,$t7,8
+	lbu	$t7,2($i3)		# Te4[s2]
+
+	_ins2	$t0,$t8,24
+	lw	$s0,0($key0)
+	_ins2	$t1,$t9,24
+	lw	$s1,4($key0)
+	_ins2	$t2,$t10,24
+	lw	$s2,8($key0)
+	_ins2	$t3,$t11,24
+	lw	$s3,12($key0)
+
+	_ins2	$t0,$t4,0
+	_ins2	$t1,$t5,0
+	_ins2	$t2,$t6,0
+	_ins2	$t3,$t7,0
+#else
+	lbu	$t4,2($i0)		# Te4[s2>>8]
+	_xtr	$i0,$s0,24-2
+	lbu	$t5,2($i1)		# Te4[s3>>8]
+	_xtr	$i1,$s1,24-2
+	lbu	$t6,2($i2)		# Te4[s0>>8]
+	_xtr	$i2,$s2,24-2
+	lbu	$t7,2($i3)		# Te4[s1>>8]
+	_xtr	$i3,$s3,24-2
+
+	and	$i0,0x3fc
+	and	$i1,0x3fc
+	and	$i2,0x3fc
+	and	$i3,0x3fc
+	$PTR_ADD $i0,$Tbl
+	$PTR_ADD $i1,$Tbl
+	$PTR_ADD $i2,$Tbl
+	$PTR_ADD $i3,$Tbl
+	lbu	$t8,2($i0)		# Te4[s0>>24]
+	_xtr	$i0,$s3,0-2
+	lbu	$t9,2($i1)		# Te4[s1>>24]
+	_xtr	$i1,$s0,0-2
+	lbu	$t10,2($i2)		# Te4[s2>>24]
+	_xtr	$i2,$s1,0-2
+	lbu	$t11,2($i3)		# Te4[s3>>24]
+	_xtr	$i3,$s2,0-2
+
+	and	$i0,0x3fc
+	and	$i1,0x3fc
+	and	$i2,0x3fc
+	and	$i3,0x3fc
+	$PTR_ADD $i0,$Tbl
+	$PTR_ADD $i1,$Tbl
+	$PTR_ADD $i2,$Tbl
+	$PTR_ADD $i3,$Tbl
+
+	_ins	$t0,16
+	_ins	$t1,16
+	_ins	$t2,16
+	_ins	$t3,16
+
+	_ins	$t4,8
+	_ins	$t5,8
+	_ins	$t6,8
+	_ins	$t7,8
+
+	xor	$t0,$t4
+	lbu	$t4,2($i0)		# Te4[s3]
+	xor	$t1,$t5
+	lbu	$t5,2($i1)		# Te4[s0]
+	xor	$t2,$t6
+	lbu	$t6,2($i2)		# Te4[s1]
+	xor	$t3,$t7
+	lbu	$t7,2($i3)		# Te4[s2]
+
+	_ins	$t8,24
+	lw	$s0,0($key0)
+	_ins	$t9,24
+	lw	$s1,4($key0)
+	_ins	$t10,24
+	lw	$s2,8($key0)
+	_ins	$t11,24
+	lw	$s3,12($key0)
+
+	xor	$t0,$t8
+	xor	$t1,$t9
+	xor	$t2,$t10
+	xor	$t3,$t11
+
+	_ins	$t4,0
+	_ins	$t5,0
+	_ins	$t6,0
+	_ins	$t7,0
+
+	xor	$t0,$t4
+	xor	$t1,$t5
+	xor	$t2,$t6
+	xor	$t3,$t7
+#endif
+	xor	$s0,$t0
+	xor	$s1,$t1
+	xor	$s2,$t2
+	xor	$s3,$t3
+
+	jr	$ra
+.end	_mips_AES_encrypt
+
+.align	5
+.globl	AES_encrypt
+.ent	AES_encrypt
+AES_encrypt:
+	.frame	$sp,$FRAMESIZE,$ra
+	.mask	$SAVED_REGS_MASK,-$SZREG
+	.set	noreorder
+___
+$code.=<<___ if ($flavour =~ /o32/i);	# o32 PIC-ification
+	.cpload	$pf
+___
+$code.=<<___;
+	$PTR_SUB $sp,$FRAMESIZE
+	$REG_S	$ra,$FRAMESIZE-1*$SZREG($sp)
+	$REG_S	$fp,$FRAMESIZE-2*$SZREG($sp)
+	$REG_S	$s11,$FRAMESIZE-3*$SZREG($sp)
+	$REG_S	$s10,$FRAMESIZE-4*$SZREG($sp)
+	$REG_S	$s9,$FRAMESIZE-5*$SZREG($sp)
+	$REG_S	$s8,$FRAMESIZE-6*$SZREG($sp)
+	$REG_S	$s7,$FRAMESIZE-7*$SZREG($sp)
+	$REG_S	$s6,$FRAMESIZE-8*$SZREG($sp)
+	$REG_S	$s5,$FRAMESIZE-9*$SZREG($sp)
+	$REG_S	$s4,$FRAMESIZE-10*$SZREG($sp)
+___
+$code.=<<___ if ($flavour =~ /nubi/i);	# optimize non-nubi prologue
+	$REG_S	\$15,$FRAMESIZE-11*$SZREG($sp)
+	$REG_S	\$14,$FRAMESIZE-12*$SZREG($sp)
+	$REG_S	\$13,$FRAMESIZE-13*$SZREG($sp)
+	$REG_S	\$12,$FRAMESIZE-14*$SZREG($sp)
+	$REG_S	$gp,$FRAMESIZE-15*$SZREG($sp)
+___
+$code.=<<___ if ($flavour !~ /o32/i);	# non-o32 PIC-ification
+	.cplocal	$Tbl
+	.cpsetup	$pf,$zero,AES_encrypt
+___
+$code.=<<___;
+	.set	reorder
+	$PTR_LA	$Tbl,AES_Te		# PIC-ified 'load address'
+
+#if defined(_MIPS_ARCH_MIPS32R6) || defined(_MIPS_ARCH_MIPS64R6)
+	lw	$s0,0($inp)
+	lw	$s1,4($inp)
+	lw	$s2,8($inp)
+	lw	$s3,12($inp)
+#else
+	lwl	$s0,0+$MSB($inp)
+	lwl	$s1,4+$MSB($inp)
+	lwl	$s2,8+$MSB($inp)
+	lwl	$s3,12+$MSB($inp)
+	lwr	$s0,0+$LSB($inp)
+	lwr	$s1,4+$LSB($inp)
+	lwr	$s2,8+$LSB($inp)
+	lwr	$s3,12+$LSB($inp)
+#endif
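+	# (pre-R6 cores use lwl/lwr pairs for the possibly misaligned input;
+	#  R6 removed those instructions and misaligned lw is expected to be
+	#  handled by the hardware there)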
+
+	bal	_mips_AES_encrypt
+
+#if defined(_MIPS_ARCH_MIPS32R6) || defined(_MIPS_ARCH_MIPS64R6)
+	sw	$s0,0($out)
+	sw	$s1,4($out)
+	sw	$s2,8($out)
+	sw	$s3,12($out)
+#else
+	swr	$s0,0+$LSB($out)
+	swr	$s1,4+$LSB($out)
+	swr	$s2,8+$LSB($out)
+	swr	$s3,12+$LSB($out)
+	swl	$s0,0+$MSB($out)
+	swl	$s1,4+$MSB($out)
+	swl	$s2,8+$MSB($out)
+	swl	$s3,12+$MSB($out)
+#endif
+
+	.set	noreorder
+	$REG_L	$ra,$FRAMESIZE-1*$SZREG($sp)
+	$REG_L	$fp,$FRAMESIZE-2*$SZREG($sp)
+	$REG_L	$s11,$FRAMESIZE-3*$SZREG($sp)
+	$REG_L	$s10,$FRAMESIZE-4*$SZREG($sp)
+	$REG_L	$s9,$FRAMESIZE-5*$SZREG($sp)
+	$REG_L	$s8,$FRAMESIZE-6*$SZREG($sp)
+	$REG_L	$s7,$FRAMESIZE-7*$SZREG($sp)
+	$REG_L	$s6,$FRAMESIZE-8*$SZREG($sp)
+	$REG_L	$s5,$FRAMESIZE-9*$SZREG($sp)
+	$REG_L	$s4,$FRAMESIZE-10*$SZREG($sp)
+___
+$code.=<<___ if ($flavour =~ /nubi/i);
+	$REG_L	\$15,$FRAMESIZE-11*$SZREG($sp)
+	$REG_L	\$14,$FRAMESIZE-12*$SZREG($sp)
+	$REG_L	\$13,$FRAMESIZE-13*$SZREG($sp)
+	$REG_L	\$12,$FRAMESIZE-14*$SZREG($sp)
+	$REG_L	$gp,$FRAMESIZE-15*$SZREG($sp)
+___
+$code.=<<___;
+	jr	$ra
+	$PTR_ADD $sp,$FRAMESIZE
+.end	AES_encrypt
+___
+
+$code.=<<___;
+.align	5
+.ent	_mips_AES_decrypt
+_mips_AES_decrypt:
+	.frame	$sp,0,$ra
+	.set	reorder
+	lw	$t0,0($key)
+	lw	$t1,4($key)
+	lw	$t2,8($key)
+	lw	$t3,12($key)
+	lw	$cnt,240($key)
+	$PTR_ADD $key0,$key,16
+
+	xor	$s0,$t0
+	xor	$s1,$t1
+	xor	$s2,$t2
+	xor	$s3,$t3
+
+	subu	$cnt,1
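+	# $cnt = rounds-1 table-driven rounds remain; the final round is
+	# handled separately with byte lookups in Td4 after the loop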
+#if defined(__mips_smartmips)
+	ext	$i0,$s3,16,8
+.Loop_dec:
+	ext	$i1,$s0,16,8
+	ext	$i2,$s1,16,8
+	ext	$i3,$s2,16,8
+	lwxs	$t0,$i0($Tbl)		# Td1[s3>>16]
+	ext	$i0,$s2,8,8
+	lwxs	$t1,$i1($Tbl)		# Td1[s0>>16]
+	ext	$i1,$s3,8,8
+	lwxs	$t2,$i2($Tbl)		# Td1[s1>>16]
+	ext	$i2,$s0,8,8
+	lwxs	$t3,$i3($Tbl)		# Td1[s2>>16]
+	ext	$i3,$s1,8,8
+
+	lwxs	$t4,$i0($Tbl)		# Td2[s2>>8]
+	ext	$i0,$s1,0,8
+	lwxs	$t5,$i1($Tbl)		# Td2[s3>>8]
+	ext	$i1,$s2,0,8
+	lwxs	$t6,$i2($Tbl)		# Td2[s0>>8]
+	ext	$i2,$s3,0,8
+	lwxs	$t7,$i3($Tbl)		# Td2[s1>>8]
+	ext	$i3,$s0,0,8
+
+	lwxs	$t8,$i0($Tbl)		# Td3[s1]
+	ext	$i0,$s0,24,8
+	lwxs	$t9,$i1($Tbl)		# Td3[s2]
+	ext	$i1,$s1,24,8
+	lwxs	$t10,$i2($Tbl)		# Td3[s3]
+	ext	$i2,$s2,24,8
+	lwxs	$t11,$i3($Tbl)		# Td3[s0]
+	ext	$i3,$s3,24,8
+
+	rotr	$t0,$t0,8
+	rotr	$t1,$t1,8
+	rotr	$t2,$t2,8
+	rotr	$t3,$t3,8
+
+	rotr	$t4,$t4,16
+	rotr	$t5,$t5,16
+	rotr	$t6,$t6,16
+	rotr	$t7,$t7,16
+
+	xor	$t0,$t4
+	lwxs	$t4,$i0($Tbl)		# Td0[s0>>24]
+	xor	$t1,$t5
+	lwxs	$t5,$i1($Tbl)		# Td0[s1>>24]
+	xor	$t2,$t6
+	lwxs	$t6,$i2($Tbl)		# Td0[s2>>24]
+	xor	$t3,$t7
+	lwxs	$t7,$i3($Tbl)		# Td0[s3>>24]
+
+	rotr	$t8,$t8,24
+	lw	$s0,0($key0)
+	rotr	$t9,$t9,24
+	lw	$s1,4($key0)
+	rotr	$t10,$t10,24
+	lw	$s2,8($key0)
+	rotr	$t11,$t11,24
+	lw	$s3,12($key0)
+
+	xor	$t0,$t8
+	xor	$t1,$t9
+	xor	$t2,$t10
+	xor	$t3,$t11
+
+	xor	$t0,$t4
+	xor	$t1,$t5
+	xor	$t2,$t6
+	xor	$t3,$t7
+
+	subu	$cnt,1
+	$PTR_ADD $key0,16
+	xor	$s0,$t0
+	xor	$s1,$t1
+	xor	$s2,$t2
+	xor	$s3,$t3
+	.set	noreorder
+	bnez	$cnt,.Loop_dec
+	ext	$i0,$s3,16,8
+
+	_xtr	$i0,$s3,16-2
+#else
+	_xtr	$i0,$s3,16-2
+.Loop_dec:
+	_xtr	$i1,$s0,16-2
+	_xtr	$i2,$s1,16-2
+	_xtr	$i3,$s2,16-2
+	and	$i0,0x3fc
+	and	$i1,0x3fc
+	and	$i2,0x3fc
+	and	$i3,0x3fc
+	$PTR_ADD $i0,$Tbl
+	$PTR_ADD $i1,$Tbl
+	$PTR_ADD $i2,$Tbl
+	$PTR_ADD $i3,$Tbl
+#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2)
+	lw	$t0,0($i0)		# Td1[s3>>16]
+	_xtr	$i0,$s2,8-2
+	lw	$t1,0($i1)		# Td1[s0>>16]
+	_xtr	$i1,$s3,8-2
+	lw	$t2,0($i2)		# Td1[s1>>16]
+	_xtr	$i2,$s0,8-2
+	lw	$t3,0($i3)		# Td1[s2>>16]
+	_xtr	$i3,$s1,8-2
+#else
+	lwl	$t0,3($i0)		# Td1[s3>>16]
+	lwl	$t1,3($i1)		# Td1[s0>>16]
+	lwl	$t2,3($i2)		# Td1[s1>>16]
+	lwl	$t3,3($i3)		# Td1[s2>>16]
+	lwr	$t0,2($i0)		# Td1[s3>>16]
+	_xtr	$i0,$s2,8-2
+	lwr	$t1,2($i1)		# Td1[s0>>16]
+	_xtr	$i1,$s3,8-2
+	lwr	$t2,2($i2)		# Td1[s1>>16]
+	_xtr	$i2,$s0,8-2
+	lwr	$t3,2($i3)		# Td1[s2>>16]
+	_xtr	$i3,$s1,8-2
+#endif
+
+	and	$i0,0x3fc
+	and	$i1,0x3fc
+	and	$i2,0x3fc
+	and	$i3,0x3fc
+	$PTR_ADD $i0,$Tbl
+	$PTR_ADD $i1,$Tbl
+	$PTR_ADD $i2,$Tbl
+	$PTR_ADD $i3,$Tbl
+#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2)
+	rotr	$t0,$t0,8
+	rotr	$t1,$t1,8
+	rotr	$t2,$t2,8
+	rotr	$t3,$t3,8
+# if defined(_MIPSEL)
+	lw	$t4,0($i0)		# Td2[s2>>8]
+	_xtr	$i0,$s1,0-2
+	lw	$t5,0($i1)		# Td2[s3>>8]
+	_xtr	$i1,$s2,0-2
+	lw	$t6,0($i2)		# Td2[s0>>8]
+	_xtr	$i2,$s3,0-2
+	lw	$t7,0($i3)		# Td2[s1>>8]
+	_xtr	$i3,$s0,0-2
+
+	and	$i0,0x3fc
+	and	$i1,0x3fc
+	and	$i2,0x3fc
+	and	$i3,0x3fc
+	$PTR_ADD $i0,$Tbl
+	$PTR_ADD $i1,$Tbl
+	$PTR_ADD $i2,$Tbl
+	$PTR_ADD $i3,$Tbl
+	lw	$t8,0($i0)		# Td3[s1]
+	$PTR_INS $i0,$s0,2,8
+	lw	$t9,0($i1)		# Td3[s2]
+	$PTR_INS $i1,$s1,2,8
+	lw	$t10,0($i2)		# Td3[s3]
+	$PTR_INS $i2,$s2,2,8
+	lw	$t11,0($i3)		# Td3[s0]
+	$PTR_INS $i3,$s3,2,8
+#else
+	lw	$t4,0($i0)		# Td2[s2>>8]
+	$PTR_INS $i0,$s1,2,8
+	lw	$t5,0($i1)		# Td2[s3>>8]
+	$PTR_INS $i1,$s2,2,8
+	lw	$t6,0($i2)		# Td2[s0>>8]
+	$PTR_INS $i2,$s3,2,8
+	lw	$t7,0($i3)		# Td2[s1>>8]
+	$PTR_INS $i3,$s0,2,8
+
+	lw	$t8,0($i0)		# Td3[s1]
+	_xtr	$i0,$s0,24-2
+	lw	$t9,0($i1)		# Td3[s2]
+	_xtr	$i1,$s1,24-2
+	lw	$t10,0($i2)		# Td3[s3]
+	_xtr	$i2,$s2,24-2
+	lw	$t11,0($i3)		# Td3[s0]
+	_xtr	$i3,$s3,24-2
+
+	and	$i0,0x3fc
+	and	$i1,0x3fc
+	and	$i2,0x3fc
+	and	$i3,0x3fc
+	$PTR_ADD $i0,$Tbl
+	$PTR_ADD $i1,$Tbl
+	$PTR_ADD $i2,$Tbl
+	$PTR_ADD $i3,$Tbl
+#endif
+	rotr	$t4,$t4,16
+	rotr	$t5,$t5,16
+	rotr	$t6,$t6,16
+	rotr	$t7,$t7,16
+
+	rotr	$t8,$t8,24
+	rotr	$t9,$t9,24
+	rotr	$t10,$t10,24
+	rotr	$t11,$t11,24
+#else
+	lwl	$t4,2($i0)		# Td2[s2>>8]
+	lwl	$t5,2($i1)		# Td2[s3>>8]
+	lwl	$t6,2($i2)		# Td2[s0>>8]
+	lwl	$t7,2($i3)		# Td2[s1>>8]
+	lwr	$t4,1($i0)		# Td2[s2>>8]
+	_xtr	$i0,$s1,0-2
+	lwr	$t5,1($i1)		# Td2[s3>>8]
+	_xtr	$i1,$s2,0-2
+	lwr	$t6,1($i2)		# Td2[s0>>8]
+	_xtr	$i2,$s3,0-2
+	lwr	$t7,1($i3)		# Td2[s1>>8]
+	_xtr	$i3,$s0,0-2
+
+	and	$i0,0x3fc
+	and	$i1,0x3fc
+	and	$i2,0x3fc
+	and	$i3,0x3fc
+	$PTR_ADD $i0,$Tbl
+	$PTR_ADD $i1,$Tbl
+	$PTR_ADD $i2,$Tbl
+	$PTR_ADD $i3,$Tbl
+	lwl	$t8,1($i0)		# Td3[s1]
+	lwl	$t9,1($i1)		# Td3[s2]
+	lwl	$t10,1($i2)		# Td3[s3]
+	lwl	$t11,1($i3)		# Td3[s0]
+	lwr	$t8,0($i0)		# Td3[s1]
+	_xtr	$i0,$s0,24-2
+	lwr	$t9,0($i1)		# Td3[s2]
+	_xtr	$i1,$s1,24-2
+	lwr	$t10,0($i2)		# Td3[s3]
+	_xtr	$i2,$s2,24-2
+	lwr	$t11,0($i3)		# Td3[s0]
+	_xtr	$i3,$s3,24-2
+
+	and	$i0,0x3fc
+	and	$i1,0x3fc
+	and	$i2,0x3fc
+	and	$i3,0x3fc
+	$PTR_ADD $i0,$Tbl
+	$PTR_ADD $i1,$Tbl
+	$PTR_ADD $i2,$Tbl
+	$PTR_ADD $i3,$Tbl
+#endif
+
+	xor	$t0,$t4
+	lw	$t4,0($i0)		# Td0[s0>>24]
+	xor	$t1,$t5
+	lw	$t5,0($i1)		# Td0[s1>>24]
+	xor	$t2,$t6
+	lw	$t6,0($i2)		# Td0[s2>>24]
+	xor	$t3,$t7
+	lw	$t7,0($i3)		# Td0[s3>>24]
+
+	xor	$t0,$t8
+	lw	$s0,0($key0)
+	xor	$t1,$t9
+	lw	$s1,4($key0)
+	xor	$t2,$t10
+	lw	$s2,8($key0)
+	xor	$t3,$t11
+	lw	$s3,12($key0)
+
+	xor	$t0,$t4
+	xor	$t1,$t5
+	xor	$t2,$t6
+	xor	$t3,$t7
+
+	subu	$cnt,1
+	$PTR_ADD $key0,16
+	xor	$s0,$t0
+	xor	$s1,$t1
+	xor	$s2,$t2
+	xor	$s3,$t3
+	.set	noreorder
+	bnez	$cnt,.Loop_dec
+	_xtr	$i0,$s3,16-2
+#endif
+
+	.set	reorder
+	lw	$t4,1024($Tbl)		# prefetch Td4
+	_xtr	$i0,$s3,16
+	lw	$t5,1024+32($Tbl)
+	_xtr	$i1,$s0,16
+	lw	$t6,1024+64($Tbl)
+	_xtr	$i2,$s1,16
+	lw	$t7,1024+96($Tbl)
+	_xtr	$i3,$s2,16
+	lw	$t8,1024+128($Tbl)
+	and	$i0,0xff
+	lw	$t9,1024+160($Tbl)
+	and	$i1,0xff
+	lw	$t10,1024+192($Tbl)
+	and	$i2,0xff
+	lw	$t11,1024+224($Tbl)
+	and	$i3,0xff
+
+	$PTR_ADD $i0,$Tbl
+	$PTR_ADD $i1,$Tbl
+	$PTR_ADD $i2,$Tbl
+	$PTR_ADD $i3,$Tbl
+	lbu	$t0,1024($i0)		# Td4[s3>>16]
+	_xtr	$i0,$s2,8
+	lbu	$t1,1024($i1)		# Td4[s0>>16]
+	_xtr	$i1,$s3,8
+	lbu	$t2,1024($i2)		# Td4[s1>>16]
+	_xtr	$i2,$s0,8
+	lbu	$t3,1024($i3)		# Td4[s2>>16]
+	_xtr	$i3,$s1,8
+
+	and	$i0,0xff
+	and	$i1,0xff
+	and	$i2,0xff
+	and	$i3,0xff
+	$PTR_ADD $i0,$Tbl
+	$PTR_ADD $i1,$Tbl
+	$PTR_ADD $i2,$Tbl
+	$PTR_ADD $i3,$Tbl
+#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2)
+# if defined(_MIPSEL)
+	lbu	$t4,1024($i0)		# Td4[s2>>8]
+	$PTR_INS $i0,$s0,0,8
+	lbu	$t5,1024($i1)		# Td4[s3>>8]
+	$PTR_INS $i1,$s1,0,8
+	lbu	$t6,1024($i2)		# Td4[s0>>8]
+	$PTR_INS $i2,$s2,0,8
+	lbu	$t7,1024($i3)		# Td4[s1>>8]
+	$PTR_INS $i3,$s3,0,8
+
+	lbu	$t8,1024($i0)		# Td4[s0>>24]
+	_xtr	$i0,$s1,0
+	lbu	$t9,1024($i1)		# Td4[s1>>24]
+	_xtr	$i1,$s2,0
+	lbu	$t10,1024($i2)		# Td4[s2>>24]
+	_xtr	$i2,$s3,0
+	lbu	$t11,1024($i3)		# Td4[s3>>24]
+	_xtr	$i3,$s0,0
+
+	$PTR_ADD $i0,$Tbl
+	$PTR_ADD $i1,$Tbl
+	$PTR_ADD $i2,$Tbl
+	$PTR_ADD $i3,$Tbl
+# else
+	lbu	$t4,1024($i0)		# Td4[s2>>8]
+	_xtr	$i0,$s0,24
+	lbu	$t5,1024($i1)		# Td4[s3>>8]
+	_xtr	$i1,$s1,24
+	lbu	$t6,1024($i2)		# Td4[s0>>8]
+	_xtr	$i2,$s2,24
+	lbu	$t7,1024($i3)		# Td4[s1>>8]
+	_xtr	$i3,$s3,24
+
+	$PTR_ADD $i0,$Tbl
+	$PTR_ADD $i1,$Tbl
+	$PTR_ADD $i2,$Tbl
+	$PTR_ADD $i3,$Tbl
+	lbu	$t8,1024($i0)		# Td4[s0>>24]
+	$PTR_INS $i0,$s1,0,8
+	lbu	$t9,1024($i1)		# Td4[s1>>24]
+	$PTR_INS $i1,$s2,0,8
+	lbu	$t10,1024($i2)		# Td4[s2>>24]
+	$PTR_INS $i2,$s3,0,8
+	lbu	$t11,1024($i3)		# Td4[s3>>24]
+	$PTR_INS $i3,$s0,0,8
+# endif
+	_ins	$t0,16
+	_ins	$t1,16
+	_ins	$t2,16
+	_ins	$t3,16
+
+	_ins2	$t0,$t4,8
+	lbu	$t4,1024($i0)		# Td4[s1]
+	_ins2	$t1,$t5,8
+	lbu	$t5,1024($i1)		# Td4[s2]
+	_ins2	$t2,$t6,8
+	lbu	$t6,1024($i2)		# Td4[s3]
+	_ins2	$t3,$t7,8
+	lbu	$t7,1024($i3)		# Td4[s0]
+
+	_ins2	$t0,$t8,24
+	lw	$s0,0($key0)
+	_ins2	$t1,$t9,24
+	lw	$s1,4($key0)
+	_ins2	$t2,$t10,24
+	lw	$s2,8($key0)
+	_ins2	$t3,$t11,24
+	lw	$s3,12($key0)
+
+	_ins2	$t0,$t4,0
+	_ins2	$t1,$t5,0
+	_ins2	$t2,$t6,0
+	_ins2	$t3,$t7,0
+#else
+	lbu	$t4,1024($i0)		# Td4[s2>>8]
+	_xtr	$i0,$s0,24
+	lbu	$t5,1024($i1)		# Td4[s3>>8]
+	_xtr	$i1,$s1,24
+	lbu	$t6,1024($i2)		# Td4[s0>>8]
+	_xtr	$i2,$s2,24
+	lbu	$t7,1024($i3)		# Td4[s1>>8]
+	_xtr	$i3,$s3,24
+
+	$PTR_ADD $i0,$Tbl
+	$PTR_ADD $i1,$Tbl
+	$PTR_ADD $i2,$Tbl
+	$PTR_ADD $i3,$Tbl
+	lbu	$t8,1024($i0)		# Td4[s0>>24]
+	_xtr	$i0,$s1,0
+	lbu	$t9,1024($i1)		# Td4[s1>>24]
+	_xtr	$i1,$s2,0
+	lbu	$t10,1024($i2)		# Td4[s2>>24]
+	_xtr	$i2,$s3,0
+	lbu	$t11,1024($i3)		# Td4[s3>>24]
+	_xtr	$i3,$s0,0
+
+	$PTR_ADD $i0,$Tbl
+	$PTR_ADD $i1,$Tbl
+	$PTR_ADD $i2,$Tbl
+	$PTR_ADD $i3,$Tbl
+
+	_ins	$t0,16
+	_ins	$t1,16
+	_ins	$t2,16
+	_ins	$t3,16
+
+	_ins	$t4,8
+	_ins	$t5,8
+	_ins	$t6,8
+	_ins	$t7,8
+
+	xor	$t0,$t4
+	lbu	$t4,1024($i0)		# Td4[s1]
+	xor	$t1,$t5
+	lbu	$t5,1024($i1)		# Td4[s2]
+	xor	$t2,$t6
+	lbu	$t6,1024($i2)		# Td4[s3]
+	xor	$t3,$t7
+	lbu	$t7,1024($i3)		# Td4[s0]
+
+	_ins	$t8,24
+	lw	$s0,0($key0)
+	_ins	$t9,24
+	lw	$s1,4($key0)
+	_ins	$t10,24
+	lw	$s2,8($key0)
+	_ins	$t11,24
+	lw	$s3,12($key0)
+
+	xor	$t0,$t8
+	xor	$t1,$t9
+	xor	$t2,$t10
+	xor	$t3,$t11
+
+	_ins	$t4,0
+	_ins	$t5,0
+	_ins	$t6,0
+	_ins	$t7,0
+
+	xor	$t0,$t4
+	xor	$t1,$t5
+	xor	$t2,$t6
+	xor	$t3,$t7
+#endif
+
+	xor	$s0,$t0
+	xor	$s1,$t1
+	xor	$s2,$t2
+	xor	$s3,$t3
+
+	jr	$ra
+.end	_mips_AES_decrypt
+
+.align	5
+.globl	AES_decrypt
+.ent	AES_decrypt
+AES_decrypt:
+	.frame	$sp,$FRAMESIZE,$ra
+	.mask	$SAVED_REGS_MASK,-$SZREG
+	.set	noreorder
+___
+$code.=<<___ if ($flavour =~ /o32/i);	# o32 PIC-ification
+	.cpload	$pf
+___
+$code.=<<___;
+	$PTR_SUB $sp,$FRAMESIZE
+	$REG_S	$ra,$FRAMESIZE-1*$SZREG($sp)
+	$REG_S	$fp,$FRAMESIZE-2*$SZREG($sp)
+	$REG_S	$s11,$FRAMESIZE-3*$SZREG($sp)
+	$REG_S	$s10,$FRAMESIZE-4*$SZREG($sp)
+	$REG_S	$s9,$FRAMESIZE-5*$SZREG($sp)
+	$REG_S	$s8,$FRAMESIZE-6*$SZREG($sp)
+	$REG_S	$s7,$FRAMESIZE-7*$SZREG($sp)
+	$REG_S	$s6,$FRAMESIZE-8*$SZREG($sp)
+	$REG_S	$s5,$FRAMESIZE-9*$SZREG($sp)
+	$REG_S	$s4,$FRAMESIZE-10*$SZREG($sp)
+___
+$code.=<<___ if ($flavour =~ /nubi/i);	# optimize non-nubi prologue
+	$REG_S	\$15,$FRAMESIZE-11*$SZREG($sp)
+	$REG_S	\$14,$FRAMESIZE-12*$SZREG($sp)
+	$REG_S	\$13,$FRAMESIZE-13*$SZREG($sp)
+	$REG_S	\$12,$FRAMESIZE-14*$SZREG($sp)
+	$REG_S	$gp,$FRAMESIZE-15*$SZREG($sp)
+___
+$code.=<<___ if ($flavour !~ /o32/i);	# non-o32 PIC-ification
+	.cplocal	$Tbl
+	.cpsetup	$pf,$zero,AES_decrypt
+___
+$code.=<<___;
+	.set	reorder
+	$PTR_LA	$Tbl,AES_Td		# PIC-ified 'load address'
+
+#if defined(_MIPS_ARCH_MIPS32R6) || defined(_MIPS_ARCH_MIPS64R6)
+	lw	$s0,0($inp)
+	lw	$s1,4($inp)
+	lw	$s2,8($inp)
+	lw	$s3,12($inp)
+#else
+	lwl	$s0,0+$MSB($inp)
+	lwl	$s1,4+$MSB($inp)
+	lwl	$s2,8+$MSB($inp)
+	lwl	$s3,12+$MSB($inp)
+	lwr	$s0,0+$LSB($inp)
+	lwr	$s1,4+$LSB($inp)
+	lwr	$s2,8+$LSB($inp)
+	lwr	$s3,12+$LSB($inp)
+#endif
+
+	bal	_mips_AES_decrypt
+
+#if defined(_MIPS_ARCH_MIPS32R6) || defined(_MIPS_ARCH_MIPS64R6)
+	sw	$s0,0($out)
+	sw	$s1,4($out)
+	sw	$s2,8($out)
+	sw	$s3,12($out)
+#else
+	swr	$s0,0+$LSB($out)
+	swr	$s1,4+$LSB($out)
+	swr	$s2,8+$LSB($out)
+	swr	$s3,12+$LSB($out)
+	swl	$s0,0+$MSB($out)
+	swl	$s1,4+$MSB($out)
+	swl	$s2,8+$MSB($out)
+	swl	$s3,12+$MSB($out)
+#endif
+
+	.set	noreorder
+	$REG_L	$ra,$FRAMESIZE-1*$SZREG($sp)
+	$REG_L	$fp,$FRAMESIZE-2*$SZREG($sp)
+	$REG_L	$s11,$FRAMESIZE-3*$SZREG($sp)
+	$REG_L	$s10,$FRAMESIZE-4*$SZREG($sp)
+	$REG_L	$s9,$FRAMESIZE-5*$SZREG($sp)
+	$REG_L	$s8,$FRAMESIZE-6*$SZREG($sp)
+	$REG_L	$s7,$FRAMESIZE-7*$SZREG($sp)
+	$REG_L	$s6,$FRAMESIZE-8*$SZREG($sp)
+	$REG_L	$s5,$FRAMESIZE-9*$SZREG($sp)
+	$REG_L	$s4,$FRAMESIZE-10*$SZREG($sp)
+___
+$code.=<<___ if ($flavour =~ /nubi/i);
+	$REG_L	\$15,$FRAMESIZE-11*$SZREG($sp)
+	$REG_L	\$14,$FRAMESIZE-12*$SZREG($sp)
+	$REG_L	\$13,$FRAMESIZE-13*$SZREG($sp)
+	$REG_L	\$12,$FRAMESIZE-14*$SZREG($sp)
+	$REG_L	$gp,$FRAMESIZE-15*$SZREG($sp)
+___
+$code.=<<___;
+	jr	$ra
+	$PTR_ADD $sp,$FRAMESIZE
+.end	AES_decrypt
+___
+}}}
+
+{{{
+my $FRAMESIZE=8*$SZREG;
+my $SAVED_REGS_MASK = ($flavour =~ /nubi/i) ? "0xc000f008" : "0xc0000000";
+
+my ($inp,$bits,$key,$Tbl)=($a0,$a1,$a2,$a3);
+my ($rk0,$rk1,$rk2,$rk3,$rk4,$rk5,$rk6,$rk7)=($a4,$a5,$a6,$a7,$s0,$s1,$s2,$s3);
+my ($i0,$i1,$i2,$i3)=($at,$t0,$t1,$t2);
+my ($rcon,$cnt)=($gp,$fp);
+
+$code.=<<___;
+.align	5
+.ent	_mips_AES_set_encrypt_key
+_mips_AES_set_encrypt_key:
+	.frame	$sp,0,$ra
+	.set	noreorder
+	beqz	$inp,.Lekey_done
+	li	$t0,-1
+	beqz	$key,.Lekey_done
+	$PTR_ADD $rcon,$Tbl,256
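+	# ($t0 = -1 is the error return for a NULL input or key pointer;
+	#  the rcon words are laid out 256 bytes past AES_Te4, see .rdata)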
+
+	.set	reorder
+#if defined(_MIPS_ARCH_MIPS32R6) || defined(_MIPS_ARCH_MIPS64R6)
+	lw	$rk0,0($inp)		# load 128 bits
+	lw	$rk1,4($inp)
+	lw	$rk2,8($inp)
+	lw	$rk3,12($inp)
+#else
+	lwl	$rk0,0+$MSB($inp)	# load 128 bits
+	lwl	$rk1,4+$MSB($inp)
+	lwl	$rk2,8+$MSB($inp)
+	lwl	$rk3,12+$MSB($inp)
+	lwr	$rk0,0+$LSB($inp)
+	lwr	$rk1,4+$LSB($inp)
+	lwr	$rk2,8+$LSB($inp)
+	lwr	$rk3,12+$LSB($inp)
+#endif
+	li	$at,128
+	.set	noreorder
+	beq	$bits,$at,.L128bits
+	li	$cnt,10
+
+	.set	reorder
+#if defined(_MIPS_ARCH_MIPS32R6) || defined(_MIPS_ARCH_MIPS64R6)
+	lw	$rk4,16($inp)		# load 192 bits
+	lw	$rk5,20($inp)
+#else
+	lwl	$rk4,16+$MSB($inp)	# load 192 bits
+	lwl	$rk5,20+$MSB($inp)
+	lwr	$rk4,16+$LSB($inp)
+	lwr	$rk5,20+$LSB($inp)
+#endif
+	li	$at,192
+	.set	noreorder
+	beq	$bits,$at,.L192bits
+	li	$cnt,8
+
+	.set	reorder
+#if defined(_MIPS_ARCH_MIPS32R6) || defined(_MIPS_ARCH_MIPS64R6)
+	lw	$rk6,24($inp)		# load 256 bits
+	lw	$rk7,28($inp)
+#else
+	lwl	$rk6,24+$MSB($inp)	# load 256 bits
+	lwl	$rk7,28+$MSB($inp)
+	lwr	$rk6,24+$LSB($inp)
+	lwr	$rk7,28+$LSB($inp)
+#endif
+	li	$at,256
+	.set	noreorder
+	beq	$bits,$at,.L256bits
+	li	$cnt,7
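+	# 10, 8 and 7 are the key-schedule iteration counts for 128-, 192-
+	# and 256-bit keys; any other $bits falls through to the -2 return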
+
+	b	.Lekey_done
+	li	$t0,-2
+
+.align	4
+.L128bits:
+	.set	reorder
+	srl	$i0,$rk3,16
+	srl	$i1,$rk3,8
+	and	$i0,0xff
+	and	$i1,0xff
+	and	$i2,$rk3,0xff
+	srl	$i3,$rk3,24
+	$PTR_ADD $i0,$Tbl
+	$PTR_ADD $i1,$Tbl
+	$PTR_ADD $i2,$Tbl
+	$PTR_ADD $i3,$Tbl
+	lbu	$i0,0($i0)
+	lbu	$i1,0($i1)
+	lbu	$i2,0($i2)
+	lbu	$i3,0($i3)
+
+	sw	$rk0,0($key)
+	sw	$rk1,4($key)
+	sw	$rk2,8($key)
+	sw	$rk3,12($key)
+	subu	$cnt,1
+	$PTR_ADD $key,16
+
+	_bias	$i0,24
+	_bias	$i1,16
+	_bias	$i2,8
+	_bias	$i3,0
+
+	xor	$rk0,$i0
+	lw	$i0,0($rcon)
+	xor	$rk0,$i1
+	xor	$rk0,$i2
+	xor	$rk0,$i3
+	xor	$rk0,$i0
+
+	xor	$rk1,$rk0
+	xor	$rk2,$rk1
+	xor	$rk3,$rk2
+
+	.set	noreorder
+	bnez	$cnt,.L128bits
+	$PTR_ADD $rcon,4
+
+	sw	$rk0,0($key)
+	sw	$rk1,4($key)
+	sw	$rk2,8($key)
+	li	$cnt,10
+	sw	$rk3,12($key)
+	li	$t0,0
+	sw	$cnt,80($key)
+	b	.Lekey_done
+	$PTR_SUB $key,10*16
+
+.align	4
+.L192bits:
+	.set	reorder
+	srl	$i0,$rk5,16
+	srl	$i1,$rk5,8
+	and	$i0,0xff
+	and	$i1,0xff
+	and	$i2,$rk5,0xff
+	srl	$i3,$rk5,24
+	$PTR_ADD $i0,$Tbl
+	$PTR_ADD $i1,$Tbl
+	$PTR_ADD $i2,$Tbl
+	$PTR_ADD $i3,$Tbl
+	lbu	$i0,0($i0)
+	lbu	$i1,0($i1)
+	lbu	$i2,0($i2)
+	lbu	$i3,0($i3)
+
+	sw	$rk0,0($key)
+	sw	$rk1,4($key)
+	sw	$rk2,8($key)
+	sw	$rk3,12($key)
+	sw	$rk4,16($key)
+	sw	$rk5,20($key)
+	subu	$cnt,1
+	$PTR_ADD $key,24
+
+	_bias	$i0,24
+	_bias	$i1,16
+	_bias	$i2,8
+	_bias	$i3,0
+
+	xor	$rk0,$i0
+	lw	$i0,0($rcon)
+	xor	$rk0,$i1
+	xor	$rk0,$i2
+	xor	$rk0,$i3
+	xor	$rk0,$i0
+
+	xor	$rk1,$rk0
+	xor	$rk2,$rk1
+	xor	$rk3,$rk2
+	xor	$rk4,$rk3
+	xor	$rk5,$rk4
+
+	.set	noreorder
+	bnez	$cnt,.L192bits
+	$PTR_ADD $rcon,4
+
+	sw	$rk0,0($key)
+	sw	$rk1,4($key)
+	sw	$rk2,8($key)
+	li	$cnt,12
+	sw	$rk3,12($key)
+	li	$t0,0
+	sw	$cnt,48($key)
+	b	.Lekey_done
+	$PTR_SUB $key,12*16
+
+.align	4
+.L256bits:
+	.set	reorder
+	srl	$i0,$rk7,16
+	srl	$i1,$rk7,8
+	and	$i0,0xff
+	and	$i1,0xff
+	and	$i2,$rk7,0xff
+	srl	$i3,$rk7,24
+	$PTR_ADD $i0,$Tbl
+	$PTR_ADD $i1,$Tbl
+	$PTR_ADD $i2,$Tbl
+	$PTR_ADD $i3,$Tbl
+	lbu	$i0,0($i0)
+	lbu	$i1,0($i1)
+	lbu	$i2,0($i2)
+	lbu	$i3,0($i3)
+
+	sw	$rk0,0($key)
+	sw	$rk1,4($key)
+	sw	$rk2,8($key)
+	sw	$rk3,12($key)
+	sw	$rk4,16($key)
+	sw	$rk5,20($key)
+	sw	$rk6,24($key)
+	sw	$rk7,28($key)
+	subu	$cnt,1
+
+	_bias	$i0,24
+	_bias	$i1,16
+	_bias	$i2,8
+	_bias	$i3,0
+
+	xor	$rk0,$i0
+	lw	$i0,0($rcon)
+	xor	$rk0,$i1
+	xor	$rk0,$i2
+	xor	$rk0,$i3
+	xor	$rk0,$i0
+
+	xor	$rk1,$rk0
+	xor	$rk2,$rk1
+	xor	$rk3,$rk2
+	beqz	$cnt,.L256bits_done
+
+	srl	$i0,$rk3,24
+	srl	$i1,$rk3,16
+	srl	$i2,$rk3,8
+	and	$i3,$rk3,0xff
+	and	$i1,0xff
+	and	$i2,0xff
+	$PTR_ADD $i0,$Tbl
+	$PTR_ADD $i1,$Tbl
+	$PTR_ADD $i2,$Tbl
+	$PTR_ADD $i3,$Tbl
+	lbu	$i0,0($i0)
+	lbu	$i1,0($i1)
+	lbu	$i2,0($i2)
+	lbu	$i3,0($i3)
+	sll	$i0,24
+	sll	$i1,16
+	sll	$i2,8
+
+	xor	$rk4,$i0
+	xor	$rk4,$i1
+	xor	$rk4,$i2
+	xor	$rk4,$i3
+
+	xor	$rk5,$rk4
+	xor	$rk6,$rk5
+	xor	$rk7,$rk6
+
+	$PTR_ADD $key,32
+	.set	noreorder
+	b	.L256bits
+	$PTR_ADD $rcon,4
+
+.L256bits_done:
+	sw	$rk0,32($key)
+	sw	$rk1,36($key)
+	sw	$rk2,40($key)
+	li	$cnt,14
+	sw	$rk3,44($key)
+	li	$t0,0
+	sw	$cnt,48($key)
+	$PTR_SUB $key,12*16
+
+.Lekey_done:
+	jr	$ra
+	nop
+.end	_mips_AES_set_encrypt_key
+
+.globl	AES_set_encrypt_key
+.ent	AES_set_encrypt_key
+AES_set_encrypt_key:
+	.frame	$sp,$FRAMESIZE,$ra
+	.mask	$SAVED_REGS_MASK,-$SZREG
+	.set	noreorder
+___
+$code.=<<___ if ($flavour =~ /o32/i);	# o32 PIC-ification
+	.cpload	$pf
+___
+$code.=<<___;
+	$PTR_SUB $sp,$FRAMESIZE
+	$REG_S	$ra,$FRAMESIZE-1*$SZREG($sp)
+	$REG_S	$fp,$FRAMESIZE-2*$SZREG($sp)
+___
+$code.=<<___ if ($flavour =~ /nubi/i);	# optimize non-nubi prologue
+	$REG_S	$s3,$FRAMESIZE-3*$SZREG($sp)
+	$REG_S	$s2,$FRAMESIZE-4*$SZREG($sp)
+	$REG_S	$s1,$FRAMESIZE-5*$SZREG($sp)
+	$REG_S	$s0,$FRAMESIZE-6*$SZREG($sp)
+	$REG_S	$gp,$FRAMESIZE-7*$SZREG($sp)
+___
+$code.=<<___ if ($flavour !~ /o32/i);	# non-o32 PIC-ification
+	.cplocal	$Tbl
+	.cpsetup	$pf,$zero,AES_set_encrypt_key
+___
+$code.=<<___;
+	.set	reorder
+	$PTR_LA	$Tbl,AES_Te4		# PIC-ified 'load address'
+
+	bal	_mips_AES_set_encrypt_key
+
+	.set	noreorder
+	move	$a0,$t0
+	$REG_L	$ra,$FRAMESIZE-1*$SZREG($sp)
+	$REG_L	$fp,$FRAMESIZE-2*$SZREG($sp)
+___
+$code.=<<___ if ($flavour =~ /nubi/i);
+	$REG_L	$s3,$FRAMESIZE-3*$SZREG($sp)
+	$REG_L	$s2,$FRAMESIZE-4*$SZREG($sp)
+	$REG_L	$s1,$FRAMESIZE-5*$SZREG($sp)
+	$REG_L	$s0,$FRAMESIZE-6*$SZREG($sp)
+	$REG_L	$gp,$FRAMESIZE-7*$SZREG($sp)

+___
+$code.=<<___;
+	jr	$ra
+	$PTR_ADD $sp,$FRAMESIZE
+.end	AES_set_encrypt_key
+___
+
+my ($head,$tail)=($inp,$bits);
+my ($tp1,$tp2,$tp4,$tp8,$tp9,$tpb,$tpd,$tpe)=($a4,$a5,$a6,$a7,$s0,$s1,$s2,$s3);
+my ($m,$x80808080,$x7f7f7f7f,$x1b1b1b1b)=($at,$t0,$t1,$t2);
+$code.=<<___;
+.align	5
+.globl	AES_set_decrypt_key
+.ent	AES_set_decrypt_key
+AES_set_decrypt_key:
+	.frame	$sp,$FRAMESIZE,$ra
+	.mask	$SAVED_REGS_MASK,-$SZREG
+	.set	noreorder
+___
+$code.=<<___ if ($flavour =~ /o32/i);	# o32 PIC-ification
+	.cpload	$pf
+___
+$code.=<<___;
+	$PTR_SUB $sp,$FRAMESIZE
+	$REG_S	$ra,$FRAMESIZE-1*$SZREG($sp)
+	$REG_S	$fp,$FRAMESIZE-2*$SZREG($sp)
+___
+$code.=<<___ if ($flavour =~ /nubi/i);	# optimize non-nubi prologue
+	$REG_S	$s3,$FRAMESIZE-3*$SZREG($sp)
+	$REG_S	$s2,$FRAMESIZE-4*$SZREG($sp)
+	$REG_S	$s1,$FRAMESIZE-5*$SZREG($sp)
+	$REG_S	$s0,$FRAMESIZE-6*$SZREG($sp)
+	$REG_S	$gp,$FRAMESIZE-7*$SZREG($sp)
+___
+$code.=<<___ if ($flavour !~ /o32/i);	# non-o32 PIC-ification
+	.cplocal	$Tbl
+	.cpsetup	$pf,$zero,AES_set_decrypt_key
+___
+$code.=<<___;
+	.set	reorder
+	$PTR_LA	$Tbl,AES_Te4		# PIC-ified 'load address'
+
+	bal	_mips_AES_set_encrypt_key
+
+	bltz	$t0,.Ldkey_done
+
+	sll	$at,$cnt,4
+	$PTR_ADD $head,$key,0
+	$PTR_ADD $tail,$key,$at
+.align	4
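+	# reverse the order of the round keys in place: the first 16-byte
+	# round key swaps with the last, the second with the next-to-last,
+	# and so on, meeting in the middle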
+.Lswap:
+	lw	$rk0,0($head)
+	lw	$rk1,4($head)
+	lw	$rk2,8($head)
+	lw	$rk3,12($head)
+	lw	$rk4,0($tail)
+	lw	$rk5,4($tail)
+	lw	$rk6,8($tail)
+	lw	$rk7,12($tail)
+	sw	$rk0,0($tail)
+	sw	$rk1,4($tail)
+	sw	$rk2,8($tail)
+	sw	$rk3,12($tail)
+	$PTR_ADD $head,16
+	$PTR_SUB $tail,16
+	sw	$rk4,-16($head)
+	sw	$rk5,-12($head)
+	sw	$rk6,-8($head)
+	sw	$rk7,-4($head)
+	bne	$head,$tail,.Lswap
+
+	lw	$tp1,16($key)		# modulo-scheduled
+	lui	$x80808080,0x8080
+	subu	$cnt,1
+	or	$x80808080,0x8080
+	sll	$cnt,2
+	$PTR_ADD $key,16
+	lui	$x1b1b1b1b,0x1b1b
+	nor	$x7f7f7f7f,$zero,$x80808080
+	or	$x1b1b1b1b,0x1b1b
+.align	4
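+	# each pass below is a packed xtime(): with m = x & 0x80808080, the
+	# bytewise GF(2^8) doubling is ((x & 0x7f7f7f7f)<<1) ^ ((m-(m>>7)) &
+	# 0x1b1b1b1b); tp2/tp4/tp8 are 2x/4x/8x of the round-key word, and
+	# the 9x/11x/13x/14x multiples needed by InvMixColumns are XOR
+	# combinations of those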
+.Lmix:
+	and	$m,$tp1,$x80808080
+	and	$tp2,$tp1,$x7f7f7f7f
+	srl	$tp4,$m,7
+	addu	$tp2,$tp2		# tp2<<1
+	subu	$m,$tp4
+	and	$m,$x1b1b1b1b
+	xor	$tp2,$m
+
+	and	$m,$tp2,$x80808080
+	and	$tp4,$tp2,$x7f7f7f7f
+	srl	$tp8,$m,7
+	addu	$tp4,$tp4		# tp4<<1
+	subu	$m,$tp8
+	and	$m,$x1b1b1b1b
+	xor	$tp4,$m
+
+	and	$m,$tp4,$x80808080
+	and	$tp8,$tp4,$x7f7f7f7f
+	srl	$tp9,$m,7
+	addu	$tp8,$tp8		# tp8<<1
+	subu	$m,$tp9
+	and	$m,$x1b1b1b1b
+	xor	$tp8,$m
+
+	xor	$tp9,$tp8,$tp1
+	xor	$tpe,$tp8,$tp4
+	xor	$tpb,$tp9,$tp2
+	xor	$tpd,$tp9,$tp4
+
+#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS64R2)
+	rotr	$tp1,$tpd,16
+	 xor	$tpe,$tp2
+	rotr	$tp2,$tp9,8
+	xor	$tpe,$tp1
+	rotr	$tp4,$tpb,24
+	xor	$tpe,$tp2
+	lw	$tp1,4($key)		# modulo-scheduled
+	xor	$tpe,$tp4
+#else
+	_ror	$tp1,$tpd,16
+	 xor	$tpe,$tp2
+	_ror	$tp2,$tpd,-16
+	xor	$tpe,$tp1
+	_ror	$tp1,$tp9,8
+	xor	$tpe,$tp2
+	_ror	$tp2,$tp9,-24
+	xor	$tpe,$tp1
+	_ror	$tp1,$tpb,24
+	xor	$tpe,$tp2
+	_ror	$tp2,$tpb,-8
+	xor	$tpe,$tp1
+	lw	$tp1,4($key)		# modulo-scheduled
+	xor	$tpe,$tp2
+#endif
+	subu	$cnt,1
+	sw	$tpe,0($key)
+	$PTR_ADD $key,4
+	bnez	$cnt,.Lmix
+
+	li	$t0,0
+.Ldkey_done:
+	.set	noreorder
+	move	$a0,$t0
+	$REG_L	$ra,$FRAMESIZE-1*$SZREG($sp)
+	$REG_L	$fp,$FRAMESIZE-2*$SZREG($sp)
+___
+$code.=<<___ if ($flavour =~ /nubi/i);
+	$REG_L	$s3,$FRAMESIZE-3*$SZREG($sp)
+	$REG_L	$s2,$FRAMESIZE-4*$SZREG($sp)
+	$REG_L	$s1,$FRAMESIZE-5*$SZREG($sp)
+	$REG_L	$s0,$FRAMESIZE-6*$SZREG($sp)
+	$REG_L	$gp,$FRAMESIZE-7*$SZREG($sp)
+___
+$code.=<<___;
+	jr	$ra
+	$PTR_ADD $sp,$FRAMESIZE
+.end	AES_set_decrypt_key
+___
+}}}
+
+######################################################################
+# Tables are kept in an endian-neutral manner
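+# Each entry is spelled out with .byte so the same .rdata section
+# assembles on either endianness: the first Te0 entry below, for
+# instance, reads back as 0xc66363a5 with a big-endian lw but as
+# 0xa56363c6 with a little-endian one, which the post-processing loop
+# at the end of this file compensates for when it rewrites rotates and
+# byte extracts.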
+$code.=<<___;
+.rdata
+.align	10
+AES_Te:
+.byte	0xc6,0x63,0x63,0xa5,	0xf8,0x7c,0x7c,0x84	# Te0
+.byte	0xee,0x77,0x77,0x99,	0xf6,0x7b,0x7b,0x8d
+.byte	0xff,0xf2,0xf2,0x0d,	0xd6,0x6b,0x6b,0xbd
+.byte	0xde,0x6f,0x6f,0xb1,	0x91,0xc5,0xc5,0x54
+.byte	0x60,0x30,0x30,0x50,	0x02,0x01,0x01,0x03
+.byte	0xce,0x67,0x67,0xa9,	0x56,0x2b,0x2b,0x7d
+.byte	0xe7,0xfe,0xfe,0x19,	0xb5,0xd7,0xd7,0x62
+.byte	0x4d,0xab,0xab,0xe6,	0xec,0x76,0x76,0x9a
+.byte	0x8f,0xca,0xca,0x45,	0x1f,0x82,0x82,0x9d
+.byte	0x89,0xc9,0xc9,0x40,	0xfa,0x7d,0x7d,0x87
+.byte	0xef,0xfa,0xfa,0x15,	0xb2,0x59,0x59,0xeb
+.byte	0x8e,0x47,0x47,0xc9,	0xfb,0xf0,0xf0,0x0b
+.byte	0x41,0xad,0xad,0xec,	0xb3,0xd4,0xd4,0x67
+.byte	0x5f,0xa2,0xa2,0xfd,	0x45,0xaf,0xaf,0xea
+.byte	0x23,0x9c,0x9c,0xbf,	0x53,0xa4,0xa4,0xf7
+.byte	0xe4,0x72,0x72,0x96,	0x9b,0xc0,0xc0,0x5b
+.byte	0x75,0xb7,0xb7,0xc2,	0xe1,0xfd,0xfd,0x1c
+.byte	0x3d,0x93,0x93,0xae,	0x4c,0x26,0x26,0x6a
+.byte	0x6c,0x36,0x36,0x5a,	0x7e,0x3f,0x3f,0x41
+.byte	0xf5,0xf7,0xf7,0x02,	0x83,0xcc,0xcc,0x4f
+.byte	0x68,0x34,0x34,0x5c,	0x51,0xa5,0xa5,0xf4
+.byte	0xd1,0xe5,0xe5,0x34,	0xf9,0xf1,0xf1,0x08
+.byte	0xe2,0x71,0x71,0x93,	0xab,0xd8,0xd8,0x73
+.byte	0x62,0x31,0x31,0x53,	0x2a,0x15,0x15,0x3f
+.byte	0x08,0x04,0x04,0x0c,	0x95,0xc7,0xc7,0x52
+.byte	0x46,0x23,0x23,0x65,	0x9d,0xc3,0xc3,0x5e
+.byte	0x30,0x18,0x18,0x28,	0x37,0x96,0x96,0xa1
+.byte	0x0a,0x05,0x05,0x0f,	0x2f,0x9a,0x9a,0xb5
+.byte	0x0e,0x07,0x07,0x09,	0x24,0x12,0x12,0x36
+.byte	0x1b,0x80,0x80,0x9b,	0xdf,0xe2,0xe2,0x3d
+.byte	0xcd,0xeb,0xeb,0x26,	0x4e,0x27,0x27,0x69
+.byte	0x7f,0xb2,0xb2,0xcd,	0xea,0x75,0x75,0x9f
+.byte	0x12,0x09,0x09,0x1b,	0x1d,0x83,0x83,0x9e
+.byte	0x58,0x2c,0x2c,0x74,	0x34,0x1a,0x1a,0x2e
+.byte	0x36,0x1b,0x1b,0x2d,	0xdc,0x6e,0x6e,0xb2
+.byte	0xb4,0x5a,0x5a,0xee,	0x5b,0xa0,0xa0,0xfb
+.byte	0xa4,0x52,0x52,0xf6,	0x76,0x3b,0x3b,0x4d
+.byte	0xb7,0xd6,0xd6,0x61,	0x7d,0xb3,0xb3,0xce
+.byte	0x52,0x29,0x29,0x7b,	0xdd,0xe3,0xe3,0x3e
+.byte	0x5e,0x2f,0x2f,0x71,	0x13,0x84,0x84,0x97
+.byte	0xa6,0x53,0x53,0xf5,	0xb9,0xd1,0xd1,0x68
+.byte	0x00,0x00,0x00,0x00,	0xc1,0xed,0xed,0x2c
+.byte	0x40,0x20,0x20,0x60,	0xe3,0xfc,0xfc,0x1f
+.byte	0x79,0xb1,0xb1,0xc8,	0xb6,0x5b,0x5b,0xed
+.byte	0xd4,0x6a,0x6a,0xbe,	0x8d,0xcb,0xcb,0x46
+.byte	0x67,0xbe,0xbe,0xd9,	0x72,0x39,0x39,0x4b
+.byte	0x94,0x4a,0x4a,0xde,	0x98,0x4c,0x4c,0xd4
+.byte	0xb0,0x58,0x58,0xe8,	0x85,0xcf,0xcf,0x4a
+.byte	0xbb,0xd0,0xd0,0x6b,	0xc5,0xef,0xef,0x2a
+.byte	0x4f,0xaa,0xaa,0xe5,	0xed,0xfb,0xfb,0x16
+.byte	0x86,0x43,0x43,0xc5,	0x9a,0x4d,0x4d,0xd7
+.byte	0x66,0x33,0x33,0x55,	0x11,0x85,0x85,0x94
+.byte	0x8a,0x45,0x45,0xcf,	0xe9,0xf9,0xf9,0x10
+.byte	0x04,0x02,0x02,0x06,	0xfe,0x7f,0x7f,0x81
+.byte	0xa0,0x50,0x50,0xf0,	0x78,0x3c,0x3c,0x44
+.byte	0x25,0x9f,0x9f,0xba,	0x4b,0xa8,0xa8,0xe3
+.byte	0xa2,0x51,0x51,0xf3,	0x5d,0xa3,0xa3,0xfe
+.byte	0x80,0x40,0x40,0xc0,	0x05,0x8f,0x8f,0x8a
+.byte	0x3f,0x92,0x92,0xad,	0x21,0x9d,0x9d,0xbc
+.byte	0x70,0x38,0x38,0x48,	0xf1,0xf5,0xf5,0x04
+.byte	0x63,0xbc,0xbc,0xdf,	0x77,0xb6,0xb6,0xc1
+.byte	0xaf,0xda,0xda,0x75,	0x42,0x21,0x21,0x63
+.byte	0x20,0x10,0x10,0x30,	0xe5,0xff,0xff,0x1a
+.byte	0xfd,0xf3,0xf3,0x0e,	0xbf,0xd2,0xd2,0x6d
+.byte	0x81,0xcd,0xcd,0x4c,	0x18,0x0c,0x0c,0x14
+.byte	0x26,0x13,0x13,0x35,	0xc3,0xec,0xec,0x2f
+.byte	0xbe,0x5f,0x5f,0xe1,	0x35,0x97,0x97,0xa2
+.byte	0x88,0x44,0x44,0xcc,	0x2e,0x17,0x17,0x39
+.byte	0x93,0xc4,0xc4,0x57,	0x55,0xa7,0xa7,0xf2
+.byte	0xfc,0x7e,0x7e,0x82,	0x7a,0x3d,0x3d,0x47
+.byte	0xc8,0x64,0x64,0xac,	0xba,0x5d,0x5d,0xe7
+.byte	0x32,0x19,0x19,0x2b,	0xe6,0x73,0x73,0x95
+.byte	0xc0,0x60,0x60,0xa0,	0x19,0x81,0x81,0x98
+.byte	0x9e,0x4f,0x4f,0xd1,	0xa3,0xdc,0xdc,0x7f
+.byte	0x44,0x22,0x22,0x66,	0x54,0x2a,0x2a,0x7e
+.byte	0x3b,0x90,0x90,0xab,	0x0b,0x88,0x88,0x83
+.byte	0x8c,0x46,0x46,0xca,	0xc7,0xee,0xee,0x29
+.byte	0x6b,0xb8,0xb8,0xd3,	0x28,0x14,0x14,0x3c
+.byte	0xa7,0xde,0xde,0x79,	0xbc,0x5e,0x5e,0xe2
+.byte	0x16,0x0b,0x0b,0x1d,	0xad,0xdb,0xdb,0x76
+.byte	0xdb,0xe0,0xe0,0x3b,	0x64,0x32,0x32,0x56
+.byte	0x74,0x3a,0x3a,0x4e,	0x14,0x0a,0x0a,0x1e
+.byte	0x92,0x49,0x49,0xdb,	0x0c,0x06,0x06,0x0a
+.byte	0x48,0x24,0x24,0x6c,	0xb8,0x5c,0x5c,0xe4
+.byte	0x9f,0xc2,0xc2,0x5d,	0xbd,0xd3,0xd3,0x6e
+.byte	0x43,0xac,0xac,0xef,	0xc4,0x62,0x62,0xa6
+.byte	0x39,0x91,0x91,0xa8,	0x31,0x95,0x95,0xa4
+.byte	0xd3,0xe4,0xe4,0x37,	0xf2,0x79,0x79,0x8b
+.byte	0xd5,0xe7,0xe7,0x32,	0x8b,0xc8,0xc8,0x43
+.byte	0x6e,0x37,0x37,0x59,	0xda,0x6d,0x6d,0xb7
+.byte	0x01,0x8d,0x8d,0x8c,	0xb1,0xd5,0xd5,0x64
+.byte	0x9c,0x4e,0x4e,0xd2,	0x49,0xa9,0xa9,0xe0
+.byte	0xd8,0x6c,0x6c,0xb4,	0xac,0x56,0x56,0xfa
+.byte	0xf3,0xf4,0xf4,0x07,	0xcf,0xea,0xea,0x25
+.byte	0xca,0x65,0x65,0xaf,	0xf4,0x7a,0x7a,0x8e
+.byte	0x47,0xae,0xae,0xe9,	0x10,0x08,0x08,0x18
+.byte	0x6f,0xba,0xba,0xd5,	0xf0,0x78,0x78,0x88
+.byte	0x4a,0x25,0x25,0x6f,	0x5c,0x2e,0x2e,0x72
+.byte	0x38,0x1c,0x1c,0x24,	0x57,0xa6,0xa6,0xf1
+.byte	0x73,0xb4,0xb4,0xc7,	0x97,0xc6,0xc6,0x51
+.byte	0xcb,0xe8,0xe8,0x23,	0xa1,0xdd,0xdd,0x7c
+.byte	0xe8,0x74,0x74,0x9c,	0x3e,0x1f,0x1f,0x21
+.byte	0x96,0x4b,0x4b,0xdd,	0x61,0xbd,0xbd,0xdc
+.byte	0x0d,0x8b,0x8b,0x86,	0x0f,0x8a,0x8a,0x85
+.byte	0xe0,0x70,0x70,0x90,	0x7c,0x3e,0x3e,0x42
+.byte	0x71,0xb5,0xb5,0xc4,	0xcc,0x66,0x66,0xaa
+.byte	0x90,0x48,0x48,0xd8,	0x06,0x03,0x03,0x05
+.byte	0xf7,0xf6,0xf6,0x01,	0x1c,0x0e,0x0e,0x12
+.byte	0xc2,0x61,0x61,0xa3,	0x6a,0x35,0x35,0x5f
+.byte	0xae,0x57,0x57,0xf9,	0x69,0xb9,0xb9,0xd0
+.byte	0x17,0x86,0x86,0x91,	0x99,0xc1,0xc1,0x58
+.byte	0x3a,0x1d,0x1d,0x27,	0x27,0x9e,0x9e,0xb9
+.byte	0xd9,0xe1,0xe1,0x38,	0xeb,0xf8,0xf8,0x13
+.byte	0x2b,0x98,0x98,0xb3,	0x22,0x11,0x11,0x33
+.byte	0xd2,0x69,0x69,0xbb,	0xa9,0xd9,0xd9,0x70
+.byte	0x07,0x8e,0x8e,0x89,	0x33,0x94,0x94,0xa7
+.byte	0x2d,0x9b,0x9b,0xb6,	0x3c,0x1e,0x1e,0x22
+.byte	0x15,0x87,0x87,0x92,	0xc9,0xe9,0xe9,0x20
+.byte	0x87,0xce,0xce,0x49,	0xaa,0x55,0x55,0xff
+.byte	0x50,0x28,0x28,0x78,	0xa5,0xdf,0xdf,0x7a
+.byte	0x03,0x8c,0x8c,0x8f,	0x59,0xa1,0xa1,0xf8
+.byte	0x09,0x89,0x89,0x80,	0x1a,0x0d,0x0d,0x17
+.byte	0x65,0xbf,0xbf,0xda,	0xd7,0xe6,0xe6,0x31
+.byte	0x84,0x42,0x42,0xc6,	0xd0,0x68,0x68,0xb8
+.byte	0x82,0x41,0x41,0xc3,	0x29,0x99,0x99,0xb0
+.byte	0x5a,0x2d,0x2d,0x77,	0x1e,0x0f,0x0f,0x11
+.byte	0x7b,0xb0,0xb0,0xcb,	0xa8,0x54,0x54,0xfc
+.byte	0x6d,0xbb,0xbb,0xd6,	0x2c,0x16,0x16,0x3a
+
+AES_Td:
+.byte	0x51,0xf4,0xa7,0x50,	0x7e,0x41,0x65,0x53	# Td0
+.byte	0x1a,0x17,0xa4,0xc3,	0x3a,0x27,0x5e,0x96
+.byte	0x3b,0xab,0x6b,0xcb,	0x1f,0x9d,0x45,0xf1
+.byte	0xac,0xfa,0x58,0xab,	0x4b,0xe3,0x03,0x93
+.byte	0x20,0x30,0xfa,0x55,	0xad,0x76,0x6d,0xf6
+.byte	0x88,0xcc,0x76,0x91,	0xf5,0x02,0x4c,0x25
+.byte	0x4f,0xe5,0xd7,0xfc,	0xc5,0x2a,0xcb,0xd7
+.byte	0x26,0x35,0x44,0x80,	0xb5,0x62,0xa3,0x8f
+.byte	0xde,0xb1,0x5a,0x49,	0x25,0xba,0x1b,0x67
+.byte	0x45,0xea,0x0e,0x98,	0x5d,0xfe,0xc0,0xe1
+.byte	0xc3,0x2f,0x75,0x02,	0x81,0x4c,0xf0,0x12
+.byte	0x8d,0x46,0x97,0xa3,	0x6b,0xd3,0xf9,0xc6
+.byte	0x03,0x8f,0x5f,0xe7,	0x15,0x92,0x9c,0x95
+.byte	0xbf,0x6d,0x7a,0xeb,	0x95,0x52,0x59,0xda
+.byte	0xd4,0xbe,0x83,0x2d,	0x58,0x74,0x21,0xd3
+.byte	0x49,0xe0,0x69,0x29,	0x8e,0xc9,0xc8,0x44
+.byte	0x75,0xc2,0x89,0x6a,	0xf4,0x8e,0x79,0x78
+.byte	0x99,0x58,0x3e,0x6b,	0x27,0xb9,0x71,0xdd
+.byte	0xbe,0xe1,0x4f,0xb6,	0xf0,0x88,0xad,0x17
+.byte	0xc9,0x20,0xac,0x66,	0x7d,0xce,0x3a,0xb4
+.byte	0x63,0xdf,0x4a,0x18,	0xe5,0x1a,0x31,0x82
+.byte	0x97,0x51,0x33,0x60,	0x62,0x53,0x7f,0x45
+.byte	0xb1,0x64,0x77,0xe0,	0xbb,0x6b,0xae,0x84
+.byte	0xfe,0x81,0xa0,0x1c,	0xf9,0x08,0x2b,0x94
+.byte	0x70,0x48,0x68,0x58,	0x8f,0x45,0xfd,0x19
+.byte	0x94,0xde,0x6c,0x87,	0x52,0x7b,0xf8,0xb7
+.byte	0xab,0x73,0xd3,0x23,	0x72,0x4b,0x02,0xe2
+.byte	0xe3,0x1f,0x8f,0x57,	0x66,0x55,0xab,0x2a
+.byte	0xb2,0xeb,0x28,0x07,	0x2f,0xb5,0xc2,0x03
+.byte	0x86,0xc5,0x7b,0x9a,	0xd3,0x37,0x08,0xa5
+.byte	0x30,0x28,0x87,0xf2,	0x23,0xbf,0xa5,0xb2
+.byte	0x02,0x03,0x6a,0xba,	0xed,0x16,0x82,0x5c
+.byte	0x8a,0xcf,0x1c,0x2b,	0xa7,0x79,0xb4,0x92
+.byte	0xf3,0x07,0xf2,0xf0,	0x4e,0x69,0xe2,0xa1
+.byte	0x65,0xda,0xf4,0xcd,	0x06,0x05,0xbe,0xd5
+.byte	0xd1,0x34,0x62,0x1f,	0xc4,0xa6,0xfe,0x8a
+.byte	0x34,0x2e,0x53,0x9d,	0xa2,0xf3,0x55,0xa0
+.byte	0x05,0x8a,0xe1,0x32,	0xa4,0xf6,0xeb,0x75
+.byte	0x0b,0x83,0xec,0x39,	0x40,0x60,0xef,0xaa
+.byte	0x5e,0x71,0x9f,0x06,	0xbd,0x6e,0x10,0x51
+.byte	0x3e,0x21,0x8a,0xf9,	0x96,0xdd,0x06,0x3d
+.byte	0xdd,0x3e,0x05,0xae,	0x4d,0xe6,0xbd,0x46
+.byte	0x91,0x54,0x8d,0xb5,	0x71,0xc4,0x5d,0x05
+.byte	0x04,0x06,0xd4,0x6f,	0x60,0x50,0x15,0xff
+.byte	0x19,0x98,0xfb,0x24,	0xd6,0xbd,0xe9,0x97
+.byte	0x89,0x40,0x43,0xcc,	0x67,0xd9,0x9e,0x77
+.byte	0xb0,0xe8,0x42,0xbd,	0x07,0x89,0x8b,0x88
+.byte	0xe7,0x19,0x5b,0x38,	0x79,0xc8,0xee,0xdb
+.byte	0xa1,0x7c,0x0a,0x47,	0x7c,0x42,0x0f,0xe9
+.byte	0xf8,0x84,0x1e,0xc9,	0x00,0x00,0x00,0x00
+.byte	0x09,0x80,0x86,0x83,	0x32,0x2b,0xed,0x48
+.byte	0x1e,0x11,0x70,0xac,	0x6c,0x5a,0x72,0x4e
+.byte	0xfd,0x0e,0xff,0xfb,	0x0f,0x85,0x38,0x56
+.byte	0x3d,0xae,0xd5,0x1e,	0x36,0x2d,0x39,0x27
+.byte	0x0a,0x0f,0xd9,0x64,	0x68,0x5c,0xa6,0x21
+.byte	0x9b,0x5b,0x54,0xd1,	0x24,0x36,0x2e,0x3a
+.byte	0x0c,0x0a,0x67,0xb1,	0x93,0x57,0xe7,0x0f
+.byte	0xb4,0xee,0x96,0xd2,	0x1b,0x9b,0x91,0x9e
+.byte	0x80,0xc0,0xc5,0x4f,	0x61,0xdc,0x20,0xa2
+.byte	0x5a,0x77,0x4b,0x69,	0x1c,0x12,0x1a,0x16
+.byte	0xe2,0x93,0xba,0x0a,	0xc0,0xa0,0x2a,0xe5
+.byte	0x3c,0x22,0xe0,0x43,	0x12,0x1b,0x17,0x1d
+.byte	0x0e,0x09,0x0d,0x0b,	0xf2,0x8b,0xc7,0xad
+.byte	0x2d,0xb6,0xa8,0xb9,	0x14,0x1e,0xa9,0xc8
+.byte	0x57,0xf1,0x19,0x85,	0xaf,0x75,0x07,0x4c
+.byte	0xee,0x99,0xdd,0xbb,	0xa3,0x7f,0x60,0xfd
+.byte	0xf7,0x01,0x26,0x9f,	0x5c,0x72,0xf5,0xbc
+.byte	0x44,0x66,0x3b,0xc5,	0x5b,0xfb,0x7e,0x34
+.byte	0x8b,0x43,0x29,0x76,	0xcb,0x23,0xc6,0xdc
+.byte	0xb6,0xed,0xfc,0x68,	0xb8,0xe4,0xf1,0x63
+.byte	0xd7,0x31,0xdc,0xca,	0x42,0x63,0x85,0x10
+.byte	0x13,0x97,0x22,0x40,	0x84,0xc6,0x11,0x20
+.byte	0x85,0x4a,0x24,0x7d,	0xd2,0xbb,0x3d,0xf8
+.byte	0xae,0xf9,0x32,0x11,	0xc7,0x29,0xa1,0x6d
+.byte	0x1d,0x9e,0x2f,0x4b,	0xdc,0xb2,0x30,0xf3
+.byte	0x0d,0x86,0x52,0xec,	0x77,0xc1,0xe3,0xd0
+.byte	0x2b,0xb3,0x16,0x6c,	0xa9,0x70,0xb9,0x99
+.byte	0x11,0x94,0x48,0xfa,	0x47,0xe9,0x64,0x22
+.byte	0xa8,0xfc,0x8c,0xc4,	0xa0,0xf0,0x3f,0x1a
+.byte	0x56,0x7d,0x2c,0xd8,	0x22,0x33,0x90,0xef
+.byte	0x87,0x49,0x4e,0xc7,	0xd9,0x38,0xd1,0xc1
+.byte	0x8c,0xca,0xa2,0xfe,	0x98,0xd4,0x0b,0x36
+.byte	0xa6,0xf5,0x81,0xcf,	0xa5,0x7a,0xde,0x28
+.byte	0xda,0xb7,0x8e,0x26,	0x3f,0xad,0xbf,0xa4
+.byte	0x2c,0x3a,0x9d,0xe4,	0x50,0x78,0x92,0x0d
+.byte	0x6a,0x5f,0xcc,0x9b,	0x54,0x7e,0x46,0x62
+.byte	0xf6,0x8d,0x13,0xc2,	0x90,0xd8,0xb8,0xe8
+.byte	0x2e,0x39,0xf7,0x5e,	0x82,0xc3,0xaf,0xf5
+.byte	0x9f,0x5d,0x80,0xbe,	0x69,0xd0,0x93,0x7c
+.byte	0x6f,0xd5,0x2d,0xa9,	0xcf,0x25,0x12,0xb3
+.byte	0xc8,0xac,0x99,0x3b,	0x10,0x18,0x7d,0xa7
+.byte	0xe8,0x9c,0x63,0x6e,	0xdb,0x3b,0xbb,0x7b
+.byte	0xcd,0x26,0x78,0x09,	0x6e,0x59,0x18,0xf4
+.byte	0xec,0x9a,0xb7,0x01,	0x83,0x4f,0x9a,0xa8
+.byte	0xe6,0x95,0x6e,0x65,	0xaa,0xff,0xe6,0x7e
+.byte	0x21,0xbc,0xcf,0x08,	0xef,0x15,0xe8,0xe6
+.byte	0xba,0xe7,0x9b,0xd9,	0x4a,0x6f,0x36,0xce
+.byte	0xea,0x9f,0x09,0xd4,	0x29,0xb0,0x7c,0xd6
+.byte	0x31,0xa4,0xb2,0xaf,	0x2a,0x3f,0x23,0x31
+.byte	0xc6,0xa5,0x94,0x30,	0x35,0xa2,0x66,0xc0
+.byte	0x74,0x4e,0xbc,0x37,	0xfc,0x82,0xca,0xa6
+.byte	0xe0,0x90,0xd0,0xb0,	0x33,0xa7,0xd8,0x15
+.byte	0xf1,0x04,0x98,0x4a,	0x41,0xec,0xda,0xf7
+.byte	0x7f,0xcd,0x50,0x0e,	0x17,0x91,0xf6,0x2f
+.byte	0x76,0x4d,0xd6,0x8d,	0x43,0xef,0xb0,0x4d
+.byte	0xcc,0xaa,0x4d,0x54,	0xe4,0x96,0x04,0xdf
+.byte	0x9e,0xd1,0xb5,0xe3,	0x4c,0x6a,0x88,0x1b
+.byte	0xc1,0x2c,0x1f,0xb8,	0x46,0x65,0x51,0x7f
+.byte	0x9d,0x5e,0xea,0x04,	0x01,0x8c,0x35,0x5d
+.byte	0xfa,0x87,0x74,0x73,	0xfb,0x0b,0x41,0x2e
+.byte	0xb3,0x67,0x1d,0x5a,	0x92,0xdb,0xd2,0x52
+.byte	0xe9,0x10,0x56,0x33,	0x6d,0xd6,0x47,0x13
+.byte	0x9a,0xd7,0x61,0x8c,	0x37,0xa1,0x0c,0x7a
+.byte	0x59,0xf8,0x14,0x8e,	0xeb,0x13,0x3c,0x89
+.byte	0xce,0xa9,0x27,0xee,	0xb7,0x61,0xc9,0x35
+.byte	0xe1,0x1c,0xe5,0xed,	0x7a,0x47,0xb1,0x3c
+.byte	0x9c,0xd2,0xdf,0x59,	0x55,0xf2,0x73,0x3f
+.byte	0x18,0x14,0xce,0x79,	0x73,0xc7,0x37,0xbf
+.byte	0x53,0xf7,0xcd,0xea,	0x5f,0xfd,0xaa,0x5b
+.byte	0xdf,0x3d,0x6f,0x14,	0x78,0x44,0xdb,0x86
+.byte	0xca,0xaf,0xf3,0x81,	0xb9,0x68,0xc4,0x3e
+.byte	0x38,0x24,0x34,0x2c,	0xc2,0xa3,0x40,0x5f
+.byte	0x16,0x1d,0xc3,0x72,	0xbc,0xe2,0x25,0x0c
+.byte	0x28,0x3c,0x49,0x8b,	0xff,0x0d,0x95,0x41
+.byte	0x39,0xa8,0x01,0x71,	0x08,0x0c,0xb3,0xde
+.byte	0xd8,0xb4,0xe4,0x9c,	0x64,0x56,0xc1,0x90
+.byte	0x7b,0xcb,0x84,0x61,	0xd5,0x32,0xb6,0x70
+.byte	0x48,0x6c,0x5c,0x74,	0xd0,0xb8,0x57,0x42
+
+.byte	0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38	# Td4
+.byte	0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb
+.byte	0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87
+.byte	0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb
+.byte	0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d
+.byte	0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e
+.byte	0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2
+.byte	0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25
+.byte	0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16
+.byte	0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92
+.byte	0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda
+.byte	0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84
+.byte	0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a
+.byte	0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06
+.byte	0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02
+.byte	0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b
+.byte	0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea
+.byte	0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73
+.byte	0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85
+.byte	0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e
+.byte	0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89
+.byte	0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b
+.byte	0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20
+.byte	0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4
+.byte	0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31
+.byte	0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f
+.byte	0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d
+.byte	0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef
+.byte	0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0
+.byte	0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61
+.byte	0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26
+.byte	0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d
+
+AES_Te4:
+.byte	0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5	# Te4
+.byte	0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76
+.byte	0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0
+.byte	0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0
+.byte	0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc
+.byte	0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15
+.byte	0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a
+.byte	0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75
+.byte	0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0
+.byte	0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84
+.byte	0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b
+.byte	0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf
+.byte	0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85
+.byte	0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8
+.byte	0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5
+.byte	0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2
+.byte	0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17
+.byte	0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73
+.byte	0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88
+.byte	0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb
+.byte	0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c
+.byte	0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79
+.byte	0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9
+.byte	0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08
+.byte	0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6
+.byte	0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a
+.byte	0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e
+.byte	0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e
+.byte	0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94
+.byte	0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf
+.byte	0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68
+.byte	0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16
+
+.byte	0x01,0x00,0x00,0x00,	0x02,0x00,0x00,0x00	# rcon
+.byte	0x04,0x00,0x00,0x00,	0x08,0x00,0x00,0x00
+.byte	0x10,0x00,0x00,0x00,	0x20,0x00,0x00,0x00
+.byte	0x40,0x00,0x00,0x00,	0x80,0x00,0x00,0x00
+.byte	0x1B,0x00,0x00,0x00,	0x36,0x00,0x00,0x00
+___
+
+foreach (split("\n",$code)) {
+	s/\`([^\`]*)\`/eval $1/ge;
+
+	# made-up instructions _xtr, _ins, _ins2, _ror and _bias cope
+	# with byte-order dependencies...
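+	# for example "_xtr $1,$2,16-2" expands to "srl $1,$2,14" on
+	# big-endian targets but to "srl $1,$2,6" (eval("24-16-2")) on
+	# little-endian ones; the "-2" pre-scales the extracted byte for
+	# the "and 0x3fc" word-index masking that follows each extract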
+	if (/^\s+_/) {
+	    s/(_[a-z]+\s+)(\$[0-9]+),([^,]+)(#.*)*$/$1$2,$2,$3/;
+
+	    s/_xtr\s+(\$[0-9]+),(\$[0-9]+),([0-9]+(\-2)*)/
+		sprintf("srl\t$1,$2,%d",$big_endian ?	eval($3)
+					:		eval("24-$3"))/e or
+	    s/_ins\s+(\$[0-9]+),(\$[0-9]+),([0-9]+)/
+		sprintf("sll\t$1,$2,%d",$big_endian ?	eval($3)
+					:		eval("24-$3"))/e or
+	    s/_ins2\s+(\$[0-9]+),(\$[0-9]+),([0-9]+)/
+		sprintf("ins\t$1,$2,%d,8",$big_endian ?	eval($3)
+					:		eval("24-$3"))/e or
+	    s/_ror\s+(\$[0-9]+),(\$[0-9]+),(\-?[0-9]+)/
+		sprintf("srl\t$1,$2,%d",$big_endian ?	eval($3)
+					:		eval("$3*-1"))/e or
+	    s/_bias\s+(\$[0-9]+),(\$[0-9]+),([0-9]+)/
+		sprintf("sll\t$1,$2,%d",$big_endian ?	eval($3)
+					:		eval("($3-16)&31"))/e;
+
+	    s/srl\s+(\$[0-9]+),(\$[0-9]+),\-([0-9]+)/
+		sprintf("sll\t$1,$2,$3")/e				or
+	    s/srl\s+(\$[0-9]+),(\$[0-9]+),0/
+		sprintf("and\t$1,$2,0xff")/e				or
+	    s/(sll\s+\$[0-9]+,\$[0-9]+,0)/#$1/;
+	}
+
+	# convert lwl/lwr and swr/swl to little-endian order
+	if (!$big_endian && /^\s+[sl]w[lr]\s+/) {
+	    s/([sl]wl.*)([0-9]+)\((\$[0-9]+)\)/
+		sprintf("$1%d($3)",eval("$2-$2%4+($2%4-1)&3"))/e	or
+	    s/([sl]wr.*)([0-9]+)\((\$[0-9]+)\)/
+		sprintf("$1%d($3)",eval("$2-$2%4+($2%4+1)&3"))/e;
+	}
+
+	if (!$big_endian) {
+	    s/(rotr\s+\$[0-9]+,\$[0-9]+),([0-9]+)/sprintf("$1,%d",32-$2)/e;
+	    s/(ext\s+\$[0-9]+,\$[0-9]+),([0-9]+),8/sprintf("$1,%d,8",24-$2)/e;
+	}
+
+	print $_,"\n";
+}
+
+close STDOUT or die "error closing STDOUT: $!";
diff --git a/ap/lib/libssl/openssl-1.1.1o/crypto/aes/asm/aes-parisc.pl b/ap/lib/libssl/openssl-1.1.1o/crypto/aes/asm/aes-parisc.pl
new file mode 100644
index 0000000..5b07fac
--- /dev/null
+++ b/ap/lib/libssl/openssl-1.1.1o/crypto/aes/asm/aes-parisc.pl
@@ -0,0 +1,1038 @@
+#! /usr/bin/env perl
+# Copyright 2009-2020 The OpenSSL Project Authors. All Rights Reserved.
+#
+# Licensed under the OpenSSL license (the "License").  You may not use
+# this file except in compliance with the License.  You can obtain a copy
+# in the file LICENSE in the source distribution or at
+# https://www.openssl.org/source/license.html
+
+
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+
+# AES for PA-RISC.
+#
+# June 2009.
+#
+# The module is a mechanical transliteration of aes-sparcv9.pl, but with
+# a twist: the S-boxes are compressed even further, down to 1K+256B. On
+# PA-7100LC performance is ~40% better than gcc 3.2 generated code, at
+# about 33 cycles per processed byte with a 128-bit key. Newer CPUs
+# perform at 16 cycles per byte. It's not faster than code generated by
+# the vendor compiler, but recall that it uses compressed S-boxes, which
+# require extra processing.
+#
+# Special thanks to polarhome.com for providing HP-UX account.
+
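+# Concretely, L$AES_Te below holds a single 1KB table of 32-bit entries
+# plus the plain 256-byte S-box; the three rotated copies that a full
+# 4KB table set would provide are recreated at run time with the _ror
+# shifts in the round loop, which is the extra processing mentioned
+# above.
+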
+$flavour = shift;
+$output = shift;
+open STDOUT,">$output";
+
+if ($flavour =~ /64/) {
+	$LEVEL		="2.0W";
+	$SIZE_T		=8;
+	$FRAME_MARKER	=80;
+	$SAVED_RP	=16;
+	$PUSH		="std";
+	$PUSHMA		="std,ma";
+	$POP		="ldd";
+	$POPMB		="ldd,mb";
+} else {
+	$LEVEL		="1.0";
+	$SIZE_T		=4;
+	$FRAME_MARKER	=48;
+	$SAVED_RP	=20;
+	$PUSH		="stw";
+	$PUSHMA		="stwm";
+	$POP		="ldw";
+	$POPMB		="ldwm";
+}
+
+$FRAME=16*$SIZE_T+$FRAME_MARKER;# 16 saved regs + frame marker
+				#                 [+ argument transfer]
+$inp="%r26";	# arg0
+$out="%r25";	# arg1
+$key="%r24";	# arg2
+
+($s0,$s1,$s2,$s3) = ("%r1","%r2","%r3","%r4");
+($t0,$t1,$t2,$t3) = ("%r5","%r6","%r7","%r8");
+
+($acc0, $acc1, $acc2, $acc3, $acc4, $acc5, $acc6, $acc7,
+ $acc8, $acc9,$acc10,$acc11,$acc12,$acc13,$acc14,$acc15) =
+("%r9","%r10","%r11","%r12","%r13","%r14","%r15","%r16",
+"%r17","%r18","%r19","%r20","%r21","%r22","%r23","%r26");
+
+$tbl="%r28";
+$rounds="%r29";
+
+$code=<<___;
+	.LEVEL	$LEVEL
+	.SPACE	\$TEXT\$
+	.SUBSPA	\$CODE\$,QUAD=0,ALIGN=8,ACCESS=0x2C,CODE_ONLY
+
+	.EXPORT	AES_encrypt,ENTRY,ARGW0=GR,ARGW1=GR,ARGW2=GR
+	.ALIGN	64
+AES_encrypt
+	.PROC
+	.CALLINFO	FRAME=`$FRAME-16*$SIZE_T`,NO_CALLS,SAVE_RP,ENTRY_GR=18
+	.ENTRY
+	$PUSH	%r2,-$SAVED_RP(%sp)	; standard prologue
+	$PUSHMA	%r3,$FRAME(%sp)
+	$PUSH	%r4,`-$FRAME+1*$SIZE_T`(%sp)
+	$PUSH	%r5,`-$FRAME+2*$SIZE_T`(%sp)
+	$PUSH	%r6,`-$FRAME+3*$SIZE_T`(%sp)
+	$PUSH	%r7,`-$FRAME+4*$SIZE_T`(%sp)
+	$PUSH	%r8,`-$FRAME+5*$SIZE_T`(%sp)
+	$PUSH	%r9,`-$FRAME+6*$SIZE_T`(%sp)
+	$PUSH	%r10,`-$FRAME+7*$SIZE_T`(%sp)
+	$PUSH	%r11,`-$FRAME+8*$SIZE_T`(%sp)
+	$PUSH	%r12,`-$FRAME+9*$SIZE_T`(%sp)
+	$PUSH	%r13,`-$FRAME+10*$SIZE_T`(%sp)
+	$PUSH	%r14,`-$FRAME+11*$SIZE_T`(%sp)
+	$PUSH	%r15,`-$FRAME+12*$SIZE_T`(%sp)
+	$PUSH	%r16,`-$FRAME+13*$SIZE_T`(%sp)
+	$PUSH	%r17,`-$FRAME+14*$SIZE_T`(%sp)
+	$PUSH	%r18,`-$FRAME+15*$SIZE_T`(%sp)
+
+	blr	%r0,$tbl
+	ldi	3,$t0
+L\$enc_pic
+	andcm	$tbl,$t0,$tbl
+	ldo	L\$AES_Te-L\$enc_pic($tbl),$tbl
+
+	and	$inp,$t0,$t0
+	sub	$inp,$t0,$inp
+	ldw	0($inp),$s0
+	ldw	4($inp),$s1
+	ldw	8($inp),$s2
+	comib,=	0,$t0,L\$enc_inp_aligned
+	ldw	12($inp),$s3
+
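+	; the block was fetched from a rounded-down address: load the shift
+	; amount register (%cr11, i.e. %sar, used by vshd) with 32-8*offset
+	; and funnel-shift adjacent words to recover the unaligned input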
+	sh3addl	$t0,%r0,$t0
+	subi	32,$t0,$t0
+	mtctl	$t0,%cr11
+	ldw	16($inp),$t1
+	vshd	$s0,$s1,$s0
+	vshd	$s1,$s2,$s1
+	vshd	$s2,$s3,$s2
+	vshd	$s3,$t1,$s3
+
+L\$enc_inp_aligned
+	bl	_parisc_AES_encrypt,%r31
+	nop
+
+	extru,<> $out,31,2,%r0
+	b	L\$enc_out_aligned
+	nop
+
+	_srm	$s0,24,$acc0
+	_srm	$s0,16,$acc1
+	stb	$acc0,0($out)
+	_srm	$s0,8,$acc2
+	stb	$acc1,1($out)
+	_srm	$s1,24,$acc4
+	stb	$acc2,2($out)
+	_srm	$s1,16,$acc5
+	stb	$s0,3($out)
+	_srm	$s1,8,$acc6
+	stb	$acc4,4($out)
+	_srm	$s2,24,$acc0
+	stb	$acc5,5($out)
+	_srm	$s2,16,$acc1
+	stb	$acc6,6($out)
+	_srm	$s2,8,$acc2
+	stb	$s1,7($out)
+	_srm	$s3,24,$acc4
+	stb	$acc0,8($out)
+	_srm	$s3,16,$acc5
+	stb	$acc1,9($out)
+	_srm	$s3,8,$acc6
+	stb	$acc2,10($out)
+	stb	$s2,11($out)
+	stb	$acc4,12($out)
+	stb	$acc5,13($out)
+	stb	$acc6,14($out)
+	b	L\$enc_done
+	stb	$s3,15($out)
+
+L\$enc_out_aligned
+	stw	$s0,0($out)
+	stw	$s1,4($out)
+	stw	$s2,8($out)
+	stw	$s3,12($out)
+
+L\$enc_done
+	$POP	`-$FRAME-$SAVED_RP`(%sp),%r2	; standard epilogue
+	$POP	`-$FRAME+1*$SIZE_T`(%sp),%r4
+	$POP	`-$FRAME+2*$SIZE_T`(%sp),%r5
+	$POP	`-$FRAME+3*$SIZE_T`(%sp),%r6
+	$POP	`-$FRAME+4*$SIZE_T`(%sp),%r7
+	$POP	`-$FRAME+5*$SIZE_T`(%sp),%r8
+	$POP	`-$FRAME+6*$SIZE_T`(%sp),%r9
+	$POP	`-$FRAME+7*$SIZE_T`(%sp),%r10
+	$POP	`-$FRAME+8*$SIZE_T`(%sp),%r11
+	$POP	`-$FRAME+9*$SIZE_T`(%sp),%r12
+	$POP	`-$FRAME+10*$SIZE_T`(%sp),%r13
+	$POP	`-$FRAME+11*$SIZE_T`(%sp),%r14
+	$POP	`-$FRAME+12*$SIZE_T`(%sp),%r15
+	$POP	`-$FRAME+13*$SIZE_T`(%sp),%r16
+	$POP	`-$FRAME+14*$SIZE_T`(%sp),%r17
+	$POP	`-$FRAME+15*$SIZE_T`(%sp),%r18
+	bv	(%r2)
+	.EXIT
+	$POPMB	-$FRAME(%sp),%r3
+	.PROCEND
+
+	.ALIGN	16
+_parisc_AES_encrypt
+	.PROC
+	.CALLINFO	MILLICODE
+	.ENTRY
+	ldw	240($key),$rounds
+	ldw	0($key),$t0
+	ldw	4($key),$t1
+	ldw	8($key),$t2
+	_srm	$rounds,1,$rounds
+	xor	$t0,$s0,$s0
+	ldw	12($key),$t3
+	_srm	$s0,24,$acc0
+	xor	$t1,$s1,$s1
+	ldw	16($key),$t0
+	_srm	$s1,16,$acc1
+	xor	$t2,$s2,$s2
+	ldw	20($key),$t1
+	xor	$t3,$s3,$s3
+	ldw	24($key),$t2
+	ldw	28($key),$t3
+L\$enc_loop
+	_srm	$s2,8,$acc2
+	ldwx,s	$acc0($tbl),$acc0
+	_srm	$s3,0,$acc3
+	ldwx,s	$acc1($tbl),$acc1
+	_srm	$s1,24,$acc4
+	ldwx,s	$acc2($tbl),$acc2
+	_srm	$s2,16,$acc5
+	ldwx,s	$acc3($tbl),$acc3
+	_srm	$s3,8,$acc6
+	ldwx,s	$acc4($tbl),$acc4
+	_srm	$s0,0,$acc7
+	ldwx,s	$acc5($tbl),$acc5
+	_srm	$s2,24,$acc8
+	ldwx,s	$acc6($tbl),$acc6
+	_srm	$s3,16,$acc9
+	ldwx,s	$acc7($tbl),$acc7
+	_srm	$s0,8,$acc10
+	ldwx,s	$acc8($tbl),$acc8
+	_srm	$s1,0,$acc11
+	ldwx,s	$acc9($tbl),$acc9
+	_srm	$s3,24,$acc12
+	ldwx,s	$acc10($tbl),$acc10
+	_srm	$s0,16,$acc13
+	ldwx,s	$acc11($tbl),$acc11
+	_srm	$s1,8,$acc14
+	ldwx,s	$acc12($tbl),$acc12
+	_srm	$s2,0,$acc15
+	ldwx,s	$acc13($tbl),$acc13
+	ldwx,s	$acc14($tbl),$acc14
+	ldwx,s	$acc15($tbl),$acc15
+	addib,= -1,$rounds,L\$enc_last
+	ldo	32($key),$key
+
+		_ror	$acc1,8,$acc1
+		xor	$acc0,$t0,$t0
+	ldw	0($key),$s0
+		_ror	$acc2,16,$acc2
+		xor	$acc1,$t0,$t0
+	ldw	4($key),$s1
+		_ror	$acc3,24,$acc3
+		xor	$acc2,$t0,$t0
+	ldw	8($key),$s2
+		_ror	$acc5,8,$acc5
+		xor	$acc3,$t0,$t0
+	ldw	12($key),$s3
+		_ror	$acc6,16,$acc6
+		xor	$acc4,$t1,$t1
+		_ror	$acc7,24,$acc7
+		xor	$acc5,$t1,$t1
+		_ror	$acc9,8,$acc9
+		xor	$acc6,$t1,$t1
+		_ror	$acc10,16,$acc10
+		xor	$acc7,$t1,$t1
+		_ror	$acc11,24,$acc11
+		xor	$acc8,$t2,$t2
+		_ror	$acc13,8,$acc13
+		xor	$acc9,$t2,$t2
+		_ror	$acc14,16,$acc14
+		xor	$acc10,$t2,$t2
+		_ror	$acc15,24,$acc15
+		xor	$acc11,$t2,$t2
+		xor	$acc12,$acc14,$acc14
+		xor	$acc13,$t3,$t3
+	_srm	$t0,24,$acc0
+		xor	$acc14,$t3,$t3
+	_srm	$t1,16,$acc1
+		xor	$acc15,$t3,$t3
+
+	_srm	$t2,8,$acc2
+	ldwx,s	$acc0($tbl),$acc0
+	_srm	$t3,0,$acc3
+	ldwx,s	$acc1($tbl),$acc1
+	_srm	$t1,24,$acc4
+	ldwx,s	$acc2($tbl),$acc2
+	_srm	$t2,16,$acc5
+	ldwx,s	$acc3($tbl),$acc3
+	_srm	$t3,8,$acc6
+	ldwx,s	$acc4($tbl),$acc4
+	_srm	$t0,0,$acc7
+	ldwx,s	$acc5($tbl),$acc5
+	_srm	$t2,24,$acc8
+	ldwx,s	$acc6($tbl),$acc6
+	_srm	$t3,16,$acc9
+	ldwx,s	$acc7($tbl),$acc7
+	_srm	$t0,8,$acc10
+	ldwx,s	$acc8($tbl),$acc8
+	_srm	$t1,0,$acc11
+	ldwx,s	$acc9($tbl),$acc9
+	_srm	$t3,24,$acc12
+	ldwx,s	$acc10($tbl),$acc10
+	_srm	$t0,16,$acc13
+	ldwx,s	$acc11($tbl),$acc11
+	_srm	$t1,8,$acc14
+	ldwx,s	$acc12($tbl),$acc12
+	_srm	$t2,0,$acc15
+	ldwx,s	$acc13($tbl),$acc13
+		_ror	$acc1,8,$acc1
+	ldwx,s	$acc14($tbl),$acc14
+
+		_ror	$acc2,16,$acc2
+		xor	$acc0,$s0,$s0
+	ldwx,s	$acc15($tbl),$acc15
+		_ror	$acc3,24,$acc3
+		xor	$acc1,$s0,$s0
+	ldw	16($key),$t0
+		_ror	$acc5,8,$acc5
+		xor	$acc2,$s0,$s0
+	ldw	20($key),$t1
+		_ror	$acc6,16,$acc6
+		xor	$acc3,$s0,$s0
+	ldw	24($key),$t2
+		_ror	$acc7,24,$acc7
+		xor	$acc4,$s1,$s1
+	ldw	28($key),$t3
+		_ror	$acc9,8,$acc9
+		xor	$acc5,$s1,$s1
+	ldw	1024+0($tbl),%r0		; prefetch te4
+		_ror	$acc10,16,$acc10
+		xor	$acc6,$s1,$s1
+	ldw	1024+32($tbl),%r0		; prefetch te4
+		_ror	$acc11,24,$acc11
+		xor	$acc7,$s1,$s1
+	ldw	1024+64($tbl),%r0		; prefetch te4
+		_ror	$acc13,8,$acc13
+		xor	$acc8,$s2,$s2
+	ldw	1024+96($tbl),%r0		; prefetch te4
+		_ror	$acc14,16,$acc14
+		xor	$acc9,$s2,$s2
+	ldw	1024+128($tbl),%r0		; prefetch te4
+		_ror	$acc15,24,$acc15
+		xor	$acc10,$s2,$s2
+	ldw	1024+160($tbl),%r0		; prefetch te4
+	_srm	$s0,24,$acc0
+		xor	$acc11,$s2,$s2
+	ldw	1024+192($tbl),%r0		; prefetch te4
+		xor	$acc12,$acc14,$acc14
+		xor	$acc13,$s3,$s3
+	ldw	1024+224($tbl),%r0		; prefetch te4
+	_srm	$s1,16,$acc1
+		xor	$acc14,$s3,$s3
+	b	L\$enc_loop
+		xor	$acc15,$s3,$s3
+
+	.ALIGN	16
+L\$enc_last
+	ldo	1024($tbl),$rounds
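+	; $rounds now points at the 256-byte S-box kept 1KB into the table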
+		_ror	$acc1,8,$acc1
+		xor	$acc0,$t0,$t0
+	ldw	0($key),$s0
+		_ror	$acc2,16,$acc2
+		xor	$acc1,$t0,$t0
+	ldw	4($key),$s1
+		_ror	$acc3,24,$acc3
+		xor	$acc2,$t0,$t0
+	ldw	8($key),$s2
+		_ror	$acc5,8,$acc5
+		xor	$acc3,$t0,$t0
+	ldw	12($key),$s3
+		_ror	$acc6,16,$acc6
+		xor	$acc4,$t1,$t1
+		_ror	$acc7,24,$acc7
+		xor	$acc5,$t1,$t1
+		_ror	$acc9,8,$acc9
+		xor	$acc6,$t1,$t1
+		_ror	$acc10,16,$acc10
+		xor	$acc7,$t1,$t1
+		_ror	$acc11,24,$acc11
+		xor	$acc8,$t2,$t2
+		_ror	$acc13,8,$acc13
+		xor	$acc9,$t2,$t2
+		_ror	$acc14,16,$acc14
+		xor	$acc10,$t2,$t2
+		_ror	$acc15,24,$acc15
+		xor	$acc11,$t2,$t2
+		xor	$acc12,$acc14,$acc14
+		xor	$acc13,$t3,$t3
+	_srm	$t0,24,$acc0
+		xor	$acc14,$t3,$t3
+	_srm	$t1,16,$acc1
+		xor	$acc15,$t3,$t3
+
+	_srm	$t2,8,$acc2
+	ldbx	$acc0($rounds),$acc0
+	_srm	$t1,24,$acc4
+	ldbx	$acc1($rounds),$acc1
+	_srm	$t2,16,$acc5
+	_srm	$t3,0,$acc3
+	ldbx	$acc2($rounds),$acc2
+	ldbx	$acc3($rounds),$acc3
+	_srm	$t3,8,$acc6
+	ldbx	$acc4($rounds),$acc4
+	_srm	$t2,24,$acc8
+	ldbx	$acc5($rounds),$acc5
+	_srm	$t3,16,$acc9
+	_srm	$t0,0,$acc7
+	ldbx	$acc6($rounds),$acc6
+	ldbx	$acc7($rounds),$acc7
+	_srm	$t0,8,$acc10
+	ldbx	$acc8($rounds),$acc8
+	_srm	$t3,24,$acc12
+	ldbx	$acc9($rounds),$acc9
+	_srm	$t0,16,$acc13
+	_srm	$t1,0,$acc11
+	ldbx	$acc10($rounds),$acc10
+	_srm	$t1,8,$acc14
+	ldbx	$acc11($rounds),$acc11
+	ldbx	$acc12($rounds),$acc12
+	ldbx	$acc13($rounds),$acc13
+	_srm	$t2,0,$acc15
+	ldbx	$acc14($rounds),$acc14
+
+		dep	$acc0,7,8,$acc3
+	ldbx	$acc15($rounds),$acc15
+		dep	$acc4,7,8,$acc7
+		dep	$acc1,15,8,$acc3
+		dep	$acc5,15,8,$acc7
+		dep	$acc2,23,8,$acc3
+		dep	$acc6,23,8,$acc7
+		xor	$acc3,$s0,$s0
+		xor	$acc7,$s1,$s1
+		dep	$acc8,7,8,$acc11
+		dep	$acc12,7,8,$acc15
+		dep	$acc9,15,8,$acc11
+		dep	$acc13,15,8,$acc15
+		dep	$acc10,23,8,$acc11
+		dep	$acc14,23,8,$acc15
+		xor	$acc11,$s2,$s2
+
+	bv	(%r31)
+	.EXIT
+		xor	$acc15,$s3,$s3
+	.PROCEND
+
+	.ALIGN	64
+L\$AES_Te
+	.WORD	0xc66363a5, 0xf87c7c84, 0xee777799, 0xf67b7b8d
+	.WORD	0xfff2f20d, 0xd66b6bbd, 0xde6f6fb1, 0x91c5c554
+	.WORD	0x60303050, 0x02010103, 0xce6767a9, 0x562b2b7d
+	.WORD	0xe7fefe19, 0xb5d7d762, 0x4dababe6, 0xec76769a
+	.WORD	0x8fcaca45, 0x1f82829d, 0x89c9c940, 0xfa7d7d87
+	.WORD	0xeffafa15, 0xb25959eb, 0x8e4747c9, 0xfbf0f00b
+	.WORD	0x41adadec, 0xb3d4d467, 0x5fa2a2fd, 0x45afafea
+	.WORD	0x239c9cbf, 0x53a4a4f7, 0xe4727296, 0x9bc0c05b
+	.WORD	0x75b7b7c2, 0xe1fdfd1c, 0x3d9393ae, 0x4c26266a
+	.WORD	0x6c36365a, 0x7e3f3f41, 0xf5f7f702, 0x83cccc4f
+	.WORD	0x6834345c, 0x51a5a5f4, 0xd1e5e534, 0xf9f1f108
+	.WORD	0xe2717193, 0xabd8d873, 0x62313153, 0x2a15153f
+	.WORD	0x0804040c, 0x95c7c752, 0x46232365, 0x9dc3c35e
+	.WORD	0x30181828, 0x379696a1, 0x0a05050f, 0x2f9a9ab5
+	.WORD	0x0e070709, 0x24121236, 0x1b80809b, 0xdfe2e23d
+	.WORD	0xcdebeb26, 0x4e272769, 0x7fb2b2cd, 0xea75759f
+	.WORD	0x1209091b, 0x1d83839e, 0x582c2c74, 0x341a1a2e
+	.WORD	0x361b1b2d, 0xdc6e6eb2, 0xb45a5aee, 0x5ba0a0fb
+	.WORD	0xa45252f6, 0x763b3b4d, 0xb7d6d661, 0x7db3b3ce
+	.WORD	0x5229297b, 0xdde3e33e, 0x5e2f2f71, 0x13848497
+	.WORD	0xa65353f5, 0xb9d1d168, 0x00000000, 0xc1eded2c
+	.WORD	0x40202060, 0xe3fcfc1f, 0x79b1b1c8, 0xb65b5bed
+	.WORD	0xd46a6abe, 0x8dcbcb46, 0x67bebed9, 0x7239394b
+	.WORD	0x944a4ade, 0x984c4cd4, 0xb05858e8, 0x85cfcf4a
+	.WORD	0xbbd0d06b, 0xc5efef2a, 0x4faaaae5, 0xedfbfb16
+	.WORD	0x864343c5, 0x9a4d4dd7, 0x66333355, 0x11858594
+	.WORD	0x8a4545cf, 0xe9f9f910, 0x04020206, 0xfe7f7f81
+	.WORD	0xa05050f0, 0x783c3c44, 0x259f9fba, 0x4ba8a8e3
+	.WORD	0xa25151f3, 0x5da3a3fe, 0x804040c0, 0x058f8f8a
+	.WORD	0x3f9292ad, 0x219d9dbc, 0x70383848, 0xf1f5f504
+	.WORD	0x63bcbcdf, 0x77b6b6c1, 0xafdada75, 0x42212163
+	.WORD	0x20101030, 0xe5ffff1a, 0xfdf3f30e, 0xbfd2d26d
+	.WORD	0x81cdcd4c, 0x180c0c14, 0x26131335, 0xc3ecec2f
+	.WORD	0xbe5f5fe1, 0x359797a2, 0x884444cc, 0x2e171739
+	.WORD	0x93c4c457, 0x55a7a7f2, 0xfc7e7e82, 0x7a3d3d47
+	.WORD	0xc86464ac, 0xba5d5de7, 0x3219192b, 0xe6737395
+	.WORD	0xc06060a0, 0x19818198, 0x9e4f4fd1, 0xa3dcdc7f
+	.WORD	0x44222266, 0x542a2a7e, 0x3b9090ab, 0x0b888883
+	.WORD	0x8c4646ca, 0xc7eeee29, 0x6bb8b8d3, 0x2814143c
+	.WORD	0xa7dede79, 0xbc5e5ee2, 0x160b0b1d, 0xaddbdb76
+	.WORD	0xdbe0e03b, 0x64323256, 0x743a3a4e, 0x140a0a1e
+	.WORD	0x924949db, 0x0c06060a, 0x4824246c, 0xb85c5ce4
+	.WORD	0x9fc2c25d, 0xbdd3d36e, 0x43acacef, 0xc46262a6
+	.WORD	0x399191a8, 0x319595a4, 0xd3e4e437, 0xf279798b
+	.WORD	0xd5e7e732, 0x8bc8c843, 0x6e373759, 0xda6d6db7
+	.WORD	0x018d8d8c, 0xb1d5d564, 0x9c4e4ed2, 0x49a9a9e0
+	.WORD	0xd86c6cb4, 0xac5656fa, 0xf3f4f407, 0xcfeaea25
+	.WORD	0xca6565af, 0xf47a7a8e, 0x47aeaee9, 0x10080818
+	.WORD	0x6fbabad5, 0xf0787888, 0x4a25256f, 0x5c2e2e72
+	.WORD	0x381c1c24, 0x57a6a6f1, 0x73b4b4c7, 0x97c6c651
+	.WORD	0xcbe8e823, 0xa1dddd7c, 0xe874749c, 0x3e1f1f21
+	.WORD	0x964b4bdd, 0x61bdbddc, 0x0d8b8b86, 0x0f8a8a85
+	.WORD	0xe0707090, 0x7c3e3e42, 0x71b5b5c4, 0xcc6666aa
+	.WORD	0x904848d8, 0x06030305, 0xf7f6f601, 0x1c0e0e12
+	.WORD	0xc26161a3, 0x6a35355f, 0xae5757f9, 0x69b9b9d0
+	.WORD	0x17868691, 0x99c1c158, 0x3a1d1d27, 0x279e9eb9
+	.WORD	0xd9e1e138, 0xebf8f813, 0x2b9898b3, 0x22111133
+	.WORD	0xd26969bb, 0xa9d9d970, 0x078e8e89, 0x339494a7
+	.WORD	0x2d9b9bb6, 0x3c1e1e22, 0x15878792, 0xc9e9e920
+	.WORD	0x87cece49, 0xaa5555ff, 0x50282878, 0xa5dfdf7a
+	.WORD	0x038c8c8f, 0x59a1a1f8, 0x09898980, 0x1a0d0d17
+	.WORD	0x65bfbfda, 0xd7e6e631, 0x844242c6, 0xd06868b8
+	.WORD	0x824141c3, 0x299999b0, 0x5a2d2d77, 0x1e0f0f11
+	.WORD	0x7bb0b0cb, 0xa85454fc, 0x6dbbbbd6, 0x2c16163a
+	.BYTE	0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5
+	.BYTE	0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76
+	.BYTE	0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0
+	.BYTE	0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0
+	.BYTE	0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc
+	.BYTE	0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15
+	.BYTE	0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a
+	.BYTE	0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75
+	.BYTE	0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0
+	.BYTE	0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84
+	.BYTE	0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b
+	.BYTE	0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf
+	.BYTE	0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85
+	.BYTE	0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8
+	.BYTE	0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5
+	.BYTE	0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2
+	.BYTE	0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17
+	.BYTE	0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73
+	.BYTE	0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88
+	.BYTE	0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb
+	.BYTE	0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c
+	.BYTE	0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79
+	.BYTE	0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9
+	.BYTE	0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08
+	.BYTE	0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6
+	.BYTE	0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a
+	.BYTE	0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e
+	.BYTE	0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e
+	.BYTE	0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94
+	.BYTE	0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf
+	.BYTE	0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68
+	.BYTE	0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16
+___
+
+$code.=<<___;
+	.EXPORT	AES_decrypt,ENTRY,ARGW0=GR,ARGW1=GR,ARGW2=GR
+	.ALIGN	16
+AES_decrypt
+	.PROC
+	.CALLINFO	FRAME=`$FRAME-16*$SIZE_T`,NO_CALLS,SAVE_RP,ENTRY_GR=18
+	.ENTRY
+	$PUSH	%r2,-$SAVED_RP(%sp)	; standard prologue
+	$PUSHMA	%r3,$FRAME(%sp)
+	$PUSH	%r4,`-$FRAME+1*$SIZE_T`(%sp)
+	$PUSH	%r5,`-$FRAME+2*$SIZE_T`(%sp)
+	$PUSH	%r6,`-$FRAME+3*$SIZE_T`(%sp)
+	$PUSH	%r7,`-$FRAME+4*$SIZE_T`(%sp)
+	$PUSH	%r8,`-$FRAME+5*$SIZE_T`(%sp)
+	$PUSH	%r9,`-$FRAME+6*$SIZE_T`(%sp)
+	$PUSH	%r10,`-$FRAME+7*$SIZE_T`(%sp)
+	$PUSH	%r11,`-$FRAME+8*$SIZE_T`(%sp)
+	$PUSH	%r12,`-$FRAME+9*$SIZE_T`(%sp)
+	$PUSH	%r13,`-$FRAME+10*$SIZE_T`(%sp)
+	$PUSH	%r14,`-$FRAME+11*$SIZE_T`(%sp)
+	$PUSH	%r15,`-$FRAME+12*$SIZE_T`(%sp)
+	$PUSH	%r16,`-$FRAME+13*$SIZE_T`(%sp)
+	$PUSH	%r17,`-$FRAME+14*$SIZE_T`(%sp)
+	$PUSH	%r18,`-$FRAME+15*$SIZE_T`(%sp)
+
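+	; PIC: blr deposits the address of L\$dec_pic in $tbl; andcm
+	; then strips the privilege-level bits and ldo rebases the
+	; pointer to the L\$AES_Td table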
+	blr	%r0,$tbl
+	ldi	3,$t0
+L\$dec_pic
+	andcm	$tbl,$t0,$tbl
+	ldo	L\$AES_Td-L\$dec_pic($tbl),$tbl
+
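+	; input may be unaligned: round $inp down to a word boundary
+	; and realign the loaded words with vshd, using the shift
+	; amount deposited in %cr11 (the shift amount register)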
+	and	$inp,$t0,$t0
+	sub	$inp,$t0,$inp
+	ldw	0($inp),$s0
+	ldw	4($inp),$s1
+	ldw	8($inp),$s2
+	comib,=	0,$t0,L\$dec_inp_aligned
+	ldw	12($inp),$s3
+
+	sh3addl	$t0,%r0,$t0
+	subi	32,$t0,$t0
+	mtctl	$t0,%cr11
+	ldw	16($inp),$t1
+	vshd	$s0,$s1,$s0
+	vshd	$s1,$s2,$s1
+	vshd	$s2,$s3,$s2
+	vshd	$s3,$t1,$s3
+
+L\$dec_inp_aligned
+	bl	_parisc_AES_decrypt,%r31
+	nop
+
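+	; extru,<> nullifies the branch when $out is unaligned, so we
+	; fall through to the byte-by-byte stores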
+	extru,<> $out,31,2,%r0
+	b	L\$dec_out_aligned
+	nop
+
+	_srm	$s0,24,$acc0
+	_srm	$s0,16,$acc1
+	stb	$acc0,0($out)
+	_srm	$s0,8,$acc2
+	stb	$acc1,1($out)
+	_srm	$s1,24,$acc4
+	stb	$acc2,2($out)
+	_srm	$s1,16,$acc5
+	stb	$s0,3($out)
+	_srm	$s1,8,$acc6
+	stb	$acc4,4($out)
+	_srm	$s2,24,$acc0
+	stb	$acc5,5($out)
+	_srm	$s2,16,$acc1
+	stb	$acc6,6($out)
+	_srm	$s2,8,$acc2
+	stb	$s1,7($out)
+	_srm	$s3,24,$acc4
+	stb	$acc0,8($out)
+	_srm	$s3,16,$acc5
+	stb	$acc1,9($out)
+	_srm	$s3,8,$acc6
+	stb	$acc2,10($out)
+	stb	$s2,11($out)
+	stb	$acc4,12($out)
+	stb	$acc5,13($out)
+	stb	$acc6,14($out)
+	b	L\$dec_done
+	stb	$s3,15($out)
+
+L\$dec_out_aligned
+	stw	$s0,0($out)
+	stw	$s1,4($out)
+	stw	$s2,8($out)
+	stw	$s3,12($out)
+
+L\$dec_done
+	$POP	`-$FRAME-$SAVED_RP`(%sp),%r2	; standard epilogue
+	$POP	`-$FRAME+1*$SIZE_T`(%sp),%r4
+	$POP	`-$FRAME+2*$SIZE_T`(%sp),%r5
+	$POP	`-$FRAME+3*$SIZE_T`(%sp),%r6
+	$POP	`-$FRAME+4*$SIZE_T`(%sp),%r7
+	$POP	`-$FRAME+5*$SIZE_T`(%sp),%r8
+	$POP	`-$FRAME+6*$SIZE_T`(%sp),%r9
+	$POP	`-$FRAME+7*$SIZE_T`(%sp),%r10
+	$POP	`-$FRAME+8*$SIZE_T`(%sp),%r11
+	$POP	`-$FRAME+9*$SIZE_T`(%sp),%r12
+	$POP	`-$FRAME+10*$SIZE_T`(%sp),%r13
+	$POP	`-$FRAME+11*$SIZE_T`(%sp),%r14
+	$POP	`-$FRAME+12*$SIZE_T`(%sp),%r15
+	$POP	`-$FRAME+13*$SIZE_T`(%sp),%r16
+	$POP	`-$FRAME+14*$SIZE_T`(%sp),%r17
+	$POP	`-$FRAME+15*$SIZE_T`(%sp),%r18
+	bv	(%r2)
+	.EXIT
+	$POPMB	-$FRAME(%sp),%r3
+	.PROCEND
+
+	.ALIGN	16
+_parisc_AES_decrypt
+	.PROC
+	.CALLINFO	MILLICODE
+	.ENTRY
+	ldw	240($key),$rounds
+	ldw	0($key),$t0
+	ldw	4($key),$t1
+	ldw	8($key),$t2
+	ldw	12($key),$t3
+	_srm	$rounds,1,$rounds
+	xor	$t0,$s0,$s0
+	ldw	16($key),$t0
+	xor	$t1,$s1,$s1
+	ldw	20($key),$t1
+	_srm	$s0,24,$acc0
+	xor	$t2,$s2,$s2
+	ldw	24($key),$t2
+	xor	$t3,$s3,$s3
+	ldw	28($key),$t3
+	_srm	$s3,16,$acc1
+L\$dec_loop
+	_srm	$s2,8,$acc2
+	ldwx,s	$acc0($tbl),$acc0
+	_srm	$s1,0,$acc3
+	ldwx,s	$acc1($tbl),$acc1
+	_srm	$s1,24,$acc4
+	ldwx,s	$acc2($tbl),$acc2
+	_srm	$s0,16,$acc5
+	ldwx,s	$acc3($tbl),$acc3
+	_srm	$s3,8,$acc6
+	ldwx,s	$acc4($tbl),$acc4
+	_srm	$s2,0,$acc7
+	ldwx,s	$acc5($tbl),$acc5
+	_srm	$s2,24,$acc8
+	ldwx,s	$acc6($tbl),$acc6
+	_srm	$s1,16,$acc9
+	ldwx,s	$acc7($tbl),$acc7
+	_srm	$s0,8,$acc10
+	ldwx,s	$acc8($tbl),$acc8
+	_srm	$s3,0,$acc11
+	ldwx,s	$acc9($tbl),$acc9
+	_srm	$s3,24,$acc12
+	ldwx,s	$acc10($tbl),$acc10
+	_srm	$s2,16,$acc13
+	ldwx,s	$acc11($tbl),$acc11
+	_srm	$s1,8,$acc14
+	ldwx,s	$acc12($tbl),$acc12
+	_srm	$s0,0,$acc15
+	ldwx,s	$acc13($tbl),$acc13
+	ldwx,s	$acc14($tbl),$acc14
+	ldwx,s	$acc15($tbl),$acc15
+	addib,= -1,$rounds,L\$dec_last
+	ldo	32($key),$key
+
+		_ror	$acc1,8,$acc1
+		xor	$acc0,$t0,$t0
+	ldw	0($key),$s0
+		_ror	$acc2,16,$acc2
+		xor	$acc1,$t0,$t0
+	ldw	4($key),$s1
+		_ror	$acc3,24,$acc3
+		xor	$acc2,$t0,$t0
+	ldw	8($key),$s2
+		_ror	$acc5,8,$acc5
+		xor	$acc3,$t0,$t0
+	ldw	12($key),$s3
+		_ror	$acc6,16,$acc6
+		xor	$acc4,$t1,$t1
+		_ror	$acc7,24,$acc7
+		xor	$acc5,$t1,$t1
+		_ror	$acc9,8,$acc9
+		xor	$acc6,$t1,$t1
+		_ror	$acc10,16,$acc10
+		xor	$acc7,$t1,$t1
+		_ror	$acc11,24,$acc11
+		xor	$acc8,$t2,$t2
+		_ror	$acc13,8,$acc13
+		xor	$acc9,$t2,$t2
+		_ror	$acc14,16,$acc14
+		xor	$acc10,$t2,$t2
+		_ror	$acc15,24,$acc15
+		xor	$acc11,$t2,$t2
+		xor	$acc12,$acc14,$acc14
+		xor	$acc13,$t3,$t3
+	_srm	$t0,24,$acc0
+		xor	$acc14,$t3,$t3
+		xor	$acc15,$t3,$t3
+	_srm	$t3,16,$acc1
+
+	_srm	$t2,8,$acc2
+	ldwx,s	$acc0($tbl),$acc0
+	_srm	$t1,0,$acc3
+	ldwx,s	$acc1($tbl),$acc1
+	_srm	$t1,24,$acc4
+	ldwx,s	$acc2($tbl),$acc2
+	_srm	$t0,16,$acc5
+	ldwx,s	$acc3($tbl),$acc3
+	_srm	$t3,8,$acc6
+	ldwx,s	$acc4($tbl),$acc4
+	_srm	$t2,0,$acc7
+	ldwx,s	$acc5($tbl),$acc5
+	_srm	$t2,24,$acc8
+	ldwx,s	$acc6($tbl),$acc6
+	_srm	$t1,16,$acc9
+	ldwx,s	$acc7($tbl),$acc7
+	_srm	$t0,8,$acc10
+	ldwx,s	$acc8($tbl),$acc8
+	_srm	$t3,0,$acc11
+	ldwx,s	$acc9($tbl),$acc9
+	_srm	$t3,24,$acc12
+	ldwx,s	$acc10($tbl),$acc10
+	_srm	$t2,16,$acc13
+	ldwx,s	$acc11($tbl),$acc11
+	_srm	$t1,8,$acc14
+	ldwx,s	$acc12($tbl),$acc12
+	_srm	$t0,0,$acc15
+	ldwx,s	$acc13($tbl),$acc13
+		_ror	$acc1,8,$acc1
+	ldwx,s	$acc14($tbl),$acc14
+
+		_ror	$acc2,16,$acc2
+		xor	$acc0,$s0,$s0
+	ldwx,s	$acc15($tbl),$acc15
+		_ror	$acc3,24,$acc3
+		xor	$acc1,$s0,$s0
+	ldw	16($key),$t0
+		_ror	$acc5,8,$acc5
+		xor	$acc2,$s0,$s0
+	ldw	20($key),$t1
+		_ror	$acc6,16,$acc6
+		xor	$acc3,$s0,$s0
+	ldw	24($key),$t2
+		_ror	$acc7,24,$acc7
+		xor	$acc4,$s1,$s1
+	ldw	28($key),$t3
+		_ror	$acc9,8,$acc9
+		xor	$acc5,$s1,$s1
+	ldw	1024+0($tbl),%r0		; prefetch td4
+		_ror	$acc10,16,$acc10
+		xor	$acc6,$s1,$s1
+	ldw	1024+32($tbl),%r0		; prefetch td4
+		_ror	$acc11,24,$acc11
+		xor	$acc7,$s1,$s1
+	ldw	1024+64($tbl),%r0		; prefetch td4
+		_ror	$acc13,8,$acc13
+		xor	$acc8,$s2,$s2
+	ldw	1024+96($tbl),%r0		; prefetch td4
+		_ror	$acc14,16,$acc14
+		xor	$acc9,$s2,$s2
+	ldw	1024+128($tbl),%r0		; prefetch td4
+		_ror	$acc15,24,$acc15
+		xor	$acc10,$s2,$s2
+	ldw	1024+160($tbl),%r0		; prefetch td4
+	_srm	$s0,24,$acc0
+		xor	$acc11,$s2,$s2
+	ldw	1024+192($tbl),%r0		; prefetch td4
+		xor	$acc12,$acc14,$acc14
+		xor	$acc13,$s3,$s3
+	ldw	1024+224($tbl),%r0		; prefetch td4
+		xor	$acc14,$s3,$s3
+		xor	$acc15,$s3,$s3
+	b	L\$dec_loop
+	_srm	$s3,16,$acc1
+
+	.ALIGN	16
+L\$dec_last
+	ldo	1024($tbl),$rounds
+		_ror	$acc1,8,$acc1
+		xor	$acc0,$t0,$t0
+	ldw	0($key),$s0
+		_ror	$acc2,16,$acc2
+		xor	$acc1,$t0,$t0
+	ldw	4($key),$s1
+		_ror	$acc3,24,$acc3
+		xor	$acc2,$t0,$t0
+	ldw	8($key),$s2
+		_ror	$acc5,8,$acc5
+		xor	$acc3,$t0,$t0
+	ldw	12($key),$s3
+		_ror	$acc6,16,$acc6
+		xor	$acc4,$t1,$t1
+		_ror	$acc7,24,$acc7
+		xor	$acc5,$t1,$t1
+		_ror	$acc9,8,$acc9
+		xor	$acc6,$t1,$t1
+		_ror	$acc10,16,$acc10
+		xor	$acc7,$t1,$t1
+		_ror	$acc11,24,$acc11
+		xor	$acc8,$t2,$t2
+		_ror	$acc13,8,$acc13
+		xor	$acc9,$t2,$t2
+		_ror	$acc14,16,$acc14
+		xor	$acc10,$t2,$t2
+		_ror	$acc15,24,$acc15
+		xor	$acc11,$t2,$t2
+		xor	$acc12,$acc14,$acc14
+		xor	$acc13,$t3,$t3
+	_srm	$t0,24,$acc0
+		xor	$acc14,$t3,$t3
+		xor	$acc15,$t3,$t3
+	_srm	$t3,16,$acc1
+
+	_srm	$t2,8,$acc2
+	ldbx	$acc0($rounds),$acc0
+	_srm	$t1,24,$acc4
+	ldbx	$acc1($rounds),$acc1
+	_srm	$t0,16,$acc5
+	_srm	$t1,0,$acc3
+	ldbx	$acc2($rounds),$acc2
+	ldbx	$acc3($rounds),$acc3
+	_srm	$t3,8,$acc6
+	ldbx	$acc4($rounds),$acc4
+	_srm	$t2,24,$acc8
+	ldbx	$acc5($rounds),$acc5
+	_srm	$t1,16,$acc9
+	_srm	$t2,0,$acc7
+	ldbx	$acc6($rounds),$acc6
+	ldbx	$acc7($rounds),$acc7
+	_srm	$t0,8,$acc10
+	ldbx	$acc8($rounds),$acc8
+	_srm	$t3,24,$acc12
+	ldbx	$acc9($rounds),$acc9
+	_srm	$t2,16,$acc13
+	_srm	$t3,0,$acc11
+	ldbx	$acc10($rounds),$acc10
+	_srm	$t1,8,$acc14
+	ldbx	$acc11($rounds),$acc11
+	ldbx	$acc12($rounds),$acc12
+	ldbx	$acc13($rounds),$acc13
+	_srm	$t0,0,$acc15
+	ldbx	$acc14($rounds),$acc14
+
+		dep	$acc0,7,8,$acc3
+	ldbx	$acc15($rounds),$acc15
+		dep	$acc4,7,8,$acc7
+		dep	$acc1,15,8,$acc3
+		dep	$acc5,15,8,$acc7
+		dep	$acc2,23,8,$acc3
+		dep	$acc6,23,8,$acc7
+		xor	$acc3,$s0,$s0
+		xor	$acc7,$s1,$s1
+		dep	$acc8,7,8,$acc11
+		dep	$acc12,7,8,$acc15
+		dep	$acc9,15,8,$acc11
+		dep	$acc13,15,8,$acc15
+		dep	$acc10,23,8,$acc11
+		dep	$acc14,23,8,$acc15
+		xor	$acc11,$s2,$s2
+
+	bv	(%r31)
+	.EXIT
+		xor	$acc15,$s3,$s3
+	.PROCEND
+
+	.ALIGN	64
+L\$AES_Td
+	.WORD	0x51f4a750, 0x7e416553, 0x1a17a4c3, 0x3a275e96
+	.WORD	0x3bab6bcb, 0x1f9d45f1, 0xacfa58ab, 0x4be30393
+	.WORD	0x2030fa55, 0xad766df6, 0x88cc7691, 0xf5024c25
+	.WORD	0x4fe5d7fc, 0xc52acbd7, 0x26354480, 0xb562a38f
+	.WORD	0xdeb15a49, 0x25ba1b67, 0x45ea0e98, 0x5dfec0e1
+	.WORD	0xc32f7502, 0x814cf012, 0x8d4697a3, 0x6bd3f9c6
+	.WORD	0x038f5fe7, 0x15929c95, 0xbf6d7aeb, 0x955259da
+	.WORD	0xd4be832d, 0x587421d3, 0x49e06929, 0x8ec9c844
+	.WORD	0x75c2896a, 0xf48e7978, 0x99583e6b, 0x27b971dd
+	.WORD	0xbee14fb6, 0xf088ad17, 0xc920ac66, 0x7dce3ab4
+	.WORD	0x63df4a18, 0xe51a3182, 0x97513360, 0x62537f45
+	.WORD	0xb16477e0, 0xbb6bae84, 0xfe81a01c, 0xf9082b94
+	.WORD	0x70486858, 0x8f45fd19, 0x94de6c87, 0x527bf8b7
+	.WORD	0xab73d323, 0x724b02e2, 0xe31f8f57, 0x6655ab2a
+	.WORD	0xb2eb2807, 0x2fb5c203, 0x86c57b9a, 0xd33708a5
+	.WORD	0x302887f2, 0x23bfa5b2, 0x02036aba, 0xed16825c
+	.WORD	0x8acf1c2b, 0xa779b492, 0xf307f2f0, 0x4e69e2a1
+	.WORD	0x65daf4cd, 0x0605bed5, 0xd134621f, 0xc4a6fe8a
+	.WORD	0x342e539d, 0xa2f355a0, 0x058ae132, 0xa4f6eb75
+	.WORD	0x0b83ec39, 0x4060efaa, 0x5e719f06, 0xbd6e1051
+	.WORD	0x3e218af9, 0x96dd063d, 0xdd3e05ae, 0x4de6bd46
+	.WORD	0x91548db5, 0x71c45d05, 0x0406d46f, 0x605015ff
+	.WORD	0x1998fb24, 0xd6bde997, 0x894043cc, 0x67d99e77
+	.WORD	0xb0e842bd, 0x07898b88, 0xe7195b38, 0x79c8eedb
+	.WORD	0xa17c0a47, 0x7c420fe9, 0xf8841ec9, 0x00000000
+	.WORD	0x09808683, 0x322bed48, 0x1e1170ac, 0x6c5a724e
+	.WORD	0xfd0efffb, 0x0f853856, 0x3daed51e, 0x362d3927
+	.WORD	0x0a0fd964, 0x685ca621, 0x9b5b54d1, 0x24362e3a
+	.WORD	0x0c0a67b1, 0x9357e70f, 0xb4ee96d2, 0x1b9b919e
+	.WORD	0x80c0c54f, 0x61dc20a2, 0x5a774b69, 0x1c121a16
+	.WORD	0xe293ba0a, 0xc0a02ae5, 0x3c22e043, 0x121b171d
+	.WORD	0x0e090d0b, 0xf28bc7ad, 0x2db6a8b9, 0x141ea9c8
+	.WORD	0x57f11985, 0xaf75074c, 0xee99ddbb, 0xa37f60fd
+	.WORD	0xf701269f, 0x5c72f5bc, 0x44663bc5, 0x5bfb7e34
+	.WORD	0x8b432976, 0xcb23c6dc, 0xb6edfc68, 0xb8e4f163
+	.WORD	0xd731dcca, 0x42638510, 0x13972240, 0x84c61120
+	.WORD	0x854a247d, 0xd2bb3df8, 0xaef93211, 0xc729a16d
+	.WORD	0x1d9e2f4b, 0xdcb230f3, 0x0d8652ec, 0x77c1e3d0
+	.WORD	0x2bb3166c, 0xa970b999, 0x119448fa, 0x47e96422
+	.WORD	0xa8fc8cc4, 0xa0f03f1a, 0x567d2cd8, 0x223390ef
+	.WORD	0x87494ec7, 0xd938d1c1, 0x8ccaa2fe, 0x98d40b36
+	.WORD	0xa6f581cf, 0xa57ade28, 0xdab78e26, 0x3fadbfa4
+	.WORD	0x2c3a9de4, 0x5078920d, 0x6a5fcc9b, 0x547e4662
+	.WORD	0xf68d13c2, 0x90d8b8e8, 0x2e39f75e, 0x82c3aff5
+	.WORD	0x9f5d80be, 0x69d0937c, 0x6fd52da9, 0xcf2512b3
+	.WORD	0xc8ac993b, 0x10187da7, 0xe89c636e, 0xdb3bbb7b
+	.WORD	0xcd267809, 0x6e5918f4, 0xec9ab701, 0x834f9aa8
+	.WORD	0xe6956e65, 0xaaffe67e, 0x21bccf08, 0xef15e8e6
+	.WORD	0xbae79bd9, 0x4a6f36ce, 0xea9f09d4, 0x29b07cd6
+	.WORD	0x31a4b2af, 0x2a3f2331, 0xc6a59430, 0x35a266c0
+	.WORD	0x744ebc37, 0xfc82caa6, 0xe090d0b0, 0x33a7d815
+	.WORD	0xf104984a, 0x41ecdaf7, 0x7fcd500e, 0x1791f62f
+	.WORD	0x764dd68d, 0x43efb04d, 0xccaa4d54, 0xe49604df
+	.WORD	0x9ed1b5e3, 0x4c6a881b, 0xc12c1fb8, 0x4665517f
+	.WORD	0x9d5eea04, 0x018c355d, 0xfa877473, 0xfb0b412e
+	.WORD	0xb3671d5a, 0x92dbd252, 0xe9105633, 0x6dd64713
+	.WORD	0x9ad7618c, 0x37a10c7a, 0x59f8148e, 0xeb133c89
+	.WORD	0xcea927ee, 0xb761c935, 0xe11ce5ed, 0x7a47b13c
+	.WORD	0x9cd2df59, 0x55f2733f, 0x1814ce79, 0x73c737bf
+	.WORD	0x53f7cdea, 0x5ffdaa5b, 0xdf3d6f14, 0x7844db86
+	.WORD	0xcaaff381, 0xb968c43e, 0x3824342c, 0xc2a3405f
+	.WORD	0x161dc372, 0xbce2250c, 0x283c498b, 0xff0d9541
+	.WORD	0x39a80171, 0x080cb3de, 0xd8b4e49c, 0x6456c190
+	.WORD	0x7bcb8461, 0xd532b670, 0x486c5c74, 0xd0b85742
+	.BYTE	0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38
+	.BYTE	0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb
+	.BYTE	0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87
+	.BYTE	0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb
+	.BYTE	0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d
+	.BYTE	0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e
+	.BYTE	0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2
+	.BYTE	0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25
+	.BYTE	0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16
+	.BYTE	0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92
+	.BYTE	0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda
+	.BYTE	0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84
+	.BYTE	0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a
+	.BYTE	0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06
+	.BYTE	0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02
+	.BYTE	0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b
+	.BYTE	0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea
+	.BYTE	0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73
+	.BYTE	0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85
+	.BYTE	0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e
+	.BYTE	0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89
+	.BYTE	0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b
+	.BYTE	0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20
+	.BYTE	0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4
+	.BYTE	0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31
+	.BYTE	0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f
+	.BYTE	0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d
+	.BYTE	0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef
+	.BYTE	0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0
+	.BYTE	0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61
+	.BYTE	0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26
+	.BYTE	0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d
+	.STRINGZ "AES for PA-RISC, CRYPTOGAMS by <appro\@openssl.org>"
+___
+
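+# probe whether $ENV{CC} drives the GNU assembler; if so, the loop
+# below rewrites a few HP-specific directives into gas equivalents
+# for 64-bit builds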
+if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
+	=~ /GNU assembler/) {
+    $gnuas = 1;
+}
+
+foreach (split("\n",$code)) {
+	s/\`([^\`]*)\`/eval $1/ge;
+
+	# translate made-up instructions: _ror (a rotate right built
+	# from shd) and _srm ("shift right and mask", a byte extract
+	# via extru/extrd,u)
+	s/_ror(\s+)(%r[0-9]+),/shd$1$2,$2,/				or
+
+	s/_srm(\s+%r[0-9]+),([0-9]+),/
+		$SIZE_T==4 ? sprintf("extru%s,%d,8,",$1,31-$2)
+		:            sprintf("extrd,u%s,%d,8,",$1,63-$2)/e;
+
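+	# adapt the dialect: with GNU as in 64-bit builds, lowercase
+	# the .LEVEL width and turn .SPACE/.SUBSPA into ELF .text;
+	# drop ",*" completers in 32-bit builds and return with bve
+	# instead of bv in 64-bit ones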
+	s/(\.LEVEL\s+2\.0)W/$1w/	if ($gnuas && $SIZE_T==8);
+	s/\.SPACE\s+\$TEXT\$/.text/	if ($gnuas && $SIZE_T==8);
+	s/\.SUBSPA.*//			if ($gnuas && $SIZE_T==8);
+	s/,\*/,/			if ($SIZE_T==4);
+	s/\bbv\b(.*\(%r2\))/bve$1/	if ($SIZE_T==8);
+
+	print $_,"\n";
+}
+close STDOUT or die "error closing STDOUT: $!";
diff --git a/ap/lib/libssl/openssl-1.1.1o/crypto/aes/asm/aes-ppc.pl b/ap/lib/libssl/openssl-1.1.1o/crypto/aes/asm/aes-ppc.pl
new file mode 100644
index 0000000..bb4ee84
--- /dev/null
+++ b/ap/lib/libssl/openssl-1.1.1o/crypto/aes/asm/aes-ppc.pl
@@ -0,0 +1,1459 @@
+#! /usr/bin/env perl
+# Copyright 2007-2020 The OpenSSL Project Authors. All Rights Reserved.
+#
+# Licensed under the OpenSSL license (the "License").  You may not use
+# this file except in compliance with the License.  You can obtain a copy
+# in the file LICENSE in the source distribution or at
+# https://www.openssl.org/source/license.html
+
+
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+
+# Needs more work: key setup, CBC routine...
+#
+# ppc_AES_[en|de]crypt perform at 18 cycles per byte processed with a
+# 128-bit key, which is ~40% better than 64-bit code generated by gcc
+# 4.0. But these are not the ones currently used! Their "compact"
+# counterparts are, for security reasons. ppc_AES_encrypt_compact runs
+# at 1/2 the speed of ppc_AES_encrypt, while ppc_AES_decrypt_compact
+# runs at 1/3 the speed of ppc_AES_decrypt.
+
+# February 2010
+#
+# Rescheduling instructions to favour the Power6 pipeline gave a 10%
+# performance improvement on the platform in question (and a marginal
+# improvement even on others). It should be noted that Power6 cannot
+# process a byte in 18 cycles, only in 23, because it cannot issue
+# 4 load instructions in two cycles, only in 3. As a result the
+# non-compact block subroutines are 25% slower than one would expect.
+# The compact functions scale better, because their purely
+# computational part scales perfectly with clock frequency. To be
+# specific, ppc_AES_encrypt_compact operates at 42 cycles per byte,
+# while ppc_AES_decrypt_compact at 55 (in a 64-bit build).
+
+$flavour = shift;
+
+if ($flavour =~ /64/) {
+	$SIZE_T	=8;
+	$LRSAVE	=2*$SIZE_T;
+	$STU	="stdu";
+	$POP	="ld";
+	$PUSH	="std";
+} elsif ($flavour =~ /32/) {
+	$SIZE_T	=4;
+	$LRSAVE	=$SIZE_T;
+	$STU	="stwu";
+	$POP	="lwz";
+	$PUSH	="stw";
+} else { die "nonsense $flavour"; }
+
+$LITTLE_ENDIAN = ($flavour=~/le$/) ? $SIZE_T : 0;
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+( $xlate="${dir}ppc-xlate.pl" and -f $xlate ) or
+( $xlate="${dir}../../perlasm/ppc-xlate.pl" and -f $xlate) or
+die "can't locate ppc-xlate.pl";
+
+open STDOUT,"| $^X $xlate $flavour ".shift or die "can't call $xlate: $!";
+
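+# the output is piped through ppc-xlate.pl, which maps the generic
+# mnemonics onto the requested flavour; a typical invocation would
+# look like "perl aes-ppc.pl linux64le aes-ppc.s" (flavour, then
+# output file)
+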
+$FRAME=32*$SIZE_T;
+
+sub _data_word()
+{ my $i;
+    while(defined($i=shift)) { $code.=sprintf"\t.long\t0x%08x,0x%08x\n",$i,$i; }
+}
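+# each constant is emitted twice (8 bytes per entry) so that a word
+# load at byte offset k (0..3) into an entry yields, on a big-endian
+# load, the base word rotated by k bytes; one doubled table thus
+# serves all four rotated S-box views, cf. "addi $Tbl1,$Tbl0,3" in
+# Lppc_AES_[en|de]crypt below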
+
+$sp="r1";
+$toc="r2";
+$inp="r3";
+$out="r4";
+$key="r5";
+
+$Tbl0="r3";
+$Tbl1="r6";
+$Tbl2="r7";
+$Tbl3=$out;	# stay away from "r2"; $out is offloaded to stack
+
+$s0="r8";
+$s1="r9";
+$s2="r10";
+$s3="r11";
+
+$t0="r12";
+$t1="r0";	# stay away from "r13";
+$t2="r14";
+$t3="r15";
+
+$acc00="r16";
+$acc01="r17";
+$acc02="r18";
+$acc03="r19";
+
+$acc04="r20";
+$acc05="r21";
+$acc06="r22";
+$acc07="r23";
+
+$acc08="r24";
+$acc09="r25";
+$acc10="r26";
+$acc11="r27";
+
+$acc12="r28";
+$acc13="r29";
+$acc14="r30";
+$acc15="r31";
+
+$mask80=$Tbl2;
+$mask1b=$Tbl3;
+
+$code.=<<___;
+.machine	"any"
+.text
+
+.align	7
+LAES_Te:
+	mflr	r0
+	bcl	20,31,\$+4
+	mflr	$Tbl0	;    vvvvv "distance" between . and 1st data entry
+	addi	$Tbl0,$Tbl0,`128-8`
+	mtlr	r0
+	blr
+	.long	0
+	.byte	0,12,0x14,0,0,0,0,0
+	.space	`64-9*4`
+LAES_Td:
+	mflr	r0
+	bcl	20,31,\$+4
+	mflr	$Tbl0	;    vvvvvvvv "distance" between . and 1st data entry
+	addi	$Tbl0,$Tbl0,`128-64-8+2048+256`
+	mtlr	r0
+	blr
+	.long	0
+	.byte	0,12,0x14,0,0,0,0,0
+	.space	`128-64-9*4`
+___
+&_data_word(
+	0xc66363a5, 0xf87c7c84, 0xee777799, 0xf67b7b8d,
+	0xfff2f20d, 0xd66b6bbd, 0xde6f6fb1, 0x91c5c554,
+	0x60303050, 0x02010103, 0xce6767a9, 0x562b2b7d,
+	0xe7fefe19, 0xb5d7d762, 0x4dababe6, 0xec76769a,
+	0x8fcaca45, 0x1f82829d, 0x89c9c940, 0xfa7d7d87,
+	0xeffafa15, 0xb25959eb, 0x8e4747c9, 0xfbf0f00b,
+	0x41adadec, 0xb3d4d467, 0x5fa2a2fd, 0x45afafea,
+	0x239c9cbf, 0x53a4a4f7, 0xe4727296, 0x9bc0c05b,
+	0x75b7b7c2, 0xe1fdfd1c, 0x3d9393ae, 0x4c26266a,
+	0x6c36365a, 0x7e3f3f41, 0xf5f7f702, 0x83cccc4f,
+	0x6834345c, 0x51a5a5f4, 0xd1e5e534, 0xf9f1f108,
+	0xe2717193, 0xabd8d873, 0x62313153, 0x2a15153f,
+	0x0804040c, 0x95c7c752, 0x46232365, 0x9dc3c35e,
+	0x30181828, 0x379696a1, 0x0a05050f, 0x2f9a9ab5,
+	0x0e070709, 0x24121236, 0x1b80809b, 0xdfe2e23d,
+	0xcdebeb26, 0x4e272769, 0x7fb2b2cd, 0xea75759f,
+	0x1209091b, 0x1d83839e, 0x582c2c74, 0x341a1a2e,
+	0x361b1b2d, 0xdc6e6eb2, 0xb45a5aee, 0x5ba0a0fb,
+	0xa45252f6, 0x763b3b4d, 0xb7d6d661, 0x7db3b3ce,
+	0x5229297b, 0xdde3e33e, 0x5e2f2f71, 0x13848497,
+	0xa65353f5, 0xb9d1d168, 0x00000000, 0xc1eded2c,
+	0x40202060, 0xe3fcfc1f, 0x79b1b1c8, 0xb65b5bed,
+	0xd46a6abe, 0x8dcbcb46, 0x67bebed9, 0x7239394b,
+	0x944a4ade, 0x984c4cd4, 0xb05858e8, 0x85cfcf4a,
+	0xbbd0d06b, 0xc5efef2a, 0x4faaaae5, 0xedfbfb16,
+	0x864343c5, 0x9a4d4dd7, 0x66333355, 0x11858594,
+	0x8a4545cf, 0xe9f9f910, 0x04020206, 0xfe7f7f81,
+	0xa05050f0, 0x783c3c44, 0x259f9fba, 0x4ba8a8e3,
+	0xa25151f3, 0x5da3a3fe, 0x804040c0, 0x058f8f8a,
+	0x3f9292ad, 0x219d9dbc, 0x70383848, 0xf1f5f504,
+	0x63bcbcdf, 0x77b6b6c1, 0xafdada75, 0x42212163,
+	0x20101030, 0xe5ffff1a, 0xfdf3f30e, 0xbfd2d26d,
+	0x81cdcd4c, 0x180c0c14, 0x26131335, 0xc3ecec2f,
+	0xbe5f5fe1, 0x359797a2, 0x884444cc, 0x2e171739,
+	0x93c4c457, 0x55a7a7f2, 0xfc7e7e82, 0x7a3d3d47,
+	0xc86464ac, 0xba5d5de7, 0x3219192b, 0xe6737395,
+	0xc06060a0, 0x19818198, 0x9e4f4fd1, 0xa3dcdc7f,
+	0x44222266, 0x542a2a7e, 0x3b9090ab, 0x0b888883,
+	0x8c4646ca, 0xc7eeee29, 0x6bb8b8d3, 0x2814143c,
+	0xa7dede79, 0xbc5e5ee2, 0x160b0b1d, 0xaddbdb76,
+	0xdbe0e03b, 0x64323256, 0x743a3a4e, 0x140a0a1e,
+	0x924949db, 0x0c06060a, 0x4824246c, 0xb85c5ce4,
+	0x9fc2c25d, 0xbdd3d36e, 0x43acacef, 0xc46262a6,
+	0x399191a8, 0x319595a4, 0xd3e4e437, 0xf279798b,
+	0xd5e7e732, 0x8bc8c843, 0x6e373759, 0xda6d6db7,
+	0x018d8d8c, 0xb1d5d564, 0x9c4e4ed2, 0x49a9a9e0,
+	0xd86c6cb4, 0xac5656fa, 0xf3f4f407, 0xcfeaea25,
+	0xca6565af, 0xf47a7a8e, 0x47aeaee9, 0x10080818,
+	0x6fbabad5, 0xf0787888, 0x4a25256f, 0x5c2e2e72,
+	0x381c1c24, 0x57a6a6f1, 0x73b4b4c7, 0x97c6c651,
+	0xcbe8e823, 0xa1dddd7c, 0xe874749c, 0x3e1f1f21,
+	0x964b4bdd, 0x61bdbddc, 0x0d8b8b86, 0x0f8a8a85,
+	0xe0707090, 0x7c3e3e42, 0x71b5b5c4, 0xcc6666aa,
+	0x904848d8, 0x06030305, 0xf7f6f601, 0x1c0e0e12,
+	0xc26161a3, 0x6a35355f, 0xae5757f9, 0x69b9b9d0,
+	0x17868691, 0x99c1c158, 0x3a1d1d27, 0x279e9eb9,
+	0xd9e1e138, 0xebf8f813, 0x2b9898b3, 0x22111133,
+	0xd26969bb, 0xa9d9d970, 0x078e8e89, 0x339494a7,
+	0x2d9b9bb6, 0x3c1e1e22, 0x15878792, 0xc9e9e920,
+	0x87cece49, 0xaa5555ff, 0x50282878, 0xa5dfdf7a,
+	0x038c8c8f, 0x59a1a1f8, 0x09898980, 0x1a0d0d17,
+	0x65bfbfda, 0xd7e6e631, 0x844242c6, 0xd06868b8,
+	0x824141c3, 0x299999b0, 0x5a2d2d77, 0x1e0f0f11,
+	0x7bb0b0cb, 0xa85454fc, 0x6dbbbbd6, 0x2c16163a);
+$code.=<<___;
+.byte	0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5
+.byte	0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76
+.byte	0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0
+.byte	0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0
+.byte	0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc
+.byte	0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15
+.byte	0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a
+.byte	0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75
+.byte	0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0
+.byte	0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84
+.byte	0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b
+.byte	0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf
+.byte	0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85
+.byte	0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8
+.byte	0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5
+.byte	0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2
+.byte	0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17
+.byte	0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73
+.byte	0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88
+.byte	0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb
+.byte	0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c
+.byte	0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79
+.byte	0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9
+.byte	0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08
+.byte	0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6
+.byte	0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a
+.byte	0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e
+.byte	0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e
+.byte	0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94
+.byte	0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf
+.byte	0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68
+.byte	0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16
+___
+&_data_word(
+	0x51f4a750, 0x7e416553, 0x1a17a4c3, 0x3a275e96,
+	0x3bab6bcb, 0x1f9d45f1, 0xacfa58ab, 0x4be30393,
+	0x2030fa55, 0xad766df6, 0x88cc7691, 0xf5024c25,
+	0x4fe5d7fc, 0xc52acbd7, 0x26354480, 0xb562a38f,
+	0xdeb15a49, 0x25ba1b67, 0x45ea0e98, 0x5dfec0e1,
+	0xc32f7502, 0x814cf012, 0x8d4697a3, 0x6bd3f9c6,
+	0x038f5fe7, 0x15929c95, 0xbf6d7aeb, 0x955259da,
+	0xd4be832d, 0x587421d3, 0x49e06929, 0x8ec9c844,
+	0x75c2896a, 0xf48e7978, 0x99583e6b, 0x27b971dd,
+	0xbee14fb6, 0xf088ad17, 0xc920ac66, 0x7dce3ab4,
+	0x63df4a18, 0xe51a3182, 0x97513360, 0x62537f45,
+	0xb16477e0, 0xbb6bae84, 0xfe81a01c, 0xf9082b94,
+	0x70486858, 0x8f45fd19, 0x94de6c87, 0x527bf8b7,
+	0xab73d323, 0x724b02e2, 0xe31f8f57, 0x6655ab2a,
+	0xb2eb2807, 0x2fb5c203, 0x86c57b9a, 0xd33708a5,
+	0x302887f2, 0x23bfa5b2, 0x02036aba, 0xed16825c,
+	0x8acf1c2b, 0xa779b492, 0xf307f2f0, 0x4e69e2a1,
+	0x65daf4cd, 0x0605bed5, 0xd134621f, 0xc4a6fe8a,
+	0x342e539d, 0xa2f355a0, 0x058ae132, 0xa4f6eb75,
+	0x0b83ec39, 0x4060efaa, 0x5e719f06, 0xbd6e1051,
+	0x3e218af9, 0x96dd063d, 0xdd3e05ae, 0x4de6bd46,
+	0x91548db5, 0x71c45d05, 0x0406d46f, 0x605015ff,
+	0x1998fb24, 0xd6bde997, 0x894043cc, 0x67d99e77,
+	0xb0e842bd, 0x07898b88, 0xe7195b38, 0x79c8eedb,
+	0xa17c0a47, 0x7c420fe9, 0xf8841ec9, 0x00000000,
+	0x09808683, 0x322bed48, 0x1e1170ac, 0x6c5a724e,
+	0xfd0efffb, 0x0f853856, 0x3daed51e, 0x362d3927,
+	0x0a0fd964, 0x685ca621, 0x9b5b54d1, 0x24362e3a,
+	0x0c0a67b1, 0x9357e70f, 0xb4ee96d2, 0x1b9b919e,
+	0x80c0c54f, 0x61dc20a2, 0x5a774b69, 0x1c121a16,
+	0xe293ba0a, 0xc0a02ae5, 0x3c22e043, 0x121b171d,
+	0x0e090d0b, 0xf28bc7ad, 0x2db6a8b9, 0x141ea9c8,
+	0x57f11985, 0xaf75074c, 0xee99ddbb, 0xa37f60fd,
+	0xf701269f, 0x5c72f5bc, 0x44663bc5, 0x5bfb7e34,
+	0x8b432976, 0xcb23c6dc, 0xb6edfc68, 0xb8e4f163,
+	0xd731dcca, 0x42638510, 0x13972240, 0x84c61120,
+	0x854a247d, 0xd2bb3df8, 0xaef93211, 0xc729a16d,
+	0x1d9e2f4b, 0xdcb230f3, 0x0d8652ec, 0x77c1e3d0,
+	0x2bb3166c, 0xa970b999, 0x119448fa, 0x47e96422,
+	0xa8fc8cc4, 0xa0f03f1a, 0x567d2cd8, 0x223390ef,
+	0x87494ec7, 0xd938d1c1, 0x8ccaa2fe, 0x98d40b36,
+	0xa6f581cf, 0xa57ade28, 0xdab78e26, 0x3fadbfa4,
+	0x2c3a9de4, 0x5078920d, 0x6a5fcc9b, 0x547e4662,
+	0xf68d13c2, 0x90d8b8e8, 0x2e39f75e, 0x82c3aff5,
+	0x9f5d80be, 0x69d0937c, 0x6fd52da9, 0xcf2512b3,
+	0xc8ac993b, 0x10187da7, 0xe89c636e, 0xdb3bbb7b,
+	0xcd267809, 0x6e5918f4, 0xec9ab701, 0x834f9aa8,
+	0xe6956e65, 0xaaffe67e, 0x21bccf08, 0xef15e8e6,
+	0xbae79bd9, 0x4a6f36ce, 0xea9f09d4, 0x29b07cd6,
+	0x31a4b2af, 0x2a3f2331, 0xc6a59430, 0x35a266c0,
+	0x744ebc37, 0xfc82caa6, 0xe090d0b0, 0x33a7d815,
+	0xf104984a, 0x41ecdaf7, 0x7fcd500e, 0x1791f62f,
+	0x764dd68d, 0x43efb04d, 0xccaa4d54, 0xe49604df,
+	0x9ed1b5e3, 0x4c6a881b, 0xc12c1fb8, 0x4665517f,
+	0x9d5eea04, 0x018c355d, 0xfa877473, 0xfb0b412e,
+	0xb3671d5a, 0x92dbd252, 0xe9105633, 0x6dd64713,
+	0x9ad7618c, 0x37a10c7a, 0x59f8148e, 0xeb133c89,
+	0xcea927ee, 0xb761c935, 0xe11ce5ed, 0x7a47b13c,
+	0x9cd2df59, 0x55f2733f, 0x1814ce79, 0x73c737bf,
+	0x53f7cdea, 0x5ffdaa5b, 0xdf3d6f14, 0x7844db86,
+	0xcaaff381, 0xb968c43e, 0x3824342c, 0xc2a3405f,
+	0x161dc372, 0xbce2250c, 0x283c498b, 0xff0d9541,
+	0x39a80171, 0x080cb3de, 0xd8b4e49c, 0x6456c190,
+	0x7bcb8461, 0xd532b670, 0x486c5c74, 0xd0b85742);
+$code.=<<___;
+.byte	0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38
+.byte	0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb
+.byte	0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87
+.byte	0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb
+.byte	0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d
+.byte	0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e
+.byte	0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2
+.byte	0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25
+.byte	0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16
+.byte	0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92
+.byte	0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda
+.byte	0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84
+.byte	0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a
+.byte	0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06
+.byte	0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02
+.byte	0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b
+.byte	0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea
+.byte	0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73
+.byte	0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85
+.byte	0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e
+.byte	0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89
+.byte	0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b
+.byte	0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20
+.byte	0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4
+.byte	0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31
+.byte	0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f
+.byte	0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d
+.byte	0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef
+.byte	0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0
+.byte	0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61
+.byte	0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26
+.byte	0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d
+
+
+.globl	.AES_encrypt
+.align	7
+.AES_encrypt:
+	$STU	$sp,-$FRAME($sp)
+	mflr	r0
+
+	$PUSH	$out,`$FRAME-$SIZE_T*19`($sp)
+	$PUSH	r14,`$FRAME-$SIZE_T*18`($sp)
+	$PUSH	r15,`$FRAME-$SIZE_T*17`($sp)
+	$PUSH	r16,`$FRAME-$SIZE_T*16`($sp)
+	$PUSH	r17,`$FRAME-$SIZE_T*15`($sp)
+	$PUSH	r18,`$FRAME-$SIZE_T*14`($sp)
+	$PUSH	r19,`$FRAME-$SIZE_T*13`($sp)
+	$PUSH	r20,`$FRAME-$SIZE_T*12`($sp)
+	$PUSH	r21,`$FRAME-$SIZE_T*11`($sp)
+	$PUSH	r22,`$FRAME-$SIZE_T*10`($sp)
+	$PUSH	r23,`$FRAME-$SIZE_T*9`($sp)
+	$PUSH	r24,`$FRAME-$SIZE_T*8`($sp)
+	$PUSH	r25,`$FRAME-$SIZE_T*7`($sp)
+	$PUSH	r26,`$FRAME-$SIZE_T*6`($sp)
+	$PUSH	r27,`$FRAME-$SIZE_T*5`($sp)
+	$PUSH	r28,`$FRAME-$SIZE_T*4`($sp)
+	$PUSH	r29,`$FRAME-$SIZE_T*3`($sp)
+	$PUSH	r30,`$FRAME-$SIZE_T*2`($sp)
+	$PUSH	r31,`$FRAME-$SIZE_T*1`($sp)
+	$PUSH	r0,`$FRAME+$LRSAVE`($sp)
+
+	andi.	$t0,$inp,3
+	andi.	$t1,$out,3
+	or.	$t0,$t0,$t1
+	bne	Lenc_unaligned
+
+Lenc_unaligned_ok:
+___
+$code.=<<___ if (!$LITTLE_ENDIAN);
+	lwz	$s0,0($inp)
+	lwz	$s1,4($inp)
+	lwz	$s2,8($inp)
+	lwz	$s3,12($inp)
+___
+$code.=<<___ if ($LITTLE_ENDIAN);
+	lwz	$t0,0($inp)
+	lwz	$t1,4($inp)
+	lwz	$t2,8($inp)
+	lwz	$t3,12($inp)
+	rotlwi	$s0,$t0,8
+	rotlwi	$s1,$t1,8
+	rotlwi	$s2,$t2,8
+	rotlwi	$s3,$t3,8
+	rlwimi	$s0,$t0,24,0,7
+	rlwimi	$s1,$t1,24,0,7
+	rlwimi	$s2,$t2,24,0,7
+	rlwimi	$s3,$t3,24,0,7
+	rlwimi	$s0,$t0,24,16,23
+	rlwimi	$s1,$t1,24,16,23
+	rlwimi	$s2,$t2,24,16,23
+	rlwimi	$s3,$t3,24,16,23
+___
+$code.=<<___;
+	bl	LAES_Te
+	bl	Lppc_AES_encrypt_compact
+	$POP	$out,`$FRAME-$SIZE_T*19`($sp)
+___
+$code.=<<___ if ($LITTLE_ENDIAN);
+	rotlwi	$t0,$s0,8
+	rotlwi	$t1,$s1,8
+	rotlwi	$t2,$s2,8
+	rotlwi	$t3,$s3,8
+	rlwimi	$t0,$s0,24,0,7
+	rlwimi	$t1,$s1,24,0,7
+	rlwimi	$t2,$s2,24,0,7
+	rlwimi	$t3,$s3,24,0,7
+	rlwimi	$t0,$s0,24,16,23
+	rlwimi	$t1,$s1,24,16,23
+	rlwimi	$t2,$s2,24,16,23
+	rlwimi	$t3,$s3,24,16,23
+	stw	$t0,0($out)
+	stw	$t1,4($out)
+	stw	$t2,8($out)
+	stw	$t3,12($out)
+___
+$code.=<<___ if (!$LITTLE_ENDIAN);
+	stw	$s0,0($out)
+	stw	$s1,4($out)
+	stw	$s2,8($out)
+	stw	$s3,12($out)
+___
+$code.=<<___;
+	b	Lenc_done
+
+Lenc_unaligned:
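+	# a misaligned word access is safe unless it crosses a 4KB
+	# page boundary; fall back to byte-wise access only if the
+	# input or output block does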
+	subfic	$t0,$inp,4096
+	subfic	$t1,$out,4096
+	andi.	$t0,$t0,4096-16
+	beq	Lenc_xpage
+	andi.	$t1,$t1,4096-16
+	bne	Lenc_unaligned_ok
+
+Lenc_xpage:
+	lbz	$acc00,0($inp)
+	lbz	$acc01,1($inp)
+	lbz	$acc02,2($inp)
+	lbz	$s0,3($inp)
+	lbz	$acc04,4($inp)
+	lbz	$acc05,5($inp)
+	lbz	$acc06,6($inp)
+	lbz	$s1,7($inp)
+	lbz	$acc08,8($inp)
+	lbz	$acc09,9($inp)
+	lbz	$acc10,10($inp)
+	insrwi	$s0,$acc00,8,0
+	lbz	$s2,11($inp)
+	insrwi	$s1,$acc04,8,0
+	lbz	$acc12,12($inp)
+	insrwi	$s0,$acc01,8,8
+	lbz	$acc13,13($inp)
+	insrwi	$s1,$acc05,8,8
+	lbz	$acc14,14($inp)
+	insrwi	$s0,$acc02,8,16
+	lbz	$s3,15($inp)
+	insrwi	$s1,$acc06,8,16
+	insrwi	$s2,$acc08,8,0
+	insrwi	$s3,$acc12,8,0
+	insrwi	$s2,$acc09,8,8
+	insrwi	$s3,$acc13,8,8
+	insrwi	$s2,$acc10,8,16
+	insrwi	$s3,$acc14,8,16
+
+	bl	LAES_Te
+	bl	Lppc_AES_encrypt_compact
+	$POP	$out,`$FRAME-$SIZE_T*19`($sp)
+
+	extrwi	$acc00,$s0,8,0
+	extrwi	$acc01,$s0,8,8
+	stb	$acc00,0($out)
+	extrwi	$acc02,$s0,8,16
+	stb	$acc01,1($out)
+	stb	$acc02,2($out)
+	extrwi	$acc04,$s1,8,0
+	stb	$s0,3($out)
+	extrwi	$acc05,$s1,8,8
+	stb	$acc04,4($out)
+	extrwi	$acc06,$s1,8,16
+	stb	$acc05,5($out)
+	stb	$acc06,6($out)
+	extrwi	$acc08,$s2,8,0
+	stb	$s1,7($out)
+	extrwi	$acc09,$s2,8,8
+	stb	$acc08,8($out)
+	extrwi	$acc10,$s2,8,16
+	stb	$acc09,9($out)
+	stb	$acc10,10($out)
+	extrwi	$acc12,$s3,8,0
+	stb	$s2,11($out)
+	extrwi	$acc13,$s3,8,8
+	stb	$acc12,12($out)
+	extrwi	$acc14,$s3,8,16
+	stb	$acc13,13($out)
+	stb	$acc14,14($out)
+	stb	$s3,15($out)
+
+Lenc_done:
+	$POP	r0,`$FRAME+$LRSAVE`($sp)
+	$POP	r14,`$FRAME-$SIZE_T*18`($sp)
+	$POP	r15,`$FRAME-$SIZE_T*17`($sp)
+	$POP	r16,`$FRAME-$SIZE_T*16`($sp)
+	$POP	r17,`$FRAME-$SIZE_T*15`($sp)
+	$POP	r18,`$FRAME-$SIZE_T*14`($sp)
+	$POP	r19,`$FRAME-$SIZE_T*13`($sp)
+	$POP	r20,`$FRAME-$SIZE_T*12`($sp)
+	$POP	r21,`$FRAME-$SIZE_T*11`($sp)
+	$POP	r22,`$FRAME-$SIZE_T*10`($sp)
+	$POP	r23,`$FRAME-$SIZE_T*9`($sp)
+	$POP	r24,`$FRAME-$SIZE_T*8`($sp)
+	$POP	r25,`$FRAME-$SIZE_T*7`($sp)
+	$POP	r26,`$FRAME-$SIZE_T*6`($sp)
+	$POP	r27,`$FRAME-$SIZE_T*5`($sp)
+	$POP	r28,`$FRAME-$SIZE_T*4`($sp)
+	$POP	r29,`$FRAME-$SIZE_T*3`($sp)
+	$POP	r30,`$FRAME-$SIZE_T*2`($sp)
+	$POP	r31,`$FRAME-$SIZE_T*1`($sp)
+	mtlr	r0
+	addi	$sp,$sp,$FRAME
+	blr
+	.long	0
+	.byte	0,12,4,1,0x80,18,3,0
+	.long	0
+
+.align	5
+Lppc_AES_encrypt:
+	lwz	$acc00,240($key)
+	addi	$Tbl1,$Tbl0,3
+	lwz	$t0,0($key)
+	addi	$Tbl2,$Tbl0,2
+	lwz	$t1,4($key)
+	addi	$Tbl3,$Tbl0,1
+	lwz	$t2,8($key)
+	addi	$acc00,$acc00,-1
+	lwz	$t3,12($key)
+	addi	$key,$key,16
+	xor	$s0,$s0,$t0
+	xor	$s1,$s1,$t1
+	xor	$s2,$s2,$t2
+	xor	$s3,$s3,$t3
+	mtctr	$acc00
+.align	4
+Lenc_loop:
+	rlwinm	$acc00,$s0,`32-24+3`,21,28
+	rlwinm	$acc01,$s1,`32-24+3`,21,28
+	rlwinm	$acc02,$s2,`32-24+3`,21,28
+	rlwinm	$acc03,$s3,`32-24+3`,21,28
+	lwz	$t0,0($key)
+	rlwinm	$acc04,$s1,`32-16+3`,21,28
+	lwz	$t1,4($key)
+	rlwinm	$acc05,$s2,`32-16+3`,21,28
+	lwz	$t2,8($key)
+	rlwinm	$acc06,$s3,`32-16+3`,21,28
+	lwz	$t3,12($key)
+	rlwinm	$acc07,$s0,`32-16+3`,21,28
+	lwzx	$acc00,$Tbl0,$acc00
+	rlwinm	$acc08,$s2,`32-8+3`,21,28
+	lwzx	$acc01,$Tbl0,$acc01
+	rlwinm	$acc09,$s3,`32-8+3`,21,28
+	lwzx	$acc02,$Tbl0,$acc02
+	rlwinm	$acc10,$s0,`32-8+3`,21,28
+	lwzx	$acc03,$Tbl0,$acc03
+	rlwinm	$acc11,$s1,`32-8+3`,21,28
+	lwzx	$acc04,$Tbl1,$acc04
+	rlwinm	$acc12,$s3,`0+3`,21,28
+	lwzx	$acc05,$Tbl1,$acc05
+	rlwinm	$acc13,$s0,`0+3`,21,28
+	lwzx	$acc06,$Tbl1,$acc06
+	rlwinm	$acc14,$s1,`0+3`,21,28
+	lwzx	$acc07,$Tbl1,$acc07
+	rlwinm	$acc15,$s2,`0+3`,21,28
+	lwzx	$acc08,$Tbl2,$acc08
+	xor	$t0,$t0,$acc00
+	lwzx	$acc09,$Tbl2,$acc09
+	xor	$t1,$t1,$acc01
+	lwzx	$acc10,$Tbl2,$acc10
+	xor	$t2,$t2,$acc02
+	lwzx	$acc11,$Tbl2,$acc11
+	xor	$t3,$t3,$acc03
+	lwzx	$acc12,$Tbl3,$acc12
+	xor	$t0,$t0,$acc04
+	lwzx	$acc13,$Tbl3,$acc13
+	xor	$t1,$t1,$acc05
+	lwzx	$acc14,$Tbl3,$acc14
+	xor	$t2,$t2,$acc06
+	lwzx	$acc15,$Tbl3,$acc15
+	xor	$t3,$t3,$acc07
+	xor	$t0,$t0,$acc08
+	xor	$t1,$t1,$acc09
+	xor	$t2,$t2,$acc10
+	xor	$t3,$t3,$acc11
+	xor	$s0,$t0,$acc12
+	xor	$s1,$t1,$acc13
+	xor	$s2,$t2,$acc14
+	xor	$s3,$t3,$acc15
+	addi	$key,$key,16
+	bdnz	Lenc_loop
+
+	addi	$Tbl2,$Tbl0,2048
+	nop
+	lwz	$t0,0($key)
+	rlwinm	$acc00,$s0,`32-24`,24,31
+	lwz	$t1,4($key)
+	rlwinm	$acc01,$s1,`32-24`,24,31
+	lwz	$t2,8($key)
+	rlwinm	$acc02,$s2,`32-24`,24,31
+	lwz	$t3,12($key)
+	rlwinm	$acc03,$s3,`32-24`,24,31
+	lwz	$acc08,`2048+0`($Tbl0)	! prefetch Te4
+	rlwinm	$acc04,$s1,`32-16`,24,31
+	lwz	$acc09,`2048+32`($Tbl0)
+	rlwinm	$acc05,$s2,`32-16`,24,31
+	lwz	$acc10,`2048+64`($Tbl0)
+	rlwinm	$acc06,$s3,`32-16`,24,31
+	lwz	$acc11,`2048+96`($Tbl0)
+	rlwinm	$acc07,$s0,`32-16`,24,31
+	lwz	$acc12,`2048+128`($Tbl0)
+	rlwinm	$acc08,$s2,`32-8`,24,31
+	lwz	$acc13,`2048+160`($Tbl0)
+	rlwinm	$acc09,$s3,`32-8`,24,31
+	lwz	$acc14,`2048+192`($Tbl0)
+	rlwinm	$acc10,$s0,`32-8`,24,31
+	lwz	$acc15,`2048+224`($Tbl0)
+	rlwinm	$acc11,$s1,`32-8`,24,31
+	lbzx	$acc00,$Tbl2,$acc00
+	rlwinm	$acc12,$s3,`0`,24,31
+	lbzx	$acc01,$Tbl2,$acc01
+	rlwinm	$acc13,$s0,`0`,24,31
+	lbzx	$acc02,$Tbl2,$acc02
+	rlwinm	$acc14,$s1,`0`,24,31
+	lbzx	$acc03,$Tbl2,$acc03
+	rlwinm	$acc15,$s2,`0`,24,31
+	lbzx	$acc04,$Tbl2,$acc04
+	rlwinm	$s0,$acc00,24,0,7
+	lbzx	$acc05,$Tbl2,$acc05
+	rlwinm	$s1,$acc01,24,0,7
+	lbzx	$acc06,$Tbl2,$acc06
+	rlwinm	$s2,$acc02,24,0,7
+	lbzx	$acc07,$Tbl2,$acc07
+	rlwinm	$s3,$acc03,24,0,7
+	lbzx	$acc08,$Tbl2,$acc08
+	rlwimi	$s0,$acc04,16,8,15
+	lbzx	$acc09,$Tbl2,$acc09
+	rlwimi	$s1,$acc05,16,8,15
+	lbzx	$acc10,$Tbl2,$acc10
+	rlwimi	$s2,$acc06,16,8,15
+	lbzx	$acc11,$Tbl2,$acc11
+	rlwimi	$s3,$acc07,16,8,15
+	lbzx	$acc12,$Tbl2,$acc12
+	rlwimi	$s0,$acc08,8,16,23
+	lbzx	$acc13,$Tbl2,$acc13
+	rlwimi	$s1,$acc09,8,16,23
+	lbzx	$acc14,$Tbl2,$acc14
+	rlwimi	$s2,$acc10,8,16,23
+	lbzx	$acc15,$Tbl2,$acc15
+	rlwimi	$s3,$acc11,8,16,23
+	or	$s0,$s0,$acc12
+	or	$s1,$s1,$acc13
+	or	$s2,$s2,$acc14
+	or	$s3,$s3,$acc15
+	xor	$s0,$s0,$t0
+	xor	$s1,$s1,$t1
+	xor	$s2,$s2,$t2
+	xor	$s3,$s3,$t3
+	blr
+	.long	0
+	.byte	0,12,0x14,0,0,0,0,0
+
+.align	4
+Lppc_AES_encrypt_compact:
+	lwz	$acc00,240($key)
+	addi	$Tbl1,$Tbl0,2048
+	lwz	$t0,0($key)
+	lis	$mask80,0x8080
+	lwz	$t1,4($key)
+	lis	$mask1b,0x1b1b
+	lwz	$t2,8($key)
+	ori	$mask80,$mask80,0x8080
+	lwz	$t3,12($key)
+	ori	$mask1b,$mask1b,0x1b1b
+	addi	$key,$key,16
+	mtctr	$acc00
+.align	4
+Lenc_compact_loop:
+	xor	$s0,$s0,$t0
+	xor	$s1,$s1,$t1
+	rlwinm	$acc00,$s0,`32-24`,24,31
+	xor	$s2,$s2,$t2
+	rlwinm	$acc01,$s1,`32-24`,24,31
+	xor	$s3,$s3,$t3
+	rlwinm	$acc02,$s2,`32-24`,24,31
+	rlwinm	$acc03,$s3,`32-24`,24,31
+	rlwinm	$acc04,$s1,`32-16`,24,31
+	rlwinm	$acc05,$s2,`32-16`,24,31
+	rlwinm	$acc06,$s3,`32-16`,24,31
+	rlwinm	$acc07,$s0,`32-16`,24,31
+	lbzx	$acc00,$Tbl1,$acc00
+	rlwinm	$acc08,$s2,`32-8`,24,31
+	lbzx	$acc01,$Tbl1,$acc01
+	rlwinm	$acc09,$s3,`32-8`,24,31
+	lbzx	$acc02,$Tbl1,$acc02
+	rlwinm	$acc10,$s0,`32-8`,24,31
+	lbzx	$acc03,$Tbl1,$acc03
+	rlwinm	$acc11,$s1,`32-8`,24,31
+	lbzx	$acc04,$Tbl1,$acc04
+	rlwinm	$acc12,$s3,`0`,24,31
+	lbzx	$acc05,$Tbl1,$acc05
+	rlwinm	$acc13,$s0,`0`,24,31
+	lbzx	$acc06,$Tbl1,$acc06
+	rlwinm	$acc14,$s1,`0`,24,31
+	lbzx	$acc07,$Tbl1,$acc07
+	rlwinm	$acc15,$s2,`0`,24,31
+	lbzx	$acc08,$Tbl1,$acc08
+	rlwinm	$s0,$acc00,24,0,7
+	lbzx	$acc09,$Tbl1,$acc09
+	rlwinm	$s1,$acc01,24,0,7
+	lbzx	$acc10,$Tbl1,$acc10
+	rlwinm	$s2,$acc02,24,0,7
+	lbzx	$acc11,$Tbl1,$acc11
+	rlwinm	$s3,$acc03,24,0,7
+	lbzx	$acc12,$Tbl1,$acc12
+	rlwimi	$s0,$acc04,16,8,15
+	lbzx	$acc13,$Tbl1,$acc13
+	rlwimi	$s1,$acc05,16,8,15
+	lbzx	$acc14,$Tbl1,$acc14
+	rlwimi	$s2,$acc06,16,8,15
+	lbzx	$acc15,$Tbl1,$acc15
+	rlwimi	$s3,$acc07,16,8,15
+	rlwimi	$s0,$acc08,8,16,23
+	rlwimi	$s1,$acc09,8,16,23
+	rlwimi	$s2,$acc10,8,16,23
+	rlwimi	$s3,$acc11,8,16,23
+	lwz	$t0,0($key)
+	or	$s0,$s0,$acc12
+	lwz	$t1,4($key)
+	or	$s1,$s1,$acc13
+	lwz	$t2,8($key)
+	or	$s2,$s2,$acc14
+	lwz	$t3,12($key)
+	or	$s3,$s3,$acc15
+
+	addi	$key,$key,16
+	bdz	Lenc_compact_done
+
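+	# branchless doubling of all four state bytes in GF(2^8); in C
+	# terms (reference sketch only):
+	#	r1 = r0 & 0x80808080;
+	#	r2 = ((r0 & 0x7f7f7f7f) << 1) ^ ((r1 - (r1 >> 7)) & 0x1b1b1b1b);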
+	and	$acc00,$s0,$mask80	# r1=r0&0x80808080
+	and	$acc01,$s1,$mask80
+	and	$acc02,$s2,$mask80
+	and	$acc03,$s3,$mask80
+	srwi	$acc04,$acc00,7		# r1>>7
+	andc	$acc08,$s0,$mask80	# r0&0x7f7f7f7f
+	srwi	$acc05,$acc01,7
+	andc	$acc09,$s1,$mask80
+	srwi	$acc06,$acc02,7
+	andc	$acc10,$s2,$mask80
+	srwi	$acc07,$acc03,7
+	andc	$acc11,$s3,$mask80
+	sub	$acc00,$acc00,$acc04	# r1-(r1>>7)
+	sub	$acc01,$acc01,$acc05
+	sub	$acc02,$acc02,$acc06
+	sub	$acc03,$acc03,$acc07
+	add	$acc08,$acc08,$acc08	# (r0&0x7f7f7f7f)<<1
+	add	$acc09,$acc09,$acc09
+	add	$acc10,$acc10,$acc10
+	add	$acc11,$acc11,$acc11
+	and	$acc00,$acc00,$mask1b	# (r1-(r1>>7))&0x1b1b1b1b
+	and	$acc01,$acc01,$mask1b
+	and	$acc02,$acc02,$mask1b
+	and	$acc03,$acc03,$mask1b
+	xor	$acc00,$acc00,$acc08	# r2
+	xor	$acc01,$acc01,$acc09
+	 rotlwi	$acc12,$s0,16		# ROTATE(r0,16)
+	xor	$acc02,$acc02,$acc10
+	 rotlwi	$acc13,$s1,16
+	xor	$acc03,$acc03,$acc11
+	 rotlwi	$acc14,$s2,16
+
+	xor	$s0,$s0,$acc00		# r0^r2
+	rotlwi	$acc15,$s3,16
+	xor	$s1,$s1,$acc01
+	rotrwi	$s0,$s0,24		# ROTATE(r2^r0,24)
+	xor	$s2,$s2,$acc02
+	rotrwi	$s1,$s1,24
+	xor	$s3,$s3,$acc03
+	rotrwi	$s2,$s2,24
+	xor	$s0,$s0,$acc00		# ROTATE(r2^r0,24)^r2
+	rotrwi	$s3,$s3,24
+	xor	$s1,$s1,$acc01
+	xor	$s2,$s2,$acc02
+	xor	$s3,$s3,$acc03
+	rotlwi	$acc08,$acc12,8		# ROTATE(r0,24)
+	xor	$s0,$s0,$acc12		#
+	rotlwi	$acc09,$acc13,8
+	xor	$s1,$s1,$acc13
+	rotlwi	$acc10,$acc14,8
+	xor	$s2,$s2,$acc14
+	rotlwi	$acc11,$acc15,8
+	xor	$s3,$s3,$acc15
+	xor	$s0,$s0,$acc08		#
+	xor	$s1,$s1,$acc09
+	xor	$s2,$s2,$acc10
+	xor	$s3,$s3,$acc11
+
+	b	Lenc_compact_loop
+.align	4
+Lenc_compact_done:
+	xor	$s0,$s0,$t0
+	xor	$s1,$s1,$t1
+	xor	$s2,$s2,$t2
+	xor	$s3,$s3,$t3
+	blr
+	.long	0
+	.byte	0,12,0x14,0,0,0,0,0
+.size	.AES_encrypt,.-.AES_encrypt
+
+.globl	.AES_decrypt
+.align	7
+.AES_decrypt:
+	$STU	$sp,-$FRAME($sp)
+	mflr	r0
+
+	$PUSH	$out,`$FRAME-$SIZE_T*19`($sp)
+	$PUSH	r14,`$FRAME-$SIZE_T*18`($sp)
+	$PUSH	r15,`$FRAME-$SIZE_T*17`($sp)
+	$PUSH	r16,`$FRAME-$SIZE_T*16`($sp)
+	$PUSH	r17,`$FRAME-$SIZE_T*15`($sp)
+	$PUSH	r18,`$FRAME-$SIZE_T*14`($sp)
+	$PUSH	r19,`$FRAME-$SIZE_T*13`($sp)
+	$PUSH	r20,`$FRAME-$SIZE_T*12`($sp)
+	$PUSH	r21,`$FRAME-$SIZE_T*11`($sp)
+	$PUSH	r22,`$FRAME-$SIZE_T*10`($sp)
+	$PUSH	r23,`$FRAME-$SIZE_T*9`($sp)
+	$PUSH	r24,`$FRAME-$SIZE_T*8`($sp)
+	$PUSH	r25,`$FRAME-$SIZE_T*7`($sp)
+	$PUSH	r26,`$FRAME-$SIZE_T*6`($sp)
+	$PUSH	r27,`$FRAME-$SIZE_T*5`($sp)
+	$PUSH	r28,`$FRAME-$SIZE_T*4`($sp)
+	$PUSH	r29,`$FRAME-$SIZE_T*3`($sp)
+	$PUSH	r30,`$FRAME-$SIZE_T*2`($sp)
+	$PUSH	r31,`$FRAME-$SIZE_T*1`($sp)
+	$PUSH	r0,`$FRAME+$LRSAVE`($sp)
+
+	andi.	$t0,$inp,3
+	andi.	$t1,$out,3
+	or.	$t0,$t0,$t1
+	bne	Ldec_unaligned
+
+Ldec_unaligned_ok:
+___
+$code.=<<___ if (!$LITTLE_ENDIAN);
+	lwz	$s0,0($inp)
+	lwz	$s1,4($inp)
+	lwz	$s2,8($inp)
+	lwz	$s3,12($inp)
+___
+$code.=<<___ if ($LITTLE_ENDIAN);
+	lwz	$t0,0($inp)
+	lwz	$t1,4($inp)
+	lwz	$t2,8($inp)
+	lwz	$t3,12($inp)
+	rotlwi	$s0,$t0,8
+	rotlwi	$s1,$t1,8
+	rotlwi	$s2,$t2,8
+	rotlwi	$s3,$t3,8
+	rlwimi	$s0,$t0,24,0,7
+	rlwimi	$s1,$t1,24,0,7
+	rlwimi	$s2,$t2,24,0,7
+	rlwimi	$s3,$t3,24,0,7
+	rlwimi	$s0,$t0,24,16,23
+	rlwimi	$s1,$t1,24,16,23
+	rlwimi	$s2,$t2,24,16,23
+	rlwimi	$s3,$t3,24,16,23
+___
+$code.=<<___;
+	bl	LAES_Td
+	bl	Lppc_AES_decrypt_compact
+	$POP	$out,`$FRAME-$SIZE_T*19`($sp)
+___
+$code.=<<___ if ($LITTLE_ENDIAN);
+	rotlwi	$t0,$s0,8
+	rotlwi	$t1,$s1,8
+	rotlwi	$t2,$s2,8
+	rotlwi	$t3,$s3,8
+	rlwimi	$t0,$s0,24,0,7
+	rlwimi	$t1,$s1,24,0,7
+	rlwimi	$t2,$s2,24,0,7
+	rlwimi	$t3,$s3,24,0,7
+	rlwimi	$t0,$s0,24,16,23
+	rlwimi	$t1,$s1,24,16,23
+	rlwimi	$t2,$s2,24,16,23
+	rlwimi	$t3,$s3,24,16,23
+	stw	$t0,0($out)
+	stw	$t1,4($out)
+	stw	$t2,8($out)
+	stw	$t3,12($out)
+___
+$code.=<<___ if (!$LITTLE_ENDIAN);
+	stw	$s0,0($out)
+	stw	$s1,4($out)
+	stw	$s2,8($out)
+	stw	$s3,12($out)
+___
+$code.=<<___;
+	b	Ldec_done
+
+Ldec_unaligned:
+	subfic	$t0,$inp,4096
+	subfic	$t1,$out,4096
+	andi.	$t0,$t0,4096-16
+	beq	Ldec_xpage
+	andi.	$t1,$t1,4096-16
+	bne	Ldec_unaligned_ok
+
+Ldec_xpage:
+	lbz	$acc00,0($inp)
+	lbz	$acc01,1($inp)
+	lbz	$acc02,2($inp)
+	lbz	$s0,3($inp)
+	lbz	$acc04,4($inp)
+	lbz	$acc05,5($inp)
+	lbz	$acc06,6($inp)
+	lbz	$s1,7($inp)
+	lbz	$acc08,8($inp)
+	lbz	$acc09,9($inp)
+	lbz	$acc10,10($inp)
+	insrwi	$s0,$acc00,8,0
+	lbz	$s2,11($inp)
+	insrwi	$s1,$acc04,8,0
+	lbz	$acc12,12($inp)
+	insrwi	$s0,$acc01,8,8
+	lbz	$acc13,13($inp)
+	insrwi	$s1,$acc05,8,8
+	lbz	$acc14,14($inp)
+	insrwi	$s0,$acc02,8,16
+	lbz	$s3,15($inp)
+	insrwi	$s1,$acc06,8,16
+	insrwi	$s2,$acc08,8,0
+	insrwi	$s3,$acc12,8,0
+	insrwi	$s2,$acc09,8,8
+	insrwi	$s3,$acc13,8,8
+	insrwi	$s2,$acc10,8,16
+	insrwi	$s3,$acc14,8,16
+
+	bl	LAES_Td
+	bl	Lppc_AES_decrypt_compact
+	$POP	$out,`$FRAME-$SIZE_T*19`($sp)
+
+	extrwi	$acc00,$s0,8,0
+	extrwi	$acc01,$s0,8,8
+	stb	$acc00,0($out)
+	extrwi	$acc02,$s0,8,16
+	stb	$acc01,1($out)
+	stb	$acc02,2($out)
+	extrwi	$acc04,$s1,8,0
+	stb	$s0,3($out)
+	extrwi	$acc05,$s1,8,8
+	stb	$acc04,4($out)
+	extrwi	$acc06,$s1,8,16
+	stb	$acc05,5($out)
+	stb	$acc06,6($out)
+	extrwi	$acc08,$s2,8,0
+	stb	$s1,7($out)
+	extrwi	$acc09,$s2,8,8
+	stb	$acc08,8($out)
+	extrwi	$acc10,$s2,8,16
+	stb	$acc09,9($out)
+	stb	$acc10,10($out)
+	extrwi	$acc12,$s3,8,0
+	stb	$s2,11($out)
+	extrwi	$acc13,$s3,8,8
+	stb	$acc12,12($out)
+	extrwi	$acc14,$s3,8,16
+	stb	$acc13,13($out)
+	stb	$acc14,14($out)
+	stb	$s3,15($out)
+
+Ldec_done:
+	$POP	r0,`$FRAME+$LRSAVE`($sp)
+	$POP	r14,`$FRAME-$SIZE_T*18`($sp)
+	$POP	r15,`$FRAME-$SIZE_T*17`($sp)
+	$POP	r16,`$FRAME-$SIZE_T*16`($sp)
+	$POP	r17,`$FRAME-$SIZE_T*15`($sp)
+	$POP	r18,`$FRAME-$SIZE_T*14`($sp)
+	$POP	r19,`$FRAME-$SIZE_T*13`($sp)
+	$POP	r20,`$FRAME-$SIZE_T*12`($sp)
+	$POP	r21,`$FRAME-$SIZE_T*11`($sp)
+	$POP	r22,`$FRAME-$SIZE_T*10`($sp)
+	$POP	r23,`$FRAME-$SIZE_T*9`($sp)
+	$POP	r24,`$FRAME-$SIZE_T*8`($sp)
+	$POP	r25,`$FRAME-$SIZE_T*7`($sp)
+	$POP	r26,`$FRAME-$SIZE_T*6`($sp)
+	$POP	r27,`$FRAME-$SIZE_T*5`($sp)
+	$POP	r28,`$FRAME-$SIZE_T*4`($sp)
+	$POP	r29,`$FRAME-$SIZE_T*3`($sp)
+	$POP	r30,`$FRAME-$SIZE_T*2`($sp)
+	$POP	r31,`$FRAME-$SIZE_T*1`($sp)
+	mtlr	r0
+	addi	$sp,$sp,$FRAME
+	blr
+	.long	0
+	.byte	0,12,4,1,0x80,18,3,0
+	.long	0
+
+.align	5
+Lppc_AES_decrypt:
+	lwz	$acc00,240($key)
+	addi	$Tbl1,$Tbl0,3
+	lwz	$t0,0($key)
+	addi	$Tbl2,$Tbl0,2
+	lwz	$t1,4($key)
+	addi	$Tbl3,$Tbl0,1
+	lwz	$t2,8($key)
+	addi	$acc00,$acc00,-1
+	lwz	$t3,12($key)
+	addi	$key,$key,16
+	xor	$s0,$s0,$t0
+	xor	$s1,$s1,$t1
+	xor	$s2,$s2,$t2
+	xor	$s3,$s3,$t3
+	mtctr	$acc00
+.align	4
+Ldec_loop:
+	rlwinm	$acc00,$s0,`32-24+3`,21,28
+	rlwinm	$acc01,$s1,`32-24+3`,21,28
+	rlwinm	$acc02,$s2,`32-24+3`,21,28
+	rlwinm	$acc03,$s3,`32-24+3`,21,28
+	lwz	$t0,0($key)
+	rlwinm	$acc04,$s3,`32-16+3`,21,28
+	lwz	$t1,4($key)
+	rlwinm	$acc05,$s0,`32-16+3`,21,28
+	lwz	$t2,8($key)
+	rlwinm	$acc06,$s1,`32-16+3`,21,28
+	lwz	$t3,12($key)
+	rlwinm	$acc07,$s2,`32-16+3`,21,28
+	lwzx	$acc00,$Tbl0,$acc00
+	rlwinm	$acc08,$s2,`32-8+3`,21,28
+	lwzx	$acc01,$Tbl0,$acc01
+	rlwinm	$acc09,$s3,`32-8+3`,21,28
+	lwzx	$acc02,$Tbl0,$acc02
+	rlwinm	$acc10,$s0,`32-8+3`,21,28
+	lwzx	$acc03,$Tbl0,$acc03
+	rlwinm	$acc11,$s1,`32-8+3`,21,28
+	lwzx	$acc04,$Tbl1,$acc04
+	rlwinm	$acc12,$s1,`0+3`,21,28
+	lwzx	$acc05,$Tbl1,$acc05
+	rlwinm	$acc13,$s2,`0+3`,21,28
+	lwzx	$acc06,$Tbl1,$acc06
+	rlwinm	$acc14,$s3,`0+3`,21,28
+	lwzx	$acc07,$Tbl1,$acc07
+	rlwinm	$acc15,$s0,`0+3`,21,28
+	lwzx	$acc08,$Tbl2,$acc08
+	xor	$t0,$t0,$acc00
+	lwzx	$acc09,$Tbl2,$acc09
+	xor	$t1,$t1,$acc01
+	lwzx	$acc10,$Tbl2,$acc10
+	xor	$t2,$t2,$acc02
+	lwzx	$acc11,$Tbl2,$acc11
+	xor	$t3,$t3,$acc03
+	lwzx	$acc12,$Tbl3,$acc12
+	xor	$t0,$t0,$acc04
+	lwzx	$acc13,$Tbl3,$acc13
+	xor	$t1,$t1,$acc05
+	lwzx	$acc14,$Tbl3,$acc14
+	xor	$t2,$t2,$acc06
+	lwzx	$acc15,$Tbl3,$acc15
+	xor	$t3,$t3,$acc07
+	xor	$t0,$t0,$acc08
+	xor	$t1,$t1,$acc09
+	xor	$t2,$t2,$acc10
+	xor	$t3,$t3,$acc11
+	xor	$s0,$t0,$acc12
+	xor	$s1,$t1,$acc13
+	xor	$s2,$t2,$acc14
+	xor	$s3,$t3,$acc15
+	addi	$key,$key,16
+	bdnz	Ldec_loop
+
+	addi	$Tbl2,$Tbl0,2048
+	nop
+	lwz	$t0,0($key)
+	rlwinm	$acc00,$s0,`32-24`,24,31
+	lwz	$t1,4($key)
+	rlwinm	$acc01,$s1,`32-24`,24,31
+	lwz	$t2,8($key)
+	rlwinm	$acc02,$s2,`32-24`,24,31
+	lwz	$t3,12($key)
+	rlwinm	$acc03,$s3,`32-24`,24,31
+	lwz	$acc08,`2048+0`($Tbl0)	! prefetch Td4
+	rlwinm	$acc04,$s3,`32-16`,24,31
+	lwz	$acc09,`2048+32`($Tbl0)
+	rlwinm	$acc05,$s0,`32-16`,24,31
+	lwz	$acc10,`2048+64`($Tbl0)
+	lbzx	$acc00,$Tbl2,$acc00
+	lwz	$acc11,`2048+96`($Tbl0)
+	lbzx	$acc01,$Tbl2,$acc01
+	lwz	$acc12,`2048+128`($Tbl0)
+	rlwinm	$acc06,$s1,`32-16`,24,31
+	lwz	$acc13,`2048+160`($Tbl0)
+	rlwinm	$acc07,$s2,`32-16`,24,31
+	lwz	$acc14,`2048+192`($Tbl0)
+	rlwinm	$acc08,$s2,`32-8`,24,31
+	lwz	$acc15,`2048+224`($Tbl0)
+	rlwinm	$acc09,$s3,`32-8`,24,31
+	lbzx	$acc02,$Tbl2,$acc02
+	rlwinm	$acc10,$s0,`32-8`,24,31
+	lbzx	$acc03,$Tbl2,$acc03
+	rlwinm	$acc11,$s1,`32-8`,24,31
+	lbzx	$acc04,$Tbl2,$acc04
+	rlwinm	$acc12,$s1,`0`,24,31
+	lbzx	$acc05,$Tbl2,$acc05
+	rlwinm	$acc13,$s2,`0`,24,31
+	lbzx	$acc06,$Tbl2,$acc06
+	rlwinm	$acc14,$s3,`0`,24,31
+	lbzx	$acc07,$Tbl2,$acc07
+	rlwinm	$acc15,$s0,`0`,24,31
+	lbzx	$acc08,$Tbl2,$acc08
+	rlwinm	$s0,$acc00,24,0,7
+	lbzx	$acc09,$Tbl2,$acc09
+	rlwinm	$s1,$acc01,24,0,7
+	lbzx	$acc10,$Tbl2,$acc10
+	rlwinm	$s2,$acc02,24,0,7
+	lbzx	$acc11,$Tbl2,$acc11
+	rlwinm	$s3,$acc03,24,0,7
+	lbzx	$acc12,$Tbl2,$acc12
+	rlwimi	$s0,$acc04,16,8,15
+	lbzx	$acc13,$Tbl2,$acc13
+	rlwimi	$s1,$acc05,16,8,15
+	lbzx	$acc14,$Tbl2,$acc14
+	rlwimi	$s2,$acc06,16,8,15
+	lbzx	$acc15,$Tbl2,$acc15
+	rlwimi	$s3,$acc07,16,8,15
+	rlwimi	$s0,$acc08,8,16,23
+	rlwimi	$s1,$acc09,8,16,23
+	rlwimi	$s2,$acc10,8,16,23
+	rlwimi	$s3,$acc11,8,16,23
+	or	$s0,$s0,$acc12
+	or	$s1,$s1,$acc13
+	or	$s2,$s2,$acc14
+	or	$s3,$s3,$acc15
+	xor	$s0,$s0,$t0
+	xor	$s1,$s1,$t1
+	xor	$s2,$s2,$t2
+	xor	$s3,$s3,$t3
+	blr
+	.long	0
+	.byte	0,12,0x14,0,0,0,0,0
+
+.align	4
+Lppc_AES_decrypt_compact:
+	lwz	$acc00,240($key)
+	addi	$Tbl1,$Tbl0,2048
+	lwz	$t0,0($key)
+	lis	$mask80,0x8080
+	lwz	$t1,4($key)
+	lis	$mask1b,0x1b1b
+	lwz	$t2,8($key)
+	ori	$mask80,$mask80,0x8080
+	lwz	$t3,12($key)
+	ori	$mask1b,$mask1b,0x1b1b
+	addi	$key,$key,16
+___
+$code.=<<___ if ($SIZE_T==8);
+	insrdi	$mask80,$mask80,32,0
+	insrdi	$mask1b,$mask1b,32,0
+___
+$code.=<<___;
+	mtctr	$acc00
+.align	4
+Ldec_compact_loop:
+	xor	$s0,$s0,$t0
+	xor	$s1,$s1,$t1
+	rlwinm	$acc00,$s0,`32-24`,24,31
+	xor	$s2,$s2,$t2
+	rlwinm	$acc01,$s1,`32-24`,24,31
+	xor	$s3,$s3,$t3
+	rlwinm	$acc02,$s2,`32-24`,24,31
+	rlwinm	$acc03,$s3,`32-24`,24,31
+	rlwinm	$acc04,$s3,`32-16`,24,31
+	rlwinm	$acc05,$s0,`32-16`,24,31
+	rlwinm	$acc06,$s1,`32-16`,24,31
+	rlwinm	$acc07,$s2,`32-16`,24,31
+	lbzx	$acc00,$Tbl1,$acc00
+	rlwinm	$acc08,$s2,`32-8`,24,31
+	lbzx	$acc01,$Tbl1,$acc01
+	rlwinm	$acc09,$s3,`32-8`,24,31
+	lbzx	$acc02,$Tbl1,$acc02
+	rlwinm	$acc10,$s0,`32-8`,24,31
+	lbzx	$acc03,$Tbl1,$acc03
+	rlwinm	$acc11,$s1,`32-8`,24,31
+	lbzx	$acc04,$Tbl1,$acc04
+	rlwinm	$acc12,$s1,`0`,24,31
+	lbzx	$acc05,$Tbl1,$acc05
+	rlwinm	$acc13,$s2,`0`,24,31
+	lbzx	$acc06,$Tbl1,$acc06
+	rlwinm	$acc14,$s3,`0`,24,31
+	lbzx	$acc07,$Tbl1,$acc07
+	rlwinm	$acc15,$s0,`0`,24,31
+	lbzx	$acc08,$Tbl1,$acc08
+	rlwinm	$s0,$acc00,24,0,7
+	lbzx	$acc09,$Tbl1,$acc09
+	rlwinm	$s1,$acc01,24,0,7
+	lbzx	$acc10,$Tbl1,$acc10
+	rlwinm	$s2,$acc02,24,0,7
+	lbzx	$acc11,$Tbl1,$acc11
+	rlwinm	$s3,$acc03,24,0,7
+	lbzx	$acc12,$Tbl1,$acc12
+	rlwimi	$s0,$acc04,16,8,15
+	lbzx	$acc13,$Tbl1,$acc13
+	rlwimi	$s1,$acc05,16,8,15
+	lbzx	$acc14,$Tbl1,$acc14
+	rlwimi	$s2,$acc06,16,8,15
+	lbzx	$acc15,$Tbl1,$acc15
+	rlwimi	$s3,$acc07,16,8,15
+	rlwimi	$s0,$acc08,8,16,23
+	rlwimi	$s1,$acc09,8,16,23
+	rlwimi	$s2,$acc10,8,16,23
+	rlwimi	$s3,$acc11,8,16,23
+	lwz	$t0,0($key)
+	or	$s0,$s0,$acc12
+	lwz	$t1,4($key)
+	or	$s1,$s1,$acc13
+	lwz	$t2,8($key)
+	or	$s2,$s2,$acc14
+	lwz	$t3,12($key)
+	or	$s3,$s3,$acc15
+
+	addi	$key,$key,16
+	bdz	Ldec_compact_done
+___
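+
+# in the comments below r2, r4 and r8 denote the state multiplied by
+# 2, 4 and 8 in GF(2^8); InvMixColumns is then assembled from r0, r2,
+# r4 and r8 plus word rotations in the xor/rotrwi sequence closing
+# the loop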
+$code.=<<___ if ($SIZE_T==8);
+	# vectorized permutation improves decrypt performance by 10%
+	insrdi	$s0,$s1,32,0
+	insrdi	$s2,$s3,32,0
+
+	and	$acc00,$s0,$mask80	# r1=r0&0x80808080
+	and	$acc02,$s2,$mask80
+	srdi	$acc04,$acc00,7		# r1>>7
+	srdi	$acc06,$acc02,7
+	andc	$acc08,$s0,$mask80	# r0&0x7f7f7f7f
+	andc	$acc10,$s2,$mask80
+	sub	$acc00,$acc00,$acc04	# r1-(r1>>7)
+	sub	$acc02,$acc02,$acc06
+	add	$acc08,$acc08,$acc08	# (r0&0x7f7f7f7f)<<1
+	add	$acc10,$acc10,$acc10
+	and	$acc00,$acc00,$mask1b	# (r1-(r1>>7))&0x1b1b1b1b
+	and	$acc02,$acc02,$mask1b
+	xor	$acc00,$acc00,$acc08	# r2
+	xor	$acc02,$acc02,$acc10
+
+	and	$acc04,$acc00,$mask80	# r1=r2&0x80808080
+	and	$acc06,$acc02,$mask80
+	srdi	$acc08,$acc04,7		# r1>>7
+	srdi	$acc10,$acc06,7
+	andc	$acc12,$acc00,$mask80	# r2&0x7f7f7f7f
+	andc	$acc14,$acc02,$mask80
+	sub	$acc04,$acc04,$acc08	# r1-(r1>>7)
+	sub	$acc06,$acc06,$acc10
+	add	$acc12,$acc12,$acc12	# (r2&0x7f7f7f7f)<<1
+	add	$acc14,$acc14,$acc14
+	and	$acc04,$acc04,$mask1b	# (r1-(r1>>7))&0x1b1b1b1b
+	and	$acc06,$acc06,$mask1b
+	xor	$acc04,$acc04,$acc12	# r4
+	xor	$acc06,$acc06,$acc14
+
+	and	$acc08,$acc04,$mask80	# r1=r4&0x80808080
+	and	$acc10,$acc06,$mask80
+	srdi	$acc12,$acc08,7		# r1>>7
+	srdi	$acc14,$acc10,7
+	sub	$acc08,$acc08,$acc12	# r1-(r1>>7)
+	sub	$acc10,$acc10,$acc14
+	andc	$acc12,$acc04,$mask80	# r4&0x7f7f7f7f
+	andc	$acc14,$acc06,$mask80
+	add	$acc12,$acc12,$acc12	# (r4&0x7f7f7f7f)<<1
+	add	$acc14,$acc14,$acc14
+	and	$acc08,$acc08,$mask1b	# (r1-(r1>>7))&0x1b1b1b1b
+	and	$acc10,$acc10,$mask1b
+	xor	$acc08,$acc08,$acc12	# r8
+	xor	$acc10,$acc10,$acc14
+
+	xor	$acc00,$acc00,$s0	# r2^r0
+	xor	$acc02,$acc02,$s2
+	xor	$acc04,$acc04,$s0	# r4^r0
+	xor	$acc06,$acc06,$s2
+
+	extrdi	$acc01,$acc00,32,0
+	extrdi	$acc03,$acc02,32,0
+	extrdi	$acc05,$acc04,32,0
+	extrdi	$acc07,$acc06,32,0
+	extrdi	$acc09,$acc08,32,0
+	extrdi	$acc11,$acc10,32,0
+___
+$code.=<<___ if ($SIZE_T==4);
+	and	$acc00,$s0,$mask80	# r1=r0&0x80808080
+	and	$acc01,$s1,$mask80
+	and	$acc02,$s2,$mask80
+	and	$acc03,$s3,$mask80
+	srwi	$acc04,$acc00,7		# r1>>7
+	andc	$acc08,$s0,$mask80	# r0&0x7f7f7f7f
+	srwi	$acc05,$acc01,7
+	andc	$acc09,$s1,$mask80
+	srwi	$acc06,$acc02,7
+	andc	$acc10,$s2,$mask80
+	srwi	$acc07,$acc03,7
+	andc	$acc11,$s3,$mask80
+	sub	$acc00,$acc00,$acc04	# r1-(r1>>7)
+	sub	$acc01,$acc01,$acc05
+	sub	$acc02,$acc02,$acc06
+	sub	$acc03,$acc03,$acc07
+	add	$acc08,$acc08,$acc08	# (r0&0x7f7f7f7f)<<1
+	add	$acc09,$acc09,$acc09
+	add	$acc10,$acc10,$acc10
+	add	$acc11,$acc11,$acc11
+	and	$acc00,$acc00,$mask1b	# (r1-(r1>>7))&0x1b1b1b1b
+	and	$acc01,$acc01,$mask1b
+	and	$acc02,$acc02,$mask1b
+	and	$acc03,$acc03,$mask1b
+	xor	$acc00,$acc00,$acc08	# r2
+	xor	$acc01,$acc01,$acc09
+	xor	$acc02,$acc02,$acc10
+	xor	$acc03,$acc03,$acc11
+
+	and	$acc04,$acc00,$mask80	# r1=r2&0x80808080
+	and	$acc05,$acc01,$mask80
+	and	$acc06,$acc02,$mask80
+	and	$acc07,$acc03,$mask80
+	srwi	$acc08,$acc04,7		# r1>>7
+	andc	$acc12,$acc00,$mask80	# r2&0x7f7f7f7f
+	srwi	$acc09,$acc05,7
+	andc	$acc13,$acc01,$mask80
+	srwi	$acc10,$acc06,7
+	andc	$acc14,$acc02,$mask80
+	srwi	$acc11,$acc07,7
+	andc	$acc15,$acc03,$mask80
+	sub	$acc04,$acc04,$acc08	# r1-(r1>>7)
+	sub	$acc05,$acc05,$acc09
+	sub	$acc06,$acc06,$acc10
+	sub	$acc07,$acc07,$acc11
+	add	$acc12,$acc12,$acc12	# (r2&0x7f7f7f7f)<<1
+	add	$acc13,$acc13,$acc13
+	add	$acc14,$acc14,$acc14
+	add	$acc15,$acc15,$acc15
+	and	$acc04,$acc04,$mask1b	# (r1-(r1>>7))&0x1b1b1b1b
+	and	$acc05,$acc05,$mask1b
+	and	$acc06,$acc06,$mask1b
+	and	$acc07,$acc07,$mask1b
+	xor	$acc04,$acc04,$acc12	# r4
+	xor	$acc05,$acc05,$acc13
+	xor	$acc06,$acc06,$acc14
+	xor	$acc07,$acc07,$acc15
+
+	and	$acc08,$acc04,$mask80	# r1=r4&0x80808080
+	and	$acc09,$acc05,$mask80
+	srwi	$acc12,$acc08,7		# r1>>7
+	and	$acc10,$acc06,$mask80
+	srwi	$acc13,$acc09,7
+	and	$acc11,$acc07,$mask80
+	srwi	$acc14,$acc10,7
+	sub	$acc08,$acc08,$acc12	# r1-(r1>>7)
+	srwi	$acc15,$acc11,7
+	sub	$acc09,$acc09,$acc13
+	sub	$acc10,$acc10,$acc14
+	sub	$acc11,$acc11,$acc15
+	andc	$acc12,$acc04,$mask80	# r4&0x7f7f7f7f
+	andc	$acc13,$acc05,$mask80
+	andc	$acc14,$acc06,$mask80
+	andc	$acc15,$acc07,$mask80
+	add	$acc12,$acc12,$acc12	# (r4&0x7f7f7f7f)<<1
+	add	$acc13,$acc13,$acc13
+	add	$acc14,$acc14,$acc14
+	add	$acc15,$acc15,$acc15
+	and	$acc08,$acc08,$mask1b	# (r1-(r1>>7))&0x1b1b1b1b
+	and	$acc09,$acc09,$mask1b
+	and	$acc10,$acc10,$mask1b
+	and	$acc11,$acc11,$mask1b
+	xor	$acc08,$acc08,$acc12	# r8
+	xor	$acc09,$acc09,$acc13
+	xor	$acc10,$acc10,$acc14
+	xor	$acc11,$acc11,$acc15
+
+	xor	$acc00,$acc00,$s0	# r2^r0
+	xor	$acc01,$acc01,$s1
+	xor	$acc02,$acc02,$s2
+	xor	$acc03,$acc03,$s3
+	xor	$acc04,$acc04,$s0	# r4^r0
+	xor	$acc05,$acc05,$s1
+	xor	$acc06,$acc06,$s2
+	xor	$acc07,$acc07,$s3
+___
+$code.=<<___;
+	rotrwi	$s0,$s0,8		# = ROTATE(r0,8)
+	rotrwi	$s1,$s1,8
+	xor	$s0,$s0,$acc00		# ^= r2^r0
+	rotrwi	$s2,$s2,8
+	xor	$s1,$s1,$acc01
+	rotrwi	$s3,$s3,8
+	xor	$s2,$s2,$acc02
+	xor	$s3,$s3,$acc03
+	xor	$acc00,$acc00,$acc08
+	xor	$acc01,$acc01,$acc09
+	xor	$acc02,$acc02,$acc10
+	xor	$acc03,$acc03,$acc11
+	xor	$s0,$s0,$acc04		# ^= r4^r0
+	rotrwi	$acc00,$acc00,24
+	xor	$s1,$s1,$acc05
+	rotrwi	$acc01,$acc01,24
+	xor	$s2,$s2,$acc06
+	rotrwi	$acc02,$acc02,24
+	xor	$s3,$s3,$acc07
+	rotrwi	$acc03,$acc03,24
+	xor	$acc04,$acc04,$acc08
+	xor	$acc05,$acc05,$acc09
+	xor	$acc06,$acc06,$acc10
+	xor	$acc07,$acc07,$acc11
+	xor	$s0,$s0,$acc08		# ^= r8 [^((r4^r0)^(r2^r0)=r4^r2)]
+	rotrwi	$acc04,$acc04,16
+	xor	$s1,$s1,$acc09
+	rotrwi	$acc05,$acc05,16
+	xor	$s2,$s2,$acc10
+	rotrwi	$acc06,$acc06,16
+	xor	$s3,$s3,$acc11
+	rotrwi	$acc07,$acc07,16
+	xor	$s0,$s0,$acc00		# ^= ROTATE(r8^r2^r0,24)
+	rotrwi	$acc08,$acc08,8
+	xor	$s1,$s1,$acc01
+	rotrwi	$acc09,$acc09,8
+	xor	$s2,$s2,$acc02
+	rotrwi	$acc10,$acc10,8
+	xor	$s3,$s3,$acc03
+	rotrwi	$acc11,$acc11,8
+	xor	$s0,$s0,$acc04		# ^= ROTATE(r8^r4^r0,16)
+	xor	$s1,$s1,$acc05
+	xor	$s2,$s2,$acc06
+	xor	$s3,$s3,$acc07
+	xor	$s0,$s0,$acc08		# ^= ROTATE(r8,8)
+	xor	$s1,$s1,$acc09
+	xor	$s2,$s2,$acc10
+	xor	$s3,$s3,$acc11
+
+	b	Ldec_compact_loop
+.align	4
+Ldec_compact_done:
+	xor	$s0,$s0,$t0
+	xor	$s1,$s1,$t1
+	xor	$s2,$s2,$t2
+	xor	$s3,$s3,$t3
+	blr
+	.long	0
+	.byte	0,12,0x14,0,0,0,0,0
+.size	.AES_decrypt,.-.AES_decrypt
+
+.asciz	"AES for PPC, CRYPTOGAMS by <appro\@openssl.org>"
+.align	7
+___
+
+$code =~ s/\`([^\`]*)\`/eval $1/gem;
+print $code;
+close STDOUT or die "error closing STDOUT: $!";
diff --git a/ap/lib/libssl/openssl-1.1.1o/crypto/aes/asm/aes-s390x.pl b/ap/lib/libssl/openssl-1.1.1o/crypto/aes/asm/aes-s390x.pl
new file mode 100644
index 0000000..4cb8f43
--- /dev/null
+++ b/ap/lib/libssl/openssl-1.1.1o/crypto/aes/asm/aes-s390x.pl
@@ -0,0 +1,2282 @@
+#! /usr/bin/env perl
+# Copyright 2007-2020 The OpenSSL Project Authors. All Rights Reserved.
+#
+# Licensed under the OpenSSL license (the "License").  You may not use
+# this file except in compliance with the License.  You can obtain a copy
+# in the file LICENSE in the source distribution or at
+# https://www.openssl.org/source/license.html
+
+
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+
+# AES for s390x.
+
+# April 2007.
+#
+# Software performance improvement over gcc-generated code is ~70%, and
+# in absolute terms it is ~73 cycles per byte processed with a 128-bit
+# key. You're likely to exclaim "why so slow?" Keep in mind that z-CPUs
+# are *strictly* in-order and an issued instruction [in this case a
+# load from memory is the critical one] has to complete before execution
+# flow proceeds. S-boxes are compressed to 2KB[+256B].
+#
+# As for hardware acceleration support: it's basically a "teaser," as
+# it can and should be improved in several ways. Most notably, support
+# for CBC is not utilized, nor are multiple blocks ever processed.
+# The software key schedule could also be postponed until hardware
+# support detection... Performance improvement over assembler is
+# reportedly ~2.5x, but can reach >8x [naturally, on larger chunks] if
+# proper support is implemented.
+
+# May 2007.
+#
+# Implement AES_set_[en|de]crypt_key. Key schedule setup is avoided
+# for 128-bit keys, if hardware support is detected.
+
+# January 2009.
+#
+# Add support for hardware AES192/256 and reschedule instructions to
+# minimize/avoid the Address Generation Interlock hazard and to favour
+# the dual-issue z10 pipeline. This gave ~25% improvement on z10 and
+# almost 50% on z9. The gain is smaller on z10 because, being dual-
+# issue, z10 makes it impossible to eliminate the interlock condition:
+# the critical path is not long enough. It still spends ~24 cycles per
+# byte processed with a 128-bit key.
+#
+# Unlike the previous version, hardware support detection takes place
+# only at key schedule setup and is recorded in key->rounds. This is
+# done because deferred key setup can't be made MT-safe for keys
+# longer than 128 bits.
+#
+# Add AES_cbc_encrypt, which gives a dramatic performance improvement,
+# measured at ~6.6x. It's less than the previously mentioned 8x because
+# the software implementation was optimized.
+
+# May 2010.
+#
+# Add AES_ctr32_encrypt. If hardware-assisted, it provides up to 4.3x
+# performance improvement over the "generic" counter mode routine that
+# relies on the single-block, also hardware-assisted, AES_encrypt.
+# "Up to" refers to the fact that the exact throughput depends on the
+# current stack frame alignment within the 4KB page. In the worst case
+# you get ~75% of the maximum, but *on average* it is as much as ~98%,
+# meaning the worst case is unlikely, like hitting a ravine on a
+# plateau.
+
+# November 2010.
+#
+# Adapt for -m31 build. If the kernel supports what's called the
+# "highgprs" feature on Linux [see /proc/cpuinfo], it's possible to use
+# 64-bit instructions and achieve "64-bit" performance even in a 31-bit
+# legacy application context. The feature is not specific to any
+# particular processor, as long as it's a "z-CPU". The latter implies
+# that the code remains z/Architecture specific. On z990 it was
+# measured to perform 2x better than code generated by gcc 4.3.
+
+# December 2010.
+#
+# Add support for the z196 "cipher message with counter" instruction.
+# Note, however, that it's disengaged, because it was measured to
+# perform ~12% worse than vanilla km-based code...
+
+# February 2011.
+#
+# Add AES_xts_[en|de]crypt. This includes support for z196 km-xts-aes
+# instructions, which deliver ~70% improvement over vanilla km-based
+# code at 8KB block size, and ~37% at 512-byte block size.
+
+$flavour = shift;
+
+if ($flavour =~ /3[12]/) {
+	$SIZE_T=4;
+	$g="";
+} else {
+	$SIZE_T=8;
+	$g="g";
+}
+
+while (($output=shift) && ($output!~/\w[\w\-]*\.\w+$/)) {}
+open STDOUT,">$output" or die "can't open $output: $!";
+
+$softonly=0;	# allow hardware support
+
+$t0="%r0";	$mask="%r0";
+$t1="%r1";
+$t2="%r2";	$inp="%r2";
+$t3="%r3";	$out="%r3";	$bits="%r3";
+$key="%r4";
+$i1="%r5";
+$i2="%r6";
+$i3="%r7";
+$s0="%r8";
+$s1="%r9";
+$s2="%r10";
+$s3="%r11";
+$tbl="%r12";
+$rounds="%r13";
+$ra="%r14";
+$sp="%r15";
+
+$stdframe=16*$SIZE_T+4*8;
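+# standard z/Architecture ABI frame: 16 register save slots plus four
+# 8-byte FPR slots, i.e. 96 bytes for the 31-bit ABI and 160 for 64-bit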
+
+sub _data_word()
+{ my $i;
+    while(defined($i=shift)) { $code.=sprintf".long\t0x%08x,0x%08x\n",$i,$i; }
+}
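+# Each table word is emitted twice back to back, so every AES_Te/AES_Td
+# entry occupies 8 bytes and a 4-byte load at byte offset 0..3 within an
+# entry yields the word rotated by that many bytes. A single 2KB table
+# thus serves all four rotated views (Te0..Te3/Td0..Td3); this is the
+# compressed S-box layout mentioned above.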
+
+$code=<<___;
+#include "s390x_arch.h"
+
+.text
+
+.type	AES_Te,\@object
+.align	256
+AES_Te:
+___
+&_data_word(
+	0xc66363a5, 0xf87c7c84, 0xee777799, 0xf67b7b8d,
+	0xfff2f20d, 0xd66b6bbd, 0xde6f6fb1, 0x91c5c554,
+	0x60303050, 0x02010103, 0xce6767a9, 0x562b2b7d,
+	0xe7fefe19, 0xb5d7d762, 0x4dababe6, 0xec76769a,
+	0x8fcaca45, 0x1f82829d, 0x89c9c940, 0xfa7d7d87,
+	0xeffafa15, 0xb25959eb, 0x8e4747c9, 0xfbf0f00b,
+	0x41adadec, 0xb3d4d467, 0x5fa2a2fd, 0x45afafea,
+	0x239c9cbf, 0x53a4a4f7, 0xe4727296, 0x9bc0c05b,
+	0x75b7b7c2, 0xe1fdfd1c, 0x3d9393ae, 0x4c26266a,
+	0x6c36365a, 0x7e3f3f41, 0xf5f7f702, 0x83cccc4f,
+	0x6834345c, 0x51a5a5f4, 0xd1e5e534, 0xf9f1f108,
+	0xe2717193, 0xabd8d873, 0x62313153, 0x2a15153f,
+	0x0804040c, 0x95c7c752, 0x46232365, 0x9dc3c35e,
+	0x30181828, 0x379696a1, 0x0a05050f, 0x2f9a9ab5,
+	0x0e070709, 0x24121236, 0x1b80809b, 0xdfe2e23d,
+	0xcdebeb26, 0x4e272769, 0x7fb2b2cd, 0xea75759f,
+	0x1209091b, 0x1d83839e, 0x582c2c74, 0x341a1a2e,
+	0x361b1b2d, 0xdc6e6eb2, 0xb45a5aee, 0x5ba0a0fb,
+	0xa45252f6, 0x763b3b4d, 0xb7d6d661, 0x7db3b3ce,
+	0x5229297b, 0xdde3e33e, 0x5e2f2f71, 0x13848497,
+	0xa65353f5, 0xb9d1d168, 0x00000000, 0xc1eded2c,
+	0x40202060, 0xe3fcfc1f, 0x79b1b1c8, 0xb65b5bed,
+	0xd46a6abe, 0x8dcbcb46, 0x67bebed9, 0x7239394b,
+	0x944a4ade, 0x984c4cd4, 0xb05858e8, 0x85cfcf4a,
+	0xbbd0d06b, 0xc5efef2a, 0x4faaaae5, 0xedfbfb16,
+	0x864343c5, 0x9a4d4dd7, 0x66333355, 0x11858594,
+	0x8a4545cf, 0xe9f9f910, 0x04020206, 0xfe7f7f81,
+	0xa05050f0, 0x783c3c44, 0x259f9fba, 0x4ba8a8e3,
+	0xa25151f3, 0x5da3a3fe, 0x804040c0, 0x058f8f8a,
+	0x3f9292ad, 0x219d9dbc, 0x70383848, 0xf1f5f504,
+	0x63bcbcdf, 0x77b6b6c1, 0xafdada75, 0x42212163,
+	0x20101030, 0xe5ffff1a, 0xfdf3f30e, 0xbfd2d26d,
+	0x81cdcd4c, 0x180c0c14, 0x26131335, 0xc3ecec2f,
+	0xbe5f5fe1, 0x359797a2, 0x884444cc, 0x2e171739,
+	0x93c4c457, 0x55a7a7f2, 0xfc7e7e82, 0x7a3d3d47,
+	0xc86464ac, 0xba5d5de7, 0x3219192b, 0xe6737395,
+	0xc06060a0, 0x19818198, 0x9e4f4fd1, 0xa3dcdc7f,
+	0x44222266, 0x542a2a7e, 0x3b9090ab, 0x0b888883,
+	0x8c4646ca, 0xc7eeee29, 0x6bb8b8d3, 0x2814143c,
+	0xa7dede79, 0xbc5e5ee2, 0x160b0b1d, 0xaddbdb76,
+	0xdbe0e03b, 0x64323256, 0x743a3a4e, 0x140a0a1e,
+	0x924949db, 0x0c06060a, 0x4824246c, 0xb85c5ce4,
+	0x9fc2c25d, 0xbdd3d36e, 0x43acacef, 0xc46262a6,
+	0x399191a8, 0x319595a4, 0xd3e4e437, 0xf279798b,
+	0xd5e7e732, 0x8bc8c843, 0x6e373759, 0xda6d6db7,
+	0x018d8d8c, 0xb1d5d564, 0x9c4e4ed2, 0x49a9a9e0,
+	0xd86c6cb4, 0xac5656fa, 0xf3f4f407, 0xcfeaea25,
+	0xca6565af, 0xf47a7a8e, 0x47aeaee9, 0x10080818,
+	0x6fbabad5, 0xf0787888, 0x4a25256f, 0x5c2e2e72,
+	0x381c1c24, 0x57a6a6f1, 0x73b4b4c7, 0x97c6c651,
+	0xcbe8e823, 0xa1dddd7c, 0xe874749c, 0x3e1f1f21,
+	0x964b4bdd, 0x61bdbddc, 0x0d8b8b86, 0x0f8a8a85,
+	0xe0707090, 0x7c3e3e42, 0x71b5b5c4, 0xcc6666aa,
+	0x904848d8, 0x06030305, 0xf7f6f601, 0x1c0e0e12,
+	0xc26161a3, 0x6a35355f, 0xae5757f9, 0x69b9b9d0,
+	0x17868691, 0x99c1c158, 0x3a1d1d27, 0x279e9eb9,
+	0xd9e1e138, 0xebf8f813, 0x2b9898b3, 0x22111133,
+	0xd26969bb, 0xa9d9d970, 0x078e8e89, 0x339494a7,
+	0x2d9b9bb6, 0x3c1e1e22, 0x15878792, 0xc9e9e920,
+	0x87cece49, 0xaa5555ff, 0x50282878, 0xa5dfdf7a,
+	0x038c8c8f, 0x59a1a1f8, 0x09898980, 0x1a0d0d17,
+	0x65bfbfda, 0xd7e6e631, 0x844242c6, 0xd06868b8,
+	0x824141c3, 0x299999b0, 0x5a2d2d77, 0x1e0f0f11,
+	0x7bb0b0cb, 0xa85454fc, 0x6dbbbbd6, 0x2c16163a);
+$code.=<<___;
+# Te4[256]
+.byte	0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5
+.byte	0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76
+.byte	0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0
+.byte	0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0
+.byte	0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc
+.byte	0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15
+.byte	0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a
+.byte	0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75
+.byte	0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0
+.byte	0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84
+.byte	0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b
+.byte	0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf
+.byte	0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85
+.byte	0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8
+.byte	0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5
+.byte	0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2
+.byte	0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17
+.byte	0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73
+.byte	0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88
+.byte	0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb
+.byte	0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c
+.byte	0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79
+.byte	0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9
+.byte	0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08
+.byte	0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6
+.byte	0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a
+.byte	0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e
+.byte	0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e
+.byte	0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94
+.byte	0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf
+.byte	0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68
+.byte	0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16
+# rcon[]
+.long	0x01000000, 0x02000000, 0x04000000, 0x08000000
+.long	0x10000000, 0x20000000, 0x40000000, 0x80000000
+.long	0x1B000000, 0x36000000, 0, 0, 0, 0, 0, 0
+.align	256
+.size	AES_Te,.-AES_Te
+
+# void AES_encrypt(const unsigned char *inp, unsigned char *out,
+# 		 const AES_KEY *key) {
+.globl	AES_encrypt
+.type	AES_encrypt,\@function
+AES_encrypt:
+___
+$code.=<<___ if (!$softonly);
+	l	%r0,240($key)
+	lhi	%r1,16
+	clr	%r0,%r1
+	jl	.Lesoft
+
+	la	%r1,0($key)
+	#la	%r2,0($inp)
+	la	%r4,0($out)
+	lghi	%r3,16		# single block length
+	.long	0xb92e0042	# km %r4,%r2
+	brc	1,.-4		# can this happen?
+	br	%r14
+.align	64
+.Lesoft:
+___
+$code.=<<___;
+	stm${g}	%r3,$ra,3*$SIZE_T($sp)
+
+	llgf	$s0,0($inp)
+	llgf	$s1,4($inp)
+	llgf	$s2,8($inp)
+	llgf	$s3,12($inp)
+
+	larl	$tbl,AES_Te
+	bras	$ra,_s390x_AES_encrypt
+
+	l${g}	$out,3*$SIZE_T($sp)
+	st	$s0,0($out)
+	st	$s1,4($out)
+	st	$s2,8($out)
+	st	$s3,12($out)
+
+	lm${g}	%r6,$ra,6*$SIZE_T($sp)
+	br	$ra
+.size	AES_encrypt,.-AES_encrypt
+
+.type   _s390x_AES_encrypt,\@function
+.align	16
+_s390x_AES_encrypt:
+	st${g}	$ra,15*$SIZE_T($sp)
+	x	$s0,0($key)
+	x	$s1,4($key)
+	x	$s2,8($key)
+	x	$s3,12($key)
+	l	$rounds,240($key)
+	llill	$mask,`0xff<<3`
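+	# mask is 0xff<<3 because indices stay pre-scaled by the 8-byte
+	# table entry size: all byte-extraction shifts below are reduced
+	# by 3, yielding ready-to-use byte offsets into AES_Te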
+	aghi	$rounds,-1
+	j	.Lenc_loop
+.align	16
+.Lenc_loop:
+	sllg	$t1,$s0,`0+3`
+	srlg	$t2,$s0,`8-3`
+	srlg	$t3,$s0,`16-3`
+	srl	$s0,`24-3`
+	nr	$s0,$mask
+	ngr	$t1,$mask
+	nr	$t2,$mask
+	nr	$t3,$mask
+
+	srlg	$i1,$s1,`16-3`	# i0
+	sllg	$i2,$s1,`0+3`
+	srlg	$i3,$s1,`8-3`
+	srl	$s1,`24-3`
+	nr	$i1,$mask
+	nr	$s1,$mask
+	ngr	$i2,$mask
+	nr	$i3,$mask
+
+	l	$s0,0($s0,$tbl)	# Te0[s0>>24]
+	l	$t1,1($t1,$tbl)	# Te3[s0>>0]
+	l	$t2,2($t2,$tbl) # Te2[s0>>8]
+	l	$t3,3($t3,$tbl)	# Te1[s0>>16]
+
+	x	$s0,3($i1,$tbl)	# Te1[s1>>16]
+	l	$s1,0($s1,$tbl)	# Te0[s1>>24]
+	x	$t2,1($i2,$tbl)	# Te3[s1>>0]
+	x	$t3,2($i3,$tbl)	# Te2[s1>>8]
+
+	srlg	$i1,$s2,`8-3`	# i0
+	srlg	$i2,$s2,`16-3`	# i1
+	nr	$i1,$mask
+	nr	$i2,$mask
+	sllg	$i3,$s2,`0+3`
+	srl	$s2,`24-3`
+	nr	$s2,$mask
+	ngr	$i3,$mask
+
+	xr	$s1,$t1
+	srlg	$ra,$s3,`8-3`	# i1
+	sllg	$t1,$s3,`0+3`	# i0
+	nr	$ra,$mask
+	la	$key,16($key)
+	ngr	$t1,$mask
+
+	x	$s0,2($i1,$tbl)	# Te2[s2>>8]
+	x	$s1,3($i2,$tbl)	# Te1[s2>>16]
+	l	$s2,0($s2,$tbl)	# Te0[s2>>24]
+	x	$t3,1($i3,$tbl)	# Te3[s2>>0]
+
+	srlg	$i3,$s3,`16-3`	# i2
+	xr	$s2,$t2
+	srl	$s3,`24-3`
+	nr	$i3,$mask
+	nr	$s3,$mask
+
+	x	$s0,0($key)
+	x	$s1,4($key)
+	x	$s2,8($key)
+	x	$t3,12($key)
+
+	x	$s0,1($t1,$tbl)	# Te3[s3>>0]
+	x	$s1,2($ra,$tbl)	# Te2[s3>>8]
+	x	$s2,3($i3,$tbl)	# Te1[s3>>16]
+	l	$s3,0($s3,$tbl)	# Te0[s3>>24]
+	xr	$s3,$t3
+
+	brct	$rounds,.Lenc_loop
+	.align	16
+
+	sllg	$t1,$s0,`0+3`
+	srlg	$t2,$s0,`8-3`
+	ngr	$t1,$mask
+	srlg	$t3,$s0,`16-3`
+	srl	$s0,`24-3`
+	nr	$s0,$mask
+	nr	$t2,$mask
+	nr	$t3,$mask
+
+	srlg	$i1,$s1,`16-3`	# i0
+	sllg	$i2,$s1,`0+3`
+	ngr	$i2,$mask
+	srlg	$i3,$s1,`8-3`
+	srl	$s1,`24-3`
+	nr	$i1,$mask
+	nr	$s1,$mask
+	nr	$i3,$mask
+
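+	# last round: the S-box byte for index i sits at offset 2 of the
+	# duplicated 8-byte entry (both middle bytes of Te0[i] equal S[i]),
+	# so plain byte loads replace the word lookups here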
+	llgc	$s0,2($s0,$tbl)	# Te4[s0>>24]
+	llgc	$t1,2($t1,$tbl)	# Te4[s0>>0]
+	sll	$s0,24
+	llgc	$t2,2($t2,$tbl)	# Te4[s0>>8]
+	llgc	$t3,2($t3,$tbl)	# Te4[s0>>16]
+	sll	$t2,8
+	sll	$t3,16
+
+	llgc	$i1,2($i1,$tbl)	# Te4[s1>>16]
+	llgc	$s1,2($s1,$tbl)	# Te4[s1>>24]
+	llgc	$i2,2($i2,$tbl)	# Te4[s1>>0]
+	llgc	$i3,2($i3,$tbl)	# Te4[s1>>8]
+	sll	$i1,16
+	sll	$s1,24
+	sll	$i3,8
+	or	$s0,$i1
+	or	$s1,$t1
+	or	$t2,$i2
+	or	$t3,$i3
+
+	srlg	$i1,$s2,`8-3`	# i0
+	srlg	$i2,$s2,`16-3`	# i1
+	nr	$i1,$mask
+	nr	$i2,$mask
+	sllg	$i3,$s2,`0+3`
+	srl	$s2,`24-3`
+	ngr	$i3,$mask
+	nr	$s2,$mask
+
+	sllg	$t1,$s3,`0+3`	# i0
+	srlg	$ra,$s3,`8-3`	# i1
+	ngr	$t1,$mask
+
+	llgc	$i1,2($i1,$tbl)	# Te4[s2>>8]
+	llgc	$i2,2($i2,$tbl)	# Te4[s2>>16]
+	sll	$i1,8
+	llgc	$s2,2($s2,$tbl)	# Te4[s2>>24]
+	llgc	$i3,2($i3,$tbl)	# Te4[s2>>0]
+	sll	$i2,16
+	nr	$ra,$mask
+	sll	$s2,24
+	or	$s0,$i1
+	or	$s1,$i2
+	or	$s2,$t2
+	or	$t3,$i3
+
+	srlg	$i3,$s3,`16-3`	# i2
+	srl	$s3,`24-3`
+	nr	$i3,$mask
+	nr	$s3,$mask
+
+	l	$t0,16($key)
+	l	$t2,20($key)
+
+	llgc	$i1,2($t1,$tbl)	# Te4[s3>>0]
+	llgc	$i2,2($ra,$tbl)	# Te4[s3>>8]
+	llgc	$i3,2($i3,$tbl)	# Te4[s3>>16]
+	llgc	$s3,2($s3,$tbl)	# Te4[s3>>24]
+	sll	$i2,8
+	sll	$i3,16
+	sll	$s3,24
+	or	$s0,$i1
+	or	$s1,$i2
+	or	$s2,$i3
+	or	$s3,$t3
+
+	l${g}	$ra,15*$SIZE_T($sp)
+	xr	$s0,$t0
+	xr	$s1,$t2
+	x	$s2,24($key)
+	x	$s3,28($key)
+
+	br	$ra
+.size	_s390x_AES_encrypt,.-_s390x_AES_encrypt
+___
+
+$code.=<<___;
+.type	AES_Td,\@object
+.align	256
+AES_Td:
+___
+&_data_word(
+	0x51f4a750, 0x7e416553, 0x1a17a4c3, 0x3a275e96,
+	0x3bab6bcb, 0x1f9d45f1, 0xacfa58ab, 0x4be30393,
+	0x2030fa55, 0xad766df6, 0x88cc7691, 0xf5024c25,
+	0x4fe5d7fc, 0xc52acbd7, 0x26354480, 0xb562a38f,
+	0xdeb15a49, 0x25ba1b67, 0x45ea0e98, 0x5dfec0e1,
+	0xc32f7502, 0x814cf012, 0x8d4697a3, 0x6bd3f9c6,
+	0x038f5fe7, 0x15929c95, 0xbf6d7aeb, 0x955259da,
+	0xd4be832d, 0x587421d3, 0x49e06929, 0x8ec9c844,
+	0x75c2896a, 0xf48e7978, 0x99583e6b, 0x27b971dd,
+	0xbee14fb6, 0xf088ad17, 0xc920ac66, 0x7dce3ab4,
+	0x63df4a18, 0xe51a3182, 0x97513360, 0x62537f45,
+	0xb16477e0, 0xbb6bae84, 0xfe81a01c, 0xf9082b94,
+	0x70486858, 0x8f45fd19, 0x94de6c87, 0x527bf8b7,
+	0xab73d323, 0x724b02e2, 0xe31f8f57, 0x6655ab2a,
+	0xb2eb2807, 0x2fb5c203, 0x86c57b9a, 0xd33708a5,
+	0x302887f2, 0x23bfa5b2, 0x02036aba, 0xed16825c,
+	0x8acf1c2b, 0xa779b492, 0xf307f2f0, 0x4e69e2a1,
+	0x65daf4cd, 0x0605bed5, 0xd134621f, 0xc4a6fe8a,
+	0x342e539d, 0xa2f355a0, 0x058ae132, 0xa4f6eb75,
+	0x0b83ec39, 0x4060efaa, 0x5e719f06, 0xbd6e1051,
+	0x3e218af9, 0x96dd063d, 0xdd3e05ae, 0x4de6bd46,
+	0x91548db5, 0x71c45d05, 0x0406d46f, 0x605015ff,
+	0x1998fb24, 0xd6bde997, 0x894043cc, 0x67d99e77,
+	0xb0e842bd, 0x07898b88, 0xe7195b38, 0x79c8eedb,
+	0xa17c0a47, 0x7c420fe9, 0xf8841ec9, 0x00000000,
+	0x09808683, 0x322bed48, 0x1e1170ac, 0x6c5a724e,
+	0xfd0efffb, 0x0f853856, 0x3daed51e, 0x362d3927,
+	0x0a0fd964, 0x685ca621, 0x9b5b54d1, 0x24362e3a,
+	0x0c0a67b1, 0x9357e70f, 0xb4ee96d2, 0x1b9b919e,
+	0x80c0c54f, 0x61dc20a2, 0x5a774b69, 0x1c121a16,
+	0xe293ba0a, 0xc0a02ae5, 0x3c22e043, 0x121b171d,
+	0x0e090d0b, 0xf28bc7ad, 0x2db6a8b9, 0x141ea9c8,
+	0x57f11985, 0xaf75074c, 0xee99ddbb, 0xa37f60fd,
+	0xf701269f, 0x5c72f5bc, 0x44663bc5, 0x5bfb7e34,
+	0x8b432976, 0xcb23c6dc, 0xb6edfc68, 0xb8e4f163,
+	0xd731dcca, 0x42638510, 0x13972240, 0x84c61120,
+	0x854a247d, 0xd2bb3df8, 0xaef93211, 0xc729a16d,
+	0x1d9e2f4b, 0xdcb230f3, 0x0d8652ec, 0x77c1e3d0,
+	0x2bb3166c, 0xa970b999, 0x119448fa, 0x47e96422,
+	0xa8fc8cc4, 0xa0f03f1a, 0x567d2cd8, 0x223390ef,
+	0x87494ec7, 0xd938d1c1, 0x8ccaa2fe, 0x98d40b36,
+	0xa6f581cf, 0xa57ade28, 0xdab78e26, 0x3fadbfa4,
+	0x2c3a9de4, 0x5078920d, 0x6a5fcc9b, 0x547e4662,
+	0xf68d13c2, 0x90d8b8e8, 0x2e39f75e, 0x82c3aff5,
+	0x9f5d80be, 0x69d0937c, 0x6fd52da9, 0xcf2512b3,
+	0xc8ac993b, 0x10187da7, 0xe89c636e, 0xdb3bbb7b,
+	0xcd267809, 0x6e5918f4, 0xec9ab701, 0x834f9aa8,
+	0xe6956e65, 0xaaffe67e, 0x21bccf08, 0xef15e8e6,
+	0xbae79bd9, 0x4a6f36ce, 0xea9f09d4, 0x29b07cd6,
+	0x31a4b2af, 0x2a3f2331, 0xc6a59430, 0x35a266c0,
+	0x744ebc37, 0xfc82caa6, 0xe090d0b0, 0x33a7d815,
+	0xf104984a, 0x41ecdaf7, 0x7fcd500e, 0x1791f62f,
+	0x764dd68d, 0x43efb04d, 0xccaa4d54, 0xe49604df,
+	0x9ed1b5e3, 0x4c6a881b, 0xc12c1fb8, 0x4665517f,
+	0x9d5eea04, 0x018c355d, 0xfa877473, 0xfb0b412e,
+	0xb3671d5a, 0x92dbd252, 0xe9105633, 0x6dd64713,
+	0x9ad7618c, 0x37a10c7a, 0x59f8148e, 0xeb133c89,
+	0xcea927ee, 0xb761c935, 0xe11ce5ed, 0x7a47b13c,
+	0x9cd2df59, 0x55f2733f, 0x1814ce79, 0x73c737bf,
+	0x53f7cdea, 0x5ffdaa5b, 0xdf3d6f14, 0x7844db86,
+	0xcaaff381, 0xb968c43e, 0x3824342c, 0xc2a3405f,
+	0x161dc372, 0xbce2250c, 0x283c498b, 0xff0d9541,
+	0x39a80171, 0x080cb3de, 0xd8b4e49c, 0x6456c190,
+	0x7bcb8461, 0xd532b670, 0x486c5c74, 0xd0b85742);
+$code.=<<___;
+# Td4[256]
+.byte	0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38
+.byte	0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb
+.byte	0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87
+.byte	0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb
+.byte	0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d
+.byte	0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e
+.byte	0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2
+.byte	0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25
+.byte	0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16
+.byte	0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92
+.byte	0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda
+.byte	0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84
+.byte	0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a
+.byte	0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06
+.byte	0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02
+.byte	0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b
+.byte	0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea
+.byte	0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73
+.byte	0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85
+.byte	0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e
+.byte	0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89
+.byte	0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b
+.byte	0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20
+.byte	0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4
+.byte	0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31
+.byte	0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f
+.byte	0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d
+.byte	0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef
+.byte	0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0
+.byte	0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61
+.byte	0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26
+.byte	0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d
+.size	AES_Td,.-AES_Td
+
+# void AES_decrypt(const unsigned char *inp, unsigned char *out,
+# 		 const AES_KEY *key) {
+.globl	AES_decrypt
+.type	AES_decrypt,\@function
+AES_decrypt:
+___
+$code.=<<___ if (!$softonly);
+	l	%r0,240($key)
+	lhi	%r1,16
+	clr	%r0,%r1
+	jl	.Ldsoft
+
+	la	%r1,0($key)
+	#la	%r2,0($inp)
+	la	%r4,0($out)
+	lghi	%r3,16		# single block length
+	.long	0xb92e0042	# km %r4,%r2
+	brc	1,.-4		# can this happen?
+	br	%r14
+.align	64
+.Ldsoft:
+___
+$code.=<<___;
+	stm${g}	%r3,$ra,3*$SIZE_T($sp)
+
+	llgf	$s0,0($inp)
+	llgf	$s1,4($inp)
+	llgf	$s2,8($inp)
+	llgf	$s3,12($inp)
+
+	larl	$tbl,AES_Td
+	bras	$ra,_s390x_AES_decrypt
+
+	l${g}	$out,3*$SIZE_T($sp)
+	st	$s0,0($out)
+	st	$s1,4($out)
+	st	$s2,8($out)
+	st	$s3,12($out)
+
+	lm${g}	%r6,$ra,6*$SIZE_T($sp)
+	br	$ra
+.size	AES_decrypt,.-AES_decrypt
+
+.type   _s390x_AES_decrypt,\@function
+.align	16
+_s390x_AES_decrypt:
+	st${g}	$ra,15*$SIZE_T($sp)
+	x	$s0,0($key)
+	x	$s1,4($key)
+	x	$s2,8($key)
+	x	$s3,12($key)
+	l	$rounds,240($key)
+	llill	$mask,`0xff<<3`
+	aghi	$rounds,-1
+	j	.Ldec_loop
+.align	16
+.Ldec_loop:
+	srlg	$t1,$s0,`16-3`
+	srlg	$t2,$s0,`8-3`
+	sllg	$t3,$s0,`0+3`
+	srl	$s0,`24-3`
+	nr	$s0,$mask
+	nr	$t1,$mask
+	nr	$t2,$mask
+	ngr	$t3,$mask
+
+	sllg	$i1,$s1,`0+3`	# i0
+	srlg	$i2,$s1,`16-3`
+	srlg	$i3,$s1,`8-3`
+	srl	$s1,`24-3`
+	ngr	$i1,$mask
+	nr	$s1,$mask
+	nr	$i2,$mask
+	nr	$i3,$mask
+
+	l	$s0,0($s0,$tbl)	# Td0[s0>>24]
+	l	$t1,3($t1,$tbl)	# Td1[s0>>16]
+	l	$t2,2($t2,$tbl)	# Td2[s0>>8]
+	l	$t3,1($t3,$tbl)	# Td3[s0>>0]
+
+	x	$s0,1($i1,$tbl)	# Td3[s1>>0]
+	l	$s1,0($s1,$tbl)	# Td0[s1>>24]
+	x	$t2,3($i2,$tbl)	# Td1[s1>>16]
+	x	$t3,2($i3,$tbl)	# Td2[s1>>8]
+
+	srlg	$i1,$s2,`8-3`	# i0
+	sllg	$i2,$s2,`0+3`	# i1
+	srlg	$i3,$s2,`16-3`
+	srl	$s2,`24-3`
+	nr	$i1,$mask
+	ngr	$i2,$mask
+	nr	$s2,$mask
+	nr	$i3,$mask
+
+	xr	$s1,$t1
+	srlg	$ra,$s3,`8-3`	# i1
+	srlg	$t1,$s3,`16-3`	# i0
+	nr	$ra,$mask
+	la	$key,16($key)
+	nr	$t1,$mask
+
+	x	$s0,2($i1,$tbl)	# Td2[s2>>8]
+	x	$s1,1($i2,$tbl)	# Td3[s2>>0]
+	l	$s2,0($s2,$tbl)	# Td0[s2>>24]
+	x	$t3,3($i3,$tbl)	# Td1[s2>>16]
+
+	sllg	$i3,$s3,`0+3`	# i2
+	srl	$s3,`24-3`
+	ngr	$i3,$mask
+	nr	$s3,$mask
+
+	xr	$s2,$t2
+	x	$s0,0($key)
+	x	$s1,4($key)
+	x	$s2,8($key)
+	x	$t3,12($key)
+
+	x	$s0,3($t1,$tbl)	# Td1[s3>>16]
+	x	$s1,2($ra,$tbl)	# Td2[s3>>8]
+	x	$s2,1($i3,$tbl)	# Td3[s3>>0]
+	l	$s3,0($s3,$tbl)	# Td0[s3>>24]
+	xr	$s3,$t3
+
+	brct	$rounds,.Ldec_loop
+	.align	16
+
+	l	$t1,`2048+0`($tbl)	# prefetch Td4
+	l	$t2,`2048+64`($tbl)
+	l	$t3,`2048+128`($tbl)
+	l	$i1,`2048+192`($tbl)
+	llill	$mask,0xff
+
+	srlg	$i3,$s0,24	# i0
+	srlg	$t1,$s0,16
+	srlg	$t2,$s0,8
+	nr	$s0,$mask	# i3
+	nr	$t1,$mask
+
+	srlg	$i1,$s1,24
+	nr	$t2,$mask
+	srlg	$i2,$s1,16
+	srlg	$ra,$s1,8
+	nr	$s1,$mask	# i0
+	nr	$i2,$mask
+	nr	$ra,$mask
+
+	llgc	$i3,2048($i3,$tbl)	# Td4[s0>>24]
+	llgc	$t1,2048($t1,$tbl)	# Td4[s0>>16]
+	llgc	$t2,2048($t2,$tbl)	# Td4[s0>>8]
+	sll	$t1,16
+	llgc	$t3,2048($s0,$tbl)	# Td4[s0>>0]
+	sllg	$s0,$i3,24
+	sll	$t2,8
+
+	llgc	$s1,2048($s1,$tbl)	# Td4[s1>>0]
+	llgc	$i1,2048($i1,$tbl)	# Td4[s1>>24]
+	llgc	$i2,2048($i2,$tbl)	# Td4[s1>>16]
+	sll	$i1,24
+	llgc	$i3,2048($ra,$tbl)	# Td4[s1>>8]
+	sll	$i2,16
+	sll	$i3,8
+	or	$s0,$s1
+	or	$t1,$i1
+	or	$t2,$i2
+	or	$t3,$i3
+
+	srlg	$i1,$s2,8	# i0
+	srlg	$i2,$s2,24
+	srlg	$i3,$s2,16
+	nr	$s2,$mask	# i1
+	nr	$i1,$mask
+	nr	$i3,$mask
+	llgc	$i1,2048($i1,$tbl)	# Td4[s2>>8]
+	llgc	$s1,2048($s2,$tbl)	# Td4[s2>>0]
+	llgc	$i2,2048($i2,$tbl)	# Td4[s2>>24]
+	llgc	$i3,2048($i3,$tbl)	# Td4[s2>>16]
+	sll	$i1,8
+	sll	$i2,24
+	or	$s0,$i1
+	sll	$i3,16
+	or	$t2,$i2
+	or	$t3,$i3
+
+	srlg	$i1,$s3,16	# i0
+	srlg	$i2,$s3,8	# i1
+	srlg	$i3,$s3,24
+	nr	$s3,$mask	# i2
+	nr	$i1,$mask
+	nr	$i2,$mask
+
+	l${g}	$ra,15*$SIZE_T($sp)
+	or	$s1,$t1
+	l	$t0,16($key)
+	l	$t1,20($key)
+
+	llgc	$i1,2048($i1,$tbl)	# Td4[s3>>16]
+	llgc	$i2,2048($i2,$tbl)	# Td4[s3>>8]
+	sll	$i1,16
+	llgc	$s2,2048($s3,$tbl)	# Td4[s3>>0]
+	llgc	$s3,2048($i3,$tbl)	# Td4[s3>>24]
+	sll	$i2,8
+	sll	$s3,24
+	or	$s0,$i1
+	or	$s1,$i2
+	or	$s2,$t2
+	or	$s3,$t3
+
+	xr	$s0,$t0
+	xr	$s1,$t1
+	x	$s2,24($key)
+	x	$s3,28($key)
+
+	br	$ra
+.size	_s390x_AES_decrypt,.-_s390x_AES_decrypt
+___
+
+$code.=<<___;
+# void AES_set_encrypt_key(const unsigned char *in, int bits,
+# 		 AES_KEY *key) {
+.globl	AES_set_encrypt_key
+.type	AES_set_encrypt_key,\@function
+.align	16
+AES_set_encrypt_key:
+_s390x_AES_set_encrypt_key:
+	lghi	$t0,0
+	cl${g}r	$inp,$t0
+	je	.Lminus1
+	cl${g}r	$key,$t0
+	je	.Lminus1
+
+	lghi	$t0,128
+	clr	$bits,$t0
+	je	.Lproceed
+	lghi	$t0,192
+	clr	$bits,$t0
+	je	.Lproceed
+	lghi	$t0,256
+	clr	$bits,$t0
+	je	.Lproceed
+	lghi	%r2,-2
+	br	%r14
+
+.align	16
+.Lproceed:
+___
+$code.=<<___ if (!$softonly);
+	# convert bits to km(c) code, [128,192,256]->[18,19,20]
+	lhi	%r5,-128
+	lhi	%r0,18
+	ar	%r5,$bits
+	srl	%r5,6
+	ar	%r5,%r0
+
+	larl	%r1,OPENSSL_s390xcap_P
+	llihh	%r0,0x8000
+	srlg	%r0,%r0,0(%r5)
+	ng	%r0,S390X_KM(%r1)  # check availability of both km...
+	ng	%r0,S390X_KMC(%r1) # ...and kmc support for given key length
+	jz	.Lekey_internal
+
+	lmg	%r0,%r1,0($inp)	# just copy 128 bits...
+	stmg	%r0,%r1,0($key)
+	lhi	%r0,192
+	cr	$bits,%r0
+	jl	1f
+	lg	%r1,16($inp)
+	stg	%r1,16($key)
+	je	1f
+	lg	%r1,24($inp)
+	stg	%r1,24($key)
+1:	st	$bits,236($key)	# save bits [for debugging purposes]
+	lgr	$t0,%r5
+	st	%r5,240($key)	# save km(c) code
+	lghi	%r2,0
+	br	%r14
+___
+$code.=<<___;
+.align	16
+.Lekey_internal:
+	stm${g}	%r4,%r13,4*$SIZE_T($sp)	# all non-volatile regs and $key
+
+	larl	$tbl,AES_Te+2048
+
+	llgf	$s0,0($inp)
+	llgf	$s1,4($inp)
+	llgf	$s2,8($inp)
+	llgf	$s3,12($inp)
+	st	$s0,0($key)
+	st	$s1,4($key)
+	st	$s2,8($key)
+	st	$s3,12($key)
+	lghi	$t0,128
+	cr	$bits,$t0
+	jne	.Lnot128
+
+	llill	$mask,0xff
+	lghi	$t3,0			# i=0
+	lghi	$rounds,10
+	st	$rounds,240($key)
+
+	llgfr	$t2,$s3			# temp=rk[3]
+	srlg	$i1,$s3,8
+	srlg	$i2,$s3,16
+	srlg	$i3,$s3,24
+	nr	$t2,$mask
+	nr	$i1,$mask
+	nr	$i2,$mask
+
+.align	16
+.L128_loop:
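+	# one round of FIPS-197 AES-128 key expansion: the four icm inserts
+	# below assemble SubWord(RotWord(rk[3])), which is xor-ed with
+	# rcon[i] and chained through rk[4]..rk[7]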
+	la	$t2,0($t2,$tbl)
+	la	$i1,0($i1,$tbl)
+	la	$i2,0($i2,$tbl)
+	la	$i3,0($i3,$tbl)
+	icm	$t2,2,0($t2)		# Te4[rk[3]>>0]<<8
+	icm	$t2,4,0($i1)		# Te4[rk[3]>>8]<<16
+	icm	$t2,8,0($i2)		# Te4[rk[3]>>16]<<24
+	icm	$t2,1,0($i3)		# Te4[rk[3]>>24]
+	x	$t2,256($t3,$tbl)	# rcon[i]
+	xr	$s0,$t2			# rk[4]=rk[0]^...
+	xr	$s1,$s0			# rk[5]=rk[1]^rk[4]
+	xr	$s2,$s1			# rk[6]=rk[2]^rk[5]
+	xr	$s3,$s2			# rk[7]=rk[3]^rk[6]
+
+	llgfr	$t2,$s3			# temp=rk[3]
+	srlg	$i1,$s3,8
+	srlg	$i2,$s3,16
+	nr	$t2,$mask
+	nr	$i1,$mask
+	srlg	$i3,$s3,24
+	nr	$i2,$mask
+
+	st	$s0,16($key)
+	st	$s1,20($key)
+	st	$s2,24($key)
+	st	$s3,28($key)
+	la	$key,16($key)		# key+=4
+	la	$t3,4($t3)		# i++
+	brct	$rounds,.L128_loop
+	lghi	$t0,10
+	lghi	%r2,0
+	lm${g}	%r4,%r13,4*$SIZE_T($sp)
+	br	$ra
+
+.align	16
+.Lnot128:
+	llgf	$t0,16($inp)
+	llgf	$t1,20($inp)
+	st	$t0,16($key)
+	st	$t1,20($key)
+	lghi	$t0,192
+	cr	$bits,$t0
+	jne	.Lnot192
+
+	llill	$mask,0xff
+	lghi	$t3,0			# i=0
+	lghi	$rounds,12
+	st	$rounds,240($key)
+	lghi	$rounds,8
+
+	srlg	$i1,$t1,8
+	srlg	$i2,$t1,16
+	srlg	$i3,$t1,24
+	nr	$t1,$mask
+	nr	$i1,$mask
+	nr	$i2,$mask
+
+.align	16
+.L192_loop:
+	la	$t1,0($t1,$tbl)
+	la	$i1,0($i1,$tbl)
+	la	$i2,0($i2,$tbl)
+	la	$i3,0($i3,$tbl)
+	icm	$t1,2,0($t1)		# Te4[rk[5]>>0]<<8
+	icm	$t1,4,0($i1)		# Te4[rk[5]>>8]<<16
+	icm	$t1,8,0($i2)		# Te4[rk[5]>>16]<<24
+	icm	$t1,1,0($i3)		# Te4[rk[5]>>24]
+	x	$t1,256($t3,$tbl)	# rcon[i]
+	xr	$s0,$t1			# rk[6]=rk[0]^...
+	xr	$s1,$s0			# rk[7]=rk[1]^rk[6]
+	xr	$s2,$s1			# rk[8]=rk[2]^rk[7]
+	xr	$s3,$s2			# rk[9]=rk[3]^rk[8]
+
+	st	$s0,24($key)
+	st	$s1,28($key)
+	st	$s2,32($key)
+	st	$s3,36($key)
+	brct	$rounds,.L192_continue
+	lghi	$t0,12
+	lghi	%r2,0
+	lm${g}	%r4,%r13,4*$SIZE_T($sp)
+	br	$ra
+
+.align	16
+.L192_continue:
+	lgr	$t1,$s3
+	x	$t1,16($key)		# rk[10]=rk[4]^rk[9]
+	st	$t1,40($key)
+	x	$t1,20($key)		# rk[11]=rk[5]^rk[10]
+	st	$t1,44($key)
+
+	srlg	$i1,$t1,8
+	srlg	$i2,$t1,16
+	srlg	$i3,$t1,24
+	nr	$t1,$mask
+	nr	$i1,$mask
+	nr	$i2,$mask
+
+	la	$key,24($key)		# key+=6
+	la	$t3,4($t3)		# i++
+	j	.L192_loop
+
+.align	16
+.Lnot192:
+	llgf	$t0,24($inp)
+	llgf	$t1,28($inp)
+	st	$t0,24($key)
+	st	$t1,28($key)
+	llill	$mask,0xff
+	lghi	$t3,0			# i=0
+	lghi	$rounds,14
+	st	$rounds,240($key)
+	lghi	$rounds,7
+
+	srlg	$i1,$t1,8
+	srlg	$i2,$t1,16
+	srlg	$i3,$t1,24
+	nr	$t1,$mask
+	nr	$i1,$mask
+	nr	$i2,$mask
+
+.align	16
+.L256_loop:
+	la	$t1,0($t1,$tbl)
+	la	$i1,0($i1,$tbl)
+	la	$i2,0($i2,$tbl)
+	la	$i3,0($i3,$tbl)
+	icm	$t1,2,0($t1)		# Te4[rk[7]>>0]<<8
+	icm	$t1,4,0($i1)		# Te4[rk[7]>>8]<<16
+	icm	$t1,8,0($i2)		# Te4[rk[7]>>16]<<24
+	icm	$t1,1,0($i3)		# Te4[rk[7]>>24]
+	x	$t1,256($t3,$tbl)	# rcon[i]
+	xr	$s0,$t1			# rk[8]=rk[0]^...
+	xr	$s1,$s0			# rk[9]=rk[1]^rk[8]
+	xr	$s2,$s1			# rk[10]=rk[2]^rk[9]
+	xr	$s3,$s2			# rk[11]=rk[3]^rk[10]
+	st	$s0,32($key)
+	st	$s1,36($key)
+	st	$s2,40($key)
+	st	$s3,44($key)
+	brct	$rounds,.L256_continue
+	lghi	$t0,14
+	lghi	%r2,0
+	lm${g}	%r4,%r13,4*$SIZE_T($sp)
+	br	$ra
+
+.align	16
+.L256_continue:
+	lgr	$t1,$s3			# temp=rk[11]
+	srlg	$i1,$s3,8
+	srlg	$i2,$s3,16
+	srlg	$i3,$s3,24
+	nr	$t1,$mask
+	nr	$i1,$mask
+	nr	$i2,$mask
+	la	$t1,0($t1,$tbl)
+	la	$i1,0($i1,$tbl)
+	la	$i2,0($i2,$tbl)
+	la	$i3,0($i3,$tbl)
+	llgc	$t1,0($t1)		# Te4[rk[11]>>0]
+	icm	$t1,2,0($i1)		# Te4[rk[11]>>8]<<8
+	icm	$t1,4,0($i2)		# Te4[rk[11]>>16]<<16
+	icm	$t1,8,0($i3)		# Te4[rk[11]>>24]<<24
+	x	$t1,16($key)		# rk[12]=rk[4]^...
+	st	$t1,48($key)
+	x	$t1,20($key)		# rk[13]=rk[5]^rk[12]
+	st	$t1,52($key)
+	x	$t1,24($key)		# rk[14]=rk[6]^rk[13]
+	st	$t1,56($key)
+	x	$t1,28($key)		# rk[15]=rk[7]^rk[14]
+	st	$t1,60($key)
+
+	srlg	$i1,$t1,8
+	srlg	$i2,$t1,16
+	srlg	$i3,$t1,24
+	nr	$t1,$mask
+	nr	$i1,$mask
+	nr	$i2,$mask
+
+	la	$key,32($key)		# key+=8
+	la	$t3,4($t3)		# i++
+	j	.L256_loop
+
+.Lminus1:
+	lghi	%r2,-1
+	br	$ra
+.size	AES_set_encrypt_key,.-AES_set_encrypt_key
+
+# void AES_set_decrypt_key(const unsigned char *in, int bits,
+# 		 AES_KEY *key) {
+.globl	AES_set_decrypt_key
+.type	AES_set_decrypt_key,\@function
+.align	16
+AES_set_decrypt_key:
+	#st${g}	$key,4*$SIZE_T($sp)	# I rely on AES_set_encrypt_key to
+	st${g}	$ra,14*$SIZE_T($sp)	# save non-volatile registers and $key!
+	bras	$ra,_s390x_AES_set_encrypt_key
+	#l${g}	$key,4*$SIZE_T($sp)
+	l${g}	$ra,14*$SIZE_T($sp)
+	ltgr	%r2,%r2
+	bnzr	$ra
+___
+$code.=<<___ if (!$softonly);
+	#l	$t0,240($key)
+	lhi	$t1,16
+	cr	$t0,$t1
+	jl	.Lgo
+	oill	$t0,S390X_DECRYPT	# set "decrypt" bit
+	st	$t0,240($key)
+	br	$ra
+___
+$code.=<<___;
+.align	16
+.Lgo:	lgr	$rounds,$t0	#llgf	$rounds,240($key)
+	la	$i1,0($key)
+	sllg	$i2,$rounds,4
+	la	$i2,0($i2,$key)
+	srl	$rounds,1
+	lghi	$t1,-16
+
+.align	16
+.Linv:	lmg	$s0,$s1,0($i1)
+	lmg	$s2,$s3,0($i2)
+	stmg	$s0,$s1,0($i2)
+	stmg	$s2,$s3,0($i1)
+	la	$i1,16($i1)
+	la	$i2,0($t1,$i2)
+	brct	$rounds,.Linv
+___
+$mask80=$i1;
+$mask1b=$i2;
+$maskfe=$i3;
+$code.=<<___;
+	llgf	$rounds,240($key)
+	aghi	$rounds,-1
+	sll	$rounds,2	# (rounds-1)*4
+	llilh	$mask80,0x8080
+	llilh	$mask1b,0x1b1b
+	llilh	$maskfe,0xfefe
+	oill	$mask80,0x8080
+	oill	$mask1b,0x1b1b
+	oill	$maskfe,0xfefe
+
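+	# .Lmix applies InvMixColumns to every round-key word (equivalent
+	# inverse cipher, FIPS-197 section 5.3.5): tp2/tp4/tp8 are
+	# successive GF(2^8) doublings of tp1 computed with the
+	# mask80/mask1b/maskfe masks, then recombined via the rotations
+	# annotated below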
+.align	16
+.Lmix:	l	$s0,16($key)	# tp1
+	lr	$s1,$s0
+	ngr	$s1,$mask80
+	srlg	$t1,$s1,7
+	slr	$s1,$t1
+	nr	$s1,$mask1b
+	sllg	$t1,$s0,1
+	nr	$t1,$maskfe
+	xr	$s1,$t1		# tp2
+
+	lr	$s2,$s1
+	ngr	$s2,$mask80
+	srlg	$t1,$s2,7
+	slr	$s2,$t1
+	nr	$s2,$mask1b
+	sllg	$t1,$s1,1
+	nr	$t1,$maskfe
+	xr	$s2,$t1		# tp4
+
+	lr	$s3,$s2
+	ngr	$s3,$mask80
+	srlg	$t1,$s3,7
+	slr	$s3,$t1
+	nr	$s3,$mask1b
+	sllg	$t1,$s2,1
+	nr	$t1,$maskfe
+	xr	$s3,$t1		# tp8
+
+	xr	$s1,$s0		# tp2^tp1
+	xr	$s2,$s0		# tp4^tp1
+	rll	$s0,$s0,24	# = ROTATE(tp1,8)
+	xr	$s2,$s3		# ^=tp8
+	xr	$s0,$s1		# ^=tp2^tp1
+	xr	$s1,$s3		# tp2^tp1^tp8
+	xr	$s0,$s2		# ^=tp4^tp1^tp8
+	rll	$s1,$s1,8
+	rll	$s2,$s2,16
+	xr	$s0,$s1		# ^= ROTATE(tp8^tp2^tp1,24)
+	rll	$s3,$s3,24
+	xr	$s0,$s2    	# ^= ROTATE(tp8^tp4^tp1,16)
+	xr	$s0,$s3		# ^= ROTATE(tp8,8)
+
+	st	$s0,16($key)
+	la	$key,4($key)
+	brct	$rounds,.Lmix
+
+	lm${g}	%r6,%r13,6*$SIZE_T($sp)# as was saved by AES_set_encrypt_key!
+	lghi	%r2,0
+	br	$ra
+.size	AES_set_decrypt_key,.-AES_set_decrypt_key
+___
+
+########################################################################
+# void AES_cbc_encrypt(const unsigned char *in, unsigned char *out,
+#                     size_t length, const AES_KEY *key,
+#                     unsigned char *ivec, const int enc)
+{
+my $inp="%r2";
+my $out="%r4";	# length and out are swapped
+my $len="%r3";
+my $key="%r5";
+my $ivp="%r6";
+
+$code.=<<___;
+.globl	AES_cbc_encrypt
+.type	AES_cbc_encrypt,\@function
+.align	16
+AES_cbc_encrypt:
+	xgr	%r3,%r4		# flip %r3 and %r4, out and len
+	xgr	%r4,%r3
+	xgr	%r3,%r4
+___
+$code.=<<___ if (!$softonly);
+	lhi	%r0,16
+	cl	%r0,240($key)
+	jh	.Lcbc_software
+
+	lg	%r0,0($ivp)	# copy ivec
+	lg	%r1,8($ivp)
+	stmg	%r0,%r1,16($sp)
+	lmg	%r0,%r1,0($key)	# copy key, cover 256 bit
+	stmg	%r0,%r1,32($sp)
+	lmg	%r0,%r1,16($key)
+	stmg	%r0,%r1,48($sp)
+	l	%r0,240($key)	# load kmc code
+	lghi	$key,15		# res=len%16, len-=res;
+	ngr	$key,$len
+	sl${g}r	$len,$key
+	la	%r1,16($sp)	# parameter block - ivec || key
+	jz	.Lkmc_truncated
+	.long	0xb92f0042	# kmc %r4,%r2
+	brc	1,.-4		# pay attention to "partial completion"
+	ltr	$key,$key
+	jnz	.Lkmc_truncated
+.Lkmc_done:
+	lmg	%r0,%r1,16($sp)	# copy ivec to caller
+	stg	%r0,0($ivp)
+	stg	%r1,8($ivp)
+	br	$ra
+.align	16
+.Lkmc_truncated:
+	ahi	$key,-1		# it's the way it's encoded in mvc
+	tmll	%r0,S390X_DECRYPT
+	jnz	.Lkmc_truncated_dec
+	lghi	%r1,0
+	stg	%r1,16*$SIZE_T($sp)
+	stg	%r1,16*$SIZE_T+8($sp)
+	bras	%r1,1f
+	mvc	16*$SIZE_T(1,$sp),0($inp)
+1:	ex	$key,0(%r1)
+	la	%r1,16($sp)	# restore parameter block
+	la	$inp,16*$SIZE_T($sp)
+	lghi	$len,16
+	.long	0xb92f0042	# kmc %r4,%r2
+	j	.Lkmc_done
+.align	16
+.Lkmc_truncated_dec:
+	st${g}	$out,4*$SIZE_T($sp)
+	la	$out,16*$SIZE_T($sp)
+	lghi	$len,16
+	.long	0xb92f0042	# kmc %r4,%r2
+	l${g}	$out,4*$SIZE_T($sp)
+	bras	%r1,2f
+	mvc	0(1,$out),16*$SIZE_T($sp)
+2:	ex	$key,0(%r1)
+	j	.Lkmc_done
+.align	16
+.Lcbc_software:
+___
+$code.=<<___;
+	stm${g}	$key,$ra,5*$SIZE_T($sp)
+	lhi	%r0,0
+	cl	%r0,`$stdframe+$SIZE_T-4`($sp)
+	je	.Lcbc_decrypt
+
+	larl	$tbl,AES_Te
+
+	llgf	$s0,0($ivp)
+	llgf	$s1,4($ivp)
+	llgf	$s2,8($ivp)
+	llgf	$s3,12($ivp)
+
+	lghi	$t0,16
+	sl${g}r	$len,$t0
+	brc	4,.Lcbc_enc_tail	# if borrow
+.Lcbc_enc_loop:
+	stm${g}	$inp,$out,2*$SIZE_T($sp)
+	x	$s0,0($inp)
+	x	$s1,4($inp)
+	x	$s2,8($inp)
+	x	$s3,12($inp)
+	lgr	%r4,$key
+
+	bras	$ra,_s390x_AES_encrypt
+
+	lm${g}	$inp,$key,2*$SIZE_T($sp)
+	st	$s0,0($out)
+	st	$s1,4($out)
+	st	$s2,8($out)
+	st	$s3,12($out)
+
+	la	$inp,16($inp)
+	la	$out,16($out)
+	lghi	$t0,16
+	lt${g}r	$len,$len
+	jz	.Lcbc_enc_done
+	sl${g}r	$len,$t0
+	brc	4,.Lcbc_enc_tail	# if borrow
+	j	.Lcbc_enc_loop
+.align	16
+.Lcbc_enc_done:
+	l${g}	$ivp,6*$SIZE_T($sp)
+	st	$s0,0($ivp)
+	st	$s1,4($ivp)
+	st	$s2,8($ivp)
+	st	$s3,12($ivp)
+
+	lm${g}	%r7,$ra,7*$SIZE_T($sp)
+	br	$ra
+
+.align	16
+.Lcbc_enc_tail:
+	aghi	$len,15
+	lghi	$t0,0
+	stg	$t0,16*$SIZE_T($sp)
+	stg	$t0,16*$SIZE_T+8($sp)
+	bras	$t1,3f
+	mvc	16*$SIZE_T(1,$sp),0($inp)
+3:	ex	$len,0($t1)
+	lghi	$len,0
+	la	$inp,16*$SIZE_T($sp)
+	j	.Lcbc_enc_loop
+
+.align	16
+.Lcbc_decrypt:
+	larl	$tbl,AES_Td
+
+	lg	$t0,0($ivp)
+	lg	$t1,8($ivp)
+	stmg	$t0,$t1,16*$SIZE_T($sp)
+
+.Lcbc_dec_loop:
+	stm${g}	$inp,$out,2*$SIZE_T($sp)
+	llgf	$s0,0($inp)
+	llgf	$s1,4($inp)
+	llgf	$s2,8($inp)
+	llgf	$s3,12($inp)
+	lgr	%r4,$key
+
+	bras	$ra,_s390x_AES_decrypt
+
+	lm${g}	$inp,$key,2*$SIZE_T($sp)
+	sllg	$s0,$s0,32
+	sllg	$s2,$s2,32
+	lr	$s0,$s1
+	lr	$s2,$s3
+
+	lg	$t0,0($inp)
+	lg	$t1,8($inp)
+	xg	$s0,16*$SIZE_T($sp)
+	xg	$s2,16*$SIZE_T+8($sp)
+	lghi	$s1,16
+	sl${g}r	$len,$s1
+	brc	4,.Lcbc_dec_tail	# if borrow
+	brc	2,.Lcbc_dec_done	# if zero
+	stg	$s0,0($out)
+	stg	$s2,8($out)
+	stmg	$t0,$t1,16*$SIZE_T($sp)
+
+	la	$inp,16($inp)
+	la	$out,16($out)
+	j	.Lcbc_dec_loop
+
+.Lcbc_dec_done:
+	stg	$s0,0($out)
+	stg	$s2,8($out)
+.Lcbc_dec_exit:
+	lm${g}	%r6,$ra,6*$SIZE_T($sp)
+	stmg	$t0,$t1,0($ivp)
+
+	br	$ra
+
+.align	16
+.Lcbc_dec_tail:
+	aghi	$len,15
+	stg	$s0,16*$SIZE_T($sp)
+	stg	$s2,16*$SIZE_T+8($sp)
+	bras	$s1,4f
+	mvc	0(1,$out),16*$SIZE_T($sp)
+4:	ex	$len,0($s1)
+	j	.Lcbc_dec_exit
+.size	AES_cbc_encrypt,.-AES_cbc_encrypt
+___
+}
+########################################################################
+# void AES_ctr32_encrypt(const unsigned char *in, unsigned char *out,
+#                     size_t blocks, const AES_KEY *key,
+#                     const unsigned char *ivec)
+{
+my $inp="%r2";
+my $out="%r4";	# blocks and out are swapped
+my $len="%r3";
+my $key="%r5";	my $iv0="%r5";
+my $ivp="%r6";
+my $fp ="%r7";
+
+$code.=<<___;
+.globl	AES_ctr32_encrypt
+.type	AES_ctr32_encrypt,\@function
+.align	16
+AES_ctr32_encrypt:
+	xgr	%r3,%r4		# flip %r3 and %r4, $out and $len
+	xgr	%r4,%r3
+	xgr	%r3,%r4
+	llgfr	$len,$len	# safe in ctr32 subroutine even in 64-bit case
+___
+$code.=<<___ if (!$softonly);
+	l	%r0,240($key)
+	lhi	%r1,16
+	clr	%r0,%r1
+	jl	.Lctr32_software
+
+	st${g}	$s2,10*$SIZE_T($sp)
+	st${g}	$s3,11*$SIZE_T($sp)
+
+	clr	$len,%r1		# does work even in 64-bit mode
+	jle	.Lctr32_nokma		# kma is slower for <= 16 blocks
+
+	larl	%r1,OPENSSL_s390xcap_P
+	lr	$s2,%r0
+	llihh	$s3,0x8000
+	srlg	$s3,$s3,0($s2)
+	ng	$s3,S390X_KMA(%r1)		# check kma capability vector
+	jz	.Lctr32_nokma
+
+	l${g}hi	%r1,-$stdframe-112
+	l${g}r	$s3,$sp
+	la	$sp,0(%r1,$sp)			# prepare parameter block
+
+	lhi	%r1,0x0600
+	sllg	$len,$len,4
+	or	%r0,%r1				# set HS and LAAD flags
+
+	st${g}	$s3,0($sp)			# backchain
+	la	%r1,$stdframe($sp)
+
+	lmg	$s2,$s3,0($key)			# copy key
+	stg	$s2,$stdframe+80($sp)
+	stg	$s3,$stdframe+88($sp)
+	lmg	$s2,$s3,16($key)
+	stg	$s2,$stdframe+96($sp)
+	stg	$s3,$stdframe+104($sp)
+
+	lmg	$s2,$s3,0($ivp)			# copy iv
+	stg	$s2,$stdframe+64($sp)
+	ahi	$s3,-1				# kma requires counter-1
+	stg	$s3,$stdframe+72($sp)
+	st	$s3,$stdframe+12($sp)		# copy counter
+
+	lghi	$s2,0				# no AAD
+	lghi	$s3,0
+
+	.long	0xb929a042	# kma $out,$s2,$inp
+	brc	1,.-4		# pay attention to "partial completion"
+
+	stg	%r0,$stdframe+80($sp)		# wipe key
+	stg	%r0,$stdframe+88($sp)
+	stg	%r0,$stdframe+96($sp)
+	stg	%r0,$stdframe+104($sp)
+	la	$sp,$stdframe+112($sp)
+
+	lm${g}	$s2,$s3,10*$SIZE_T($sp)
+	br	$ra
+
+.align	16
+.Lctr32_nokma:
+	stm${g}	%r6,$s1,6*$SIZE_T($sp)
+
+	slgr	$out,$inp
+	la	%r1,0($key)	# %r1 is permanent copy of $key
+	lg	$iv0,0($ivp)	# load ivec
+	lg	$ivp,8($ivp)
+
+	# prepare and allocate stack frame at the top of 4K page
+	# with 1K reserved for eventual signal handling
+	lghi	$s0,-1024-256-16	# guarantee at least a 256-byte buffer
+	lghi	$s1,-4096
+	algr	$s0,$sp
+	lgr	$fp,$sp
+	ngr	$s0,$s1		# align at page boundary
+	slgr	$fp,$s0		# total buffer size
+	lgr	$s2,$sp
+	lghi	$s1,1024+16	# sl[g]fi is extended-immediate facility
+	slgr	$fp,$s1		# deduct reservation to get usable buffer size
+	# buffer size is at least 256 and at most 3072+256-16
+
+	la	$sp,1024($s0)	# alloca
+	srlg	$fp,$fp,4	# convert bytes to blocks, minimum 16
+	st${g}	$s2,0($sp)	# back-chain
+	st${g}	$fp,$SIZE_T($sp)
+
+	slgr	$len,$fp
+	brc	1,.Lctr32_hw_switch	# not zero, no borrow
+	algr	$fp,$len	# input is shorter than allocated buffer
+	lghi	$len,0
+	st${g}	$fp,$SIZE_T($sp)
+
+.Lctr32_hw_switch:
+___
+$code.=<<___ if (!$softonly && 0);# kmctr code was measured to be ~12% slower
+	llgfr	$s0,%r0
+	lgr	$s1,%r1
+	larl	%r1,OPENSSL_s390xcap_P
+	llihh	%r0,0x8000	# check if kmctr supports the function code
+	srlg	%r0,%r0,0($s0)
+	ng	%r0,S390X_KMCTR(%r1)	# check kmctr capability vector
+	lgr	%r0,$s0
+	lgr	%r1,$s1
+	jz	.Lctr32_km_loop
+
+####### kmctr code
+	algr	$out,$inp	# restore $out
+	lgr	$s1,$len	# $s1 undertakes $len
+	j	.Lctr32_kmctr_loop
+.align	16
+.Lctr32_kmctr_loop:
+	la	$s2,16($sp)
+	lgr	$s3,$fp
+.Lctr32_kmctr_prepare:
+	stg	$iv0,0($s2)
+	stg	$ivp,8($s2)
+	la	$s2,16($s2)
+	ahi	$ivp,1		# 32-bit increment, preserves upper half
+	brct	$s3,.Lctr32_kmctr_prepare
+
+	#la	$inp,0($inp)	# inp
+	sllg	$len,$fp,4	# len
+	#la	$out,0($out)	# out
+	la	$s2,16($sp)	# iv
+	.long	0xb92da042	# kmctr $out,$s2,$inp
+	brc	1,.-4		# pay attention to "partial completion"
+
+	slgr	$s1,$fp
+	brc	1,.Lctr32_kmctr_loop	# not zero, no borrow
+	algr	$fp,$s1
+	lghi	$s1,0
+	brc	4+1,.Lctr32_kmctr_loop	# not zero
+
+	l${g}	$sp,0($sp)
+	lm${g}	%r6,$s3,6*$SIZE_T($sp)
+	br	$ra
+.align	16
+___
+$code.=<<___ if (!$softonly);
+.Lctr32_km_loop:
+	la	$s2,16($sp)
+	lgr	$s3,$fp
+.Lctr32_km_prepare:
+	stg	$iv0,0($s2)
+	stg	$ivp,8($s2)
+	la	$s2,16($s2)
+	ahi	$ivp,1		# 32-bit increment, preserves upper half
+	brct	$s3,.Lctr32_km_prepare
+
+	la	$s0,16($sp)	# inp
+	sllg	$s1,$fp,4	# len
+	la	$s2,16($sp)	# out
+	.long	0xb92e00a8	# km %r10,%r8
+	brc	1,.-4		# pay attention to "partial completion"
+
+	la	$s2,16($sp)
+	lgr	$s3,$fp
+	slgr	$s2,$inp
+.Lctr32_km_xor:
+	lg	$s0,0($inp)
+	lg	$s1,8($inp)
+	xg	$s0,0($s2,$inp)
+	xg	$s1,8($s2,$inp)
+	stg	$s0,0($out,$inp)
+	stg	$s1,8($out,$inp)
+	la	$inp,16($inp)
+	brct	$s3,.Lctr32_km_xor
+
+	slgr	$len,$fp
+	brc	1,.Lctr32_km_loop	# not zero, no borrow
+	algr	$fp,$len
+	lghi	$len,0
+	brc	4+1,.Lctr32_km_loop	# not zero
+
+	l${g}	$s0,0($sp)
+	l${g}	$s1,$SIZE_T($sp)
+	la	$s2,16($sp)
+.Lctr32_km_zap:
+	stg	$s0,0($s2)
+	stg	$s0,8($s2)
+	la	$s2,16($s2)
+	brct	$s1,.Lctr32_km_zap
+
+	la	$sp,0($s0)
+	lm${g}	%r6,$s3,6*$SIZE_T($sp)
+	br	$ra
+.align	16
+.Lctr32_software:
+___
+$code.=<<___;
+	stm${g}	$key,$ra,5*$SIZE_T($sp)
+	sl${g}r	$inp,$out
+	larl	$tbl,AES_Te
+	llgf	$t1,12($ivp)
+
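+	# software CTR fallback: the 32-bit big-endian counter sits in the
+	# last word of ivec; each iteration encrypts ivec||counter and
+	# xors the resulting keystream into the input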
+.Lctr32_loop:
+	stm${g}	$inp,$out,2*$SIZE_T($sp)
+	llgf	$s0,0($ivp)
+	llgf	$s1,4($ivp)
+	llgf	$s2,8($ivp)
+	lgr	$s3,$t1
+	st	$t1,16*$SIZE_T($sp)
+	lgr	%r4,$key
+
+	bras	$ra,_s390x_AES_encrypt
+
+	lm${g}	$inp,$ivp,2*$SIZE_T($sp)
+	llgf	$t1,16*$SIZE_T($sp)
+	x	$s0,0($inp,$out)
+	x	$s1,4($inp,$out)
+	x	$s2,8($inp,$out)
+	x	$s3,12($inp,$out)
+	stm	$s0,$s3,0($out)
+
+	la	$out,16($out)
+	ahi	$t1,1		# 32-bit increment
+	brct	$len,.Lctr32_loop
+
+	lm${g}	%r6,$ra,6*$SIZE_T($sp)
+	br	$ra
+.size	AES_ctr32_encrypt,.-AES_ctr32_encrypt
+___
+}
+
+########################################################################
+# void AES_xts_encrypt(const unsigned char *inp, unsigned char *out,
+#	size_t len, const AES_KEY *key1, const AES_KEY *key2,
+#	const unsigned char iv[16]);
+#
+{
+my $inp="%r2";
+my $out="%r4";	# len and out are swapped
+my $len="%r3";
+my $key1="%r5";	# $i1
+my $key2="%r6";	# $i2
+my $fp="%r7";	# $i3
+my $tweak=16*$SIZE_T+16;	# or $stdframe-16, bottom of the frame...
+
+$code.=<<___;
+.type	_s390x_xts_km,\@function
+.align	16
+_s390x_xts_km:
+___
+$code.=<<___ if(1);
+	llgfr	$s0,%r0			# put aside the function code
+	lghi	$s1,0x7f
+	nr	$s1,%r0
+	larl	%r1,OPENSSL_s390xcap_P
+	llihh	%r0,0x8000
+	srlg	%r0,%r0,32($s1)		# check for 32+function code
+	ng	%r0,S390X_KM(%r1)	# check km capability vector
+	lgr	%r0,$s0			# restore the function code
+	la	%r1,0($key1)		# restore $key1
+	jz	.Lxts_km_vanilla
+
+	lmg	$i2,$i3,$tweak($sp)	# put aside the tweak value
+	algr	$out,$inp
+
+	oill	%r0,32			# switch to xts function code
+	aghi	$s1,-18			#
+	sllg	$s1,$s1,3		# (function code - 18)*8, 0 or 16
+	la	%r1,$tweak-16($sp)
+	slgr	%r1,$s1			# parameter block position
+	lmg	$s0,$s3,0($key1)	# load 256 bits of key material,
+	stmg	$s0,$s3,0(%r1)		# and copy it to parameter block.
+					# yes, it contains junk and overlaps
+					# with the tweak in 128-bit case.
+					# it's done to avoid conditional
+					# branch.
+	stmg	$i2,$i3,$tweak($sp)	# "re-seat" the tweak value
+
+	.long	0xb92e0042		# km %r4,%r2
+	brc	1,.-4			# pay attention to "partial completion"
+
+	lrvg	$s0,$tweak+0($sp)	# load the last tweak
+	lrvg	$s1,$tweak+8($sp)
+	stmg	%r0,%r3,$tweak-32($sp)	# wipe copy of the key
+
+	nill	%r0,0xffdf		# switch back to original function code
+	la	%r1,0($key1)		# restore pointer to $key1
+	slgr	$out,$inp
+
+	llgc	$len,2*$SIZE_T-1($sp)
+	nill	$len,0x0f		# $len%=16
+	br	$ra
+
+.align	16
+.Lxts_km_vanilla:
+___
+$code.=<<___;
+	# prepare and allocate stack frame at the top of 4K page
+	# with 1K reserved for eventual signal handling
+	lghi	$s0,-1024-256-16	# guarantee at least a 256-byte buffer
+	lghi	$s1,-4096
+	algr	$s0,$sp
+	lgr	$fp,$sp
+	ngr	$s0,$s1		# align at page boundary
+	slgr	$fp,$s0		# total buffer size
+	lgr	$s2,$sp
+	lghi	$s1,1024+16	# sl[g]fi is extended-immediate facility
+	slgr	$fp,$s1		# deduct reservation to get usable buffer size
+	# buffer size is at least 256 and at most 3072+256-16
+
+	la	$sp,1024($s0)	# alloca
+	nill	$fp,0xfff0	# round to 16*n
+	st${g}	$s2,0($sp)	# back-chain
+	nill	$len,0xfff0	# redundant
+	st${g}	$fp,$SIZE_T($sp)
+
+	slgr	$len,$fp
+	brc	1,.Lxts_km_go	# not zero, no borrow
+	algr	$fp,$len	# input is shorter than allocated buffer
+	lghi	$len,0
+	st${g}	$fp,$SIZE_T($sp)
+
+.Lxts_km_go:
+	lrvg	$s0,$tweak+0($s2)	# load the tweak value in little-endian
+	lrvg	$s1,$tweak+8($s2)
+
+	la	$s2,16($sp)		# vector of ascending tweak values
+	slgr	$s2,$inp
+	srlg	$s3,$fp,4
+	j	.Lxts_km_start
+
+.Lxts_km_loop:
+	la	$s2,16($sp)
+	slgr	$s2,$inp
+	srlg	$s3,$fp,4
+.Lxts_km_prepare:
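+	# multiply the tweak by x in GF(2^128): double the little-endian
+	# 128-bit value and, if the top bit was set, reduce by xor-ing
+	# 0x87 (the XTS polynomial x^128 + x^7 + x^2 + x + 1)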
+	lghi	$i1,0x87
+	srag	$i2,$s1,63		# broadcast upper bit
+	ngr	$i1,$i2			# rem
+	algr	$s0,$s0
+	alcgr	$s1,$s1
+	xgr	$s0,$i1
+.Lxts_km_start:
+	lrvgr	$i1,$s0			# flip byte order
+	lrvgr	$i2,$s1
+	stg	$i1,0($s2,$inp)
+	stg	$i2,8($s2,$inp)
+	xg	$i1,0($inp)
+	xg	$i2,8($inp)
+	stg	$i1,0($out,$inp)
+	stg	$i2,8($out,$inp)
+	la	$inp,16($inp)
+	brct	$s3,.Lxts_km_prepare
+
+	slgr	$inp,$fp		# rewind $inp
+	la	$s2,0($out,$inp)
+	lgr	$s3,$fp
+	.long	0xb92e00aa		# km $s2,$s2
+	brc	1,.-4			# pay attention to "partial completion"
+
+	la	$s2,16($sp)
+	slgr	$s2,$inp
+	srlg	$s3,$fp,4
+.Lxts_km_xor:
+	lg	$i1,0($out,$inp)
+	lg	$i2,8($out,$inp)
+	xg	$i1,0($s2,$inp)
+	xg	$i2,8($s2,$inp)
+	stg	$i1,0($out,$inp)
+	stg	$i2,8($out,$inp)
+	la	$inp,16($inp)
+	brct	$s3,.Lxts_km_xor
+
+	slgr	$len,$fp
+	brc	1,.Lxts_km_loop		# not zero, no borrow
+	algr	$fp,$len
+	lghi	$len,0
+	brc	4+1,.Lxts_km_loop	# not zero
+
+	l${g}	$i1,0($sp)		# back-chain
+	llgf	$fp,`2*$SIZE_T-4`($sp)	# bytes used
+	la	$i2,16($sp)
+	srlg	$fp,$fp,4
+.Lxts_km_zap:
+	stg	$i1,0($i2)
+	stg	$i1,8($i2)
+	la	$i2,16($i2)
+	brct	$fp,.Lxts_km_zap
+
+	la	$sp,0($i1)
+	llgc	$len,2*$SIZE_T-1($i1)
+	nill	$len,0x0f		# $len%=16
+	bzr	$ra
+
+	# generate one more tweak...
+	lghi	$i1,0x87
+	srag	$i2,$s1,63		# broadcast upper bit
+	ngr	$i1,$i2			# rem
+	algr	$s0,$s0
+	alcgr	$s1,$s1
+	xgr	$s0,$i1
+
+	ltr	$len,$len		# clear zero flag
+	br	$ra
+.size	_s390x_xts_km,.-_s390x_xts_km
+
+.globl	AES_xts_encrypt
+.type	AES_xts_encrypt,\@function
+.align	16
+AES_xts_encrypt:
+	xgr	%r3,%r4			# flip %r3 and %r4, $out and $len
+	xgr	%r4,%r3
+	xgr	%r3,%r4
+___
+$code.=<<___ if ($SIZE_T==4);
+	llgfr	$len,$len
+___
+$code.=<<___;
+	st${g}	$len,1*$SIZE_T($sp)	# save copy of $len
+	srag	$len,$len,4		# formally wrong, because it extends
+					# the sign bit, but who can afford
+					# asking to process more than 2^63-1
+					# bytes? I use it because it sets the
+					# condition code...
+	bcr	8,$ra			# abort if zero (i.e. less than 16)
+___
+$code.=<<___ if (!$softonly);
+	llgf	%r0,240($key2)
+	lhi	%r1,16
+	clr	%r0,%r1
+	jl	.Lxts_enc_software
+
+	st${g}	$ra,5*$SIZE_T($sp)
+	stm${g}	%r6,$s3,6*$SIZE_T($sp)
+
+	sllg	$len,$len,4		# $len&=~15
+	slgr	$out,$inp
+
+	# generate the tweak value
+	l${g}	$s3,$stdframe($sp)	# pointer to iv
+	la	$s2,$tweak($sp)
+	lmg	$s0,$s1,0($s3)
+	lghi	$s3,16
+	stmg	$s0,$s1,0($s2)
+	la	%r1,0($key2)		# $key2 is not needed anymore
+	.long	0xb92e00aa		# km $s2,$s2, generate the tweak
+	brc	1,.-4			# can this happen?
+
+	l	%r0,240($key1)
+	la	%r1,0($key1)		# $key1 is not needed anymore
+	bras	$ra,_s390x_xts_km
+	jz	.Lxts_enc_km_done
+
+	aghi	$inp,-16		# take one step back
+	la	$i3,0($out,$inp)	# put aside real $out
+.Lxts_enc_km_steal:
+	llgc	$i1,16($inp)
+	llgc	$i2,0($out,$inp)
+	stc	$i1,0($out,$inp)
+	stc	$i2,16($out,$inp)
+	la	$inp,1($inp)
+	brct	$len,.Lxts_enc_km_steal
+
+	la	$s2,0($i3)
+	lghi	$s3,16
+	lrvgr	$i1,$s0			# flip byte order
+	lrvgr	$i2,$s1
+	xg	$i1,0($s2)
+	xg	$i2,8($s2)
+	stg	$i1,0($s2)
+	stg	$i2,8($s2)
+	.long	0xb92e00aa		# km $s2,$s2
+	brc	1,.-4			# can this happen?
+	lrvgr	$i1,$s0			# flip byte order
+	lrvgr	$i2,$s1
+	xg	$i1,0($i3)
+	xg	$i2,8($i3)
+	stg	$i1,0($i3)
+	stg	$i2,8($i3)
+
+.Lxts_enc_km_done:
+	stg	$sp,$tweak+0($sp)	# wipe tweak
+	stg	$sp,$tweak+8($sp)
+	l${g}	$ra,5*$SIZE_T($sp)
+	lm${g}	%r6,$s3,6*$SIZE_T($sp)
+	br	$ra
+.align	16
+.Lxts_enc_software:
+___
+$code.=<<___;
+	stm${g}	%r6,$ra,6*$SIZE_T($sp)
+
+	slgr	$out,$inp
+
+	l${g}	$s3,$stdframe($sp)	# ivp
+	llgf	$s0,0($s3)		# load iv
+	llgf	$s1,4($s3)
+	llgf	$s2,8($s3)
+	llgf	$s3,12($s3)
+	stm${g}	%r2,%r5,2*$SIZE_T($sp)
+	la	$key,0($key2)
+	larl	$tbl,AES_Te
+	bras	$ra,_s390x_AES_encrypt	# generate the tweak
+	lm${g}	%r2,%r5,2*$SIZE_T($sp)
+	stm	$s0,$s3,$tweak($sp)	# save the tweak
+	j	.Lxts_enc_enter
+
+.align	16
+.Lxts_enc_loop:
+	lrvg	$s1,$tweak+0($sp)	# load the tweak in little-endian
+	lrvg	$s3,$tweak+8($sp)
+	lghi	%r1,0x87
+	srag	%r0,$s3,63		# broadcast upper bit
+	ngr	%r1,%r0			# rem
+	algr	$s1,$s1
+	alcgr	$s3,$s3
+	xgr	$s1,%r1
+	lrvgr	$s1,$s1			# flip byte order
+	lrvgr	$s3,$s3
+	srlg	$s0,$s1,32		# smash the tweak to 4x32-bits
+	stg	$s1,$tweak+0($sp)	# save the tweak
+	llgfr	$s1,$s1
+	srlg	$s2,$s3,32
+	stg	$s3,$tweak+8($sp)
+	llgfr	$s3,$s3
+	la	$inp,16($inp)		# $inp+=16
+.Lxts_enc_enter:
+	x	$s0,0($inp)		# ^=*($inp)
+	x	$s1,4($inp)
+	x	$s2,8($inp)
+	x	$s3,12($inp)
+	stm${g}	%r2,%r3,2*$SIZE_T($sp)	# only two registers are changing
+	la	$key,0($key1)
+	bras	$ra,_s390x_AES_encrypt
+	lm${g}	%r2,%r5,2*$SIZE_T($sp)
+	x	$s0,$tweak+0($sp)	# ^=tweak
+	x	$s1,$tweak+4($sp)
+	x	$s2,$tweak+8($sp)
+	x	$s3,$tweak+12($sp)
+	st	$s0,0($out,$inp)
+	st	$s1,4($out,$inp)
+	st	$s2,8($out,$inp)
+	st	$s3,12($out,$inp)
+	brct${g}	$len,.Lxts_enc_loop
+
+	llgc	$len,`2*$SIZE_T-1`($sp)
+	nill	$len,0x0f		# $len%16
+	jz	.Lxts_enc_done
+
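+	# ciphertext stealing: swap the trailing input bytes with the
+	# leading bytes of the last full ciphertext block, then re-encrypt
+	# that block under one more tweak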
+	la	$i3,0($inp,$out)	# put aside real $out
+.Lxts_enc_steal:
+	llgc	%r0,16($inp)
+	llgc	%r1,0($out,$inp)
+	stc	%r0,0($out,$inp)
+	stc	%r1,16($out,$inp)
+	la	$inp,1($inp)
+	brct	$len,.Lxts_enc_steal
+	la	$out,0($i3)		# restore real $out
+
+	# generate last tweak...
+	lrvg	$s1,$tweak+0($sp)	# load the tweak in little-endian
+	lrvg	$s3,$tweak+8($sp)
+	lghi	%r1,0x87
+	srag	%r0,$s3,63		# broadcast upper bit
+	ngr	%r1,%r0			# rem
+	algr	$s1,$s1
+	alcgr	$s3,$s3
+	xgr	$s1,%r1
+	lrvgr	$s1,$s1			# flip byte order
+	lrvgr	$s3,$s3
+	srlg	$s0,$s1,32		# smash the tweak to 4x32-bits
+	stg	$s1,$tweak+0($sp)	# save the tweak
+	llgfr	$s1,$s1
+	srlg	$s2,$s3,32
+	stg	$s3,$tweak+8($sp)
+	llgfr	$s3,$s3
+
+	x	$s0,0($out)		# ^=*(inp)|stolen cipher-text
+	x	$s1,4($out)
+	x	$s2,8($out)
+	x	$s3,12($out)
+	st${g}	$out,4*$SIZE_T($sp)
+	la	$key,0($key1)
+	bras	$ra,_s390x_AES_encrypt
+	l${g}	$out,4*$SIZE_T($sp)
+	x	$s0,`$tweak+0`($sp)	# ^=tweak
+	x	$s1,`$tweak+4`($sp)
+	x	$s2,`$tweak+8`($sp)
+	x	$s3,`$tweak+12`($sp)
+	st	$s0,0($out)
+	st	$s1,4($out)
+	st	$s2,8($out)
+	st	$s3,12($out)
+
+.Lxts_enc_done:
+	stg	$sp,$tweak+0($sp)	# wipe tweak
+	stg	$sp,$tweak+8($sp)
+	lm${g}	%r6,$ra,6*$SIZE_T($sp)
+	br	$ra
+.size	AES_xts_encrypt,.-AES_xts_encrypt
+___
+# void AES_xts_decrypt(const unsigned char *inp, unsigned char *out,
+#	size_t len, const AES_KEY *key1, const AES_KEY *key2,
+#	const unsigned char iv[16]);
+#
+$code.=<<___;
+.globl	AES_xts_decrypt
+.type	AES_xts_decrypt,\@function
+.align	16
+AES_xts_decrypt:
+	xgr	%r3,%r4			# flip %r3 and %r4, $out and $len
+	xgr	%r4,%r3
+	xgr	%r3,%r4
+___
+$code.=<<___ if ($SIZE_T==4);
+	llgfr	$len,$len
+___
+$code.=<<___;
+	st${g}	$len,1*$SIZE_T($sp)	# save copy of $len
+	aghi	$len,-16
+	bcr	4,$ra			# abort if less than zero. formally
+					# wrong, because $len is unsigned,
+					# but who can afford asking to
+					# process more than 2^63-1 bytes?
+	tmll	$len,0x0f
+	jnz	.Lxts_dec_proceed
+	aghi	$len,16
+.Lxts_dec_proceed:
+___
+$code.=<<___ if (!$softonly);
+	llgf	%r0,240($key2)
+	lhi	%r1,16
+	clr	%r0,%r1
+	jl	.Lxts_dec_software
+
+	st${g}	$ra,5*$SIZE_T($sp)
+	stm${g}	%r6,$s3,6*$SIZE_T($sp)
+
+	nill	$len,0xfff0		# $len&=~15
+	slgr	$out,$inp
+
+	# generate the tweak value
+	l${g}	$s3,$stdframe($sp)	# pointer to iv
+	la	$s2,$tweak($sp)
+	lmg	$s0,$s1,0($s3)
+	lghi	$s3,16
+	stmg	$s0,$s1,0($s2)
+	la	%r1,0($key2)		# $key2 is not needed past this point
+	.long	0xb92e00aa		# km $s2,$s2, generate the tweak
+	brc	1,.-4			# can this happen?
+
+	l	%r0,240($key1)
+	la	%r1,0($key1)		# $key1 is not needed anymore
+
+	ltgr	$len,$len
+	jz	.Lxts_dec_km_short
+	bras	$ra,_s390x_xts_km
+	jz	.Lxts_dec_km_done
+
+	lrvgr	$s2,$s0			# make copy in reverse byte order
+	lrvgr	$s3,$s1
+	j	.Lxts_dec_km_2ndtweak
+
+.Lxts_dec_km_short:
+	llgc	$len,`2*$SIZE_T-1`($sp)
+	nill	$len,0x0f		# $len%=16
+	lrvg	$s0,$tweak+0($sp)	# load the tweak
+	lrvg	$s1,$tweak+8($sp)
+	lrvgr	$s2,$s0			# make copy in reverse byte order
+	lrvgr	$s3,$s1
+
+.Lxts_dec_km_2ndtweak:
+	lghi	$i1,0x87
+	srag	$i2,$s1,63		# broadcast upper bit
+	ngr	$i1,$i2			# rem
+	algr	$s0,$s0
+	alcgr	$s1,$s1
+	xgr	$s0,$i1
+	lrvgr	$i1,$s0			# flip byte order
+	lrvgr	$i2,$s1
+
+	xg	$i1,0($inp)
+	xg	$i2,8($inp)
+	stg	$i1,0($out,$inp)
+	stg	$i2,8($out,$inp)
+	la	$i2,0($out,$inp)
+	lghi	$i3,16
+	.long	0xb92e0066		# km $i2,$i2
+	brc	1,.-4			# can this happen?
+	lrvgr	$i1,$s0
+	lrvgr	$i2,$s1
+	xg	$i1,0($out,$inp)
+	xg	$i2,8($out,$inp)
+	stg	$i1,0($out,$inp)
+	stg	$i2,8($out,$inp)
+
+	la	$i3,0($out,$inp)	# put aside real $out
+.Lxts_dec_km_steal:
+	llgc	$i1,16($inp)
+	llgc	$i2,0($out,$inp)
+	stc	$i1,0($out,$inp)
+	stc	$i2,16($out,$inp)
+	la	$inp,1($inp)
+	brct	$len,.Lxts_dec_km_steal
+
+	lgr	$s0,$s2
+	lgr	$s1,$s3
+	xg	$s0,0($i3)
+	xg	$s1,8($i3)
+	stg	$s0,0($i3)
+	stg	$s1,8($i3)
+	la	$s0,0($i3)
+	lghi	$s1,16
+	.long	0xb92e0088		# km $s0,$s0
+	brc	1,.-4			# can this happen?
+	xg	$s2,0($i3)
+	xg	$s3,8($i3)
+	stg	$s2,0($i3)
+	stg	$s3,8($i3)
+.Lxts_dec_km_done:
+	stg	$sp,$tweak+0($sp)	# wipe tweak
+	stg	$sp,$tweak+8($sp)
+	l${g}	$ra,5*$SIZE_T($sp)
+	lm${g}	%r6,$s3,6*$SIZE_T($sp)
+	br	$ra
+.align	16
+.Lxts_dec_software:
+___
+$code.=<<___;
+	stm${g}	%r6,$ra,6*$SIZE_T($sp)
+
+	srlg	$len,$len,4
+	slgr	$out,$inp
+
+	l${g}	$s3,$stdframe($sp)	# ivp
+	llgf	$s0,0($s3)		# load iv
+	llgf	$s1,4($s3)
+	llgf	$s2,8($s3)
+	llgf	$s3,12($s3)
+	stm${g}	%r2,%r5,2*$SIZE_T($sp)
+	la	$key,0($key2)
+	larl	$tbl,AES_Te
+	bras	$ra,_s390x_AES_encrypt	# generate the tweak
+	lm${g}	%r2,%r5,2*$SIZE_T($sp)
+	larl	$tbl,AES_Td
+	lt${g}r	$len,$len
+	stm	$s0,$s3,$tweak($sp)	# save the tweak
+	jz	.Lxts_dec_short
+	j	.Lxts_dec_enter
+
+.align	16
+.Lxts_dec_loop:
+	lrvg	$s1,$tweak+0($sp)	# load the tweak in little-endian
+	lrvg	$s3,$tweak+8($sp)
+	lghi	%r1,0x87
+	srag	%r0,$s3,63		# broadcast upper bit
+	ngr	%r1,%r0			# rem
+	algr	$s1,$s1
+	alcgr	$s3,$s3
+	xgr	$s1,%r1
+	lrvgr	$s1,$s1			# flip byte order
+	lrvgr	$s3,$s3
+	srlg	$s0,$s1,32		# smash the tweak to 4x32-bits
+	stg	$s1,$tweak+0($sp)	# save the tweak
+	llgfr	$s1,$s1
+	srlg	$s2,$s3,32
+	stg	$s3,$tweak+8($sp)
+	llgfr	$s3,$s3
+.Lxts_dec_enter:
+	x	$s0,0($inp)		# tweak^=*(inp)
+	x	$s1,4($inp)
+	x	$s2,8($inp)
+	x	$s3,12($inp)
+	stm${g}	%r2,%r3,2*$SIZE_T($sp)	# only two registers are changing
+	la	$key,0($key1)
+	bras	$ra,_s390x_AES_decrypt
+	lm${g}	%r2,%r5,2*$SIZE_T($sp)
+	x	$s0,$tweak+0($sp)	# ^=tweak
+	x	$s1,$tweak+4($sp)
+	x	$s2,$tweak+8($sp)
+	x	$s3,$tweak+12($sp)
+	st	$s0,0($out,$inp)
+	st	$s1,4($out,$inp)
+	st	$s2,8($out,$inp)
+	st	$s3,12($out,$inp)
+	la	$inp,16($inp)
+	brct${g}	$len,.Lxts_dec_loop
+
+	llgc	$len,`2*$SIZE_T-1`($sp)
+	nill	$len,0x0f		# $len%16
+	jz	.Lxts_dec_done
+
+	# generate pair of tweaks...
+	lrvg	$s1,$tweak+0($sp)	# load the tweak in little-endian
+	lrvg	$s3,$tweak+8($sp)
+	lghi	%r1,0x87
+	srag	%r0,$s3,63		# broadcast upper bit
+	ngr	%r1,%r0			# rem
+	algr	$s1,$s1
+	alcgr	$s3,$s3
+	xgr	$s1,%r1
+	lrvgr	$i2,$s1			# flip byte order
+	lrvgr	$i3,$s3
+	stmg	$i2,$i3,$tweak($sp)	# save the 1st tweak
+	j	.Lxts_dec_2ndtweak
+
+.align	16
+.Lxts_dec_short:
+	llgc	$len,`2*$SIZE_T-1`($sp)
+	nill	$len,0x0f		# $len%16
+	lrvg	$s1,$tweak+0($sp)	# load the tweak in little-endian
+	lrvg	$s3,$tweak+8($sp)
+.Lxts_dec_2ndtweak:
+	lghi	%r1,0x87
+	srag	%r0,$s3,63		# broadcast upper bit
+	ngr	%r1,%r0			# rem
+	algr	$s1,$s1
+	alcgr	$s3,$s3
+	xgr	$s1,%r1
+	lrvgr	$s1,$s1			# flip byte order
+	lrvgr	$s3,$s3
+	srlg	$s0,$s1,32		# smash the tweak to 4x32-bits
+	stg	$s1,$tweak-16+0($sp)	# save the 2nd tweak
+	llgfr	$s1,$s1
+	srlg	$s2,$s3,32
+	stg	$s3,$tweak-16+8($sp)
+	llgfr	$s3,$s3
+
+	x	$s0,0($inp)		# tweak_the_2nd^=*(inp)
+	x	$s1,4($inp)
+	x	$s2,8($inp)
+	x	$s3,12($inp)
+	stm${g}	%r2,%r3,2*$SIZE_T($sp)
+	la	$key,0($key1)
+	bras	$ra,_s390x_AES_decrypt
+	lm${g}	%r2,%r5,2*$SIZE_T($sp)
+	x	$s0,$tweak-16+0($sp)	# ^=tweak_the_2nd
+	x	$s1,$tweak-16+4($sp)
+	x	$s2,$tweak-16+8($sp)
+	x	$s3,$tweak-16+12($sp)
+	st	$s0,0($out,$inp)
+	st	$s1,4($out,$inp)
+	st	$s2,8($out,$inp)
+	st	$s3,12($out,$inp)
+
+	la	$i3,0($out,$inp)	# put aside real $out
+.Lxts_dec_steal:
+	llgc	%r0,16($inp)
+	llgc	%r1,0($out,$inp)
+	stc	%r0,0($out,$inp)
+	stc	%r1,16($out,$inp)
+	la	$inp,1($inp)
+	brct	$len,.Lxts_dec_steal
+	la	$out,0($i3)		# restore real $out
+
+	lm	$s0,$s3,$tweak($sp)	# load the 1st tweak
+	x	$s0,0($out)		# tweak^=*(inp)|stolen cipher-text
+	x	$s1,4($out)
+	x	$s2,8($out)
+	x	$s3,12($out)
+	st${g}	$out,4*$SIZE_T($sp)
+	la	$key,0($key1)
+	bras	$ra,_s390x_AES_decrypt
+	l${g}	$out,4*$SIZE_T($sp)
+	x	$s0,$tweak+0($sp)	# ^=tweak
+	x	$s1,$tweak+4($sp)
+	x	$s2,$tweak+8($sp)
+	x	$s3,$tweak+12($sp)
+	st	$s0,0($out)
+	st	$s1,4($out)
+	st	$s2,8($out)
+	st	$s3,12($out)
+	stg	$sp,$tweak-16+0($sp)	# wipe 2nd tweak
+	stg	$sp,$tweak-16+8($sp)
+.Lxts_dec_done:
+	stg	$sp,$tweak+0($sp)	# wipe tweak
+	stg	$sp,$tweak+8($sp)
+	lm${g}	%r6,$ra,6*$SIZE_T($sp)
+	br	$ra
+.size	AES_xts_decrypt,.-AES_xts_decrypt
+___
+}
+$code.=<<___;
+.string	"AES for s390x, CRYPTOGAMS by <appro\@openssl.org>"
+___
+
+$code =~ s/\`([^\`]*)\`/eval $1/gem;
+print $code;
+close STDOUT or die "error closing STDOUT: $!";	# force flush
diff --git a/ap/lib/libssl/openssl-1.1.1o/crypto/aes/asm/aes-sparcv9.pl b/ap/lib/libssl/openssl-1.1.1o/crypto/aes/asm/aes-sparcv9.pl
new file mode 100755
index 0000000..1b37a92
--- /dev/null
+++ b/ap/lib/libssl/openssl-1.1.1o/crypto/aes/asm/aes-sparcv9.pl
@@ -0,0 +1,1192 @@
+#! /usr/bin/env perl
+# Copyright 2005-2020 The OpenSSL Project Authors. All Rights Reserved.
+#
+# Licensed under the OpenSSL license (the "License").  You may not use
+# this file except in compliance with the License.  You can obtain a copy
+# in the file LICENSE in the source distribution or at
+# https://www.openssl.org/source/license.html
+
+#
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. Rights for redistribution and usage in source and binary
+# forms are granted according to the OpenSSL license.
+# ====================================================================
+#
+# Version 1.1
+#
+# The major reason for undertaking this effort was to mitigate the
+# hazard of cache-timing attacks. This is [currently and initially!]
+# addressed in two ways. 1. S-boxes are compressed from 5KB to
+# 2KB+256B each. 2. References to them are scheduled for L2 cache
+# latency, so the tables don't have to reside in the L1 cache. Once
+# again, this is an initial draft and one should expect more
+# countermeasures to be implemented...
+#
+# Version 1.1 prefetches T[ed]4 in order to mitigate an attack on the
+# last round.
+#
+# Even though performance was not the primary goal [on the contrary,
+# the extra shifts "induced" by the compressed S-box and the longer
+# loop epilogue "induced" by scheduling for L2 have a negative effect
+# on performance], the code turned out to run at ~23 cycles per
+# processed byte en-/decrypted with a 128-bit key. This is a pretty
+# good result for code with the mentioned qualities on an UltraSPARC
+# core. Compared to Sun C generated code, my encrypt procedure runs
+# just a few percent faster, while my decrypt procedure is a whole
+# 50% faster [yes, Sun C failed to generate an optimal decrypt
+# procedure]. Compared to GNU C generated code, both procedures are
+# more than 60% faster:-)
+
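A rough C rendering of the compressed-table scheme the comment above describes may help: each of the 256 entries stores the 32-bit T-word twice in one 64-bit slot, so the per-column rotations of the classic four 1KB tables become plain 64-bit shifts. A sketch under that assumption (Te and te_column are illustrative names, not part of the module):

    #include <stdint.h>

    static uint64_t Te[256];        /* entry = (w << 32) | w; 2KB total */

    /* te_column(s, i) plays the role of the classic Te_i[byte_i(s)]:
     * the "and ...,2040" (0x7f8) mask in the assembly is this byte
     * index scaled by 8, and the srlx picks the needed rotation. */
    static uint32_t te_column(uint32_t s, int i)
    {
        uint64_t e = Te[(s >> (24 - 8 * i)) & 0xff];
        return (uint32_t)(e >> (8 * i));
    }

    /* one round column: t0 = te_column(s0,0) ^ te_column(s1,1)
     *                      ^ te_column(s2,2) ^ te_column(s3,3) ^ rk; */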
+$output = pop;
+open STDOUT,">$output";
+
+$frame="STACK_FRAME";
+$bias="STACK_BIAS";
+$locals=16;
+
+$acc0="%l0";
+$acc1="%o0";
+$acc2="%o1";
+$acc3="%o2";
+
+$acc4="%l1";
+$acc5="%o3";
+$acc6="%o4";
+$acc7="%o5";
+
+$acc8="%l2";
+$acc9="%o7";
+$acc10="%g1";
+$acc11="%g2";
+
+$acc12="%l3";
+$acc13="%g3";
+$acc14="%g4";
+$acc15="%g5";
+
+$t0="%l4";
+$t1="%l5";
+$t2="%l6";
+$t3="%l7";
+
+$s0="%i0";
+$s1="%i1";
+$s2="%i2";
+$s3="%i3";
+$tbl="%i4";
+$key="%i5";
+$rounds="%i7";	# aliases with return address, which is off-loaded to stack
+
+sub _data_word()
+{ my $i;
+    while(defined($i=shift)) { $code.=sprintf"\t.long\t0x%08x,0x%08x\n",$i,$i; }
+}
+
+$code.=<<___;
+#include "sparc_arch.h"
+
+#ifdef  __arch64__
+.register	%g2,#scratch
+.register	%g3,#scratch
+#endif
+.section	".text",#alloc,#execinstr
+
+.align	256
+AES_Te:
+___
+&_data_word(
+	0xc66363a5, 0xf87c7c84, 0xee777799, 0xf67b7b8d,
+	0xfff2f20d, 0xd66b6bbd, 0xde6f6fb1, 0x91c5c554,
+	0x60303050, 0x02010103, 0xce6767a9, 0x562b2b7d,
+	0xe7fefe19, 0xb5d7d762, 0x4dababe6, 0xec76769a,
+	0x8fcaca45, 0x1f82829d, 0x89c9c940, 0xfa7d7d87,
+	0xeffafa15, 0xb25959eb, 0x8e4747c9, 0xfbf0f00b,
+	0x41adadec, 0xb3d4d467, 0x5fa2a2fd, 0x45afafea,
+	0x239c9cbf, 0x53a4a4f7, 0xe4727296, 0x9bc0c05b,
+	0x75b7b7c2, 0xe1fdfd1c, 0x3d9393ae, 0x4c26266a,
+	0x6c36365a, 0x7e3f3f41, 0xf5f7f702, 0x83cccc4f,
+	0x6834345c, 0x51a5a5f4, 0xd1e5e534, 0xf9f1f108,
+	0xe2717193, 0xabd8d873, 0x62313153, 0x2a15153f,
+	0x0804040c, 0x95c7c752, 0x46232365, 0x9dc3c35e,
+	0x30181828, 0x379696a1, 0x0a05050f, 0x2f9a9ab5,
+	0x0e070709, 0x24121236, 0x1b80809b, 0xdfe2e23d,
+	0xcdebeb26, 0x4e272769, 0x7fb2b2cd, 0xea75759f,
+	0x1209091b, 0x1d83839e, 0x582c2c74, 0x341a1a2e,
+	0x361b1b2d, 0xdc6e6eb2, 0xb45a5aee, 0x5ba0a0fb,
+	0xa45252f6, 0x763b3b4d, 0xb7d6d661, 0x7db3b3ce,
+	0x5229297b, 0xdde3e33e, 0x5e2f2f71, 0x13848497,
+	0xa65353f5, 0xb9d1d168, 0x00000000, 0xc1eded2c,
+	0x40202060, 0xe3fcfc1f, 0x79b1b1c8, 0xb65b5bed,
+	0xd46a6abe, 0x8dcbcb46, 0x67bebed9, 0x7239394b,
+	0x944a4ade, 0x984c4cd4, 0xb05858e8, 0x85cfcf4a,
+	0xbbd0d06b, 0xc5efef2a, 0x4faaaae5, 0xedfbfb16,
+	0x864343c5, 0x9a4d4dd7, 0x66333355, 0x11858594,
+	0x8a4545cf, 0xe9f9f910, 0x04020206, 0xfe7f7f81,
+	0xa05050f0, 0x783c3c44, 0x259f9fba, 0x4ba8a8e3,
+	0xa25151f3, 0x5da3a3fe, 0x804040c0, 0x058f8f8a,
+	0x3f9292ad, 0x219d9dbc, 0x70383848, 0xf1f5f504,
+	0x63bcbcdf, 0x77b6b6c1, 0xafdada75, 0x42212163,
+	0x20101030, 0xe5ffff1a, 0xfdf3f30e, 0xbfd2d26d,
+	0x81cdcd4c, 0x180c0c14, 0x26131335, 0xc3ecec2f,
+	0xbe5f5fe1, 0x359797a2, 0x884444cc, 0x2e171739,
+	0x93c4c457, 0x55a7a7f2, 0xfc7e7e82, 0x7a3d3d47,
+	0xc86464ac, 0xba5d5de7, 0x3219192b, 0xe6737395,
+	0xc06060a0, 0x19818198, 0x9e4f4fd1, 0xa3dcdc7f,
+	0x44222266, 0x542a2a7e, 0x3b9090ab, 0x0b888883,
+	0x8c4646ca, 0xc7eeee29, 0x6bb8b8d3, 0x2814143c,
+	0xa7dede79, 0xbc5e5ee2, 0x160b0b1d, 0xaddbdb76,
+	0xdbe0e03b, 0x64323256, 0x743a3a4e, 0x140a0a1e,
+	0x924949db, 0x0c06060a, 0x4824246c, 0xb85c5ce4,
+	0x9fc2c25d, 0xbdd3d36e, 0x43acacef, 0xc46262a6,
+	0x399191a8, 0x319595a4, 0xd3e4e437, 0xf279798b,
+	0xd5e7e732, 0x8bc8c843, 0x6e373759, 0xda6d6db7,
+	0x018d8d8c, 0xb1d5d564, 0x9c4e4ed2, 0x49a9a9e0,
+	0xd86c6cb4, 0xac5656fa, 0xf3f4f407, 0xcfeaea25,
+	0xca6565af, 0xf47a7a8e, 0x47aeaee9, 0x10080818,
+	0x6fbabad5, 0xf0787888, 0x4a25256f, 0x5c2e2e72,
+	0x381c1c24, 0x57a6a6f1, 0x73b4b4c7, 0x97c6c651,
+	0xcbe8e823, 0xa1dddd7c, 0xe874749c, 0x3e1f1f21,
+	0x964b4bdd, 0x61bdbddc, 0x0d8b8b86, 0x0f8a8a85,
+	0xe0707090, 0x7c3e3e42, 0x71b5b5c4, 0xcc6666aa,
+	0x904848d8, 0x06030305, 0xf7f6f601, 0x1c0e0e12,
+	0xc26161a3, 0x6a35355f, 0xae5757f9, 0x69b9b9d0,
+	0x17868691, 0x99c1c158, 0x3a1d1d27, 0x279e9eb9,
+	0xd9e1e138, 0xebf8f813, 0x2b9898b3, 0x22111133,
+	0xd26969bb, 0xa9d9d970, 0x078e8e89, 0x339494a7,
+	0x2d9b9bb6, 0x3c1e1e22, 0x15878792, 0xc9e9e920,
+	0x87cece49, 0xaa5555ff, 0x50282878, 0xa5dfdf7a,
+	0x038c8c8f, 0x59a1a1f8, 0x09898980, 0x1a0d0d17,
+	0x65bfbfda, 0xd7e6e631, 0x844242c6, 0xd06868b8,
+	0x824141c3, 0x299999b0, 0x5a2d2d77, 0x1e0f0f11,
+	0x7bb0b0cb, 0xa85454fc, 0x6dbbbbd6, 0x2c16163a);
+$code.=<<___;
+	.byte	0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5
+	.byte	0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76
+	.byte	0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0
+	.byte	0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0
+	.byte	0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc
+	.byte	0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15
+	.byte	0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a
+	.byte	0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75
+	.byte	0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0
+	.byte	0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84
+	.byte	0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b
+	.byte	0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf
+	.byte	0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85
+	.byte	0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8
+	.byte	0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5
+	.byte	0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2
+	.byte	0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17
+	.byte	0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73
+	.byte	0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88
+	.byte	0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb
+	.byte	0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c
+	.byte	0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79
+	.byte	0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9
+	.byte	0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08
+	.byte	0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6
+	.byte	0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a
+	.byte	0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e
+	.byte	0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e
+	.byte	0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94
+	.byte	0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf
+	.byte	0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68
+	.byte	0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16
+.type	AES_Te,#object
+.size	AES_Te,(.-AES_Te)
+
+.align	64
+.skip	16
+_sparcv9_AES_encrypt:
+	save	%sp,-$frame-$locals,%sp
+	stx	%i7,[%sp+$bias+$frame+0]	! off-load return address
+	ld	[$key+240],$rounds
+	ld	[$key+0],$t0
+	ld	[$key+4],$t1			!
+	ld	[$key+8],$t2
+	srl	$rounds,1,$rounds
+	xor	$t0,$s0,$s0
+	ld	[$key+12],$t3
+	srl	$s0,21,$acc0
+	xor	$t1,$s1,$s1
+	ld	[$key+16],$t0
+	srl	$s1,13,$acc1			!
+	xor	$t2,$s2,$s2
+	ld	[$key+20],$t1
+	xor	$t3,$s3,$s3
+	ld	[$key+24],$t2
+	and	$acc0,2040,$acc0
+	ld	[$key+28],$t3
+	nop
+.Lenc_loop:
+	srl	$s2,5,$acc2			!
+	and	$acc1,2040,$acc1
+	ldx	[$tbl+$acc0],$acc0
+	sll	$s3,3,$acc3
+	and	$acc2,2040,$acc2
+	ldx	[$tbl+$acc1],$acc1
+	srl	$s1,21,$acc4
+	and	$acc3,2040,$acc3
+	ldx	[$tbl+$acc2],$acc2		!
+	srl	$s2,13,$acc5
+	and	$acc4,2040,$acc4
+	ldx	[$tbl+$acc3],$acc3
+	srl	$s3,5,$acc6
+	and	$acc5,2040,$acc5
+	ldx	[$tbl+$acc4],$acc4
+	fmovs	%f0,%f0
+	sll	$s0,3,$acc7			!
+	and	$acc6,2040,$acc6
+	ldx	[$tbl+$acc5],$acc5
+	srl	$s2,21,$acc8
+	and	$acc7,2040,$acc7
+	ldx	[$tbl+$acc6],$acc6
+	srl	$s3,13,$acc9
+	and	$acc8,2040,$acc8
+	ldx	[$tbl+$acc7],$acc7		!
+	srl	$s0,5,$acc10
+	and	$acc9,2040,$acc9
+	ldx	[$tbl+$acc8],$acc8
+	sll	$s1,3,$acc11
+	and	$acc10,2040,$acc10
+	ldx	[$tbl+$acc9],$acc9
+	fmovs	%f0,%f0
+	srl	$s3,21,$acc12			!
+	and	$acc11,2040,$acc11
+	ldx	[$tbl+$acc10],$acc10
+	srl	$s0,13,$acc13
+	and	$acc12,2040,$acc12
+	ldx	[$tbl+$acc11],$acc11
+	srl	$s1,5,$acc14
+	and	$acc13,2040,$acc13
+	ldx	[$tbl+$acc12],$acc12		!
+	sll	$s2,3,$acc15
+	and	$acc14,2040,$acc14
+	ldx	[$tbl+$acc13],$acc13
+	and	$acc15,2040,$acc15
+	add	$key,32,$key
+	ldx	[$tbl+$acc14],$acc14
+	fmovs	%f0,%f0
+	subcc	$rounds,1,$rounds		!
+	ldx	[$tbl+$acc15],$acc15
+	bz,a,pn	%icc,.Lenc_last
+	add	$tbl,2048,$rounds
+
+		srlx	$acc1,8,$acc1
+		xor	$acc0,$t0,$t0
+	ld	[$key+0],$s0
+	fmovs	%f0,%f0
+		srlx	$acc2,16,$acc2		!
+		xor	$acc1,$t0,$t0
+	ld	[$key+4],$s1
+		srlx	$acc3,24,$acc3
+		xor	$acc2,$t0,$t0
+	ld	[$key+8],$s2
+		srlx	$acc5,8,$acc5
+		xor	$acc3,$t0,$t0
+	ld	[$key+12],$s3			!
+		srlx	$acc6,16,$acc6
+		xor	$acc4,$t1,$t1
+	fmovs	%f0,%f0
+		srlx	$acc7,24,$acc7
+		xor	$acc5,$t1,$t1
+		srlx	$acc9,8,$acc9
+		xor	$acc6,$t1,$t1
+		srlx	$acc10,16,$acc10	!
+		xor	$acc7,$t1,$t1
+		srlx	$acc11,24,$acc11
+		xor	$acc8,$t2,$t2
+		srlx	$acc13,8,$acc13
+		xor	$acc9,$t2,$t2
+		srlx	$acc14,16,$acc14
+		xor	$acc10,$t2,$t2
+		srlx	$acc15,24,$acc15	!
+		xor	$acc11,$t2,$t2
+		xor	$acc12,$acc14,$acc14
+		xor	$acc13,$t3,$t3
+	srl	$t0,21,$acc0
+		xor	$acc14,$t3,$t3
+	srl	$t1,13,$acc1
+		xor	$acc15,$t3,$t3
+
+	and	$acc0,2040,$acc0		!
+	srl	$t2,5,$acc2
+	and	$acc1,2040,$acc1
+	ldx	[$tbl+$acc0],$acc0
+	sll	$t3,3,$acc3
+	and	$acc2,2040,$acc2
+	ldx	[$tbl+$acc1],$acc1
+	fmovs	%f0,%f0
+	srl	$t1,21,$acc4			!
+	and	$acc3,2040,$acc3
+	ldx	[$tbl+$acc2],$acc2
+	srl	$t2,13,$acc5
+	and	$acc4,2040,$acc4
+	ldx	[$tbl+$acc3],$acc3
+	srl	$t3,5,$acc6
+	and	$acc5,2040,$acc5
+	ldx	[$tbl+$acc4],$acc4		!
+	sll	$t0,3,$acc7
+	and	$acc6,2040,$acc6
+	ldx	[$tbl+$acc5],$acc5
+	srl	$t2,21,$acc8
+	and	$acc7,2040,$acc7
+	ldx	[$tbl+$acc6],$acc6
+	fmovs	%f0,%f0
+	srl	$t3,13,$acc9			!
+	and	$acc8,2040,$acc8
+	ldx	[$tbl+$acc7],$acc7
+	srl	$t0,5,$acc10
+	and	$acc9,2040,$acc9
+	ldx	[$tbl+$acc8],$acc8
+	sll	$t1,3,$acc11
+	and	$acc10,2040,$acc10
+	ldx	[$tbl+$acc9],$acc9		!
+	srl	$t3,21,$acc12
+	and	$acc11,2040,$acc11
+	ldx	[$tbl+$acc10],$acc10
+	srl	$t0,13,$acc13
+	and	$acc12,2040,$acc12
+	ldx	[$tbl+$acc11],$acc11
+	fmovs	%f0,%f0
+	srl	$t1,5,$acc14			!
+	and	$acc13,2040,$acc13
+	ldx	[$tbl+$acc12],$acc12
+	sll	$t2,3,$acc15
+	and	$acc14,2040,$acc14
+	ldx	[$tbl+$acc13],$acc13
+		srlx	$acc1,8,$acc1
+	and	$acc15,2040,$acc15
+	ldx	[$tbl+$acc14],$acc14		!
+
+		srlx	$acc2,16,$acc2
+		xor	$acc0,$s0,$s0
+	ldx	[$tbl+$acc15],$acc15
+		srlx	$acc3,24,$acc3
+		xor	$acc1,$s0,$s0
+	ld	[$key+16],$t0
+	fmovs	%f0,%f0
+		srlx	$acc5,8,$acc5		!
+		xor	$acc2,$s0,$s0
+	ld	[$key+20],$t1
+		srlx	$acc6,16,$acc6
+		xor	$acc3,$s0,$s0
+	ld	[$key+24],$t2
+		srlx	$acc7,24,$acc7
+		xor	$acc4,$s1,$s1
+	ld	[$key+28],$t3			!
+		srlx	$acc9,8,$acc9
+		xor	$acc5,$s1,$s1
+	ldx	[$tbl+2048+0],%g0		! prefetch te4
+		srlx	$acc10,16,$acc10
+		xor	$acc6,$s1,$s1
+	ldx	[$tbl+2048+32],%g0		! prefetch te4
+		srlx	$acc11,24,$acc11
+		xor	$acc7,$s1,$s1
+	ldx	[$tbl+2048+64],%g0		! prefetch te4
+		srlx	$acc13,8,$acc13
+		xor	$acc8,$s2,$s2
+	ldx	[$tbl+2048+96],%g0		! prefetch te4
+		srlx	$acc14,16,$acc14	!
+		xor	$acc9,$s2,$s2
+	ldx	[$tbl+2048+128],%g0		! prefetch te4
+		srlx	$acc15,24,$acc15
+		xor	$acc10,$s2,$s2
+	ldx	[$tbl+2048+160],%g0		! prefetch te4
+	srl	$s0,21,$acc0
+		xor	$acc11,$s2,$s2
+	ldx	[$tbl+2048+192],%g0		! prefetch te4
+		xor	$acc12,$acc14,$acc14
+		xor	$acc13,$s3,$s3
+	ldx	[$tbl+2048+224],%g0		! prefetch te4
+	srl	$s1,13,$acc1			!
+		xor	$acc14,$s3,$s3
+		xor	$acc15,$s3,$s3
+	ba	.Lenc_loop
+	and	$acc0,2040,$acc0
+
+.align	32
+.Lenc_last:
+		srlx	$acc1,8,$acc1		!
+		xor	$acc0,$t0,$t0
+	ld	[$key+0],$s0
+		srlx	$acc2,16,$acc2
+		xor	$acc1,$t0,$t0
+	ld	[$key+4],$s1
+		srlx	$acc3,24,$acc3
+		xor	$acc2,$t0,$t0
+	ld	[$key+8],$s2			!
+		srlx	$acc5,8,$acc5
+		xor	$acc3,$t0,$t0
+	ld	[$key+12],$s3
+		srlx	$acc6,16,$acc6
+		xor	$acc4,$t1,$t1
+		srlx	$acc7,24,$acc7
+		xor	$acc5,$t1,$t1
+		srlx	$acc9,8,$acc9		!
+		xor	$acc6,$t1,$t1
+		srlx	$acc10,16,$acc10
+		xor	$acc7,$t1,$t1
+		srlx	$acc11,24,$acc11
+		xor	$acc8,$t2,$t2
+		srlx	$acc13,8,$acc13
+		xor	$acc9,$t2,$t2
+		srlx	$acc14,16,$acc14	!
+		xor	$acc10,$t2,$t2
+		srlx	$acc15,24,$acc15
+		xor	$acc11,$t2,$t2
+		xor	$acc12,$acc14,$acc14
+		xor	$acc13,$t3,$t3
+	srl	$t0,24,$acc0
+		xor	$acc14,$t3,$t3
+	srl	$t1,16,$acc1			!
+		xor	$acc15,$t3,$t3
+
+	srl	$t2,8,$acc2
+	and	$acc1,255,$acc1
+	ldub	[$rounds+$acc0],$acc0
+	srl	$t1,24,$acc4
+	and	$acc2,255,$acc2
+	ldub	[$rounds+$acc1],$acc1
+	srl	$t2,16,$acc5			!
+	and	$t3,255,$acc3
+	ldub	[$rounds+$acc2],$acc2
+	ldub	[$rounds+$acc3],$acc3
+	srl	$t3,8,$acc6
+	and	$acc5,255,$acc5
+	ldub	[$rounds+$acc4],$acc4
+	fmovs	%f0,%f0
+	srl	$t2,24,$acc8			!
+	and	$acc6,255,$acc6
+	ldub	[$rounds+$acc5],$acc5
+	srl	$t3,16,$acc9
+	and	$t0,255,$acc7
+	ldub	[$rounds+$acc6],$acc6
+	ldub	[$rounds+$acc7],$acc7
+	fmovs	%f0,%f0
+	srl	$t0,8,$acc10			!
+	and	$acc9,255,$acc9
+	ldub	[$rounds+$acc8],$acc8
+	srl	$t3,24,$acc12
+	and	$acc10,255,$acc10
+	ldub	[$rounds+$acc9],$acc9
+	srl	$t0,16,$acc13
+	and	$t1,255,$acc11
+	ldub	[$rounds+$acc10],$acc10		!
+	srl	$t1,8,$acc14
+	and	$acc13,255,$acc13
+	ldub	[$rounds+$acc11],$acc11
+	ldub	[$rounds+$acc12],$acc12
+	and	$acc14,255,$acc14
+	ldub	[$rounds+$acc13],$acc13
+	and	$t2,255,$acc15
+	ldub	[$rounds+$acc14],$acc14		!
+
+		sll	$acc0,24,$acc0
+		xor	$acc3,$s0,$s0
+	ldub	[$rounds+$acc15],$acc15
+		sll	$acc1,16,$acc1
+		xor	$acc0,$s0,$s0
+	ldx	[%sp+$bias+$frame+0],%i7	! restore return address
+	fmovs	%f0,%f0
+		sll	$acc2,8,$acc2		!
+		xor	$acc1,$s0,$s0
+		sll	$acc4,24,$acc4
+		xor	$acc2,$s0,$s0
+		sll	$acc5,16,$acc5
+		xor	$acc7,$s1,$s1
+		sll	$acc6,8,$acc6
+		xor	$acc4,$s1,$s1
+		sll	$acc8,24,$acc8		!
+		xor	$acc5,$s1,$s1
+		sll	$acc9,16,$acc9
+		xor	$acc11,$s2,$s2
+		sll	$acc10,8,$acc10
+		xor	$acc6,$s1,$s1
+		sll	$acc12,24,$acc12
+		xor	$acc8,$s2,$s2
+		sll	$acc13,16,$acc13	!
+		xor	$acc9,$s2,$s2
+		sll	$acc14,8,$acc14
+		xor	$acc10,$s2,$s2
+		xor	$acc12,$acc14,$acc14
+		xor	$acc13,$s3,$s3
+		xor	$acc14,$s3,$s3
+		xor	$acc15,$s3,$s3
+
+	ret
+	restore
+.type	_sparcv9_AES_encrypt,#function
+.size	_sparcv9_AES_encrypt,(.-_sparcv9_AES_encrypt)
+
+.align	32
+.globl	AES_encrypt
+AES_encrypt:
+	or	%o0,%o1,%g1
+	andcc	%g1,3,%g0
+	bnz,pn	%xcc,.Lunaligned_enc
+	save	%sp,-$frame,%sp
+
+	ld	[%i0+0],%o0
+	ld	[%i0+4],%o1
+	ld	[%i0+8],%o2
+	ld	[%i0+12],%o3
+
+1:	call	.+8
+	add	%o7,AES_Te-1b,%o4
+	call	_sparcv9_AES_encrypt
+	mov	%i2,%o5
+
+	st	%o0,[%i1+0]
+	st	%o1,[%i1+4]
+	st	%o2,[%i1+8]
+	st	%o3,[%i1+12]
+
+	ret
+	restore
+
+.align	32
+.Lunaligned_enc:
+	ldub	[%i0+0],%l0
+	ldub	[%i0+1],%l1
+	ldub	[%i0+2],%l2
+
+	sll	%l0,24,%l0
+	ldub	[%i0+3],%l3
+	sll	%l1,16,%l1
+	ldub	[%i0+4],%l4
+	sll	%l2,8,%l2
+	or	%l1,%l0,%l0
+	ldub	[%i0+5],%l5
+	sll	%l4,24,%l4
+	or	%l3,%l2,%l2
+	ldub	[%i0+6],%l6
+	sll	%l5,16,%l5
+	or	%l0,%l2,%o0
+	ldub	[%i0+7],%l7
+
+	sll	%l6,8,%l6
+	or	%l5,%l4,%l4
+	ldub	[%i0+8],%l0
+	or	%l7,%l6,%l6
+	ldub	[%i0+9],%l1
+	or	%l4,%l6,%o1
+	ldub	[%i0+10],%l2
+
+	sll	%l0,24,%l0
+	ldub	[%i0+11],%l3
+	sll	%l1,16,%l1
+	ldub	[%i0+12],%l4
+	sll	%l2,8,%l2
+	or	%l1,%l0,%l0
+	ldub	[%i0+13],%l5
+	sll	%l4,24,%l4
+	or	%l3,%l2,%l2
+	ldub	[%i0+14],%l6
+	sll	%l5,16,%l5
+	or	%l0,%l2,%o2
+	ldub	[%i0+15],%l7
+
+	sll	%l6,8,%l6
+	or	%l5,%l4,%l4
+	or	%l7,%l6,%l6
+	or	%l4,%l6,%o3
+
+1:	call	.+8
+	add	%o7,AES_Te-1b,%o4
+	call	_sparcv9_AES_encrypt
+	mov	%i2,%o5
+
+	srl	%o0,24,%l0
+	srl	%o0,16,%l1
+	stb	%l0,[%i1+0]
+	srl	%o0,8,%l2
+	stb	%l1,[%i1+1]
+	stb	%l2,[%i1+2]
+	srl	%o1,24,%l4
+	stb	%o0,[%i1+3]
+
+	srl	%o1,16,%l5
+	stb	%l4,[%i1+4]
+	srl	%o1,8,%l6
+	stb	%l5,[%i1+5]
+	stb	%l6,[%i1+6]
+	srl	%o2,24,%l0
+	stb	%o1,[%i1+7]
+
+	srl	%o2,16,%l1
+	stb	%l0,[%i1+8]
+	srl	%o2,8,%l2
+	stb	%l1,[%i1+9]
+	stb	%l2,[%i1+10]
+	srl	%o3,24,%l4
+	stb	%o2,[%i1+11]
+
+	srl	%o3,16,%l5
+	stb	%l4,[%i1+12]
+	srl	%o3,8,%l6
+	stb	%l5,[%i1+13]
+	stb	%l6,[%i1+14]
+	stb	%o3,[%i1+15]
+
+	ret
+	restore
+.type	AES_encrypt,#function
+.size	AES_encrypt,(.-AES_encrypt)
+
+___
+
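The .Lunaligned_enc path above exists because SPARC traps on misaligned word accesses: input words are assembled big-endian from individual bytes, and output words are scattered back the same way. The equivalent in portable C (a sketch; load_be32 is an illustrative name):

    #include <stdint.h>

    /* Assemble one big-endian 32-bit word from a possibly
     * misaligned byte pointer, as the ldub/sll/or chains do. */
    static uint32_t load_be32(const unsigned char *p)
    {
        return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
               ((uint32_t)p[2] <<  8) |  (uint32_t)p[3];
    }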
+$code.=<<___;
+.align	256
+AES_Td:
+___
+&_data_word(
+	0x51f4a750, 0x7e416553, 0x1a17a4c3, 0x3a275e96,
+	0x3bab6bcb, 0x1f9d45f1, 0xacfa58ab, 0x4be30393,
+	0x2030fa55, 0xad766df6, 0x88cc7691, 0xf5024c25,
+	0x4fe5d7fc, 0xc52acbd7, 0x26354480, 0xb562a38f,
+	0xdeb15a49, 0x25ba1b67, 0x45ea0e98, 0x5dfec0e1,
+	0xc32f7502, 0x814cf012, 0x8d4697a3, 0x6bd3f9c6,
+	0x038f5fe7, 0x15929c95, 0xbf6d7aeb, 0x955259da,
+	0xd4be832d, 0x587421d3, 0x49e06929, 0x8ec9c844,
+	0x75c2896a, 0xf48e7978, 0x99583e6b, 0x27b971dd,
+	0xbee14fb6, 0xf088ad17, 0xc920ac66, 0x7dce3ab4,
+	0x63df4a18, 0xe51a3182, 0x97513360, 0x62537f45,
+	0xb16477e0, 0xbb6bae84, 0xfe81a01c, 0xf9082b94,
+	0x70486858, 0x8f45fd19, 0x94de6c87, 0x527bf8b7,
+	0xab73d323, 0x724b02e2, 0xe31f8f57, 0x6655ab2a,
+	0xb2eb2807, 0x2fb5c203, 0x86c57b9a, 0xd33708a5,
+	0x302887f2, 0x23bfa5b2, 0x02036aba, 0xed16825c,
+	0x8acf1c2b, 0xa779b492, 0xf307f2f0, 0x4e69e2a1,
+	0x65daf4cd, 0x0605bed5, 0xd134621f, 0xc4a6fe8a,
+	0x342e539d, 0xa2f355a0, 0x058ae132, 0xa4f6eb75,
+	0x0b83ec39, 0x4060efaa, 0x5e719f06, 0xbd6e1051,
+	0x3e218af9, 0x96dd063d, 0xdd3e05ae, 0x4de6bd46,
+	0x91548db5, 0x71c45d05, 0x0406d46f, 0x605015ff,
+	0x1998fb24, 0xd6bde997, 0x894043cc, 0x67d99e77,
+	0xb0e842bd, 0x07898b88, 0xe7195b38, 0x79c8eedb,
+	0xa17c0a47, 0x7c420fe9, 0xf8841ec9, 0x00000000,
+	0x09808683, 0x322bed48, 0x1e1170ac, 0x6c5a724e,
+	0xfd0efffb, 0x0f853856, 0x3daed51e, 0x362d3927,
+	0x0a0fd964, 0x685ca621, 0x9b5b54d1, 0x24362e3a,
+	0x0c0a67b1, 0x9357e70f, 0xb4ee96d2, 0x1b9b919e,
+	0x80c0c54f, 0x61dc20a2, 0x5a774b69, 0x1c121a16,
+	0xe293ba0a, 0xc0a02ae5, 0x3c22e043, 0x121b171d,
+	0x0e090d0b, 0xf28bc7ad, 0x2db6a8b9, 0x141ea9c8,
+	0x57f11985, 0xaf75074c, 0xee99ddbb, 0xa37f60fd,
+	0xf701269f, 0x5c72f5bc, 0x44663bc5, 0x5bfb7e34,
+	0x8b432976, 0xcb23c6dc, 0xb6edfc68, 0xb8e4f163,
+	0xd731dcca, 0x42638510, 0x13972240, 0x84c61120,
+	0x854a247d, 0xd2bb3df8, 0xaef93211, 0xc729a16d,
+	0x1d9e2f4b, 0xdcb230f3, 0x0d8652ec, 0x77c1e3d0,
+	0x2bb3166c, 0xa970b999, 0x119448fa, 0x47e96422,
+	0xa8fc8cc4, 0xa0f03f1a, 0x567d2cd8, 0x223390ef,
+	0x87494ec7, 0xd938d1c1, 0x8ccaa2fe, 0x98d40b36,
+	0xa6f581cf, 0xa57ade28, 0xdab78e26, 0x3fadbfa4,
+	0x2c3a9de4, 0x5078920d, 0x6a5fcc9b, 0x547e4662,
+	0xf68d13c2, 0x90d8b8e8, 0x2e39f75e, 0x82c3aff5,
+	0x9f5d80be, 0x69d0937c, 0x6fd52da9, 0xcf2512b3,
+	0xc8ac993b, 0x10187da7, 0xe89c636e, 0xdb3bbb7b,
+	0xcd267809, 0x6e5918f4, 0xec9ab701, 0x834f9aa8,
+	0xe6956e65, 0xaaffe67e, 0x21bccf08, 0xef15e8e6,
+	0xbae79bd9, 0x4a6f36ce, 0xea9f09d4, 0x29b07cd6,
+	0x31a4b2af, 0x2a3f2331, 0xc6a59430, 0x35a266c0,
+	0x744ebc37, 0xfc82caa6, 0xe090d0b0, 0x33a7d815,
+	0xf104984a, 0x41ecdaf7, 0x7fcd500e, 0x1791f62f,
+	0x764dd68d, 0x43efb04d, 0xccaa4d54, 0xe49604df,
+	0x9ed1b5e3, 0x4c6a881b, 0xc12c1fb8, 0x4665517f,
+	0x9d5eea04, 0x018c355d, 0xfa877473, 0xfb0b412e,
+	0xb3671d5a, 0x92dbd252, 0xe9105633, 0x6dd64713,
+	0x9ad7618c, 0x37a10c7a, 0x59f8148e, 0xeb133c89,
+	0xcea927ee, 0xb761c935, 0xe11ce5ed, 0x7a47b13c,
+	0x9cd2df59, 0x55f2733f, 0x1814ce79, 0x73c737bf,
+	0x53f7cdea, 0x5ffdaa5b, 0xdf3d6f14, 0x7844db86,
+	0xcaaff381, 0xb968c43e, 0x3824342c, 0xc2a3405f,
+	0x161dc372, 0xbce2250c, 0x283c498b, 0xff0d9541,
+	0x39a80171, 0x080cb3de, 0xd8b4e49c, 0x6456c190,
+	0x7bcb8461, 0xd532b670, 0x486c5c74, 0xd0b85742);
+$code.=<<___;
+	.byte	0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38
+	.byte	0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb
+	.byte	0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87
+	.byte	0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb
+	.byte	0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d
+	.byte	0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e
+	.byte	0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2
+	.byte	0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25
+	.byte	0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16
+	.byte	0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92
+	.byte	0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda
+	.byte	0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84
+	.byte	0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a
+	.byte	0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06
+	.byte	0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02
+	.byte	0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b
+	.byte	0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea
+	.byte	0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73
+	.byte	0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85
+	.byte	0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e
+	.byte	0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89
+	.byte	0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b
+	.byte	0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20
+	.byte	0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4
+	.byte	0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31
+	.byte	0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f
+	.byte	0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d
+	.byte	0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef
+	.byte	0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0
+	.byte	0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61
+	.byte	0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26
+	.byte	0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d
+.type	AES_Td,#object
+.size	AES_Td,(.-AES_Td)
+
+.align	64
+.skip	16
+_sparcv9_AES_decrypt:
+	save	%sp,-$frame-$locals,%sp
+	stx	%i7,[%sp+$bias+$frame+0]	! off-load return address
+	ld	[$key+240],$rounds
+	ld	[$key+0],$t0
+	ld	[$key+4],$t1			!
+	ld	[$key+8],$t2
+	ld	[$key+12],$t3
+	srl	$rounds,1,$rounds
+	xor	$t0,$s0,$s0
+	ld	[$key+16],$t0
+	xor	$t1,$s1,$s1
+	ld	[$key+20],$t1
+	srl	$s0,21,$acc0			!
+	xor	$t2,$s2,$s2
+	ld	[$key+24],$t2
+	xor	$t3,$s3,$s3
+	and	$acc0,2040,$acc0
+	ld	[$key+28],$t3
+	srl	$s3,13,$acc1
+	nop
+.Ldec_loop:
+	srl	$s2,5,$acc2			!
+	and	$acc1,2040,$acc1
+	ldx	[$tbl+$acc0],$acc0
+	sll	$s1,3,$acc3
+	and	$acc2,2040,$acc2
+	ldx	[$tbl+$acc1],$acc1
+	srl	$s1,21,$acc4
+	and	$acc3,2040,$acc3
+	ldx	[$tbl+$acc2],$acc2		!
+	srl	$s0,13,$acc5
+	and	$acc4,2040,$acc4
+	ldx	[$tbl+$acc3],$acc3
+	srl	$s3,5,$acc6
+	and	$acc5,2040,$acc5
+	ldx	[$tbl+$acc4],$acc4
+	fmovs	%f0,%f0
+	sll	$s2,3,$acc7			!
+	and	$acc6,2040,$acc6
+	ldx	[$tbl+$acc5],$acc5
+	srl	$s2,21,$acc8
+	and	$acc7,2040,$acc7
+	ldx	[$tbl+$acc6],$acc6
+	srl	$s1,13,$acc9
+	and	$acc8,2040,$acc8
+	ldx	[$tbl+$acc7],$acc7		!
+	srl	$s0,5,$acc10
+	and	$acc9,2040,$acc9
+	ldx	[$tbl+$acc8],$acc8
+	sll	$s3,3,$acc11
+	and	$acc10,2040,$acc10
+	ldx	[$tbl+$acc9],$acc9
+	fmovs	%f0,%f0
+	srl	$s3,21,$acc12			!
+	and	$acc11,2040,$acc11
+	ldx	[$tbl+$acc10],$acc10
+	srl	$s2,13,$acc13
+	and	$acc12,2040,$acc12
+	ldx	[$tbl+$acc11],$acc11
+	srl	$s1,5,$acc14
+	and	$acc13,2040,$acc13
+	ldx	[$tbl+$acc12],$acc12		!
+	sll	$s0,3,$acc15
+	and	$acc14,2040,$acc14
+	ldx	[$tbl+$acc13],$acc13
+	and	$acc15,2040,$acc15
+	add	$key,32,$key
+	ldx	[$tbl+$acc14],$acc14
+	fmovs	%f0,%f0
+	subcc	$rounds,1,$rounds		!
+	ldx	[$tbl+$acc15],$acc15
+	bz,a,pn	%icc,.Ldec_last
+	add	$tbl,2048,$rounds
+
+		srlx	$acc1,8,$acc1
+		xor	$acc0,$t0,$t0
+	ld	[$key+0],$s0
+	fmovs	%f0,%f0
+		srlx	$acc2,16,$acc2		!
+		xor	$acc1,$t0,$t0
+	ld	[$key+4],$s1
+		srlx	$acc3,24,$acc3
+		xor	$acc2,$t0,$t0
+	ld	[$key+8],$s2
+		srlx	$acc5,8,$acc5
+		xor	$acc3,$t0,$t0
+	ld	[$key+12],$s3			!
+		srlx	$acc6,16,$acc6
+		xor	$acc4,$t1,$t1
+	fmovs	%f0,%f0
+		srlx	$acc7,24,$acc7
+		xor	$acc5,$t1,$t1
+		srlx	$acc9,8,$acc9
+		xor	$acc6,$t1,$t1
+		srlx	$acc10,16,$acc10	!
+		xor	$acc7,$t1,$t1
+		srlx	$acc11,24,$acc11
+		xor	$acc8,$t2,$t2
+		srlx	$acc13,8,$acc13
+		xor	$acc9,$t2,$t2
+		srlx	$acc14,16,$acc14
+		xor	$acc10,$t2,$t2
+		srlx	$acc15,24,$acc15	!
+		xor	$acc11,$t2,$t2
+		xor	$acc12,$acc14,$acc14
+		xor	$acc13,$t3,$t3
+	srl	$t0,21,$acc0
+		xor	$acc14,$t3,$t3
+		xor	$acc15,$t3,$t3
+	srl	$t3,13,$acc1
+
+	and	$acc0,2040,$acc0		!
+	srl	$t2,5,$acc2
+	and	$acc1,2040,$acc1
+	ldx	[$tbl+$acc0],$acc0
+	sll	$t1,3,$acc3
+	and	$acc2,2040,$acc2
+	ldx	[$tbl+$acc1],$acc1
+	fmovs	%f0,%f0
+	srl	$t1,21,$acc4			!
+	and	$acc3,2040,$acc3
+	ldx	[$tbl+$acc2],$acc2
+	srl	$t0,13,$acc5
+	and	$acc4,2040,$acc4
+	ldx	[$tbl+$acc3],$acc3
+	srl	$t3,5,$acc6
+	and	$acc5,2040,$acc5
+	ldx	[$tbl+$acc4],$acc4		!
+	sll	$t2,3,$acc7
+	and	$acc6,2040,$acc6
+	ldx	[$tbl+$acc5],$acc5
+	srl	$t2,21,$acc8
+	and	$acc7,2040,$acc7
+	ldx	[$tbl+$acc6],$acc6
+	fmovs	%f0,%f0
+	srl	$t1,13,$acc9			!
+	and	$acc8,2040,$acc8
+	ldx	[$tbl+$acc7],$acc7
+	srl	$t0,5,$acc10
+	and	$acc9,2040,$acc9
+	ldx	[$tbl+$acc8],$acc8
+	sll	$t3,3,$acc11
+	and	$acc10,2040,$acc10
+	ldx	[$tbl+$acc9],$acc9		!
+	srl	$t3,21,$acc12
+	and	$acc11,2040,$acc11
+	ldx	[$tbl+$acc10],$acc10
+	srl	$t2,13,$acc13
+	and	$acc12,2040,$acc12
+	ldx	[$tbl+$acc11],$acc11
+	fmovs	%f0,%f0
+	srl	$t1,5,$acc14			!
+	and	$acc13,2040,$acc13
+	ldx	[$tbl+$acc12],$acc12
+	sll	$t0,3,$acc15
+	and	$acc14,2040,$acc14
+	ldx	[$tbl+$acc13],$acc13
+		srlx	$acc1,8,$acc1
+	and	$acc15,2040,$acc15
+	ldx	[$tbl+$acc14],$acc14		!
+
+		srlx	$acc2,16,$acc2
+		xor	$acc0,$s0,$s0
+	ldx	[$tbl+$acc15],$acc15
+		srlx	$acc3,24,$acc3
+		xor	$acc1,$s0,$s0
+	ld	[$key+16],$t0
+	fmovs	%f0,%f0
+		srlx	$acc5,8,$acc5		!
+		xor	$acc2,$s0,$s0
+	ld	[$key+20],$t1
+		srlx	$acc6,16,$acc6
+		xor	$acc3,$s0,$s0
+	ld	[$key+24],$t2
+		srlx	$acc7,24,$acc7
+		xor	$acc4,$s1,$s1
+	ld	[$key+28],$t3			!
+		srlx	$acc9,8,$acc9
+		xor	$acc5,$s1,$s1
+	ldx	[$tbl+2048+0],%g0		! prefetch td4
+		srlx	$acc10,16,$acc10
+		xor	$acc6,$s1,$s1
+	ldx	[$tbl+2048+32],%g0		! prefetch td4
+		srlx	$acc11,24,$acc11
+		xor	$acc7,$s1,$s1
+	ldx	[$tbl+2048+64],%g0		! prefetch td4
+		srlx	$acc13,8,$acc13
+		xor	$acc8,$s2,$s2
+	ldx	[$tbl+2048+96],%g0		! prefetch td4
+		srlx	$acc14,16,$acc14	!
+		xor	$acc9,$s2,$s2
+	ldx	[$tbl+2048+128],%g0		! prefetch td4
+		srlx	$acc15,24,$acc15
+		xor	$acc10,$s2,$s2
+	ldx	[$tbl+2048+160],%g0		! prefetch td4
+	srl	$s0,21,$acc0
+		xor	$acc11,$s2,$s2
+	ldx	[$tbl+2048+192],%g0		! prefetch td4
+		xor	$acc12,$acc14,$acc14
+		xor	$acc13,$s3,$s3
+	ldx	[$tbl+2048+224],%g0		! prefetch td4
+	and	$acc0,2040,$acc0		!
+		xor	$acc14,$s3,$s3
+		xor	$acc15,$s3,$s3
+	ba	.Ldec_loop
+	srl	$s3,13,$acc1
+
+.align	32
+.Ldec_last:
+		srlx	$acc1,8,$acc1		!
+		xor	$acc0,$t0,$t0
+	ld	[$key+0],$s0
+		srlx	$acc2,16,$acc2
+		xor	$acc1,$t0,$t0
+	ld	[$key+4],$s1
+		srlx	$acc3,24,$acc3
+		xor	$acc2,$t0,$t0
+	ld	[$key+8],$s2			!
+		srlx	$acc5,8,$acc5
+		xor	$acc3,$t0,$t0
+	ld	[$key+12],$s3
+		srlx	$acc6,16,$acc6
+		xor	$acc4,$t1,$t1
+		srlx	$acc7,24,$acc7
+		xor	$acc5,$t1,$t1
+		srlx	$acc9,8,$acc9		!
+		xor	$acc6,$t1,$t1
+		srlx	$acc10,16,$acc10
+		xor	$acc7,$t1,$t1
+		srlx	$acc11,24,$acc11
+		xor	$acc8,$t2,$t2
+		srlx	$acc13,8,$acc13
+		xor	$acc9,$t2,$t2
+		srlx	$acc14,16,$acc14	!
+		xor	$acc10,$t2,$t2
+		srlx	$acc15,24,$acc15
+		xor	$acc11,$t2,$t2
+		xor	$acc12,$acc14,$acc14
+		xor	$acc13,$t3,$t3
+	srl	$t0,24,$acc0
+		xor	$acc14,$t3,$t3
+		xor	$acc15,$t3,$t3		!
+	srl	$t3,16,$acc1
+
+	srl	$t2,8,$acc2
+	and	$acc1,255,$acc1
+	ldub	[$rounds+$acc0],$acc0
+	srl	$t1,24,$acc4
+	and	$acc2,255,$acc2
+	ldub	[$rounds+$acc1],$acc1
+	srl	$t0,16,$acc5			!
+	and	$t1,255,$acc3
+	ldub	[$rounds+$acc2],$acc2
+	ldub	[$rounds+$acc3],$acc3
+	srl	$t3,8,$acc6
+	and	$acc5,255,$acc5
+	ldub	[$rounds+$acc4],$acc4
+	fmovs	%f0,%f0
+	srl	$t2,24,$acc8			!
+	and	$acc6,255,$acc6
+	ldub	[$rounds+$acc5],$acc5
+	srl	$t1,16,$acc9
+	and	$t2,255,$acc7
+	ldub	[$rounds+$acc6],$acc6
+	ldub	[$rounds+$acc7],$acc7
+	fmovs	%f0,%f0
+	srl	$t0,8,$acc10			!
+	and	$acc9,255,$acc9
+	ldub	[$rounds+$acc8],$acc8
+	srl	$t3,24,$acc12
+	and	$acc10,255,$acc10
+	ldub	[$rounds+$acc9],$acc9
+	srl	$t2,16,$acc13
+	and	$t3,255,$acc11
+	ldub	[$rounds+$acc10],$acc10		!
+	srl	$t1,8,$acc14
+	and	$acc13,255,$acc13
+	ldub	[$rounds+$acc11],$acc11
+	ldub	[$rounds+$acc12],$acc12
+	and	$acc14,255,$acc14
+	ldub	[$rounds+$acc13],$acc13
+	and	$t0,255,$acc15
+	ldub	[$rounds+$acc14],$acc14		!
+
+		sll	$acc0,24,$acc0
+		xor	$acc3,$s0,$s0
+	ldub	[$rounds+$acc15],$acc15
+		sll	$acc1,16,$acc1
+		xor	$acc0,$s0,$s0
+	ldx	[%sp+$bias+$frame+0],%i7	! restore return address
+	fmovs	%f0,%f0
+		sll	$acc2,8,$acc2		!
+		xor	$acc1,$s0,$s0
+		sll	$acc4,24,$acc4
+		xor	$acc2,$s0,$s0
+		sll	$acc5,16,$acc5
+		xor	$acc7,$s1,$s1
+		sll	$acc6,8,$acc6
+		xor	$acc4,$s1,$s1
+		sll	$acc8,24,$acc8		!
+		xor	$acc5,$s1,$s1
+		sll	$acc9,16,$acc9
+		xor	$acc11,$s2,$s2
+		sll	$acc10,8,$acc10
+		xor	$acc6,$s1,$s1
+		sll	$acc12,24,$acc12
+		xor	$acc8,$s2,$s2
+		sll	$acc13,16,$acc13	!
+		xor	$acc9,$s2,$s2
+		sll	$acc14,8,$acc14
+		xor	$acc10,$s2,$s2
+		xor	$acc12,$acc14,$acc14
+		xor	$acc13,$s3,$s3
+		xor	$acc14,$s3,$s3
+		xor	$acc15,$s3,$s3
+
+	ret
+	restore
+.type	_sparcv9_AES_decrypt,#function
+.size	_sparcv9_AES_decrypt,(.-_sparcv9_AES_decrypt)
+
+.align	32
+.globl	AES_decrypt
+AES_decrypt:
+	or	%o0,%o1,%g1
+	andcc	%g1,3,%g0
+	bnz,pn	%xcc,.Lunaligned_dec
+	save	%sp,-$frame,%sp
+
+	ld	[%i0+0],%o0
+	ld	[%i0+4],%o1
+	ld	[%i0+8],%o2
+	ld	[%i0+12],%o3
+
+1:	call	.+8
+	add	%o7,AES_Td-1b,%o4
+	call	_sparcv9_AES_decrypt
+	mov	%i2,%o5
+
+	st	%o0,[%i1+0]
+	st	%o1,[%i1+4]
+	st	%o2,[%i1+8]
+	st	%o3,[%i1+12]
+
+	ret
+	restore
+
+.align	32
+.Lunaligned_dec:
+	ldub	[%i0+0],%l0
+	ldub	[%i0+1],%l1
+	ldub	[%i0+2],%l2
+
+	sll	%l0,24,%l0
+	ldub	[%i0+3],%l3
+	sll	%l1,16,%l1
+	ldub	[%i0+4],%l4
+	sll	%l2,8,%l2
+	or	%l1,%l0,%l0
+	ldub	[%i0+5],%l5
+	sll	%l4,24,%l4
+	or	%l3,%l2,%l2
+	ldub	[%i0+6],%l6
+	sll	%l5,16,%l5
+	or	%l0,%l2,%o0
+	ldub	[%i0+7],%l7
+
+	sll	%l6,8,%l6
+	or	%l5,%l4,%l4
+	ldub	[%i0+8],%l0
+	or	%l7,%l6,%l6
+	ldub	[%i0+9],%l1
+	or	%l4,%l6,%o1
+	ldub	[%i0+10],%l2
+
+	sll	%l0,24,%l0
+	ldub	[%i0+11],%l3
+	sll	%l1,16,%l1
+	ldub	[%i0+12],%l4
+	sll	%l2,8,%l2
+	or	%l1,%l0,%l0
+	ldub	[%i0+13],%l5
+	sll	%l4,24,%l4
+	or	%l3,%l2,%l2
+	ldub	[%i0+14],%l6
+	sll	%l5,16,%l5
+	or	%l0,%l2,%o2
+	ldub	[%i0+15],%l7
+
+	sll	%l6,8,%l6
+	or	%l5,%l4,%l4
+	or	%l7,%l6,%l6
+	or	%l4,%l6,%o3
+
+1:	call	.+8
+	add	%o7,AES_Td-1b,%o4
+	call	_sparcv9_AES_decrypt
+	mov	%i2,%o5
+
+	srl	%o0,24,%l0
+	srl	%o0,16,%l1
+	stb	%l0,[%i1+0]
+	srl	%o0,8,%l2
+	stb	%l1,[%i1+1]
+	stb	%l2,[%i1+2]
+	srl	%o1,24,%l4
+	stb	%o0,[%i1+3]
+
+	srl	%o1,16,%l5
+	stb	%l4,[%i1+4]
+	srl	%o1,8,%l6
+	stb	%l5,[%i1+5]
+	stb	%l6,[%i1+6]
+	srl	%o2,24,%l0
+	stb	%o1,[%i1+7]
+
+	srl	%o2,16,%l1
+	stb	%l0,[%i1+8]
+	srl	%o2,8,%l2
+	stb	%l1,[%i1+9]
+	stb	%l2,[%i1+10]
+	srl	%o3,24,%l4
+	stb	%o2,[%i1+11]
+
+	srl	%o3,16,%l5
+	stb	%l4,[%i1+12]
+	srl	%o3,8,%l6
+	stb	%l5,[%i1+13]
+	stb	%l6,[%i1+14]
+	stb	%o3,[%i1+15]
+
+	ret
+	restore
+.type	AES_decrypt,#function
+.size	AES_decrypt,(.-AES_decrypt)
+___
+
+# The fmovs instructions substituting for FP nops were originally
+# added to meet specific instruction alignment requirements and so
+# maximize ILP. As UltraSPARC T1, a.k.a. Niagara, has a shared FPU,
+# FP nops can have an undesired effect, so we simply omit them and
+# sacrifice a fraction of a percent in performance...
+$code =~ s/fmovs.*$//gm;
+
+print $code;
+close STDOUT or die "error closing STDOUT: $!";	# ensure flush
diff --git a/ap/lib/libssl/openssl-1.1.1o/crypto/aes/asm/aesfx-sparcv9.pl b/ap/lib/libssl/openssl-1.1.1o/crypto/aes/asm/aesfx-sparcv9.pl
new file mode 100644
index 0000000..1678c4f
--- /dev/null
+++ b/ap/lib/libssl/openssl-1.1.1o/crypto/aes/asm/aesfx-sparcv9.pl
@@ -0,0 +1,1270 @@
+#! /usr/bin/env perl
+# Copyright 2016-2020 The OpenSSL Project Authors. All Rights Reserved.
+#
+# Licensed under the OpenSSL license (the "License").  You may not use
+# this file except in compliance with the License.  You can obtain a copy
+# in the file LICENSE in the source distribution or at
+# https://www.openssl.org/source/license.html
+
+#
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+
+# March 2016
+#
+# Initial support for Fujitsu SPARC64 X/X+ comprises the minimally
+# required key setup and single-block procedures.
+#
+# April 2016
+#
+# Add "teaser" CBC and CTR mode-specific subroutines. "Teaser" means
+# that parallelizable nature of CBC decrypt and CTR is not utilized
+# yet. CBC encrypt on the other hand is as good as it can possibly
+# get processing one byte in 4.1 cycles with 128-bit key on SPARC64 X.
+# This is ~6x faster than pure software implementation...
+#
+# July 2016
+#
+# Switch from faligndata to fshiftorx, which makes it possible to
+# omit the alignaddr instructions and improves single-block and
+# short-input performance with misaligned data.
+
+$output = pop;
+open STDOUT,">$output";
+
+{
+my ($inp,$out,$key,$rounds,$tmp,$mask) = map("%o$_",(0..5));
+
+$code.=<<___;
+#include "sparc_arch.h"
+
+#define LOCALS (STACK_BIAS+STACK_FRAME)
+
+.text
+
+.globl	aes_fx_encrypt
+.align	32
+aes_fx_encrypt:
+	and		$inp, 7, $tmp		! is input aligned?
+	andn		$inp, 7, $inp
+	ldd		[$key +  0], %f6	! round[0]
+	ldd		[$key +  8], %f8
+	mov		%o7, %g1
+	ld		[$key + 240], $rounds
+
+1:	call		.+8
+	add		%o7, .Linp_align-1b, %o7
+
+	sll		$tmp, 3, $tmp
+	ldd		[$inp + 0], %f0		! load input
+	brz,pt		$tmp, .Lenc_inp_aligned
+	ldd		[$inp + 8], %f2
+
+	ldd		[%o7 + $tmp], %f14	! shift left params
+	ldd		[$inp + 16], %f4
+	fshiftorx	%f0, %f2, %f14, %f0
+	fshiftorx	%f2, %f4, %f14, %f2
+
+.Lenc_inp_aligned:
+	ldd		[$key + 16], %f10	! round[1]
+	ldd		[$key + 24], %f12
+
+	fxor		%f0, %f6, %f0		! ^=round[0]
+	fxor		%f2, %f8, %f2
+	ldd		[$key + 32], %f6	! round[2]
+	ldd		[$key + 40], %f8
+	add		$key, 32, $key
+	sub		$rounds, 4, $rounds
+
+.Loop_enc:
+	fmovd		%f0, %f4
+	faesencx	%f2, %f10, %f0
+	faesencx	%f4, %f12, %f2
+	ldd		[$key + 16], %f10
+	ldd		[$key + 24], %f12
+	add		$key, 32, $key
+
+	fmovd		%f0, %f4
+	faesencx	%f2, %f6, %f0
+	faesencx	%f4, %f8, %f2
+	ldd		[$key +  0], %f6
+	ldd		[$key +  8], %f8
+
+	brnz,a		$rounds, .Loop_enc
+	sub		$rounds, 2, $rounds
+
+	andcc		$out, 7, $tmp		! is output aligned?
+	andn		$out, 7, $out
+	mov		0xff, $mask
+	srl		$mask, $tmp, $mask
+	add		%o7, 64, %o7
+	sll		$tmp, 3, $tmp
+
+	fmovd		%f0, %f4
+	faesencx	%f2, %f10, %f0
+	faesencx	%f4, %f12, %f2
+	ldd		[%o7 + $tmp], %f14	! shift right params
+
+	fmovd		%f0, %f4
+	faesenclx	%f2, %f6, %f0
+	faesenclx	%f4, %f8, %f2
+
+	bnz,pn		%icc, .Lenc_out_unaligned
+	mov		%g1, %o7
+
+	std		%f0, [$out + 0]
+	retl
+	std		%f2, [$out + 8]
+
+.align	16
+.Lenc_out_unaligned:
+	add		$out, 16, $inp
+	orn		%g0, $mask, $tmp
+	fshiftorx	%f0, %f0, %f14, %f4
+	fshiftorx	%f0, %f2, %f14, %f6
+	fshiftorx	%f2, %f2, %f14, %f8
+
+	stda		%f4, [$out + $mask]0xc0	! partial store
+	std		%f6, [$out + 8]
+	stda		%f8, [$inp + $tmp]0xc0	! partial store
+	retl
+	nop
+.type	aes_fx_encrypt,#function
+.size	aes_fx_encrypt,.-aes_fx_encrypt
+
+.globl	aes_fx_decrypt
+.align	32
+aes_fx_decrypt:
+	and		$inp, 7, $tmp		! is input aligned?
+	andn		$inp, 7, $inp
+	ldd		[$key +  0], %f6	! round[0]
+	ldd		[$key +  8], %f8
+	mov		%o7, %g1
+	ld		[$key + 240], $rounds
+
+1:	call		.+8
+	add		%o7, .Linp_align-1b, %o7
+
+	sll		$tmp, 3, $tmp
+	ldd		[$inp + 0], %f0		! load input
+	brz,pt		$tmp, .Ldec_inp_aligned
+	ldd		[$inp + 8], %f2
+
+	ldd		[%o7 + $tmp], %f14	! shift left params
+	ldd		[$inp + 16], %f4
+	fshiftorx	%f0, %f2, %f14, %f0
+	fshiftorx	%f2, %f4, %f14, %f2
+
+.Ldec_inp_aligned:
+	ldd		[$key + 16], %f10	! round[1]
+	ldd		[$key + 24], %f12
+
+	fxor		%f0, %f6, %f0		! ^=round[0]
+	fxor		%f2, %f8, %f2
+	ldd		[$key + 32], %f6	! round[2]
+	ldd		[$key + 40], %f8
+	add		$key, 32, $key
+	sub		$rounds, 4, $rounds
+
+.Loop_dec:
+	fmovd		%f0, %f4
+	faesdecx	%f2, %f10, %f0
+	faesdecx	%f4, %f12, %f2
+	ldd		[$key + 16], %f10
+	ldd		[$key + 24], %f12
+	add		$key, 32, $key
+
+	fmovd		%f0, %f4
+	faesdecx	%f2, %f6, %f0
+	faesdecx	%f4, %f8, %f2
+	ldd		[$key +  0], %f6
+	ldd		[$key +  8], %f8
+
+	brnz,a		$rounds, .Loop_dec
+	sub		$rounds, 2, $rounds
+
+	andcc		$out, 7, $tmp		! is output aligned?
+	andn		$out, 7, $out
+	mov		0xff, $mask
+	srl		$mask, $tmp, $mask
+	add		%o7, 64, %o7
+	sll		$tmp, 3, $tmp
+
+	fmovd		%f0, %f4
+	faesdecx	%f2, %f10, %f0
+	faesdecx	%f4, %f12, %f2
+	ldd		[%o7 + $tmp], %f14	! shift right params
+
+	fmovd		%f0, %f4
+	faesdeclx	%f2, %f6, %f0
+	faesdeclx	%f4, %f8, %f2
+
+	bnz,pn		%icc, .Ldec_out_unaligned
+	mov		%g1, %o7
+
+	std		%f0, [$out + 0]
+	retl
+	std		%f2, [$out + 8]
+
+.align	16
+.Ldec_out_unaligned:
+	add		$out, 16, $inp
+	orn		%g0, $mask, $tmp
+	fshiftorx	%f0, %f0, %f14, %f4
+	fshiftorx	%f0, %f2, %f14, %f6
+	fshiftorx	%f2, %f2, %f14, %f8
+
+	stda		%f4, [$out + $mask]0xc0	! partial store
+	std		%f6, [$out + 8]
+	stda		%f8, [$inp + $tmp]0xc0	! partial store
+	retl
+	nop
+.type	aes_fx_decrypt,#function
+.size	aes_fx_decrypt,.-aes_fx_decrypt
+___
+}
+{
+my ($inp,$bits,$out,$tmp,$inc) = map("%o$_",(0..5));
+$code.=<<___;
+.globl	aes_fx_set_decrypt_key
+.align	32
+aes_fx_set_decrypt_key:
+	b		.Lset_encrypt_key
+	mov		-1, $inc
+	retl
+	nop
+.type	aes_fx_set_decrypt_key,#function
+.size	aes_fx_set_decrypt_key,.-aes_fx_set_decrypt_key
+
+.globl	aes_fx_set_encrypt_key
+.align	32
+aes_fx_set_encrypt_key:
+	mov		1, $inc
+	nop
+.Lset_encrypt_key:
+	and		$inp, 7, $tmp
+	andn		$inp, 7, $inp
+	sll		$tmp, 3, $tmp
+	mov		%o7, %g1
+
+1:	call		.+8
+	add		%o7, .Linp_align-1b, %o7
+
+	ldd		[%o7 + $tmp], %f10	! shift left params
+	mov		%g1, %o7
+
+	cmp		$bits, 192
+	ldd		[$inp + 0], %f0
+	bl,pt		%icc, .L128
+	ldd		[$inp + 8], %f2
+
+	be,pt		%icc, .L192
+	ldd		[$inp + 16], %f4
+	brz,pt		$tmp, .L256aligned
+	ldd		[$inp + 24], %f6
+
+	ldd		[$inp + 32], %f8
+	fshiftorx	%f0, %f2, %f10, %f0
+	fshiftorx	%f2, %f4, %f10, %f2
+	fshiftorx	%f4, %f6, %f10, %f4
+	fshiftorx	%f6, %f8, %f10, %f6
+
+.L256aligned:
+	mov		14, $bits
+	and		$inc, `14*16`, $tmp
+	st		$bits, [$out + 240]	! store rounds
+	add		$out, $tmp, $out	! start or end of key schedule
+	sllx		$inc, 4, $inc		! 16 or -16
+___
+for ($i=0; $i<6; $i++) {
+    $code.=<<___;
+	std		%f0, [$out + 0]
+	faeskeyx	%f6, `0x10+$i`, %f0
+	std		%f2, [$out + 8]
+	add		$out, $inc, $out
+	faeskeyx	%f0, 0x00, %f2
+	std		%f4, [$out + 0]
+	faeskeyx	%f2, 0x01, %f4
+	std		%f6, [$out + 8]
+	add		$out, $inc, $out
+	faeskeyx	%f4, 0x00, %f6
+___
+}
+$code.=<<___;
+	std		%f0, [$out + 0]
+	faeskeyx	%f6, `0x10+$i`, %f0
+	std		%f2, [$out + 8]
+	add		$out, $inc, $out
+	faeskeyx	%f0, 0x00, %f2
+	std		%f4,[$out + 0]
+	std		%f6,[$out + 8]
+	add		$out, $inc, $out
+	std		%f0,[$out + 0]
+	std		%f2,[$out + 8]
+	retl
+	xor		%o0, %o0, %o0		! return 0
+
+.align	16
+.L192:
+	brz,pt		$tmp, .L192aligned
+	nop
+
+	ldd		[$inp + 24], %f6
+	fshiftorx	%f0, %f2, %f10, %f0
+	fshiftorx	%f2, %f4, %f10, %f2
+	fshiftorx	%f4, %f6, %f10, %f4
+
+.L192aligned:
+	mov		12, $bits
+	and		$inc, `12*16`, $tmp
+	st		$bits, [$out + 240]	! store rounds
+	add		$out, $tmp, $out	! start or end of key schedule
+	sllx		$inc, 4, $inc		! 16 or -16
+___
+for ($i=0; $i<8; $i+=2) {
+    $code.=<<___;
+	std		%f0, [$out + 0]
+	faeskeyx	%f4, `0x10+$i`, %f0
+	std		%f2, [$out + 8]
+	add		$out, $inc, $out
+	faeskeyx	%f0, 0x00, %f2
+	std		%f4, [$out + 0]
+	faeskeyx	%f2, 0x00, %f4
+	std		%f0, [$out + 8]
+	add		$out, $inc, $out
+	faeskeyx	%f4, `0x10+$i+1`, %f0
+	std		%f2, [$out + 0]
+	faeskeyx	%f0, 0x00, %f2
+	std		%f4, [$out + 8]
+	add		$out, $inc, $out
+___
+$code.=<<___		if ($i<6);
+	faeskeyx	%f2, 0x00, %f4
+___
+}
+$code.=<<___;
+	std		%f0, [$out + 0]
+	std		%f2, [$out + 8]
+	retl
+	xor		%o0, %o0, %o0		! return 0
+
+.align	16
+.L128:
+	brz,pt		$tmp, .L128aligned
+	nop
+
+	ldd		[$inp + 16], %f4
+	fshiftorx	%f0, %f2, %f10, %f0
+	fshiftorx	%f2, %f4, %f10, %f2
+
+.L128aligned:
+	mov		10, $bits
+	and		$inc, `10*16`, $tmp
+	st		$bits, [$out + 240]	! store rounds
+	add		$out, $tmp, $out	! start or end of key schedule
+	sllx		$inc, 4, $inc		! 16 or -16
+___
+for ($i=0; $i<10; $i++) {
+    $code.=<<___;
+	std		%f0, [$out + 0]
+	faeskeyx	%f2, `0x10+$i`, %f0
+	std		%f2, [$out + 8]
+	add		$out, $inc, $out
+	faeskeyx	%f0, 0x00, %f2
+___
+}
+$code.=<<___;
+	std		%f0, [$out + 0]
+	std		%f2, [$out + 8]
+	retl
+	xor		%o0, %o0, %o0		! return 0
+.type	aes_fx_set_encrypt_key,#function
+.size	aes_fx_set_encrypt_key,.-aes_fx_set_encrypt_key
+___
+}
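A note on the $inc trick in the key-setup code above: aes_fx_set_decrypt_key runs the same expansion but stores it from the end of the schedule backwards ($inc of -16 versus 16, with the start address offset by rounds*16), so both directions can walk the schedule with a positive stride. A hedged C sketch of that store pattern (store_schedule and rk are illustrative names):

    #include <stdint.h>
    #include <string.h>

    static void store_schedule(uint32_t *out, uint32_t rk[][4],
                               int rounds, int enc)
    {
        /* mirrors "add $out,$tmp,$out" and "sllx $inc,4,$inc" above */
        uint32_t *p = out + (enc ? 0 : 4 * rounds); /* start or end */
        int step = enc ? 4 : -4;                    /* +16/-16 bytes */
        for (int i = 0; i <= rounds; i++, p += step)
            memcpy(p, rk[i], 16);
    }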
+{
+my ($inp,$out,$len,$key,$ivp,$dir) = map("%i$_",(0..5));
+my ($rounds,$inner,$end,$inc,$ialign,$oalign,$mask) = map("%l$_",(0..7));
+my ($iv0,$iv1,$r0hi,$r0lo,$rlhi,$rllo,$in0,$in1,$intail,$outhead,$fshift)
+   = map("%f$_",grep { !($_ & 1) } (16 .. 62));
+my ($ileft,$iright) = ($ialign,$oalign);
+
+$code.=<<___;
+.globl	aes_fx_cbc_encrypt
+.align	32
+aes_fx_cbc_encrypt:
+	save		%sp, -STACK_FRAME-16, %sp
+	srln		$len, 4, $len
+	and		$inp, 7, $ialign
+	andn		$inp, 7, $inp
+	brz,pn		$len, .Lcbc_no_data
+	sll		$ialign, 3, $ileft
+
+1:	call		.+8
+	add		%o7, .Linp_align-1b, %o7
+
+	ld		[$key + 240], $rounds
+	and		$out, 7, $oalign
+	ld		[$ivp + 0], %f0		! load ivec
+	andn		$out, 7, $out
+	ld		[$ivp + 4], %f1
+	sll		$oalign, 3, $mask
+	ld		[$ivp + 8], %f2
+	ld		[$ivp + 12], %f3
+
+	sll		$rounds, 4, $rounds
+	add		$rounds, $key, $end
+	ldd		[$key + 0], $r0hi	! round[0]
+	ldd		[$key + 8], $r0lo
+
+	add		$inp, 16, $inp
+	sub		$len,  1, $len
+	ldd		[$end + 0], $rlhi	! round[last]
+	ldd		[$end + 8], $rllo
+
+	mov		16, $inc
+	movrz		$len, 0, $inc
+	ldd		[$key + 16], %f10	! round[1]
+	ldd		[$key + 24], %f12
+
+	ldd		[%o7 + $ileft], $fshift	! shift left params
+	add		%o7, 64, %o7
+	ldd		[$inp - 16], $in0	! load input
+	ldd		[$inp -  8], $in1
+	ldda		[$inp]0x82, $intail	! non-faulting load
+	brz		$dir, .Lcbc_decrypt
+	add		$inp, $inc, $inp	! inp+=16
+
+	fxor		$r0hi, %f0, %f0		! ivec^=round[0]
+	fxor		$r0lo, %f2, %f2
+	fshiftorx	$in0, $in1, $fshift, $in0
+	fshiftorx	$in1, $intail, $fshift, $in1
+	nop
+
+.Loop_cbc_enc:
+	fxor		$in0, %f0, %f0		! inp^ivec^round[0]
+	fxor		$in1, %f2, %f2
+	ldd		[$key + 32], %f6	! round[2]
+	ldd		[$key + 40], %f8
+	add		$key, 32, $end
+	sub		$rounds, 16*6, $inner
+
+.Lcbc_enc:
+	fmovd		%f0, %f4
+	faesencx	%f2, %f10, %f0
+	faesencx	%f4, %f12, %f2
+	ldd		[$end + 16], %f10
+	ldd		[$end + 24], %f12
+	add		$end, 32, $end
+
+	fmovd		%f0, %f4
+	faesencx	%f2, %f6, %f0
+	faesencx	%f4, %f8, %f2
+	ldd		[$end + 0], %f6
+	ldd		[$end + 8], %f8
+
+	brnz,a		$inner, .Lcbc_enc
+	sub		$inner, 16*2, $inner
+
+	fmovd		%f0, %f4
+	faesencx	%f2, %f10, %f0
+	faesencx	%f4, %f12, %f2
+	ldd		[$end + 16], %f10	! round[last-1]
+	ldd		[$end + 24], %f12
+
+	movrz		$len, 0, $inc
+	fmovd		$intail, $in0
+	ldd		[$inp - 8], $in1	! load next input block
+	ldda		[$inp]0x82, $intail	! non-faulting load
+	add		$inp, $inc, $inp	! inp+=16
+
+	fmovd		%f0, %f4
+	faesencx	%f2, %f6, %f0
+	faesencx	%f4, %f8, %f2
+
+	fshiftorx	$in0, $in1, $fshift, $in0
+	fshiftorx	$in1, $intail, $fshift, $in1
+
+	fmovd		%f0, %f4
+	faesencx	%f2, %f10, %f0
+	faesencx	%f4, %f12, %f2
+	ldd		[$key + 16], %f10	! round[1]
+	ldd		[$key + 24], %f12
+
+	fxor		$r0hi, $in0, $in0	! inp^=round[0]
+	fxor		$r0lo, $in1, $in1
+
+	fmovd		%f0, %f4
+	faesenclx	%f2, $rlhi, %f0
+	faesenclx	%f4, $rllo, %f2
+
+	brnz,pn		$oalign, .Lcbc_enc_unaligned_out
+	nop
+
+	std		%f0, [$out + 0]
+	std		%f2, [$out + 8]
+	add		$out, 16, $out
+
+	brnz,a		$len, .Loop_cbc_enc
+	sub		$len, 1, $len
+
+	st		%f0, [$ivp + 0]		! output ivec
+	st		%f1, [$ivp + 4]
+	st		%f2, [$ivp + 8]
+	st		%f3, [$ivp + 12]
+
+.Lcbc_no_data:
+	ret
+	restore
+
+.align	32
+.Lcbc_enc_unaligned_out:
+	ldd		[%o7 + $mask], $fshift	! shift right params
+	mov		0xff, $mask
+	srl		$mask, $oalign, $mask
+	sub		%g0, $ileft, $iright
+
+	fshiftorx	%f0, %f0, $fshift, %f6
+	fshiftorx	%f0, %f2, $fshift, %f8
+
+	stda		%f6, [$out + $mask]0xc0	! partial store
+	orn		%g0, $mask, $mask
+	std		%f8, [$out + 8]
+	add		$out, 16, $out
+	brz		$len, .Lcbc_enc_unaligned_out_done
+	sub		$len, 1, $len
+	b		.Loop_cbc_enc_unaligned_out
+	nop
+
+.align	32
+.Loop_cbc_enc_unaligned_out:
+	fmovd		%f2, $outhead
+	fxor		$in0, %f0, %f0		! inp^ivec^round[0]
+	fxor		$in1, %f2, %f2
+	ldd		[$key + 32], %f6	! round[2]
+	ldd		[$key + 40], %f8
+
+	fmovd		%f0, %f4
+	faesencx	%f2, %f10, %f0
+	faesencx	%f4, %f12, %f2
+	ldd		[$key + 48], %f10	! round[3]
+	ldd		[$key + 56], %f12
+
+	ldx		[$inp - 16], %o0
+	ldx		[$inp -  8], %o1
+	brz		$ileft, .Lcbc_enc_aligned_inp
+	movrz		$len, 0, $inc
+
+	ldx		[$inp], %o2
+	sllx		%o0, $ileft, %o0
+	srlx		%o1, $iright, %g1
+	sllx		%o1, $ileft, %o1
+	or		%g1, %o0, %o0
+	srlx		%o2, $iright, %o2
+	or		%o2, %o1, %o1
+
+.Lcbc_enc_aligned_inp:
+	fmovd		%f0, %f4
+	faesencx	%f2, %f6, %f0
+	faesencx	%f4, %f8, %f2
+	ldd		[$key + 64], %f6	! round[4]
+	ldd		[$key + 72], %f8
+	add		$key, 64, $end
+	sub		$rounds, 16*8, $inner
+
+	stx		%o0, [%sp + LOCALS + 0]
+	stx		%o1, [%sp + LOCALS + 8]
+	add		$inp, $inc, $inp	! inp+=16
+	nop
+
+.Lcbc_enc_unaligned:
+	fmovd		%f0, %f4
+	faesencx	%f2, %f10, %f0
+	faesencx	%f4, %f12, %f2
+	ldd		[$end + 16], %f10
+	ldd		[$end + 24], %f12
+	add		$end, 32, $end
+
+	fmovd		%f0, %f4
+	faesencx	%f2, %f6, %f0
+	faesencx	%f4, %f8, %f2
+	ldd		[$end + 0], %f6
+	ldd		[$end + 8], %f8
+
+	brnz,a		$inner, .Lcbc_enc_unaligned
+	sub		$inner, 16*2, $inner
+
+	fmovd		%f0, %f4
+	faesencx	%f2, %f10, %f0
+	faesencx	%f4, %f12, %f2
+	ldd		[$end + 16], %f10	! round[last-1]
+	ldd		[$end + 24], %f12
+
+	fmovd		%f0, %f4
+	faesencx	%f2, %f6, %f0
+	faesencx	%f4, %f8, %f2
+
+	ldd		[%sp + LOCALS + 0], $in0
+	ldd		[%sp + LOCALS + 8], $in1
+
+	fmovd		%f0, %f4
+	faesencx	%f2, %f10, %f0
+	faesencx	%f4, %f12, %f2
+	ldd		[$key + 16], %f10	! round[1]
+	ldd		[$key + 24], %f12
+
+	fxor		$r0hi, $in0, $in0	! inp^=round[0]
+	fxor		$r0lo, $in1, $in1
+
+	fmovd		%f0, %f4
+	faesenclx	%f2, $rlhi, %f0
+	faesenclx	%f4, $rllo, %f2
+
+	fshiftorx	$outhead, %f0, $fshift, %f6
+	fshiftorx	%f0, %f2, $fshift, %f8
+	std		%f6, [$out + 0]
+	std		%f8, [$out + 8]
+	add		$out, 16, $out
+
+	brnz,a		$len, .Loop_cbc_enc_unaligned_out
+	sub		$len, 1, $len
+
+.Lcbc_enc_unaligned_out_done:
+	fshiftorx	%f2, %f2, $fshift, %f8
+	stda		%f8, [$out + $mask]0xc0	! partial store
+
+	st		%f0, [$ivp + 0]		! output ivec
+	st		%f1, [$ivp + 4]
+	st		%f2, [$ivp + 8]
+	st		%f3, [$ivp + 12]
+
+	ret
+	restore
+
+.align	32
+.Lcbc_decrypt:
+	fshiftorx	$in0, $in1, $fshift, $in0
+	fshiftorx	$in1, $intail, $fshift, $in1
+	fmovd		%f0, $iv0
+	fmovd		%f2, $iv1
+
+.Loop_cbc_dec:
+	fxor		$in0, $r0hi, %f0	! inp^round[0]
+	fxor		$in1, $r0lo, %f2
+	ldd		[$key + 32], %f6	! round[2]
+	ldd		[$key + 40], %f8
+	add		$key, 32, $end
+	sub		$rounds, 16*6, $inner
+
+.Lcbc_dec:
+	fmovd		%f0, %f4
+	faesdecx	%f2, %f10, %f0
+	faesdecx	%f4, %f12, %f2
+	ldd		[$end + 16], %f10
+	ldd		[$end + 24], %f12
+	add		$end, 32, $end
+
+	fmovd		%f0, %f4
+	faesdecx	%f2, %f6, %f0
+	faesdecx	%f4, %f8, %f2
+	ldd		[$end + 0], %f6
+	ldd		[$end + 8], %f8
+
+	brnz,a		$inner, .Lcbc_dec
+	sub		$inner, 16*2, $inner
+
+	fmovd		%f0, %f4
+	faesdecx	%f2, %f10, %f0
+	faesdecx	%f4, %f12, %f2
+	ldd		[$end + 16], %f10	! round[last-1]
+	ldd		[$end + 24], %f12
+
+	fmovd		%f0, %f4
+	faesdecx	%f2, %f6, %f0
+	faesdecx	%f4, %f8, %f2
+	fxor		$iv0, $rlhi, %f6	! ivec^round[last]
+	fxor		$iv1, $rllo, %f8
+	fmovd		$in0, $iv0
+	fmovd		$in1, $iv1
+
+	movrz		$len, 0, $inc
+	fmovd		$intail, $in0
+	ldd		[$inp - 8], $in1	! load next input block
+	ldda		[$inp]0x82, $intail	! non-faulting load
+	add		$inp, $inc, $inp	! inp+=16
+
+	fmovd		%f0, %f4
+	faesdecx	%f2, %f10, %f0
+	faesdecx	%f4, %f12, %f2
+	ldd		[$key + 16], %f10	! round[1]
+	ldd		[$key + 24], %f12
+
+	fshiftorx	$in0, $in1, $fshift, $in0
+	fshiftorx	$in1, $intail, $fshift, $in1
+
+	fmovd		%f0, %f4
+	faesdeclx	%f2, %f6, %f0
+	faesdeclx	%f4, %f8, %f2
+
+	brnz,pn		$oalign, .Lcbc_dec_unaligned_out
+	nop
+
+	std		%f0, [$out + 0]
+	std		%f2, [$out + 8]
+	add		$out, 16, $out
+
+	brnz,a		$len, .Loop_cbc_dec
+	sub		$len, 1, $len
+
+	st		$iv0,    [$ivp + 0]	! output ivec
+	st		$iv0#lo, [$ivp + 4]
+	st		$iv1,    [$ivp + 8]
+	st		$iv1#lo, [$ivp + 12]
+
+	ret
+	restore
+
+.align	32
+.Lcbc_dec_unaligned_out:
+	ldd		[%o7 + $mask], $fshift	! shift right params
+	mov		0xff, $mask
+	srl		$mask, $oalign, $mask
+	sub		%g0, $ileft, $iright
+
+	fshiftorx	%f0, %f0, $fshift, %f6
+	fshiftorx	%f0, %f2, $fshift, %f8
+
+	stda		%f6, [$out + $mask]0xc0	! partial store
+	orn		%g0, $mask, $mask
+	std		%f8, [$out + 8]
+	add		$out, 16, $out
+	brz		$len, .Lcbc_dec_unaligned_out_done
+	sub		$len, 1, $len
+	b		.Loop_cbc_dec_unaligned_out
+	nop
+
+.align	32
+.Loop_cbc_dec_unaligned_out:
+	fmovd		%f2, $outhead
+	fxor		$in0, $r0hi, %f0	! inp^round[0]
+	fxor		$in1, $r0lo, %f2
+	ldd		[$key + 32], %f6	! round[2]
+	ldd		[$key + 40], %f8
+
+	fmovd		%f0, %f4
+	faesdecx	%f2, %f10, %f0
+	faesdecx	%f4, %f12, %f2
+	ldd		[$key + 48], %f10	! round[3]
+	ldd		[$key + 56], %f12
+
+	ldx		[$inp - 16], %o0
+	ldx		[$inp - 8], %o1
+	brz		$ileft, .Lcbc_dec_aligned_inp
+	movrz		$len, 0, $inc
+
+	ldx		[$inp], %o2
+	sllx		%o0, $ileft, %o0
+	srlx		%o1, $iright, %g1
+	sllx		%o1, $ileft, %o1
+	or		%g1, %o0, %o0
+	srlx		%o2, $iright, %o2
+	or		%o2, %o1, %o1
+
+.Lcbc_dec_aligned_inp:
+	fmovd		%f0, %f4
+	faesdecx	%f2, %f6, %f0
+	faesdecx	%f4, %f8, %f2
+	ldd		[$key + 64], %f6	! round[4]
+	ldd		[$key + 72], %f8
+	add		$key, 64, $end
+	sub		$rounds, 16*8, $inner
+
+	stx		%o0, [%sp + LOCALS + 0]
+	stx		%o1, [%sp + LOCALS + 8]
+	add		$inp, $inc, $inp	! inp+=16
+	nop
+
+.Lcbc_dec_unaligned:
+	fmovd		%f0, %f4
+	faesdecx	%f2, %f10, %f0
+	faesdecx	%f4, %f12, %f2
+	ldd		[$end + 16], %f10
+	ldd		[$end + 24], %f12
+	add		$end, 32, $end
+
+	fmovd		%f0, %f4
+	faesdecx	%f2, %f6, %f0
+	faesdecx	%f4, %f8, %f2
+	ldd		[$end + 0], %f6
+	ldd		[$end + 8], %f8
+
+	brnz,a		$inner, .Lcbc_dec_unaligned
+	sub		$inner, 16*2, $inner
+
+	fmovd		%f0, %f4
+	faesdecx	%f2, %f10, %f0
+	faesdecx	%f4, %f12, %f2
+	ldd		[$end + 16], %f10	! round[last-1]
+	ldd		[$end + 24], %f12
+
+	fmovd		%f0, %f4
+	faesdecx	%f2, %f6, %f0
+	faesdecx	%f4, %f8, %f2
+
+	fxor		$iv0, $rlhi, %f6	! ivec^round[last]
+	fxor		$iv1, $rllo, %f8
+	fmovd		$in0, $iv0
+	fmovd		$in1, $iv1
+	ldd		[%sp + LOCALS + 0], $in0
+	ldd		[%sp + LOCALS + 8], $in1
+
+	fmovd		%f0, %f4
+	faesdecx	%f2, %f10, %f0
+	faesdecx	%f4, %f12, %f2
+	ldd		[$key + 16], %f10	! round[1]
+	ldd		[$key + 24], %f12
+
+	fmovd		%f0, %f4
+	faesdeclx	%f2, %f6, %f0
+	faesdeclx	%f4, %f8, %f2
+
+	fshiftorx	$outhead, %f0, $fshift, %f6
+	fshiftorx	%f0, %f2, $fshift, %f8
+	std		%f6, [$out + 0]
+	std		%f8, [$out + 8]
+	add		$out, 16, $out
+
+	brnz,a		$len, .Loop_cbc_dec_unaligned_out
+	sub		$len, 1, $len
+
+.Lcbc_dec_unaligned_out_done:
+	fshiftorx	%f2, %f2, $fshift, %f8
+	stda		%f8, [$out + $mask]0xc0	! partial store
+
+	st		$iv0,    [$ivp + 0]	! output ivec
+	st		$iv0#lo, [$ivp + 4]
+	st		$iv1,    [$ivp + 8]
+	st		$iv1#lo, [$ivp + 12]
+
+	ret
+	restore
+.type	aes_fx_cbc_encrypt,#function
+.size	aes_fx_cbc_encrypt,.-aes_fx_cbc_encrypt
+___
+}
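The CBC-encrypt loop above folds the chaining XOR and the initial AddRoundKey together: the IV is pre-XORed with round[0] once, and each input block is likewise pre-XORed, so "inp^ivec^round[0]" costs one fxor pair per block. In plain C terms (a sketch; aes_encrypt_block is a hypothetical single-block primitive, not this module's API):

    #include <stddef.h>
    #include <string.h>

    void aes_encrypt_block(const unsigned char *in, unsigned char *out,
                           const void *key);   /* hypothetical primitive */

    static void cbc_encrypt_sketch(const unsigned char *in,
                                   unsigned char *out, size_t blocks,
                                   const void *key, unsigned char iv[16])
    {
        while (blocks--) {
            for (int i = 0; i < 16; i++)
                iv[i] ^= in[i];              /* inp ^ ivec */
            aes_encrypt_block(iv, iv, key);  /* ciphertext = next ivec */
            memcpy(out, iv, 16);
            in += 16; out += 16;
        }
    }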
+{
+my ($inp,$out,$len,$key,$ivp) = map("%i$_",(0..5));
+my ($rounds,$inner,$end,$inc,$ialign,$oalign,$mask) = map("%l$_",(0..7));
+my ($ctr0,$ctr1,$r0hi,$r0lo,$rlhi,$rllo,$in0,$in1,$intail,$outhead,$fshift)
+   = map("%f$_",grep { !($_ & 1) } (16 .. 62));
+my ($ileft,$iright) = ($ialign, $oalign);
+my $one = "%f14";
+
+$code.=<<___;
+.globl	aes_fx_ctr32_encrypt_blocks
+.align	32
+aes_fx_ctr32_encrypt_blocks:
+	save		%sp, -STACK_FRAME-16, %sp
+	srln		$len, 0, $len
+	and		$inp, 7, $ialign
+	andn		$inp, 7, $inp
+	brz,pn		$len, .Lctr32_no_data
+	sll		$ialign, 3, $ileft
+
+.Lpic:	call		.+8
+	add		%o7, .Linp_align - .Lpic, %o7
+
+	ld		[$key + 240], $rounds
+	and		$out, 7, $oalign
+	ld		[$ivp +  0], $ctr0	! load counter
+	andn		$out, 7, $out
+	ld		[$ivp +  4], $ctr0#lo
+	sll		$oalign, 3, $mask
+	ld		[$ivp +  8], $ctr1
+	ld		[$ivp + 12], $ctr1#lo
+	ldd		[%o7 + 128], $one
+
+	sll		$rounds, 4, $rounds
+	add		$rounds, $key, $end
+	ldd		[$key + 0], $r0hi	! round[0]
+	ldd		[$key + 8], $r0lo
+
+	add		$inp, 16, $inp
+	sub		$len, 1, $len
+	ldd		[$key + 16], %f10	! round[1]
+	ldd		[$key + 24], %f12
+
+	mov		16, $inc
+	movrz		$len, 0, $inc
+	ldd		[$end + 0], $rlhi	! round[last]
+	ldd		[$end + 8], $rllo
+
+	ldd		[%o7 + $ileft], $fshift	! shift left params
+	add		%o7, 64, %o7
+	ldd		[$inp - 16], $in0	! load input
+	ldd		[$inp -  8], $in1
+	ldda		[$inp]0x82, $intail	! non-faulting load
+	add		$inp, $inc, $inp	! inp+=16
+
+	fshiftorx	$in0, $in1, $fshift, $in0
+	fshiftorx	$in1, $intail, $fshift, $in1
+
+.Loop_ctr32:
+	fxor		$ctr0, $r0hi, %f0	! counter^round[0]
+	fxor		$ctr1, $r0lo, %f2
+	ldd		[$key + 32], %f6	! round[2]
+	ldd		[$key + 40], %f8
+	add		$key, 32, $end
+	sub		$rounds, 16*6, $inner
+
+.Lctr32_enc:
+	fmovd		%f0, %f4
+	faesencx	%f2, %f10, %f0
+	faesencx	%f4, %f12, %f2
+	ldd		[$end + 16], %f10
+	ldd		[$end + 24], %f12
+	add		$end, 32, $end
+
+	fmovd		%f0, %f4
+	faesencx	%f2, %f6, %f0
+	faesencx	%f4, %f8, %f2
+	ldd		[$end + 0], %f6
+	ldd		[$end + 8], %f8
+
+	brnz,a		$inner, .Lctr32_enc
+	sub		$inner, 16*2, $inner
+
+	fmovd		%f0, %f4
+	faesencx	%f2, %f10, %f0
+	faesencx	%f4, %f12, %f2
+	ldd		[$end + 16], %f10	! round[last-1]
+	ldd		[$end + 24], %f12
+
+	fmovd		%f0, %f4
+	faesencx	%f2, %f6, %f0
+	faesencx	%f4, %f8, %f2
+	fxor		$in0, $rlhi, %f6	! inp^round[last]
+	fxor		$in1, $rllo, %f8
+
+	movrz		$len, 0, $inc
+	fmovd		$intail, $in0
+	ldd		[$inp - 8], $in1	! load next input block
+	ldda		[$inp]0x82, $intail	! non-faulting load
+	add		$inp, $inc, $inp	! inp+=16
+
+	fmovd		%f0, %f4
+	faesencx	%f2, %f10, %f0
+	faesencx	%f4, %f12, %f2
+	ldd		[$key + 16], %f10	! round[1]
+	ldd		[$key + 24], %f12
+
+	fshiftorx	$in0, $in1, $fshift, $in0
+	fshiftorx	$in1, $intail, $fshift, $in1
+	fpadd32		$ctr1, $one, $ctr1	! increment counter
+
+	fmovd		%f0, %f4
+	faesenclx	%f2, %f6, %f0
+	faesenclx	%f4, %f8, %f2
+
+	brnz,pn		$oalign, .Lctr32_unaligned_out
+	nop
+
+	std		%f0, [$out + 0]
+	std		%f2, [$out + 8]
+	add		$out, 16, $out
+
+	brnz,a		$len, .Loop_ctr32
+	sub		$len, 1, $len
+
+.Lctr32_no_data:
+	ret
+	restore
+
+.align	32
+.Lctr32_unaligned_out:
+	ldd		[%o7 + $mask], $fshift	! shift right params
+	mov		0xff, $mask
+	srl		$mask, $oalign, $mask
+	sub		%g0, $ileft, $iright
+
+	fshiftorx	%f0, %f0, $fshift, %f6
+	fshiftorx	%f0, %f2, $fshift, %f8
+
+	stda		%f6, [$out + $mask]0xc0	! partial store
+	orn		%g0, $mask, $mask
+	std		%f8, [$out + 8]
+	add		$out, 16, $out
+	brz		$len, .Lctr32_unaligned_out_done
+	sub		$len, 1, $len
+	b		.Loop_ctr32_unaligned_out
+	nop
+
+.align	32
+.Loop_ctr32_unaligned_out:
+	fmovd		%f2, $outhead
+	fxor		$ctr0, $r0hi, %f0	! counter^round[0]
+	fxor		$ctr1, $r0lo, %f2
+	ldd		[$key + 32], %f6	! round[2]
+	ldd		[$key + 40], %f8
+
+	fmovd		%f0, %f4
+	faesencx	%f2, %f10, %f0
+	faesencx	%f4, %f12, %f2
+	ldd		[$key + 48], %f10	! round[3]
+	ldd		[$key + 56], %f12
+
+	ldx		[$inp - 16], %o0
+	ldx		[$inp -  8], %o1
+	brz		$ileft, .Lctr32_aligned_inp
+	movrz		$len, 0, $inc
+
+	ldx		[$inp], %o2
+	sllx		%o0, $ileft, %o0
+	srlx		%o1, $iright, %g1
+	sllx		%o1, $ileft, %o1
+	or		%g1, %o0, %o0
+	srlx		%o2, $iright, %o2
+	or		%o2, %o1, %o1
+
+.Lctr32_aligned_inp:
+	fmovd		%f0, %f4
+	faesencx	%f2, %f6, %f0
+	faesencx	%f4, %f8, %f2
+	ldd		[$key + 64], %f6	! round[4]
+	ldd		[$key + 72], %f8
+	add		$key, 64, $end
+	sub		$rounds, 16*8, $inner
+
+	stx		%o0, [%sp + LOCALS + 0]
+	stx		%o1, [%sp + LOCALS + 8]
+	add		$inp, $inc, $inp	! inp+=16
+	nop
+
+.Lctr32_enc_unaligned:
+	fmovd		%f0, %f4
+	faesencx	%f2, %f10, %f0
+	faesencx	%f4, %f12, %f2
+	ldd		[$end + 16], %f10
+	ldd		[$end + 24], %f12
+	add		$end, 32, $end
+
+	fmovd		%f0, %f4
+	faesencx	%f2, %f6, %f0
+	faesencx	%f4, %f8, %f2
+	ldd		[$end + 0], %f6
+	ldd		[$end + 8], %f8
+
+	brnz,a		$inner, .Lctr32_enc_unaligned
+	sub		$inner, 16*2, $inner
+
+	fmovd		%f0, %f4
+	faesencx	%f2, %f10, %f0
+	faesencx	%f4, %f12, %f2
+	ldd		[$end + 16], %f10	! round[last-1]
+	ldd		[$end + 24], %f12
+	fpadd32		$ctr1, $one, $ctr1	! increment counter
+
+	fmovd		%f0, %f4
+	faesencx	%f2, %f6, %f0
+	faesencx	%f4, %f8, %f2
+	fxor		$in0, $rlhi, %f6	! inp^round[last]
+	fxor		$in1, $rllo, %f8
+	ldd		[%sp + LOCALS + 0], $in0
+	ldd		[%sp + LOCALS + 8], $in1
+
+	fmovd		%f0, %f4
+	faesencx	%f2, %f10, %f0
+	faesencx	%f4, %f12, %f2
+	ldd		[$key + 16], %f10	! round[1]
+	ldd		[$key + 24], %f12
+
+	fmovd		%f0, %f4
+	faesenclx	%f2, %f6, %f0
+	faesenclx	%f4, %f8, %f2
+
+	fshiftorx	$outhead, %f0, $fshift, %f6
+	fshiftorx	%f0, %f2, $fshift, %f8
+	std		%f6, [$out + 0]
+	std		%f8, [$out + 8]
+	add		$out, 16, $out
+
+	brnz,a		$len, .Loop_ctr32_unaligned_out
+	sub		$len, 1, $len
+
+.Lctr32_unaligned_out_done:
+	fshiftorx	%f2, %f2, $fshift, %f8
+	stda		%f8, [$out + $mask]0xc0	! partial store
+
+	ret
+	restore
+.type	aes_fx_ctr32_encrypt_blocks,#function
+.size	aes_fx_ctr32_encrypt_blocks,.-aes_fx_ctr32_encrypt_blocks
+
+.align	32
+.Linp_align:		! fshiftorx parameters for left shift toward %rs1
+	.byte	0, 0, 64,  0,	0, 64,  0, -64
+	.byte	0, 0, 56,  8,	0, 56,  8, -56
+	.byte	0, 0, 48, 16,	0, 48, 16, -48
+	.byte	0, 0, 40, 24,	0, 40, 24, -40
+	.byte	0, 0, 32, 32,	0, 32, 32, -32
+	.byte	0, 0, 24, 40,	0, 24, 40, -24
+	.byte	0, 0, 16, 48,	0, 16, 48, -16
+	.byte	0, 0,  8, 56,	0,  8, 56, -8
+.Lout_align:		! fshiftorx parameters for right shift toward %rs2
+	.byte	0, 0,  0, 64,	0,  0, 64,   0
+	.byte	0, 0,  8, 56,	0,  8, 56,  -8
+	.byte	0, 0, 16, 48,	0, 16, 48, -16
+	.byte	0, 0, 24, 40,	0, 24, 40, -24
+	.byte	0, 0, 32, 32,	0, 32, 32, -32
+	.byte	0, 0, 40, 24,	0, 40, 24, -40
+	.byte	0, 0, 48, 16,	0, 48, 16, -48
+	.byte	0, 0, 56,  8,	0, 56,  8, -56
+.Lone:
+	.word	0, 1
+.asciz	"AES for Fujitsu SPARC64 X, CRYPTOGAMS by <appro\@openssl.org>"
+.align	4
+___
+}
+# The purpose of these subroutines is to explicitly encode VIS instructions,
+# so that the module can be compiled without specifying VIS extensions on
+# the compiler command line, e.g. -xarch=v9 vs. -xarch=v9a. The idea is to
+# preserve the option of producing a "universal" binary and to let the
+# programmer detect at run-time whether the current CPU is VIS-capable.
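+# A minimal sketch of the encoding these subroutines emit (illustrative
+# only, assuming VIS1 operands in the lower register bank): the VIS1
+# instruction "faligndata %f0,%f2,%f4" would be re-encoded as
+#
+#	my ($rd,$rs1,$opf,$rs2) = (4, 0, 0x048, 2);	# faligndata
+#	sprintf ".word\t0x%08x", 0x81b00000|$rd<<25|$rs1<<14|$opf<<5|$rs2;
+#	# yields ".word 0x89b00902", which any v9 assembler accepts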
+sub unvis {
+my ($mnemonic,$rs1,$rs2,$rd)=@_;
+my ($ref,$opf);
+my %visopf = (	"faligndata"	=> 0x048,
+		"bshuffle"	=> 0x04c,
+		"fpadd32"	=> 0x052,
+		"fxor"		=> 0x06c,
+		"fsrc2"		=> 0x078	);
+
+    $ref = "$mnemonic\t$rs1,$rs2,$rd";
+
+    if ($opf=$visopf{$mnemonic}) {
+	foreach ($rs1,$rs2,$rd) {
+	    return $ref if (!/%f([0-9]{1,2})/);
+	    $_=$1;
+	    if ($1>=32) {
+		return $ref if ($1&1);
+		# re-encode for upper double register addressing
+		$_=($1|$1>>5)&31;
+	    }
+	}
+
+	return	sprintf ".word\t0x%08x !%s",
+			0x81b00000|$rd<<25|$rs1<<14|$opf<<5|$rs2,
+			$ref;
+    } else {
+	return $ref;
+    }
+}
+
+sub unvis3 {
+my ($mnemonic,$rs1,$rs2,$rd)=@_;
+my %bias = ( "g" => 0, "o" => 8, "l" => 16, "i" => 24 );
+my ($ref,$opf);
+my %visopf = (	"alignaddr"	=> 0x018,
+		"bmask"		=> 0x019,
+		"alignaddrl"	=> 0x01a	);
+
+    $ref = "$mnemonic\t$rs1,$rs2,$rd";
+
+    if ($opf=$visopf{$mnemonic}) {
+	foreach ($rs1,$rs2,$rd) {
+	    return $ref if (!/%([goli])([0-9])/);
+	    $_=$bias{$1}+$2;
+	}
+
+	return	sprintf ".word\t0x%08x !%s",
+			0x81b00000|$rd<<25|$rs1<<14|$opf<<5|$rs2,
+			$ref;
+    } else {
+	return $ref;
+    }
+}
+
+sub unfx {
+my ($mnemonic,$rs1,$rs2,$rd)=@_;
+my ($ref,$opf);
+my %aesopf = (	"faesencx"	=> 0x90,
+		"faesdecx"	=> 0x91,
+		"faesenclx"	=> 0x92,
+		"faesdeclx"	=> 0x93,
+		"faeskeyx"	=> 0x94	);
+
+    $ref = "$mnemonic\t$rs1,$rs2,$rd";
+
+    if (defined($opf=$aesopf{$mnemonic})) {
+	$rs2 = ($rs2 =~ /%f([0-6]*[02468])/) ? (($1|$1>>5)&31) : $rs2;
+	$rs2 = oct($rs2) if ($rs2 =~ /^0/);
+
+	foreach ($rs1,$rd) {
+	    return $ref if (!/%f([0-9]{1,2})/);
+	    $_=$1;
+	    if ($1>=32) {
+		return $ref if ($1&1);
+		# re-encode for upper double register addressing
+		$_=($1|$1>>5)&31;
+	    }
+	}
+
+	return	sprintf ".word\t0x%08x !%s",
+			2<<30|$rd<<25|0x36<<19|$rs1<<14|$opf<<5|$rs2,
+			$ref;
+    } else {
+	return $ref;
+    }
+}
+
+sub unfx3src {
+my ($mnemonic,$rs1,$rs2,$rs3,$rd)=@_;
+my ($ref,$opf);
+my %aesopf = (	"fshiftorx"	=> 0x0b	);
+
+    $ref = "$mnemonic\t$rs1,$rs2,$rs3,$rd";
+
+    if (defined($opf=$aesopf{$mnemonic})) {
+	foreach ($rs1,$rs2,$rs3,$rd) {
+	    return $ref if (!/%f([0-9]{1,2})/);
+	    $_=$1;
+	    if ($1>=32) {
+		return $ref if ($1&1);
+		# re-encode for upper double register addressing
+		$_=($1|$1>>5)&31;
+	    }
+	}
+
+	return	sprintf ".word\t0x%08x !%s",
+			2<<30|$rd<<25|0x37<<19|$rs1<<14|$rs3<<9|$opf<<5|$rs2,
+			$ref;
+    } else {
+	return $ref;
+    }
+}
+
+foreach (split("\n",$code)) {
+    s/\`([^\`]*)\`/eval $1/ge;
+
+    s/%f([0-9]+)#lo/sprintf "%%f%d",$1+1/ge;
+
+    s/\b(faes[^x]{3,4}x)\s+(%f[0-9]{1,2}),\s*([%fx0-9]+),\s*(%f[0-9]{1,2})/
+		&unfx($1,$2,$3,$4)
+     /ge or
+    s/\b([f][^\s]*)\s+(%f[0-9]{1,2}),\s*(%f[0-9]{1,2}),\s*(%f[0-9]{1,2}),\s*(%f[0-9]{1,2})/
+		&unfx3src($1,$2,$3,$4,$5)
+     /ge or
+    s/\b([fb][^\s]*)\s+(%f[0-9]{1,2}),\s*(%f[0-9]{1,2}),\s*(%f[0-9]{1,2})/
+		&unvis($1,$2,$3,$4)
+     /ge or
+    s/\b(alignaddr[l]*)\s+(%[goli][0-7]),\s*(%[goli][0-7]),\s*(%[goli][0-7])/
+		&unvis3($1,$2,$3,$4)
+     /ge;
+    print $_,"\n";
+}
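+# Worked example of the filter above (for illustration): a line such as
+#	faesencx	%f2,%f6,%f0
+# matches the first substitution, and unfx() re-encodes it as
+#	.word	0x81b09206 !faesencx	%f2,%f6,%f0
+# (2<<30|0<<25|0x36<<19|2<<14|0x90<<5|6), so the module assembles even
+# where the assembler lacks the Fujitsu AES mnemonics.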
+
+close STDOUT or die "error closing STDOUT: $!";
diff --git a/ap/lib/libssl/openssl-1.1.1o/crypto/aes/asm/aesni-mb-x86_64.pl b/ap/lib/libssl/openssl-1.1.1o/crypto/aes/asm/aesni-mb-x86_64.pl
new file mode 100644
index 0000000..a80cfdc
--- /dev/null
+++ b/ap/lib/libssl/openssl-1.1.1o/crypto/aes/asm/aesni-mb-x86_64.pl
@@ -0,0 +1,1474 @@
+#! /usr/bin/env perl
+# Copyright 2013-2020 The OpenSSL Project Authors. All Rights Reserved.
+#
+# Licensed under the OpenSSL license (the "License").  You may not use
+# this file except in compliance with the License.  You can obtain a copy
+# in the file LICENSE in the source distribution or at
+# https://www.openssl.org/source/license.html
+
+
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+
+# Multi-buffer AES-NI procedures process several independent buffers
+# in parallel by interleaving independent instructions.
+#
+# Cycles per byte for interleave factor 4:
+#
+#			asymptotic	measured
+#			---------------------------
+# Westmere		5.00/4=1.25	5.13/4=1.28
+# Atom			15.0/4=3.75	?15.7/4=3.93
+# Sandy Bridge		5.06/4=1.27	5.18/4=1.29
+# Ivy Bridge		5.06/4=1.27	5.14/4=1.29
+# Haswell		4.44/4=1.11	4.44/4=1.11
+# Bulldozer		5.75/4=1.44	5.76/4=1.44
+#
+# Cycles per byte for interleave factor 8 (not implemented for
+# pre-AVX processors, where a higher interleave factor incidentally
+# doesn't result in any improvement):
+#
+#			asymptotic	measured
+#			---------------------------
+# Sandy Bridge		5.06/8=0.64	7.10/8=0.89(*)
+# Ivy Bridge		5.06/8=0.64	7.14/8=0.89(*)
+# Haswell		5.00/8=0.63	5.00/8=0.63
+# Bulldozer		5.75/8=0.72	5.77/8=0.72
+#
+# (*)	Sandy/Ivy Bridge are known to handle high interleave factors
+#	suboptimally.
+
+$flavour = shift;
+$output  = shift;
+if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
+
+$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
+( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
+die "can't locate x86_64-xlate.pl";
+
+$avx=0;
+
+if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
+		=~ /GNU assembler version ([2-9]\.[0-9]+)/) {
+	$avx = ($1>=2.19) + ($1>=2.22);
+}
+
+if (!$avx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
+	   `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/) {
+	$avx = ($1>=2.09) + ($1>=2.10);
+}
+
+if (!$avx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
+	   `ml64 2>&1` =~ /Version ([0-9]+)\./) {
+	$avx = ($1>=10) + ($1>=11);
+}
+
+if (!$avx && `$ENV{CC} -v 2>&1` =~ /((?:clang|LLVM) version|.*based on LLVM) ([0-9]+\.[0-9]+)/) {
+	$avx = ($2>=3.0) + ($2>3.0);
+}
+
+open OUT,"| \"$^X\" \"$xlate\" $flavour \"$output\"";
+*STDOUT=*OUT;
+
+# void aesni_multi_cbc_encrypt (
+#     struct {	void *inp,*out; int blocks; double iv[2]; } inp[8];
+#     const AES_KEY *key,
+#     int num);		/* 1 or 2 */
+#
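+# That is, each descriptor occupies 40 bytes: input pointer at offset 0,
+# output pointer at 8, block count at 16 and the 16-byte IV at 24; these
+# are the 40*$i+... displacements indexed below.
+#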
+$inp="%rdi";	# 1st arg
+$key="%rsi";	# 2nd arg
+$num="%edx";
+
+@inptr=map("%r$_",(8..11));
+@outptr=map("%r$_",(12..15));
+
+($rndkey0,$rndkey1)=("%xmm0","%xmm1");
+@out=map("%xmm$_",(2..5));
+@inp=map("%xmm$_",(6..9));
+($counters,$mask,$zero)=map("%xmm$_",(10..12));
+
+($rounds,$one,$sink,$offset)=("%eax","%ecx","%rbp","%rbx");
+
+$code.=<<___;
+.text
+
+.extern	OPENSSL_ia32cap_P
+
+.globl	aesni_multi_cbc_encrypt
+.type	aesni_multi_cbc_encrypt,\@function,3
+.align	32
+aesni_multi_cbc_encrypt:
+.cfi_startproc
+___
+$code.=<<___ if ($avx);
+	cmp	\$2,$num
+	jb	.Lenc_non_avx
+	mov	OPENSSL_ia32cap_P+4(%rip),%ecx
+	test	\$`1<<28`,%ecx			# AVX bit
+	jnz	_avx_cbc_enc_shortcut
+	jmp	.Lenc_non_avx
+.align	16
+.Lenc_non_avx:
+___
+$code.=<<___;
+	mov	%rsp,%rax
+.cfi_def_cfa_register	%rax
+	push	%rbx
+.cfi_push	%rbx
+	push	%rbp
+.cfi_push	%rbp
+	push	%r12
+.cfi_push	%r12
+	push	%r13
+.cfi_push	%r13
+	push	%r14
+.cfi_push	%r14
+	push	%r15
+.cfi_push	%r15
+___
+$code.=<<___ if ($win64);
+	lea	-0xa8(%rsp),%rsp
+	movaps	%xmm6,(%rsp)
+	movaps	%xmm7,0x10(%rsp)
+	movaps	%xmm8,0x20(%rsp)
+	movaps	%xmm9,0x30(%rsp)
+	movaps	%xmm10,0x40(%rsp)
+	movaps	%xmm11,0x50(%rsp)
+	movaps	%xmm12,0x60(%rsp)
+	movaps	%xmm13,-0x68(%rax)	# not used, saved to share se_handler
+	movaps	%xmm14,-0x58(%rax)
+	movaps	%xmm15,-0x48(%rax)
+___
+$code.=<<___;
+	# stack layout
+	#
+	# +0	output sink
+	# +16	input sink [original %rsp and $num]
+	# +32	counters
+
+	sub	\$48,%rsp
+	and	\$-64,%rsp
+	mov	%rax,16(%rsp)			# original %rsp
+.cfi_cfa_expression	%rsp+16,deref,+8
+
+.Lenc4x_body:
+	movdqu	($key),$zero			# 0-round key
+	lea	0x78($key),$key			# size optimization
+	lea	40*2($inp),$inp
+
+.Lenc4x_loop_grande:
+	mov	$num,24(%rsp)			# original $num
+	xor	$num,$num
+___
+for($i=0;$i<4;$i++) {
+    $code.=<<___;
+	mov	`40*$i+16-40*2`($inp),$one	# borrow $one for number of blocks
+	mov	`40*$i+0-40*2`($inp),@inptr[$i]
+	cmp	$num,$one
+	mov	`40*$i+8-40*2`($inp),@outptr[$i]
+	cmovg	$one,$num			# find maximum
+	test	$one,$one
+	movdqu	`40*$i+24-40*2`($inp),@out[$i]	# load IV
+	mov	$one,`32+4*$i`(%rsp)		# initialize counters
+	cmovle	%rsp,@inptr[$i]			# cancel input
+___
+}
+$code.=<<___;
+	test	$num,$num
+	jz	.Lenc4x_done
+
+	movups	0x10-0x78($key),$rndkey1
+	 pxor	$zero,@out[0]
+	movups	0x20-0x78($key),$rndkey0
+	 pxor	$zero,@out[1]
+	mov	0xf0-0x78($key),$rounds
+	 pxor	$zero,@out[2]
+	movdqu	(@inptr[0]),@inp[0]		# load inputs
+	 pxor	$zero,@out[3]
+	movdqu	(@inptr[1]),@inp[1]
+	 pxor	@inp[0],@out[0]
+	movdqu	(@inptr[2]),@inp[2]
+	 pxor	@inp[1],@out[1]
+	movdqu	(@inptr[3]),@inp[3]
+	 pxor	@inp[2],@out[2]
+	 pxor	@inp[3],@out[3]
+	movdqa	32(%rsp),$counters		# load counters
+	xor	$offset,$offset
+	jmp	.Loop_enc4x
+
+.align	32
+.Loop_enc4x:
+	add	\$16,$offset
+	lea	16(%rsp),$sink			# sink pointer
+	mov	\$1,$one			# constant of 1
+	sub	$offset,$sink
+
+	aesenc		$rndkey1,@out[0]
+	prefetcht0	31(@inptr[0],$offset)	# prefetch input
+	prefetcht0	31(@inptr[1],$offset)
+	aesenc		$rndkey1,@out[1]
+	prefetcht0	31(@inptr[2],$offset)
+	prefetcht0	31(@inptr[3],$offset)
+	aesenc		$rndkey1,@out[2]
+	aesenc		$rndkey1,@out[3]
+	movups		0x30-0x78($key),$rndkey1
+___
+for($i=0;$i<4;$i++) {
+my $rndkey = ($i&1) ? $rndkey1 : $rndkey0;
+$code.=<<___;
+	 cmp		`32+4*$i`(%rsp),$one
+	aesenc		$rndkey,@out[0]
+	aesenc		$rndkey,@out[1]
+	aesenc		$rndkey,@out[2]
+	 cmovge		$sink,@inptr[$i]	# cancel input
+	 cmovg		$sink,@outptr[$i]	# sink output
+	aesenc		$rndkey,@out[3]
+	movups		`0x40+16*$i-0x78`($key),$rndkey
+___
+}
+$code.=<<___;
+	 movdqa		$counters,$mask
+	aesenc		$rndkey0,@out[0]
+	prefetcht0	15(@outptr[0],$offset)	# prefetch output
+	prefetcht0	15(@outptr[1],$offset)
+	aesenc		$rndkey0,@out[1]
+	prefetcht0	15(@outptr[2],$offset)
+	prefetcht0	15(@outptr[3],$offset)
+	aesenc		$rndkey0,@out[2]
+	aesenc		$rndkey0,@out[3]
+	movups		0x80-0x78($key),$rndkey0
+	 pxor		$zero,$zero
+
+	aesenc		$rndkey1,@out[0]
+	 pcmpgtd	$zero,$mask
+	 movdqu		-0x78($key),$zero	# reload 0-round key
+	aesenc		$rndkey1,@out[1]
+	 paddd		$mask,$counters		# decrement counters
+	 movdqa		$counters,32(%rsp)	# update counters
+	aesenc		$rndkey1,@out[2]
+	aesenc		$rndkey1,@out[3]
+	movups		0x90-0x78($key),$rndkey1
+
+	cmp	\$11,$rounds
+
+	aesenc		$rndkey0,@out[0]
+	aesenc		$rndkey0,@out[1]
+	aesenc		$rndkey0,@out[2]
+	aesenc		$rndkey0,@out[3]
+	movups		0xa0-0x78($key),$rndkey0
+
+	jb	.Lenc4x_tail
+
+	aesenc		$rndkey1,@out[0]
+	aesenc		$rndkey1,@out[1]
+	aesenc		$rndkey1,@out[2]
+	aesenc		$rndkey1,@out[3]
+	movups		0xb0-0x78($key),$rndkey1
+
+	aesenc		$rndkey0,@out[0]
+	aesenc		$rndkey0,@out[1]
+	aesenc		$rndkey0,@out[2]
+	aesenc		$rndkey0,@out[3]
+	movups		0xc0-0x78($key),$rndkey0
+
+	je	.Lenc4x_tail
+
+	aesenc		$rndkey1,@out[0]
+	aesenc		$rndkey1,@out[1]
+	aesenc		$rndkey1,@out[2]
+	aesenc		$rndkey1,@out[3]
+	movups		0xd0-0x78($key),$rndkey1
+
+	aesenc		$rndkey0,@out[0]
+	aesenc		$rndkey0,@out[1]
+	aesenc		$rndkey0,@out[2]
+	aesenc		$rndkey0,@out[3]
+	movups		0xe0-0x78($key),$rndkey0
+	jmp	.Lenc4x_tail
+
+.align	32
+.Lenc4x_tail:
+	aesenc		$rndkey1,@out[0]
+	aesenc		$rndkey1,@out[1]
+	aesenc		$rndkey1,@out[2]
+	aesenc		$rndkey1,@out[3]
+	 movdqu		(@inptr[0],$offset),@inp[0]
+	movdqu		0x10-0x78($key),$rndkey1
+
+	aesenclast	$rndkey0,@out[0]
+	 movdqu		(@inptr[1],$offset),@inp[1]
+	 pxor		$zero,@inp[0]
+	aesenclast	$rndkey0,@out[1]
+	 movdqu		(@inptr[2],$offset),@inp[2]
+	 pxor		$zero,@inp[1]
+	aesenclast	$rndkey0,@out[2]
+	 movdqu		(@inptr[3],$offset),@inp[3]
+	 pxor		$zero,@inp[2]
+	aesenclast	$rndkey0,@out[3]
+	movdqu		0x20-0x78($key),$rndkey0
+	 pxor		$zero,@inp[3]
+
+	movups		@out[0],-16(@outptr[0],$offset)
+	 pxor		@inp[0],@out[0]
+	movups		@out[1],-16(@outptr[1],$offset)
+	 pxor		@inp[1],@out[1]
+	movups		@out[2],-16(@outptr[2],$offset)
+	 pxor		@inp[2],@out[2]
+	movups		@out[3],-16(@outptr[3],$offset)
+	 pxor		@inp[3],@out[3]
+
+	dec	$num
+	jnz	.Loop_enc4x
+
+	mov	16(%rsp),%rax			# original %rsp
+.cfi_def_cfa	%rax,8
+	mov	24(%rsp),$num
+
+	#pxor	@inp[0],@out[0]
+	#pxor	@inp[1],@out[1]
+	#movdqu	@out[0],`40*0+24-40*2`($inp)	# output iv FIX ME!
+	#pxor	@inp[2],@out[2]
+	#movdqu	@out[1],`40*1+24-40*2`($inp)
+	#pxor	@inp[3],@out[3]
+	#movdqu	@out[2],`40*2+24-40*2`($inp)	# won't fix, let caller
+	#movdqu	@out[3],`40*3+24-40*2`($inp)	# figure this out...
+
+	lea	`40*4`($inp),$inp
+	dec	$num
+	jnz	.Lenc4x_loop_grande
+
+.Lenc4x_done:
+___
+$code.=<<___ if ($win64);
+	movaps	-0xd8(%rax),%xmm6
+	movaps	-0xc8(%rax),%xmm7
+	movaps	-0xb8(%rax),%xmm8
+	movaps	-0xa8(%rax),%xmm9
+	movaps	-0x98(%rax),%xmm10
+	movaps	-0x88(%rax),%xmm11
+	movaps	-0x78(%rax),%xmm12
+	#movaps	-0x68(%rax),%xmm13
+	#movaps	-0x58(%rax),%xmm14
+	#movaps	-0x48(%rax),%xmm15
+___
+$code.=<<___;
+	mov	-48(%rax),%r15
+.cfi_restore	%r15
+	mov	-40(%rax),%r14
+.cfi_restore	%r14
+	mov	-32(%rax),%r13
+.cfi_restore	%r13
+	mov	-24(%rax),%r12
+.cfi_restore	%r12
+	mov	-16(%rax),%rbp
+.cfi_restore	%rbp
+	mov	-8(%rax),%rbx
+.cfi_restore	%rbx
+	lea	(%rax),%rsp
+.cfi_def_cfa_register	%rsp
+.Lenc4x_epilogue:
+	ret
+.cfi_endproc
+.size	aesni_multi_cbc_encrypt,.-aesni_multi_cbc_encrypt
+
+.globl	aesni_multi_cbc_decrypt
+.type	aesni_multi_cbc_decrypt,\@function,3
+.align	32
+aesni_multi_cbc_decrypt:
+.cfi_startproc
+___
+$code.=<<___ if ($avx);
+	cmp	\$2,$num
+	jb	.Ldec_non_avx
+	mov	OPENSSL_ia32cap_P+4(%rip),%ecx
+	test	\$`1<<28`,%ecx			# AVX bit
+	jnz	_avx_cbc_dec_shortcut
+	jmp	.Ldec_non_avx
+.align	16
+.Ldec_non_avx:
+___
+$code.=<<___;
+	mov	%rsp,%rax
+.cfi_def_cfa_register	%rax
+	push	%rbx
+.cfi_push	%rbx
+	push	%rbp
+.cfi_push	%rbp
+	push	%r12
+.cfi_push	%r12
+	push	%r13
+.cfi_push	%r13
+	push	%r14
+.cfi_push	%r14
+	push	%r15
+.cfi_push	%r15
+___
+$code.=<<___ if ($win64);
+	lea	-0xa8(%rsp),%rsp
+	movaps	%xmm6,(%rsp)
+	movaps	%xmm7,0x10(%rsp)
+	movaps	%xmm8,0x20(%rsp)
+	movaps	%xmm9,0x30(%rsp)
+	movaps	%xmm10,0x40(%rsp)
+	movaps	%xmm11,0x50(%rsp)
+	movaps	%xmm12,0x60(%rsp)
+	movaps	%xmm13,-0x68(%rax)	# not used, saved to share se_handler
+	movaps	%xmm14,-0x58(%rax)
+	movaps	%xmm15,-0x48(%rax)
+___
+$code.=<<___;
+	# stack layout
+	#
+	# +0	output sink
+	# +16	input sink [original %rsp and $num]
+	# +32	counters
+
+	sub	\$48,%rsp
+	and	\$-64,%rsp
+	mov	%rax,16(%rsp)			# original %rsp
+.cfi_cfa_expression	%rsp+16,deref,+8
+
+.Ldec4x_body:
+	movdqu	($key),$zero			# 0-round key
+	lea	0x78($key),$key			# size optimization
+	lea	40*2($inp),$inp
+
+.Ldec4x_loop_grande:
+	mov	$num,24(%rsp)			# original $num
+	xor	$num,$num
+___
+for($i=0;$i<4;$i++) {
+    $code.=<<___;
+	mov	`40*$i+16-40*2`($inp),$one	# borrow $one for number of blocks
+	mov	`40*$i+0-40*2`($inp),@inptr[$i]
+	cmp	$num,$one
+	mov	`40*$i+8-40*2`($inp),@outptr[$i]
+	cmovg	$one,$num			# find maximum
+	test	$one,$one
+	movdqu	`40*$i+24-40*2`($inp),@inp[$i]	# load IV
+	mov	$one,`32+4*$i`(%rsp)		# initialize counters
+	cmovle	%rsp,@inptr[$i]			# cancel input
+___
+}
+$code.=<<___;
+	test	$num,$num
+	jz	.Ldec4x_done
+
+	movups	0x10-0x78($key),$rndkey1
+	movups	0x20-0x78($key),$rndkey0
+	mov	0xf0-0x78($key),$rounds
+	movdqu	(@inptr[0]),@out[0]		# load inputs
+	movdqu	(@inptr[1]),@out[1]
+	 pxor	$zero,@out[0]
+	movdqu	(@inptr[2]),@out[2]
+	 pxor	$zero,@out[1]
+	movdqu	(@inptr[3]),@out[3]
+	 pxor	$zero,@out[2]
+	 pxor	$zero,@out[3]
+	movdqa	32(%rsp),$counters		# load counters
+	xor	$offset,$offset
+	jmp	.Loop_dec4x
+
+.align	32
+.Loop_dec4x:
+	add	\$16,$offset
+	lea	16(%rsp),$sink			# sink pointer
+	mov	\$1,$one			# constant of 1
+	sub	$offset,$sink
+
+	aesdec		$rndkey1,@out[0]
+	prefetcht0	31(@inptr[0],$offset)	# prefetch input
+	prefetcht0	31(@inptr[1],$offset)
+	aesdec		$rndkey1,@out[1]
+	prefetcht0	31(@inptr[2],$offset)
+	prefetcht0	31(@inptr[3],$offset)
+	aesdec		$rndkey1,@out[2]
+	aesdec		$rndkey1,@out[3]
+	movups		0x30-0x78($key),$rndkey1
+___
+for($i=0;$i<4;$i++) {
+my $rndkey = ($i&1) ? $rndkey1 : $rndkey0;
+$code.=<<___;
+	 cmp		`32+4*$i`(%rsp),$one
+	aesdec		$rndkey,@out[0]
+	aesdec		$rndkey,@out[1]
+	aesdec		$rndkey,@out[2]
+	 cmovge		$sink,@inptr[$i]	# cancel input
+	 cmovg		$sink,@outptr[$i]	# sink output
+	aesdec		$rndkey,@out[3]
+	movups		`0x40+16*$i-0x78`($key),$rndkey
+___
+}
+$code.=<<___;
+	 movdqa		$counters,$mask
+	aesdec		$rndkey0,@out[0]
+	prefetcht0	15(@outptr[0],$offset)	# prefetch output
+	prefetcht0	15(@outptr[1],$offset)
+	aesdec		$rndkey0,@out[1]
+	prefetcht0	15(@outptr[2],$offset)
+	prefetcht0	15(@outptr[3],$offset)
+	aesdec		$rndkey0,@out[2]
+	aesdec		$rndkey0,@out[3]
+	movups		0x80-0x78($key),$rndkey0
+	 pxor		$zero,$zero
+
+	aesdec		$rndkey1,@out[0]
+	 pcmpgtd	$zero,$mask
+	 movdqu		-0x78($key),$zero	# reload 0-round key
+	aesdec		$rndkey1,@out[1]
+	 paddd		$mask,$counters		# decrement counters
+	 movdqa		$counters,32(%rsp)	# update counters
+	aesdec		$rndkey1,@out[2]
+	aesdec		$rndkey1,@out[3]
+	movups		0x90-0x78($key),$rndkey1
+
+	cmp	\$11,$rounds
+
+	aesdec		$rndkey0,@out[0]
+	aesdec		$rndkey0,@out[1]
+	aesdec		$rndkey0,@out[2]
+	aesdec		$rndkey0,@out[3]
+	movups		0xa0-0x78($key),$rndkey0
+
+	jb	.Ldec4x_tail
+
+	aesdec		$rndkey1,@out[0]
+	aesdec		$rndkey1,@out[1]
+	aesdec		$rndkey1,@out[2]
+	aesdec		$rndkey1,@out[3]
+	movups		0xb0-0x78($key),$rndkey1
+
+	aesdec		$rndkey0,@out[0]
+	aesdec		$rndkey0,@out[1]
+	aesdec		$rndkey0,@out[2]
+	aesdec		$rndkey0,@out[3]
+	movups		0xc0-0x78($key),$rndkey0
+
+	je	.Ldec4x_tail
+
+	aesdec		$rndkey1,@out[0]
+	aesdec		$rndkey1,@out[1]
+	aesdec		$rndkey1,@out[2]
+	aesdec		$rndkey1,@out[3]
+	movups		0xd0-0x78($key),$rndkey1
+
+	aesdec		$rndkey0,@out[0]
+	aesdec		$rndkey0,@out[1]
+	aesdec		$rndkey0,@out[2]
+	aesdec		$rndkey0,@out[3]
+	movups		0xe0-0x78($key),$rndkey0
+	jmp	.Ldec4x_tail
+
+.align	32
+.Ldec4x_tail:
+	aesdec		$rndkey1,@out[0]
+	aesdec		$rndkey1,@out[1]
+	aesdec		$rndkey1,@out[2]
+	 pxor		$rndkey0,@inp[0]
+	 pxor		$rndkey0,@inp[1]
+	aesdec		$rndkey1,@out[3]
+	movdqu		0x10-0x78($key),$rndkey1
+	 pxor		$rndkey0,@inp[2]
+	 pxor		$rndkey0,@inp[3]
+	movdqu		0x20-0x78($key),$rndkey0
+
+	aesdeclast	@inp[0],@out[0]
+	aesdeclast	@inp[1],@out[1]
+	 movdqu		-16(@inptr[0],$offset),@inp[0]	# load next IV
+	 movdqu		-16(@inptr[1],$offset),@inp[1]
+	aesdeclast	@inp[2],@out[2]
+	aesdeclast	@inp[3],@out[3]
+	 movdqu		-16(@inptr[2],$offset),@inp[2]
+	 movdqu		-16(@inptr[3],$offset),@inp[3]
+
+	movups		@out[0],-16(@outptr[0],$offset)
+	 movdqu		(@inptr[0],$offset),@out[0]
+	movups		@out[1],-16(@outptr[1],$offset)
+	 movdqu		(@inptr[1],$offset),@out[1]
+	 pxor		$zero,@out[0]
+	movups		@out[2],-16(@outptr[2],$offset)
+	 movdqu		(@inptr[2],$offset),@out[2]
+	 pxor		$zero,@out[1]
+	movups		@out[3],-16(@outptr[3],$offset)
+	 movdqu		(@inptr[3],$offset),@out[3]
+	 pxor		$zero,@out[2]
+	 pxor		$zero,@out[3]
+
+	dec	$num
+	jnz	.Loop_dec4x
+
+	mov	16(%rsp),%rax			# original %rsp
+.cfi_def_cfa	%rax,8
+	mov	24(%rsp),$num
+
+	lea	`40*4`($inp),$inp
+	dec	$num
+	jnz	.Ldec4x_loop_grande
+
+.Ldec4x_done:
+___
+$code.=<<___ if ($win64);
+	movaps	-0xd8(%rax),%xmm6
+	movaps	-0xc8(%rax),%xmm7
+	movaps	-0xb8(%rax),%xmm8
+	movaps	-0xa8(%rax),%xmm9
+	movaps	-0x98(%rax),%xmm10
+	movaps	-0x88(%rax),%xmm11
+	movaps	-0x78(%rax),%xmm12
+	#movaps	-0x68(%rax),%xmm13
+	#movaps	-0x58(%rax),%xmm14
+	#movaps	-0x48(%rax),%xmm15
+___
+$code.=<<___;
+	mov	-48(%rax),%r15
+.cfi_restore	%r15
+	mov	-40(%rax),%r14
+.cfi_restore	%r14
+	mov	-32(%rax),%r13
+.cfi_restore	%r13
+	mov	-24(%rax),%r12
+.cfi_restore	%r12
+	mov	-16(%rax),%rbp
+.cfi_restore	%rbp
+	mov	-8(%rax),%rbx
+.cfi_restore	%rbx
+	lea	(%rax),%rsp
+.cfi_def_cfa_register	%rsp
+.Ldec4x_epilogue:
+	ret
+.cfi_endproc
+.size	aesni_multi_cbc_decrypt,.-aesni_multi_cbc_decrypt
+___
+
+						if ($avx) {{{
+my @ptr=map("%r$_",(8..15));
+my $offload=$sink;
+
+my @out=map("%xmm$_",(2..9));
+my @inp=map("%xmm$_",(10..13));
+my ($counters,$zero)=("%xmm14","%xmm15");
+
+$code.=<<___;
+.type	aesni_multi_cbc_encrypt_avx,\@function,3
+.align	32
+aesni_multi_cbc_encrypt_avx:
+.cfi_startproc
+_avx_cbc_enc_shortcut:
+	mov	%rsp,%rax
+.cfi_def_cfa_register	%rax
+	push	%rbx
+.cfi_push	%rbx
+	push	%rbp
+.cfi_push	%rbp
+	push	%r12
+.cfi_push	%r12
+	push	%r13
+.cfi_push	%r13
+	push	%r14
+.cfi_push	%r14
+	push	%r15
+.cfi_push	%r15
+___
+$code.=<<___ if ($win64);
+	lea	-0xa8(%rsp),%rsp
+	movaps	%xmm6,(%rsp)
+	movaps	%xmm7,0x10(%rsp)
+	movaps	%xmm8,0x20(%rsp)
+	movaps	%xmm9,0x30(%rsp)
+	movaps	%xmm10,0x40(%rsp)
+	movaps	%xmm11,0x50(%rsp)
+	movaps	%xmm12,-0x78(%rax)
+	movaps	%xmm13,-0x68(%rax)
+	movaps	%xmm14,-0x58(%rax)
+	movaps	%xmm15,-0x48(%rax)
+___
+$code.=<<___;
+	# stack layout
+	#
+	# +0	output sink
+	# +16	input sink [original %rsp and $num]
+	# +32	counters
+	# +64	distances between inputs and outputs
+	# +128	off-load area for @inp[0..3]
+
+	sub	\$192,%rsp
+	and	\$-128,%rsp
+	mov	%rax,16(%rsp)			# original %rsp
+.cfi_cfa_expression	%rsp+16,deref,+8
+
+.Lenc8x_body:
+	vzeroupper
+	vmovdqu	($key),$zero			# 0-round key
+	lea	0x78($key),$key			# size optimization
+	lea	40*4($inp),$inp
+	shr	\$1,$num
+
+.Lenc8x_loop_grande:
+	#mov	$num,24(%rsp)			# original $num
+	xor	$num,$num
+___
+for($i=0;$i<8;$i++) {
+  my $temp = $i ? $offload : $offset;
+    $code.=<<___;
+	mov	`40*$i+16-40*4`($inp),$one	# borrow $one for number of blocks
+	mov	`40*$i+0-40*4`($inp),@ptr[$i]	# input pointer
+	cmp	$num,$one
+	mov	`40*$i+8-40*4`($inp),$temp	# output pointer
+	cmovg	$one,$num			# find maximum
+	test	$one,$one
+	vmovdqu	`40*$i+24-40*4`($inp),@out[$i]	# load IV
+	mov	$one,`32+4*$i`(%rsp)		# initialize counters
+	cmovle	%rsp,@ptr[$i]			# cancel input
+	sub	@ptr[$i],$temp			# distance between input and output
+	mov	$temp,`64+8*$i`(%rsp)		# initialize distances
+___
+}
+$code.=<<___;
+	test	$num,$num
+	jz	.Lenc8x_done
+
+	vmovups	0x10-0x78($key),$rndkey1
+	vmovups	0x20-0x78($key),$rndkey0
+	mov	0xf0-0x78($key),$rounds
+
+	vpxor	(@ptr[0]),$zero,@inp[0]		# load inputs and xor with 0-round
+	 lea	128(%rsp),$offload		# offload area
+	vpxor	(@ptr[1]),$zero,@inp[1]
+	vpxor	(@ptr[2]),$zero,@inp[2]
+	vpxor	(@ptr[3]),$zero,@inp[3]
+	 vpxor	@inp[0],@out[0],@out[0]
+	vpxor	(@ptr[4]),$zero,@inp[0]
+	 vpxor	@inp[1],@out[1],@out[1]
+	vpxor	(@ptr[5]),$zero,@inp[1]
+	 vpxor	@inp[2],@out[2],@out[2]
+	vpxor	(@ptr[6]),$zero,@inp[2]
+	 vpxor	@inp[3],@out[3],@out[3]
+	vpxor	(@ptr[7]),$zero,@inp[3]
+	 vpxor	@inp[0],@out[4],@out[4]
+	mov	\$1,$one			# constant of 1
+	 vpxor	@inp[1],@out[5],@out[5]
+	 vpxor	@inp[2],@out[6],@out[6]
+	 vpxor	@inp[3],@out[7],@out[7]
+	jmp	.Loop_enc8x
+
+.align	32
+.Loop_enc8x:
+___
+for($i=0;$i<8;$i++) {
+my $rndkey=($i&1)?$rndkey0:$rndkey1;
+$code.=<<___;
+	vaesenc		$rndkey,@out[0],@out[0]
+	 cmp		32+4*$i(%rsp),$one
+___
+$code.=<<___ if ($i);
+	 mov		64+8*$i(%rsp),$offset
+___
+$code.=<<___;
+	vaesenc		$rndkey,@out[1],@out[1]
+	prefetcht0	31(@ptr[$i])			# prefetch input
+	vaesenc		$rndkey,@out[2],@out[2]
+___
+$code.=<<___ if ($i>1);
+	prefetcht0	15(@ptr[$i-2])			# prefetch output
+___
+$code.=<<___;
+	vaesenc		$rndkey,@out[3],@out[3]
+	 lea		(@ptr[$i],$offset),$offset
+	 cmovge		%rsp,@ptr[$i]			# cancel input
+	vaesenc		$rndkey,@out[4],@out[4]
+	 cmovg		%rsp,$offset			# sink output
+	vaesenc		$rndkey,@out[5],@out[5]
+	 sub		@ptr[$i],$offset
+	vaesenc		$rndkey,@out[6],@out[6]
+	 vpxor		16(@ptr[$i]),$zero,@inp[$i%4]	# load input and xor with 0-round
+	 mov		$offset,64+8*$i(%rsp)
+	vaesenc		$rndkey,@out[7],@out[7]
+	vmovups		`16*(3+$i)-0x78`($key),$rndkey
+	 lea		16(@ptr[$i],$offset),@ptr[$i]	# switch to output
+___
+$code.=<<___ if ($i<4);
+	 vmovdqu	@inp[$i%4],`16*$i`($offload)	# off-load
+___
+}
+$code.=<<___;
+	 vmovdqu	32(%rsp),$counters
+	prefetcht0	15(@ptr[$i-2])			# prefetch output
+	prefetcht0	15(@ptr[$i-1])
+	cmp	\$11,$rounds
+	jb	.Lenc8x_tail
+
+	vaesenc		$rndkey1,@out[0],@out[0]
+	vaesenc		$rndkey1,@out[1],@out[1]
+	vaesenc		$rndkey1,@out[2],@out[2]
+	vaesenc		$rndkey1,@out[3],@out[3]
+	vaesenc		$rndkey1,@out[4],@out[4]
+	vaesenc		$rndkey1,@out[5],@out[5]
+	vaesenc		$rndkey1,@out[6],@out[6]
+	vaesenc		$rndkey1,@out[7],@out[7]
+	vmovups		0xb0-0x78($key),$rndkey1
+
+	vaesenc		$rndkey0,@out[0],@out[0]
+	vaesenc		$rndkey0,@out[1],@out[1]
+	vaesenc		$rndkey0,@out[2],@out[2]
+	vaesenc		$rndkey0,@out[3],@out[3]
+	vaesenc		$rndkey0,@out[4],@out[4]
+	vaesenc		$rndkey0,@out[5],@out[5]
+	vaesenc		$rndkey0,@out[6],@out[6]
+	vaesenc		$rndkey0,@out[7],@out[7]
+	vmovups		0xc0-0x78($key),$rndkey0
+	je	.Lenc8x_tail
+
+	vaesenc		$rndkey1,@out[0],@out[0]
+	vaesenc		$rndkey1,@out[1],@out[1]
+	vaesenc		$rndkey1,@out[2],@out[2]
+	vaesenc		$rndkey1,@out[3],@out[3]
+	vaesenc		$rndkey1,@out[4],@out[4]
+	vaesenc		$rndkey1,@out[5],@out[5]
+	vaesenc		$rndkey1,@out[6],@out[6]
+	vaesenc		$rndkey1,@out[7],@out[7]
+	vmovups		0xd0-0x78($key),$rndkey1
+
+	vaesenc		$rndkey0,@out[0],@out[0]
+	vaesenc		$rndkey0,@out[1],@out[1]
+	vaesenc		$rndkey0,@out[2],@out[2]
+	vaesenc		$rndkey0,@out[3],@out[3]
+	vaesenc		$rndkey0,@out[4],@out[4]
+	vaesenc		$rndkey0,@out[5],@out[5]
+	vaesenc		$rndkey0,@out[6],@out[6]
+	vaesenc		$rndkey0,@out[7],@out[7]
+	vmovups		0xe0-0x78($key),$rndkey0
+
+.Lenc8x_tail:
+	vaesenc		$rndkey1,@out[0],@out[0]
+	 vpxor		$zero,$zero,$zero
+	vaesenc		$rndkey1,@out[1],@out[1]
+	vaesenc		$rndkey1,@out[2],@out[2]
+	 vpcmpgtd	$zero,$counters,$zero
+	vaesenc		$rndkey1,@out[3],@out[3]
+	vaesenc		$rndkey1,@out[4],@out[4]
+	 vpaddd		$counters,$zero,$zero		# decrement counters
+	 vmovdqu	48(%rsp),$counters
+	vaesenc		$rndkey1,@out[5],@out[5]
+	 mov		64(%rsp),$offset		# pre-load 1st offset
+	vaesenc		$rndkey1,@out[6],@out[6]
+	vaesenc		$rndkey1,@out[7],@out[7]
+	vmovups		0x10-0x78($key),$rndkey1
+
+	vaesenclast	$rndkey0,@out[0],@out[0]
+	 vmovdqa	$zero,32(%rsp)			# update counters
+	 vpxor		$zero,$zero,$zero
+	vaesenclast	$rndkey0,@out[1],@out[1]
+	vaesenclast	$rndkey0,@out[2],@out[2]
+	 vpcmpgtd	$zero,$counters,$zero
+	vaesenclast	$rndkey0,@out[3],@out[3]
+	vaesenclast	$rndkey0,@out[4],@out[4]
+	 vpaddd		$zero,$counters,$counters	# decrement counters
+	 vmovdqu	-0x78($key),$zero		# 0-round
+	vaesenclast	$rndkey0,@out[5],@out[5]
+	vaesenclast	$rndkey0,@out[6],@out[6]
+	 vmovdqa	$counters,48(%rsp)		# update counters
+	vaesenclast	$rndkey0,@out[7],@out[7]
+	vmovups		0x20-0x78($key),$rndkey0
+
+	vmovups		@out[0],-16(@ptr[0])		# write output
+	 sub		$offset,@ptr[0]			# switch to input
+	 vpxor		0x00($offload),@out[0],@out[0]
+	vmovups		@out[1],-16(@ptr[1])
+	 sub		`64+1*8`(%rsp),@ptr[1]
+	 vpxor		0x10($offload),@out[1],@out[1]
+	vmovups		@out[2],-16(@ptr[2])
+	 sub		`64+2*8`(%rsp),@ptr[2]
+	 vpxor		0x20($offload),@out[2],@out[2]
+	vmovups		@out[3],-16(@ptr[3])
+	 sub		`64+3*8`(%rsp),@ptr[3]
+	 vpxor		0x30($offload),@out[3],@out[3]
+	vmovups		@out[4],-16(@ptr[4])
+	 sub		`64+4*8`(%rsp),@ptr[4]
+	 vpxor		@inp[0],@out[4],@out[4]
+	vmovups		@out[5],-16(@ptr[5])
+	 sub		`64+5*8`(%rsp),@ptr[5]
+	 vpxor		@inp[1],@out[5],@out[5]
+	vmovups		@out[6],-16(@ptr[6])
+	 sub		`64+6*8`(%rsp),@ptr[6]
+	 vpxor		@inp[2],@out[6],@out[6]
+	vmovups		@out[7],-16(@ptr[7])
+	 sub		`64+7*8`(%rsp),@ptr[7]
+	 vpxor		@inp[3],@out[7],@out[7]
+
+	dec	$num
+	jnz	.Loop_enc8x
+
+	mov	16(%rsp),%rax			# original %rsp
+.cfi_def_cfa	%rax,8
+	#mov	24(%rsp),$num
+	#lea	`40*8`($inp),$inp
+	#dec	$num
+	#jnz	.Lenc8x_loop_grande
+
+.Lenc8x_done:
+	vzeroupper
+___
+$code.=<<___ if ($win64);
+	movaps	-0xd8(%rax),%xmm6
+	movaps	-0xc8(%rax),%xmm7
+	movaps	-0xb8(%rax),%xmm8
+	movaps	-0xa8(%rax),%xmm9
+	movaps	-0x98(%rax),%xmm10
+	movaps	-0x88(%rax),%xmm11
+	movaps	-0x78(%rax),%xmm12
+	movaps	-0x68(%rax),%xmm13
+	movaps	-0x58(%rax),%xmm14
+	movaps	-0x48(%rax),%xmm15
+___
+$code.=<<___;
+	mov	-48(%rax),%r15
+.cfi_restore	%r15
+	mov	-40(%rax),%r14
+.cfi_restore	%r14
+	mov	-32(%rax),%r13
+.cfi_restore	%r13
+	mov	-24(%rax),%r12
+.cfi_restore	%r12
+	mov	-16(%rax),%rbp
+.cfi_restore	%rbp
+	mov	-8(%rax),%rbx
+.cfi_restore	%rbx
+	lea	(%rax),%rsp
+.cfi_def_cfa_register	%rsp
+.Lenc8x_epilogue:
+	ret
+.cfi_endproc
+.size	aesni_multi_cbc_encrypt_avx,.-aesni_multi_cbc_encrypt_avx
+
+.type	aesni_multi_cbc_decrypt_avx,\@function,3
+.align	32
+aesni_multi_cbc_decrypt_avx:
+.cfi_startproc
+_avx_cbc_dec_shortcut:
+	mov	%rsp,%rax
+.cfi_def_cfa_register	%rax
+	push	%rbx
+.cfi_push	%rbx
+	push	%rbp
+.cfi_push	%rbp
+	push	%r12
+.cfi_push	%r12
+	push	%r13
+.cfi_push	%r13
+	push	%r14
+.cfi_push	%r14
+	push	%r15
+.cfi_push	%r15
+___
+$code.=<<___ if ($win64);
+	lea	-0xa8(%rsp),%rsp
+	movaps	%xmm6,(%rsp)
+	movaps	%xmm7,0x10(%rsp)
+	movaps	%xmm8,0x20(%rsp)
+	movaps	%xmm9,0x30(%rsp)
+	movaps	%xmm10,0x40(%rsp)
+	movaps	%xmm11,0x50(%rsp)
+	movaps	%xmm12,-0x78(%rax)
+	movaps	%xmm13,-0x68(%rax)
+	movaps	%xmm14,-0x58(%rax)
+	movaps	%xmm15,-0x48(%rax)
+___
+$code.=<<___;
+	# stack layout
+	#
+	# +0	output sink
+	# +16	input sink [original %rsp and $num]
+	# +32	counters
+	# +64	distances between inputs and outputs
+	# +128	off-load area for @inp[0..3]
+	# +192	IV/input offload
+
+	sub	\$256,%rsp
+	and	\$-256,%rsp
+	sub	\$192,%rsp
+	mov	%rax,16(%rsp)			# original %rsp
+.cfi_cfa_expression	%rsp+16,deref,+8
+
+.Ldec8x_body:
+	vzeroupper
+	vmovdqu	($key),$zero			# 0-round key
+	lea	0x78($key),$key			# size optimization
+	lea	40*4($inp),$inp
+	shr	\$1,$num
+
+.Ldec8x_loop_grande:
+	#mov	$num,24(%rsp)			# original $num
+	xor	$num,$num
+___
+for($i=0;$i<8;$i++) {
+  my $temp = $i ? $offload : $offset;
+    $code.=<<___;
+	mov	`40*$i+16-40*4`($inp),$one	# borrow $one for number of blocks
+	mov	`40*$i+0-40*4`($inp),@ptr[$i]	# input pointer
+	cmp	$num,$one
+	mov	`40*$i+8-40*4`($inp),$temp	# output pointer
+	cmovg	$one,$num			# find maximum
+	test	$one,$one
+	vmovdqu	`40*$i+24-40*4`($inp),@out[$i]	# load IV
+	mov	$one,`32+4*$i`(%rsp)		# initialize counters
+	cmovle	%rsp,@ptr[$i]			# cancel input
+	sub	@ptr[$i],$temp			# distance between input and output
+	mov	$temp,`64+8*$i`(%rsp)		# initialize distances
+	vmovdqu	@out[$i],`192+16*$i`(%rsp)	# offload IV
+___
+}
+$code.=<<___;
+	test	$num,$num
+	jz	.Ldec8x_done
+
+	vmovups	0x10-0x78($key),$rndkey1
+	vmovups	0x20-0x78($key),$rndkey0
+	mov	0xf0-0x78($key),$rounds
+	 lea	192+128(%rsp),$offload		# offload area
+
+	vmovdqu	(@ptr[0]),@out[0]		# load inputs
+	vmovdqu	(@ptr[1]),@out[1]
+	vmovdqu	(@ptr[2]),@out[2]
+	vmovdqu	(@ptr[3]),@out[3]
+	vmovdqu	(@ptr[4]),@out[4]
+	vmovdqu	(@ptr[5]),@out[5]
+	vmovdqu	(@ptr[6]),@out[6]
+	vmovdqu	(@ptr[7]),@out[7]
+	vmovdqu	@out[0],0x00($offload)		# offload inputs
+	vpxor	$zero,@out[0],@out[0]		# xor inputs with 0-round
+	vmovdqu	@out[1],0x10($offload)
+	vpxor	$zero,@out[1],@out[1]
+	vmovdqu	@out[2],0x20($offload)
+	vpxor	$zero,@out[2],@out[2]
+	vmovdqu	@out[3],0x30($offload)
+	vpxor	$zero,@out[3],@out[3]
+	vmovdqu	@out[4],0x40($offload)
+	vpxor	$zero,@out[4],@out[4]
+	vmovdqu	@out[5],0x50($offload)
+	vpxor	$zero,@out[5],@out[5]
+	vmovdqu	@out[6],0x60($offload)
+	vpxor	$zero,@out[6],@out[6]
+	vmovdqu	@out[7],0x70($offload)
+	vpxor	$zero,@out[7],@out[7]
+	xor	\$0x80,$offload
+	mov	\$1,$one			# constant of 1
+	jmp	.Loop_dec8x
+
+.align	32
+.Loop_dec8x:
+___
+for($i=0;$i<8;$i++) {
+my $rndkey=($i&1)?$rndkey0:$rndkey1;
+$code.=<<___;
+	vaesdec		$rndkey,@out[0],@out[0]
+	 cmp		32+4*$i(%rsp),$one
+___
+$code.=<<___ if ($i);
+	 mov		64+8*$i(%rsp),$offset
+___
+$code.=<<___;
+	vaesdec		$rndkey,@out[1],@out[1]
+	prefetcht0	31(@ptr[$i])			# prefetch input
+	vaesdec		$rndkey,@out[2],@out[2]
+___
+$code.=<<___ if ($i>1);
+	prefetcht0	15(@ptr[$i-2])			# prefetch output
+___
+$code.=<<___;
+	vaesdec		$rndkey,@out[3],@out[3]
+	 lea		(@ptr[$i],$offset),$offset
+	 cmovge		%rsp,@ptr[$i]			# cancel input
+	vaesdec		$rndkey,@out[4],@out[4]
+	 cmovg		%rsp,$offset			# sink output
+	vaesdec		$rndkey,@out[5],@out[5]
+	 sub		@ptr[$i],$offset
+	vaesdec		$rndkey,@out[6],@out[6]
+	 vmovdqu	16(@ptr[$i]),@inp[$i%4]		# load input
+	 mov		$offset,64+8*$i(%rsp)
+	vaesdec		$rndkey,@out[7],@out[7]
+	vmovups		`16*(3+$i)-0x78`($key),$rndkey
+	 lea		16(@ptr[$i],$offset),@ptr[$i]	# switch to output
+___
+$code.=<<___ if ($i<4);
+	 vmovdqu	@inp[$i%4],`128+16*$i`(%rsp)	# off-load
+___
+}
+$code.=<<___;
+	 vmovdqu	32(%rsp),$counters
+	prefetcht0	15(@ptr[$i-2])			# prefetch output
+	prefetcht0	15(@ptr[$i-1])
+	cmp	\$11,$rounds
+	jb	.Ldec8x_tail
+
+	vaesdec		$rndkey1,@out[0],@out[0]
+	vaesdec		$rndkey1,@out[1],@out[1]
+	vaesdec		$rndkey1,@out[2],@out[2]
+	vaesdec		$rndkey1,@out[3],@out[3]
+	vaesdec		$rndkey1,@out[4],@out[4]
+	vaesdec		$rndkey1,@out[5],@out[5]
+	vaesdec		$rndkey1,@out[6],@out[6]
+	vaesdec		$rndkey1,@out[7],@out[7]
+	vmovups		0xb0-0x78($key),$rndkey1
+
+	vaesdec		$rndkey0,@out[0],@out[0]
+	vaesdec		$rndkey0,@out[1],@out[1]
+	vaesdec		$rndkey0,@out[2],@out[2]
+	vaesdec		$rndkey0,@out[3],@out[3]
+	vaesdec		$rndkey0,@out[4],@out[4]
+	vaesdec		$rndkey0,@out[5],@out[5]
+	vaesdec		$rndkey0,@out[6],@out[6]
+	vaesdec		$rndkey0,@out[7],@out[7]
+	vmovups		0xc0-0x78($key),$rndkey0
+	je	.Ldec8x_tail
+
+	vaesdec		$rndkey1,@out[0],@out[0]
+	vaesdec		$rndkey1,@out[1],@out[1]
+	vaesdec		$rndkey1,@out[2],@out[2]
+	vaesdec		$rndkey1,@out[3],@out[3]
+	vaesdec		$rndkey1,@out[4],@out[4]
+	vaesdec		$rndkey1,@out[5],@out[5]
+	vaesdec		$rndkey1,@out[6],@out[6]
+	vaesdec		$rndkey1,@out[7],@out[7]
+	vmovups		0xd0-0x78($key),$rndkey1
+
+	vaesdec		$rndkey0,@out[0],@out[0]
+	vaesdec		$rndkey0,@out[1],@out[1]
+	vaesdec		$rndkey0,@out[2],@out[2]
+	vaesdec		$rndkey0,@out[3],@out[3]
+	vaesdec		$rndkey0,@out[4],@out[4]
+	vaesdec		$rndkey0,@out[5],@out[5]
+	vaesdec		$rndkey0,@out[6],@out[6]
+	vaesdec		$rndkey0,@out[7],@out[7]
+	vmovups		0xe0-0x78($key),$rndkey0
+
+.Ldec8x_tail:
+	vaesdec		$rndkey1,@out[0],@out[0]
+	 vpxor		$zero,$zero,$zero
+	vaesdec		$rndkey1,@out[1],@out[1]
+	vaesdec		$rndkey1,@out[2],@out[2]
+	 vpcmpgtd	$zero,$counters,$zero
+	vaesdec		$rndkey1,@out[3],@out[3]
+	vaesdec		$rndkey1,@out[4],@out[4]
+	 vpaddd		$counters,$zero,$zero		# decrement counters
+	 vmovdqu	48(%rsp),$counters
+	vaesdec		$rndkey1,@out[5],@out[5]
+	 mov		64(%rsp),$offset		# pre-load 1st offset
+	vaesdec		$rndkey1,@out[6],@out[6]
+	vaesdec		$rndkey1,@out[7],@out[7]
+	vmovups		0x10-0x78($key),$rndkey1
+
+	vaesdeclast	$rndkey0,@out[0],@out[0]
+	 vmovdqa	$zero,32(%rsp)			# update counters
+	 vpxor		$zero,$zero,$zero
+	vaesdeclast	$rndkey0,@out[1],@out[1]
+	vpxor		0x00($offload),@out[0],@out[0]	# xor with IV
+	vaesdeclast	$rndkey0,@out[2],@out[2]
+	vpxor		0x10($offload),@out[1],@out[1]
+	 vpcmpgtd	$zero,$counters,$zero
+	vaesdeclast	$rndkey0,@out[3],@out[3]
+	vpxor		0x20($offload),@out[2],@out[2]
+	vaesdeclast	$rndkey0,@out[4],@out[4]
+	vpxor		0x30($offload),@out[3],@out[3]
+	 vpaddd		$zero,$counters,$counters	# decrement counters
+	 vmovdqu	-0x78($key),$zero		# 0-round
+	vaesdeclast	$rndkey0,@out[5],@out[5]
+	vpxor		0x40($offload),@out[4],@out[4]
+	vaesdeclast	$rndkey0,@out[6],@out[6]
+	vpxor		0x50($offload),@out[5],@out[5]
+	 vmovdqa	$counters,48(%rsp)		# update counters
+	vaesdeclast	$rndkey0,@out[7],@out[7]
+	vpxor		0x60($offload),@out[6],@out[6]
+	vmovups		0x20-0x78($key),$rndkey0
+
+	vmovups		@out[0],-16(@ptr[0])		# write output
+	 sub		$offset,@ptr[0]			# switch to input
+	 vmovdqu	128+0(%rsp),@out[0]
+	vpxor		0x70($offload),@out[7],@out[7]
+	vmovups		@out[1],-16(@ptr[1])
+	 sub		`64+1*8`(%rsp),@ptr[1]
+	 vmovdqu	@out[0],0x00($offload)
+	 vpxor		$zero,@out[0],@out[0]
+	 vmovdqu	128+16(%rsp),@out[1]
+	vmovups		@out[2],-16(@ptr[2])
+	 sub		`64+2*8`(%rsp),@ptr[2]
+	 vmovdqu	@out[1],0x10($offload)
+	 vpxor		$zero,@out[1],@out[1]
+	 vmovdqu	128+32(%rsp),@out[2]
+	vmovups		@out[3],-16(@ptr[3])
+	 sub		`64+3*8`(%rsp),@ptr[3]
+	 vmovdqu	@out[2],0x20($offload)
+	 vpxor		$zero,@out[2],@out[2]
+	 vmovdqu	128+48(%rsp),@out[3]
+	vmovups		@out[4],-16(@ptr[4])
+	 sub		`64+4*8`(%rsp),@ptr[4]
+	 vmovdqu	@out[3],0x30($offload)
+	 vpxor		$zero,@out[3],@out[3]
+	 vmovdqu	@inp[0],0x40($offload)
+	 vpxor		@inp[0],$zero,@out[4]
+	vmovups		@out[5],-16(@ptr[5])
+	 sub		`64+5*8`(%rsp),@ptr[5]
+	 vmovdqu	@inp[1],0x50($offload)
+	 vpxor		@inp[1],$zero,@out[5]
+	vmovups		@out[6],-16(@ptr[6])
+	 sub		`64+6*8`(%rsp),@ptr[6]
+	 vmovdqu	@inp[2],0x60($offload)
+	 vpxor		@inp[2],$zero,@out[6]
+	vmovups		@out[7],-16(@ptr[7])
+	 sub		`64+7*8`(%rsp),@ptr[7]
+	 vmovdqu	@inp[3],0x70($offload)
+	 vpxor		@inp[3],$zero,@out[7]
+
+	xor	\$128,$offload
+	dec	$num
+	jnz	.Loop_dec8x
+
+	mov	16(%rsp),%rax			# original %rsp
+.cfi_def_cfa	%rax,8
+	#mov	24(%rsp),$num
+	#lea	`40*8`($inp),$inp
+	#dec	$num
+	#jnz	.Ldec8x_loop_grande
+
+.Ldec8x_done:
+	vzeroupper
+___
+$code.=<<___ if ($win64);
+	movaps	-0xd8(%rax),%xmm6
+	movaps	-0xc8(%rax),%xmm7
+	movaps	-0xb8(%rax),%xmm8
+	movaps	-0xa8(%rax),%xmm9
+	movaps	-0x98(%rax),%xmm10
+	movaps	-0x88(%rax),%xmm11
+	movaps	-0x78(%rax),%xmm12
+	movaps	-0x68(%rax),%xmm13
+	movaps	-0x58(%rax),%xmm14
+	movaps	-0x48(%rax),%xmm15
+___
+$code.=<<___;
+	mov	-48(%rax),%r15
+.cfi_restore	%r15
+	mov	-40(%rax),%r14
+.cfi_restore	%r14
+	mov	-32(%rax),%r13
+.cfi_restore	%r13
+	mov	-24(%rax),%r12
+.cfi_restore	%r12
+	mov	-16(%rax),%rbp
+.cfi_restore	%rbp
+	mov	-8(%rax),%rbx
+.cfi_restore	%rbx
+	lea	(%rax),%rsp
+.cfi_def_cfa_register	%rsp
+.Ldec8x_epilogue:
+	ret
+.cfi_endproc
+.size	aesni_multi_cbc_decrypt_avx,.-aesni_multi_cbc_decrypt_avx
+___
+						}}}
+
+if ($win64) {
+# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
+#		CONTEXT *context,DISPATCHER_CONTEXT *disp)
+$rec="%rcx";
+$frame="%rdx";
+$context="%r8";
+$disp="%r9";
+
+$code.=<<___;
+.extern	__imp_RtlVirtualUnwind
+.type	se_handler,\@abi-omnipotent
+.align	16
+se_handler:
+	push	%rsi
+	push	%rdi
+	push	%rbx
+	push	%rbp
+	push	%r12
+	push	%r13
+	push	%r14
+	push	%r15
+	pushfq
+	sub	\$64,%rsp
+
+	mov	120($context),%rax	# pull context->Rax
+	mov	248($context),%rbx	# pull context->Rip
+
+	mov	8($disp),%rsi		# disp->ImageBase
+	mov	56($disp),%r11		# disp->HandlerData
+
+	mov	0(%r11),%r10d		# HandlerData[0]
+	lea	(%rsi,%r10),%r10	# prologue label
+	cmp	%r10,%rbx		# context->Rip<.Lprologue
+	jb	.Lin_prologue
+
+	mov	152($context),%rax	# pull context->Rsp
+
+	mov	4(%r11),%r10d		# HandlerData[1]
+	lea	(%rsi,%r10),%r10	# epilogue label
+	cmp	%r10,%rbx		# context->Rip>=.Lepilogue
+	jae	.Lin_prologue
+
+	mov	16(%rax),%rax		# pull saved stack pointer
+
+	mov	-8(%rax),%rbx
+	mov	-16(%rax),%rbp
+	mov	-24(%rax),%r12
+	mov	-32(%rax),%r13
+	mov	-40(%rax),%r14
+	mov	-48(%rax),%r15
+	mov	%rbx,144($context)	# restore context->Rbx
+	mov	%rbp,160($context)	# restore context->Rbp
+	mov	%r12,216($context)	# restore context->R12
+	mov	%r13,224($context)	# restore context->R13
+	mov	%r14,232($context)	# restore context->R14
+	mov	%r15,240($context)	# restore context->R15
+
+	lea	-56-10*16(%rax),%rsi
+	lea	512($context),%rdi	# &context.Xmm6
+	mov	\$20,%ecx
+	.long	0xa548f3fc		# cld; rep movsq
+
+.Lin_prologue:
+	mov	8(%rax),%rdi
+	mov	16(%rax),%rsi
+	mov	%rax,152($context)	# restore context->Rsp
+	mov	%rsi,168($context)	# restore context->Rsi
+	mov	%rdi,176($context)	# restore context->Rdi
+
+	mov	40($disp),%rdi		# disp->ContextRecord
+	mov	$context,%rsi		# context
+	mov	\$154,%ecx		# sizeof(CONTEXT)
+	.long	0xa548f3fc		# cld; rep movsq
+
+	mov	$disp,%rsi
+	xor	%rcx,%rcx		# arg1, UNW_FLAG_NHANDLER
+	mov	8(%rsi),%rdx		# arg2, disp->ImageBase
+	mov	0(%rsi),%r8		# arg3, disp->ControlPc
+	mov	16(%rsi),%r9		# arg4, disp->FunctionEntry
+	mov	40(%rsi),%r10		# disp->ContextRecord
+	lea	56(%rsi),%r11		# &disp->HandlerData
+	lea	24(%rsi),%r12		# &disp->EstablisherFrame
+	mov	%r10,32(%rsp)		# arg5
+	mov	%r11,40(%rsp)		# arg6
+	mov	%r12,48(%rsp)		# arg7
+	mov	%rcx,56(%rsp)		# arg8, (NULL)
+	call	*__imp_RtlVirtualUnwind(%rip)
+
+	mov	\$1,%eax		# ExceptionContinueSearch
+	add	\$64,%rsp
+	popfq
+	pop	%r15
+	pop	%r14
+	pop	%r13
+	pop	%r12
+	pop	%rbp
+	pop	%rbx
+	pop	%rdi
+	pop	%rsi
+	ret
+.size	se_handler,.-se_handler
+
+.section	.pdata
+.align	4
+	.rva	.LSEH_begin_aesni_multi_cbc_encrypt
+	.rva	.LSEH_end_aesni_multi_cbc_encrypt
+	.rva	.LSEH_info_aesni_multi_cbc_encrypt
+	.rva	.LSEH_begin_aesni_multi_cbc_decrypt
+	.rva	.LSEH_end_aesni_multi_cbc_decrypt
+	.rva	.LSEH_info_aesni_multi_cbc_decrypt
+___
+$code.=<<___ if ($avx);
+	.rva	.LSEH_begin_aesni_multi_cbc_encrypt_avx
+	.rva	.LSEH_end_aesni_multi_cbc_encrypt_avx
+	.rva	.LSEH_info_aesni_multi_cbc_encrypt_avx
+	.rva	.LSEH_begin_aesni_multi_cbc_decrypt_avx
+	.rva	.LSEH_end_aesni_multi_cbc_decrypt_avx
+	.rva	.LSEH_info_aesni_multi_cbc_decrypt_avx
+___
+$code.=<<___;
+.section	.xdata
+.align	8
+.LSEH_info_aesni_multi_cbc_encrypt:
+	.byte	9,0,0,0
+	.rva	se_handler
+	.rva	.Lenc4x_body,.Lenc4x_epilogue		# HandlerData[]
+.LSEH_info_aesni_multi_cbc_decrypt:
+	.byte	9,0,0,0
+	.rva	se_handler
+	.rva	.Ldec4x_body,.Ldec4x_epilogue		# HandlerData[]
+___
+$code.=<<___ if ($avx);
+.LSEH_info_aesni_multi_cbc_encrypt_avx:
+	.byte	9,0,0,0
+	.rva	se_handler
+	.rva	.Lenc8x_body,.Lenc8x_epilogue		# HandlerData[]
+.LSEH_info_aesni_multi_cbc_decrypt_avx:
+	.byte	9,0,0,0
+	.rva	se_handler
+	.rva	.Ldec8x_body,.Ldec8x_epilogue		# HandlerData[]
+___
+}
+####################################################################
+
+sub rex {
+  local *opcode=shift;
+  my ($dst,$src)=@_;
+  my $rex=0;
+
+    $rex|=0x04			if($dst>=8);
+    $rex|=0x01			if($src>=8);
+    push @opcode,$rex|0x40	if($rex);
+}
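+# rex() sets REX.R (0x04) when the ModR/M reg operand (the destination
+# here) is %xmm8..15 and REX.B (0x01) when the r/m operand is, pushing
+# the 0x40-based prefix only if either bit is needed; for instance
+# rex(\@opcode,9,1) pushes 0x44.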
+
+sub aesni {
+  my $line=shift;
+  my @opcode=(0x66);
+
+    if ($line=~/(aeskeygenassist)\s+\$([x0-9a-f]+),\s*%xmm([0-9]+),\s*%xmm([0-9]+)/) {
+	rex(\@opcode,$4,$3);
+	push @opcode,0x0f,0x3a,0xdf;
+	push @opcode,0xc0|($3&7)|(($4&7)<<3);	# ModR/M
+	my $c=$2;
+	push @opcode,$c=~/^0/?oct($c):$c;
+	return ".byte\t".join(',',@opcode);
+    }
+    elsif ($line=~/(aes[a-z]+)\s+%xmm([0-9]+),\s*%xmm([0-9]+)/) {
+	my %opcodelet = (
+		"aesimc" => 0xdb,
+		"aesenc" => 0xdc,	"aesenclast" => 0xdd,
+		"aesdec" => 0xde,	"aesdeclast" => 0xdf
+	);
+	return undef if (!defined($opcodelet{$1}));
+	rex(\@opcode,$3,$2);
+	push @opcode,0x0f,0x38,$opcodelet{$1};
+	push @opcode,0xc0|($2&7)|(($3&7)<<3);	# ModR/M
+	return ".byte\t".join(',',@opcode);
+    }
+    elsif ($line=~/(aes[a-z]+)\s+([0x1-9a-fA-F]*)\(%rsp\),\s*%xmm([0-9]+)/) {
+	my %opcodelet = (
+		"aesenc" => 0xdc,	"aesenclast" => 0xdd,
+		"aesdec" => 0xde,	"aesdeclast" => 0xdf
+	);
+	return undef if (!defined($opcodelet{$1}));
+	my $off = $2;
+	push @opcode,0x44 if ($3>=8);
+	push @opcode,0x0f,0x38,$opcodelet{$1};
+	push @opcode,0x44|(($3&7)<<3),0x24;	# ModR/M
+	push @opcode,($off=~/^0/?oct($off):$off)&0xff;
+	return ".byte\t".join(',',@opcode);
+    }
+    return $line;
+}
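+# Worked example (for illustration): the register-register branch turns
+#	aesenc	%xmm1,%xmm2
+# into ".byte 102,15,56,220,209", i.e. 0x66,0x0f,0x38,0xdc plus ModR/M
+# 0xc0|(1&7)|((2&7)<<3) == 0xd1, so pre-AES-NI assemblers still cope.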
+
+$code =~ s/\`([^\`]*)\`/eval($1)/gem;
+$code =~ s/\b(aes.*%xmm[0-9]+).*$/aesni($1)/gem;
+
+print $code;
+close STDOUT or die "error closing STDOUT: $!";
diff --git a/ap/lib/libssl/openssl-1.1.1o/crypto/aes/asm/aesni-sha1-x86_64.pl b/ap/lib/libssl/openssl-1.1.1o/crypto/aes/asm/aesni-sha1-x86_64.pl
new file mode 100644
index 0000000..04fd13b
--- /dev/null
+++ b/ap/lib/libssl/openssl-1.1.1o/crypto/aes/asm/aesni-sha1-x86_64.pl
@@ -0,0 +1,2146 @@
+#! /usr/bin/env perl
+# Copyright 2011-2020 The OpenSSL Project Authors. All Rights Reserved.
+#
+# Licensed under the OpenSSL license (the "License").  You may not use
+# this file except in compliance with the License.  You can obtain a copy
+# in the file LICENSE in the source distribution or at
+# https://www.openssl.org/source/license.html
+
+#
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+#
+# June 2011
+#
+# This is an AESNI-CBC+SHA1 "stitch" implementation. The idea, as spelled
+# out in http://download.intel.com/design/intarch/papers/323686.pdf, is
+# that since AESNI-CBC encryption exhibits *very* low instruction-level
+# parallelism, interleaving it with another algorithm makes it possible
+# to utilize processor resources better and achieve better performance.
+# SHA1 instruction sequences(*) are taken from sha1-x86_64.pl and the
+# AESNI code is woven into them. Below are performance numbers in
+# cycles per processed byte, less is better, for standalone AESNI-CBC
+# encrypt, sum of the latter and standalone SHA1, and "stitched"
+# subroutine:
+#
+#		AES-128-CBC	+SHA1		stitch      gain
+# Westmere	3.77[+5.3]	9.07		6.55	    +38%
+# Sandy Bridge	5.05[+5.0(6.1)]	10.06(11.15)	5.98(7.05)  +68%(+58%)
+# Ivy Bridge	5.05[+4.6]	9.65		5.54        +74%
+# Haswell	4.43[+3.6(4.2)]	8.00(8.58)	4.55(5.21)  +75%(+65%)
+# Skylake	2.63[+3.5(4.1)]	6.17(6.69)	4.23(4.44)  +46%(+51%)
+# Bulldozer	5.77[+6.0]	11.72		6.37        +84%
+# Ryzen(**)	2.71[+1.93]	4.64		2.74        +69%
+# Goldmont(**)	3.82[+1.70]	5.52		4.20        +31%
+#
+#		AES-192-CBC
+# Westmere	4.51		9.81		6.80	    +44%
+# Sandy Bridge	6.05		11.06(12.15)	6.11(7.19)  +81%(+69%)
+# Ivy Bridge	6.05		10.65		6.07        +75%
+# Haswell	5.29		8.86(9.44)	5.32(5.32)  +67%(+77%)
+# Bulldozer	6.89		12.84		6.96        +84%
+#
+#		AES-256-CBC
+# Westmere	5.25		10.55		7.21	    +46%
+# Sandy Bridge	7.05		12.06(13.15)	7.12(7.72)  +69%(+70%)
+# Ivy Bridge	7.05		11.65		7.12        +64%
+# Haswell	6.19		9.76(10.34)	6.21(6.25)  +57%(+65%)
+# Skylake	3.62		7.16(7.68)	4.56(4.76)  +57%(+61%)
+# Bulldozer	8.00		13.95		8.25        +69%
+# Ryzen(**)	3.71		5.64		3.72        +52%
+# Goldmont(**)	5.35		7.05		5.76        +22%
+#
+# (*)	There are two code paths: SSSE3 and AVX. See sha1-x86_64.pl for
+#	background information. Above numbers in parentheses are SSSE3
+#	results collected on an AVX-capable CPU, i.e. they apply to OSes
+#	that don't support AVX.
+# (**)	SHAEXT results.
+#
+# Needless to say, it makes no sense to implement a "stitched"
+# *decrypt* subroutine: *both* AESNI-CBC decrypt and SHA1 already
+# utilize the available parallelism fully, so stitching would not give
+# any gain anyway. Well, there might be some, e.g. because of better
+# cache locality... For reference, here are performance results for
+# standalone AESNI-CBC decrypt:
+#
+#		AES-128-CBC	AES-192-CBC	AES-256-CBC
+# Westmere	1.25		1.50		1.75
+# Sandy Bridge	0.74		0.91		1.09
+# Ivy Bridge	0.74		0.90		1.11
+# Haswell	0.63		0.76		0.88
+# Bulldozer	0.70		0.85		0.99
+
+# And indeed:
+#
+#		AES-256-CBC	+SHA1		stitch      gain
+# Westmere	1.75		7.20		6.68        +7.8%
+# Sandy Bridge	1.09		6.09(7.22)	5.82(6.95)  +4.6%(+3.9%)
+# Ivy Bridge	1.11		5.70		5.45        +4.6%
+# Haswell	0.88		4.45(5.00)	4.39(4.69)  +1.4%(*)(+6.6%)
+# Bulldozer	0.99		6.95		5.95        +17%(**)
+#
+# (*)	The tiny improvement on Haswell is because the AVX1 stitch is
+#	compared against a sum that includes AVX2 SHA1.
+# (**)	Execution is fully dominated by integer code sequence and
+#	SIMD still hardly shows [in single-process benchmark;-]
+
+$flavour = shift;
+$output  = shift;
+if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
+
+$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
+( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
+die "can't locate x86_64-xlate.pl";
+
+$avx=1 if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
+		=~ /GNU assembler version ([2-9]\.[0-9]+)/ &&
+	   $1>=2.19);
+$avx=1 if (!$avx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
+	   `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/ &&
+	   $1>=2.09);
+$avx=1 if (!$avx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
+	   `ml64 2>&1` =~ /Version ([0-9]+)\./ &&
+	   $1>=10);
+$avx=1 if (!$avx && `$ENV{CC} -v 2>&1` =~ /((?:clang|LLVM) version|.*based on LLVM) ([0-9]+\.[0-9]+)/ && $2>=3.0);
+
+$shaext=1;	### set to zero if compiling for 1.0.1
+
+$stitched_decrypt=0;
+
+open OUT,"| \"$^X\" \"$xlate\" $flavour \"$output\"";
+*STDOUT=*OUT;
+
+# void aesni_cbc_sha1_enc(const void *inp,
+#			void *out,
+#			size_t length,
+#			const AES_KEY *key,
+#			unsigned char *iv,
+#			SHA_CTX *ctx,
+#			const void *in0);
+
+$code.=<<___;
+.text
+.extern	OPENSSL_ia32cap_P
+
+.globl	aesni_cbc_sha1_enc
+.type	aesni_cbc_sha1_enc,\@abi-omnipotent
+.align	32
+aesni_cbc_sha1_enc:
+.cfi_startproc
+	# caller should check for SSSE3 and AES-NI bits
+	mov	OPENSSL_ia32cap_P+0(%rip),%r10d
+	mov	OPENSSL_ia32cap_P+4(%rip),%r11
+___
+$code.=<<___ if ($shaext);
+	bt	\$61,%r11		# check SHA bit
+	jc	aesni_cbc_sha1_enc_shaext
+___
+$code.=<<___ if ($avx);
+	and	\$`1<<28`,%r11d		# mask AVX bit
+	and	\$`1<<30`,%r10d		# mask "Intel CPU" bit
+	or	%r11d,%r10d
+	cmp	\$`1<<28|1<<30`,%r10d
+	je	aesni_cbc_sha1_enc_avx
+___
+$code.=<<___;
+	jmp	aesni_cbc_sha1_enc_ssse3
+	ret
+.cfi_endproc
+.size	aesni_cbc_sha1_enc,.-aesni_cbc_sha1_enc
+___
+
+my ($in0,$out,$len,$key,$ivp,$ctx,$inp)=("%rdi","%rsi","%rdx","%rcx","%r8","%r9","%r10");
+
+my $Xi=4;
+my @X=map("%xmm$_",(4..7,0..3));
+my @Tx=map("%xmm$_",(8..10));
+my @V=($A,$B,$C,$D,$E)=("%eax","%ebx","%ecx","%edx","%ebp");	# size optimization
+my @T=("%esi","%edi");
+my $j=0; my $jj=0; my $r=0; my $sn=0; my $rx=0;
+my $K_XX_XX="%r11";
+my ($rndkey0,$iv,$in)=map("%xmm$_",(11..13));			# for enc
+my @rndkey=("%xmm14","%xmm15");					# for enc
+my ($inout0,$inout1,$inout2,$inout3)=map("%xmm$_",(12..15));	# for dec
+
+if (1) {	# reassign for Atom Silvermont
+    # The goal is to minimize the number of instructions with more than
+    # 3 prefix bytes, or in more practical terms to keep the AES-NI *and*
+    # SSSE3 instructions in the upper half of the register bank.
+    @X=map("%xmm$_",(8..11,4..7));
+    @Tx=map("%xmm$_",(12,13,3));
+    ($iv,$in,$rndkey0)=map("%xmm$_",(2,14,15));
+    @rndkey=("%xmm0","%xmm1");
+}
+
+sub AUTOLOAD()		# thunk [simplified] 32-bit style perlasm
+{ my $opcode = $AUTOLOAD; $opcode =~ s/.*:://;
+  my $arg = pop;
+    $arg = "\$$arg" if ($arg*1 eq $arg);
+    $code .= "\t$opcode\t".join(',',$arg,reverse @_)."\n";
+}
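+# For instance &rol("%eax",5) appends the line "rol $5,%eax" to $code: the
+# immediate is popped off the argument list, "$"-prefixed because it is
+# numeric, and the remaining operands are reversed into AT&T order.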
+
+my $_rol=sub { &rol(@_) };
+my $_ror=sub { &ror(@_) };
+
+$code.=<<___;
+.type	aesni_cbc_sha1_enc_ssse3,\@function,6
+.align	32
+aesni_cbc_sha1_enc_ssse3:
+.cfi_startproc
+	mov	`($win64?56:8)`(%rsp),$inp	# load 7th argument
+	#shr	\$6,$len			# debugging artefact
+	#jz	.Lepilogue_ssse3		# debugging artefact
+	push	%rbx
+.cfi_push	%rbx
+	push	%rbp
+.cfi_push	%rbp
+	push	%r12
+.cfi_push	%r12
+	push	%r13
+.cfi_push	%r13
+	push	%r14
+.cfi_push	%r14
+	push	%r15
+.cfi_push	%r15
+	lea	`-104-($win64?10*16:0)`(%rsp),%rsp
+.cfi_adjust_cfa_offset	`104+($win64?10*16:0)`
+	#mov	$in0,$inp			# debugging artefact
+	#lea	64(%rsp),$ctx			# debugging artefact
+___
+$code.=<<___ if ($win64);
+	movaps	%xmm6,96+0(%rsp)
+	movaps	%xmm7,96+16(%rsp)
+	movaps	%xmm8,96+32(%rsp)
+	movaps	%xmm9,96+48(%rsp)
+	movaps	%xmm10,96+64(%rsp)
+	movaps	%xmm11,96+80(%rsp)
+	movaps	%xmm12,96+96(%rsp)
+	movaps	%xmm13,96+112(%rsp)
+	movaps	%xmm14,96+128(%rsp)
+	movaps	%xmm15,96+144(%rsp)
+.Lprologue_ssse3:
+___
+$code.=<<___;
+	mov	$in0,%r12			# reassign arguments
+	mov	$out,%r13
+	mov	$len,%r14
+	lea	112($key),%r15			# size optimization
+	movdqu	($ivp),$iv			# load IV
+	mov	$ivp,88(%rsp)			# save $ivp
+___
+($in0,$out,$len,$key)=map("%r$_",(12..15));	# reassign arguments
+my $rounds="${ivp}d";
+$code.=<<___;
+	shl	\$6,$len
+	sub	$in0,$out
+	mov	240-112($key),$rounds
+	add	$inp,$len		# end of input
+
+	lea	K_XX_XX(%rip),$K_XX_XX
+	mov	0($ctx),$A		# load context
+	mov	4($ctx),$B
+	mov	8($ctx),$C
+	mov	12($ctx),$D
+	mov	$B,@T[0]		# magic seed
+	mov	16($ctx),$E
+	mov	$C,@T[1]
+	xor	$D,@T[1]
+	and	@T[1],@T[0]
+
+	movdqa	64($K_XX_XX),@Tx[2]	# pbswap mask
+	movdqa	0($K_XX_XX),@Tx[1]	# K_00_19
+	movdqu	0($inp),@X[-4&7]	# load input to %xmm[0-3]
+	movdqu	16($inp),@X[-3&7]
+	movdqu	32($inp),@X[-2&7]
+	movdqu	48($inp),@X[-1&7]
+	pshufb	@Tx[2],@X[-4&7]		# byte swap
+	pshufb	@Tx[2],@X[-3&7]
+	pshufb	@Tx[2],@X[-2&7]
+	add	\$64,$inp
+	paddd	@Tx[1],@X[-4&7]		# add K_00_19
+	pshufb	@Tx[2],@X[-1&7]
+	paddd	@Tx[1],@X[-3&7]
+	paddd	@Tx[1],@X[-2&7]
+	movdqa	@X[-4&7],0(%rsp)	# X[]+K xfer to IALU
+	psubd	@Tx[1],@X[-4&7]		# restore X[]
+	movdqa	@X[-3&7],16(%rsp)
+	psubd	@Tx[1],@X[-3&7]
+	movdqa	@X[-2&7],32(%rsp)
+	psubd	@Tx[1],@X[-2&7]
+	movups	-112($key),$rndkey0	# $key[0]
+	movups	16-112($key),$rndkey[0]	# forward reference
+	jmp	.Loop_ssse3
+___
+
+my $aesenc=sub {
+  use integer;
+  my ($n,$k)=($r/10,$r%10);
+    if ($k==0) {
+      $code.=<<___;
+	movups		`16*$n`($in0),$in		# load input
+	xorps		$rndkey0,$in
+___
+      $code.=<<___ if ($n);
+	movups		$iv,`16*($n-1)`($out,$in0)	# write output
+___
+      $code.=<<___;
+	xorps		$in,$iv
+	movups		`32+16*$k-112`($key),$rndkey[1]
+	aesenc		$rndkey[0],$iv
+___
+    } elsif ($k==9) {
+      $sn++;
+      $code.=<<___;
+	cmp		\$11,$rounds
+	jb		.Laesenclast$sn
+	movups		`32+16*($k+0)-112`($key),$rndkey[1]
+	aesenc		$rndkey[0],$iv
+	movups		`32+16*($k+1)-112`($key),$rndkey[0]
+	aesenc		$rndkey[1],$iv
+	je		.Laesenclast$sn
+	movups		`32+16*($k+2)-112`($key),$rndkey[1]
+	aesenc		$rndkey[0],$iv
+	movups		`32+16*($k+3)-112`($key),$rndkey[0]
+	aesenc		$rndkey[1],$iv
+.Laesenclast$sn:
+	aesenclast	$rndkey[0],$iv
+	movups		16-112($key),$rndkey[1]		# forward reference
+___
+    } else {
+      $code.=<<___;
+	movups		`32+16*$k-112`($key),$rndkey[1]
+	aesenc		$rndkey[0],$iv
+___
+    }
+    $r++;	unshift(@rndkey,pop(@rndkey));
+};
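+# $r counts AES rounds emitted so far: $n=$r/10 selects the 16-byte input
+# block and $k=$r%10 the key-schedule step, so exactly one aesenc (or the
+# key-length-dependent tail at $k==9) is interleaved per invocation between
+# the SHA1 round instructions.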
+
+sub Xupdate_ssse3_16_31()		# recall that $Xi starts with 4
+{ use integer;
+  my $body = shift;
+  my @insns = (&$body,&$body,&$body,&$body);	# 40 instructions
+  my ($a,$b,$c,$d,$e);
+
+	 eval(shift(@insns));		# ror
+	&pshufd	(@X[0],@X[-4&7],0xee);	# was &movdqa	(@X[0],@X[-3&7]);
+	 eval(shift(@insns));
+	&movdqa	(@Tx[0],@X[-1&7]);
+	  &paddd	(@Tx[1],@X[-1&7]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	&punpcklqdq(@X[0],@X[-3&7]);	# compose "X[-14]" in "X[0]", was &palignr(@X[0],@X[-4&7],8);
+	 eval(shift(@insns));
+	 eval(shift(@insns));		# rol
+	 eval(shift(@insns));
+	&psrldq	(@Tx[0],4);		# "X[-3]", 3 dwords
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	&pxor	(@X[0],@X[-4&7]);	# "X[0]"^="X[-16]"
+	 eval(shift(@insns));
+	 eval(shift(@insns));		# ror
+	&pxor	(@Tx[0],@X[-2&7]);	# "X[-3]"^"X[-8]"
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	&pxor	(@X[0],@Tx[0]);		# "X[0]"^="X[-3]"^"X[-8]"
+	 eval(shift(@insns));
+	 eval(shift(@insns));		# rol
+	  &movdqa	(eval(16*(($Xi-1)&3))."(%rsp)",@Tx[1]);	# X[]+K xfer to IALU
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	&movdqa	(@Tx[2],@X[0]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));		# ror
+	&movdqa	(@Tx[0],@X[0]);
+	 eval(shift(@insns));
+
+	&pslldq	(@Tx[2],12);		# "X[0]"<<96, extract one dword
+	&paddd	(@X[0],@X[0]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	&psrld	(@Tx[0],31);
+	 eval(shift(@insns));
+	 eval(shift(@insns));		# rol
+	 eval(shift(@insns));
+	&movdqa	(@Tx[1],@Tx[2]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	&psrld	(@Tx[2],30);
+	 eval(shift(@insns));
+	 eval(shift(@insns));		# ror
+	&por	(@X[0],@Tx[0]);		# "X[0]"<<<=1
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	&pslld	(@Tx[1],2);
+	&pxor	(@X[0],@Tx[2]);
+	 eval(shift(@insns));
+	  &movdqa	(@Tx[2],eval(16*(($Xi)/5))."($K_XX_XX)");	# K_XX_XX
+	 eval(shift(@insns));		# rol
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	&pxor	(@X[0],@Tx[1]);		# "X[0]"^=("X[0]">>96)<<<2
+	&pshufd (@Tx[1],@X[-1&7],0xee)	if ($Xi==7);	# was &movdqa	(@Tx[0],@X[-1&7]) in Xupdate_ssse3_32_79
+
+	 foreach (@insns) { eval; }	# remaining instructions [if any]
+
+  $Xi++;	push(@X,shift(@X));	# "rotate" X[]
+		push(@Tx,shift(@Tx));
+}
+
+sub Xupdate_ssse3_32_79()
+{ use integer;
+  my $body = shift;
+  my @insns = (&$body,&$body,&$body,&$body);	# 32 to 44 instructions
+  my ($a,$b,$c,$d,$e);
+
+	 eval(shift(@insns))		if ($Xi==8);
+	&pxor	(@X[0],@X[-4&7]);	# "X[0]"="X[-32]"^"X[-16]"
+	 eval(shift(@insns))		if ($Xi==8);
+	 eval(shift(@insns));		# body_20_39
+	 eval(shift(@insns));
+	 eval(shift(@insns))		if (@insns[1] =~ /_ror/);
+	 eval(shift(@insns))		if (@insns[0] =~ /_ror/);
+	&punpcklqdq(@Tx[0],@X[-1&7]);	# compose "X[-6]", was &palignr(@Tx[0],@X[-2&7],8);
+	 eval(shift(@insns));
+	 eval(shift(@insns));		# rol
+
+	&pxor	(@X[0],@X[-7&7]);	# "X[0]"^="X[-28]"
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	if ($Xi%5) {
+	  &movdqa	(@Tx[2],@Tx[1]);# "perpetuate" K_XX_XX...
+	} else {			# ... or load next one
+	  &movdqa	(@Tx[2],eval(16*($Xi/5))."($K_XX_XX)");
+	}
+	 eval(shift(@insns));		# ror
+	  &paddd	(@Tx[1],@X[-1&7]);
+	 eval(shift(@insns));
+
+	&pxor	(@X[0],@Tx[0]);		# "X[0]"^="X[-6]"
+	 eval(shift(@insns));		# body_20_39
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));		# rol
+	 eval(shift(@insns))		if (@insns[0] =~ /_ror/);
+
+	&movdqa	(@Tx[0],@X[0]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	  &movdqa	(eval(16*(($Xi-1)&3))."(%rsp)",@Tx[1]);	# X[]+K xfer to IALU
+	 eval(shift(@insns));		# ror
+	 eval(shift(@insns));
+	 eval(shift(@insns));		# body_20_39
+
+	&pslld	(@X[0],2);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&psrld	(@Tx[0],30);
+	 eval(shift(@insns))		if (@insns[0] =~ /_rol/);# rol
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));		# ror
+
+	&por	(@X[0],@Tx[0]);		# "X[0]"<<<=2
+	 eval(shift(@insns));
+	 eval(shift(@insns));		# body_20_39
+	 eval(shift(@insns))		if (@insns[1] =~ /_rol/);
+	 eval(shift(@insns))		if (@insns[0] =~ /_rol/);
+	  &pshufd(@Tx[1],@X[-1&7],0xee)	if ($Xi<19);	# was &movdqa	(@Tx[1],@X[0])
+	 eval(shift(@insns));
+	 eval(shift(@insns));		# rol
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));		# rol
+	 eval(shift(@insns));
+
+	 foreach (@insns) { eval; }	# remaining instructions
+
+  $Xi++;	push(@X,shift(@X));	# "rotate" X[]
+		push(@Tx,shift(@Tx));
+}
+
+sub Xuplast_ssse3_80()
+{ use integer;
+  my $body = shift;
+  my @insns = (&$body,&$body,&$body,&$body);	# 32 instructions
+  my ($a,$b,$c,$d,$e);
+
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	  &paddd	(@Tx[1],@X[-1&7]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	  &movdqa	(eval(16*(($Xi-1)&3))."(%rsp)",@Tx[1]);	# X[]+K xfer IALU
+
+	 foreach (@insns) { eval; }		# remaining instructions
+
+	&cmp	($inp,$len);
+	&je	(shift);
+
+	unshift(@Tx,pop(@Tx));
+
+	&movdqa	(@Tx[2],"64($K_XX_XX)");	# pbswap mask
+	&movdqa	(@Tx[1],"0($K_XX_XX)");		# K_00_19
+	&movdqu	(@X[-4&7],"0($inp)");		# load input
+	&movdqu	(@X[-3&7],"16($inp)");
+	&movdqu	(@X[-2&7],"32($inp)");
+	&movdqu	(@X[-1&7],"48($inp)");
+	&pshufb	(@X[-4&7],@Tx[2]);		# byte swap
+	&add	($inp,64);
+
+  $Xi=0;
+}
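+
+# Only lane -4 is byte-swapped above; the three Xloop_ssse3 calls that
+# follow swap the remaining lanes (note the pshufb below) while already
+# transferring X[]+K for rounds 0-11 of the next block.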
+
+sub Xloop_ssse3()
+{ use integer;
+  my $body = shift;
+  my @insns = (&$body,&$body,&$body,&$body);	# 32 instructions
+  my ($a,$b,$c,$d,$e);
+
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&pshufb	(@X[($Xi-3)&7],@Tx[2]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&paddd	(@X[($Xi-4)&7],@Tx[1]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&movdqa	(eval(16*$Xi)."(%rsp)",@X[($Xi-4)&7]);	# X[]+K xfer to IALU
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&psubd	(@X[($Xi-4)&7],@Tx[1]);
+
+	foreach (@insns) { eval; }
+  $Xi++;
+}
+
+sub Xtail_ssse3()
+{ use integer;
+  my $body = shift;
+  my @insns = (&$body,&$body,&$body,&$body);	# 32 instructions
+  my ($a,$b,$c,$d,$e);
+
+	foreach (@insns) { eval; }
+}
+
+my @body_00_19 = (
+	'($a,$b,$c,$d,$e)=@V;'.
+	'&$_ror	($b,$j?7:2);',	# $b>>>2
+	'&xor	(@T[0],$d);',
+	'&mov	(@T[1],$a);',	# $b for next round
+
+	'&add	($e,eval(4*($j&15))."(%rsp)");',# X[]+K xfer
+	'&xor	($b,$c);',	# $c^$d for next round
+
+	'&$_rol	($a,5);',
+	'&add	($e,@T[0]);',
+	'&and	(@T[1],$b);',	# ($b&($c^$d)) for next round
+
+	'&xor	($b,$c);',	# restore $b
+	'&add	($e,$a);'	.'$j++; unshift(@V,pop(@V)); unshift(@T,pop(@T));'
+	);
+
+sub body_00_19 () {	# ((c^d)&b)^d
+    # on start @T[0]=(c^d)&b
+    return &body_20_39() if ($rx==19); $rx++;
+
+    use integer;
+    my ($k,$n);
+    my @r=@body_00_19;
+
+	$n = scalar(@r);
+	$k = (($jj+1)*12/20)*20*$n/12;	# 12 aesencs per these 20 rounds
+	@r[$k%$n].='&$aesenc();'	if ($jj==$k/$n);
+	$jj++;
+
+    return @r;
+}
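+
+# A worked check of the arithmetic above: @body_00_19 holds $n=10
+# fragments, so rounds 0..19 expand to 200 instruction slots.  Under
+# 'use integer' the formula appends '&$aesenc();' in rounds
+# 0,1,3,5,6,8,10,11,13,15,16 and 18, at fragment slots cycling 0,6,3:
+# 12 aesencs in all, i.e. roughly one AES round per 1.7 SHA-1 rounds.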
+
+my @body_20_39 = (
+	'($a,$b,$c,$d,$e)=@V;'.
+	'&add	($e,eval(4*($j&15))."(%rsp)");',# X[]+K xfer
+	'&xor	(@T[0],$d)	if($j==19);'.
+	'&xor	(@T[0],$c)	if($j> 19);',	# ($b^$d^$c)
+	'&mov	(@T[1],$a);',	# $b for next round
+
+	'&$_rol	($a,5);',
+	'&add	($e,@T[0]);',
+	'&xor	(@T[1],$c)	if ($j< 79);',	# $b^$d for next round
+
+	'&$_ror	($b,7);',	# $b>>>2
+	'&add	($e,$a);'	.'$j++; unshift(@V,pop(@V)); unshift(@T,pop(@T));'
+	);
+
+sub body_20_39 () {	# b^d^c
+    # on entry @T[0]=b^d
+    return &body_40_59() if ($rx==39); $rx++;
+
+    use integer;
+    my ($k,$n);
+    my @r=@body_20_39;
+
+	$n = scalar(@r);
+	$k = (($jj+1)*8/20)*20*$n/8;	# 8 aesencs per these 20 rounds
+	@r[$k%$n].='&$aesenc();'	if ($jj==$k/$n && $rx!=20);
+	$jj++;
+
+    return @r;
+}
+
+my @body_40_59 = (
+	'($a,$b,$c,$d,$e)=@V;'.
+	'&add	($e,eval(4*($j&15))."(%rsp)");',# X[]+K xfer
+	'&and	(@T[0],$c)	if ($j>=40);',	# (b^c)&(c^d)
+	'&xor	($c,$d)		if ($j>=40);',	# restore $c
+
+	'&$_ror	($b,7);',	# $b>>>2
+	'&mov	(@T[1],$a);',	# $b for next round
+	'&xor	(@T[0],$c);',
+
+	'&$_rol	($a,5);',
+	'&add	($e,@T[0]);',
+	'&xor	(@T[1],$c)	if ($j==59);'.
+	'&xor	(@T[1],$b)	if ($j< 59);',	# b^c for next round
+
+	'&xor	($b,$c)		if ($j< 59);',	# c^d for next round
+	'&add	($e,$a);'	.'$j++; unshift(@V,pop(@V)); unshift(@T,pop(@T));'
+	);
+
+sub body_40_59 () {	# ((b^c)&(c^d))^c
+    # on entry @T[0]=(b^c), (c^=d)
+    $rx++;
+
+    use integer;
+    my ($k,$n);
+    my @r=@body_40_59;
+
+	$n = scalar(@r);
+	$k=(($jj+1)*12/20)*20*$n/12;	# 12 aesencs per these 20 rounds
+	@r[$k%$n].='&$aesenc();'	if ($jj==$k/$n && $rx!=40);
+	$jj++;
+
+    return @r;
+}
+$code.=<<___;
+.align	32
+.Loop_ssse3:
+___
+	&Xupdate_ssse3_16_31(\&body_00_19);
+	&Xupdate_ssse3_16_31(\&body_00_19);
+	&Xupdate_ssse3_16_31(\&body_00_19);
+	&Xupdate_ssse3_16_31(\&body_00_19);
+	&Xupdate_ssse3_32_79(\&body_00_19);
+	&Xupdate_ssse3_32_79(\&body_20_39);
+	&Xupdate_ssse3_32_79(\&body_20_39);
+	&Xupdate_ssse3_32_79(\&body_20_39);
+	&Xupdate_ssse3_32_79(\&body_20_39);
+	&Xupdate_ssse3_32_79(\&body_20_39);
+	&Xupdate_ssse3_32_79(\&body_40_59);
+	&Xupdate_ssse3_32_79(\&body_40_59);
+	&Xupdate_ssse3_32_79(\&body_40_59);
+	&Xupdate_ssse3_32_79(\&body_40_59);
+	&Xupdate_ssse3_32_79(\&body_40_59);
+	&Xupdate_ssse3_32_79(\&body_20_39);
+	&Xuplast_ssse3_80(\&body_20_39,".Ldone_ssse3");	# can jump to "done"
+
+				$saved_j=$j; @saved_V=@V;
+				$saved_r=$r; @saved_rndkey=@rndkey;
+
+	&Xloop_ssse3(\&body_20_39);
+	&Xloop_ssse3(\&body_20_39);
+	&Xloop_ssse3(\&body_20_39);
+
+$code.=<<___;
+	movups	$iv,48($out,$in0)		# write output
+	lea	64($in0),$in0
+
+	add	0($ctx),$A			# update context
+	add	4($ctx),@T[0]
+	add	8($ctx),$C
+	add	12($ctx),$D
+	mov	$A,0($ctx)
+	add	16($ctx),$E
+	mov	@T[0],4($ctx)
+	mov	@T[0],$B			# magic seed
+	mov	$C,8($ctx)
+	mov	$C,@T[1]
+	mov	$D,12($ctx)
+	xor	$D,@T[1]
+	mov	$E,16($ctx)
+	and	@T[1],@T[0]
+	jmp	.Loop_ssse3
+
+.Ldone_ssse3:
+___
+				$jj=$j=$saved_j; @V=@saved_V;
+				$r=$saved_r;     @rndkey=@saved_rndkey;
+
+	&Xtail_ssse3(\&body_20_39);
+	&Xtail_ssse3(\&body_20_39);
+	&Xtail_ssse3(\&body_20_39);
+
+$code.=<<___;
+	movups	$iv,48($out,$in0)		# write output
+	mov	88(%rsp),$ivp			# restore $ivp
+
+	add	0($ctx),$A			# update context
+	add	4($ctx),@T[0]
+	add	8($ctx),$C
+	mov	$A,0($ctx)
+	add	12($ctx),$D
+	mov	@T[0],4($ctx)
+	add	16($ctx),$E
+	mov	$C,8($ctx)
+	mov	$D,12($ctx)
+	mov	$E,16($ctx)
+	movups	$iv,($ivp)			# write IV
+___
+$code.=<<___ if ($win64);
+	movaps	96+0(%rsp),%xmm6
+	movaps	96+16(%rsp),%xmm7
+	movaps	96+32(%rsp),%xmm8
+	movaps	96+48(%rsp),%xmm9
+	movaps	96+64(%rsp),%xmm10
+	movaps	96+80(%rsp),%xmm11
+	movaps	96+96(%rsp),%xmm12
+	movaps	96+112(%rsp),%xmm13
+	movaps	96+128(%rsp),%xmm14
+	movaps	96+144(%rsp),%xmm15
+___
+$code.=<<___;
+	lea	`104+($win64?10*16:0)`(%rsp),%rsi
+.cfi_def_cfa	%rsi,56
+	mov	0(%rsi),%r15
+.cfi_restore	%r15
+	mov	8(%rsi),%r14
+.cfi_restore	%r14
+	mov	16(%rsi),%r13
+.cfi_restore	%r13
+	mov	24(%rsi),%r12
+.cfi_restore	%r12
+	mov	32(%rsi),%rbp
+.cfi_restore	%rbp
+	mov	40(%rsi),%rbx
+.cfi_restore	%rbx
+	lea	48(%rsi),%rsp
+.cfi_def_cfa	%rsp,8
+.Lepilogue_ssse3:
+	ret
+.cfi_endproc
+.size	aesni_cbc_sha1_enc_ssse3,.-aesni_cbc_sha1_enc_ssse3
+___
+
+						if ($stitched_decrypt) {{{
+# reset
+($in0,$out,$len,$key,$ivp,$ctx,$inp)=("%rdi","%rsi","%rdx","%rcx","%r8","%r9","%r10");
+$j=$jj=$r=$rx=0;
+$Xi=4;
+
+# reassign for Atom Silvermont (see above)
+($inout0,$inout1,$inout2,$inout3,$rndkey0)=map("%xmm$_",(0..4));
+@X=map("%xmm$_",(8..13,6,7));
+@Tx=map("%xmm$_",(14,15,5));
+
+my @aes256_dec = (
+	'&movdqu($inout0,"0x00($in0)");',
+	'&movdqu($inout1,"0x10($in0)");	&pxor	($inout0,$rndkey0);',
+	'&movdqu($inout2,"0x20($in0)");	&pxor	($inout1,$rndkey0);',
+	'&movdqu($inout3,"0x30($in0)");	&pxor	($inout2,$rndkey0);',
+
+	'&pxor	($inout3,$rndkey0);	&movups	($rndkey0,"16-112($key)");',
+	'&movaps("64(%rsp)",@X[2]);',	# save IV, originally @X[3]
+	undef,undef
+	);
+for ($i=0;$i<13;$i++) {
+    push (@aes256_dec,(
+	'&aesdec	($inout0,$rndkey0);',
+	'&aesdec	($inout1,$rndkey0);',
+	'&aesdec	($inout2,$rndkey0);',
+	'&aesdec	($inout3,$rndkey0);	&movups($rndkey0,"'.(16*($i+2)-112).'($key)");'
+	));
+    push (@aes256_dec,(undef,undef))	if (($i>=3 && $i<=5) || $i>=11);
+    push (@aes256_dec,(undef,undef))	if ($i==5);
+}
+push(@aes256_dec,(
+	'&aesdeclast	($inout0,$rndkey0);	&movups	(@X[0],"0x00($in0)");',
+	'&aesdeclast	($inout1,$rndkey0);	&movups	(@X[1],"0x10($in0)");',
+	'&aesdeclast	($inout2,$rndkey0);	&movups	(@X[2],"0x20($in0)");',
+	'&aesdeclast	($inout3,$rndkey0);	&movups	(@X[3],"0x30($in0)");',
+
+	'&xorps		($inout0,"64(%rsp)");	&movdqu	($rndkey0,"-112($key)");',
+	'&xorps		($inout1,@X[0]);	&movups	("0x00($out,$in0)",$inout0);',
+	'&xorps		($inout2,@X[1]);	&movups	("0x10($out,$in0)",$inout1);',
+	'&xorps		($inout3,@X[2]);	&movups	("0x20($out,$in0)",$inout2);',
+
+	'&movups	("0x30($out,$in0)",$inout3);'
+	));
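+
+# One fragment of @aes256_dec is prepended per SHA-1 round by the
+# body_*_dec subs below; the undef entries are pacing bubbles that
+# stretch the 14-round decryption of four parallel blocks across the
+# 80 rounds.  The 81st fragment, the final store, is issued separately
+# via eval(@aes256_dec[-1]).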
+
+sub body_00_19_dec () {	# ((c^d)&b)^d
+    # on start @T[0]=(c^d)&b
+    return &body_20_39_dec() if ($rx==19);
+
+    my @r=@body_00_19;
+
+	unshift (@r,@aes256_dec[$rx])	if (@aes256_dec[$rx]);
+	$rx++;
+
+    return @r;
+}
+
+sub body_20_39_dec () {	# b^d^c
+    # on entry @T[0]=b^d
+    return &body_40_59_dec() if ($rx==39);
+
+    my @r=@body_20_39;
+
+	unshift (@r,@aes256_dec[$rx])	if (@aes256_dec[$rx]);
+	$rx++;
+
+    return @r;
+}
+
+sub body_40_59_dec () {	# ((b^c)&(c^d))^c
+    # on entry @T[0]=(b^c), (c^=d)
+
+    my @r=@body_40_59;
+
+	unshift (@r,@aes256_dec[$rx])	if (@aes256_dec[$rx]);
+	$rx++;
+
+    return @r;
+}
+
+$code.=<<___;
+.globl	aesni256_cbc_sha1_dec
+.type	aesni256_cbc_sha1_dec,\@abi-omnipotent
+.align	32
+aesni256_cbc_sha1_dec:
+.cfi_startproc
+	# caller should check for SSSE3 and AES-NI bits
+	mov	OPENSSL_ia32cap_P+0(%rip),%r10d
+	mov	OPENSSL_ia32cap_P+4(%rip),%r11d
+___
+$code.=<<___ if ($avx);
+	and	\$`1<<28`,%r11d		# mask AVX bit
+	and	\$`1<<30`,%r10d		# mask "Intel CPU" bit
+	or	%r11d,%r10d
+	cmp	\$`1<<28|1<<30`,%r10d
+	je	aesni256_cbc_sha1_dec_avx
+___
+$code.=<<___;
+	jmp	aesni256_cbc_sha1_dec_ssse3
+	ret
+.cfi_endproc
+.size	aesni256_cbc_sha1_dec,.-aesni256_cbc_sha1_dec
+
+.type	aesni256_cbc_sha1_dec_ssse3,\@function,6
+.align	32
+aesni256_cbc_sha1_dec_ssse3:
+.cfi_startproc
+	mov	`($win64?56:8)`(%rsp),$inp	# load 7th argument
+	push	%rbx
+.cfi_push	%rbx
+	push	%rbp
+.cfi_push	%rbp
+	push	%r12
+.cfi_push	%r12
+	push	%r13
+.cfi_push	%r13
+	push	%r14
+.cfi_push	%r14
+	push	%r15
+.cfi_push	%r15
+	lea	`-104-($win64?10*16:0)`(%rsp),%rsp
+.cfi_adjust_cfa_offset	`104+($win64?10*16:0)`
+___
+$code.=<<___ if ($win64);
+	movaps	%xmm6,96+0(%rsp)
+	movaps	%xmm7,96+16(%rsp)
+	movaps	%xmm8,96+32(%rsp)
+	movaps	%xmm9,96+48(%rsp)
+	movaps	%xmm10,96+64(%rsp)
+	movaps	%xmm11,96+80(%rsp)
+	movaps	%xmm12,96+96(%rsp)
+	movaps	%xmm13,96+112(%rsp)
+	movaps	%xmm14,96+128(%rsp)
+	movaps	%xmm15,96+144(%rsp)
+.Lprologue_dec_ssse3:
+___
+$code.=<<___;
+	mov	$in0,%r12			# reassign arguments
+	mov	$out,%r13
+	mov	$len,%r14
+	lea	112($key),%r15			# size optimization
+	movdqu	($ivp),@X[3]			# load IV
+	#mov	$ivp,88(%rsp)			# save $ivp
+___
+($in0,$out,$len,$key)=map("%r$_",(12..15));	# reassign arguments
+$code.=<<___;
+	shl	\$6,$len
+	sub	$in0,$out
+	add	$inp,$len		# end of input
+
+	lea	K_XX_XX(%rip),$K_XX_XX
+	mov	0($ctx),$A		# load context
+	mov	4($ctx),$B
+	mov	8($ctx),$C
+	mov	12($ctx),$D
+	mov	$B,@T[0]		# magic seed
+	mov	16($ctx),$E
+	mov	$C,@T[1]
+	xor	$D,@T[1]
+	and	@T[1],@T[0]
+
+	movdqa	64($K_XX_XX),@Tx[2]	# pbswap mask
+	movdqa	0($K_XX_XX),@Tx[1]	# K_00_19
+	movdqu	0($inp),@X[-4&7]	# load input to %xmm[0-3]
+	movdqu	16($inp),@X[-3&7]
+	movdqu	32($inp),@X[-2&7]
+	movdqu	48($inp),@X[-1&7]
+	pshufb	@Tx[2],@X[-4&7]		# byte swap
+	add	\$64,$inp
+	pshufb	@Tx[2],@X[-3&7]
+	pshufb	@Tx[2],@X[-2&7]
+	pshufb	@Tx[2],@X[-1&7]
+	paddd	@Tx[1],@X[-4&7]		# add K_00_19
+	paddd	@Tx[1],@X[-3&7]
+	paddd	@Tx[1],@X[-2&7]
+	movdqa	@X[-4&7],0(%rsp)	# X[]+K xfer to IALU
+	psubd	@Tx[1],@X[-4&7]		# restore X[]
+	movdqa	@X[-3&7],16(%rsp)
+	psubd	@Tx[1],@X[-3&7]
+	movdqa	@X[-2&7],32(%rsp)
+	psubd	@Tx[1],@X[-2&7]
+	movdqu	-112($key),$rndkey0	# $key[0]
+	jmp	.Loop_dec_ssse3
+
+.align	32
+.Loop_dec_ssse3:
+___
+	&Xupdate_ssse3_16_31(\&body_00_19_dec);
+	&Xupdate_ssse3_16_31(\&body_00_19_dec);
+	&Xupdate_ssse3_16_31(\&body_00_19_dec);
+	&Xupdate_ssse3_16_31(\&body_00_19_dec);
+	&Xupdate_ssse3_32_79(\&body_00_19_dec);
+	&Xupdate_ssse3_32_79(\&body_20_39_dec);
+	&Xupdate_ssse3_32_79(\&body_20_39_dec);
+	&Xupdate_ssse3_32_79(\&body_20_39_dec);
+	&Xupdate_ssse3_32_79(\&body_20_39_dec);
+	&Xupdate_ssse3_32_79(\&body_20_39_dec);
+	&Xupdate_ssse3_32_79(\&body_40_59_dec);
+	&Xupdate_ssse3_32_79(\&body_40_59_dec);
+	&Xupdate_ssse3_32_79(\&body_40_59_dec);
+	&Xupdate_ssse3_32_79(\&body_40_59_dec);
+	&Xupdate_ssse3_32_79(\&body_40_59_dec);
+	&Xupdate_ssse3_32_79(\&body_20_39_dec);
+	&Xuplast_ssse3_80(\&body_20_39_dec,".Ldone_dec_ssse3");	# can jump to "done"
+
+				$saved_j=$j;   @saved_V=@V;
+				$saved_rx=$rx;
+
+	&Xloop_ssse3(\&body_20_39_dec);
+	&Xloop_ssse3(\&body_20_39_dec);
+	&Xloop_ssse3(\&body_20_39_dec);
+
+	eval(@aes256_dec[-1]);			# last store
+$code.=<<___;
+	lea	64($in0),$in0
+
+	add	0($ctx),$A			# update context
+	add	4($ctx),@T[0]
+	add	8($ctx),$C
+	add	12($ctx),$D
+	mov	$A,0($ctx)
+	add	16($ctx),$E
+	mov	@T[0],4($ctx)
+	mov	@T[0],$B			# magic seed
+	mov	$C,8($ctx)
+	mov	$C,@T[1]
+	mov	$D,12($ctx)
+	xor	$D,@T[1]
+	mov	$E,16($ctx)
+	and	@T[1],@T[0]
+	jmp	.Loop_dec_ssse3
+
+.Ldone_dec_ssse3:
+___
+				$jj=$j=$saved_j; @V=@saved_V;
+				$rx=$saved_rx;
+
+	&Xtail_ssse3(\&body_20_39_dec);
+	&Xtail_ssse3(\&body_20_39_dec);
+	&Xtail_ssse3(\&body_20_39_dec);
+
+	eval(@aes256_dec[-1]);			# last store
+$code.=<<___;
+	add	0($ctx),$A			# update context
+	add	4($ctx),@T[0]
+	add	8($ctx),$C
+	mov	$A,0($ctx)
+	add	12($ctx),$D
+	mov	@T[0],4($ctx)
+	add	16($ctx),$E
+	mov	$C,8($ctx)
+	mov	$D,12($ctx)
+	mov	$E,16($ctx)
+	movups	@X[3],($ivp)			# write IV
+___
+$code.=<<___ if ($win64);
+	movaps	96+0(%rsp),%xmm6
+	movaps	96+16(%rsp),%xmm7
+	movaps	96+32(%rsp),%xmm8
+	movaps	96+48(%rsp),%xmm9
+	movaps	96+64(%rsp),%xmm10
+	movaps	96+80(%rsp),%xmm11
+	movaps	96+96(%rsp),%xmm12
+	movaps	96+112(%rsp),%xmm13
+	movaps	96+128(%rsp),%xmm14
+	movaps	96+144(%rsp),%xmm15
+___
+$code.=<<___;
+	lea	`104+($win64?10*16:0)`(%rsp),%rsi
+.cfi_def_cfa	%rsi,56
+	mov	0(%rsi),%r15
+.cfi_restore	%r15
+	mov	8(%rsi),%r14
+.cfi_restore	%r14
+	mov	16(%rsi),%r13
+.cfi_restore	%r13
+	mov	24(%rsi),%r12
+.cfi_restore	%r12
+	mov	32(%rsi),%rbp
+.cfi_restore	%rbp
+	mov	40(%rsi),%rbx
+.cfi_restore	%rbx
+	lea	48(%rsi),%rsp
+.cfi_def_cfa	%rsp,8
+.Lepilogue_dec_ssse3:
+	ret
+.cfi_endproc
+.size	aesni256_cbc_sha1_dec_ssse3,.-aesni256_cbc_sha1_dec_ssse3
+___
+						}}}
+$j=$jj=$r=$rx=0;
+
+if ($avx) {
+my ($in0,$out,$len,$key,$ivp,$ctx,$inp)=("%rdi","%rsi","%rdx","%rcx","%r8","%r9","%r10");
+
+my $Xi=4;
+my @X=map("%xmm$_",(4..7,0..3));
+my @Tx=map("%xmm$_",(8..10));
+my @V=($A,$B,$C,$D,$E)=("%eax","%ebx","%ecx","%edx","%ebp");	# size optimization
+my @T=("%esi","%edi");
+my ($rndkey0,$iv,$in)=map("%xmm$_",(11..13));
+my @rndkey=("%xmm14","%xmm15");
+my ($inout0,$inout1,$inout2,$inout3)=map("%xmm$_",(12..15));	# for dec
+my $Kx=@Tx[2];
+
+my $_rol=sub { &shld(@_[0],@_) };
+my $_ror=sub { &shrd(@_[0],@_) };
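+
+# With identical first and second operands, shld/shrd act as plain
+# rotates, so these closures still implement the SHA-1 rotations;
+# preferring shld/shrd over rol/ror here is a scheduling choice
+# inherited from sha1-x86_64.pl.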
+
+$code.=<<___;
+.type	aesni_cbc_sha1_enc_avx,\@function,6
+.align	32
+aesni_cbc_sha1_enc_avx:
+.cfi_startproc
+	mov	`($win64?56:8)`(%rsp),$inp	# load 7th argument
+	#shr	\$6,$len			# debugging artefact
+	#jz	.Lepilogue_avx			# debugging artefact
+	push	%rbx
+.cfi_push	%rbx
+	push	%rbp
+.cfi_push	%rbp
+	push	%r12
+.cfi_push	%r12
+	push	%r13
+.cfi_push	%r13
+	push	%r14
+.cfi_push	%r14
+	push	%r15
+.cfi_push	%r15
+	lea	`-104-($win64?10*16:0)`(%rsp),%rsp
+.cfi_adjust_cfa_offset	`104+($win64?10*16:0)`
+	#mov	$in0,$inp			# debugging artefact
+	#lea	64(%rsp),$ctx			# debugging artefact
+___
+$code.=<<___ if ($win64);
+	movaps	%xmm6,96+0(%rsp)
+	movaps	%xmm7,96+16(%rsp)
+	movaps	%xmm8,96+32(%rsp)
+	movaps	%xmm9,96+48(%rsp)
+	movaps	%xmm10,96+64(%rsp)
+	movaps	%xmm11,96+80(%rsp)
+	movaps	%xmm12,96+96(%rsp)
+	movaps	%xmm13,96+112(%rsp)
+	movaps	%xmm14,96+128(%rsp)
+	movaps	%xmm15,96+144(%rsp)
+.Lprologue_avx:
+___
+$code.=<<___;
+	vzeroall
+	mov	$in0,%r12			# reassign arguments
+	mov	$out,%r13
+	mov	$len,%r14
+	lea	112($key),%r15			# size optimization
+	vmovdqu	($ivp),$iv			# load IV
+	mov	$ivp,88(%rsp)			# save $ivp
+___
+($in0,$out,$len,$key)=map("%r$_",(12..15));	# reassign arguments
+my $rounds="${ivp}d";
+$code.=<<___;
+	shl	\$6,$len
+	sub	$in0,$out
+	mov	240-112($key),$rounds
+	add	$inp,$len		# end of input
+
+	lea	K_XX_XX(%rip),$K_XX_XX
+	mov	0($ctx),$A		# load context
+	mov	4($ctx),$B
+	mov	8($ctx),$C
+	mov	12($ctx),$D
+	mov	$B,@T[0]		# magic seed
+	mov	16($ctx),$E
+	mov	$C,@T[1]
+	xor	$D,@T[1]
+	and	@T[1],@T[0]
+
+	vmovdqa	64($K_XX_XX),@X[2]	# pbswap mask
+	vmovdqa	0($K_XX_XX),$Kx		# K_00_19
+	vmovdqu	0($inp),@X[-4&7]	# load input to %xmm[0-3]
+	vmovdqu	16($inp),@X[-3&7]
+	vmovdqu	32($inp),@X[-2&7]
+	vmovdqu	48($inp),@X[-1&7]
+	vpshufb	@X[2],@X[-4&7],@X[-4&7]	# byte swap
+	add	\$64,$inp
+	vpshufb	@X[2],@X[-3&7],@X[-3&7]
+	vpshufb	@X[2],@X[-2&7],@X[-2&7]
+	vpshufb	@X[2],@X[-1&7],@X[-1&7]
+	vpaddd	$Kx,@X[-4&7],@X[0]	# add K_00_19
+	vpaddd	$Kx,@X[-3&7],@X[1]
+	vpaddd	$Kx,@X[-2&7],@X[2]
+	vmovdqa	@X[0],0(%rsp)		# X[]+K xfer to IALU
+	vmovdqa	@X[1],16(%rsp)
+	vmovdqa	@X[2],32(%rsp)
+	vmovups	-112($key),$rndkey[1]	# $key[0]
+	vmovups	16-112($key),$rndkey[0]	# forward reference
+	jmp	.Loop_avx
+___
+
+my $aesenc=sub {
+  use integer;
+  my ($n,$k)=($r/10,$r%10);
+    if ($k==0) {
+      $code.=<<___;
+	vmovdqu		`16*$n`($in0),$in		# load input
+	vpxor		$rndkey[1],$in,$in
+___
+      $code.=<<___ if ($n);
+	vmovups		$iv,`16*($n-1)`($out,$in0)	# write output
+___
+      $code.=<<___;
+	vpxor		$in,$iv,$iv
+	vaesenc		$rndkey[0],$iv,$iv
+	vmovups		`32+16*$k-112`($key),$rndkey[1]
+___
+    } elsif ($k==9) {
+      $sn++;
+      $code.=<<___;
+	cmp		\$11,$rounds
+	jb		.Lvaesenclast$sn
+	vaesenc		$rndkey[0],$iv,$iv
+	vmovups		`32+16*($k+0)-112`($key),$rndkey[1]
+	vaesenc		$rndkey[1],$iv,$iv
+	vmovups		`32+16*($k+1)-112`($key),$rndkey[0]
+	je		.Lvaesenclast$sn
+	vaesenc		$rndkey[0],$iv,$iv
+	vmovups		`32+16*($k+2)-112`($key),$rndkey[1]
+	vaesenc		$rndkey[1],$iv,$iv
+	vmovups		`32+16*($k+3)-112`($key),$rndkey[0]
+.Lvaesenclast$sn:
+	vaesenclast	$rndkey[0],$iv,$iv
+	vmovups		-112($key),$rndkey[0]
+	vmovups		16-112($key),$rndkey[1]		# forward reference
+___
+    } else {
+      $code.=<<___;
+	vaesenc		$rndkey[0],$iv,$iv
+	vmovups		`32+16*$k-112`($key),$rndkey[1]
+___
+    }
+    $r++;	unshift(@rndkey,pop(@rndkey));
+};
+
+sub Xupdate_avx_16_31()		# recall that $Xi starts with 4
+{ use integer;
+  my $body = shift;
+  my @insns = (&$body,&$body,&$body,&$body);	# 40 instructions
+  my ($a,$b,$c,$d,$e);
+
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vpalignr(@X[0],@X[-3&7],@X[-4&7],8);	# compose "X[-14]" in "X[0]"
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	  &vpaddd	(@Tx[1],$Kx,@X[-1&7]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vpsrldq(@Tx[0],@X[-1&7],4);		# "X[-3]", 3 dwords
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vpxor	(@X[0],@X[0],@X[-4&7]);		# "X[0]"^="X[-16]"
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	&vpxor	(@Tx[0],@Tx[0],@X[-2&7]);	# "X[-3]"^"X[-8]"
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	&vpxor	(@X[0],@X[0],@Tx[0]);		# "X[0]"^="X[-3]"^"X[-8]"
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	  &vmovdqa	(eval(16*(($Xi-1)&3))."(%rsp)",@Tx[1]);	# X[]+K xfer to IALU
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	&vpsrld	(@Tx[0],@X[0],31);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	&vpslldq(@Tx[1],@X[0],12);		# "X[0]"<<96, extract one dword
+	&vpaddd	(@X[0],@X[0],@X[0]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	&vpor	(@X[0],@X[0],@Tx[0]);		# "X[0]"<<<=1
+	&vpsrld	(@Tx[0],@Tx[1],30);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	&vpslld	(@Tx[1],@Tx[1],2);
+	&vpxor	(@X[0],@X[0],@Tx[0]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	&vpxor	(@X[0],@X[0],@Tx[1]);		# "X[0]"^=("X[0]">>96)<<<2
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	  &vmovdqa	($Kx,eval(16*(($Xi)/5))."($K_XX_XX)")	if ($Xi%5==0);	# K_XX_XX
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+
+	 foreach (@insns) { eval; }	# remaining instructions [if any]
+
+  $Xi++;	push(@X,shift(@X));	# "rotate" X[]
+}
+
+sub Xupdate_avx_32_79()
+{ use integer;
+  my $body = shift;
+  my @insns = (&$body,&$body,&$body,&$body);	# 32 to 48 instructions
+  my ($a,$b,$c,$d,$e);
+
+	&vpalignr(@Tx[0],@X[-1&7],@X[-2&7],8);	# compose "X[-6]"
+	&vpxor	(@X[0],@X[0],@X[-4&7]);		# "X[0]"="X[-32]"^"X[-16]"
+	 eval(shift(@insns));		# body_20_39
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));		# rol
+
+	&vpxor	(@X[0],@X[0],@X[-7&7]);		# "X[0]"^="X[-28]"
+	 eval(shift(@insns));
+	 eval(shift(@insns))	if (@insns[0] !~ /&ro[rl]/);
+	  &vpaddd	(@Tx[1],$Kx,@X[-1&7]);
+	  &vmovdqa	($Kx,eval(16*($Xi/5))."($K_XX_XX)")	if ($Xi%5==0);
+	 eval(shift(@insns));		# ror
+	 eval(shift(@insns));
+
+	&vpxor	(@X[0],@X[0],@Tx[0]);		# "X[0]"^="X[-6]"
+	 eval(shift(@insns));		# body_20_39
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));		# rol
+
+	&vpsrld	(@Tx[0],@X[0],30);
+	  &vmovdqa	(eval(16*(($Xi-1)&3))."(%rsp)",@Tx[1]);	# X[]+K xfer to IALU
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));		# ror
+	 eval(shift(@insns));
+
+	&vpslld	(@X[0],@X[0],2);
+	 eval(shift(@insns));		# body_20_39
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));		# rol
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));		# ror
+	 eval(shift(@insns));
+
+	&vpor	(@X[0],@X[0],@Tx[0]);		# "X[0]"<<<=2
+	 eval(shift(@insns));		# body_20_39
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));		# rol
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));		# rol
+	 eval(shift(@insns));
+
+	 foreach (@insns) { eval; }	# remaining instructions
+
+  $Xi++;	push(@X,shift(@X));	# "rotate" X[]
+}
+
+sub Xuplast_avx_80()
+{ use integer;
+  my $body = shift;
+  my @insns = (&$body,&$body,&$body,&$body);	# 32 instructions
+  my ($a,$b,$c,$d,$e);
+
+	 eval(shift(@insns));
+	  &vpaddd	(@Tx[1],$Kx,@X[-1&7]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	  &vmovdqa	(eval(16*(($Xi-1)&3))."(%rsp)",@Tx[1]);	# X[]+K xfer IALU
+
+	 foreach (@insns) { eval; }		# remaining instructions
+
+	&cmp	($inp,$len);
+	&je	(shift);
+
+	&vmovdqa(@Tx[1],"64($K_XX_XX)");	# pbswap mask
+	&vmovdqa($Kx,"0($K_XX_XX)");		# K_00_19
+	&vmovdqu(@X[-4&7],"0($inp)");		# load input
+	&vmovdqu(@X[-3&7],"16($inp)");
+	&vmovdqu(@X[-2&7],"32($inp)");
+	&vmovdqu(@X[-1&7],"48($inp)");
+	&vpshufb(@X[-4&7],@X[-4&7],@Tx[1]);	# byte swap
+	&add	($inp,64);
+
+  $Xi=0;
+}
+
+sub Xloop_avx()
+{ use integer;
+  my $body = shift;
+  my @insns = (&$body,&$body,&$body,&$body);	# 32 instructions
+  my ($a,$b,$c,$d,$e);
+
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vpshufb(@X[($Xi-3)&7],@X[($Xi-3)&7],@Tx[1]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vpaddd	(@Tx[0],@X[($Xi-4)&7],$Kx);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vmovdqa(eval(16*$Xi)."(%rsp)",@Tx[0]);	# X[]+K xfer to IALU
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	foreach (@insns) { eval; }
+  $Xi++;
+}
+
+sub Xtail_avx()
+{ use integer;
+  my $body = shift;
+  my @insns = (&$body,&$body,&$body,&$body);	# 32 instructions
+  my ($a,$b,$c,$d,$e);
+
+	foreach (@insns) { eval; }
+}
+
+$code.=<<___;
+.align	32
+.Loop_avx:
+___
+	&Xupdate_avx_16_31(\&body_00_19);
+	&Xupdate_avx_16_31(\&body_00_19);
+	&Xupdate_avx_16_31(\&body_00_19);
+	&Xupdate_avx_16_31(\&body_00_19);
+	&Xupdate_avx_32_79(\&body_00_19);
+	&Xupdate_avx_32_79(\&body_20_39);
+	&Xupdate_avx_32_79(\&body_20_39);
+	&Xupdate_avx_32_79(\&body_20_39);
+	&Xupdate_avx_32_79(\&body_20_39);
+	&Xupdate_avx_32_79(\&body_20_39);
+	&Xupdate_avx_32_79(\&body_40_59);
+	&Xupdate_avx_32_79(\&body_40_59);
+	&Xupdate_avx_32_79(\&body_40_59);
+	&Xupdate_avx_32_79(\&body_40_59);
+	&Xupdate_avx_32_79(\&body_40_59);
+	&Xupdate_avx_32_79(\&body_20_39);
+	&Xuplast_avx_80(\&body_20_39,".Ldone_avx");	# can jump to "done"
+
+				$saved_j=$j; @saved_V=@V;
+				$saved_r=$r; @saved_rndkey=@rndkey;
+
+	&Xloop_avx(\&body_20_39);
+	&Xloop_avx(\&body_20_39);
+	&Xloop_avx(\&body_20_39);
+
+$code.=<<___;
+	vmovups	$iv,48($out,$in0)		# write output
+	lea	64($in0),$in0
+
+	add	0($ctx),$A			# update context
+	add	4($ctx),@T[0]
+	add	8($ctx),$C
+	add	12($ctx),$D
+	mov	$A,0($ctx)
+	add	16($ctx),$E
+	mov	@T[0],4($ctx)
+	mov	@T[0],$B			# magic seed
+	mov	$C,8($ctx)
+	mov	$C,@T[1]
+	mov	$D,12($ctx)
+	xor	$D,@T[1]
+	mov	$E,16($ctx)
+	and	@T[1],@T[0]
+	jmp	.Loop_avx
+
+.Ldone_avx:
+___
+				$jj=$j=$saved_j; @V=@saved_V;
+				$r=$saved_r;     @rndkey=@saved_rndkey;
+
+	&Xtail_avx(\&body_20_39);
+	&Xtail_avx(\&body_20_39);
+	&Xtail_avx(\&body_20_39);
+
+$code.=<<___;
+	vmovups	$iv,48($out,$in0)		# write output
+	mov	88(%rsp),$ivp			# restore $ivp
+
+	add	0($ctx),$A			# update context
+	add	4($ctx),@T[0]
+	add	8($ctx),$C
+	mov	$A,0($ctx)
+	add	12($ctx),$D
+	mov	@T[0],4($ctx)
+	add	16($ctx),$E
+	mov	$C,8($ctx)
+	mov	$D,12($ctx)
+	mov	$E,16($ctx)
+	vmovups	$iv,($ivp)			# write IV
+	vzeroall
+___
+$code.=<<___ if ($win64);
+	movaps	96+0(%rsp),%xmm6
+	movaps	96+16(%rsp),%xmm7
+	movaps	96+32(%rsp),%xmm8
+	movaps	96+48(%rsp),%xmm9
+	movaps	96+64(%rsp),%xmm10
+	movaps	96+80(%rsp),%xmm11
+	movaps	96+96(%rsp),%xmm12
+	movaps	96+112(%rsp),%xmm13
+	movaps	96+128(%rsp),%xmm14
+	movaps	96+144(%rsp),%xmm15
+___
+$code.=<<___;
+	lea	`104+($win64?10*16:0)`(%rsp),%rsi
+.cfi_def_cfa	%rsi,56
+	mov	0(%rsi),%r15
+.cfi_restore	%r15
+	mov	8(%rsi),%r14
+.cfi_restore	%r14
+	mov	16(%rsi),%r13
+.cfi_restore	%r13
+	mov	24(%rsi),%r12
+.cfi_restore	%r12
+	mov	32(%rsi),%rbp
+.cfi_restore	%rbp
+	mov	40(%rsi),%rbx
+.cfi_restore	%rbx
+	lea	48(%rsi),%rsp
+.cfi_def_cfa	%rsp,8
+.Lepilogue_avx:
+	ret
+.cfi_endproc
+.size	aesni_cbc_sha1_enc_avx,.-aesni_cbc_sha1_enc_avx
+___
+
+						if ($stitched_decrypt) {{{
+# reset
+($in0,$out,$len,$key,$ivp,$ctx,$inp)=("%rdi","%rsi","%rdx","%rcx","%r8","%r9","%r10");
+
+$j=$jj=$r=$rx=0;
+$Xi=4;
+
+@aes256_dec = (
+	'&vpxor	($inout0,$rndkey0,"0x00($in0)");',
+	'&vpxor	($inout1,$rndkey0,"0x10($in0)");',
+	'&vpxor	($inout2,$rndkey0,"0x20($in0)");',
+	'&vpxor	($inout3,$rndkey0,"0x30($in0)");',
+
+	'&vmovups($rndkey0,"16-112($key)");',
+	'&vmovups("64(%rsp)",@X[2]);',		# save IV, originally @X[3]
+	undef,undef
+	);
+for ($i=0;$i<13;$i++) {
+    push (@aes256_dec,(
+	'&vaesdec	($inout0,$inout0,$rndkey0);',
+	'&vaesdec	($inout1,$inout1,$rndkey0);',
+	'&vaesdec	($inout2,$inout2,$rndkey0);',
+	'&vaesdec	($inout3,$inout3,$rndkey0);	&vmovups($rndkey0,"'.(16*($i+2)-112).'($key)");'
+	));
+    push (@aes256_dec,(undef,undef))	if (($i>=3 && $i<=5) || $i>=11);
+    push (@aes256_dec,(undef,undef))	if ($i==5);
+}
+push(@aes256_dec,(
+	'&vaesdeclast	($inout0,$inout0,$rndkey0);	&vmovups(@X[0],"0x00($in0)");',
+	'&vaesdeclast	($inout1,$inout1,$rndkey0);	&vmovups(@X[1],"0x10($in0)");',
+	'&vaesdeclast	($inout2,$inout2,$rndkey0);	&vmovups(@X[2],"0x20($in0)");',
+	'&vaesdeclast	($inout3,$inout3,$rndkey0);	&vmovups(@X[3],"0x30($in0)");',
+
+	'&vxorps	($inout0,$inout0,"64(%rsp)");	&vmovdqu($rndkey0,"-112($key)");',
+	'&vxorps	($inout1,$inout1,@X[0]);	&vmovups("0x00($out,$in0)",$inout0);',
+	'&vxorps	($inout2,$inout2,@X[1]);	&vmovups("0x10($out,$in0)",$inout1);',
+	'&vxorps	($inout3,$inout3,@X[2]);	&vmovups("0x20($out,$in0)",$inout2);',
+
+	'&vmovups	("0x30($out,$in0)",$inout3);'
+	));
+
+$code.=<<___;
+.type	aesni256_cbc_sha1_dec_avx,\@function,6
+.align	32
+aesni256_cbc_sha1_dec_avx:
+.cfi_startproc
+	mov	`($win64?56:8)`(%rsp),$inp	# load 7th argument
+	push	%rbx
+.cfi_push	%rbx
+	push	%rbp
+.cfi_push	%rbp
+	push	%r12
+.cfi_push	%r12
+	push	%r13
+.cfi_push	%r13
+	push	%r14
+.cfi_push	%r14
+	push	%r15
+.cfi_push	%r15
+	lea	`-104-($win64?10*16:0)`(%rsp),%rsp
+.cfi_adjust_cfa_offset	`104+($win64?10*16:0)`
+___
+$code.=<<___ if ($win64);
+	movaps	%xmm6,96+0(%rsp)
+	movaps	%xmm7,96+16(%rsp)
+	movaps	%xmm8,96+32(%rsp)
+	movaps	%xmm9,96+48(%rsp)
+	movaps	%xmm10,96+64(%rsp)
+	movaps	%xmm11,96+80(%rsp)
+	movaps	%xmm12,96+96(%rsp)
+	movaps	%xmm13,96+112(%rsp)
+	movaps	%xmm14,96+128(%rsp)
+	movaps	%xmm15,96+144(%rsp)
+.Lprologue_dec_avx:
+___
+$code.=<<___;
+	vzeroall
+	mov	$in0,%r12			# reassign arguments
+	mov	$out,%r13
+	mov	$len,%r14
+	lea	112($key),%r15			# size optimization
+	vmovdqu	($ivp),@X[3]			# load IV
+___
+($in0,$out,$len,$key)=map("%r$_",(12..15));	# reassign arguments
+$code.=<<___;
+	shl	\$6,$len
+	sub	$in0,$out
+	add	$inp,$len		# end of input
+
+	lea	K_XX_XX(%rip),$K_XX_XX
+	mov	0($ctx),$A		# load context
+	mov	4($ctx),$B
+	mov	8($ctx),$C
+	mov	12($ctx),$D
+	mov	$B,@T[0]		# magic seed
+	mov	16($ctx),$E
+	mov	$C,@T[1]
+	xor	$D,@T[1]
+	and	@T[1],@T[0]
+
+	vmovdqa	64($K_XX_XX),@X[2]	# pbswap mask
+	vmovdqa	0($K_XX_XX),$Kx		# K_00_19
+	vmovdqu	0($inp),@X[-4&7]	# load input to %xmm[0-3]
+	vmovdqu	16($inp),@X[-3&7]
+	vmovdqu	32($inp),@X[-2&7]
+	vmovdqu	48($inp),@X[-1&7]
+	vpshufb	@X[2],@X[-4&7],@X[-4&7]	# byte swap
+	add	\$64,$inp
+	vpshufb	@X[2],@X[-3&7],@X[-3&7]
+	vpshufb	@X[2],@X[-2&7],@X[-2&7]
+	vpshufb	@X[2],@X[-1&7],@X[-1&7]
+	vpaddd	$Kx,@X[-4&7],@X[0]	# add K_00_19
+	vpaddd	$Kx,@X[-3&7],@X[1]
+	vpaddd	$Kx,@X[-2&7],@X[2]
+	vmovdqa	@X[0],0(%rsp)		# X[]+K xfer to IALU
+	vmovdqa	@X[1],16(%rsp)
+	vmovdqa	@X[2],32(%rsp)
+	vmovups	-112($key),$rndkey0	# $key[0]
+	jmp	.Loop_dec_avx
+
+.align	32
+.Loop_dec_avx:
+___
+	&Xupdate_avx_16_31(\&body_00_19_dec);
+	&Xupdate_avx_16_31(\&body_00_19_dec);
+	&Xupdate_avx_16_31(\&body_00_19_dec);
+	&Xupdate_avx_16_31(\&body_00_19_dec);
+	&Xupdate_avx_32_79(\&body_00_19_dec);
+	&Xupdate_avx_32_79(\&body_20_39_dec);
+	&Xupdate_avx_32_79(\&body_20_39_dec);
+	&Xupdate_avx_32_79(\&body_20_39_dec);
+	&Xupdate_avx_32_79(\&body_20_39_dec);
+	&Xupdate_avx_32_79(\&body_20_39_dec);
+	&Xupdate_avx_32_79(\&body_40_59_dec);
+	&Xupdate_avx_32_79(\&body_40_59_dec);
+	&Xupdate_avx_32_79(\&body_40_59_dec);
+	&Xupdate_avx_32_79(\&body_40_59_dec);
+	&Xupdate_avx_32_79(\&body_40_59_dec);
+	&Xupdate_avx_32_79(\&body_20_39_dec);
+	&Xuplast_avx_80(\&body_20_39_dec,".Ldone_dec_avx");	# can jump to "done"
+
+				$saved_j=$j; @saved_V=@V;
+				$saved_rx=$rx;
+
+	&Xloop_avx(\&body_20_39_dec);
+	&Xloop_avx(\&body_20_39_dec);
+	&Xloop_avx(\&body_20_39_dec);
+
+	eval(@aes256_dec[-1]);			# last store
+$code.=<<___;
+	lea	64($in0),$in0
+
+	add	0($ctx),$A			# update context
+	add	4($ctx),@T[0]
+	add	8($ctx),$C
+	add	12($ctx),$D
+	mov	$A,0($ctx)
+	add	16($ctx),$E
+	mov	@T[0],4($ctx)
+	mov	@T[0],$B			# magic seed
+	mov	$C,8($ctx)
+	mov	$C,@T[1]
+	mov	$D,12($ctx)
+	xor	$D,@T[1]
+	mov	$E,16($ctx)
+	and	@T[1],@T[0]
+	jmp	.Loop_dec_avx
+
+.Ldone_dec_avx:
+___
+				$jj=$j=$saved_j; @V=@saved_V;
+				$rx=$saved_rx;
+
+	&Xtail_avx(\&body_20_39_dec);
+	&Xtail_avx(\&body_20_39_dec);
+	&Xtail_avx(\&body_20_39_dec);
+
+	eval(@aes256_dec[-1]);			# last store
+$code.=<<___;
+
+	add	0($ctx),$A			# update context
+	add	4($ctx),@T[0]
+	add	8($ctx),$C
+	mov	$A,0($ctx)
+	add	12($ctx),$D
+	mov	@T[0],4($ctx)
+	add	16($ctx),$E
+	mov	$C,8($ctx)
+	mov	$D,12($ctx)
+	mov	$E,16($ctx)
+	vmovups	@X[3],($ivp)			# write IV
+	vzeroall
+___
+$code.=<<___ if ($win64);
+	movaps	96+0(%rsp),%xmm6
+	movaps	96+16(%rsp),%xmm7
+	movaps	96+32(%rsp),%xmm8
+	movaps	96+48(%rsp),%xmm9
+	movaps	96+64(%rsp),%xmm10
+	movaps	96+80(%rsp),%xmm11
+	movaps	96+96(%rsp),%xmm12
+	movaps	96+112(%rsp),%xmm13
+	movaps	96+128(%rsp),%xmm14
+	movaps	96+144(%rsp),%xmm15
+___
+$code.=<<___;
+	lea	`104+($win64?10*16:0)`(%rsp),%rsi
+.cfi_def_cfa	%rsi,56
+	mov	0(%rsi),%r15
+.cfi_restore	%r15
+	mov	8(%rsi),%r14
+.cfi_restore	%r14
+	mov	16(%rsi),%r13
+.cfi_restore	%r13
+	mov	24(%rsi),%r12
+.cfi_restore	%r12
+	mov	32(%rsi),%rbp
+.cfi_restore	%rbp
+	mov	40(%rsi),%rbx
+.cfi_restore	%rbx
+	lea	48(%rsi),%rsp
+.cfi_def_cfa	%rsp,8
+.Lepilogue_dec_avx:
+	ret
+.cfi_endproc
+.size	aesni256_cbc_sha1_dec_avx,.-aesni256_cbc_sha1_dec_avx
+___
+						}}}
+}
+$code.=<<___;
+.align	64
+K_XX_XX:
+.long	0x5a827999,0x5a827999,0x5a827999,0x5a827999	# K_00_19
+.long	0x6ed9eba1,0x6ed9eba1,0x6ed9eba1,0x6ed9eba1	# K_20_39
+.long	0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc	# K_40_59
+.long	0xca62c1d6,0xca62c1d6,0xca62c1d6,0xca62c1d6	# K_60_79
+.long	0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f	# pbswap mask
+.byte	0xf,0xe,0xd,0xc,0xb,0xa,0x9,0x8,0x7,0x6,0x5,0x4,0x3,0x2,0x1,0x0
+
+.asciz	"AESNI-CBC+SHA1 stitch for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
+.align	64
+___
+						if ($shaext) {{{
+($in0,$out,$len,$key,$ivp,$ctx,$inp)=("%rdi","%rsi","%rdx","%rcx","%r8","%r9","%r10");
+
+$rounds="%r11d";
+
+($iv,$in,$rndkey0)=map("%xmm$_",(2,14,15));
+@rndkey=("%xmm0","%xmm1");
+$r=0;
+
+my ($BSWAP,$ABCD,$E,$E_,$ABCD_SAVE,$E_SAVE)=map("%xmm$_",(7..12));
+my @MSG=map("%xmm$_",(3..6));
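+
+# SHA-NI primer: sha1rnds4 \$n performs four SHA-1 rounds using the
+# round function and constant selected by n (0..3 for rounds 0-19,
+# 20-39, 40-59, 60-79); sha1nexte folds the rotated E contribution of
+# the previous state into the next four message dwords, and
+# sha1msg1/sha1msg2 implement the message-schedule recurrence.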
+
+$code.=<<___;
+.type	aesni_cbc_sha1_enc_shaext,\@function,6
+.align	32
+aesni_cbc_sha1_enc_shaext:
+.cfi_startproc
+	mov	`($win64?56:8)`(%rsp),$inp	# load 7th argument
+___
+$code.=<<___ if ($win64);
+	mov	%rsp,%rax		# copy %rsp, referenced by saves below
+	lea	`-8-10*16`(%rsp),%rsp
+	movaps	%xmm6,-8-10*16(%rax)
+	movaps	%xmm7,-8-9*16(%rax)
+	movaps	%xmm8,-8-8*16(%rax)
+	movaps	%xmm9,-8-7*16(%rax)
+	movaps	%xmm10,-8-6*16(%rax)
+	movaps	%xmm11,-8-5*16(%rax)
+	movaps	%xmm12,-8-4*16(%rax)
+	movaps	%xmm13,-8-3*16(%rax)
+	movaps	%xmm14,-8-2*16(%rax)
+	movaps	%xmm15,-8-1*16(%rax)
+.Lprologue_shaext:
+___
+$code.=<<___;
+	movdqu	($ctx),$ABCD
+	movd	16($ctx),$E
+	movdqa	K_XX_XX+0x50(%rip),$BSWAP	# byte-n-word swap
+
+	mov	240($key),$rounds
+	sub	$in0,$out
+	movups	($key),$rndkey0			# $key[0]
+	movups	($ivp),$iv			# load IV
+	movups	16($key),$rndkey[0]		# forward reference
+	lea	112($key),$key			# size optimization
+
+	pshufd	\$0b00011011,$ABCD,$ABCD	# flip word order
+	pshufd	\$0b00011011,$E,$E		# flip word order
+	jmp	.Loop_shaext
+
+.align	16
+.Loop_shaext:
+___
+	&$aesenc();
+$code.=<<___;
+	movdqu		($inp),@MSG[0]
+	movdqa		$E,$E_SAVE		# offload $E
+	pshufb		$BSWAP,@MSG[0]
+	movdqu		0x10($inp),@MSG[1]
+	movdqa		$ABCD,$ABCD_SAVE	# offload $ABCD
+___
+	&$aesenc();
+$code.=<<___;
+	pshufb		$BSWAP,@MSG[1]
+
+	paddd		@MSG[0],$E
+	movdqu		0x20($inp),@MSG[2]
+	lea		0x40($inp),$inp
+	pxor		$E_SAVE,@MSG[0]		# black magic
+___
+	&$aesenc();
+$code.=<<___;
+	pxor		$E_SAVE,@MSG[0]		# black magic
+	movdqa		$ABCD,$E_
+	pshufb		$BSWAP,@MSG[2]
+	sha1rnds4	\$0,$E,$ABCD		# 0-3
+	sha1nexte	@MSG[1],$E_
+___
+	&$aesenc();
+$code.=<<___;
+	sha1msg1	@MSG[1],@MSG[0]
+	movdqu		-0x10($inp),@MSG[3]
+	movdqa		$ABCD,$E
+	pshufb		$BSWAP,@MSG[3]
+___
+	&$aesenc();
+$code.=<<___;
+	sha1rnds4	\$0,$E_,$ABCD		# 4-7
+	sha1nexte	@MSG[2],$E
+	pxor		@MSG[2],@MSG[0]
+	sha1msg1	@MSG[2],@MSG[1]
+___
+	&$aesenc();
+
+for($i=2;$i<20-4;$i++) {
+$code.=<<___;
+	movdqa		$ABCD,$E_
+	sha1rnds4	\$`int($i/5)`,$E,$ABCD	# 8-11
+	sha1nexte	@MSG[3],$E_
+___
+	&$aesenc();
+$code.=<<___;
+	sha1msg2	@MSG[3],@MSG[0]
+	pxor		@MSG[3],@MSG[1]
+	sha1msg1	@MSG[3],@MSG[2]
+___
+	($E,$E_)=($E_,$E);
+	push(@MSG,shift(@MSG));
+
+	&$aesenc();
+}
+$code.=<<___;
+	movdqa		$ABCD,$E_
+	sha1rnds4	\$3,$E,$ABCD		# 64-67
+	sha1nexte	@MSG[3],$E_
+	sha1msg2	@MSG[3],@MSG[0]
+	pxor		@MSG[3],@MSG[1]
+___
+	&$aesenc();
+$code.=<<___;
+	movdqa		$ABCD,$E
+	sha1rnds4	\$3,$E_,$ABCD		# 68-71
+	sha1nexte	@MSG[0],$E
+	sha1msg2	@MSG[0],@MSG[1]
+___
+	&$aesenc();
+$code.=<<___;
+	movdqa		$E_SAVE,@MSG[0]
+	movdqa		$ABCD,$E_
+	sha1rnds4	\$3,$E,$ABCD		# 72-75
+	sha1nexte	@MSG[1],$E_
+___
+	&$aesenc();
+$code.=<<___;
+	movdqa		$ABCD,$E
+	sha1rnds4	\$3,$E_,$ABCD		# 76-79
+	sha1nexte	@MSG[0],$E
+___
+	while($r<40)	{ &$aesenc(); }		# remaining aesenc's
+$code.=<<___;
+	dec		$len
+
+	paddd		$ABCD_SAVE,$ABCD
+	movups		$iv,48($out,$in0)	# write output
+	lea		64($in0),$in0
+	jnz		.Loop_shaext
+
+	pshufd	\$0b00011011,$ABCD,$ABCD
+	pshufd	\$0b00011011,$E,$E
+	movups	$iv,($ivp)			# write IV
+	movdqu	$ABCD,($ctx)
+	movd	$E,16($ctx)
+___
+$code.=<<___ if ($win64);
+	movaps	-8-10*16(%rax),%xmm6
+	movaps	-8-9*16(%rax),%xmm7
+	movaps	-8-8*16(%rax),%xmm8
+	movaps	-8-7*16(%rax),%xmm9
+	movaps	-8-6*16(%rax),%xmm10
+	movaps	-8-5*16(%rax),%xmm11
+	movaps	-8-4*16(%rax),%xmm12
+	movaps	-8-3*16(%rax),%xmm13
+	movaps	-8-2*16(%rax),%xmm14
+	movaps	-8-1*16(%rax),%xmm15
+	mov	%rax,%rsp
+.Lepilogue_shaext:
+___
+$code.=<<___;
+	ret
+.cfi_endproc
+.size	aesni_cbc_sha1_enc_shaext,.-aesni_cbc_sha1_enc_shaext
+___
+						}}}
+# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
+#		CONTEXT *context,DISPATCHER_CONTEXT *disp)
+if ($win64) {
+$rec="%rcx";
+$frame="%rdx";
+$context="%r8";
+$disp="%r9";
+
+$code.=<<___;
+.extern	__imp_RtlVirtualUnwind
+.type	ssse3_handler,\@abi-omnipotent
+.align	16
+ssse3_handler:
+	push	%rsi
+	push	%rdi
+	push	%rbx
+	push	%rbp
+	push	%r12
+	push	%r13
+	push	%r14
+	push	%r15
+	pushfq
+	sub	\$64,%rsp
+
+	mov	120($context),%rax	# pull context->Rax
+	mov	248($context),%rbx	# pull context->Rip
+
+	mov	8($disp),%rsi		# disp->ImageBase
+	mov	56($disp),%r11		# disp->HandlerData
+
+	mov	0(%r11),%r10d		# HandlerData[0]
+	lea	(%rsi,%r10),%r10	# prologue label
+	cmp	%r10,%rbx		# context->Rip<prologue label
+	jb	.Lcommon_seh_tail
+
+	mov	152($context),%rax	# pull context->Rsp
+
+	mov	4(%r11),%r10d		# HandlerData[1]
+	lea	(%rsi,%r10),%r10	# epilogue label
+	cmp	%r10,%rbx		# context->Rip>=epilogue label
+	jae	.Lcommon_seh_tail
+___
+$code.=<<___ if ($shaext);
+	lea	aesni_cbc_sha1_enc_shaext(%rip),%r10
+	cmp	%r10,%rbx
+	jb	.Lseh_no_shaext
+
+	lea	(%rax),%rsi
+	lea	512($context),%rdi	# &context.Xmm6
+	mov	\$20,%ecx
+	.long	0xa548f3fc		# cld; rep movsq
+	lea	168(%rax),%rax		# adjust stack pointer
+	jmp	.Lcommon_seh_tail
+.Lseh_no_shaext:
+___
+$code.=<<___;
+	lea	96(%rax),%rsi
+	lea	512($context),%rdi	# &context.Xmm6
+	mov	\$20,%ecx
+	.long	0xa548f3fc		# cld; rep movsq
+	lea	`104+10*16`(%rax),%rax	# adjust stack pointer
+
+	mov	0(%rax),%r15
+	mov	8(%rax),%r14
+	mov	16(%rax),%r13
+	mov	24(%rax),%r12
+	mov	32(%rax),%rbp
+	mov	40(%rax),%rbx
+	lea	48(%rax),%rax
+	mov	%rbx,144($context)	# restore context->Rbx
+	mov	%rbp,160($context)	# restore context->Rbp
+	mov	%r12,216($context)	# restore context->R12
+	mov	%r13,224($context)	# restore context->R13
+	mov	%r14,232($context)	# restore context->R14
+	mov	%r15,240($context)	# restore context->R15
+
+.Lcommon_seh_tail:
+	mov	8(%rax),%rdi
+	mov	16(%rax),%rsi
+	mov	%rax,152($context)	# restore context->Rsp
+	mov	%rsi,168($context)	# restore context->Rsi
+	mov	%rdi,176($context)	# restore context->Rdi
+
+	mov	40($disp),%rdi		# disp->ContextRecord
+	mov	$context,%rsi		# context
+	mov	\$154,%ecx		# sizeof(CONTEXT)
+	.long	0xa548f3fc		# cld; rep movsq
+
+	mov	$disp,%rsi
+	xor	%rcx,%rcx		# arg1, UNW_FLAG_NHANDLER
+	mov	8(%rsi),%rdx		# arg2, disp->ImageBase
+	mov	0(%rsi),%r8		# arg3, disp->ControlPc
+	mov	16(%rsi),%r9		# arg4, disp->FunctionEntry
+	mov	40(%rsi),%r10		# disp->ContextRecord
+	lea	56(%rsi),%r11		# &disp->HandlerData
+	lea	24(%rsi),%r12		# &disp->EstablisherFrame
+	mov	%r10,32(%rsp)		# arg5
+	mov	%r11,40(%rsp)		# arg6
+	mov	%r12,48(%rsp)		# arg7
+	mov	%rcx,56(%rsp)		# arg8, (NULL)
+	call	*__imp_RtlVirtualUnwind(%rip)
+
+	mov	\$1,%eax		# ExceptionContinueSearch
+	add	\$64,%rsp
+	popfq
+	pop	%r15
+	pop	%r14
+	pop	%r13
+	pop	%r12
+	pop	%rbp
+	pop	%rbx
+	pop	%rdi
+	pop	%rsi
+	ret
+.size	ssse3_handler,.-ssse3_handler
+
+.section	.pdata
+.align	4
+	.rva	.LSEH_begin_aesni_cbc_sha1_enc_ssse3
+	.rva	.LSEH_end_aesni_cbc_sha1_enc_ssse3
+	.rva	.LSEH_info_aesni_cbc_sha1_enc_ssse3
+___
+$code.=<<___ if ($avx);
+	.rva	.LSEH_begin_aesni_cbc_sha1_enc_avx
+	.rva	.LSEH_end_aesni_cbc_sha1_enc_avx
+	.rva	.LSEH_info_aesni_cbc_sha1_enc_avx
+___
+$code.=<<___ if ($shaext);
+	.rva	.LSEH_begin_aesni_cbc_sha1_enc_shaext
+	.rva	.LSEH_end_aesni_cbc_sha1_enc_shaext
+	.rva	.LSEH_info_aesni_cbc_sha1_enc_shaext
+___
+$code.=<<___;
+.section	.xdata
+.align	8
+.LSEH_info_aesni_cbc_sha1_enc_ssse3:
+	.byte	9,0,0,0
+	.rva	ssse3_handler
+	.rva	.Lprologue_ssse3,.Lepilogue_ssse3	# HandlerData[]
+___
+$code.=<<___ if ($avx);
+.LSEH_info_aesni_cbc_sha1_enc_avx:
+	.byte	9,0,0,0
+	.rva	ssse3_handler
+	.rva	.Lprologue_avx,.Lepilogue_avx		# HandlerData[]
+___
+$code.=<<___ if ($shaext);
+.LSEH_info_aesni_cbc_sha1_enc_shaext:
+	.byte	9,0,0,0
+	.rva	ssse3_handler
+	.rva	.Lprologue_shaext,.Lepilogue_shaext	# HandlerData[]
+___
+}
+
+####################################################################
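+# The helpers below hand-assemble SHA and AES-NI instructions for
+# assemblers too old to recognize the mnemonics.  rex() prepends a REX
+# prefix when either xmm index is >= 8: 0x04 sets REX.R (extending the
+# ModR/M reg field) and 0x01 sets REX.B (extending the r/m field).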
+sub rex {
+  local *opcode=shift;
+  my ($dst,$src)=@_;
+  my $rex=0;
+
+    $rex|=0x04			if($dst>=8);
+    $rex|=0x01			if($src>=8);
+    unshift @opcode,$rex|0x40	if($rex);
+}
+
+sub sha1rnds4 {
+    if (@_[0] =~ /\$([x0-9a-f]+),\s*%xmm([0-9]+),\s*%xmm([0-9]+)/) {
+      my @opcode=(0x0f,0x3a,0xcc);
+	rex(\@opcode,$3,$2);
+	push @opcode,0xc0|($2&7)|(($3&7)<<3);		# ModR/M
+	my $c=$1;
+	push @opcode,$c=~/^0/?oct($c):$c;
+	return ".byte\t".join(',',@opcode);
+    } else {
+	return "sha1rnds4\t".@_[0];
+    }
+}
+
+sub sha1op38 {
+    my $instr = shift;
+    my %opcodelet = (
+		"sha1nexte" => 0xc8,
+		"sha1msg1"  => 0xc9,
+		"sha1msg2"  => 0xca	);
+
+    if (defined($opcodelet{$instr}) && @_[0] =~ /%xmm([0-9]+),\s*%xmm([0-9]+)/) {
+      my @opcode=(0x0f,0x38);
+	rex(\@opcode,$2,$1);
+	push @opcode,$opcodelet{$instr};
+	push @opcode,0xc0|($1&7)|(($2&7)<<3);		# ModR/M
+	return ".byte\t".join(',',@opcode);
+    } else {
+	return $instr."\t".@_[0];
+    }
+}
+
+sub aesni {
+  my $line=shift;
+  my @opcode=(0x0f,0x38);
+
+    if ($line=~/(aes[a-z]+)\s+%xmm([0-9]+),\s*%xmm([0-9]+)/) {
+	my %opcodelet = (
+		"aesenc" => 0xdc,	"aesenclast" => 0xdd,
+		"aesdec" => 0xde,	"aesdeclast" => 0xdf
+	);
+	return undef if (!defined($opcodelet{$1}));
+	rex(\@opcode,$3,$2);
+	push @opcode,$opcodelet{$1},0xc0|($2&7)|(($3&7)<<3);	# ModR/M
+	unshift @opcode,0x66;
+	return ".byte\t".join(',',@opcode);
+    }
+    return $line;
+}
+
+foreach (split("\n",$code)) {
+        s/\`([^\`]*)\`/eval $1/geo;
+
+	s/\b(sha1rnds4)\s+(.*)/sha1rnds4($2)/geo		or
+	s/\b(sha1[^\s]*)\s+(.*)/sha1op38($1,$2)/geo		or
+	s/\b(aes.*%xmm[0-9]+).*$/aesni($1)/geo;
+
+	print $_,"\n";
+}
+close STDOUT or die "error closing STDOUT: $!";
diff --git a/ap/lib/libssl/openssl-1.1.1o/crypto/aes/asm/aesni-sha256-x86_64.pl b/ap/lib/libssl/openssl-1.1.1o/crypto/aes/asm/aesni-sha256-x86_64.pl
new file mode 100644
index 0000000..ff9b185
--- /dev/null
+++ b/ap/lib/libssl/openssl-1.1.1o/crypto/aes/asm/aesni-sha256-x86_64.pl
@@ -0,0 +1,1802 @@
+#! /usr/bin/env perl
+# Copyright 2013-2020 The OpenSSL Project Authors. All Rights Reserved.
+#
+# Licensed under the OpenSSL license (the "License").  You may not use
+# this file except in compliance with the License.  You can obtain a copy
+# in the file LICENSE in the source distribution or at
+# https://www.openssl.org/source/license.html
+
+#
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+#
+# January 2013
+#
+# This is an AESNI-CBC+SHA256 stitch implementation. The idea, as spelled
+# out in http://download.intel.com/design/intarch/papers/323686.pdf, is
+# that since AESNI-CBC encryption exhibits *very* low instruction-level
+# parallelism, interleaving it with another algorithm allows processor
+# resources to be utilized better, achieving higher performance. SHA256
+# instruction sequences(*) are taken from sha512-x86_64.pl and the
+# AES-NI code is woven into them. As SHA256 dominates execution time,
+# stitch performance does not depend on AES key length. Below are
+# performance numbers in cycles per processed byte, less is better,
+# for standalone AESNI-CBC encrypt, standalone SHA256, and stitched
+# subroutine:
+#
+#		 AES-128/-192/-256+SHA256   this(**)	gain
+# Sandy Bridge	    5.05/6.05/7.05+11.6	    13.0	+28%/36%/43%
+# Ivy Bridge	    5.05/6.05/7.05+10.3	    11.6	+32%/41%/50%
+# Haswell	    4.43/5.29/6.19+7.80	    8.79	+39%/49%/59%
+# Skylake	    2.62/3.14/3.62+7.70	    8.10	+27%/34%/40%
+# Bulldozer	    5.77/6.89/8.00+13.7	    13.7	+42%/50%/58%
+# Ryzen(***)	    2.71/-/3.71+2.05	    2.74/-/3.73	+74%/-/54%
+# Goldmont(***)	    3.82/-/5.35+4.16	    4.73/-/5.94	+69%/-/60%
+#
+# (*)	there are XOP, AVX1 and AVX2 code paths, meaning that
+#	Westmere is left out of the loop; the gain there was not
+#	estimated to be high enough to justify the effort;
+# (**)	these are EVP-free results; results obtained with 'speed
+#	-evp aes-256-cbc-hmac-sha256' will vary by a percent or two;
+# (***)	these are SHAEXT results;
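+#
+# Mechanically the stitch mirrors aesni-sha1-x86_64.pl: each SHA256
+# round body (body_00_15 below) is a list of instruction fragments,
+# and one fragment of @aesni_cbc_block is spliced into every round, so
+# the AES-CBC dependency chain advances in the shadow of the SHA256
+# ALU work.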
+
+$flavour = shift;
+$output  = shift;
+if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
+
+$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
+( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
+die "can't locate x86_64-xlate.pl";
+
+if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
+		=~ /GNU assembler version ([2-9]\.[0-9]+)/) {
+	$avx = ($1>=2.19) + ($1>=2.22);
+}
+
+if (!$avx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
+	   `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/) {
+	$avx = ($1>=2.09) + ($1>=2.10);
+}
+
+if (!$avx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
+	   `ml64 2>&1` =~ /Version ([0-9]+)\./) {
+	$avx = ($1>=10) + ($1>=12);
+}
+
+if (!$avx && `$ENV{CC} -v 2>&1` =~ /((?:clang|LLVM) version|.*based on LLVM) ([0-9]+\.[0-9]+)/) {
+	$avx = ($2>=3.0) + ($2>3.0);
+}
+
+$shaext=$avx;	### set to zero if compiling for 1.0.1
+$avx=1		if (!$shaext && $avx);
+
+open OUT,"| \"$^X\" \"$xlate\" $flavour \"$output\"";
+*STDOUT=*OUT;
+
+$func="aesni_cbc_sha256_enc";
+$TABLE="K256";
+$SZ=4;
+@ROT=($A,$B,$C,$D,$E,$F,$G,$H)=("%eax","%ebx","%ecx","%edx",
+				"%r8d","%r9d","%r10d","%r11d");
+($T1,$a0,$a1,$a2,$a3)=("%r12d","%r13d","%r14d","%r15d","%esi");
+@Sigma0=( 2,13,22);
+@Sigma1=( 6,11,25);
+@sigma0=( 7,18, 3);
+@sigma1=(17,19,10);
+$rounds=64;
+
+########################################################################
+# void aesni_cbc_sha256_enc(const void *inp,
+#			void *out,
+#			size_t length,
+#			const AES_KEY *key,
+#			unsigned char *iv,
+#			SHA256_CTX *ctx,
+#			const void *in0);
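+#
+# A hypothetical caller sketch (illustration only; the EVP glue in
+# e_aes_cbc_hmac_sha256.c is the real consumer).  Judging by the
+# 'shl \$6,$len' in the prologues below, |length| counts 64-byte
+# blocks:
+#
+#	AES_KEY aes;  SHA256_CTX sha;
+#	AES_set_encrypt_key(key_bytes, 256, &aes);
+#	SHA256_Init(&sha);
+#	aesni_cbc_sha256_enc(cipher_in, cipher_out, blocks,
+#			     &aes, iv, &sha, hash_in);
+#
+# When the first argument is NULL the routine merely reports in %eax
+# whether a stitched code path is available (see .Lprobe below).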
+($inp,  $out,  $len,  $key,  $ivp, $ctx, $in0) =
+("%rdi","%rsi","%rdx","%rcx","%r8","%r9","%r10");
+
+$Tbl="%rbp";
+
+$_inp="16*$SZ+0*8(%rsp)";
+$_out="16*$SZ+1*8(%rsp)";
+$_end="16*$SZ+2*8(%rsp)";
+$_key="16*$SZ+3*8(%rsp)";
+$_ivp="16*$SZ+4*8(%rsp)";
+$_ctx="16*$SZ+5*8(%rsp)";
+$_in0="16*$SZ+6*8(%rsp)";
+$_rsp="`16*$SZ+7*8`(%rsp)";
+$framesz=16*$SZ+8*8;
+
+$code=<<___;
+.text
+
+.extern	OPENSSL_ia32cap_P
+.globl	$func
+.type	$func,\@abi-omnipotent
+.align	16
+$func:
+.cfi_startproc
+___
+						if ($avx) {
+$code.=<<___;
+	lea	OPENSSL_ia32cap_P(%rip),%r11
+	mov	\$1,%eax
+	cmp	\$0,`$win64?"%rcx":"%rdi"`
+	je	.Lprobe
+	mov	0(%r11),%eax
+	mov	4(%r11),%r10
+___
+$code.=<<___ if ($shaext);
+	bt	\$61,%r10			# check for SHA
+	jc	${func}_shaext
+___
+$code.=<<___;
+	mov	%r10,%r11
+	shr	\$32,%r11
+
+	test	\$`1<<11`,%r10d			# check for XOP
+	jnz	${func}_xop
+___
+$code.=<<___ if ($avx>1);
+	and	\$`1<<8|1<<5|1<<3`,%r11d	# check for BMI2+AVX2+BMI1
+	cmp	\$`1<<8|1<<5|1<<3`,%r11d
+	je	${func}_avx2
+___
+$code.=<<___;
+	and	\$`1<<28`,%r10d			# check for AVX
+	jnz	${func}_avx
+	ud2
+___
+						}
+$code.=<<___;
+	xor	%eax,%eax
+	cmp	\$0,`$win64?"%rcx":"%rdi"`
+	je	.Lprobe
+	ud2
+.Lprobe:
+	ret
+.cfi_endproc
+.size	$func,.-$func
+
+.align	64
+.type	$TABLE,\@object
+$TABLE:
+	.long	0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
+	.long	0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
+	.long	0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
+	.long	0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
+	.long	0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
+	.long	0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
+	.long	0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
+	.long	0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
+	.long	0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
+	.long	0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
+	.long	0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
+	.long	0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
+	.long	0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
+	.long	0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
+	.long	0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
+	.long	0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
+	.long	0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
+	.long	0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
+	.long	0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
+	.long	0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
+	.long	0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
+	.long	0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
+	.long	0xd192e819,0xd6990624,0xf40e3585,0x106aa070
+	.long	0xd192e819,0xd6990624,0xf40e3585,0x106aa070
+	.long	0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
+	.long	0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
+	.long	0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
+	.long	0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
+	.long	0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
+	.long	0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
+	.long	0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
+	.long	0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
+
+	.long	0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f
+	.long	0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f
+	.long	0,0,0,0,   0,0,0,0,   -1,-1,-1,-1
+	.long	0,0,0,0,   0,0,0,0
+	.asciz	"AESNI-CBC+SHA256 stitch for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
+.align	64
+___
+
+######################################################################
+# SIMD code paths
+#
+{{{
+($iv,$inout,$roundkey,$temp,
+ $mask10,$mask12,$mask14,$offload)=map("%xmm$_",(8..15));
+
+$aesni_cbc_idx=0;
+@aesni_cbc_block = (
+##	&vmovdqu	($roundkey,"0x00-0x80($inp)");
+##	&vmovdqu	($inout,($inp));
+##	&mov		($_inp,$inp);
+
+	'&vpxor		($inout,$inout,$roundkey);'.
+	' &vmovdqu	($roundkey,"0x10-0x80($inp)");',
+
+	'&vpxor		($inout,$inout,$iv);',
+
+	'&vaesenc	($inout,$inout,$roundkey);'.
+	' &vmovdqu	($roundkey,"0x20-0x80($inp)");',
+
+	'&vaesenc	($inout,$inout,$roundkey);'.
+	' &vmovdqu	($roundkey,"0x30-0x80($inp)");',
+
+	'&vaesenc	($inout,$inout,$roundkey);'.
+	' &vmovdqu	($roundkey,"0x40-0x80($inp)");',
+
+	'&vaesenc	($inout,$inout,$roundkey);'.
+	' &vmovdqu	($roundkey,"0x50-0x80($inp)");',
+
+	'&vaesenc	($inout,$inout,$roundkey);'.
+	' &vmovdqu	($roundkey,"0x60-0x80($inp)");',
+
+	'&vaesenc	($inout,$inout,$roundkey);'.
+	' &vmovdqu	($roundkey,"0x70-0x80($inp)");',
+
+	'&vaesenc	($inout,$inout,$roundkey);'.
+	' &vmovdqu	($roundkey,"0x80-0x80($inp)");',
+
+	'&vaesenc	($inout,$inout,$roundkey);'.
+	' &vmovdqu	($roundkey,"0x90-0x80($inp)");',
+
+	'&vaesenc	($inout,$inout,$roundkey);'.
+	' &vmovdqu	($roundkey,"0xa0-0x80($inp)");',
+
+	'&vaesenclast	($temp,$inout,$roundkey);'.
+	' &vaesenc	($inout,$inout,$roundkey);'.
+	' &vmovdqu	($roundkey,"0xb0-0x80($inp)");',
+
+	'&vpand		($iv,$temp,$mask10);'.
+	' &vaesenc	($inout,$inout,$roundkey);'.
+	' &vmovdqu	($roundkey,"0xc0-0x80($inp)");',
+
+	'&vaesenclast	($temp,$inout,$roundkey);'.
+	' &vaesenc	($inout,$inout,$roundkey);'.
+	' &vmovdqu	($roundkey,"0xd0-0x80($inp)");',
+
+	'&vpand		($temp,$temp,$mask12);'.
+	' &vaesenc	($inout,$inout,$roundkey);'.
+	 '&vmovdqu	($roundkey,"0xe0-0x80($inp)");',
+
+	'&vpor		($iv,$iv,$temp);'.
+	' &vaesenclast	($temp,$inout,$roundkey);'.
+	' &vmovdqu	($roundkey,"0x00-0x80($inp)");'
+
+##	&mov		($inp,$_inp);
+##	&mov		($out,$_out);
+##	&vpand		($temp,$temp,$mask14);
+##	&vpor		($iv,$iv,$temp);
+##	&vmovdqu	($iv,"($out,$inp)");
+##	&lea		($inp,"16($inp)");
+);
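+
+# The three speculative vaesenclast results above are blended through
+# $mask10/$mask12/$mask14 (vpand/vpor), with the masks chosen at setup
+# from the table tail according to the key's round count (see the
+# vmovdqa loads off (%r13,%r14,8) below), so one instruction stream
+# serves AES-128/192/256 alike.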
+
+my $a4=$T1;
+my ($a,$b,$c,$d,$e,$f,$g,$h);
+
+sub AUTOLOAD()		# thunk [simplified] 32-bit style perlasm
+{ my $opcode = $AUTOLOAD; $opcode =~ s/.*:://;
+  my $arg = pop;
+    $arg = "\$$arg" if ($arg*1 eq $arg);
+    $code .= "\t$opcode\t".join(',',$arg,reverse @_)."\n";
+}
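+
+# AUTOLOAD turns any otherwise-undefined &mnemonic(dst,src,...) call
+# into a line of assembly appended to $code: arguments are emitted in
+# reverse (AT&T) order and a bare number is given a '\$' immediate
+# prefix.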
+
+sub body_00_15 () {
+	(
+	'($a,$b,$c,$d,$e,$f,$g,$h)=@ROT;'.
+
+	'&ror	($a0,$Sigma1[2]-$Sigma1[1])',
+	'&mov	($a,$a1)',
+	'&mov	($a4,$f)',
+
+	'&xor	($a0,$e)',
+	'&ror	($a1,$Sigma0[2]-$Sigma0[1])',
+	'&xor	($a4,$g)',			# f^g
+
+	'&ror	($a0,$Sigma1[1]-$Sigma1[0])',
+	'&xor	($a1,$a)',
+	'&and	($a4,$e)',			# (f^g)&e
+
+	@aesni_cbc_block[$aesni_cbc_idx++].
+	'&xor	($a0,$e)',
+	'&add	($h,$SZ*($i&15)."(%rsp)")',	# h+=X[i]+K[i]
+	'&mov	($a2,$a)',
+
+	'&ror	($a1,$Sigma0[1]-$Sigma0[0])',
+	'&xor	($a4,$g)',			# Ch(e,f,g)=((f^g)&e)^g
+	'&xor	($a2,$b)',			# a^b, b^c in next round
+
+	'&ror	($a0,$Sigma1[0])',		# Sigma1(e)
+	'&add	($h,$a4)',			# h+=Ch(e,f,g)
+	'&and	($a3,$a2)',			# (b^c)&(a^b)
+
+	'&xor	($a1,$a)',
+	'&add	($h,$a0)',			# h+=Sigma1(e)
+	'&xor	($a3,$b)',			# Maj(a,b,c)=Ch(a^b,c,b)
+
+	'&add	($d,$h)',			# d+=h
+	'&ror	($a1,$Sigma0[0])',		# Sigma0(a)
+	'&add	($h,$a3)',			# h+=Maj(a,b,c)
+
+	'&mov	($a0,$d)',
+	'&add	($a1,$h);'.			# h+=Sigma0(a)
+	'($a2,$a3) = ($a3,$a2); unshift(@ROT,pop(@ROT)); $i++;'
+	);
+}
+
+if ($avx) {{
+######################################################################
+# XOP code path
+#
+$code.=<<___;
+.type	${func}_xop,\@function,6
+.align	64
+${func}_xop:
+.cfi_startproc
+.Lxop_shortcut:
+	mov	`($win64?56:8)`(%rsp),$in0	# load 7th parameter
+	mov	%rsp,%rax		# copy %rsp
+.cfi_def_cfa_register	%rax
+	push	%rbx
+.cfi_push	%rbx
+	push	%rbp
+.cfi_push	%rbp
+	push	%r12
+.cfi_push	%r12
+	push	%r13
+.cfi_push	%r13
+	push	%r14
+.cfi_push	%r14
+	push	%r15
+.cfi_push	%r15
+	sub	\$`$framesz+$win64*16*10`,%rsp
+	and	\$-64,%rsp		# align stack frame
+
+	shl	\$6,$len
+	sub	$inp,$out		# re-bias
+	sub	$inp,$in0
+	add	$inp,$len		# end of input
+
+	#mov	$inp,$_inp		# saved later
+	mov	$out,$_out
+	mov	$len,$_end
+	#mov	$key,$_key		# remains resident in $inp register
+	mov	$ivp,$_ivp
+	mov	$ctx,$_ctx
+	mov	$in0,$_in0
+	mov	%rax,$_rsp
+.cfi_cfa_expression	$_rsp,deref,+8
+___
+$code.=<<___ if ($win64);
+	movaps	%xmm6,`$framesz+16*0`(%rsp)
+	movaps	%xmm7,`$framesz+16*1`(%rsp)
+	movaps	%xmm8,`$framesz+16*2`(%rsp)
+	movaps	%xmm9,`$framesz+16*3`(%rsp)
+	movaps	%xmm10,`$framesz+16*4`(%rsp)
+	movaps	%xmm11,`$framesz+16*5`(%rsp)
+	movaps	%xmm12,`$framesz+16*6`(%rsp)
+	movaps	%xmm13,`$framesz+16*7`(%rsp)
+	movaps	%xmm14,`$framesz+16*8`(%rsp)
+	movaps	%xmm15,`$framesz+16*9`(%rsp)
+___
+$code.=<<___;
+.Lprologue_xop:
+	vzeroall
+
+	mov	$inp,%r12		# borrow $a4
+	lea	0x80($key),$inp		# size optimization, reassign
+	lea	$TABLE+`$SZ*2*$rounds+32`(%rip),%r13	# borrow $a0
+	mov	0xf0-0x80($inp),%r14d	# rounds, borrow $a1
+	mov	$ctx,%r15		# borrow $a2
+	mov	$in0,%rsi		# borrow $a3
+	vmovdqu	($ivp),$iv		# load IV
+	sub	\$9,%r14
+
+	mov	$SZ*0(%r15),$A
+	mov	$SZ*1(%r15),$B
+	mov	$SZ*2(%r15),$C
+	mov	$SZ*3(%r15),$D
+	mov	$SZ*4(%r15),$E
+	mov	$SZ*5(%r15),$F
+	mov	$SZ*6(%r15),$G
+	mov	$SZ*7(%r15),$H
+
+	vmovdqa	0x00(%r13,%r14,8),$mask14
+	vmovdqa	0x10(%r13,%r14,8),$mask12
+	vmovdqa	0x20(%r13,%r14,8),$mask10
+	vmovdqu	0x00-0x80($inp),$roundkey
+	jmp	.Lloop_xop
+___
+					if ($SZ==4) {	# SHA256
+    my @X = map("%xmm$_",(0..3));
+    my ($t0,$t1,$t2,$t3) = map("%xmm$_",(4..7));
+
+$code.=<<___;
+.align	16
+.Lloop_xop:
+	vmovdqa	$TABLE+`$SZ*2*$rounds`(%rip),$t3
+	vmovdqu	0x00(%rsi,%r12),@X[0]
+	vmovdqu	0x10(%rsi,%r12),@X[1]
+	vmovdqu	0x20(%rsi,%r12),@X[2]
+	vmovdqu	0x30(%rsi,%r12),@X[3]
+	vpshufb	$t3,@X[0],@X[0]
+	lea	$TABLE(%rip),$Tbl
+	vpshufb	$t3,@X[1],@X[1]
+	vpshufb	$t3,@X[2],@X[2]
+	vpaddd	0x00($Tbl),@X[0],$t0
+	vpshufb	$t3,@X[3],@X[3]
+	vpaddd	0x20($Tbl),@X[1],$t1
+	vpaddd	0x40($Tbl),@X[2],$t2
+	vpaddd	0x60($Tbl),@X[3],$t3
+	vmovdqa	$t0,0x00(%rsp)
+	mov	$A,$a1
+	vmovdqa	$t1,0x10(%rsp)
+	mov	$B,$a3
+	vmovdqa	$t2,0x20(%rsp)
+	xor	$C,$a3			# magic
+	vmovdqa	$t3,0x30(%rsp)
+	mov	$E,$a0
+	jmp	.Lxop_00_47
+
+.align	16
+.Lxop_00_47:
+	sub	\$-16*2*$SZ,$Tbl	# size optimization
+	vmovdqu	(%r12),$inout		# $a4
+	mov	%r12,$_inp		# $a4
+___
+sub XOP_256_00_47 () {
+my $j = shift;
+my $body = shift;
+my @X = @_;
+my @insns = (&$body,&$body,&$body,&$body);	# 104 instructions
+
+	&vpalignr	($t0,@X[1],@X[0],$SZ);	# X[1..4]
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	 &vpalignr	($t3,@X[3],@X[2],$SZ);	# X[9..12]
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	&vprotd		($t1,$t0,8*$SZ-$sigma0[1]);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	&vpsrld		($t0,$t0,$sigma0[2]);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	 &vpaddd	(@X[0],@X[0],$t3);	# X[0..3] += X[9..12]
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	&vprotd		($t2,$t1,$sigma0[1]-$sigma0[0]);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	&vpxor		($t0,$t0,$t1);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	 &vprotd	($t3,@X[3],8*$SZ-$sigma1[1]);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	&vpxor		($t0,$t0,$t2);		# sigma0(X[1..4])
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	 &vpsrld	($t2,@X[3],$sigma1[2]);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	&vpaddd		(@X[0],@X[0],$t0);	# X[0..3] += sigma0(X[1..4])
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	 &vprotd	($t1,$t3,$sigma1[1]-$sigma1[0]);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	 &vpxor		($t3,$t3,$t2);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	 &vpxor		($t3,$t3,$t1);		# sigma1(X[14..15])
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	&vpsrldq	($t3,$t3,8);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	&vpaddd		(@X[0],@X[0],$t3);	# X[0..1] += sigma1(X[14..15])
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	 &vprotd	($t3,@X[0],8*$SZ-$sigma1[1]);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	 &vpsrld	($t2,@X[0],$sigma1[2]);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	 &vprotd	($t1,$t3,$sigma1[1]-$sigma1[0]);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	 &vpxor		($t3,$t3,$t2);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	 &vpxor		($t3,$t3,$t1);		# sigma1(X[16..17])
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	&vpslldq	($t3,$t3,8);		# 22 instructions
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	&vpaddd		(@X[0],@X[0],$t3);	# X[2..3] += sigma1(X[16..17])
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	&vpaddd		($t2,@X[0],16*2*$j."($Tbl)");
+	  foreach (@insns) { eval; }		# remaining instructions
+	&vmovdqa	(16*$j."(%rsp)",$t2);
+}
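+
+# The scheduling idiom above: &$body is invoked four times up front,
+# yielding 4*26 = 104 quoted scalar-round instructions, which are then
+# drained in pairs between the SIMD statements.  A minimal sketch of
+# the pattern (hypothetical emit() and @simd_steps, illustration only):
+#
+#	my @insns = (&$body, &$body, &$body, &$body);  # quoted strings
+#	foreach my $simd (@simd_steps) {
+#	    emit($simd);		# one message-schedule instruction
+#	    eval(shift(@insns));	# interleave scalar SHA-256 ops
+#	    eval(shift(@insns));
+#	}
+#	foreach (@insns) { eval; }	# drain the remainder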
+
+    $aesni_cbc_idx=0;
+    for ($i=0,$j=0; $j<4; $j++) {
+	&XOP_256_00_47($j,\&body_00_15,@X);
+	push(@X,shift(@X));			# rotate(@X)
+    }
+    	&mov		("%r12",$_inp);		# borrow $a4
+	&vpand		($temp,$temp,$mask14);
+	&mov		("%r15",$_out);		# borrow $a2
+	&vpor		($iv,$iv,$temp);
+	&vmovdqu	("(%r15,%r12)",$iv);	# write output
+	&lea		("%r12","16(%r12)");	# inp++
+
+	&cmpb	($SZ-1+16*2*$SZ."($Tbl)",0);
+	&jne	(".Lxop_00_47");
+
+	&vmovdqu	($inout,"(%r12)");
+	&mov		($_inp,"%r12");
+
+    $aesni_cbc_idx=0;
+    for ($i=0; $i<16; ) {
+	foreach(body_00_15()) { eval; }
+    }
+					}
+$code.=<<___;
+	mov	$_inp,%r12		# borrow $a4
+	mov	$_out,%r13		# borrow $a0
+	mov	$_ctx,%r15		# borrow $a2
+	mov	$_in0,%rsi		# borrow $a3
+
+	vpand	$mask14,$temp,$temp
+	mov	$a1,$A
+	vpor	$temp,$iv,$iv
+	vmovdqu	$iv,(%r13,%r12)		# write output
+	lea	16(%r12),%r12		# inp++
+
+	add	$SZ*0(%r15),$A
+	add	$SZ*1(%r15),$B
+	add	$SZ*2(%r15),$C
+	add	$SZ*3(%r15),$D
+	add	$SZ*4(%r15),$E
+	add	$SZ*5(%r15),$F
+	add	$SZ*6(%r15),$G
+	add	$SZ*7(%r15),$H
+
+	cmp	$_end,%r12
+
+	mov	$A,$SZ*0(%r15)
+	mov	$B,$SZ*1(%r15)
+	mov	$C,$SZ*2(%r15)
+	mov	$D,$SZ*3(%r15)
+	mov	$E,$SZ*4(%r15)
+	mov	$F,$SZ*5(%r15)
+	mov	$G,$SZ*6(%r15)
+	mov	$H,$SZ*7(%r15)
+
+	jb	.Lloop_xop
+
+	mov	$_ivp,$ivp
+	mov	$_rsp,%rsi
+.cfi_def_cfa	%rsi,8
+	vmovdqu	$iv,($ivp)		# output IV
+	vzeroall
+___
+$code.=<<___ if ($win64);
+	movaps	`$framesz+16*0`(%rsp),%xmm6
+	movaps	`$framesz+16*1`(%rsp),%xmm7
+	movaps	`$framesz+16*2`(%rsp),%xmm8
+	movaps	`$framesz+16*3`(%rsp),%xmm9
+	movaps	`$framesz+16*4`(%rsp),%xmm10
+	movaps	`$framesz+16*5`(%rsp),%xmm11
+	movaps	`$framesz+16*6`(%rsp),%xmm12
+	movaps	`$framesz+16*7`(%rsp),%xmm13
+	movaps	`$framesz+16*8`(%rsp),%xmm14
+	movaps	`$framesz+16*9`(%rsp),%xmm15
+___
+$code.=<<___;
+	mov	-48(%rsi),%r15
+.cfi_restore	%r15
+	mov	-40(%rsi),%r14
+.cfi_restore	%r14
+	mov	-32(%rsi),%r13
+.cfi_restore	%r13
+	mov	-24(%rsi),%r12
+.cfi_restore	%r12
+	mov	-16(%rsi),%rbp
+.cfi_restore	%rbp
+	mov	-8(%rsi),%rbx
+.cfi_restore	%rbx
+	lea	(%rsi),%rsp
+.cfi_def_cfa_register	%rsp
+.Lepilogue_xop:
+	ret
+.cfi_endproc
+.size	${func}_xop,.-${func}_xop
+___
+######################################################################
+# AVX+shrd code path
+#
+local *ror = sub { &shrd($_[0],@_) };
+
+$code.=<<___;
+.type	${func}_avx,\@function,6
+.align	64
+${func}_avx:
+.cfi_startproc
+.Lavx_shortcut:
+	mov	`($win64?56:8)`(%rsp),$in0	# load 7th parameter
+	mov	%rsp,%rax		# copy %rsp
+.cfi_def_cfa_register	%rax
+	push	%rbx
+.cfi_push	%rbx
+	push	%rbp
+.cfi_push	%rbp
+	push	%r12
+.cfi_push	%r12
+	push	%r13
+.cfi_push	%r13
+	push	%r14
+.cfi_push	%r14
+	push	%r15
+.cfi_push	%r15
+	sub	\$`$framesz+$win64*16*10`,%rsp
+	and	\$-64,%rsp		# align stack frame
+
+	shl	\$6,$len
+	sub	$inp,$out		# re-bias
+	sub	$inp,$in0
+	add	$inp,$len		# end of input
+
+	#mov	$inp,$_inp		# saved later
+	mov	$out,$_out
+	mov	$len,$_end
+	#mov	$key,$_key		# remains resident in $inp register
+	mov	$ivp,$_ivp
+	mov	$ctx,$_ctx
+	mov	$in0,$_in0
+	mov	%rax,$_rsp
+.cfi_cfa_expression	$_rsp,deref,+8
+___
+$code.=<<___ if ($win64);
+	movaps	%xmm6,`$framesz+16*0`(%rsp)
+	movaps	%xmm7,`$framesz+16*1`(%rsp)
+	movaps	%xmm8,`$framesz+16*2`(%rsp)
+	movaps	%xmm9,`$framesz+16*3`(%rsp)
+	movaps	%xmm10,`$framesz+16*4`(%rsp)
+	movaps	%xmm11,`$framesz+16*5`(%rsp)
+	movaps	%xmm12,`$framesz+16*6`(%rsp)
+	movaps	%xmm13,`$framesz+16*7`(%rsp)
+	movaps	%xmm14,`$framesz+16*8`(%rsp)
+	movaps	%xmm15,`$framesz+16*9`(%rsp)
+___
+$code.=<<___;
+.Lprologue_avx:
+	vzeroall
+
+	mov	$inp,%r12		# borrow $a4
+	lea	0x80($key),$inp		# size optimization, reassign
+	lea	$TABLE+`$SZ*2*$rounds+32`(%rip),%r13	# borrow $a0
+	mov	0xf0-0x80($inp),%r14d	# rounds, borrow $a1
+	mov	$ctx,%r15		# borrow $a2
+	mov	$in0,%rsi		# borrow $a3
+	vmovdqu	($ivp),$iv		# load IV
+	sub	\$9,%r14
+
+	mov	$SZ*0(%r15),$A
+	mov	$SZ*1(%r15),$B
+	mov	$SZ*2(%r15),$C
+	mov	$SZ*3(%r15),$D
+	mov	$SZ*4(%r15),$E
+	mov	$SZ*5(%r15),$F
+	mov	$SZ*6(%r15),$G
+	mov	$SZ*7(%r15),$H
+
+	vmovdqa	0x00(%r13,%r14,8),$mask14
+	vmovdqa	0x10(%r13,%r14,8),$mask12
+	vmovdqa	0x20(%r13,%r14,8),$mask10
+	vmovdqu	0x00-0x80($inp),$roundkey
+___
+					if ($SZ==4) {	# SHA256
+    my @X = map("%xmm$_",(0..3));
+    my ($t0,$t1,$t2,$t3) = map("%xmm$_",(4..7));
+
+$code.=<<___;
+	jmp	.Lloop_avx
+.align	16
+.Lloop_avx:
+	vmovdqa	$TABLE+`$SZ*2*$rounds`(%rip),$t3
+	vmovdqu	0x00(%rsi,%r12),@X[0]
+	vmovdqu	0x10(%rsi,%r12),@X[1]
+	vmovdqu	0x20(%rsi,%r12),@X[2]
+	vmovdqu	0x30(%rsi,%r12),@X[3]
+	vpshufb	$t3,@X[0],@X[0]
+	lea	$TABLE(%rip),$Tbl
+	vpshufb	$t3,@X[1],@X[1]
+	vpshufb	$t3,@X[2],@X[2]
+	vpaddd	0x00($Tbl),@X[0],$t0
+	vpshufb	$t3,@X[3],@X[3]
+	vpaddd	0x20($Tbl),@X[1],$t1
+	vpaddd	0x40($Tbl),@X[2],$t2
+	vpaddd	0x60($Tbl),@X[3],$t3
+	vmovdqa	$t0,0x00(%rsp)
+	mov	$A,$a1
+	vmovdqa	$t1,0x10(%rsp)
+	mov	$B,$a3
+	vmovdqa	$t2,0x20(%rsp)
+	xor	$C,$a3			# magic
+	vmovdqa	$t3,0x30(%rsp)
+	mov	$E,$a0
+	jmp	.Lavx_00_47
+
+.align	16
+.Lavx_00_47:
+	sub	\$-16*2*$SZ,$Tbl	# size optimization
+	vmovdqu	(%r12),$inout		# $a4
+	mov	%r12,$_inp		# $a4
+___
+sub Xupdate_256_AVX () {
+	(
+	'&vpalignr	($t0,@X[1],@X[0],$SZ)',	# X[1..4]
+	 '&vpalignr	($t3,@X[3],@X[2],$SZ)',	# X[9..12]
+	'&vpsrld	($t2,$t0,$sigma0[0]);',
+	 '&vpaddd	(@X[0],@X[0],$t3)',	# X[0..3] += X[9..12]
+	'&vpsrld	($t3,$t0,$sigma0[2])',
+	'&vpslld	($t1,$t0,8*$SZ-$sigma0[1]);',
+	'&vpxor		($t0,$t3,$t2)',
+	 '&vpshufd	($t3,@X[3],0b11111010)',# X[14..15]
+	'&vpsrld	($t2,$t2,$sigma0[1]-$sigma0[0]);',
+	'&vpxor		($t0,$t0,$t1)',
+	'&vpslld	($t1,$t1,$sigma0[1]-$sigma0[0]);',
+	'&vpxor		($t0,$t0,$t2)',
+	 '&vpsrld	($t2,$t3,$sigma1[2]);',
+	'&vpxor		($t0,$t0,$t1)',		# sigma0(X[1..4])
+	 '&vpsrlq	($t3,$t3,$sigma1[0]);',
+	'&vpaddd	(@X[0],@X[0],$t0)',	# X[0..3] += sigma0(X[1..4])
+	 '&vpxor	($t2,$t2,$t3);',
+	 '&vpsrlq	($t3,$t3,$sigma1[1]-$sigma1[0])',
+	 '&vpxor	($t2,$t2,$t3)',		# sigma1(X[14..15])
+	 '&vpshufd	($t2,$t2,0b10000100)',
+	 '&vpsrldq	($t2,$t2,8)',
+	'&vpaddd	(@X[0],@X[0],$t2)',	# X[0..1] += sigma1(X[14..15])
+	 '&vpshufd	($t3,@X[0],0b01010000)',# X[16..17]
+	 '&vpsrld	($t2,$t3,$sigma1[2])',
+	 '&vpsrlq	($t3,$t3,$sigma1[0])',
+	 '&vpxor	($t2,$t2,$t3);',
+	 '&vpsrlq	($t3,$t3,$sigma1[1]-$sigma1[0])',
+	 '&vpxor	($t2,$t2,$t3)',
+	 '&vpshufd	($t2,$t2,0b11101000)',
+	 '&vpslldq	($t2,$t2,8)',
+	'&vpaddd	(@X[0],@X[0],$t2)'	# X[2..3] += sigma1(X[16..17])
+	);
+}
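+
+# For reference, the sequence above is the standard SHA-256 message
+# schedule (FIPS 180-4), with $sigma0=(7,18,3) and $sigma1=(17,19,10):
+#
+#	sigma0(x) = ROTR(x,7) ^ ROTR(x,18) ^ (x >> 3)
+#	sigma1(x) = ROTR(x,17) ^ ROTR(x,19) ^ (x >> 10)
+#	X[i] = X[i-16] + sigma0(X[i-15]) + X[i-7] + sigma1(X[i-2])
+#
+# AVX has no vector rotate, so each ROTR(x,n) is synthesized as
+# (x >> n) ^ (x << (32-n)) out of shifts and xors (vpsrld/vpslld/
+# vpsrlq/vpxor); the XOP path above gets each one in a single vprotd.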
+
+sub AVX_256_00_47 () {
+my $j = shift;
+my $body = shift;
+my @X = @_;
+my @insns = (&$body,&$body,&$body,&$body);	# 104 instructions
+
+	foreach (Xupdate_256_AVX()) {		# 29 instructions
+	    eval;
+	    eval(shift(@insns));
+	    eval(shift(@insns));
+	    eval(shift(@insns));
+	}
+	&vpaddd		($t2,@X[0],16*2*$j."($Tbl)");
+	  foreach (@insns) { eval; }		# remaining instructions
+	&vmovdqa	(16*$j."(%rsp)",$t2);
+}
+
+    $aesni_cbc_idx=0;
+    for ($i=0,$j=0; $j<4; $j++) {
+	&AVX_256_00_47($j,\&body_00_15,@X);
+	push(@X,shift(@X));			# rotate(@X)
+    }
+    	&mov		("%r12",$_inp);		# borrow $a4
+	&vpand		($temp,$temp,$mask14);
+	&mov		("%r15",$_out);		# borrow $a2
+	&vpor		($iv,$iv,$temp);
+	&vmovdqu	("(%r15,%r12)",$iv);	# write output
+	&lea		("%r12","16(%r12)");	# inp++
+
+	&cmpb	($SZ-1+16*2*$SZ."($Tbl)",0);
+	&jne	(".Lavx_00_47");
+
+	&vmovdqu	($inout,"(%r12)");
+	&mov		($_inp,"%r12");
+
+    $aesni_cbc_idx=0;
+    for ($i=0; $i<16; ) {
+	foreach(body_00_15()) { eval; }
+    }
+
+					}
+$code.=<<___;
+	mov	$_inp,%r12		# borrow $a4
+	mov	$_out,%r13		# borrow $a0
+	mov	$_ctx,%r15		# borrow $a2
+	mov	$_in0,%rsi		# borrow $a3
+
+	vpand	$mask14,$temp,$temp
+	mov	$a1,$A
+	vpor	$temp,$iv,$iv
+	vmovdqu	$iv,(%r13,%r12)		# write output
+	lea	16(%r12),%r12		# inp++
+
+	add	$SZ*0(%r15),$A
+	add	$SZ*1(%r15),$B
+	add	$SZ*2(%r15),$C
+	add	$SZ*3(%r15),$D
+	add	$SZ*4(%r15),$E
+	add	$SZ*5(%r15),$F
+	add	$SZ*6(%r15),$G
+	add	$SZ*7(%r15),$H
+
+	cmp	$_end,%r12
+
+	mov	$A,$SZ*0(%r15)
+	mov	$B,$SZ*1(%r15)
+	mov	$C,$SZ*2(%r15)
+	mov	$D,$SZ*3(%r15)
+	mov	$E,$SZ*4(%r15)
+	mov	$F,$SZ*5(%r15)
+	mov	$G,$SZ*6(%r15)
+	mov	$H,$SZ*7(%r15)
+	jb	.Lloop_avx
+
+	mov	$_ivp,$ivp
+	mov	$_rsp,%rsi
+.cfi_def_cfa	%rsi,8
+	vmovdqu	$iv,($ivp)		# output IV
+	vzeroall
+___
+$code.=<<___ if ($win64);
+	movaps	`$framesz+16*0`(%rsp),%xmm6
+	movaps	`$framesz+16*1`(%rsp),%xmm7
+	movaps	`$framesz+16*2`(%rsp),%xmm8
+	movaps	`$framesz+16*3`(%rsp),%xmm9
+	movaps	`$framesz+16*4`(%rsp),%xmm10
+	movaps	`$framesz+16*5`(%rsp),%xmm11
+	movaps	`$framesz+16*6`(%rsp),%xmm12
+	movaps	`$framesz+16*7`(%rsp),%xmm13
+	movaps	`$framesz+16*8`(%rsp),%xmm14
+	movaps	`$framesz+16*9`(%rsp),%xmm15
+___
+$code.=<<___;
+	mov	-48(%rsi),%r15
+.cfi_restore	%r15
+	mov	-40(%rsi),%r14
+.cfi_restore	%r14
+	mov	-32(%rsi),%r13
+.cfi_restore	%r13
+	mov	-24(%rsi),%r12
+.cfi_restore	%r12
+	mov	-16(%rsi),%rbp
+.cfi_restore	%rbp
+	mov	-8(%rsi),%rbx
+.cfi_restore	%rbx
+	lea	(%rsi),%rsp
+.cfi_def_cfa_register	%rsp
+.Lepilogue_avx:
+	ret
+.cfi_endproc
+.size	${func}_avx,.-${func}_avx
+___
+
+if ($avx>1) {{
+######################################################################
+# AVX2+BMI code path
+#
+my $a5=$SZ==4?"%esi":"%rsi";	# zap $inp
+my $PUSH8=8*2*$SZ;
+use integer;
+
+sub bodyx_00_15 () {
+	# at start $a1 should be zero, $a3 should hold $b^$c, and $a4 a copy of $f
+	(
+	'($a,$b,$c,$d,$e,$f,$g,$h)=@ROT;'.
+
+	'&add	($h,(32*($i/(16/$SZ))+$SZ*($i%(16/$SZ)))%$PUSH8.$base)',    # h+=X[i]+K[i]
+	'&and	($a4,$e)',		# f&e
+	'&rorx	($a0,$e,$Sigma1[2])',
+	'&rorx	($a2,$e,$Sigma1[1])',
+
+	'&lea	($a,"($a,$a1)")',	# h+=Sigma0(a) from the past
+	'&lea	($h,"($h,$a4)")',
+	'&andn	($a4,$e,$g)',		# ~e&g
+	'&xor	($a0,$a2)',
+
+	'&rorx	($a1,$e,$Sigma1[0])',
+	'&lea	($h,"($h,$a4)")',	# h+=Ch(e,f,g)=(e&f)+(~e&g)
+	'&xor	($a0,$a1)',		# Sigma1(e)
+	'&mov	($a2,$a)',
+
+	'&rorx	($a4,$a,$Sigma0[2])',
+	'&lea	($h,"($h,$a0)")',	# h+=Sigma1(e)
+	'&xor	($a2,$b)',		# a^b, b^c in next round
+	'&rorx	($a1,$a,$Sigma0[1])',
+
+	'&rorx	($a0,$a,$Sigma0[0])',
+	'&lea	($d,"($d,$h)")',	# d+=h
+	'&and	($a3,$a2)',		# (b^c)&(a^b)
+	@aesni_cbc_block[$aesni_cbc_idx++].
+	'&xor	($a1,$a4)',
+
+	'&xor	($a3,$b)',		# Maj(a,b,c)=Ch(a^b,c,b)
+	'&xor	($a1,$a0)',		# Sigma0(a)
+	'&lea	($h,"($h,$a3)");'.	# h+=Maj(a,b,c)
+	'&mov	($a4,$e)',		# copy of f in future
+
+	'($a2,$a3) = ($a3,$a2); unshift(@ROT,pop(@ROT)); $i++;'
+	);
+	# and at the finish one has to do $a+=$a1
+}
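+
+# For reference, the round body above is the standard SHA-256 round
+# (FIPS 180-4), with $Sigma1=(6,11,25) and $Sigma0=(2,13,22):
+#
+#	Sigma1(e) = ROTR(e,6) ^ ROTR(e,11) ^ ROTR(e,25)
+#	Sigma0(a) = ROTR(a,2) ^ ROTR(a,13) ^ ROTR(a,22)
+#	Ch(e,f,g) = (e&f) ^ (~e&g)
+#	Maj(a,b,c) = (a&b) ^ (a&c) ^ (b&c)
+#
+# Ch is accumulated as (e&f)+(~e&g): the two terms have disjoint set
+# bits, so lea-style addition equals the xor while leaving the flags
+# alone, and andn supplies ~e&g in one instruction.  Maj relies on the
+# identity Maj(a,b,c) = ((b^c)&(a^b))^b, with b^c inherited from the
+# previous round's a^b after the state rotates.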
+
+$code.=<<___;
+.type	${func}_avx2,\@function,6
+.align	64
+${func}_avx2:
+.cfi_startproc
+.Lavx2_shortcut:
+	mov	`($win64?56:8)`(%rsp),$in0	# load 7th parameter
+	mov	%rsp,%rax		# copy %rsp
+.cfi_def_cfa_register	%rax
+	push	%rbx
+.cfi_push	%rbx
+	push	%rbp
+.cfi_push	%rbp
+	push	%r12
+.cfi_push	%r12
+	push	%r13
+.cfi_push	%r13
+	push	%r14
+.cfi_push	%r14
+	push	%r15
+.cfi_push	%r15
+	sub	\$`2*$SZ*$rounds+8*8+$win64*16*10`,%rsp
+	and	\$-256*$SZ,%rsp		# align stack frame
+	add	\$`2*$SZ*($rounds-8)`,%rsp
+
+	shl	\$6,$len
+	sub	$inp,$out		# re-bias
+	sub	$inp,$in0
+	add	$inp,$len		# end of input
+
+	#mov	$inp,$_inp		# saved later
+	#mov	$out,$_out		# kept in $offload
+	mov	$len,$_end
+	#mov	$key,$_key		# remains resident in $inp register
+	mov	$ivp,$_ivp
+	mov	$ctx,$_ctx
+	mov	$in0,$_in0
+	mov	%rax,$_rsp
+.cfi_cfa_expression	$_rsp,deref,+8
+___
+$code.=<<___ if ($win64);
+	movaps	%xmm6,`$framesz+16*0`(%rsp)
+	movaps	%xmm7,`$framesz+16*1`(%rsp)
+	movaps	%xmm8,`$framesz+16*2`(%rsp)
+	movaps	%xmm9,`$framesz+16*3`(%rsp)
+	movaps	%xmm10,`$framesz+16*4`(%rsp)
+	movaps	%xmm11,`$framesz+16*5`(%rsp)
+	movaps	%xmm12,`$framesz+16*6`(%rsp)
+	movaps	%xmm13,`$framesz+16*7`(%rsp)
+	movaps	%xmm14,`$framesz+16*8`(%rsp)
+	movaps	%xmm15,`$framesz+16*9`(%rsp)
+___
+$code.=<<___;
+.Lprologue_avx2:
+	vzeroall
+
+	mov	$inp,%r13		# borrow $a0
+	vpinsrq	\$1,$out,$offload,$offload
+	lea	0x80($key),$inp		# size optimization, reassign
+	lea	$TABLE+`$SZ*2*$rounds+32`(%rip),%r12	# borrow $a4
+	mov	0xf0-0x80($inp),%r14d	# rounds, borrow $a1
+	mov	$ctx,%r15		# borrow $a2
+	mov	$in0,%rsi		# borrow $a3
+	vmovdqu	($ivp),$iv		# load IV
+	lea	-9(%r14),%r14
+
+	vmovdqa	0x00(%r12,%r14,8),$mask14
+	vmovdqa	0x10(%r12,%r14,8),$mask12
+	vmovdqa	0x20(%r12,%r14,8),$mask10
+
+	sub	\$-16*$SZ,%r13		# inp++, size optimization
+	mov	$SZ*0(%r15),$A
+	lea	(%rsi,%r13),%r12	# borrow $a0
+	mov	$SZ*1(%r15),$B
+	cmp	$len,%r13		# $_end
+	mov	$SZ*2(%r15),$C
+	cmove	%rsp,%r12		# next block or random data
+	mov	$SZ*3(%r15),$D
+	mov	$SZ*4(%r15),$E
+	mov	$SZ*5(%r15),$F
+	mov	$SZ*6(%r15),$G
+	mov	$SZ*7(%r15),$H
+	vmovdqu	0x00-0x80($inp),$roundkey
+___
+					if ($SZ==4) {	# SHA256
+    my @X = map("%ymm$_",(0..3));
+    my ($t0,$t1,$t2,$t3) = map("%ymm$_",(4..7));
+
+$code.=<<___;
+	jmp	.Loop_avx2
+.align	16
+.Loop_avx2:
+	vmovdqa	$TABLE+`$SZ*2*$rounds`(%rip),$t3
+	vmovdqu	-16*$SZ+0(%rsi,%r13),%xmm0
+	vmovdqu	-16*$SZ+16(%rsi,%r13),%xmm1
+	vmovdqu	-16*$SZ+32(%rsi,%r13),%xmm2
+	vmovdqu	-16*$SZ+48(%rsi,%r13),%xmm3
+
+	vinserti128	\$1,(%r12),@X[0],@X[0]
+	vinserti128	\$1,16(%r12),@X[1],@X[1]
+	 vpshufb	$t3,@X[0],@X[0]
+	vinserti128	\$1,32(%r12),@X[2],@X[2]
+	 vpshufb	$t3,@X[1],@X[1]
+	vinserti128	\$1,48(%r12),@X[3],@X[3]
+
+	lea	$TABLE(%rip),$Tbl
+	vpshufb	$t3,@X[2],@X[2]
+	lea	-16*$SZ(%r13),%r13
+	vpaddd	0x00($Tbl),@X[0],$t0
+	vpshufb	$t3,@X[3],@X[3]
+	vpaddd	0x20($Tbl),@X[1],$t1
+	vpaddd	0x40($Tbl),@X[2],$t2
+	vpaddd	0x60($Tbl),@X[3],$t3
+	vmovdqa	$t0,0x00(%rsp)
+	xor	$a1,$a1
+	vmovdqa	$t1,0x20(%rsp)
+___
+$code.=<<___ if (!$win64);
+# temporarily use %rsi as frame pointer
+        mov     $_rsp,%rsi
+.cfi_def_cfa    %rsi,8
+___
+$code.=<<___;
+	lea	-$PUSH8(%rsp),%rsp
+___
+$code.=<<___ if (!$win64);
+# the frame info is at $_rsp, but the stack is moving...
+# so a second frame pointer is saved at -8(%rsp)
+# that is in the red zone
+        mov     %rsi,-8(%rsp)
+.cfi_cfa_expression     %rsp-8,deref,+8
+___
+$code.=<<___;
+	mov	$B,$a3
+	vmovdqa	$t2,0x00(%rsp)
+	xor	$C,$a3			# magic
+	vmovdqa	$t3,0x20(%rsp)
+	mov	$F,$a4
+	sub	\$-16*2*$SZ,$Tbl	# size optimization
+	jmp	.Lavx2_00_47
+
+.align	16
+.Lavx2_00_47:
+	vmovdqu	(%r13),$inout
+	vpinsrq	\$0,%r13,$offload,$offload
+___
+
+sub AVX2_256_00_47 () {
+my $j = shift;
+my $body = shift;
+my @X = @_;
+my @insns = (&$body,&$body,&$body,&$body);	# 96 instructions
+my $base = "+2*$PUSH8(%rsp)";
+
+	if (($j%2)==0) {
+	&lea	("%rsp","-$PUSH8(%rsp)");
+$code.=<<___ if (!$win64);
+.cfi_cfa_expression     %rsp+`$PUSH8-8`,deref,+8
+# copy secondary frame pointer to new location again at -8(%rsp)
+        pushq   $PUSH8-8(%rsp)
+.cfi_cfa_expression     %rsp,deref,+8
+        lea     8(%rsp),%rsp
+.cfi_cfa_expression     %rsp-8,deref,+8
+___
+	}
+	foreach (Xupdate_256_AVX()) {		# 29 instructions
+	    eval;
+	    eval(shift(@insns));
+	    eval(shift(@insns));
+	    eval(shift(@insns));
+	}
+	&vpaddd		($t2,@X[0],16*2*$j."($Tbl)");
+	  foreach (@insns) { eval; }		# remaining instructions
+	&vmovdqa	((32*$j)%$PUSH8."(%rsp)",$t2);
+}
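+
+# Note on the travelling frame: on every even $j the code above grows
+# the frame downwards by $PUSH8 = 8*2*$SZ bytes, so fresh X-schedule
+# slots land below the previous ones and the scalar rounds reach them
+# through $base offsets relative to the moving %rsp, while the
+# pushq/lea pair re-copies the secondary frame pointer to -8(%rsp) so
+# the CFI expression keeps unwinding correctly.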
+    $aesni_cbc_idx=0;
+    for ($i=0,$j=0; $j<4; $j++) {
+	&AVX2_256_00_47($j,\&bodyx_00_15,@X);
+	push(@X,shift(@X));			# rotate(@X)
+    }
+	&vmovq		("%r13",$offload);	# borrow $a0
+	&vpextrq	("%r15",$offload,1);	# borrow $a2
+	&vpand		($temp,$temp,$mask14);
+	&vpor		($iv,$iv,$temp);
+	&vmovdqu	("(%r15,%r13)",$iv);	# write output
+	&lea		("%r13","16(%r13)");	# inp++
+
+	&lea	($Tbl,16*2*$SZ."($Tbl)");
+	&cmpb	(($SZ-1)."($Tbl)",0);
+	&jne	(".Lavx2_00_47");
+
+	&vmovdqu	($inout,"(%r13)");
+	&vpinsrq	($offload,$offload,"%r13",0);
+
+    $aesni_cbc_idx=0;
+    for ($i=0; $i<16; ) {
+	my $base=$i<8?"+$PUSH8(%rsp)":"(%rsp)";
+	foreach(bodyx_00_15()) { eval; }
+    }
+					}
+$code.=<<___;
+	vpextrq	\$1,$offload,%r12		# $_out, borrow $a4
+	vmovq	$offload,%r13			# $_inp, borrow $a0
+	mov	`2*$SZ*$rounds+5*8`(%rsp),%r15	# $_ctx, borrow $a2
+	add	$a1,$A
+	lea	`2*$SZ*($rounds-8)`(%rsp),$Tbl
+
+	vpand	$mask14,$temp,$temp
+	vpor	$temp,$iv,$iv
+	vmovdqu	$iv,(%r12,%r13)			# write output
+	lea	16(%r13),%r13
+
+	add	$SZ*0(%r15),$A
+	add	$SZ*1(%r15),$B
+	add	$SZ*2(%r15),$C
+	add	$SZ*3(%r15),$D
+	add	$SZ*4(%r15),$E
+	add	$SZ*5(%r15),$F
+	add	$SZ*6(%r15),$G
+	add	$SZ*7(%r15),$H
+
+	mov	$A,$SZ*0(%r15)
+	mov	$B,$SZ*1(%r15)
+	mov	$C,$SZ*2(%r15)
+	mov	$D,$SZ*3(%r15)
+	mov	$E,$SZ*4(%r15)
+	mov	$F,$SZ*5(%r15)
+	mov	$G,$SZ*6(%r15)
+	mov	$H,$SZ*7(%r15)
+
+	cmp	`$PUSH8+2*8`($Tbl),%r13		# $_end
+	je	.Ldone_avx2
+
+	xor	$a1,$a1
+	mov	$B,$a3
+	mov	$F,$a4
+	xor	$C,$a3			# magic
+	jmp	.Lower_avx2
+.align	16
+.Lower_avx2:
+	vmovdqu	(%r13),$inout
+	vpinsrq	\$0,%r13,$offload,$offload
+___
+    $aesni_cbc_idx=0;
+    for ($i=0; $i<16; ) {
+	my $base="+16($Tbl)";
+	foreach(bodyx_00_15()) { eval; }
+	&lea	($Tbl,"-$PUSH8($Tbl)")	if ($i==8);
+    }
+$code.=<<___;
+	vmovq	$offload,%r13			# borrow $a0
+	vpextrq	\$1,$offload,%r15		# borrow $a2
+	vpand	$mask14,$temp,$temp
+	vpor	$temp,$iv,$iv
+	lea	-$PUSH8($Tbl),$Tbl
+	vmovdqu	$iv,(%r15,%r13)			# write output
+	lea	16(%r13),%r13			# inp++
+	cmp	%rsp,$Tbl
+	jae	.Lower_avx2
+
+	mov	`2*$SZ*$rounds+5*8`(%rsp),%r15	# $_ctx, borrow $a2
+	lea	16*$SZ(%r13),%r13
+	mov	`2*$SZ*$rounds+6*8`(%rsp),%rsi	# $_in0, borrow $a3
+	add	$a1,$A
+	lea	`2*$SZ*($rounds-8)`(%rsp),%rsp
+
+	add	$SZ*0(%r15),$A
+	add	$SZ*1(%r15),$B
+	add	$SZ*2(%r15),$C
+	add	$SZ*3(%r15),$D
+	add	$SZ*4(%r15),$E
+	add	$SZ*5(%r15),$F
+	add	$SZ*6(%r15),$G
+	lea	(%rsi,%r13),%r12
+	add	$SZ*7(%r15),$H
+
+	cmp	$_end,%r13
+
+	mov	$A,$SZ*0(%r15)
+	cmove	%rsp,%r12		# next block or stale data
+	mov	$B,$SZ*1(%r15)
+	mov	$C,$SZ*2(%r15)
+	mov	$D,$SZ*3(%r15)
+	mov	$E,$SZ*4(%r15)
+	mov	$F,$SZ*5(%r15)
+	mov	$G,$SZ*6(%r15)
+	mov	$H,$SZ*7(%r15)
+
+	jbe	.Loop_avx2
+	lea	(%rsp),$Tbl
+# temporarily use $Tbl as index to $_rsp
+# this avoids the need to save a secondary frame pointer at -8(%rsp)
+.cfi_cfa_expression     $Tbl+`16*$SZ+7*8`,deref,+8
+
+.Ldone_avx2:
+	mov	16*$SZ+4*8($Tbl),$ivp
+	mov	16*$SZ+7*8($Tbl),%rsi
+.cfi_def_cfa	%rsi,8
+	vmovdqu	$iv,($ivp)		# output IV
+	vzeroall
+___
+$code.=<<___ if ($win64);
+	movaps	`$framesz+16*0`($Tbl),%xmm6
+	movaps	`$framesz+16*1`($Tbl),%xmm7
+	movaps	`$framesz+16*2`($Tbl),%xmm8
+	movaps	`$framesz+16*3`($Tbl),%xmm9
+	movaps	`$framesz+16*4`($Tbl),%xmm10
+	movaps	`$framesz+16*5`($Tbl),%xmm11
+	movaps	`$framesz+16*6`($Tbl),%xmm12
+	movaps	`$framesz+16*7`($Tbl),%xmm13
+	movaps	`$framesz+16*8`($Tbl),%xmm14
+	movaps	`$framesz+16*9`($Tbl),%xmm15
+___
+$code.=<<___;
+	mov	-48(%rsi),%r15
+.cfi_restore	%r15
+	mov	-40(%rsi),%r14
+.cfi_restore	%r14
+	mov	-32(%rsi),%r13
+.cfi_restore	%r13
+	mov	-24(%rsi),%r12
+.cfi_restore	%r12
+	mov	-16(%rsi),%rbp
+.cfi_restore	%rbp
+	mov	-8(%rsi),%rbx
+.cfi_restore	%rbx
+	lea	(%rsi),%rsp
+.cfi_def_cfa_register	%rsp
+.Lepilogue_avx2:
+	ret
+.cfi_endproc
+.size	${func}_avx2,.-${func}_avx2
+___
+}}
+}}
+{{
+my ($in0,$out,$len,$key,$ivp,$ctx,$inp)=("%rdi","%rsi","%rdx","%rcx","%r8","%r9","%r10");
+
+my ($rounds,$Tbl)=("%r11d","%rbx");
+
+my ($iv,$in,$rndkey0)=map("%xmm$_",(6,14,15));
+my @rndkey=("%xmm4","%xmm5");
+my $r=0;
+my $sn=0;
+
+my ($Wi,$ABEF,$CDGH,$TMP,$BSWAP,$ABEF_SAVE,$CDGH_SAVE)=map("%xmm$_",(0..3,7..9));
+my @MSG=map("%xmm$_",(10..13));
+
+my $aesenc=sub {
+  use integer;
+  my ($n,$k)=($r/10,$r%10);
+    if ($k==0) {
+      $code.=<<___;
+	movups		`16*$n`($in0),$in		# load input
+	xorps		$rndkey0,$in
+___
+      $code.=<<___ if ($n);
+	movups		$iv,`16*($n-1)`($out,$in0)	# write output
+___
+      $code.=<<___;
+	xorps		$in,$iv
+	movups		`32+16*$k-112`($key),$rndkey[1]
+	aesenc		$rndkey[0],$iv
+___
+    } elsif ($k==9) {
+      $sn++;
+      $code.=<<___;
+	cmp		\$11,$rounds
+	jb		.Laesenclast$sn
+	movups		`32+16*($k+0)-112`($key),$rndkey[1]
+	aesenc		$rndkey[0],$iv
+	movups		`32+16*($k+1)-112`($key),$rndkey[0]
+	aesenc		$rndkey[1],$iv
+	je		.Laesenclast$sn
+	movups		`32+16*($k+2)-112`($key),$rndkey[1]
+	aesenc		$rndkey[0],$iv
+	movups		`32+16*($k+3)-112`($key),$rndkey[0]
+	aesenc		$rndkey[1],$iv
+.Laesenclast$sn:
+	aesenclast	$rndkey[0],$iv
+	movups		16-112($key),$rndkey[1]		# forward reference
+	nop
+___
+    } else {
+      $code.=<<___;
+	movups		`32+16*$k-112`($key),$rndkey[1]
+	aesenc		$rndkey[0],$iv
+___
+    }
+    $r++;	unshift(@rndkey,pop(@rndkey));
+};
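+
+# The closure above emits exactly one CBC round per invocation: under
+# use integer, $n=$r/10 selects the 16-byte block and $k=$r%10 the AES
+# round, so 40 calls dovetail four AES-CBC blocks into the 64 SHA-256
+# rounds.  $k==0 loads and chains a fresh plaintext block (storing the
+# previous ciphertext), while $k==9 finishes the block, branching on
+# $rounds so 10-round (128-bit) keys stop early and 14-round (256-bit)
+# keys run the full tail.  The -112 displacement bias matches the
+# `lea 112($key),$key` size optimization in the prologue, keeping
+# round-key offsets within a signed byte.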
+
+if ($shaext) {
+my $Tbl="%rax";
+
+$code.=<<___;
+.type	${func}_shaext,\@function,6
+.align	32
+${func}_shaext:
+.cfi_startproc
+	mov	`($win64?56:8)`(%rsp),$inp	# load 7th argument
+___
+$code.=<<___ if ($win64);
+	lea	`-8-10*16`(%rsp),%rsp
+	movaps	%xmm6,-8-10*16(%rax)
+	movaps	%xmm7,-8-9*16(%rax)
+	movaps	%xmm8,-8-8*16(%rax)
+	movaps	%xmm9,-8-7*16(%rax)
+	movaps	%xmm10,-8-6*16(%rax)
+	movaps	%xmm11,-8-5*16(%rax)
+	movaps	%xmm12,-8-4*16(%rax)
+	movaps	%xmm13,-8-3*16(%rax)
+	movaps	%xmm14,-8-2*16(%rax)
+	movaps	%xmm15,-8-1*16(%rax)
+.Lprologue_shaext:
+___
+$code.=<<___;
+	lea		K256+0x80(%rip),$Tbl
+	movdqu		($ctx),$ABEF		# DCBA
+	movdqu		16($ctx),$CDGH		# HGFE
+	movdqa		0x200-0x80($Tbl),$TMP	# byte swap mask
+
+	mov		240($key),$rounds
+	sub		$in0,$out
+	movups		($key),$rndkey0		# $key[0]
+	movups		($ivp),$iv		# load IV
+	movups		16($key),$rndkey[0]	# forward reference
+	lea		112($key),$key		# size optimization
+
+	pshufd		\$0x1b,$ABEF,$Wi	# ABCD
+	pshufd		\$0xb1,$ABEF,$ABEF	# CDAB
+	pshufd		\$0x1b,$CDGH,$CDGH	# EFGH
+	movdqa		$TMP,$BSWAP		# offload
+	palignr		\$8,$CDGH,$ABEF		# ABEF
+	punpcklqdq	$Wi,$CDGH		# CDGH
+
+	jmp	.Loop_shaext
+
+.align	16
+.Loop_shaext:
+	movdqu		($inp),@MSG[0]
+	movdqu		0x10($inp),@MSG[1]
+	movdqu		0x20($inp),@MSG[2]
+	pshufb		$TMP,@MSG[0]
+	movdqu		0x30($inp),@MSG[3]
+
+	movdqa		0*32-0x80($Tbl),$Wi
+	paddd		@MSG[0],$Wi
+	pshufb		$TMP,@MSG[1]
+	movdqa		$CDGH,$CDGH_SAVE	# offload
+	movdqa		$ABEF,$ABEF_SAVE	# offload
+___
+	&$aesenc();
+$code.=<<___;
+	sha256rnds2	$ABEF,$CDGH		# 0-3
+	pshufd		\$0x0e,$Wi,$Wi
+___
+	&$aesenc();
+$code.=<<___;
+	sha256rnds2	$CDGH,$ABEF
+
+	movdqa		1*32-0x80($Tbl),$Wi
+	paddd		@MSG[1],$Wi
+	pshufb		$TMP,@MSG[2]
+	lea		0x40($inp),$inp
+___
+	&$aesenc();
+$code.=<<___;
+	sha256rnds2	$ABEF,$CDGH		# 4-7
+	pshufd		\$0x0e,$Wi,$Wi
+___
+	&$aesenc();
+$code.=<<___;
+	sha256rnds2	$CDGH,$ABEF
+
+	movdqa		2*32-0x80($Tbl),$Wi
+	paddd		@MSG[2],$Wi
+	pshufb		$TMP,@MSG[3]
+	sha256msg1	@MSG[1],@MSG[0]
+___
+	&$aesenc();
+$code.=<<___;
+	sha256rnds2	$ABEF,$CDGH		# 8-11
+	pshufd		\$0x0e,$Wi,$Wi
+	movdqa		@MSG[3],$TMP
+	palignr		\$4,@MSG[2],$TMP
+	paddd		$TMP,@MSG[0]
+___
+	&$aesenc();
+$code.=<<___;
+	sha256rnds2	$CDGH,$ABEF
+
+	movdqa		3*32-0x80($Tbl),$Wi
+	paddd		@MSG[3],$Wi
+	sha256msg2	@MSG[3],@MSG[0]
+	sha256msg1	@MSG[2],@MSG[1]
+___
+	&$aesenc();
+$code.=<<___;
+	sha256rnds2	$ABEF,$CDGH		# 12-15
+	pshufd		\$0x0e,$Wi,$Wi
+___
+	&$aesenc();
+$code.=<<___;
+	movdqa		@MSG[0],$TMP
+	palignr		\$4,@MSG[3],$TMP
+	paddd		$TMP,@MSG[1]
+	sha256rnds2	$CDGH,$ABEF
+___
+for($i=4;$i<16-3;$i++) {
+	&$aesenc()	if (($r%10)==0);
+$code.=<<___;
+	movdqa		$i*32-0x80($Tbl),$Wi
+	paddd		@MSG[0],$Wi
+	sha256msg2	@MSG[0],@MSG[1]
+	sha256msg1	@MSG[3],@MSG[2]
+___
+	&$aesenc();
+$code.=<<___;
+	sha256rnds2	$ABEF,$CDGH		# 16-19...
+	pshufd		\$0x0e,$Wi,$Wi
+	movdqa		@MSG[1],$TMP
+	palignr		\$4,@MSG[0],$TMP
+	paddd		$TMP,@MSG[2]
+___
+	&$aesenc();
+	&$aesenc()	if ($r==19);
+$code.=<<___;
+	sha256rnds2	$CDGH,$ABEF
+___
+	push(@MSG,shift(@MSG));
+}
+$code.=<<___;
+	movdqa		13*32-0x80($Tbl),$Wi
+	paddd		@MSG[0],$Wi
+	sha256msg2	@MSG[0],@MSG[1]
+	sha256msg1	@MSG[3],@MSG[2]
+___
+	&$aesenc();
+$code.=<<___;
+	sha256rnds2	$ABEF,$CDGH		# 52-55
+	pshufd		\$0x0e,$Wi,$Wi
+	movdqa		@MSG[1],$TMP
+	palignr		\$4,@MSG[0],$TMP
+	paddd		$TMP,@MSG[2]
+___
+	&$aesenc();
+	&$aesenc();
+$code.=<<___;
+	sha256rnds2	$CDGH,$ABEF
+
+	movdqa		14*32-0x80($Tbl),$Wi
+	paddd		@MSG[1],$Wi
+	sha256msg2	@MSG[1],@MSG[2]
+	movdqa		$BSWAP,$TMP
+___
+	&$aesenc();
+$code.=<<___;
+	sha256rnds2	$ABEF,$CDGH		# 56-59
+	pshufd		\$0x0e,$Wi,$Wi
+___
+	&$aesenc();
+$code.=<<___;
+	sha256rnds2	$CDGH,$ABEF
+
+	movdqa		15*32-0x80($Tbl),$Wi
+	paddd		@MSG[2],$Wi
+___
+	&$aesenc();
+	&$aesenc();
+$code.=<<___;
+	sha256rnds2	$ABEF,$CDGH		# 60-63
+	pshufd		\$0x0e,$Wi,$Wi
+___
+	&$aesenc();
+$code.=<<___;
+	sha256rnds2	$CDGH,$ABEF
+	#pxor		$CDGH,$rndkey0		# black magic
+___
+	while ($r<40)	{ &$aesenc(); }		# remaining aesenc's
+$code.=<<___;
+	#xorps		$CDGH,$rndkey0		# black magic
+	paddd		$CDGH_SAVE,$CDGH
+	paddd		$ABEF_SAVE,$ABEF
+
+	dec		$len
+	movups		$iv,48($out,$in0)	# write output
+	lea		64($in0),$in0
+	jnz		.Loop_shaext
+
+	pshufd		\$0xb1,$CDGH,$CDGH	# DCHG
+	pshufd		\$0x1b,$ABEF,$TMP	# FEBA
+	pshufd		\$0xb1,$ABEF,$ABEF	# BAFE
+	punpckhqdq	$CDGH,$ABEF		# DCBA
+	palignr		\$8,$TMP,$CDGH		# HGFE
+
+	movups		$iv,($ivp)		# write IV
+	movdqu		$ABEF,($ctx)
+	movdqu		$CDGH,16($ctx)
+___
+$code.=<<___ if ($win64);
+	movaps	0*16(%rsp),%xmm6
+	movaps	1*16(%rsp),%xmm7
+	movaps	2*16(%rsp),%xmm8
+	movaps	3*16(%rsp),%xmm9
+	movaps	4*16(%rsp),%xmm10
+	movaps	5*16(%rsp),%xmm11
+	movaps	6*16(%rsp),%xmm12
+	movaps	7*16(%rsp),%xmm13
+	movaps	8*16(%rsp),%xmm14
+	movaps	9*16(%rsp),%xmm15
+	lea	8+10*16(%rsp),%rsp
+.Lepilogue_shaext:
+___
+$code.=<<___;
+	ret
+.cfi_endproc
+.size	${func}_shaext,.-${func}_shaext
+___
+}
+}}}}}
+
+# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
+#		CONTEXT *context,DISPATCHER_CONTEXT *disp)
+if ($win64 && $avx) {
+$rec="%rcx";
+$frame="%rdx";
+$context="%r8";
+$disp="%r9";
+
+$code.=<<___;
+.extern	__imp_RtlVirtualUnwind
+.type	se_handler,\@abi-omnipotent
+.align	16
+se_handler:
+	push	%rsi
+	push	%rdi
+	push	%rbx
+	push	%rbp
+	push	%r12
+	push	%r13
+	push	%r14
+	push	%r15
+	pushfq
+	sub	\$64,%rsp
+
+	mov	120($context),%rax	# pull context->Rax
+	mov	248($context),%rbx	# pull context->Rip
+
+	mov	8($disp),%rsi		# disp->ImageBase
+	mov	56($disp),%r11		# disp->HandlerData
+
+	mov	0(%r11),%r10d		# HandlerData[0]
+	lea	(%rsi,%r10),%r10	# prologue label
+	cmp	%r10,%rbx		# context->Rip<prologue label
+	jb	.Lin_prologue
+
+	mov	152($context),%rax	# pull context->Rsp
+
+	mov	4(%r11),%r10d		# HandlerData[1]
+	lea	(%rsi,%r10),%r10	# epilogue label
+	cmp	%r10,%rbx		# context->Rip>=epilogue label
+	jae	.Lin_prologue
+___
+$code.=<<___ if ($shaext);
+	lea	aesni_cbc_sha256_enc_shaext(%rip),%r10
+	cmp	%r10,%rbx
+	jb	.Lnot_in_shaext
+
+	lea	(%rax),%rsi
+	lea	512($context),%rdi	# &context.Xmm6
+	mov	\$20,%ecx
+	.long	0xa548f3fc		# cld; rep movsq
+	lea	168(%rax),%rax		# adjust stack pointer
+	jmp	.Lin_prologue
+.Lnot_in_shaext:
+___
+$code.=<<___ if ($avx>1);
+	lea	.Lavx2_shortcut(%rip),%r10
+	cmp	%r10,%rbx		# context->Rip<avx2_shortcut
+	jb	.Lnot_in_avx2
+
+	and	\$-256*$SZ,%rax
+	add	\$`2*$SZ*($rounds-8)`,%rax
+.Lnot_in_avx2:
+___
+$code.=<<___;
+	mov	%rax,%rsi		# put aside Rsp
+	mov	16*$SZ+7*8(%rax),%rax	# pull $_rsp
+
+	mov	-8(%rax),%rbx
+	mov	-16(%rax),%rbp
+	mov	-24(%rax),%r12
+	mov	-32(%rax),%r13
+	mov	-40(%rax),%r14
+	mov	-48(%rax),%r15
+	mov	%rbx,144($context)	# restore context->Rbx
+	mov	%rbp,160($context)	# restore context->Rbp
+	mov	%r12,216($context)	# restore context->R12
+	mov	%r13,224($context)	# restore context->R13
+	mov	%r14,232($context)	# restore context->R14
+	mov	%r15,240($context)	# restore context->R15
+
+	lea	16*$SZ+8*8(%rsi),%rsi	# Xmm6- save area
+	lea	512($context),%rdi	# &context.Xmm6
+	mov	\$20,%ecx
+	.long	0xa548f3fc		# cld; rep movsq
+
+.Lin_prologue:
+	mov	8(%rax),%rdi
+	mov	16(%rax),%rsi
+	mov	%rax,152($context)	# restore context->Rsp
+	mov	%rsi,168($context)	# restore context->Rsi
+	mov	%rdi,176($context)	# restore context->Rdi
+
+	mov	40($disp),%rdi		# disp->ContextRecord
+	mov	$context,%rsi		# context
+	mov	\$154,%ecx		# sizeof(CONTEXT)
+	.long	0xa548f3fc		# cld; rep movsq
+
+	mov	$disp,%rsi
+	xor	%rcx,%rcx		# arg1, UNW_FLAG_NHANDLER
+	mov	8(%rsi),%rdx		# arg2, disp->ImageBase
+	mov	0(%rsi),%r8		# arg3, disp->ControlPc
+	mov	16(%rsi),%r9		# arg4, disp->FunctionEntry
+	mov	40(%rsi),%r10		# disp->ContextRecord
+	lea	56(%rsi),%r11		# &disp->HandlerData
+	lea	24(%rsi),%r12		# &disp->EstablisherFrame
+	mov	%r10,32(%rsp)		# arg5
+	mov	%r11,40(%rsp)		# arg6
+	mov	%r12,48(%rsp)		# arg7
+	mov	%rcx,56(%rsp)		# arg8, (NULL)
+	call	*__imp_RtlVirtualUnwind(%rip)
+
+	mov	\$1,%eax		# ExceptionContinueSearch
+	add	\$64,%rsp
+	popfq
+	pop	%r15
+	pop	%r14
+	pop	%r13
+	pop	%r12
+	pop	%rbp
+	pop	%rbx
+	pop	%rdi
+	pop	%rsi
+	ret
+.size	se_handler,.-se_handler
+
+.section	.pdata
+	.rva	.LSEH_begin_${func}_xop
+	.rva	.LSEH_end_${func}_xop
+	.rva	.LSEH_info_${func}_xop
+
+	.rva	.LSEH_begin_${func}_avx
+	.rva	.LSEH_end_${func}_avx
+	.rva	.LSEH_info_${func}_avx
+___
+$code.=<<___ if ($avx>1);
+	.rva	.LSEH_begin_${func}_avx2
+	.rva	.LSEH_end_${func}_avx2
+	.rva	.LSEH_info_${func}_avx2
+___
+$code.=<<___ if ($shaext);
+	.rva	.LSEH_begin_${func}_shaext
+	.rva	.LSEH_end_${func}_shaext
+	.rva	.LSEH_info_${func}_shaext
+___
+$code.=<<___;
+.section	.xdata
+.align	8
+.LSEH_info_${func}_xop:
+	.byte	9,0,0,0
+	.rva	se_handler
+	.rva	.Lprologue_xop,.Lepilogue_xop		# HandlerData[]
+
+.LSEH_info_${func}_avx:
+	.byte	9,0,0,0
+	.rva	se_handler
+	.rva	.Lprologue_avx,.Lepilogue_avx		# HandlerData[]
+___
+$code.=<<___ if ($avx>1);
+.LSEH_info_${func}_avx2:
+	.byte	9,0,0,0
+	.rva	se_handler
+	.rva	.Lprologue_avx2,.Lepilogue_avx2		# HandlerData[]
+___
+$code.=<<___ if ($shaext);
+.LSEH_info_${func}_shaext:
+	.byte	9,0,0,0
+	.rva	se_handler
+	.rva	.Lprologue_shaext,.Lepilogue_shaext	# HandlerData[]
+___
+}
+
+####################################################################
+sub rex {
+  local *opcode=shift;
+  my ($dst,$src)=@_;
+  my $rex=0;
+
+    $rex|=0x04			if($dst>=8);
+    $rex|=0x01			if($src>=8);
+    unshift @opcode,$rex|0x40	if($rex);
+}
+
+{
+  my %opcodelet = (
+		"sha256rnds2" => 0xcb,
+  		"sha256msg1"  => 0xcc,
+		"sha256msg2"  => 0xcd	);
+
+  sub sha256op38 {
+    my $instr = shift;
+
+    if (defined($opcodelet{$instr}) && $_[0] =~ /%xmm([0-9]+),\s*%xmm([0-9]+)/) {
+      my @opcode=(0x0f,0x38);
+	rex(\@opcode,$2,$1);
+	push @opcode,$opcodelet{$instr};
+	push @opcode,0xc0|($1&7)|(($2&7)<<3);		# ModR/M
+	return ".byte\t".join(',',@opcode);
+    } else {
+	return $instr."\t".$_[0];
+    }
+  }
+}
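+
+# Worked example, derived from the code above: "sha256rnds2
+# %xmm1,%xmm2" matches with $1=1 (src) and $2=2 (dst); neither index
+# is >=8, so rex() prepends nothing and the ModR/M byte is
+# 0xc0|1|(2<<3) = 0xd1, i.e. the substitution below turns it into
+#
+#	.byte	0x0f,0x38,0xcb,0xd1	# sha256rnds2 %xmm1,%xmm2
+#
+# With %xmm9 as destination rex() would unshift 0x44 (0x40|0x04),
+# giving .byte 0x44,0x0f,0x38,0xcb,0xc9 instead.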
+
+$code =~ s/\`([^\`]*)\`/eval $1/gem;
+$code =~ s/\b(sha256[^\s]*)\s+(.*)/sha256op38($1,$2)/gem;
+print $code;
+close STDOUT or die "error closing STDOUT: $!";
diff --git a/ap/lib/libssl/openssl-1.1.1o/crypto/aes/asm/aesni-x86.pl b/ap/lib/libssl/openssl-1.1.1o/crypto/aes/asm/aesni-x86.pl
new file mode 100644
index 0000000..fe2b265
--- /dev/null
+++ b/ap/lib/libssl/openssl-1.1.1o/crypto/aes/asm/aesni-x86.pl
@@ -0,0 +1,3415 @@
+#! /usr/bin/env perl
+# Copyright 2009-2020 The OpenSSL Project Authors. All Rights Reserved.
+#
+# Licensed under the OpenSSL license (the "License").  You may not use
+# this file except in compliance with the License.  You can obtain a copy
+# in the file LICENSE in the source distribution or at
+# https://www.openssl.org/source/license.html
+
+
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+#
+# This module implements support for the Intel AES-NI extension. In
+# the OpenSSL context it's used with the Intel engine, but it can also
+# be used as a drop-in replacement for crypto/aes/asm/aes-586.pl [see
+# below for details].
+#
+# Performance.
+#
+# To start with, see the corresponding paragraph in aesni-x86_64.pl...
+# Instead of filling a table similar to the one found there, I've
+# chosen to summarize *comparison* results for raw ECB, CTR and CBC
+# benchmarks. The simplified table below represents 32-bit performance
+# relative to 64-bit at every given point. Ratios vary across
+# encryption modes, hence the interval values.
+#
+#	16-byte     64-byte     256-byte    1-KB        8-KB
+#	53-67%      67-84%      91-94%      95-98%      97-99.5%
+#
+# Lower ratios for smaller block sizes are perfectly understandable,
+# because function call overhead is higher in 32-bit mode. Performance
+# on the largest, 8-KB, blocks is virtually the same: 32-bit code is
+# less than 1% slower for ECB, CBC and CCM, and ~3% slower otherwise.
+
+# January 2011
+#
+# See aesni-x86_64.pl for details. Unlike the x86_64 version, this
+# module interleaves at most 6 aes[enc|dec] instructions, because
+# there are not enough registers for an 8x interleave [which should be
+# optimal for Sandy Bridge]. In fact, the performance results for a 6x
+# interleave factor presented in aesni-x86_64.pl (except for CTR) were
+# measured with this module.
+
+# April 2011
+#
+# Add aesni_xts_[en|de]crypt. Westmere spends 1.50 cycles processing
+# one byte out of 8KB with a 128-bit key; Sandy Bridge - 1.09.
+
+# November 2015
+#
+# Add aesni_ocb_[en|de]crypt.
+
+######################################################################
+# Current large-block performance in cycles per byte processed with
+# 128-bit key (less is better).
+#
+#		CBC en-/decrypt	CTR	XTS	ECB	OCB
+# Westmere	3.77/1.37	1.37	1.52	1.27
+# * Bridge	5.07/0.98	0.99	1.09	0.91	1.10
+# Haswell	4.44/0.80	0.97	1.03	0.72	0.76
+# Skylake	2.68/0.65	0.65	0.66	0.64	0.66
+# Silvermont	5.77/3.56	3.67	4.03	3.46	4.03
+# Goldmont	3.84/1.39	1.39	1.63	1.31	1.70
+# Bulldozer	5.80/0.98	1.05	1.24	0.93	1.23
+
+$PREFIX="aesni";	# if $PREFIX is set to "AES", the script
+			# generates drop-in replacement for
+			# crypto/aes/asm/aes-586.pl:-)
+$inline=1;		# inline _aesni_[en|de]crypt
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+push(@INC,"${dir}","${dir}../../perlasm");
+require "x86asm.pl";
+
+$output = pop;
+open OUT,">$output";
+*STDOUT=*OUT;
+
+&asm_init($ARGV[0]);
+
+&external_label("OPENSSL_ia32cap_P");
+&static_label("key_const");
+
+if ($PREFIX eq "aesni")	{ $movekey=\&movups; }
+else			{ $movekey=\&movups; }
+
+$len="eax";
+$rounds="ecx";
+$key="edx";
+$inp="esi";
+$out="edi";
+$rounds_="ebx";	# backup copy for $rounds
+$key_="ebp";	# backup copy for $key
+
+$rndkey0="xmm0";
+$rndkey1="xmm1";
+$inout0="xmm2";
+$inout1="xmm3";
+$inout2="xmm4";
+$inout3="xmm5";	$in1="xmm5";
+$inout4="xmm6";	$in0="xmm6";
+$inout5="xmm7";	$ivec="xmm7";
+
+# AESNI extension
+sub aeskeygenassist
+{ my($dst,$src,$imm)=@_;
+    if ("$dst:$src" =~ /xmm([0-7]):xmm([0-7])/)
+    {	&data_byte(0x66,0x0f,0x3a,0xdf,0xc0|($1<<3)|$2,$imm);	}
+}
+sub aescommon
+{ my($opcodelet,$dst,$src)=@_;
+    if ("$dst:$src" =~ /xmm([0-7]):xmm([0-7])/)
+    {	&data_byte(0x66,0x0f,0x38,$opcodelet,0xc0|($1<<3)|$2);}
+}
+sub aesimc	{ aescommon(0xdb,@_); }
+sub aesenc	{ aescommon(0xdc,@_); }
+sub aesenclast	{ aescommon(0xdd,@_); }
+sub aesdec	{ aescommon(0xde,@_); }
+sub aesdeclast	{ aescommon(0xdf,@_); }
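+
+# Worked example, derived from the code above: &aesenc("xmm2","xmm1")
+# matches with $1=2 (dst) and $2=1 (src), so aescommon() emits
+#
+#	.byte	0x66,0x0f,0x38,0xdc,0xd1	# aesenc %xmm1,%xmm2
+#
+# i.e. the 66 0F 38 escape, the AESENC opcodelet 0xdc, and ModR/M
+# 0xc0|(2<<3)|1 = 0xd1.  Hand-assembled bytes keep the module usable
+# with assemblers that predate the AES-NI mnemonics.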
+
+# Inline version of internal aesni_[en|de]crypt1
+{ my $sn;
+sub aesni_inline_generate1
+{ my ($p,$inout,$ivec)=@_; $inout=$inout0 if (!defined($inout));
+  $sn++;
+
+    &$movekey		($rndkey0,&QWP(0,$key));
+    &$movekey		($rndkey1,&QWP(16,$key));
+    &xorps		($ivec,$rndkey0)	if (defined($ivec));
+    &lea		($key,&DWP(32,$key));
+    &xorps		($inout,$ivec)		if (defined($ivec));
+    &xorps		($inout,$rndkey0)	if (!defined($ivec));
+    &set_label("${p}1_loop_$sn");
+	eval"&aes${p}	($inout,$rndkey1)";
+	&dec		($rounds);
+	&$movekey	($rndkey1,&QWP(0,$key));
+	&lea		($key,&DWP(16,$key));
+    &jnz		(&label("${p}1_loop_$sn"));
+    eval"&aes${p}last	($inout,$rndkey1)";
+}}
+
+sub aesni_generate1	# fully unrolled loop
+{ my ($p,$inout)=@_; $inout=$inout0 if (!defined($inout));
+
+    &function_begin_B("_aesni_${p}rypt1");
+	&movups		($rndkey0,&QWP(0,$key));
+	&$movekey	($rndkey1,&QWP(0x10,$key));
+	&xorps		($inout,$rndkey0);
+	&$movekey	($rndkey0,&QWP(0x20,$key));
+	&lea		($key,&DWP(0x30,$key));
+	&cmp		($rounds,11);
+	&jb		(&label("${p}128"));
+	&lea		($key,&DWP(0x20,$key));
+	&je		(&label("${p}192"));
+	&lea		($key,&DWP(0x20,$key));
+	eval"&aes${p}	($inout,$rndkey1)";
+	&$movekey	($rndkey1,&QWP(-0x40,$key));
+	eval"&aes${p}	($inout,$rndkey0)";
+	&$movekey	($rndkey0,&QWP(-0x30,$key));
+    &set_label("${p}192");
+	eval"&aes${p}	($inout,$rndkey1)";
+	&$movekey	($rndkey1,&QWP(-0x20,$key));
+	eval"&aes${p}	($inout,$rndkey0)";
+	&$movekey	($rndkey0,&QWP(-0x10,$key));
+    &set_label("${p}128");
+	eval"&aes${p}	($inout,$rndkey1)";
+	&$movekey	($rndkey1,&QWP(0,$key));
+	eval"&aes${p}	($inout,$rndkey0)";
+	&$movekey	($rndkey0,&QWP(0x10,$key));
+	eval"&aes${p}	($inout,$rndkey1)";
+	&$movekey	($rndkey1,&QWP(0x20,$key));
+	eval"&aes${p}	($inout,$rndkey0)";
+	&$movekey	($rndkey0,&QWP(0x30,$key));
+	eval"&aes${p}	($inout,$rndkey1)";
+	&$movekey	($rndkey1,&QWP(0x40,$key));
+	eval"&aes${p}	($inout,$rndkey0)";
+	&$movekey	($rndkey0,&QWP(0x50,$key));
+	eval"&aes${p}	($inout,$rndkey1)";
+	&$movekey	($rndkey1,&QWP(0x60,$key));
+	eval"&aes${p}	($inout,$rndkey0)";
+	&$movekey	($rndkey0,&QWP(0x70,$key));
+	eval"&aes${p}	($inout,$rndkey1)";
+    eval"&aes${p}last	($inout,$rndkey0)";
+    &ret();
+    &function_end_B("_aesni_${p}rypt1");
+}
+
+# void $PREFIX_encrypt (const void *inp,void *out,const AES_KEY *key);
+&aesni_generate1("enc") if (!$inline);
+&function_begin_B("${PREFIX}_encrypt");
+	&mov	("eax",&wparam(0));
+	&mov	($key,&wparam(2));
+	&movups	($inout0,&QWP(0,"eax"));
+	&mov	($rounds,&DWP(240,$key));
+	&mov	("eax",&wparam(1));
+	if ($inline)
+	{   &aesni_inline_generate1("enc");	}
+	else
+	{   &call	("_aesni_encrypt1");	}
+	&pxor	($rndkey0,$rndkey0);		# clear register bank
+	&pxor	($rndkey1,$rndkey1);
+	&movups	(&QWP(0,"eax"),$inout0);
+	&pxor	($inout0,$inout0);
+	&ret	();
+&function_end_B("${PREFIX}_encrypt");
+
+# void $PREFIX_decrypt (const void *inp,void *out,const AES_KEY *key);
+&aesni_generate1("dec") if(!$inline);
+&function_begin_B("${PREFIX}_decrypt");
+	&mov	("eax",&wparam(0));
+	&mov	($key,&wparam(2));
+	&movups	($inout0,&QWP(0,"eax"));
+	&mov	($rounds,&DWP(240,$key));
+	&mov	("eax",&wparam(1));
+	if ($inline)
+	{   &aesni_inline_generate1("dec");	}
+	else
+	{   &call	("_aesni_decrypt1");	}
+	&pxor	($rndkey0,$rndkey0);		# clear register bank
+	&pxor	($rndkey1,$rndkey1);
+	&movups	(&QWP(0,"eax"),$inout0);
+	&pxor	($inout0,$inout0);
+	&ret	();
+&function_end_B("${PREFIX}_decrypt");
+
+# _aesni_[en|de]cryptN are private interfaces, N denotes interleave
+# factor. Why were 3x subroutines originally used in loops? Even
+# though aes[enc|dec] latency was originally 6, it could be scheduled
+# only every *2nd* cycle. Thus 3x interleave was the one providing
+# optimal utilization, i.e. the subroutine's throughput was virtually
+# the same as that of a non-interleaved subroutine [for up to 3 input
+# blocks]. This is why it originally made no sense to implement a 2x
+# subroutine. But times change and it became appropriate to spend the
+# extra 192 bytes on a 2x subroutine on account of Atom Silvermont.
+# For processors that can schedule aes[enc|dec] every cycle the
+# optimal interleave factor equals the corresponding instruction
+# latency. 8x is optimal for *Bridge, but it's infeasible to
+# accommodate such an implementation in the XMM registers addressable
+# in 32-bit mode, therefore a maximum of 6x is used instead...
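+#
+# A worked sizing example for the paragraph above: with latency 6 and
+# an issue slot every 2nd cycle, ceil(6/2)=3 blocks in flight saturate
+# the unit, hence the original 3x subroutines; a core that issues
+# every cycle wants latency/1 blocks, e.g. 8x on *Bridge, but 8
+# in-flight blocks plus two round-key registers exceed the eight XMM
+# registers visible in 32-bit mode, so 6x is the practical ceiling
+# here.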
+
+sub aesni_generate2
+{ my $p=shift;
+
+    &function_begin_B("_aesni_${p}rypt2");
+	&$movekey	($rndkey0,&QWP(0,$key));
+	&shl		($rounds,4);
+	&$movekey	($rndkey1,&QWP(16,$key));
+	&xorps		($inout0,$rndkey0);
+	&pxor		($inout1,$rndkey0);
+	&$movekey	($rndkey0,&QWP(32,$key));
+	&lea		($key,&DWP(32,$key,$rounds));
+	&neg		($rounds);
+	&add		($rounds,16);
+
+    &set_label("${p}2_loop");
+	eval"&aes${p}	($inout0,$rndkey1)";
+	eval"&aes${p}	($inout1,$rndkey1)";
+	&$movekey	($rndkey1,&QWP(0,$key,$rounds));
+	&add		($rounds,32);
+	eval"&aes${p}	($inout0,$rndkey0)";
+	eval"&aes${p}	($inout1,$rndkey0)";
+	&$movekey	($rndkey0,&QWP(-16,$key,$rounds));
+	&jnz		(&label("${p}2_loop"));
+    eval"&aes${p}	($inout0,$rndkey1)";
+    eval"&aes${p}	($inout1,$rndkey1)";
+    eval"&aes${p}last	($inout0,$rndkey0)";
+    eval"&aes${p}last	($inout1,$rndkey0)";
+    &ret();
+    &function_end_B("_aesni_${p}rypt2");
+}
+
+sub aesni_generate3
+{ my $p=shift;
+
+    &function_begin_B("_aesni_${p}rypt3");
+	&$movekey	($rndkey0,&QWP(0,$key));
+	&shl		($rounds,4);
+	&$movekey	($rndkey1,&QWP(16,$key));
+	&xorps		($inout0,$rndkey0);
+	&pxor		($inout1,$rndkey0);
+	&pxor		($inout2,$rndkey0);
+	&$movekey	($rndkey0,&QWP(32,$key));
+	&lea		($key,&DWP(32,$key,$rounds));
+	&neg		($rounds);
+	&add		($rounds,16);
+
+    &set_label("${p}3_loop");
+	eval"&aes${p}	($inout0,$rndkey1)";
+	eval"&aes${p}	($inout1,$rndkey1)";
+	eval"&aes${p}	($inout2,$rndkey1)";
+	&$movekey	($rndkey1,&QWP(0,$key,$rounds));
+	&add		($rounds,32);
+	eval"&aes${p}	($inout0,$rndkey0)";
+	eval"&aes${p}	($inout1,$rndkey0)";
+	eval"&aes${p}	($inout2,$rndkey0)";
+	&$movekey	($rndkey0,&QWP(-16,$key,$rounds));
+	&jnz		(&label("${p}3_loop"));
+    eval"&aes${p}	($inout0,$rndkey1)";
+    eval"&aes${p}	($inout1,$rndkey1)";
+    eval"&aes${p}	($inout2,$rndkey1)";
+    eval"&aes${p}last	($inout0,$rndkey0)";
+    eval"&aes${p}last	($inout1,$rndkey0)";
+    eval"&aes${p}last	($inout2,$rndkey0)";
+    &ret();
+    &function_end_B("_aesni_${p}rypt3");
+}
+
+# 4x interleave is implemented to improve small-block performance,
+# most notably [and naturally] 4-block by ~30%. One can argue that 5x
+# should have been implemented as well, but the improvement would be
+# <20%, so it's not worth it...
+sub aesni_generate4
+{ my $p=shift;
+
+    &function_begin_B("_aesni_${p}rypt4");
+	&$movekey	($rndkey0,&QWP(0,$key));
+	&$movekey	($rndkey1,&QWP(16,$key));
+	&shl		($rounds,4);
+	&xorps		($inout0,$rndkey0);
+	&pxor		($inout1,$rndkey0);
+	&pxor		($inout2,$rndkey0);
+	&pxor		($inout3,$rndkey0);
+	&$movekey	($rndkey0,&QWP(32,$key));
+	&lea		($key,&DWP(32,$key,$rounds));
+	&neg		($rounds);
+	&data_byte	(0x0f,0x1f,0x40,0x00);	# 4-byte nop
+	&add		($rounds,16);
+
+    &set_label("${p}4_loop");
+	eval"&aes${p}	($inout0,$rndkey1)";
+	eval"&aes${p}	($inout1,$rndkey1)";
+	eval"&aes${p}	($inout2,$rndkey1)";
+	eval"&aes${p}	($inout3,$rndkey1)";
+	&$movekey	($rndkey1,&QWP(0,$key,$rounds));
+	&add		($rounds,32);
+	eval"&aes${p}	($inout0,$rndkey0)";
+	eval"&aes${p}	($inout1,$rndkey0)";
+	eval"&aes${p}	($inout2,$rndkey0)";
+	eval"&aes${p}	($inout3,$rndkey0)";
+	&$movekey	($rndkey0,&QWP(-16,$key,$rounds));
+    &jnz		(&label("${p}4_loop"));
+
+    eval"&aes${p}	($inout0,$rndkey1)";
+    eval"&aes${p}	($inout1,$rndkey1)";
+    eval"&aes${p}	($inout2,$rndkey1)";
+    eval"&aes${p}	($inout3,$rndkey1)";
+    eval"&aes${p}last	($inout0,$rndkey0)";
+    eval"&aes${p}last	($inout1,$rndkey0)";
+    eval"&aes${p}last	($inout2,$rndkey0)";
+    eval"&aes${p}last	($inout3,$rndkey0)";
+    &ret();
+    &function_end_B("_aesni_${p}rypt4");
+}
+
+sub aesni_generate6
+{ my $p=shift;
+
+    &function_begin_B("_aesni_${p}rypt6");
+    &static_label("_aesni_${p}rypt6_enter");
+	&$movekey	($rndkey0,&QWP(0,$key));
+	&shl		($rounds,4);
+	&$movekey	($rndkey1,&QWP(16,$key));
+	&xorps		($inout0,$rndkey0);
+	&pxor		($inout1,$rndkey0);	# pxor does better here
+	&pxor		($inout2,$rndkey0);
+	eval"&aes${p}	($inout0,$rndkey1)";
+	&pxor		($inout3,$rndkey0);
+	&pxor		($inout4,$rndkey0);
+	eval"&aes${p}	($inout1,$rndkey1)";
+	&lea		($key,&DWP(32,$key,$rounds));
+	&neg		($rounds);
+	eval"&aes${p}	($inout2,$rndkey1)";
+	&pxor		($inout5,$rndkey0);
+	&$movekey	($rndkey0,&QWP(0,$key,$rounds));
+	&add		($rounds,16);
+	&jmp		(&label("_aesni_${p}rypt6_inner"));
+
+    &set_label("${p}6_loop",16);
+	eval"&aes${p}	($inout0,$rndkey1)";
+	eval"&aes${p}	($inout1,$rndkey1)";
+	eval"&aes${p}	($inout2,$rndkey1)";
+    &set_label("_aesni_${p}rypt6_inner");
+	eval"&aes${p}	($inout3,$rndkey1)";
+	eval"&aes${p}	($inout4,$rndkey1)";
+	eval"&aes${p}	($inout5,$rndkey1)";
+    &set_label("_aesni_${p}rypt6_enter");
+	&$movekey	($rndkey1,&QWP(0,$key,$rounds));
+	&add		($rounds,32);
+	eval"&aes${p}	($inout0,$rndkey0)";
+	eval"&aes${p}	($inout1,$rndkey0)";
+	eval"&aes${p}	($inout2,$rndkey0)";
+	eval"&aes${p}	($inout3,$rndkey0)";
+	eval"&aes${p}	($inout4,$rndkey0)";
+	eval"&aes${p}	($inout5,$rndkey0)";
+	&$movekey	($rndkey0,&QWP(-16,$key,$rounds));
+    &jnz		(&label("${p}6_loop"));
+
+    eval"&aes${p}	($inout0,$rndkey1)";
+    eval"&aes${p}	($inout1,$rndkey1)";
+    eval"&aes${p}	($inout2,$rndkey1)";
+    eval"&aes${p}	($inout3,$rndkey1)";
+    eval"&aes${p}	($inout4,$rndkey1)";
+    eval"&aes${p}	($inout5,$rndkey1)";
+    eval"&aes${p}last	($inout0,$rndkey0)";
+    eval"&aes${p}last	($inout1,$rndkey0)";
+    eval"&aes${p}last	($inout2,$rndkey0)";
+    eval"&aes${p}last	($inout3,$rndkey0)";
+    eval"&aes${p}last	($inout4,$rndkey0)";
+    eval"&aes${p}last	($inout5,$rndkey0)";
+    &ret();
+    &function_end_B("_aesni_${p}rypt6");
+}
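+
+# Note the staggered entry above: round 1 for $inout0..2 is issued
+# while $inout3..5 are still being xored with round key 0, and the jmp
+# enters the loop at _aesni_${p}rypt6_inner so those three blocks are
+# not given round 1 twice; the initial AddRoundKey pxors thus hide
+# under the first aes[enc|dec] latencies.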
+&aesni_generate2("enc") if ($PREFIX eq "aesni");
+&aesni_generate2("dec");
+&aesni_generate3("enc") if ($PREFIX eq "aesni");
+&aesni_generate3("dec");
+&aesni_generate4("enc") if ($PREFIX eq "aesni");
+&aesni_generate4("dec");
+&aesni_generate6("enc") if ($PREFIX eq "aesni");
+&aesni_generate6("dec");
+
+if ($PREFIX eq "aesni") {
+######################################################################
+# void aesni_ecb_encrypt (const void *in, void *out,
+#                         size_t length, const AES_KEY *key,
+#                         int enc);
+&function_begin("aesni_ecb_encrypt");
+	&mov	($inp,&wparam(0));
+	&mov	($out,&wparam(1));
+	&mov	($len,&wparam(2));
+	&mov	($key,&wparam(3));
+	&mov	($rounds_,&wparam(4));
+	&and	($len,-16);
+	&jz	(&label("ecb_ret"));
+	&mov	($rounds,&DWP(240,$key));
+	&test	($rounds_,$rounds_);
+	&jz	(&label("ecb_decrypt"));
+
+	&mov	($key_,$key);		# backup $key
+	&mov	($rounds_,$rounds);	# backup $rounds
+	&cmp	($len,0x60);
+	&jb	(&label("ecb_enc_tail"));
+
+	&movdqu	($inout0,&QWP(0,$inp));
+	&movdqu	($inout1,&QWP(0x10,$inp));
+	&movdqu	($inout2,&QWP(0x20,$inp));
+	&movdqu	($inout3,&QWP(0x30,$inp));
+	&movdqu	($inout4,&QWP(0x40,$inp));
+	&movdqu	($inout5,&QWP(0x50,$inp));
+	&lea	($inp,&DWP(0x60,$inp));
+	&sub	($len,0x60);
+	&jmp	(&label("ecb_enc_loop6_enter"));
+
+&set_label("ecb_enc_loop6",16);
+	&movups	(&QWP(0,$out),$inout0);
+	&movdqu	($inout0,&QWP(0,$inp));
+	&movups	(&QWP(0x10,$out),$inout1);
+	&movdqu	($inout1,&QWP(0x10,$inp));
+	&movups	(&QWP(0x20,$out),$inout2);
+	&movdqu	($inout2,&QWP(0x20,$inp));
+	&movups	(&QWP(0x30,$out),$inout3);
+	&movdqu	($inout3,&QWP(0x30,$inp));
+	&movups	(&QWP(0x40,$out),$inout4);
+	&movdqu	($inout4,&QWP(0x40,$inp));
+	&movups	(&QWP(0x50,$out),$inout5);
+	&lea	($out,&DWP(0x60,$out));
+	&movdqu	($inout5,&QWP(0x50,$inp));
+	&lea	($inp,&DWP(0x60,$inp));
+&set_label("ecb_enc_loop6_enter");
+
+	&call	("_aesni_encrypt6");
+
+	&mov	($key,$key_);		# restore $key
+	&mov	($rounds,$rounds_);	# restore $rounds
+	&sub	($len,0x60);
+	&jnc	(&label("ecb_enc_loop6"));
+
+	&movups	(&QWP(0,$out),$inout0);
+	&movups	(&QWP(0x10,$out),$inout1);
+	&movups	(&QWP(0x20,$out),$inout2);
+	&movups	(&QWP(0x30,$out),$inout3);
+	&movups	(&QWP(0x40,$out),$inout4);
+	&movups	(&QWP(0x50,$out),$inout5);
+	&lea	($out,&DWP(0x60,$out));
+	&add	($len,0x60);
+	&jz	(&label("ecb_ret"));
+
+&set_label("ecb_enc_tail");
+	&movups	($inout0,&QWP(0,$inp));
+	&cmp	($len,0x20);
+	&jb	(&label("ecb_enc_one"));
+	&movups	($inout1,&QWP(0x10,$inp));
+	&je	(&label("ecb_enc_two"));
+	&movups	($inout2,&QWP(0x20,$inp));
+	&cmp	($len,0x40);
+	&jb	(&label("ecb_enc_three"));
+	&movups	($inout3,&QWP(0x30,$inp));
+	&je	(&label("ecb_enc_four"));
+	&movups	($inout4,&QWP(0x40,$inp));
+	&xorps	($inout5,$inout5);
+	&call	("_aesni_encrypt6");
+	&movups	(&QWP(0,$out),$inout0);
+	&movups	(&QWP(0x10,$out),$inout1);
+	&movups	(&QWP(0x20,$out),$inout2);
+	&movups	(&QWP(0x30,$out),$inout3);
+	&movups	(&QWP(0x40,$out),$inout4);
+	&jmp	(&label("ecb_ret"));
+
+&set_label("ecb_enc_one",16);
+	if ($inline)
+	{   &aesni_inline_generate1("enc");	}
+	else
+	{   &call	("_aesni_encrypt1");	}
+	&movups	(&QWP(0,$out),$inout0);
+	&jmp	(&label("ecb_ret"));
+
+&set_label("ecb_enc_two",16);
+	&call	("_aesni_encrypt2");
+	&movups	(&QWP(0,$out),$inout0);
+	&movups	(&QWP(0x10,$out),$inout1);
+	&jmp	(&label("ecb_ret"));
+
+&set_label("ecb_enc_three",16);
+	&call	("_aesni_encrypt3");
+	&movups	(&QWP(0,$out),$inout0);
+	&movups	(&QWP(0x10,$out),$inout1);
+	&movups	(&QWP(0x20,$out),$inout2);
+	&jmp	(&label("ecb_ret"));
+
+&set_label("ecb_enc_four",16);
+	&call	("_aesni_encrypt4");
+	&movups	(&QWP(0,$out),$inout0);
+	&movups	(&QWP(0x10,$out),$inout1);
+	&movups	(&QWP(0x20,$out),$inout2);
+	&movups	(&QWP(0x30,$out),$inout3);
+	&jmp	(&label("ecb_ret"));
+######################################################################
+&set_label("ecb_decrypt",16);
+	&mov	($key_,$key);		# backup $key
+	&mov	($rounds_,$rounds);	# backup $rounds
+	&cmp	($len,0x60);
+	&jb	(&label("ecb_dec_tail"));
+
+	&movdqu	($inout0,&QWP(0,$inp));
+	&movdqu	($inout1,&QWP(0x10,$inp));
+	&movdqu	($inout2,&QWP(0x20,$inp));
+	&movdqu	($inout3,&QWP(0x30,$inp));
+	&movdqu	($inout4,&QWP(0x40,$inp));
+	&movdqu	($inout5,&QWP(0x50,$inp));
+	&lea	($inp,&DWP(0x60,$inp));
+	&sub	($len,0x60);
+	&jmp	(&label("ecb_dec_loop6_enter"));
+
+&set_label("ecb_dec_loop6",16);
+	&movups	(&QWP(0,$out),$inout0);
+	&movdqu	($inout0,&QWP(0,$inp));
+	&movups	(&QWP(0x10,$out),$inout1);
+	&movdqu	($inout1,&QWP(0x10,$inp));
+	&movups	(&QWP(0x20,$out),$inout2);
+	&movdqu	($inout2,&QWP(0x20,$inp));
+	&movups	(&QWP(0x30,$out),$inout3);
+	&movdqu	($inout3,&QWP(0x30,$inp));
+	&movups	(&QWP(0x40,$out),$inout4);
+	&movdqu	($inout4,&QWP(0x40,$inp));
+	&movups	(&QWP(0x50,$out),$inout5);
+	&lea	($out,&DWP(0x60,$out));
+	&movdqu	($inout5,&QWP(0x50,$inp));
+	&lea	($inp,&DWP(0x60,$inp));
+&set_label("ecb_dec_loop6_enter");
+
+	&call	("_aesni_decrypt6");
+
+	&mov	($key,$key_);		# restore $key
+	&mov	($rounds,$rounds_);	# restore $rounds
+	&sub	($len,0x60);
+	&jnc	(&label("ecb_dec_loop6"));
+
+	&movups	(&QWP(0,$out),$inout0);
+	&movups	(&QWP(0x10,$out),$inout1);
+	&movups	(&QWP(0x20,$out),$inout2);
+	&movups	(&QWP(0x30,$out),$inout3);
+	&movups	(&QWP(0x40,$out),$inout4);
+	&movups	(&QWP(0x50,$out),$inout5);
+	&lea	($out,&DWP(0x60,$out));
+	&add	($len,0x60);
+	&jz	(&label("ecb_ret"));
+
+&set_label("ecb_dec_tail");
+	&movups	($inout0,&QWP(0,$inp));
+	&cmp	($len,0x20);
+	&jb	(&label("ecb_dec_one"));
+	&movups	($inout1,&QWP(0x10,$inp));
+	&je	(&label("ecb_dec_two"));
+	&movups	($inout2,&QWP(0x20,$inp));
+	&cmp	($len,0x40);
+	&jb	(&label("ecb_dec_three"));
+	&movups	($inout3,&QWP(0x30,$inp));
+	&je	(&label("ecb_dec_four"));
+	&movups	($inout4,&QWP(0x40,$inp));
+	&xorps	($inout5,$inout5);
+	&call	("_aesni_decrypt6");
+	&movups	(&QWP(0,$out),$inout0);
+	&movups	(&QWP(0x10,$out),$inout1);
+	&movups	(&QWP(0x20,$out),$inout2);
+	&movups	(&QWP(0x30,$out),$inout3);
+	&movups	(&QWP(0x40,$out),$inout4);
+	&jmp	(&label("ecb_ret"));
+
+&set_label("ecb_dec_one",16);
+	if ($inline)
+	{   &aesni_inline_generate1("dec");	}
+	else
+	{   &call	("_aesni_decrypt1");	}
+	&movups	(&QWP(0,$out),$inout0);
+	&jmp	(&label("ecb_ret"));
+
+&set_label("ecb_dec_two",16);
+	&call	("_aesni_decrypt2");
+	&movups	(&QWP(0,$out),$inout0);
+	&movups	(&QWP(0x10,$out),$inout1);
+	&jmp	(&label("ecb_ret"));
+
+&set_label("ecb_dec_three",16);
+	&call	("_aesni_decrypt3");
+	&movups	(&QWP(0,$out),$inout0);
+	&movups	(&QWP(0x10,$out),$inout1);
+	&movups	(&QWP(0x20,$out),$inout2);
+	&jmp	(&label("ecb_ret"));
+
+&set_label("ecb_dec_four",16);
+	&call	("_aesni_decrypt4");
+	&movups	(&QWP(0,$out),$inout0);
+	&movups	(&QWP(0x10,$out),$inout1);
+	&movups	(&QWP(0x20,$out),$inout2);
+	&movups	(&QWP(0x30,$out),$inout3);
+
+&set_label("ecb_ret");
+	&pxor	("xmm0","xmm0");		# clear register bank
+	&pxor	("xmm1","xmm1");
+	&pxor	("xmm2","xmm2");
+	&pxor	("xmm3","xmm3");
+	&pxor	("xmm4","xmm4");
+	&pxor	("xmm5","xmm5");
+	&pxor	("xmm6","xmm6");
+	&pxor	("xmm7","xmm7");
+&function_end("aesni_ecb_encrypt");
+
+######################################################################
+# void aesni_ccm64_[en|de]crypt_blocks (const void *in, void *out,
+#                         size_t blocks, const AES_KEY *key,
+#                         const char *ivec,char *cmac);
+#
+# Handles only complete blocks, operates on a 64-bit counter and
+# does not update *ivec! Nor does it finalize the CMAC value
+# (see engine/eng_aesni.c for details)
+#
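+# In CCM terms (RFC 3610), each iteration below advances both halves
+# of the mode through one interleaved pair of aesenc chains: the
+# CBC-MAC update cmac = E_K(cmac ^ P_i) and the keystream block
+# E_K(ctr_i); the output C_i = P_i ^ E_K(ctr_i) is then stored and the
+# 64-bit counter is bumped via paddq with the increment vector
+# composed on the stack.
+#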
+{ my $cmac=$inout1;
+&function_begin("aesni_ccm64_encrypt_blocks");
+	&mov	($inp,&wparam(0));
+	&mov	($out,&wparam(1));
+	&mov	($len,&wparam(2));
+	&mov	($key,&wparam(3));
+	&mov	($rounds_,&wparam(4));
+	&mov	($rounds,&wparam(5));
+	&mov	($key_,"esp");
+	&sub	("esp",60);
+	&and	("esp",-16);			# align stack
+	&mov	(&DWP(48,"esp"),$key_);
+
+	&movdqu	($ivec,&QWP(0,$rounds_));	# load ivec
+	&movdqu	($cmac,&QWP(0,$rounds));	# load cmac
+	&mov	($rounds,&DWP(240,$key));
+
+	# compose byte-swap control mask for pshufb on stack
+	&mov	(&DWP(0,"esp"),0x0c0d0e0f);
+	&mov	(&DWP(4,"esp"),0x08090a0b);
+	&mov	(&DWP(8,"esp"),0x04050607);
+	&mov	(&DWP(12,"esp"),0x00010203);
+
+	# compose counter increment vector on stack
+	&mov	($rounds_,1);
+	&xor	($key_,$key_);
+	&mov	(&DWP(16,"esp"),$rounds_);
+	&mov	(&DWP(20,"esp"),$key_);
+	&mov	(&DWP(24,"esp"),$key_);
+	&mov	(&DWP(28,"esp"),$key_);
+
+	&shl	($rounds,4);
+	&mov	($rounds_,16);
+	&lea	($key_,&DWP(0,$key));
+	&movdqa	($inout3,&QWP(0,"esp"));
+	&movdqa	($inout0,$ivec);
+	&lea	($key,&DWP(32,$key,$rounds));
+	&sub	($rounds_,$rounds);
+	&pshufb	($ivec,$inout3);
+
+&set_label("ccm64_enc_outer");
+	&$movekey	($rndkey0,&QWP(0,$key_));
+	&mov		($rounds,$rounds_);
+	&movups		($in0,&QWP(0,$inp));
+
+	&xorps		($inout0,$rndkey0);
+	&$movekey	($rndkey1,&QWP(16,$key_));
+	&xorps		($rndkey0,$in0);
+	&xorps		($cmac,$rndkey0);		# cmac^=inp
+	&$movekey	($rndkey0,&QWP(32,$key_));
+
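+	# the counter block and the CMAC block travel through the AES rounds
+	# in parallel, two rounds per iteration, which hides aesenc latency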
+&set_label("ccm64_enc2_loop");
+	&aesenc		($inout0,$rndkey1);
+	&aesenc		($cmac,$rndkey1);
+	&$movekey	($rndkey1,&QWP(0,$key,$rounds));
+	&add		($rounds,32);
+	&aesenc		($inout0,$rndkey0);
+	&aesenc		($cmac,$rndkey0);
+	&$movekey	($rndkey0,&QWP(-16,$key,$rounds));
+	&jnz		(&label("ccm64_enc2_loop"));
+	&aesenc		($inout0,$rndkey1);
+	&aesenc		($cmac,$rndkey1);
+	&paddq		($ivec,&QWP(16,"esp"));
+	&dec		($len);
+	&aesenclast	($inout0,$rndkey0);
+	&aesenclast	($cmac,$rndkey0);
+
+	&lea	($inp,&DWP(16,$inp));
+	&xorps	($in0,$inout0);			# inp^=E(ivec)
+	&movdqa	($inout0,$ivec);
+	&movups	(&QWP(0,$out),$in0);		# save output
+	&pshufb	($inout0,$inout3);
+	&lea	($out,&DWP(16,$out));
+	&jnz	(&label("ccm64_enc_outer"));
+
+	&mov	("esp",&DWP(48,"esp"));		# restore %esp
+	&mov	($out,&wparam(5));
+	&movups	(&QWP(0,$out),$cmac);
+
+	&pxor	("xmm0","xmm0");		# clear register bank
+	&pxor	("xmm1","xmm1");
+	&pxor	("xmm2","xmm2");
+	&pxor	("xmm3","xmm3");
+	&pxor	("xmm4","xmm4");
+	&pxor	("xmm5","xmm5");
+	&pxor	("xmm6","xmm6");
+	&pxor	("xmm7","xmm7");
+&function_end("aesni_ccm64_encrypt_blocks");
+
+&function_begin("aesni_ccm64_decrypt_blocks");
+	&mov	($inp,&wparam(0));
+	&mov	($out,&wparam(1));
+	&mov	($len,&wparam(2));
+	&mov	($key,&wparam(3));
+	&mov	($rounds_,&wparam(4));
+	&mov	($rounds,&wparam(5));
+	&mov	($key_,"esp");
+	&sub	("esp",60);
+	&and	("esp",-16);			# align stack
+	&mov	(&DWP(48,"esp"),$key_);
+
+	&movdqu	($ivec,&QWP(0,$rounds_));	# load ivec
+	&movdqu	($cmac,&QWP(0,$rounds));	# load cmac
+	&mov	($rounds,&DWP(240,$key));
+
+	# compose byte-swap control mask for pshufb on stack
+	&mov	(&DWP(0,"esp"),0x0c0d0e0f);
+	&mov	(&DWP(4,"esp"),0x08090a0b);
+	&mov	(&DWP(8,"esp"),0x04050607);
+	&mov	(&DWP(12,"esp"),0x00010203);
+
+	# compose counter increment vector on stack
+	&mov	($rounds_,1);
+	&xor	($key_,$key_);
+	&mov	(&DWP(16,"esp"),$rounds_);
+	&mov	(&DWP(20,"esp"),$key_);
+	&mov	(&DWP(24,"esp"),$key_);
+	&mov	(&DWP(28,"esp"),$key_);
+
+	&movdqa	($inout3,&QWP(0,"esp"));	# bswap mask
+	&movdqa	($inout0,$ivec);
+
+	&mov	($key_,$key);
+	&mov	($rounds_,$rounds);
+
+	&pshufb	($ivec,$inout3);
+	if ($inline)
+	{   &aesni_inline_generate1("enc");	}
+	else
+	{   &call	("_aesni_encrypt1");	}
+	&shl	($rounds_,4);
+	&mov	($rounds,16);
+	&movups	($in0,&QWP(0,$inp));		# load inp
+	&paddq	($ivec,&QWP(16,"esp"));
+	&lea	($inp,&QWP(16,$inp));
+	&sub	($rounds,$rounds_);
+	&lea	($key,&DWP(32,$key_,$rounds_));
+	&mov	($rounds_,$rounds);
+	&jmp	(&label("ccm64_dec_outer"));
+
+&set_label("ccm64_dec_outer",16);
+	&xorps	($in0,$inout0);			# inp ^= E(ivec)
+	&movdqa	($inout0,$ivec);
+	&movups	(&QWP(0,$out),$in0);		# save output
+	&lea	($out,&DWP(16,$out));
+	&pshufb	($inout0,$inout3);
+
+	&sub	($len,1);
+	&jz	(&label("ccm64_dec_break"));
+
+	&$movekey	($rndkey0,&QWP(0,$key_));
+	&mov		($rounds,$rounds_);
+	&$movekey	($rndkey1,&QWP(16,$key_));
+	&xorps		($in0,$rndkey0);
+	&xorps		($inout0,$rndkey0);
+	&xorps		($cmac,$in0);		# cmac^=out
+	&$movekey	($rndkey0,&QWP(32,$key_));
+
+&set_label("ccm64_dec2_loop");
+	&aesenc		($inout0,$rndkey1);
+	&aesenc		($cmac,$rndkey1);
+	&$movekey	($rndkey1,&QWP(0,$key,$rounds));
+	&add		($rounds,32);
+	&aesenc		($inout0,$rndkey0);
+	&aesenc		($cmac,$rndkey0);
+	&$movekey	($rndkey0,&QWP(-16,$key,$rounds));
+	&jnz		(&label("ccm64_dec2_loop"));
+	&movups		($in0,&QWP(0,$inp));	# load inp
+	&paddq		($ivec,&QWP(16,"esp"));
+	&aesenc		($inout0,$rndkey1);
+	&aesenc		($cmac,$rndkey1);
+	&aesenclast	($inout0,$rndkey0);
+	&aesenclast	($cmac,$rndkey0);
+	&lea		($inp,&QWP(16,$inp));
+	&jmp	(&label("ccm64_dec_outer"));
+
+&set_label("ccm64_dec_break",16);
+	&mov	($rounds,&DWP(240,$key_));
+	&mov	($key,$key_);
+	if ($inline)
+	{   &aesni_inline_generate1("enc",$cmac,$in0);	}
+	else
+	{   &call	("_aesni_encrypt1",$cmac);	}
+
+	&mov	("esp",&DWP(48,"esp"));		# restore %esp
+	&mov	($out,&wparam(5));
+	&movups	(&QWP(0,$out),$cmac);
+
+	&pxor	("xmm0","xmm0");		# clear register bank
+	&pxor	("xmm1","xmm1");
+	&pxor	("xmm2","xmm2");
+	&pxor	("xmm3","xmm3");
+	&pxor	("xmm4","xmm4");
+	&pxor	("xmm5","xmm5");
+	&pxor	("xmm6","xmm6");
+	&pxor	("xmm7","xmm7");
+&function_end("aesni_ccm64_decrypt_blocks");
+}
+
+######################################################################
+# void aesni_ctr32_encrypt_blocks (const void *in, void *out,
+#                         size_t blocks, const AES_KEY *key,
+#                         const char *ivec);
+#
+# Handles only complete blocks, operates on a 32-bit counter and
+# does not update *ivec! (see crypto/modes/ctr128.c for details)
+#
+# stack layout:
+#	0	pshufb mask
+#	16	vector addend: 6,6,6,0 (dwords, low to high)
+#	32	counter-less ivec
+#	48	1st triplet of counter vector
+#	64	2nd triplet of counter vector
+#	80	saved %esp
+
+&function_begin("aesni_ctr32_encrypt_blocks");
+	&mov	($inp,&wparam(0));
+	&mov	($out,&wparam(1));
+	&mov	($len,&wparam(2));
+	&mov	($key,&wparam(3));
+	&mov	($rounds_,&wparam(4));
+	&mov	($key_,"esp");
+	&sub	("esp",88);
+	&and	("esp",-16);			# align stack
+	&mov	(&DWP(80,"esp"),$key_);
+
+	&cmp	($len,1);
+	&je	(&label("ctr32_one_shortcut"));
+
+	&movdqu	($inout5,&QWP(0,$rounds_));	# load ivec
+
+	# compose byte-swap control mask for pshufb on stack
+	&mov	(&DWP(0,"esp"),0x0c0d0e0f);
+	&mov	(&DWP(4,"esp"),0x08090a0b);
+	&mov	(&DWP(8,"esp"),0x04050607);
+	&mov	(&DWP(12,"esp"),0x00010203);
+
+	# compose counter increment vector on stack
+	&mov	($rounds,6);
+	&xor	($key_,$key_);
+	&mov	(&DWP(16,"esp"),$rounds);
+	&mov	(&DWP(20,"esp"),$rounds);
+	&mov	(&DWP(24,"esp"),$rounds);
+	&mov	(&DWP(28,"esp"),$key_);
+
+	&pextrd	($rounds_,$inout5,3);		# pull 32-bit counter
+	&pinsrd	($inout5,$key_,3);		# wipe 32-bit counter
+
+	&mov	($rounds,&DWP(240,$key));	# key->rounds
+
+	# compose 2 vectors of 3x32-bit counters
+	&bswap	($rounds_);
+	&pxor	($rndkey0,$rndkey0);
+	&pxor	($rndkey1,$rndkey1);
+	&movdqa	($inout0,&QWP(0,"esp"));	# load byte-swap mask
+	&pinsrd	($rndkey0,$rounds_,0);
+	&lea	($key_,&DWP(3,$rounds_));
+	&pinsrd	($rndkey1,$key_,0);
+	&inc	($rounds_);
+	&pinsrd	($rndkey0,$rounds_,1);
+	&inc	($key_);
+	&pinsrd	($rndkey1,$key_,1);
+	&inc	($rounds_);
+	&pinsrd	($rndkey0,$rounds_,2);
+	&inc	($key_);
+	&pinsrd	($rndkey1,$key_,2);
+	&movdqa	(&QWP(48,"esp"),$rndkey0);	# save 1st triplet
+	&pshufb	($rndkey0,$inout0);		# byte swap
+	&movdqu	($inout4,&QWP(0,$key));		# key[0]
+	&movdqa	(&QWP(64,"esp"),$rndkey1);	# save 2nd triplet
+	&pshufb	($rndkey1,$inout0);		# byte swap
+
+	&pshufd	($inout0,$rndkey0,3<<6);	# place counter to upper dword
+	&pshufd	($inout1,$rndkey0,2<<6);
+	&cmp	($len,6);
+	&jb	(&label("ctr32_tail"));
+	&pxor	($inout5,$inout4);		# counter-less ivec^key[0]
+	&shl	($rounds,4);
+	&mov	($rounds_,16);
+	&movdqa	(&QWP(32,"esp"),$inout5);	# save counter-less ivec^key[0]
+	&mov	($key_,$key);			# backup $key
+	&sub	($rounds_,$rounds);		# backup twisted $rounds
+	&lea	($key,&DWP(32,$key,$rounds));
+	&sub	($len,6);
+	&jmp	(&label("ctr32_loop6"));
+
+&set_label("ctr32_loop6",16);
+	# inlining _aesni_encrypt6's prologue gives ~6% improvement...
+	&pshufd	($inout2,$rndkey0,1<<6);
+	&movdqa	($rndkey0,&QWP(32,"esp"));	# pull counter-less ivec
+	&pshufd	($inout3,$rndkey1,3<<6);
+	&pxor		($inout0,$rndkey0);	# merge counter-less ivec
+	&pshufd	($inout4,$rndkey1,2<<6);
+	&pxor		($inout1,$rndkey0);
+	&pshufd	($inout5,$rndkey1,1<<6);
+	&$movekey	($rndkey1,&QWP(16,$key_));
+	&pxor		($inout2,$rndkey0);
+	&pxor		($inout3,$rndkey0);
+	&aesenc		($inout0,$rndkey1);
+	&pxor		($inout4,$rndkey0);
+	&pxor		($inout5,$rndkey0);
+	&aesenc		($inout1,$rndkey1);
+	&$movekey	($rndkey0,&QWP(32,$key_));
+	&mov		($rounds,$rounds_);
+	&aesenc		($inout2,$rndkey1);
+	&aesenc		($inout3,$rndkey1);
+	&aesenc		($inout4,$rndkey1);
+	&aesenc		($inout5,$rndkey1);
+
+	&call		(&label("_aesni_encrypt6_enter"));
+
+	&movups	($rndkey1,&QWP(0,$inp));
+	&movups	($rndkey0,&QWP(0x10,$inp));
+	&xorps	($inout0,$rndkey1);
+	&movups	($rndkey1,&QWP(0x20,$inp));
+	&xorps	($inout1,$rndkey0);
+	&movups	(&QWP(0,$out),$inout0);
+	&movdqa	($rndkey0,&QWP(16,"esp"));	# load increment
+	&xorps	($inout2,$rndkey1);
+	&movdqa	($rndkey1,&QWP(64,"esp"));	# load 2nd triplet
+	&movups	(&QWP(0x10,$out),$inout1);
+	&movups	(&QWP(0x20,$out),$inout2);
+
+	&paddd	($rndkey1,$rndkey0);		# 2nd triplet increment
+	&paddd	($rndkey0,&QWP(48,"esp"));	# 1st triplet increment
+	&movdqa	($inout0,&QWP(0,"esp"));	# load byte swap mask
+
+	&movups	($inout1,&QWP(0x30,$inp));
+	&movups	($inout2,&QWP(0x40,$inp));
+	&xorps	($inout3,$inout1);
+	&movups	($inout1,&QWP(0x50,$inp));
+	&lea	($inp,&DWP(0x60,$inp));
+	&movdqa	(&QWP(48,"esp"),$rndkey0);	# save 1st triplet
+	&pshufb	($rndkey0,$inout0);		# byte swap
+	&xorps	($inout4,$inout2);
+	&movups	(&QWP(0x30,$out),$inout3);
+	&xorps	($inout5,$inout1);
+	&movdqa	(&QWP(64,"esp"),$rndkey1);	# save 2nd triplet
+	&pshufb	($rndkey1,$inout0);		# byte swap
+	&movups	(&QWP(0x40,$out),$inout4);
+	&pshufd	($inout0,$rndkey0,3<<6);
+	&movups	(&QWP(0x50,$out),$inout5);
+	&lea	($out,&DWP(0x60,$out));
+
+	&pshufd	($inout1,$rndkey0,2<<6);
+	&sub	($len,6);
+	&jnc	(&label("ctr32_loop6"));
+
+	&add	($len,6);
+	&jz	(&label("ctr32_ret"));
+	&movdqu	($inout5,&QWP(0,$key_));
+	&mov	($key,$key_);
+	&pxor	($inout5,&QWP(32,"esp"));	# restore counter-less ivec
+	&mov	($rounds,&DWP(240,$key_));	# restore $rounds
+
+&set_label("ctr32_tail");
+	&por	($inout0,$inout5);
+	&cmp	($len,2);
+	&jb	(&label("ctr32_one"));
+
+	&pshufd	($inout2,$rndkey0,1<<6);
+	&por	($inout1,$inout5);
+	&je	(&label("ctr32_two"));
+
+	&pshufd	($inout3,$rndkey1,3<<6);
+	&por	($inout2,$inout5);
+	&cmp	($len,4);
+	&jb	(&label("ctr32_three"));
+
+	&pshufd	($inout4,$rndkey1,2<<6);
+	&por	($inout3,$inout5);
+	&je	(&label("ctr32_four"));
+
+	&por	($inout4,$inout5);
+	&call	("_aesni_encrypt6");
+	&movups	($rndkey1,&QWP(0,$inp));
+	&movups	($rndkey0,&QWP(0x10,$inp));
+	&xorps	($inout0,$rndkey1);
+	&movups	($rndkey1,&QWP(0x20,$inp));
+	&xorps	($inout1,$rndkey0);
+	&movups	($rndkey0,&QWP(0x30,$inp));
+	&xorps	($inout2,$rndkey1);
+	&movups	($rndkey1,&QWP(0x40,$inp));
+	&xorps	($inout3,$rndkey0);
+	&movups	(&QWP(0,$out),$inout0);
+	&xorps	($inout4,$rndkey1);
+	&movups	(&QWP(0x10,$out),$inout1);
+	&movups	(&QWP(0x20,$out),$inout2);
+	&movups	(&QWP(0x30,$out),$inout3);
+	&movups	(&QWP(0x40,$out),$inout4);
+	&jmp	(&label("ctr32_ret"));
+
+&set_label("ctr32_one_shortcut",16);
+	&movups	($inout0,&QWP(0,$rounds_));	# load ivec
+	&mov	($rounds,&DWP(240,$key));
+
+&set_label("ctr32_one");
+	if ($inline)
+	{   &aesni_inline_generate1("enc");	}
+	else
+	{   &call	("_aesni_encrypt1");	}
+	&movups	($in0,&QWP(0,$inp));
+	&xorps	($in0,$inout0);
+	&movups	(&QWP(0,$out),$in0);
+	&jmp	(&label("ctr32_ret"));
+
+&set_label("ctr32_two",16);
+	&call	("_aesni_encrypt2");
+	&movups	($inout3,&QWP(0,$inp));
+	&movups	($inout4,&QWP(0x10,$inp));
+	&xorps	($inout0,$inout3);
+	&xorps	($inout1,$inout4);
+	&movups	(&QWP(0,$out),$inout0);
+	&movups	(&QWP(0x10,$out),$inout1);
+	&jmp	(&label("ctr32_ret"));
+
+&set_label("ctr32_three",16);
+	&call	("_aesni_encrypt3");
+	&movups	($inout3,&QWP(0,$inp));
+	&movups	($inout4,&QWP(0x10,$inp));
+	&xorps	($inout0,$inout3);
+	&movups	($inout5,&QWP(0x20,$inp));
+	&xorps	($inout1,$inout4);
+	&movups	(&QWP(0,$out),$inout0);
+	&xorps	($inout2,$inout5);
+	&movups	(&QWP(0x10,$out),$inout1);
+	&movups	(&QWP(0x20,$out),$inout2);
+	&jmp	(&label("ctr32_ret"));
+
+&set_label("ctr32_four",16);
+	&call	("_aesni_encrypt4");
+	&movups	($inout4,&QWP(0,$inp));
+	&movups	($inout5,&QWP(0x10,$inp));
+	&movups	($rndkey1,&QWP(0x20,$inp));
+	&xorps	($inout0,$inout4);
+	&movups	($rndkey0,&QWP(0x30,$inp));
+	&xorps	($inout1,$inout5);
+	&movups	(&QWP(0,$out),$inout0);
+	&xorps	($inout2,$rndkey1);
+	&movups	(&QWP(0x10,$out),$inout1);
+	&xorps	($inout3,$rndkey0);
+	&movups	(&QWP(0x20,$out),$inout2);
+	&movups	(&QWP(0x30,$out),$inout3);
+
+&set_label("ctr32_ret");
+	&pxor	("xmm0","xmm0");		# clear register bank
+	&pxor	("xmm1","xmm1");
+	&pxor	("xmm2","xmm2");
+	&pxor	("xmm3","xmm3");
+	&pxor	("xmm4","xmm4");
+	&movdqa	(&QWP(32,"esp"),"xmm0");	# clear stack
+	&pxor	("xmm5","xmm5");
+	&movdqa	(&QWP(48,"esp"),"xmm0");
+	&pxor	("xmm6","xmm6");
+	&movdqa	(&QWP(64,"esp"),"xmm0");
+	&pxor	("xmm7","xmm7");
+	&mov	("esp",&DWP(80,"esp"));		# restore %esp
+&function_end("aesni_ctr32_encrypt_blocks");
+
+######################################################################
+# void aesni_xts_[en|de]crypt(const char *inp,char *out,size_t len,
+#	const AES_KEY *key1, const AES_KEY *key2,
+#	const unsigned char iv[16]);
+#
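+# Hedged per-block sketch of the mode (illustrative pseudo-C, not this
+# file's code path; decryption swaps in AES_decrypt/key1, while the tweak
+# is always encrypted with key2):
+#
+#	T = AES_encrypt(iv, key2);		/* tweak, encrypted once  */
+#	for each full 16-byte block:
+#		C = AES_encrypt(P ^ T, key1) ^ T;
+#		T = T * x mod (x^128 + x^7 + x^2 + x + 1);  /* doubling */
+#
+# a trailing partial block, if any, is handled by ciphertext stealing
+# (see xts_enc_steal / xts_dec_steal below)
+#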
+{ my ($tweak,$twtmp,$twres,$twmask)=($rndkey1,$rndkey0,$inout0,$inout1);
+
+&function_begin("aesni_xts_encrypt");
+	&mov	($key,&wparam(4));		# key2
+	&mov	($inp,&wparam(5));		# clear-text tweak
+
+	&mov	($rounds,&DWP(240,$key));	# key2->rounds
+	&movups	($inout0,&QWP(0,$inp));
+	if ($inline)
+	{   &aesni_inline_generate1("enc");	}
+	else
+	{   &call	("_aesni_encrypt1");	}
+
+	&mov	($inp,&wparam(0));
+	&mov	($out,&wparam(1));
+	&mov	($len,&wparam(2));
+	&mov	($key,&wparam(3));		# key1
+
+	&mov	($key_,"esp");
+	&sub	("esp",16*7+8);
+	&mov	($rounds,&DWP(240,$key));	# key1->rounds
+	&and	("esp",-16);			# align stack
+
+	&mov	(&DWP(16*6+0,"esp"),0x87);	# compose the magic constant
+	&mov	(&DWP(16*6+4,"esp"),0);
+	&mov	(&DWP(16*6+8,"esp"),1);
+	&mov	(&DWP(16*6+12,"esp"),0);
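+	# (0x87 is the reduction term of the XTS polynomial
+	# x^128 + x^7 + x^2 + x + 1: whenever doubling carries out of
+	# bit 127, this constant is folded back into the low lane)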
+	&mov	(&DWP(16*7+0,"esp"),$len);	# save original $len
+	&mov	(&DWP(16*7+4,"esp"),$key_);	# save original %esp
+
+	&movdqa	($tweak,$inout0);
+	&pxor	($twtmp,$twtmp);
+	&movdqa	($twmask,&QWP(6*16,"esp"));	# 0x0...010...87
+	&pcmpgtd($twtmp,$tweak);		# broadcast upper bits
+
+	&and	($len,-16);
+	&mov	($key_,$key);			# backup $key
+	&mov	($rounds_,$rounds);		# backup $rounds
+	&sub	($len,16*6);
+	&jc	(&label("xts_enc_short"));
+
+	&shl	($rounds,4);
+	&mov	($rounds_,16);
+	&sub	($rounds_,$rounds);
+	&lea	($key,&DWP(32,$key,$rounds));
+	&jmp	(&label("xts_enc_loop6"));
+
+&set_label("xts_enc_loop6",16);
+	for ($i=0;$i<4;$i++) {
+	    &pshufd	($twres,$twtmp,0x13);
+	    &pxor	($twtmp,$twtmp);
+	    &movdqa	(&QWP(16*$i,"esp"),$tweak);
+	    &paddq	($tweak,$tweak);	# &psllq($tweak,1);
+	    &pand	($twres,$twmask);	# isolate carry and residue
+	    &pcmpgtd	($twtmp,$tweak);	# broadcast upper bits
+	    &pxor	($tweak,$twres);
+	}
+	&pshufd	($inout5,$twtmp,0x13);
+	&movdqa	(&QWP(16*$i++,"esp"),$tweak);
+	&paddq	($tweak,$tweak);		# &psllq($tweak,1);
+	 &$movekey	($rndkey0,&QWP(0,$key_));
+	&pand	($inout5,$twmask);		# isolate carry and residue
+	 &movups	($inout0,&QWP(0,$inp));	# load input
+	&pxor	($inout5,$tweak);
+
+	# inline _aesni_encrypt6 prologue and flip xor with tweak and key[0]
+	&mov	($rounds,$rounds_);		# restore $rounds
+	&movdqu	($inout1,&QWP(16*1,$inp));
+	 &xorps		($inout0,$rndkey0);	# input^=rndkey[0]
+	&movdqu	($inout2,&QWP(16*2,$inp));
+	 &pxor		($inout1,$rndkey0);
+	&movdqu	($inout3,&QWP(16*3,$inp));
+	 &pxor		($inout2,$rndkey0);
+	&movdqu	($inout4,&QWP(16*4,$inp));
+	 &pxor		($inout3,$rndkey0);
+	&movdqu	($rndkey1,&QWP(16*5,$inp));
+	 &pxor		($inout4,$rndkey0);
+	&lea	($inp,&DWP(16*6,$inp));
+	&pxor	($inout0,&QWP(16*0,"esp"));	# input^=tweak
+	&movdqa	(&QWP(16*$i,"esp"),$inout5);	# save last tweak
+	&pxor	($inout5,$rndkey1);
+
+	 &$movekey	($rndkey1,&QWP(16,$key_));
+	&pxor	($inout1,&QWP(16*1,"esp"));
+	&pxor	($inout2,&QWP(16*2,"esp"));
+	 &aesenc	($inout0,$rndkey1);
+	&pxor	($inout3,&QWP(16*3,"esp"));
+	&pxor	($inout4,&QWP(16*4,"esp"));
+	 &aesenc	($inout1,$rndkey1);
+	&pxor		($inout5,$rndkey0);
+	 &$movekey	($rndkey0,&QWP(32,$key_));
+	 &aesenc	($inout2,$rndkey1);
+	 &aesenc	($inout3,$rndkey1);
+	 &aesenc	($inout4,$rndkey1);
+	 &aesenc	($inout5,$rndkey1);
+	&call		(&label("_aesni_encrypt6_enter"));
+
+	&movdqa	($tweak,&QWP(16*5,"esp"));	# last tweak
+       &pxor	($twtmp,$twtmp);
+	&xorps	($inout0,&QWP(16*0,"esp"));	# output^=tweak
+       &pcmpgtd	($twtmp,$tweak);		# broadcast upper bits
+	&xorps	($inout1,&QWP(16*1,"esp"));
+	&movups	(&QWP(16*0,$out),$inout0);	# write output
+	&xorps	($inout2,&QWP(16*2,"esp"));
+	&movups	(&QWP(16*1,$out),$inout1);
+	&xorps	($inout3,&QWP(16*3,"esp"));
+	&movups	(&QWP(16*2,$out),$inout2);
+	&xorps	($inout4,&QWP(16*4,"esp"));
+	&movups	(&QWP(16*3,$out),$inout3);
+	&xorps	($inout5,$tweak);
+	&movups	(&QWP(16*4,$out),$inout4);
+       &pshufd	($twres,$twtmp,0x13);
+	&movups	(&QWP(16*5,$out),$inout5);
+	&lea	($out,&DWP(16*6,$out));
+       &movdqa	($twmask,&QWP(16*6,"esp"));	# 0x0...010...87
+
+	&pxor	($twtmp,$twtmp);
+	&paddq	($tweak,$tweak);		# &psllq($tweak,1);
+	&pand	($twres,$twmask);		# isolate carry and residue
+	&pcmpgtd($twtmp,$tweak);		# broadcast upper bits
+	&pxor	($tweak,$twres);
+
+	&sub	($len,16*6);
+	&jnc	(&label("xts_enc_loop6"));
+
+	&mov	($rounds,&DWP(240,$key_));	# restore $rounds
+	&mov	($key,$key_);			# restore $key
+	&mov	($rounds_,$rounds);
+
+&set_label("xts_enc_short");
+	&add	($len,16*6);
+	&jz	(&label("xts_enc_done6x"));
+
+	&movdqa	($inout3,$tweak);		# put aside previous tweak
+	&cmp	($len,0x20);
+	&jb	(&label("xts_enc_one"));
+
+	&pshufd	($twres,$twtmp,0x13);
+	&pxor	($twtmp,$twtmp);
+	&paddq	($tweak,$tweak);		# &psllq($tweak,1);
+	&pand	($twres,$twmask);		# isolate carry and residue
+	&pcmpgtd($twtmp,$tweak);		# broadcast upper bits
+	&pxor	($tweak,$twres);
+	&je	(&label("xts_enc_two"));
+
+	&pshufd	($twres,$twtmp,0x13);
+	&pxor	($twtmp,$twtmp);
+	&movdqa	($inout4,$tweak);		# put aside previous tweak
+	&paddq	($tweak,$tweak);		# &psllq($tweak,1);
+	&pand	($twres,$twmask);		# isolate carry and residue
+	&pcmpgtd($twtmp,$tweak);		# broadcast upper bits
+	&pxor	($tweak,$twres);
+	&cmp	($len,0x40);
+	&jb	(&label("xts_enc_three"));
+
+	&pshufd	($twres,$twtmp,0x13);
+	&pxor	($twtmp,$twtmp);
+	&movdqa	($inout5,$tweak);		# put aside previous tweak
+	&paddq	($tweak,$tweak);		# &psllq($tweak,1);
+	&pand	($twres,$twmask);		# isolate carry and residue
+	&pcmpgtd($twtmp,$tweak);		# broadcast upper bits
+	&pxor	($tweak,$twres);
+	&movdqa	(&QWP(16*0,"esp"),$inout3);
+	&movdqa	(&QWP(16*1,"esp"),$inout4);
+	&je	(&label("xts_enc_four"));
+
+	&movdqa	(&QWP(16*2,"esp"),$inout5);
+	&pshufd	($inout5,$twtmp,0x13);
+	&movdqa	(&QWP(16*3,"esp"),$tweak);
+	&paddq	($tweak,$tweak);		# &psllq($tweak,1);
+	&pand	($inout5,$twmask);		# isolate carry and residue
+	&pxor	($inout5,$tweak);
+
+	&movdqu	($inout0,&QWP(16*0,$inp));	# load input
+	&movdqu	($inout1,&QWP(16*1,$inp));
+	&movdqu	($inout2,&QWP(16*2,$inp));
+	&pxor	($inout0,&QWP(16*0,"esp"));	# input^=tweak
+	&movdqu	($inout3,&QWP(16*3,$inp));
+	&pxor	($inout1,&QWP(16*1,"esp"));
+	&movdqu	($inout4,&QWP(16*4,$inp));
+	&pxor	($inout2,&QWP(16*2,"esp"));
+	&lea	($inp,&DWP(16*5,$inp));
+	&pxor	($inout3,&QWP(16*3,"esp"));
+	&movdqa	(&QWP(16*4,"esp"),$inout5);	# save last tweak
+	&pxor	($inout4,$inout5);
+
+	&call	("_aesni_encrypt6");
+
+	&movaps	($tweak,&QWP(16*4,"esp"));	# last tweak
+	&xorps	($inout0,&QWP(16*0,"esp"));	# output^=tweak
+	&xorps	($inout1,&QWP(16*1,"esp"));
+	&xorps	($inout2,&QWP(16*2,"esp"));
+	&movups	(&QWP(16*0,$out),$inout0);	# write output
+	&xorps	($inout3,&QWP(16*3,"esp"));
+	&movups	(&QWP(16*1,$out),$inout1);
+	&xorps	($inout4,$tweak);
+	&movups	(&QWP(16*2,$out),$inout2);
+	&movups	(&QWP(16*3,$out),$inout3);
+	&movups	(&QWP(16*4,$out),$inout4);
+	&lea	($out,&DWP(16*5,$out));
+	&jmp	(&label("xts_enc_done"));
+
+&set_label("xts_enc_one",16);
+	&movups	($inout0,&QWP(16*0,$inp));	# load input
+	&lea	($inp,&DWP(16*1,$inp));
+	&xorps	($inout0,$inout3);		# input^=tweak
+	if ($inline)
+	{   &aesni_inline_generate1("enc");	}
+	else
+	{   &call	("_aesni_encrypt1");	}
+	&xorps	($inout0,$inout3);		# output^=tweak
+	&movups	(&QWP(16*0,$out),$inout0);	# write output
+	&lea	($out,&DWP(16*1,$out));
+
+	&movdqa	($tweak,$inout3);		# last tweak
+	&jmp	(&label("xts_enc_done"));
+
+&set_label("xts_enc_two",16);
+	&movaps	($inout4,$tweak);		# put aside last tweak
+
+	&movups	($inout0,&QWP(16*0,$inp));	# load input
+	&movups	($inout1,&QWP(16*1,$inp));
+	&lea	($inp,&DWP(16*2,$inp));
+	&xorps	($inout0,$inout3);		# input^=tweak
+	&xorps	($inout1,$inout4);
+
+	&call	("_aesni_encrypt2");
+
+	&xorps	($inout0,$inout3);		# output^=tweak
+	&xorps	($inout1,$inout4);
+	&movups	(&QWP(16*0,$out),$inout0);	# write output
+	&movups	(&QWP(16*1,$out),$inout1);
+	&lea	($out,&DWP(16*2,$out));
+
+	&movdqa	($tweak,$inout4);		# last tweak
+	&jmp	(&label("xts_enc_done"));
+
+&set_label("xts_enc_three",16);
+	&movaps	($inout5,$tweak);		# put aside last tweak
+	&movups	($inout0,&QWP(16*0,$inp));	# load input
+	&movups	($inout1,&QWP(16*1,$inp));
+	&movups	($inout2,&QWP(16*2,$inp));
+	&lea	($inp,&DWP(16*3,$inp));
+	&xorps	($inout0,$inout3);		# input^=tweak
+	&xorps	($inout1,$inout4);
+	&xorps	($inout2,$inout5);
+
+	&call	("_aesni_encrypt3");
+
+	&xorps	($inout0,$inout3);		# output^=tweak
+	&xorps	($inout1,$inout4);
+	&xorps	($inout2,$inout5);
+	&movups	(&QWP(16*0,$out),$inout0);	# write output
+	&movups	(&QWP(16*1,$out),$inout1);
+	&movups	(&QWP(16*2,$out),$inout2);
+	&lea	($out,&DWP(16*3,$out));
+
+	&movdqa	($tweak,$inout5);		# last tweak
+	&jmp	(&label("xts_enc_done"));
+
+&set_label("xts_enc_four",16);
+	&movaps	($inout4,$tweak);		# put aside last tweak
+
+	&movups	($inout0,&QWP(16*0,$inp));	# load input
+	&movups	($inout1,&QWP(16*1,$inp));
+	&movups	($inout2,&QWP(16*2,$inp));
+	&xorps	($inout0,&QWP(16*0,"esp"));	# input^=tweak
+	&movups	($inout3,&QWP(16*3,$inp));
+	&lea	($inp,&DWP(16*4,$inp));
+	&xorps	($inout1,&QWP(16*1,"esp"));
+	&xorps	($inout2,$inout5);
+	&xorps	($inout3,$inout4);
+
+	&call	("_aesni_encrypt4");
+
+	&xorps	($inout0,&QWP(16*0,"esp"));	# output^=tweak
+	&xorps	($inout1,&QWP(16*1,"esp"));
+	&xorps	($inout2,$inout5);
+	&movups	(&QWP(16*0,$out),$inout0);	# write output
+	&xorps	($inout3,$inout4);
+	&movups	(&QWP(16*1,$out),$inout1);
+	&movups	(&QWP(16*2,$out),$inout2);
+	&movups	(&QWP(16*3,$out),$inout3);
+	&lea	($out,&DWP(16*4,$out));
+
+	&movdqa	($tweak,$inout4);		# last tweak
+	&jmp	(&label("xts_enc_done"));
+
+&set_label("xts_enc_done6x",16);		# $tweak is pre-calculated
+	&mov	($len,&DWP(16*7+0,"esp"));	# restore original $len
+	&and	($len,15);
+	&jz	(&label("xts_enc_ret"));
+	&movdqa	($inout3,$tweak);
+	&mov	(&DWP(16*7+0,"esp"),$len);	# save $len%16
+	&jmp	(&label("xts_enc_steal"));
+
+&set_label("xts_enc_done",16);
+	&mov	($len,&DWP(16*7+0,"esp"));	# restore original $len
+	&pxor	($twtmp,$twtmp);
+	&and	($len,15);
+	&jz	(&label("xts_enc_ret"));
+
+	&pcmpgtd($twtmp,$tweak);		# broadcast upper bits
+	&mov	(&DWP(16*7+0,"esp"),$len);	# save $len%16
+	&pshufd	($inout3,$twtmp,0x13);
+	&paddq	($tweak,$tweak);		# &psllq($tweak,1);
+	&pand	($inout3,&QWP(16*6,"esp"));	# isolate carry and residue
+	&pxor	($inout3,$tweak);
+
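+	# ciphertext stealing: the leading len%16 bytes of the last full
+	# ciphertext block become the final partial block; the plaintext
+	# tail takes their place and the block is re-encrypted below with
+	# the next tweak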
+&set_label("xts_enc_steal");
+	&movz	($rounds,&BP(0,$inp));
+	&movz	($key,&BP(-16,$out));
+	&lea	($inp,&DWP(1,$inp));
+	&mov	(&BP(-16,$out),&LB($rounds));
+	&mov	(&BP(0,$out),&LB($key));
+	&lea	($out,&DWP(1,$out));
+	&sub	($len,1);
+	&jnz	(&label("xts_enc_steal"));
+
+	&sub	($out,&DWP(16*7+0,"esp"));	# rewind $out
+	&mov	($key,$key_);			# restore $key
+	&mov	($rounds,$rounds_);		# restore $rounds
+
+	&movups	($inout0,&QWP(-16,$out));	# load input
+	&xorps	($inout0,$inout3);		# input^=tweak
+	if ($inline)
+	{   &aesni_inline_generate1("enc");	}
+	else
+	{   &call	("_aesni_encrypt1");	}
+	&xorps	($inout0,$inout3);		# output^=tweak
+	&movups	(&QWP(-16,$out),$inout0);	# write output
+
+&set_label("xts_enc_ret");
+	&pxor	("xmm0","xmm0");		# clear register bank
+	&pxor	("xmm1","xmm1");
+	&pxor	("xmm2","xmm2");
+	&movdqa	(&QWP(16*0,"esp"),"xmm0");	# clear stack
+	&pxor	("xmm3","xmm3");
+	&movdqa	(&QWP(16*1,"esp"),"xmm0");
+	&pxor	("xmm4","xmm4");
+	&movdqa	(&QWP(16*2,"esp"),"xmm0");
+	&pxor	("xmm5","xmm5");
+	&movdqa	(&QWP(16*3,"esp"),"xmm0");
+	&pxor	("xmm6","xmm6");
+	&movdqa	(&QWP(16*4,"esp"),"xmm0");
+	&pxor	("xmm7","xmm7");
+	&movdqa	(&QWP(16*5,"esp"),"xmm0");
+	&mov	("esp",&DWP(16*7+4,"esp"));	# restore %esp
+&function_end("aesni_xts_encrypt");
+
+&function_begin("aesni_xts_decrypt");
+	&mov	($key,&wparam(4));		# key2
+	&mov	($inp,&wparam(5));		# clear-text tweak
+
+	&mov	($rounds,&DWP(240,$key));	# key2->rounds
+	&movups	($inout0,&QWP(0,$inp));
+	if ($inline)
+	{   &aesni_inline_generate1("enc");	}
+	else
+	{   &call	("_aesni_encrypt1");	}
+
+	&mov	($inp,&wparam(0));
+	&mov	($out,&wparam(1));
+	&mov	($len,&wparam(2));
+	&mov	($key,&wparam(3));		# key1
+
+	&mov	($key_,"esp");
+	&sub	("esp",16*7+8);
+	&and	("esp",-16);			# align stack
+
+	&xor	($rounds_,$rounds_);		# if(len%16) len-=16;
+	&test	($len,15);
+	&setnz	(&LB($rounds_));
+	&shl	($rounds_,4);
+	&sub	($len,$rounds_);
+
+	&mov	(&DWP(16*6+0,"esp"),0x87);	# compose the magic constant
+	&mov	(&DWP(16*6+4,"esp"),0);
+	&mov	(&DWP(16*6+8,"esp"),1);
+	&mov	(&DWP(16*6+12,"esp"),0);
+	&mov	(&DWP(16*7+0,"esp"),$len);	# save original $len
+	&mov	(&DWP(16*7+4,"esp"),$key_);	# save original %esp
+
+	&mov	($rounds,&DWP(240,$key));	# key1->rounds
+	&mov	($key_,$key);			# backup $key
+	&mov	($rounds_,$rounds);		# backup $rounds
+
+	&movdqa	($tweak,$inout0);
+	&pxor	($twtmp,$twtmp);
+	&movdqa	($twmask,&QWP(6*16,"esp"));	# 0x0...010...87
+	&pcmpgtd($twtmp,$tweak);		# broadcast upper bits
+
+	&and	($len,-16);
+	&sub	($len,16*6);
+	&jc	(&label("xts_dec_short"));
+
+	&shl	($rounds,4);
+	&mov	($rounds_,16);
+	&sub	($rounds_,$rounds);
+	&lea	($key,&DWP(32,$key,$rounds));
+	&jmp	(&label("xts_dec_loop6"));
+
+&set_label("xts_dec_loop6",16);
+	for ($i=0;$i<4;$i++) {
+	    &pshufd	($twres,$twtmp,0x13);
+	    &pxor	($twtmp,$twtmp);
+	    &movdqa	(&QWP(16*$i,"esp"),$tweak);
+	    &paddq	($tweak,$tweak);	# &psllq($tweak,1);
+	    &pand	($twres,$twmask);	# isolate carry and residue
+	    &pcmpgtd	($twtmp,$tweak);	# broadcast upper bits
+	    &pxor	($tweak,$twres);
+	}
+	&pshufd	($inout5,$twtmp,0x13);
+	&movdqa	(&QWP(16*$i++,"esp"),$tweak);
+	&paddq	($tweak,$tweak);		# &psllq($tweak,1);
+	 &$movekey	($rndkey0,&QWP(0,$key_));
+	&pand	($inout5,$twmask);		# isolate carry and residue
+	 &movups	($inout0,&QWP(0,$inp));	# load input
+	&pxor	($inout5,$tweak);
+
+	# inline _aesni_encrypt6 prologue and flip xor with tweak and key[0]
+	&mov	($rounds,$rounds_);		# restore $rounds
+	&movdqu	($inout1,&QWP(16*1,$inp));
+	 &xorps		($inout0,$rndkey0);	# input^=rndkey[0]
+	&movdqu	($inout2,&QWP(16*2,$inp));
+	 &pxor		($inout1,$rndkey0);
+	&movdqu	($inout3,&QWP(16*3,$inp));
+	 &pxor		($inout2,$rndkey0);
+	&movdqu	($inout4,&QWP(16*4,$inp));
+	 &pxor		($inout3,$rndkey0);
+	&movdqu	($rndkey1,&QWP(16*5,$inp));
+	 &pxor		($inout4,$rndkey0);
+	&lea	($inp,&DWP(16*6,$inp));
+	&pxor	($inout0,&QWP(16*0,"esp"));	# input^=tweak
+	&movdqa	(&QWP(16*$i,"esp"),$inout5);	# save last tweak
+	&pxor	($inout5,$rndkey1);
+
+	 &$movekey	($rndkey1,&QWP(16,$key_));
+	&pxor	($inout1,&QWP(16*1,"esp"));
+	&pxor	($inout2,&QWP(16*2,"esp"));
+	 &aesdec	($inout0,$rndkey1);
+	&pxor	($inout3,&QWP(16*3,"esp"));
+	&pxor	($inout4,&QWP(16*4,"esp"));
+	 &aesdec	($inout1,$rndkey1);
+	&pxor		($inout5,$rndkey0);
+	 &$movekey	($rndkey0,&QWP(32,$key_));
+	 &aesdec	($inout2,$rndkey1);
+	 &aesdec	($inout3,$rndkey1);
+	 &aesdec	($inout4,$rndkey1);
+	 &aesdec	($inout5,$rndkey1);
+	&call		(&label("_aesni_decrypt6_enter"));
+
+	&movdqa	($tweak,&QWP(16*5,"esp"));	# last tweak
+       &pxor	($twtmp,$twtmp);
+	&xorps	($inout0,&QWP(16*0,"esp"));	# output^=tweak
+       &pcmpgtd	($twtmp,$tweak);		# broadcast upper bits
+	&xorps	($inout1,&QWP(16*1,"esp"));
+	&movups	(&QWP(16*0,$out),$inout0);	# write output
+	&xorps	($inout2,&QWP(16*2,"esp"));
+	&movups	(&QWP(16*1,$out),$inout1);
+	&xorps	($inout3,&QWP(16*3,"esp"));
+	&movups	(&QWP(16*2,$out),$inout2);
+	&xorps	($inout4,&QWP(16*4,"esp"));
+	&movups	(&QWP(16*3,$out),$inout3);
+	&xorps	($inout5,$tweak);
+	&movups	(&QWP(16*4,$out),$inout4);
+       &pshufd	($twres,$twtmp,0x13);
+	&movups	(&QWP(16*5,$out),$inout5);
+	&lea	($out,&DWP(16*6,$out));
+       &movdqa	($twmask,&QWP(16*6,"esp"));	# 0x0...010...87
+
+	&pxor	($twtmp,$twtmp);
+	&paddq	($tweak,$tweak);		# &psllq($tweak,1);
+	&pand	($twres,$twmask);		# isolate carry and residue
+	&pcmpgtd($twtmp,$tweak);		# broadcast upper bits
+	&pxor	($tweak,$twres);
+
+	&sub	($len,16*6);
+	&jnc	(&label("xts_dec_loop6"));
+
+	&mov	($rounds,&DWP(240,$key_));	# restore $rounds
+	&mov	($key,$key_);			# restore $key
+	&mov	($rounds_,$rounds);
+
+&set_label("xts_dec_short");
+	&add	($len,16*6);
+	&jz	(&label("xts_dec_done6x"));
+
+	&movdqa	($inout3,$tweak);		# put aside previous tweak
+	&cmp	($len,0x20);
+	&jb	(&label("xts_dec_one"));
+
+	&pshufd	($twres,$twtmp,0x13);
+	&pxor	($twtmp,$twtmp);
+	&paddq	($tweak,$tweak);		# &psllq($tweak,1);
+	&pand	($twres,$twmask);		# isolate carry and residue
+	&pcmpgtd($twtmp,$tweak);		# broadcast upper bits
+	&pxor	($tweak,$twres);
+	&je	(&label("xts_dec_two"));
+
+	&pshufd	($twres,$twtmp,0x13);
+	&pxor	($twtmp,$twtmp);
+	&movdqa	($inout4,$tweak);		# put aside previous tweak
+	&paddq	($tweak,$tweak);		# &psllq($tweak,1);
+	&pand	($twres,$twmask);		# isolate carry and residue
+	&pcmpgtd($twtmp,$tweak);		# broadcast upper bits
+	&pxor	($tweak,$twres);
+	&cmp	($len,0x40);
+	&jb	(&label("xts_dec_three"));
+
+	&pshufd	($twres,$twtmp,0x13);
+	&pxor	($twtmp,$twtmp);
+	&movdqa	($inout5,$tweak);		# put aside previous tweak
+	&paddq	($tweak,$tweak);		# &psllq($tweak,1);
+	&pand	($twres,$twmask);		# isolate carry and residue
+	&pcmpgtd($twtmp,$tweak);		# broadcast upper bits
+	&pxor	($tweak,$twres);
+	&movdqa	(&QWP(16*0,"esp"),$inout3);
+	&movdqa	(&QWP(16*1,"esp"),$inout4);
+	&je	(&label("xts_dec_four"));
+
+	&movdqa	(&QWP(16*2,"esp"),$inout5);
+	&pshufd	($inout5,$twtmp,0x13);
+	&movdqa	(&QWP(16*3,"esp"),$tweak);
+	&paddq	($tweak,$tweak);		# &psllq($tweak,1);
+	&pand	($inout5,$twmask);		# isolate carry and residue
+	&pxor	($inout5,$tweak);
+
+	&movdqu	($inout0,&QWP(16*0,$inp));	# load input
+	&movdqu	($inout1,&QWP(16*1,$inp));
+	&movdqu	($inout2,&QWP(16*2,$inp));
+	&pxor	($inout0,&QWP(16*0,"esp"));	# input^=tweak
+	&movdqu	($inout3,&QWP(16*3,$inp));
+	&pxor	($inout1,&QWP(16*1,"esp"));
+	&movdqu	($inout4,&QWP(16*4,$inp));
+	&pxor	($inout2,&QWP(16*2,"esp"));
+	&lea	($inp,&DWP(16*5,$inp));
+	&pxor	($inout3,&QWP(16*3,"esp"));
+	&movdqa	(&QWP(16*4,"esp"),$inout5);	# save last tweak
+	&pxor	($inout4,$inout5);
+
+	&call	("_aesni_decrypt6");
+
+	&movaps	($tweak,&QWP(16*4,"esp"));	# last tweak
+	&xorps	($inout0,&QWP(16*0,"esp"));	# output^=tweak
+	&xorps	($inout1,&QWP(16*1,"esp"));
+	&xorps	($inout2,&QWP(16*2,"esp"));
+	&movups	(&QWP(16*0,$out),$inout0);	# write output
+	&xorps	($inout3,&QWP(16*3,"esp"));
+	&movups	(&QWP(16*1,$out),$inout1);
+	&xorps	($inout4,$tweak);
+	&movups	(&QWP(16*2,$out),$inout2);
+	&movups	(&QWP(16*3,$out),$inout3);
+	&movups	(&QWP(16*4,$out),$inout4);
+	&lea	($out,&DWP(16*5,$out));
+	&jmp	(&label("xts_dec_done"));
+
+&set_label("xts_dec_one",16);
+	&movups	($inout0,&QWP(16*0,$inp));	# load input
+	&lea	($inp,&DWP(16*1,$inp));
+	&xorps	($inout0,$inout3);		# input^=tweak
+	if ($inline)
+	{   &aesni_inline_generate1("dec");	}
+	else
+	{   &call	("_aesni_decrypt1");	}
+	&xorps	($inout0,$inout3);		# output^=tweak
+	&movups	(&QWP(16*0,$out),$inout0);	# write output
+	&lea	($out,&DWP(16*1,$out));
+
+	&movdqa	($tweak,$inout3);		# last tweak
+	&jmp	(&label("xts_dec_done"));
+
+&set_label("xts_dec_two",16);
+	&movaps	($inout4,$tweak);		# put aside last tweak
+
+	&movups	($inout0,&QWP(16*0,$inp));	# load input
+	&movups	($inout1,&QWP(16*1,$inp));
+	&lea	($inp,&DWP(16*2,$inp));
+	&xorps	($inout0,$inout3);		# input^=tweak
+	&xorps	($inout1,$inout4);
+
+	&call	("_aesni_decrypt2");
+
+	&xorps	($inout0,$inout3);		# output^=tweak
+	&xorps	($inout1,$inout4);
+	&movups	(&QWP(16*0,$out),$inout0);	# write output
+	&movups	(&QWP(16*1,$out),$inout1);
+	&lea	($out,&DWP(16*2,$out));
+
+	&movdqa	($tweak,$inout4);		# last tweak
+	&jmp	(&label("xts_dec_done"));
+
+&set_label("xts_dec_three",16);
+	&movaps	($inout5,$tweak);		# put aside last tweak
+	&movups	($inout0,&QWP(16*0,$inp));	# load input
+	&movups	($inout1,&QWP(16*1,$inp));
+	&movups	($inout2,&QWP(16*2,$inp));
+	&lea	($inp,&DWP(16*3,$inp));
+	&xorps	($inout0,$inout3);		# input^=tweak
+	&xorps	($inout1,$inout4);
+	&xorps	($inout2,$inout5);
+
+	&call	("_aesni_decrypt3");
+
+	&xorps	($inout0,$inout3);		# output^=tweak
+	&xorps	($inout1,$inout4);
+	&xorps	($inout2,$inout5);
+	&movups	(&QWP(16*0,$out),$inout0);	# write output
+	&movups	(&QWP(16*1,$out),$inout1);
+	&movups	(&QWP(16*2,$out),$inout2);
+	&lea	($out,&DWP(16*3,$out));
+
+	&movdqa	($tweak,$inout5);		# last tweak
+	&jmp	(&label("xts_dec_done"));
+
+&set_label("xts_dec_four",16);
+	&movaps	($inout4,$tweak);		# put aside last tweak
+
+	&movups	($inout0,&QWP(16*0,$inp));	# load input
+	&movups	($inout1,&QWP(16*1,$inp));
+	&movups	($inout2,&QWP(16*2,$inp));
+	&xorps	($inout0,&QWP(16*0,"esp"));	# input^=tweak
+	&movups	($inout3,&QWP(16*3,$inp));
+	&lea	($inp,&DWP(16*4,$inp));
+	&xorps	($inout1,&QWP(16*1,"esp"));
+	&xorps	($inout2,$inout5);
+	&xorps	($inout3,$inout4);
+
+	&call	("_aesni_decrypt4");
+
+	&xorps	($inout0,&QWP(16*0,"esp"));	# output^=tweak
+	&xorps	($inout1,&QWP(16*1,"esp"));
+	&xorps	($inout2,$inout5);
+	&movups	(&QWP(16*0,$out),$inout0);	# write output
+	&xorps	($inout3,$inout4);
+	&movups	(&QWP(16*1,$out),$inout1);
+	&movups	(&QWP(16*2,$out),$inout2);
+	&movups	(&QWP(16*3,$out),$inout3);
+	&lea	($out,&DWP(16*4,$out));
+
+	&movdqa	($tweak,$inout4);		# last tweak
+	&jmp	(&label("xts_dec_done"));
+
+&set_label("xts_dec_done6x",16);		# $tweak is pre-calculated
+	&mov	($len,&DWP(16*7+0,"esp"));	# restore original $len
+	&and	($len,15);
+	&jz	(&label("xts_dec_ret"));
+	&mov	(&DWP(16*7+0,"esp"),$len);	# save $len%16
+	&jmp	(&label("xts_dec_only_one_more"));
+
+&set_label("xts_dec_done",16);
+	&mov	($len,&DWP(16*7+0,"esp"));	# restore original $len
+	&pxor	($twtmp,$twtmp);
+	&and	($len,15);
+	&jz	(&label("xts_dec_ret"));
+
+	&pcmpgtd($twtmp,$tweak);		# broadcast upper bits
+	&mov	(&DWP(16*7+0,"esp"),$len);	# save $len%16
+	&pshufd	($twres,$twtmp,0x13);
+	&pxor	($twtmp,$twtmp);
+	&movdqa	($twmask,&QWP(16*6,"esp"));
+	&paddq	($tweak,$tweak);		# &psllq($tweak,1);
+	&pand	($twres,$twmask);		# isolate carry and residue
+	&pcmpgtd($twtmp,$tweak);		# broadcast upper bits
+	&pxor	($tweak,$twres);
+
+&set_label("xts_dec_only_one_more");
+	&pshufd	($inout3,$twtmp,0x13);
+	&movdqa	($inout4,$tweak);		# put aside previous tweak
+	&paddq	($tweak,$tweak);		# &psllq($tweak,1);
+	&pand	($inout3,$twmask);		# isolate carry and residue
+	&pxor	($inout3,$tweak);
+
+	&mov	($key,$key_);			# restore $key
+	&mov	($rounds,$rounds_);		# restore $rounds
+
+	&movups	($inout0,&QWP(0,$inp));		# load input
+	&xorps	($inout0,$inout3);		# input^=tweak
+	if ($inline)
+	{   &aesni_inline_generate1("dec");	}
+	else
+	{   &call	("_aesni_decrypt1");	}
+	&xorps	($inout0,$inout3);		# output^=tweak
+	&movups	(&QWP(0,$out),$inout0);		# write output
+
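+	# ciphertext stealing, decrypt side: the block above was decrypted
+	# with the later tweak; its leading bytes are swapped with the
+	# partial ciphertext block and the merged block is re-decrypted
+	# below with the put-aside tweak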
+&set_label("xts_dec_steal");
+	&movz	($rounds,&BP(16,$inp));
+	&movz	($key,&BP(0,$out));
+	&lea	($inp,&DWP(1,$inp));
+	&mov	(&BP(0,$out),&LB($rounds));
+	&mov	(&BP(16,$out),&LB($key));
+	&lea	($out,&DWP(1,$out));
+	&sub	($len,1);
+	&jnz	(&label("xts_dec_steal"));
+
+	&sub	($out,&DWP(16*7+0,"esp"));	# rewind $out
+	&mov	($key,$key_);			# restore $key
+	&mov	($rounds,$rounds_);		# restore $rounds
+
+	&movups	($inout0,&QWP(0,$out));		# load input
+	&xorps	($inout0,$inout4);		# input^=tweak
+	if ($inline)
+	{   &aesni_inline_generate1("dec");	}
+	else
+	{   &call	("_aesni_decrypt1");	}
+	&xorps	($inout0,$inout4);		# output^=tweak
+	&movups	(&QWP(0,$out),$inout0);		# write output
+
+&set_label("xts_dec_ret");
+	&pxor	("xmm0","xmm0");		# clear register bank
+	&pxor	("xmm1","xmm1");
+	&pxor	("xmm2","xmm2");
+	&movdqa	(&QWP(16*0,"esp"),"xmm0");	# clear stack
+	&pxor	("xmm3","xmm3");
+	&movdqa	(&QWP(16*1,"esp"),"xmm0");
+	&pxor	("xmm4","xmm4");
+	&movdqa	(&QWP(16*2,"esp"),"xmm0");
+	&pxor	("xmm5","xmm5");
+	&movdqa	(&QWP(16*3,"esp"),"xmm0");
+	&pxor	("xmm6","xmm6");
+	&movdqa	(&QWP(16*4,"esp"),"xmm0");
+	&pxor	("xmm7","xmm7");
+	&movdqa	(&QWP(16*5,"esp"),"xmm0");
+	&mov	("esp",&DWP(16*7+4,"esp"));	# restore %esp
+&function_end("aesni_xts_decrypt");
+}
+
+######################################################################
+# void aesni_ocb_[en|de]crypt(const char *inp, char *out, size_t blocks,
+#	const AES_KEY *key, unsigned int start_block_num,
+#	unsigned char offset_i[16], const unsigned char L_[][16],
+#	unsigned char checksum[16]);
+#
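+# Hedged sketch of the OCB block schedule (illustrative pseudo-C):
+#
+#	Offset_i  = Offset_{i-1} ^ L_[ntz(i)];	/* ntz = trailing zeros */
+#	C_i       = Offset_i ^ AES_encrypt(P_i ^ Offset_i, key);
+#	Checksum ^= P_i;
+#
+# decryption mirrors this with AES_decrypt and the checksum taken over
+# the recovered plaintext; ntz(i) is computed below with bsf, L_ is
+# precomputed by the caller, and offset_i/checksum are read on entry
+# and written back on exit.
+#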
+{
+# offsets within stack frame
+my $checksum = 16*6;
+my ($key_off,$rounds_off,$out_off,$end_off,$esp_off)=map(16*7+4*$_,(0..4));
+
+# reassigned registers
+my ($l_,$block,$i1,$i3,$i5) = ($rounds_,$key_,$rounds,$len,$out);
+# $l_, $block, $inp, $key are permanently allocated in registers;
+# the remaining non-volatile ones are offloaded to the stack, where
+# they stay invariant once written.
+
+&function_begin("aesni_ocb_encrypt");
+	&mov	($rounds,&wparam(5));		# &offset_i
+	&mov	($rounds_,&wparam(7));		# &checksum
+
+	&mov	($inp,&wparam(0));
+	&mov	($out,&wparam(1));
+	&mov	($len,&wparam(2));
+	&mov	($key,&wparam(3));
+	&movdqu	($rndkey0,&QWP(0,$rounds));	# load offset_i
+	&mov	($block,&wparam(4));		# start_block_num
+	&movdqu	($rndkey1,&QWP(0,$rounds_));	# load checksum
+	&mov	($l_,&wparam(6));		# L_
+
+	&mov	($rounds,"esp");
+	&sub	("esp",$esp_off+4);		# alloca
+	&and	("esp",-16);			# align stack
+
+	&sub	($out,$inp);
+	&shl	($len,4);
+	&lea	($len,&DWP(-16*6,$inp,$len));	# end of input - 16*6
+	&mov	(&DWP($out_off,"esp"),$out);
+	&mov	(&DWP($end_off,"esp"),$len);
+	&mov	(&DWP($esp_off,"esp"),$rounds);
+
+	&mov	($rounds,&DWP(240,$key));
+
+	&test	($block,1);
+	&jnz	(&label("odd"));
+
+	&bsf		($i3,$block);
+	&add		($block,1);
+	&shl		($i3,4);
+	&movdqu		($inout5,&QWP(0,$l_,$i3));
+	&mov		($i3,$key);			# put aside key
+
+	&movdqu		($inout0,&QWP(16*0,$inp));	# load input
+	&lea		($inp,&DWP(16,$inp));
+
+	&pxor		($inout5,$rndkey0);		# ^ last offset_i
+	&pxor		($rndkey1,$inout0);		# checksum
+	&pxor		($inout0,$inout5);		# ^ offset_i
+
+	&movdqa		($inout4,$rndkey1);
+	if ($inline)
+	{   &aesni_inline_generate1("enc");	}
+	else
+	{   &call	("_aesni_encrypt1");	}
+
+	&xorps		($inout0,$inout5);		# ^ offset_i
+	&movdqa		($rndkey0,$inout5);		# pass last offset_i
+	&movdqa		($rndkey1,$inout4);		# pass the checksum
+
+	&movups		(&QWP(-16,$out,$inp),$inout0);	# store output
+
+	&mov		($rounds,&DWP(240,$i3));
+	&mov		($key,$i3);			# restore key
+	&mov		($len,&DWP($end_off,"esp"));
+
+&set_label("odd");
+	&shl		($rounds,4);
+	&mov		($out,16);
+	&sub		($out,$rounds);			# twisted rounds
+	&mov		(&DWP($key_off,"esp"),$key);
+	&lea		($key,&DWP(32,$key,$rounds));	# end of key schedule
+	&mov		(&DWP($rounds_off,"esp"),$out);
+
+	&cmp		($inp,$len);
+	&ja		(&label("short"));
+	&jmp		(&label("grandloop"));
+
+&set_label("grandloop",32);
+	&lea		($i1,&DWP(1,$block));
+	&lea		($i3,&DWP(3,$block));
+	&lea		($i5,&DWP(5,$block));
+	&add		($block,6);
+	&bsf		($i1,$i1);
+	&bsf		($i3,$i3);
+	&bsf		($i5,$i5);
+	&shl		($i1,4);
+	&shl		($i3,4);
+	&shl		($i5,4);
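+	# bsf computes ntz() of the even block indices (odd ones use L_[0]);
+	# shifting by 4 scales the result to index the 16-byte L_ entries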
+	&movdqu		($inout0,&QWP(0,$l_));
+	&movdqu		($inout1,&QWP(0,$l_,$i1));
+	&mov		($rounds,&DWP($rounds_off,"esp"));
+	&movdqa		($inout2,$inout0);
+	&movdqu		($inout3,&QWP(0,$l_,$i3));
+	&movdqa		($inout4,$inout0);
+	&movdqu		($inout5,&QWP(0,$l_,$i5));
+
+	&pxor		($inout0,$rndkey0);		# ^ last offset_i
+	&pxor		($inout1,$inout0);
+	&movdqa		(&QWP(16*0,"esp"),$inout0);
+	&pxor		($inout2,$inout1);
+	&movdqa		(&QWP(16*1,"esp"),$inout1);
+	&pxor		($inout3,$inout2);
+	&movdqa		(&QWP(16*2,"esp"),$inout2);
+	&pxor		($inout4,$inout3);
+	&movdqa		(&QWP(16*3,"esp"),$inout3);
+	&pxor		($inout5,$inout4);
+	&movdqa		(&QWP(16*4,"esp"),$inout4);
+	&movdqa		(&QWP(16*5,"esp"),$inout5);
+
+	&$movekey	($rndkey0,&QWP(-48,$key,$rounds));
+	&movdqu		($inout0,&QWP(16*0,$inp));	# load input
+	&movdqu		($inout1,&QWP(16*1,$inp));
+	&movdqu		($inout2,&QWP(16*2,$inp));
+	&movdqu		($inout3,&QWP(16*3,$inp));
+	&movdqu		($inout4,&QWP(16*4,$inp));
+	&movdqu		($inout5,&QWP(16*5,$inp));
+	&lea		($inp,&DWP(16*6,$inp));
+
+	&pxor		($rndkey1,$inout0);		# checksum
+	&pxor		($inout0,$rndkey0);		# ^ roundkey[0]
+	&pxor		($rndkey1,$inout1);
+	&pxor		($inout1,$rndkey0);
+	&pxor		($rndkey1,$inout2);
+	&pxor		($inout2,$rndkey0);
+	&pxor		($rndkey1,$inout3);
+	&pxor		($inout3,$rndkey0);
+	&pxor		($rndkey1,$inout4);
+	&pxor		($inout4,$rndkey0);
+	&pxor		($rndkey1,$inout5);
+	&pxor		($inout5,$rndkey0);
+	&movdqa		(&QWP($checksum,"esp"),$rndkey1);
+
+	&$movekey	($rndkey1,&QWP(-32,$key,$rounds));
+	&pxor		($inout0,&QWP(16*0,"esp"));	# ^ offset_i
+	&pxor		($inout1,&QWP(16*1,"esp"));
+	&pxor		($inout2,&QWP(16*2,"esp"));
+	&pxor		($inout3,&QWP(16*3,"esp"));
+	&pxor		($inout4,&QWP(16*4,"esp"));
+	&pxor		($inout5,&QWP(16*5,"esp"));
+
+	&$movekey	($rndkey0,&QWP(-16,$key,$rounds));
+	&aesenc		($inout0,$rndkey1);
+	&aesenc		($inout1,$rndkey1);
+	&aesenc		($inout2,$rndkey1);
+	&aesenc		($inout3,$rndkey1);
+	&aesenc		($inout4,$rndkey1);
+	&aesenc		($inout5,$rndkey1);
+
+	&mov		($out,&DWP($out_off,"esp"));
+	&mov		($len,&DWP($end_off,"esp"));
+	&call		("_aesni_encrypt6_enter");
+
+	&movdqa		($rndkey0,&QWP(16*5,"esp"));	# pass last offset_i
+	&pxor		($inout0,&QWP(16*0,"esp"));	# ^ offset_i
+	&pxor		($inout1,&QWP(16*1,"esp"));
+	&pxor		($inout2,&QWP(16*2,"esp"));
+	&pxor		($inout3,&QWP(16*3,"esp"));
+	&pxor		($inout4,&QWP(16*4,"esp"));
+	&pxor		($inout5,$rndkey0);
+	&movdqa		($rndkey1,&QWP($checksum,"esp"));# pass the checksum
+
+	&movdqu		(&QWP(-16*6,$out,$inp),$inout0);# store output
+	&movdqu		(&QWP(-16*5,$out,$inp),$inout1);
+	&movdqu		(&QWP(-16*4,$out,$inp),$inout2);
+	&movdqu		(&QWP(-16*3,$out,$inp),$inout3);
+	&movdqu		(&QWP(-16*2,$out,$inp),$inout4);
+	&movdqu		(&QWP(-16*1,$out,$inp),$inout5);
+	&cmp		($inp,$len);			# done yet?
+	&jb		(&label("grandloop"));
+
+&set_label("short");
+	&add		($len,16*6);
+	&sub		($len,$inp);
+	&jz		(&label("done"));
+
+	&cmp		($len,16*2);
+	&jb		(&label("one"));
+	&je		(&label("two"));
+
+	&cmp		($len,16*4);
+	&jb		(&label("three"));
+	&je		(&label("four"));
+
+	&lea		($i1,&DWP(1,$block));
+	&lea		($i3,&DWP(3,$block));
+	&bsf		($i1,$i1);
+	&bsf		($i3,$i3);
+	&shl		($i1,4);
+	&shl		($i3,4);
+	&movdqu		($inout0,&QWP(0,$l_));
+	&movdqu		($inout1,&QWP(0,$l_,$i1));
+	&mov		($rounds,&DWP($rounds_off,"esp"));
+	&movdqa		($inout2,$inout0);
+	&movdqu		($inout3,&QWP(0,$l_,$i3));
+	&movdqa		($inout4,$inout0);
+
+	&pxor		($inout0,$rndkey0);		# ^ last offset_i
+	&pxor		($inout1,$inout0);
+	&movdqa		(&QWP(16*0,"esp"),$inout0);
+	&pxor		($inout2,$inout1);
+	&movdqa		(&QWP(16*1,"esp"),$inout1);
+	&pxor		($inout3,$inout2);
+	&movdqa		(&QWP(16*2,"esp"),$inout2);
+	&pxor		($inout4,$inout3);
+	&movdqa		(&QWP(16*3,"esp"),$inout3);
+	&pxor		($inout5,$inout4);
+	&movdqa		(&QWP(16*4,"esp"),$inout4);
+
+	&$movekey	($rndkey0,&QWP(-48,$key,$rounds));
+	&movdqu		($inout0,&QWP(16*0,$inp));	# load input
+	&movdqu		($inout1,&QWP(16*1,$inp));
+	&movdqu		($inout2,&QWP(16*2,$inp));
+	&movdqu		($inout3,&QWP(16*3,$inp));
+	&movdqu		($inout4,&QWP(16*4,$inp));
+	&pxor		($inout5,$inout5);
+
+	&pxor		($rndkey1,$inout0);		# checksum
+	&pxor		($inout0,$rndkey0);		# ^ roundkey[0]
+	&pxor		($rndkey1,$inout1);
+	&pxor		($inout1,$rndkey0);
+	&pxor		($rndkey1,$inout2);
+	&pxor		($inout2,$rndkey0);
+	&pxor		($rndkey1,$inout3);
+	&pxor		($inout3,$rndkey0);
+	&pxor		($rndkey1,$inout4);
+	&pxor		($inout4,$rndkey0);
+	&movdqa		(&QWP($checksum,"esp"),$rndkey1);
+
+	&$movekey	($rndkey1,&QWP(-32,$key,$rounds));
+	&pxor		($inout0,&QWP(16*0,"esp"));	# ^ offset_i
+	&pxor		($inout1,&QWP(16*1,"esp"));
+	&pxor		($inout2,&QWP(16*2,"esp"));
+	&pxor		($inout3,&QWP(16*3,"esp"));
+	&pxor		($inout4,&QWP(16*4,"esp"));
+
+	&$movekey	($rndkey0,&QWP(-16,$key,$rounds));
+	&aesenc		($inout0,$rndkey1);
+	&aesenc		($inout1,$rndkey1);
+	&aesenc		($inout2,$rndkey1);
+	&aesenc		($inout3,$rndkey1);
+	&aesenc		($inout4,$rndkey1);
+	&aesenc		($inout5,$rndkey1);
+
+	&mov		($out,&DWP($out_off,"esp"));
+	&call		("_aesni_encrypt6_enter");
+
+	&movdqa		($rndkey0,&QWP(16*4,"esp"));	# pass last offset_i
+	&pxor		($inout0,&QWP(16*0,"esp"));	# ^ offset_i
+	&pxor		($inout1,&QWP(16*1,"esp"));
+	&pxor		($inout2,&QWP(16*2,"esp"));
+	&pxor		($inout3,&QWP(16*3,"esp"));
+	&pxor		($inout4,$rndkey0);
+	&movdqa		($rndkey1,&QWP($checksum,"esp"));# pass the checksum
+
+	&movdqu		(&QWP(16*0,$out,$inp),$inout0);	# store output
+	&movdqu		(&QWP(16*1,$out,$inp),$inout1);
+	&movdqu		(&QWP(16*2,$out,$inp),$inout2);
+	&movdqu		(&QWP(16*3,$out,$inp),$inout3);
+	&movdqu		(&QWP(16*4,$out,$inp),$inout4);
+
+	&jmp		(&label("done"));
+
+&set_label("one",16);
+	&movdqu		($inout5,&QWP(0,$l_));
+	&mov		($key,&DWP($key_off,"esp"));	# restore key
+
+	&movdqu		($inout0,&QWP(16*0,$inp));	# load input
+	&mov		($rounds,&DWP(240,$key));
+
+	&pxor		($inout5,$rndkey0);		# ^ last offset_i
+	&pxor		($rndkey1,$inout0);		# checksum
+	&pxor		($inout0,$inout5);		# ^ offset_i
+
+	&movdqa		($inout4,$rndkey1);
+	&mov		($out,&DWP($out_off,"esp"));
+	if ($inline)
+	{   &aesni_inline_generate1("enc");	}
+	else
+	{   &call	("_aesni_encrypt1");	}
+
+	&xorps		($inout0,$inout5);		# ^ offset_i
+	&movdqa		($rndkey0,$inout5);		# pass last offset_i
+	&movdqa		($rndkey1,$inout4);		# pass the checksum
+	&movups		(&QWP(0,$out,$inp),$inout0);
+
+	&jmp		(&label("done"));
+
+&set_label("two",16);
+	&lea		($i1,&DWP(1,$block));
+	&mov		($key,&DWP($key_off,"esp"));	# restore key
+	&bsf		($i1,$i1);
+	&shl		($i1,4);
+	&movdqu		($inout4,&QWP(0,$l_));
+	&movdqu		($inout5,&QWP(0,$l_,$i1));
+
+	&movdqu		($inout0,&QWP(16*0,$inp));	# load input
+	&movdqu		($inout1,&QWP(16*1,$inp));
+	&mov		($rounds,&DWP(240,$key));
+
+	&pxor		($inout4,$rndkey0);		# ^ last offset_i
+	&pxor		($inout5,$inout4);
+
+	&pxor		($rndkey1,$inout0);		# checksum
+	&pxor		($inout0,$inout4);		# ^ offset_i
+	&pxor		($rndkey1,$inout1);
+	&pxor		($inout1,$inout5);
+
+	&movdqa		($inout3,$rndkey1);
+	&mov		($out,&DWP($out_off,"esp"));
+	&call		("_aesni_encrypt2");
+
+	&xorps		($inout0,$inout4);		# ^ offset_i
+	&xorps		($inout1,$inout5);
+	&movdqa		($rndkey0,$inout5);		# pass last offset_i
+	&movdqa		($rndkey1,$inout3);		# pass the checksum
+	&movups		(&QWP(16*0,$out,$inp),$inout0);	# store output
+	&movups		(&QWP(16*1,$out,$inp),$inout1);
+
+	&jmp		(&label("done"));
+
+&set_label("three",16);
+	&lea		($i1,&DWP(1,$block));
+	&mov		($key,&DWP($key_off,"esp"));	# restore key
+	&bsf		($i1,$i1);
+	&shl		($i1,4);
+	&movdqu		($inout3,&QWP(0,$l_));
+	&movdqu		($inout4,&QWP(0,$l_,$i1));
+	&movdqa		($inout5,$inout3);
+
+	&movdqu		($inout0,&QWP(16*0,$inp));	# load input
+	&movdqu		($inout1,&QWP(16*1,$inp));
+	&movdqu		($inout2,&QWP(16*2,$inp));
+	&mov		($rounds,&DWP(240,$key));
+
+	&pxor		($inout3,$rndkey0);		# ^ last offset_i
+	&pxor		($inout4,$inout3);
+	&pxor		($inout5,$inout4);
+
+	&pxor		($rndkey1,$inout0);		# checksum
+	&pxor		($inout0,$inout3);		# ^ offset_i
+	&pxor		($rndkey1,$inout1);
+	&pxor		($inout1,$inout4);
+	&pxor		($rndkey1,$inout2);
+	&pxor		($inout2,$inout5);
+
+	&movdqa		(&QWP($checksum,"esp"),$rndkey1);
+	&mov		($out,&DWP($out_off,"esp"));
+	&call		("_aesni_encrypt3");
+
+	&xorps		($inout0,$inout3);		# ^ offset_i
+	&xorps		($inout1,$inout4);
+	&xorps		($inout2,$inout5);
+	&movdqa		($rndkey0,$inout5);		# pass last offset_i
+	&movdqa		($rndkey1,&QWP($checksum,"esp"));# pass the checksum
+	&movups		(&QWP(16*0,$out,$inp),$inout0);	# store output
+	&movups		(&QWP(16*1,$out,$inp),$inout1);
+	&movups		(&QWP(16*2,$out,$inp),$inout2);
+
+	&jmp		(&label("done"));
+
+&set_label("four",16);
+	&lea		($i1,&DWP(1,$block));
+	&lea		($i3,&DWP(3,$block));
+	&bsf		($i1,$i1);
+	&bsf		($i3,$i3);
+	&mov		($key,&DWP($key_off,"esp"));	# restore key
+	&shl		($i1,4);
+	&shl		($i3,4);
+	&movdqu		($inout2,&QWP(0,$l_));
+	&movdqu		($inout3,&QWP(0,$l_,$i1));
+	&movdqa		($inout4,$inout2);
+	&movdqu		($inout5,&QWP(0,$l_,$i3));
+
+	&pxor		($inout2,$rndkey0);		# ^ last offset_i
+	&movdqu		($inout0,&QWP(16*0,$inp));	# load input
+	&pxor		($inout3,$inout2);
+	&movdqu		($inout1,&QWP(16*1,$inp));
+	&pxor		($inout4,$inout3);
+	&movdqa		(&QWP(16*0,"esp"),$inout2);
+	&pxor		($inout5,$inout4);
+	&movdqa		(&QWP(16*1,"esp"),$inout3);
+	&movdqu		($inout2,&QWP(16*2,$inp));
+	&movdqu		($inout3,&QWP(16*3,$inp));
+	&mov		($rounds,&DWP(240,$key));
+
+	&pxor		($rndkey1,$inout0);		# checksum
+	&pxor		($inout0,&QWP(16*0,"esp"));	# ^ offset_i
+	&pxor		($rndkey1,$inout1);
+	&pxor		($inout1,&QWP(16*1,"esp"));
+	&pxor		($rndkey1,$inout2);
+	&pxor		($inout2,$inout4);
+	&pxor		($rndkey1,$inout3);
+	&pxor		($inout3,$inout5);
+
+	&movdqa		(&QWP($checksum,"esp"),$rndkey1);
+	&mov		($out,&DWP($out_off,"esp"));
+	&call		("_aesni_encrypt4");
+
+	&xorps		($inout0,&QWP(16*0,"esp"));	# ^ offset_i
+	&xorps		($inout1,&QWP(16*1,"esp"));
+	&xorps		($inout2,$inout4);
+	&movups		(&QWP(16*0,$out,$inp),$inout0);	# store output
+	&xorps		($inout3,$inout5);
+	&movups		(&QWP(16*1,$out,$inp),$inout1);
+	&movdqa		($rndkey0,$inout5);		# pass last offset_i
+	&movups		(&QWP(16*2,$out,$inp),$inout2);
+	&movdqa		($rndkey1,&QWP($checksum,"esp"));# pass the checksum
+	&movups		(&QWP(16*3,$out,$inp),$inout3);
+
+&set_label("done");
+	&mov	($key,&DWP($esp_off,"esp"));
+	&pxor	($inout0,$inout0);		# clear register bank
+	&pxor	($inout1,$inout1);
+	&movdqa	(&QWP(16*0,"esp"),$inout0);	# clear stack
+	&pxor	($inout2,$inout2);
+	&movdqa	(&QWP(16*1,"esp"),$inout0);
+	&pxor	($inout3,$inout3);
+	&movdqa	(&QWP(16*2,"esp"),$inout0);
+	&pxor	($inout4,$inout4);
+	&movdqa	(&QWP(16*3,"esp"),$inout0);
+	&pxor	($inout5,$inout5);
+	&movdqa	(&QWP(16*4,"esp"),$inout0);
+	&movdqa	(&QWP(16*5,"esp"),$inout0);
+	&movdqa	(&QWP(16*6,"esp"),$inout0);
+
+	&lea	("esp",&DWP(0,$key));		# restore %esp
+	&mov	($rounds,&wparam(5));		# &offset_i
+	&mov	($rounds_,&wparam(7));		# &checksum
+	&movdqu	(&QWP(0,$rounds),$rndkey0);
+	&pxor	($rndkey0,$rndkey0);
+	&movdqu	(&QWP(0,$rounds_),$rndkey1);
+	&pxor	($rndkey1,$rndkey1);
+&function_end("aesni_ocb_encrypt");
+
+&function_begin("aesni_ocb_decrypt");
+	&mov	($rounds,&wparam(5));		# &offset_i
+	&mov	($rounds_,&wparam(7));		# &checksum
+
+	&mov	($inp,&wparam(0));
+	&mov	($out,&wparam(1));
+	&mov	($len,&wparam(2));
+	&mov	($key,&wparam(3));
+	&movdqu	($rndkey0,&QWP(0,$rounds));	# load offset_i
+	&mov	($block,&wparam(4));		# start_block_num
+	&movdqu	($rndkey1,&QWP(0,$rounds_));	# load checksum
+	&mov	($l_,&wparam(6));		# L_
+
+	&mov	($rounds,"esp");
+	&sub	("esp",$esp_off+4);		# alloca
+	&and	("esp",-16);			# align stack
+
+	&sub	($out,$inp);
+	&shl	($len,4);
+	&lea	($len,&DWP(-16*6,$inp,$len));	# end of input - 16*6
+	&mov	(&DWP($out_off,"esp"),$out);
+	&mov	(&DWP($end_off,"esp"),$len);
+	&mov	(&DWP($esp_off,"esp"),$rounds);
+
+	&mov	($rounds,&DWP(240,$key));
+
+	&test	($block,1);
+	&jnz	(&label("odd"));
+
+	&bsf		($i3,$block);
+	&add		($block,1);
+	&shl		($i3,4);
+	&movdqu		($inout5,&QWP(0,$l_,$i3));
+	&mov		($i3,$key);			# put aside key
+
+	&movdqu		($inout0,&QWP(16*0,$inp));	# load input
+	&lea		($inp,&DWP(16,$inp));
+
+	&pxor		($inout5,$rndkey0);		# ^ last offset_i
+	&pxor		($inout0,$inout5);		# ^ offset_i
+
+	&movdqa		($inout4,$rndkey1);
+	if ($inline)
+	{   &aesni_inline_generate1("dec");	}
+	else
+	{   &call	("_aesni_decrypt1");	}
+
+	&xorps		($inout0,$inout5);		# ^ offset_i
+	&movaps		($rndkey1,$inout4);		# pass the checksum
+	&movdqa		($rndkey0,$inout5);		# pass last offset_i
+	&xorps		($rndkey1,$inout0);		# checksum
+	&movups		(&QWP(-16,$out,$inp),$inout0);	# store output
+
+	&mov		($rounds,&DWP(240,$i3));
+	&mov		($key,$i3);			# restore key
+	&mov		($len,&DWP($end_off,"esp"));
+
+&set_label("odd");
+	&shl		($rounds,4);
+	&mov		($out,16);
+	&sub		($out,$rounds);			# twisted rounds
+	&mov		(&DWP($key_off,"esp"),$key);
+	&lea		($key,&DWP(32,$key,$rounds));	# end of key schedule
+	&mov		(&DWP($rounds_off,"esp"),$out);
+
+	&cmp		($inp,$len);
+	&ja		(&label("short"));
+	&jmp		(&label("grandloop"));
+
+&set_label("grandloop",32);
+	&lea		($i1,&DWP(1,$block));
+	&lea		($i3,&DWP(3,$block));
+	&lea		($i5,&DWP(5,$block));
+	&add		($block,6);
+	&bsf		($i1,$i1);
+	&bsf		($i3,$i3);
+	&bsf		($i5,$i5);
+	&shl		($i1,4);
+	&shl		($i3,4);
+	&shl		($i5,4);
+	&movdqu		($inout0,&QWP(0,$l_));
+	&movdqu		($inout1,&QWP(0,$l_,$i1));
+	&mov		($rounds,&DWP($rounds_off,"esp"));
+	&movdqa		($inout2,$inout0);
+	&movdqu		($inout3,&QWP(0,$l_,$i3));
+	&movdqa		($inout4,$inout0);
+	&movdqu		($inout5,&QWP(0,$l_,$i5));
+
+	&pxor		($inout0,$rndkey0);		# ^ last offset_i
+	&pxor		($inout1,$inout0);
+	&movdqa		(&QWP(16*0,"esp"),$inout0);
+	&pxor		($inout2,$inout1);
+	&movdqa		(&QWP(16*1,"esp"),$inout1);
+	&pxor		($inout3,$inout2);
+	&movdqa		(&QWP(16*2,"esp"),$inout2);
+	&pxor		($inout4,$inout3);
+	&movdqa		(&QWP(16*3,"esp"),$inout3);
+	&pxor		($inout5,$inout4);
+	&movdqa		(&QWP(16*4,"esp"),$inout4);
+	&movdqa		(&QWP(16*5,"esp"),$inout5);
+
+	&$movekey	($rndkey0,&QWP(-48,$key,$rounds));
+	&movdqu		($inout0,&QWP(16*0,$inp));	# load input
+	&movdqu		($inout1,&QWP(16*1,$inp));
+	&movdqu		($inout2,&QWP(16*2,$inp));
+	&movdqu		($inout3,&QWP(16*3,$inp));
+	&movdqu		($inout4,&QWP(16*4,$inp));
+	&movdqu		($inout5,&QWP(16*5,$inp));
+	&lea		($inp,&DWP(16*6,$inp));
+
+	&movdqa		(&QWP($checksum,"esp"),$rndkey1);
+	&pxor		($inout0,$rndkey0);		# ^ roundkey[0]
+	&pxor		($inout1,$rndkey0);
+	&pxor		($inout2,$rndkey0);
+	&pxor		($inout3,$rndkey0);
+	&pxor		($inout4,$rndkey0);
+	&pxor		($inout5,$rndkey0);
+
+	&$movekey	($rndkey1,&QWP(-32,$key,$rounds));
+	&pxor		($inout0,&QWP(16*0,"esp"));	# ^ offset_i
+	&pxor		($inout1,&QWP(16*1,"esp"));
+	&pxor		($inout2,&QWP(16*2,"esp"));
+	&pxor		($inout3,&QWP(16*3,"esp"));
+	&pxor		($inout4,&QWP(16*4,"esp"));
+	&pxor		($inout5,&QWP(16*5,"esp"));
+
+	&$movekey	($rndkey0,&QWP(-16,$key,$rounds));
+	&aesdec		($inout0,$rndkey1);
+	&aesdec		($inout1,$rndkey1);
+	&aesdec		($inout2,$rndkey1);
+	&aesdec		($inout3,$rndkey1);
+	&aesdec		($inout4,$rndkey1);
+	&aesdec		($inout5,$rndkey1);
+
+	&mov		($out,&DWP($out_off,"esp"));
+	&mov		($len,&DWP($end_off,"esp"));
+	&call		("_aesni_decrypt6_enter");
+
+	&movdqa		($rndkey0,&QWP(16*5,"esp"));	# pass last offset_i
+	&pxor		($inout0,&QWP(16*0,"esp"));	# ^ offset_i
+	&movdqa		($rndkey1,&QWP($checksum,"esp"));
+	&pxor		($inout1,&QWP(16*1,"esp"));
+	&pxor		($inout2,&QWP(16*2,"esp"));
+	&pxor		($inout3,&QWP(16*3,"esp"));
+	&pxor		($inout4,&QWP(16*4,"esp"));
+	&pxor		($inout5,$rndkey0);
+
+	&pxor		($rndkey1,$inout0);		# checksum
+	&movdqu		(&QWP(-16*6,$out,$inp),$inout0);# store output
+	&pxor		($rndkey1,$inout1);
+	&movdqu		(&QWP(-16*5,$out,$inp),$inout1);
+	&pxor		($rndkey1,$inout2);
+	&movdqu		(&QWP(-16*4,$out,$inp),$inout2);
+	&pxor		($rndkey1,$inout3);
+	&movdqu		(&QWP(-16*3,$out,$inp),$inout3);
+	&pxor		($rndkey1,$inout4);
+	&movdqu		(&QWP(-16*2,$out,$inp),$inout4);
+	&pxor		($rndkey1,$inout5);
+	&movdqu		(&QWP(-16*1,$out,$inp),$inout5);
+	&cmp		($inp,$len);			# done yet?
+	&jb		(&label("grandloop"));
+
+&set_label("short");
+	&add		($len,16*6);
+	&sub		($len,$inp);
+	&jz		(&label("done"));
+
+	&cmp		($len,16*2);
+	&jb		(&label("one"));
+	&je		(&label("two"));
+
+	&cmp		($len,16*4);
+	&jb		(&label("three"));
+	&je		(&label("four"));
+
+	&lea		($i1,&DWP(1,$block));
+	&lea		($i3,&DWP(3,$block));
+	&bsf		($i1,$i1);
+	&bsf		($i3,$i3);
+	&shl		($i1,4);
+	&shl		($i3,4);
+	&movdqu		($inout0,&QWP(0,$l_));
+	&movdqu		($inout1,&QWP(0,$l_,$i1));
+	&mov		($rounds,&DWP($rounds_off,"esp"));
+	&movdqa		($inout2,$inout0);
+	&movdqu		($inout3,&QWP(0,$l_,$i3));
+	&movdqa		($inout4,$inout0);
+
+	&pxor		($inout0,$rndkey0);		# ^ last offset_i
+	&pxor		($inout1,$inout0);
+	&movdqa		(&QWP(16*0,"esp"),$inout0);
+	&pxor		($inout2,$inout1);
+	&movdqa		(&QWP(16*1,"esp"),$inout1);
+	&pxor		($inout3,$inout2);
+	&movdqa		(&QWP(16*2,"esp"),$inout2);
+	&pxor		($inout4,$inout3);
+	&movdqa		(&QWP(16*3,"esp"),$inout3);
+	&pxor		($inout5,$inout4);
+	&movdqa		(&QWP(16*4,"esp"),$inout4);
+
+	&$movekey	($rndkey0,&QWP(-48,$key,$rounds));
+	&movdqu		($inout0,&QWP(16*0,$inp));	# load input
+	&movdqu		($inout1,&QWP(16*1,$inp));
+	&movdqu		($inout2,&QWP(16*2,$inp));
+	&movdqu		($inout3,&QWP(16*3,$inp));
+	&movdqu		($inout4,&QWP(16*4,$inp));
+	&pxor		($inout5,$inout5);
+
+	&movdqa		(&QWP($checksum,"esp"),$rndkey1);
+	&pxor		($inout0,$rndkey0);		# ^ roundkey[0]
+	&pxor		($inout1,$rndkey0);
+	&pxor		($inout2,$rndkey0);
+	&pxor		($inout3,$rndkey0);
+	&pxor		($inout4,$rndkey0);
+
+	&$movekey	($rndkey1,&QWP(-32,$key,$rounds));
+	&pxor		($inout0,&QWP(16*0,"esp"));	# ^ offset_i
+	&pxor		($inout1,&QWP(16*1,"esp"));
+	&pxor		($inout2,&QWP(16*2,"esp"));
+	&pxor		($inout3,&QWP(16*3,"esp"));
+	&pxor		($inout4,&QWP(16*4,"esp"));
+
+	&$movekey	($rndkey0,&QWP(-16,$key,$rounds));
+	&aesdec		($inout0,$rndkey1);
+	&aesdec		($inout1,$rndkey1);
+	&aesdec		($inout2,$rndkey1);
+	&aesdec		($inout3,$rndkey1);
+	&aesdec		($inout4,$rndkey1);
+	&aesdec		($inout5,$rndkey1);
+
+	&mov		($out,&DWP($out_off,"esp"));
+	&call		("_aesni_decrypt6_enter");
+
+	&movdqa		($rndkey0,&QWP(16*4,"esp"));	# pass last offset_i
+	&pxor		($inout0,&QWP(16*0,"esp"));	# ^ offset_i
+	&movdqa		($rndkey1,&QWP($checksum,"esp"));
+	&pxor		($inout1,&QWP(16*1,"esp"));
+	&pxor		($inout2,&QWP(16*2,"esp"));
+	&pxor		($inout3,&QWP(16*3,"esp"));
+	&pxor		($inout4,$rndkey0);
+
+	&pxor		($rndkey1,$inout0);		# checksum
+	&movdqu		(&QWP(16*0,$out,$inp),$inout0);	# store output
+	&pxor		($rndkey1,$inout1);
+	&movdqu		(&QWP(16*1,$out,$inp),$inout1);
+	&pxor		($rndkey1,$inout2);
+	&movdqu		(&QWP(16*2,$out,$inp),$inout2);
+	&pxor		($rndkey1,$inout3);
+	&movdqu		(&QWP(16*3,$out,$inp),$inout3);
+	&pxor		($rndkey1,$inout4);
+	&movdqu		(&QWP(16*4,$out,$inp),$inout4);
+
+	&jmp		(&label("done"));
+
+&set_label("one",16);
+	&movdqu		($inout5,&QWP(0,$l_));
+	&mov		($key,&DWP($key_off,"esp"));	# restore key
+
+	&movdqu		($inout0,&QWP(16*0,$inp));	# load input
+	&mov		($rounds,&DWP(240,$key));
+
+	&pxor		($inout5,$rndkey0);		# ^ last offset_i
+	&pxor		($inout0,$inout5);		# ^ offset_i
+
+	&movdqa		($inout4,$rndkey1);
+	&mov		($out,&DWP($out_off,"esp"));
+	if ($inline)
+	{   &aesni_inline_generate1("dec");	}
+	else
+	{   &call	("_aesni_decrypt1");	}
+
+	&xorps		($inout0,$inout5);		# ^ offset_i
+	&movaps		($rndkey1,$inout4);		# pass the checksum
+	&movdqa		($rndkey0,$inout5);		# pass last offset_i
+	&xorps		($rndkey1,$inout0);		# checksum
+	&movups		(&QWP(0,$out,$inp),$inout0);
+
+	&jmp		(&label("done"));
+
+&set_label("two",16);
+	&lea		($i1,&DWP(1,$block));
+	&mov		($key,&DWP($key_off,"esp"));	# restore key
+	&bsf		($i1,$i1);
+	&shl		($i1,4);
+	&movdqu		($inout4,&QWP(0,$l_));
+	&movdqu		($inout5,&QWP(0,$l_,$i1));
+
+	&movdqu		($inout0,&QWP(16*0,$inp));	# load input
+	&movdqu		($inout1,&QWP(16*1,$inp));
+	&mov		($rounds,&DWP(240,$key));
+
+	&movdqa		($inout3,$rndkey1);
+	&pxor		($inout4,$rndkey0);		# ^ last offset_i
+	&pxor		($inout5,$inout4);
+
+	&pxor		($inout0,$inout4);		# ^ offset_i
+	&pxor		($inout1,$inout5);
+
+	&mov		($out,&DWP($out_off,"esp"));
+	&call		("_aesni_decrypt2");
+
+	&xorps		($inout0,$inout4);		# ^ offset_i
+	&xorps		($inout1,$inout5);
+	&movdqa		($rndkey0,$inout5);		# pass last offset_i
+	&xorps		($inout3,$inout0);		# checksum
+	&movups		(&QWP(16*0,$out,$inp),$inout0);	# store output
+	&xorps		($inout3,$inout1);
+	&movups		(&QWP(16*1,$out,$inp),$inout1);
+	&movaps		($rndkey1,$inout3);		# pass the checksum
+
+	&jmp		(&label("done"));
+
+&set_label("three",16);
+	&lea		($i1,&DWP(1,$block));
+	&mov		($key,&DWP($key_off,"esp"));	# restore key
+	&bsf		($i1,$i1);
+	&shl		($i1,4);
+	&movdqu		($inout3,&QWP(0,$l_));
+	&movdqu		($inout4,&QWP(0,$l_,$i1));
+	&movdqa		($inout5,$inout3);
+
+	&movdqu		($inout0,&QWP(16*0,$inp));	# load input
+	&movdqu		($inout1,&QWP(16*1,$inp));
+	&movdqu		($inout2,&QWP(16*2,$inp));
+	&mov		($rounds,&DWP(240,$key));
+
+	&movdqa		(&QWP($checksum,"esp"),$rndkey1);
+	&pxor		($inout3,$rndkey0);		# ^ last offset_i
+	&pxor		($inout4,$inout3);
+	&pxor		($inout5,$inout4);
+
+	&pxor		($inout0,$inout3);		# ^ offset_i
+	&pxor		($inout1,$inout4);
+	&pxor		($inout2,$inout5);
+
+	&mov		($out,&DWP($out_off,"esp"));
+	&call		("_aesni_decrypt3");
+
+	&movdqa		($rndkey1,&QWP($checksum,"esp"));# pass the checksum
+	&xorps		($inout0,$inout3);		# ^ offset_i
+	&xorps		($inout1,$inout4);
+	&xorps		($inout2,$inout5);
+	&movups		(&QWP(16*0,$out,$inp),$inout0);	# store output
+	&pxor		($rndkey1,$inout0);		# checksum
+	&movdqa		($rndkey0,$inout5);		# pass last offset_i
+	&movups		(&QWP(16*1,$out,$inp),$inout1);
+	&pxor		($rndkey1,$inout1);
+	&movups		(&QWP(16*2,$out,$inp),$inout2);
+	&pxor		($rndkey1,$inout2);
+
+	&jmp		(&label("done"));
+
+&set_label("four",16);
+	&lea		($i1,&DWP(1,$block));
+	&lea		($i3,&DWP(3,$block));
+	&bsf		($i1,$i1);
+	&bsf		($i3,$i3);
+	&mov		($key,&DWP($key_off,"esp"));	# restore key
+	&shl		($i1,4);
+	&shl		($i3,4);
+	&movdqu		($inout2,&QWP(0,$l_));
+	&movdqu		($inout3,&QWP(0,$l_,$i1));
+	&movdqa		($inout4,$inout2);
+	&movdqu		($inout5,&QWP(0,$l_,$i3));
+
+	&pxor		($inout2,$rndkey0);		# ^ last offset_i
+	&movdqu		($inout0,&QWP(16*0,$inp));	# load input
+	&pxor		($inout3,$inout2);
+	&movdqu		($inout1,&QWP(16*1,$inp));
+	&pxor		($inout4,$inout3);
+	&movdqa		(&QWP(16*0,"esp"),$inout2);
+	&pxor		($inout5,$inout4);
+	&movdqa		(&QWP(16*1,"esp"),$inout3);
+	&movdqu		($inout2,&QWP(16*2,$inp));
+	&movdqu		($inout3,&QWP(16*3,$inp));
+	&mov		($rounds,&DWP(240,$key));
+
+	&movdqa		(&QWP($checksum,"esp"),$rndkey1);
+	&pxor		($inout0,&QWP(16*0,"esp"));	# ^ offset_i
+	&pxor		($inout1,&QWP(16*1,"esp"));
+	&pxor		($inout2,$inout4);
+	&pxor		($inout3,$inout5);
+
+	&mov		($out,&DWP($out_off,"esp"));
+	&call		("_aesni_decrypt4");
+
+	&movdqa		($rndkey1,&QWP($checksum,"esp"));# pass the checksum
+	&xorps		($inout0,&QWP(16*0,"esp"));	# ^ offset_i
+	&xorps		($inout1,&QWP(16*1,"esp"));
+	&xorps		($inout2,$inout4);
+	&movups		(&QWP(16*0,$out,$inp),$inout0);	# store output
+	&pxor		($rndkey1,$inout0);		# checksum
+	&xorps		($inout3,$inout5);
+	&movups		(&QWP(16*1,$out,$inp),$inout1);
+	&pxor		($rndkey1,$inout1);
+	&movdqa		($rndkey0,$inout5);		# pass last offset_i
+	&movups		(&QWP(16*2,$out,$inp),$inout2);
+	&pxor		($rndkey1,$inout2);
+	&movups		(&QWP(16*3,$out,$inp),$inout3);
+	&pxor		($rndkey1,$inout3);
+
+&set_label("done");
+	&mov	($key,&DWP($esp_off,"esp"));
+	&pxor	($inout0,$inout0);		# clear register bank
+	&pxor	($inout1,$inout1);
+	&movdqa	(&QWP(16*0,"esp"),$inout0);	# clear stack
+	&pxor	($inout2,$inout2);
+	&movdqa	(&QWP(16*1,"esp"),$inout0);
+	&pxor	($inout3,$inout3);
+	&movdqa	(&QWP(16*2,"esp"),$inout0);
+	&pxor	($inout4,$inout4);
+	&movdqa	(&QWP(16*3,"esp"),$inout0);
+	&pxor	($inout5,$inout5);
+	&movdqa	(&QWP(16*4,"esp"),$inout0);
+	&movdqa	(&QWP(16*5,"esp"),$inout0);
+	&movdqa	(&QWP(16*6,"esp"),$inout0);
+
+	&lea	("esp",&DWP(0,$key));
+	&mov	($rounds,&wparam(5));		# &offset_i
+	&mov	($rounds_,&wparam(7));		# &checksum
+	&movdqu	(&QWP(0,$rounds),$rndkey0);
+	&pxor	($rndkey0,$rndkey0);
+	&movdqu	(&QWP(0,$rounds_),$rndkey1);
+	&pxor	($rndkey1,$rndkey1);
+&function_end("aesni_ocb_decrypt");
+}
+}
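+
+# Note on the bsf/lea pairs in the OCB code above: for block number i
+# they compute ntz(i), the number of trailing zero bits of i, which
+# indexes the precomputed L[] table (a C model, illustrative only:
+# idx = __builtin_ctz(i); offset ^= L[idx];).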
+
+######################################################################
+# void $PREFIX_cbc_encrypt (const void *inp, void *out,
+#                           size_t length, const AES_KEY *key,
+#                           unsigned char *ivp, const int enc);
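+#
+# A minimal C usage sketch (illustrative only, assuming the default
+# aesni prefix; buffer and key names are hypothetical):
+#
+#	AES_KEY ks;
+#	unsigned char iv[16];			/* updated in place on return */
+#	aesni_set_encrypt_key(key, 128, &ks);
+#	aesni_cbc_encrypt(in, out, len, &ks, iv, 1);	/* enc=1: encrypt */
+#	aesni_set_decrypt_key(key, 128, &ks);
+#	aesni_cbc_encrypt(in, out, len, &ks, iv, 0);	/* enc=0: decrypt */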
+&function_begin("${PREFIX}_cbc_encrypt");
+	&mov	($inp,&wparam(0));
+	&mov	($rounds_,"esp");
+	&mov	($out,&wparam(1));
+	&sub	($rounds_,24);
+	&mov	($len,&wparam(2));
+	&and	($rounds_,-16);
+	&mov	($key,&wparam(3));
+	&mov	($key_,&wparam(4));
+	&test	($len,$len);
+	&jz	(&label("cbc_abort"));
+
+	&cmp	(&wparam(5),0);
+	&xchg	($rounds_,"esp");		# alloca
+	&movups	($ivec,&QWP(0,$key_));		# load IV
+	&mov	($rounds,&DWP(240,$key));
+	&mov	($key_,$key);			# backup $key
+	&mov	(&DWP(16,"esp"),$rounds_);	# save original %esp
+	&mov	($rounds_,$rounds);		# backup $rounds
+	&je	(&label("cbc_decrypt"));
+
+	&movaps	($inout0,$ivec);
+	&cmp	($len,16);
+	&jb	(&label("cbc_enc_tail"));
+	&sub	($len,16);
+	&jmp	(&label("cbc_enc_loop"));
+
+&set_label("cbc_enc_loop",16);
+	&movups	($ivec,&QWP(0,$inp));		# input actually
+	&lea	($inp,&DWP(16,$inp));
+	if ($inline)
+	{   &aesni_inline_generate1("enc",$inout0,$ivec);	}
+	else
+	{   &xorps($inout0,$ivec); &call("_aesni_encrypt1");	}
+	&mov	($rounds,$rounds_);	# restore $rounds
+	&mov	($key,$key_);		# restore $key
+	&movups	(&QWP(0,$out),$inout0);	# store output
+	&lea	($out,&DWP(16,$out));
+	&sub	($len,16);
+	&jnc	(&label("cbc_enc_loop"));
+	&add	($len,16);
+	&jnz	(&label("cbc_enc_tail"));
+	&movaps	($ivec,$inout0);
+	&pxor	($inout0,$inout0);
+	&jmp	(&label("cbc_ret"));
+
+&set_label("cbc_enc_tail");
+	&mov	("ecx",$len);		# zaps $rounds
+	&data_word(0xA4F3F689);		# rep movsb
+	&mov	("ecx",16);		# zero tail
+	&sub	("ecx",$len);
+	&xor	("eax","eax");		# zaps $len
+	&data_word(0xAAF3F689);		# rep stosb
+	&lea	($out,&DWP(-16,$out));	# rewind $out by 1 block
+	&mov	($rounds,$rounds_);	# restore $rounds
+	&mov	($inp,$out);		# $inp and $out are the same
+	&mov	($key,$key_);		# restore $key
+	&jmp	(&label("cbc_enc_loop"));
+######################################################################
+&set_label("cbc_decrypt",16);
+	&cmp	($len,0x50);
+	&jbe	(&label("cbc_dec_tail"));
+	&movaps	(&QWP(0,"esp"),$ivec);		# save IV
+	&sub	($len,0x50);
+	&jmp	(&label("cbc_dec_loop6_enter"));
+
+&set_label("cbc_dec_loop6",16);
+	&movaps	(&QWP(0,"esp"),$rndkey0);	# save IV
+	&movups	(&QWP(0,$out),$inout5);
+	&lea	($out,&DWP(0x10,$out));
+&set_label("cbc_dec_loop6_enter");
+	&movdqu	($inout0,&QWP(0,$inp));
+	&movdqu	($inout1,&QWP(0x10,$inp));
+	&movdqu	($inout2,&QWP(0x20,$inp));
+	&movdqu	($inout3,&QWP(0x30,$inp));
+	&movdqu	($inout4,&QWP(0x40,$inp));
+	&movdqu	($inout5,&QWP(0x50,$inp));
+
+	&call	("_aesni_decrypt6");
+
+	&movups	($rndkey1,&QWP(0,$inp));
+	&movups	($rndkey0,&QWP(0x10,$inp));
+	&xorps	($inout0,&QWP(0,"esp"));	# ^=IV
+	&xorps	($inout1,$rndkey1);
+	&movups	($rndkey1,&QWP(0x20,$inp));
+	&xorps	($inout2,$rndkey0);
+	&movups	($rndkey0,&QWP(0x30,$inp));
+	&xorps	($inout3,$rndkey1);
+	&movups	($rndkey1,&QWP(0x40,$inp));
+	&xorps	($inout4,$rndkey0);
+	&movups	($rndkey0,&QWP(0x50,$inp));	# IV
+	&xorps	($inout5,$rndkey1);
+	&movups	(&QWP(0,$out),$inout0);
+	&movups	(&QWP(0x10,$out),$inout1);
+	&lea	($inp,&DWP(0x60,$inp));
+	&movups	(&QWP(0x20,$out),$inout2);
+	&mov	($rounds,$rounds_);		# restore $rounds
+	&movups	(&QWP(0x30,$out),$inout3);
+	&mov	($key,$key_);			# restore $key
+	&movups	(&QWP(0x40,$out),$inout4);
+	&lea	($out,&DWP(0x50,$out));
+	&sub	($len,0x60);
+	&ja	(&label("cbc_dec_loop6"));
+
+	&movaps	($inout0,$inout5);
+	&movaps	($ivec,$rndkey0);
+	&add	($len,0x50);
+	&jle	(&label("cbc_dec_clear_tail_collected"));
+	&movups	(&QWP(0,$out),$inout0);
+	&lea	($out,&DWP(0x10,$out));
+&set_label("cbc_dec_tail");
+	&movups	($inout0,&QWP(0,$inp));
+	&movaps	($in0,$inout0);
+	&cmp	($len,0x10);
+	&jbe	(&label("cbc_dec_one"));
+
+	&movups	($inout1,&QWP(0x10,$inp));
+	&movaps	($in1,$inout1);
+	&cmp	($len,0x20);
+	&jbe	(&label("cbc_dec_two"));
+
+	&movups	($inout2,&QWP(0x20,$inp));
+	&cmp	($len,0x30);
+	&jbe	(&label("cbc_dec_three"));
+
+	&movups	($inout3,&QWP(0x30,$inp));
+	&cmp	($len,0x40);
+	&jbe	(&label("cbc_dec_four"));
+
+	&movups	($inout4,&QWP(0x40,$inp));
+	&movaps	(&QWP(0,"esp"),$ivec);		# save IV
+	&movups	($inout0,&QWP(0,$inp));
+	&xorps	($inout5,$inout5);
+	&call	("_aesni_decrypt6");
+	&movups	($rndkey1,&QWP(0,$inp));
+	&movups	($rndkey0,&QWP(0x10,$inp));
+	&xorps	($inout0,&QWP(0,"esp"));	# ^= IV
+	&xorps	($inout1,$rndkey1);
+	&movups	($rndkey1,&QWP(0x20,$inp));
+	&xorps	($inout2,$rndkey0);
+	&movups	($rndkey0,&QWP(0x30,$inp));
+	&xorps	($inout3,$rndkey1);
+	&movups	($ivec,&QWP(0x40,$inp));	# IV
+	&xorps	($inout4,$rndkey0);
+	&movups	(&QWP(0,$out),$inout0);
+	&movups	(&QWP(0x10,$out),$inout1);
+	&pxor	($inout1,$inout1);
+	&movups	(&QWP(0x20,$out),$inout2);
+	&pxor	($inout2,$inout2);
+	&movups	(&QWP(0x30,$out),$inout3);
+	&pxor	($inout3,$inout3);
+	&lea	($out,&DWP(0x40,$out));
+	&movaps	($inout0,$inout4);
+	&pxor	($inout4,$inout4);
+	&sub	($len,0x50);
+	&jmp	(&label("cbc_dec_tail_collected"));
+
+&set_label("cbc_dec_one",16);
+	if ($inline)
+	{   &aesni_inline_generate1("dec");	}
+	else
+	{   &call	("_aesni_decrypt1");	}
+	&xorps	($inout0,$ivec);
+	&movaps	($ivec,$in0);
+	&sub	($len,0x10);
+	&jmp	(&label("cbc_dec_tail_collected"));
+
+&set_label("cbc_dec_two",16);
+	&call	("_aesni_decrypt2");
+	&xorps	($inout0,$ivec);
+	&xorps	($inout1,$in0);
+	&movups	(&QWP(0,$out),$inout0);
+	&movaps	($inout0,$inout1);
+	&pxor	($inout1,$inout1);
+	&lea	($out,&DWP(0x10,$out));
+	&movaps	($ivec,$in1);
+	&sub	($len,0x20);
+	&jmp	(&label("cbc_dec_tail_collected"));
+
+&set_label("cbc_dec_three",16);
+	&call	("_aesni_decrypt3");
+	&xorps	($inout0,$ivec);
+	&xorps	($inout1,$in0);
+	&xorps	($inout2,$in1);
+	&movups	(&QWP(0,$out),$inout0);
+	&movaps	($inout0,$inout2);
+	&pxor	($inout2,$inout2);
+	&movups	(&QWP(0x10,$out),$inout1);
+	&pxor	($inout1,$inout1);
+	&lea	($out,&DWP(0x20,$out));
+	&movups	($ivec,&QWP(0x20,$inp));
+	&sub	($len,0x30);
+	&jmp	(&label("cbc_dec_tail_collected"));
+
+&set_label("cbc_dec_four",16);
+	&call	("_aesni_decrypt4");
+	&movups	($rndkey1,&QWP(0x10,$inp));
+	&movups	($rndkey0,&QWP(0x20,$inp));
+	&xorps	($inout0,$ivec);
+	&movups	($ivec,&QWP(0x30,$inp));
+	&xorps	($inout1,$in0);
+	&movups	(&QWP(0,$out),$inout0);
+	&xorps	($inout2,$rndkey1);
+	&movups	(&QWP(0x10,$out),$inout1);
+	&pxor	($inout1,$inout1);
+	&xorps	($inout3,$rndkey0);
+	&movups	(&QWP(0x20,$out),$inout2);
+	&pxor	($inout2,$inout2);
+	&lea	($out,&DWP(0x30,$out));
+	&movaps	($inout0,$inout3);
+	&pxor	($inout3,$inout3);
+	&sub	($len,0x40);
+	&jmp	(&label("cbc_dec_tail_collected"));
+
+&set_label("cbc_dec_clear_tail_collected",16);
+	&pxor	($inout1,$inout1);
+	&pxor	($inout2,$inout2);
+	&pxor	($inout3,$inout3);
+	&pxor	($inout4,$inout4);
+&set_label("cbc_dec_tail_collected");
+	&and	($len,15);
+	&jnz	(&label("cbc_dec_tail_partial"));
+	&movups	(&QWP(0,$out),$inout0);
+	&pxor	($rndkey0,$rndkey0);
+	&jmp	(&label("cbc_ret"));
+
+&set_label("cbc_dec_tail_partial",16);
+	&movaps	(&QWP(0,"esp"),$inout0);
+	&pxor	($rndkey0,$rndkey0);
+	&mov	("ecx",16);
+	&mov	($inp,"esp");
+	&sub	("ecx",$len);
+	&data_word(0xA4F3F689);		# rep movsb
+	&movdqa	(&QWP(0,"esp"),$inout0);
+
+&set_label("cbc_ret");
+	&mov	("esp",&DWP(16,"esp"));	# pull original %esp
+	&mov	($key_,&wparam(4));
+	&pxor	($inout0,$inout0);
+	&pxor	($rndkey1,$rndkey1);
+	&movups	(&QWP(0,$key_),$ivec);	# output IV
+	&pxor	($ivec,$ivec);
+&set_label("cbc_abort");
+&function_end("${PREFIX}_cbc_encrypt");
+
+######################################################################
+# Mechanical port from aesni-x86_64.pl.
+#
+# _aesni_set_encrypt_key is a private interface.
+# input:
+#	"eax"	const unsigned char *userKey
+#	$rounds	int bits
+#	$key	AES_KEY *key
+# output:
+#	"eax"	return code
+#	$rounds	rounds
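+#
+# (For reference: the AES round count follows from the bit count as
+# bits/32 + 6, i.e. 10/12/14 rounds for 128/192/256-bit keys. The
+# routine leaves rounds-1, i.e. 9/11/13, in $rounds; the
+# set_decrypt_key path below relies on this.)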
+
+&function_begin_B("_aesni_set_encrypt_key");
+	&push	("ebp");
+	&push	("ebx");
+	&test	("eax","eax");
+	&jz	(&label("bad_pointer"));
+	&test	($key,$key);
+	&jz	(&label("bad_pointer"));
+
+	&call	(&label("pic"));
+&set_label("pic");
+	&blindpop("ebx");
+	&lea	("ebx",&DWP(&label("key_const")."-".&label("pic"),"ebx"));
+
+	&picmeup("ebp","OPENSSL_ia32cap_P","ebx",&label("key_const"));
+	&movups	("xmm0",&QWP(0,"eax"));	# pull first 128 bits of *userKey
+	&xorps	("xmm4","xmm4");	# low dword of xmm4 is assumed 0
+	&mov	("ebp",&DWP(4,"ebp"));
+	&lea	($key,&DWP(16,$key));
+	&and	("ebp",1<<28|1<<11);	# AVX and XOP bits
+	&cmp	($rounds,256);
+	&je	(&label("14rounds"));
+	&cmp	($rounds,192);
+	&je	(&label("12rounds"));
+	&cmp	($rounds,128);
+	&jne	(&label("bad_keybits"));
+
+&set_label("10rounds",16);
+	&cmp		("ebp",1<<28);
+	&je		(&label("10rounds_alt"));
+
+	&mov		($rounds,9);
+	&$movekey	(&QWP(-16,$key),"xmm0");	# round 0
+	&aeskeygenassist("xmm1","xmm0",0x01);		# round 1
+	&call		(&label("key_128_cold"));
+	&aeskeygenassist("xmm1","xmm0",0x2);		# round 2
+	&call		(&label("key_128"));
+	&aeskeygenassist("xmm1","xmm0",0x04);		# round 3
+	&call		(&label("key_128"));
+	&aeskeygenassist("xmm1","xmm0",0x08);		# round 4
+	&call		(&label("key_128"));
+	&aeskeygenassist("xmm1","xmm0",0x10);		# round 5
+	&call		(&label("key_128"));
+	&aeskeygenassist("xmm1","xmm0",0x20);		# round 6
+	&call		(&label("key_128"));
+	&aeskeygenassist("xmm1","xmm0",0x40);		# round 7
+	&call		(&label("key_128"));
+	&aeskeygenassist("xmm1","xmm0",0x80);		# round 8
+	&call		(&label("key_128"));
+	&aeskeygenassist("xmm1","xmm0",0x1b);		# round 9
+	&call		(&label("key_128"));
+	&aeskeygenassist("xmm1","xmm0",0x36);		# round 10
+	&call		(&label("key_128"));
+	&$movekey	(&QWP(0,$key),"xmm0");
+	&mov		(&DWP(80,$key),$rounds);
+
+	&jmp	(&label("good_key"));
+
+&set_label("key_128",16);
+	&$movekey	(&QWP(0,$key),"xmm0");
+	&lea		($key,&DWP(16,$key));
+&set_label("key_128_cold");
+	&shufps		("xmm4","xmm0",0b00010000);
+	&xorps		("xmm0","xmm4");
+	&shufps		("xmm4","xmm0",0b10001100);
+	&xorps		("xmm0","xmm4");
+	&shufps		("xmm1","xmm1",0b11111111);	# critical path
+	&xorps		("xmm0","xmm1");
+	&ret();
+
+&set_label("10rounds_alt",16);
+	&movdqa		("xmm5",&QWP(0x00,"ebx"));
+	&mov		($rounds,8);
+	&movdqa		("xmm4",&QWP(0x20,"ebx"));
+	&movdqa		("xmm2","xmm0");
+	&movdqu		(&QWP(-16,$key),"xmm0");
+
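+	# A sketch of what the loop below computes (not authoritative):
+	# xmm2/xmm0 hold the previous round key k=[w0 w1 w2 w3], and xmm4
+	# carries rcon, doubled each trip by pslld. pshufb broadcasts
+	# RotWord(w3) into all four dwords; with all columns equal, the
+	# ShiftRows step inside aesenclast is a no-op, so aesenclast
+	# yields SubWord(RotWord(w3))^rcon in every dword. The pslldq/pxor
+	# triple turns k into its prefix XOR,
+	# [w0, w0^w1, w0^w1^w2, w0^w1^w2^w3], and the final pxor forms
+	# the next round key as per FIPS-197.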
+&set_label("loop_key128");
+	&pshufb		("xmm0","xmm5");
+	&aesenclast	("xmm0","xmm4");
+	&pslld		("xmm4",1);
+	&lea		($key,&DWP(16,$key));
+
+	&movdqa		("xmm3","xmm2");
+	&pslldq		("xmm2",4);
+	&pxor		("xmm3","xmm2");
+	&pslldq		("xmm2",4);
+	&pxor		("xmm3","xmm2");
+	&pslldq		("xmm2",4);
+	&pxor		("xmm2","xmm3");
+
+	&pxor		("xmm0","xmm2");
+	&movdqu		(&QWP(-16,$key),"xmm0");
+	&movdqa		("xmm2","xmm0");
+
+	&dec		($rounds);
+	&jnz		(&label("loop_key128"));
+
+	&movdqa		("xmm4",&QWP(0x30,"ebx"));
+
+	&pshufb		("xmm0","xmm5");
+	&aesenclast	("xmm0","xmm4");
+	&pslld		("xmm4",1);
+
+	&movdqa		("xmm3","xmm2");
+	&pslldq		("xmm2",4);
+	&pxor		("xmm3","xmm2");
+	&pslldq		("xmm2",4);
+	&pxor		("xmm3","xmm2");
+	&pslldq		("xmm2",4);
+	&pxor		("xmm2","xmm3");
+
+	&pxor		("xmm0","xmm2");
+	&movdqu		(&QWP(0,$key),"xmm0");
+
+	&movdqa		("xmm2","xmm0");
+	&pshufb		("xmm0","xmm5");
+	&aesenclast	("xmm0","xmm4");
+
+	&movdqa		("xmm3","xmm2");
+	&pslldq		("xmm2",4);
+	&pxor		("xmm3","xmm2");
+	&pslldq		("xmm2",4);
+	&pxor		("xmm3","xmm2");
+	&pslldq		("xmm2",4);
+	&pxor		("xmm2","xmm3");
+
+	&pxor		("xmm0","xmm2");
+	&movdqu		(&QWP(16,$key),"xmm0");
+
+	&mov		($rounds,9);
+	&mov		(&DWP(96,$key),$rounds);
+
+	&jmp	(&label("good_key"));
+
+&set_label("12rounds",16);
+	&movq		("xmm2",&QWP(16,"eax"));	# remaining 1/3 of *userKey
+	&cmp		("ebp",1<<28);
+	&je		(&label("12rounds_alt"));
+
+	&mov		($rounds,11);
+	&$movekey	(&QWP(-16,$key),"xmm0");	# round 0
+	&aeskeygenassist("xmm1","xmm2",0x01);		# round 1,2
+	&call		(&label("key_192a_cold"));
+	&aeskeygenassist("xmm1","xmm2",0x02);		# round 2,3
+	&call		(&label("key_192b"));
+	&aeskeygenassist("xmm1","xmm2",0x04);		# round 4,5
+	&call		(&label("key_192a"));
+	&aeskeygenassist("xmm1","xmm2",0x08);		# round 5,6
+	&call		(&label("key_192b"));
+	&aeskeygenassist("xmm1","xmm2",0x10);		# round 7,8
+	&call		(&label("key_192a"));
+	&aeskeygenassist("xmm1","xmm2",0x20);		# round 8,9
+	&call		(&label("key_192b"));
+	&aeskeygenassist("xmm1","xmm2",0x40);		# round 10,11
+	&call		(&label("key_192a"));
+	&aeskeygenassist("xmm1","xmm2",0x80);		# round 11,12
+	&call		(&label("key_192b"));
+	&$movekey	(&QWP(0,$key),"xmm0");
+	&mov		(&DWP(48,$key),$rounds);
+
+	&jmp	(&label("good_key"));
+
+&set_label("key_192a",16);
+	&$movekey	(&QWP(0,$key),"xmm0");
+	&lea		($key,&DWP(16,$key));
+&set_label("key_192a_cold",16);
+	&movaps		("xmm5","xmm2");
+&set_label("key_192b_warm");
+	&shufps		("xmm4","xmm0",0b00010000);
+	&movdqa		("xmm3","xmm2");
+	&xorps		("xmm0","xmm4");
+	&shufps		("xmm4","xmm0",0b10001100);
+	&pslldq		("xmm3",4);
+	&xorps		("xmm0","xmm4");
+	&pshufd		("xmm1","xmm1",0b01010101);	# critical path
+	&pxor		("xmm2","xmm3");
+	&pxor		("xmm0","xmm1");
+	&pshufd		("xmm3","xmm0",0b11111111);
+	&pxor		("xmm2","xmm3");
+	&ret();
+
+&set_label("key_192b",16);
+	&movaps		("xmm3","xmm0");
+	&shufps		("xmm5","xmm0",0b01000100);
+	&$movekey	(&QWP(0,$key),"xmm5");
+	&shufps		("xmm3","xmm2",0b01001110);
+	&$movekey	(&QWP(16,$key),"xmm3");
+	&lea		($key,&DWP(32,$key));
+	&jmp		(&label("key_192b_warm"));
+
+&set_label("12rounds_alt",16);
+	&movdqa		("xmm5",&QWP(0x10,"ebx"));
+	&movdqa		("xmm4",&QWP(0x20,"ebx"));
+	&mov		($rounds,8);
+	&movdqu		(&QWP(-16,$key),"xmm0");
+
+&set_label("loop_key192");
+	&movq		(&QWP(0,$key),"xmm2");
+	&movdqa		("xmm1","xmm2");
+	&pshufb		("xmm2","xmm5");
+	&aesenclast	("xmm2","xmm4");
+	&pslld		("xmm4",1);
+	&lea		($key,&DWP(24,$key));
+
+	&movdqa		("xmm3","xmm0");
+	&pslldq		("xmm0",4);
+	&pxor		("xmm3","xmm0");
+	&pslldq		("xmm0",4);
+	&pxor		("xmm3","xmm0");
+	&pslldq		("xmm0",4);
+	&pxor		("xmm0","xmm3");
+
+	&pshufd		("xmm3","xmm0",0xff);
+	&pxor		("xmm3","xmm1");
+	&pslldq		("xmm1",4);
+	&pxor		("xmm3","xmm1");
+
+	&pxor		("xmm0","xmm2");
+	&pxor		("xmm2","xmm3");
+	&movdqu		(&QWP(-16,$key),"xmm0");
+
+	&dec		($rounds);
+	&jnz		(&label("loop_key192"));
+
+	&mov	($rounds,11);
+	&mov	(&DWP(32,$key),$rounds);
+
+	&jmp	(&label("good_key"));
+
+&set_label("14rounds",16);
+	&movups		("xmm2",&QWP(16,"eax"));	# remaining half of *userKey
+	&lea		($key,&DWP(16,$key));
+	&cmp		("ebp",1<<28);
+	&je		(&label("14rounds_alt"));
+
+	&mov		($rounds,13);
+	&$movekey	(&QWP(-32,$key),"xmm0");	# round 0
+	&$movekey	(&QWP(-16,$key),"xmm2");	# round 1
+	&aeskeygenassist("xmm1","xmm2",0x01);		# round 2
+	&call		(&label("key_256a_cold"));
+	&aeskeygenassist("xmm1","xmm0",0x01);		# round 3
+	&call		(&label("key_256b"));
+	&aeskeygenassist("xmm1","xmm2",0x02);		# round 4
+	&call		(&label("key_256a"));
+	&aeskeygenassist("xmm1","xmm0",0x02);		# round 5
+	&call		(&label("key_256b"));
+	&aeskeygenassist("xmm1","xmm2",0x04);		# round 6
+	&call		(&label("key_256a"));
+	&aeskeygenassist("xmm1","xmm0",0x04);		# round 7
+	&call		(&label("key_256b"));
+	&aeskeygenassist("xmm1","xmm2",0x08);		# round 8
+	&call		(&label("key_256a"));
+	&aeskeygenassist("xmm1","xmm0",0x08);		# round 9
+	&call		(&label("key_256b"));
+	&aeskeygenassist("xmm1","xmm2",0x10);		# round 10
+	&call		(&label("key_256a"));
+	&aeskeygenassist("xmm1","xmm0",0x10);		# round 11
+	&call		(&label("key_256b"));
+	&aeskeygenassist("xmm1","xmm2",0x20);		# round 12
+	&call		(&label("key_256a"));
+	&aeskeygenassist("xmm1","xmm0",0x20);		# round 13
+	&call		(&label("key_256b"));
+	&aeskeygenassist("xmm1","xmm2",0x40);		# round 14
+	&call		(&label("key_256a"));
+	&$movekey	(&QWP(0,$key),"xmm0");
+	&mov		(&DWP(16,$key),$rounds);
+	&xor		("eax","eax");
+
+	&jmp	(&label("good_key"));
+
+&set_label("key_256a",16);
+	&$movekey	(&QWP(0,$key),"xmm2");
+	&lea		($key,&DWP(16,$key));
+&set_label("key_256a_cold");
+	&shufps		("xmm4","xmm0",0b00010000);
+	&xorps		("xmm0","xmm4");
+	&shufps		("xmm4","xmm0",0b10001100);
+	&xorps		("xmm0","xmm4");
+	&shufps		("xmm1","xmm1",0b11111111);	# critical path
+	&xorps		("xmm0","xmm1");
+	&ret();
+
+&set_label("key_256b",16);
+	&$movekey	(&QWP(0,$key),"xmm0");
+	&lea		($key,&DWP(16,$key));
+
+	&shufps		("xmm4","xmm2",0b00010000);
+	&xorps		("xmm2","xmm4");
+	&shufps		("xmm4","xmm2",0b10001100);
+	&xorps		("xmm2","xmm4");
+	&shufps		("xmm1","xmm1",0b10101010);	# critical path
+	&xorps		("xmm2","xmm1");
+	&ret();
+
+&set_label("14rounds_alt",16);
+	&movdqa		("xmm5",&QWP(0x00,"ebx"));
+	&movdqa		("xmm4",&QWP(0x20,"ebx"));
+	&mov		($rounds,7);
+	&movdqu		(&QWP(-32,$key),"xmm0");
+	&movdqa		("xmm1","xmm2");
+	&movdqu		(&QWP(-16,$key),"xmm2");
+
+&set_label("loop_key256");
+	&pshufb		("xmm2","xmm5");
+	&aesenclast	("xmm2","xmm4");
+
+	&movdqa		("xmm3","xmm0");
+	&pslldq		("xmm0",4);
+	&pxor		("xmm3","xmm0");
+	&pslldq		("xmm0",4);
+	&pxor		("xmm3","xmm0");
+	&pslldq		("xmm0",4);
+	&pxor		("xmm0","xmm3");
+	&pslld		("xmm4",1);
+
+	&pxor		("xmm0","xmm2");
+	&movdqu		(&QWP(0,$key),"xmm0");
+
+	&dec		($rounds);
+	&jz		(&label("done_key256"));
+
+	&pshufd		("xmm2","xmm0",0xff);
+	&pxor		("xmm3","xmm3");
+	&aesenclast	("xmm2","xmm3");
+
+	&movdqa		("xmm3","xmm1");
+	&pslldq		("xmm1",4);
+	&pxor		("xmm3","xmm1");
+	&pslldq		("xmm1",4);
+	&pxor		("xmm3","xmm1");
+	&pslldq		("xmm1",4);
+	&pxor		("xmm1","xmm3");
+
+	&pxor		("xmm2","xmm1");
+	&movdqu		(&QWP(16,$key),"xmm2");
+	&lea		($key,&DWP(32,$key));
+	&movdqa		("xmm1","xmm2");
+	&jmp		(&label("loop_key256"));
+
+&set_label("done_key256");
+	&mov		($rounds,13);
+	&mov		(&DWP(16,$key),$rounds);
+
+&set_label("good_key");
+	&pxor	("xmm0","xmm0");
+	&pxor	("xmm1","xmm1");
+	&pxor	("xmm2","xmm2");
+	&pxor	("xmm3","xmm3");
+	&pxor	("xmm4","xmm4");
+	&pxor	("xmm5","xmm5");
+	&xor	("eax","eax");
+	&pop	("ebx");
+	&pop	("ebp");
+	&ret	();
+
+&set_label("bad_pointer",4);
+	&mov	("eax",-1);
+	&pop	("ebx");
+	&pop	("ebp");
+	&ret	();
+&set_label("bad_keybits",4);
+	&pxor	("xmm0","xmm0");
+	&mov	("eax",-2);
+	&pop	("ebx");
+	&pop	("ebp");
+	&ret	();
+&function_end_B("_aesni_set_encrypt_key");
+
+# int $PREFIX_set_encrypt_key (const unsigned char *userKey, int bits,
+#                              AES_KEY *key)
+&function_begin_B("${PREFIX}_set_encrypt_key");
+	&mov	("eax",&wparam(0));
+	&mov	($rounds,&wparam(1));
+	&mov	($key,&wparam(2));
+	&call	("_aesni_set_encrypt_key");
+	&ret	();
+&function_end_B("${PREFIX}_set_encrypt_key");
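+
+# Return values (as set above): 0 on success, -1 for a NULL userKey or
+# key pointer ("bad_pointer"), -2 for unsupported bits ("bad_keybits").
+# Hypothetical caller:
+#
+#	AES_KEY ks;
+#	if (aesni_set_encrypt_key(userKey, 256, &ks) != 0)
+#		return -1;	/* bad pointer or bad key bits */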
+
+# int $PREFIX_set_decrypt_key (const unsigned char *userKey, int bits,
+#                              AES_KEY *key)
+&function_begin_B("${PREFIX}_set_decrypt_key");
+	&mov	("eax",&wparam(0));
+	&mov	($rounds,&wparam(1));
+	&mov	($key,&wparam(2));
+	&call	("_aesni_set_encrypt_key");
+	&mov	($key,&wparam(2));
+	&shl	($rounds,4);	# rounds-1 after _aesni_set_encrypt_key
+	&test	("eax","eax");
+	&jnz	(&label("dec_key_ret"));
+	&lea	("eax",&DWP(16,$key,$rounds));	# end of key schedule
+
+	&$movekey	("xmm0",&QWP(0,$key));	# just swap
+	&$movekey	("xmm1",&QWP(0,"eax"));
+	&$movekey	(&QWP(0,"eax"),"xmm0");
+	&$movekey	(&QWP(0,$key),"xmm1");
+	&lea		($key,&DWP(16,$key));
+	&lea		("eax",&DWP(-16,"eax"));
+
+&set_label("dec_key_inverse");
+	&$movekey	("xmm0",&QWP(0,$key));	# swap and inverse
+	&$movekey	("xmm1",&QWP(0,"eax"));
+	&aesimc		("xmm0","xmm0");
+	&aesimc		("xmm1","xmm1");
+	&lea		($key,&DWP(16,$key));
+	&lea		("eax",&DWP(-16,"eax"));
+	&$movekey	(&QWP(16,"eax"),"xmm0");
+	&$movekey	(&QWP(-16,$key),"xmm1");
+	&cmp		("eax",$key);
+	&ja		(&label("dec_key_inverse"));
+
+	&$movekey	("xmm0",&QWP(0,$key));	# inverse middle
+	&aesimc		("xmm0","xmm0");
+	&$movekey	(&QWP(0,$key),"xmm0");
+
+	&pxor		("xmm0","xmm0");
+	&pxor		("xmm1","xmm1");
+	&xor		("eax","eax");		# return success
+&set_label("dec_key_ret");
+	&ret	();
+&function_end_B("${PREFIX}_set_decrypt_key");
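+
+# The loop above builds the AES "equivalent inverse cipher" schedule:
+# round keys are swapped end-for-end so aesdec can walk the schedule
+# forward, and every key except the two outermost is run through
+# aesimc (InvMixColumns). A C-intrinsics sketch of the same transform
+# (illustrative only; 10-round case, hypothetical helper, compile
+# with -maes):
+#
+#	#include <wmmintrin.h>
+#	static void invert_key_schedule(const __m128i rk[11], __m128i drk[11])
+#	{
+#	    drk[0]  = rk[10];
+#	    for (int i = 1; i < 10; i++)
+#	        drk[i] = _mm_aesimc_si128(rk[10 - i]);
+#	    drk[10] = rk[0];
+#	}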
+
+&set_label("key_const",64);
+&data_word(0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d);
+&data_word(0x04070605,0x04070605,0x04070605,0x04070605);
+&data_word(1,1,1,1);
+&data_word(0x1b,0x1b,0x1b,0x1b);
+&asciz("AES for Intel AES-NI, CRYPTOGAMS by <appro\@openssl.org>");
+
+&asm_finish();
+
+close STDOUT or die "error closing STDOUT: $!";
diff --git a/ap/lib/libssl/openssl-1.1.1o/crypto/aes/asm/aesni-x86_64.pl b/ap/lib/libssl/openssl-1.1.1o/crypto/aes/asm/aesni-x86_64.pl
new file mode 100644
index 0000000..f8c2e23
--- /dev/null
+++ b/ap/lib/libssl/openssl-1.1.1o/crypto/aes/asm/aesni-x86_64.pl
@@ -0,0 +1,5157 @@
+#! /usr/bin/env perl
+# Copyright 2009-2020 The OpenSSL Project Authors. All Rights Reserved.
+#
+# Licensed under the OpenSSL license (the "License").  You may not use
+# this file except in compliance with the License.  You can obtain a copy
+# in the file LICENSE in the source distribution or at
+# https://www.openssl.org/source/license.html
+
+#
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+#
+# This module implements support for the Intel AES-NI extension. In
+# the OpenSSL context it's used with the Intel engine, but it can also
+# be used as a drop-in replacement for crypto/aes/asm/aes-x86_64.pl
+# [see below for details].
+#
+# Performance.
+#
+# Given the aes(enc|dec) instructions' latency, asymptotic performance
+# for non-parallelizable modes such as CBC encrypt is 3.75 cycles per
+# byte processed with a 128-bit key. And given their throughput,
+# asymptotic performance for parallelizable modes is 1.25 cycles per
+# byte. Being an asymptotic limit, it's not something you commonly
+# achieve in reality, but how close does one get? Below are results
+# collected for different modes and block sizes. Pairs of numbers are
+# for en-/decryption.
+#
+#	16-byte     64-byte     256-byte    1-KB        8-KB
+# ECB	4.25/4.25   1.38/1.38   1.28/1.28   1.26/1.26	1.26/1.26
+# CTR	5.42/5.42   1.92/1.92   1.44/1.44   1.28/1.28   1.26/1.26
+# CBC	4.38/4.43   4.15/1.43   4.07/1.32   4.07/1.29   4.06/1.28
+# CCM	5.66/9.42   4.42/5.41   4.16/4.40   4.09/4.15   4.06/4.07
+# OFB	5.42/5.42   4.64/4.64   4.44/4.44   4.39/4.39   4.38/4.38
+# CFB	5.73/5.85   5.56/5.62   5.48/5.56   5.47/5.55   5.47/5.55
+#
+# ECB, CTR, CBC and CCM results are free from EVP overhead. This means
+# that the otherwise-used 'openssl speed -evp aes-128-??? -engine aesni
+# [-decrypt]' will exhibit 10-15% worse results for smaller blocks.
+# The results were collected with a specially crafted speed.c benchmark
+# in order to compare them with the results reported in the "Intel
+# Advanced Encryption Standard (AES) New Instruction Set" White Paper
+# Revision 3.0 dated May 2010. All of the above results are
+# consistently better. This module also provides better performance
+# for block sizes smaller than 128 bytes at points *not* represented
+# in the above table.
+#
+# Looking at the results for the 8-KB buffer.
+#
+# CFB and OFB results are far from the limit, because the
+# implementation uses the "generic" CRYPTO_[c|o]fb128_encrypt
+# interfaces relying on single-block aesni_encrypt, which is not the
+# most optimal way to go. The CBC encrypt result is unexpectedly high
+# and there is no documented explanation for it. Seemingly there is a
+# small penalty for feeding the result back to the AES unit the way
+# it's done in CBC mode. There is nothing one can do, and the result
+# appears optimal. The CCM result is identical to CBC, because CBC-MAC
+# is essentially CBC encrypt without saving the output. The CCM CTR
+# "stays invisible," because it's neatly interleaved with the CBC-MAC.
+# This provides a ~30% improvement over a "straightforward" CCM
+# implementation with CTR and CBC-MAC performed disjointly.
+# Parallelizable modes practically achieve the theoretical limit.
+#
+# Looking at how results vary with buffer size.
+#
+# Curves are practically saturated at 1-KB buffer size. In most cases
+# "256-byte" performance is >95%, and "64-byte" is ~90%, of the "8-KB"
+# one. The CTR curve doesn't follow this pattern and is the
+# "slowest"-changing one, with the "256-byte" result being 87% of
+# "8-KB." This is because the overhead in CTR mode is the most
+# computationally intensive. Small-block CCM decrypt is slower than
+# encrypt, because the first CTR and last CBC-MAC iterations can't be
+# interleaved.
+#
+# Results for 192- and 256-bit keys.
+#
+# EVP-free results were observed to scale perfectly with the number of
+# rounds for larger block sizes, i.e. the 192-bit result being 10/12
+# times lower and the 256-bit one 10/14. Well, in the CBC encrypt case
+# the differences are a tad smaller, because the above-mentioned
+# penalty biases all results by the same constant value. In a similar
+# way function-call overhead affects small-block performance, as well
+# as OFB and CFB results. Differences are not large; the most common
+# coefficients are 10/11.7 and 10/13.4 (as opposed to 10/12.0 and
+# 10/14.0), but one observes even 10/11.2 and 10/12.4 (CTR, OFB, CFB)...
+
+# January 2011
+#
+# While the Westmere processor features 6-cycle latency for
+# aes[enc|dec] instructions, which can be scheduled only every second
+# cycle, Sandy Bridge spends 8 cycles per instruction but can schedule
+# one every cycle. This means that code targeting Westmere would
+# perform suboptimally on Sandy Bridge. Therefore this update.
+#
+# In addition, non-parallelizable CBC encrypt (as well as CCM) is
+# optimized. Relative improvement might appear modest, 8% on Westmere,
+# but in absolute terms it's 3.77 cycles per byte encrypted with a
+# 128-bit key on Westmere, and 5.07 on Sandy Bridge. These numbers
+# should be compared to asymptotic limits of 3.75 for Westmere and
+# 5.00 for Sandy Bridge. Actually, the fact that they get this close
+# to asymptotic limits is quite amazing. Indeed, the limit is
+# calculated as latency times the number of rounds, 10 for a 128-bit
+# key, divided by 16, the number of bytes in a block; in other words,
+# it accounts *solely* for the aesenc instructions. But there are
+# extra instructions, and numbers so close to the asymptotic limits
+# mean that it's as if it takes as little as *one* additional cycle to
+# execute all of them. How is that possible? It is possible thanks to
+# the out-of-order execution logic, which manages to overlap
+# post-processing of the previous block, things like saving the
+# output, with actual encryption of the current block, as well as
+# pre-processing of the current block, things like fetching input and
+# xor-ing it with the 0-round element of the key schedule, with actual
+# encryption of the previous block. Keep this in mind...
+#
+# For parallelizable modes, such as ECB, CBC decrypt and CTR, higher
+# performance is achieved by interleaving instructions working on
+# independent blocks. In that case the asymptotic limit for such modes
+# can be obtained by dividing the above-mentioned numbers by the AES
+# instructions' interleave factor. Westmere can execute at most 3
+# instructions at a time, meaning that the optimal interleave factor
+# is 3, and that's where the "magic" number of 1.25 comes from.
+# "Optimal interleave factor" means that increasing the interleave
+# factor does not improve performance. The formula has proven to
+# reflect reality pretty well on Westmere... Sandy Bridge, on the
+# other hand, can execute up to 8 AES instructions at a time, so how
+# does varying the interleave factor affect performance? Here is a
+# table for ECB (numbers are cycles per byte processed with a 128-bit
+# key):
+#
+# instruction interleave factor		3x	6x	8x
+# theoretical asymptotic limit		1.67	0.83	0.625
+# measured performance for 8KB block	1.05	0.86	0.84
+#
+# "as if" interleave factor		4.7x	5.8x	6.0x
+#
+# Further data for other parallelizable modes:
+#
+# CBC decrypt				1.16	0.93	0.74
+# CTR					1.14	0.91	0.74
+#
+# Well, given the 3x column it's probably inappropriate to call the
+# limit asymptotic if it can be surpassed, isn't it? What happens
+# there? Rewind to the CBC paragraph for the answer. Yes, out-of-order
+# execution magic is responsible for this. The processor overlaps not
+# only the additional instructions with the AES ones, but even AES
+# instructions processing adjacent triplets of independent blocks. In
+# the 6x case the additional instructions still claim a
+# disproportionately small amount of additional cycles, but in the 8x
+# case the number of instructions must be a tad too high for the
+# out-of-order logic to cope with, and the AES unit remains
+# underutilized... As you can see, 8x interleave is hardly
+# justifiable, so there's no need to feel bad that 32-bit aesni-x86.pl
+# utilizes 6x interleave because of its limited register bank capacity.
+#
+# Higher interleave factors do have a negative impact on Westmere
+# performance. While for ECB mode it's a negligible ~1.5%, the other
+# parallelizable modes perform ~5% worse, which is outweighed by a
+# ~25% improvement on Sandy Bridge. To balance the regression on
+# Westmere, CTR mode was implemented with a 6x aesenc interleave factor.
+
+# April 2011
+#
+# Add aesni_xts_[en|de]crypt. Westmere spends 1.25 cycles processing
+# one byte out of 8KB with a 128-bit key; Sandy Bridge, 0.90. Just as
+# in CTR mode, the AES instruction interleave factor was chosen to be 6x.
+
+# November 2015
+#
+# Add aesni_ocb_[en|de]crypt. AES instruction interleave factor was
+# chosen to be 6x.
+
+######################################################################
+# Current large-block performance in cycles per byte processed with
+# 128-bit key (less is better).
+#
+#		CBC en-/decrypt	CTR	XTS	ECB	OCB
+# Westmere	3.77/1.25	1.25	1.25	1.26
+# * Bridge	5.07/0.74	0.75	0.90	0.85	0.98
+# Haswell	4.44/0.63	0.63	0.73	0.63	0.70
+# Skylake	2.62/0.63	0.63	0.63	0.63
+# Silvermont	5.75/3.54	3.56	4.12	3.87(*)	4.11
+# Knights L	2.54/0.77	0.78	0.85	-	1.50
+# Goldmont	3.82/1.26	1.26	1.29	1.29	1.50
+# Bulldozer	5.77/0.70	0.72	0.90	0.70	0.95
+# Ryzen		2.71/0.35	0.35	0.44	0.38	0.49
+#
+# (*)	Atom Silvermont ECB result is suboptimal because of penalties
+#	incurred by operations on %xmm8-15. As ECB is not considered
+#	critical, nothing was done to mitigate the problem.
+
+$PREFIX="aesni";	# if $PREFIX is set to "AES", the script
+			# generates drop-in replacement for
+			# crypto/aes/asm/aes-x86_64.pl:-)
+
+$flavour = shift;
+$output  = shift;
+if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
+
+$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
+( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
+die "can't locate x86_64-xlate.pl";
+
+open OUT,"| \"$^X\" \"$xlate\" $flavour \"$output\"";
+*STDOUT=*OUT;
+
+$movkey = $PREFIX eq "aesni" ? "movups" : "movups";	# both branches are movups at present
+@_4args=$win64?	("%rcx","%rdx","%r8", "%r9") :	# Win64 order
+		("%rdi","%rsi","%rdx","%rcx");	# Unix order
+
+$code=".text\n";
+$code.=".extern	OPENSSL_ia32cap_P\n";
+
+$rounds="%eax";	# input to and changed by aesni_[en|de]cryptN !!!
+# this is natural Unix argument order for public $PREFIX_[ecb|cbc]_encrypt ...
+$inp="%rdi";
+$out="%rsi";
+$len="%rdx";
+$key="%rcx";	# input to and changed by aesni_[en|de]cryptN !!!
+$ivp="%r8";	# cbc, ctr, ...
+
+$rnds_="%r10d";	# backup copy for $rounds
+$key_="%r11";	# backup copy for $key
+
+# %xmm register layout
+$rndkey0="%xmm0";	$rndkey1="%xmm1";
+$inout0="%xmm2";	$inout1="%xmm3";
+$inout2="%xmm4";	$inout3="%xmm5";
+$inout4="%xmm6";	$inout5="%xmm7";
+$inout6="%xmm8";	$inout7="%xmm9";
+
+$in2="%xmm6";		$in1="%xmm7";	# used in CBC decrypt, CTR, ...
+$in0="%xmm8";		$iv="%xmm9";
+
+# Inline version of the internal aesni_[en|de]crypt1.
+#
+# Why a folded loop? Because aes[enc|dec] is slow enough to accommodate
+# the cycles which take care of the loop variables...
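+#
+# A C-intrinsics model of the folded loop (a sketch; note that this
+# key schedule stores Nr-1, i.e. 9/11/13, at key->rounds — cf. the
+# "rounds-1" comment in set_decrypt_key — which is exactly the loop's
+# trip count):
+#
+#	#include <wmmintrin.h>
+#	static __m128i aes_encrypt1(__m128i x, const __m128i *rk, int Nr)
+#	{
+#	    x = _mm_xor_si128(x, rk[0]);		/* round 0 */
+#	    for (int i = 1; i < Nr; i++)		/* Nr-1 folded rounds */
+#	        x = _mm_aesenc_si128(x, rk[i]);
+#	    return _mm_aesenclast_si128(x, rk[Nr]);	/* final round */
+#	}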
+{ my $sn;
+sub aesni_generate1 {
+my ($p,$key,$rounds,$inout,$ivec)=@_;	$inout=$inout0 if (!defined($inout));
+++$sn;
+$code.=<<___;
+	$movkey	($key),$rndkey0
+	$movkey	16($key),$rndkey1
+___
+$code.=<<___ if (defined($ivec));
+	xorps	$rndkey0,$ivec
+	lea	32($key),$key
+	xorps	$ivec,$inout
+___
+$code.=<<___ if (!defined($ivec));
+	lea	32($key),$key
+	xorps	$rndkey0,$inout
+___
+$code.=<<___;
+.Loop_${p}1_$sn:
+	aes${p}	$rndkey1,$inout
+	dec	$rounds
+	$movkey	($key),$rndkey1
+	lea	16($key),$key
+	jnz	.Loop_${p}1_$sn	# loop body is 16 bytes
+	aes${p}last	$rndkey1,$inout
+___
+}}
+# void $PREFIX_[en|de]crypt (const void *inp, void *out, const AES_KEY *key);
+#
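+# Hypothetical single-block call (one 16-byte block; key schedule from
+# ${PREFIX}_set_encrypt_key):
+#
+#	aesni_encrypt(in, out, &ks);	/* in, out: unsigned char[16] */
+#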
+{ my ($inp,$out,$key) = @_4args;
+
+$code.=<<___;
+.globl	${PREFIX}_encrypt
+.type	${PREFIX}_encrypt,\@abi-omnipotent
+.align	16
+${PREFIX}_encrypt:
+.cfi_startproc
+	movups	($inp),$inout0		# load input
+	mov	240($key),$rounds	# key->rounds
+___
+	&aesni_generate1("enc",$key,$rounds);
+$code.=<<___;
+	 pxor	$rndkey0,$rndkey0	# clear register bank
+	 pxor	$rndkey1,$rndkey1
+	movups	$inout0,($out)		# output
+	 pxor	$inout0,$inout0
+	ret
+.cfi_endproc
+.size	${PREFIX}_encrypt,.-${PREFIX}_encrypt
+
+.globl	${PREFIX}_decrypt
+.type	${PREFIX}_decrypt,\@abi-omnipotent
+.align	16
+${PREFIX}_decrypt:
+.cfi_startproc
+	movups	($inp),$inout0		# load input
+	mov	240($key),$rounds	# key->rounds
+___
+	&aesni_generate1("dec",$key,$rounds);
+$code.=<<___;
+	 pxor	$rndkey0,$rndkey0	# clear register bank
+	 pxor	$rndkey1,$rndkey1
+	movups	$inout0,($out)		# output
+	 pxor	$inout0,$inout0
+	ret
+.cfi_endproc
+.size	${PREFIX}_decrypt, .-${PREFIX}_decrypt
+___
+}
+
+# _aesni_[en|de]cryptN are private interfaces, where N denotes the
+# interleave factor. Why were 3x subroutines originally used in loops?
+# Even though aes[enc|dec] latency was originally 6, they could be
+# scheduled only every *2nd* cycle. Thus 3x interleave provided
+# optimal utilization, i.e. the subroutine's throughput was virtually
+# the same as that of the non-interleaved subroutine [for up to 3
+# input blocks]. This is why it originally made no sense to implement
+# a 2x subroutine. But times change, and it became appropriate to
+# spend an extra 192 bytes on a 2x subroutine for the sake of Atom
+# Silvermont. For processors that can schedule aes[enc|dec] every
+# cycle, the optimal interleave factor equals the corresponding
+# instruction's latency; 8x is optimal for * Bridge and
+# "super-optimal" for other Intel CPUs...
+
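+# In C-intrinsics terms the interleaving below amounts to (a sketch,
+# 2x case; each round-key load is amortized over both blocks):
+#
+#	#include <wmmintrin.h>
+#	static void aes_encrypt2(__m128i b[2], const __m128i *rk, int Nr)
+#	{
+#	    __m128i x = _mm_xor_si128(b[0], rk[0]);
+#	    __m128i y = _mm_xor_si128(b[1], rk[0]);
+#	    for (int i = 1; i < Nr; i++) {
+#	        x = _mm_aesenc_si128(x, rk[i]);	/* independent blocks  */
+#	        y = _mm_aesenc_si128(y, rk[i]);	/* overlap in the pipe */
+#	    }
+#	    b[0] = _mm_aesenclast_si128(x, rk[Nr]);
+#	    b[1] = _mm_aesenclast_si128(y, rk[Nr]);
+#	}
+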
+sub aesni_generate2 {
+my $dir=shift;
+# As already mentioned it takes in $key and $rounds, which are *not*
+# preserved. $inout[0-1] is cipher/clear text...
+$code.=<<___;
+.type	_aesni_${dir}rypt2,\@abi-omnipotent
+.align	16
+_aesni_${dir}rypt2:
+.cfi_startproc
+	$movkey	($key),$rndkey0
+	shl	\$4,$rounds
+	$movkey	16($key),$rndkey1
+	xorps	$rndkey0,$inout0
+	xorps	$rndkey0,$inout1
+	$movkey	32($key),$rndkey0
+	lea	32($key,$rounds),$key
+	neg	%rax				# $rounds
+	add	\$16,%rax
+
+.L${dir}_loop2:
+	aes${dir}	$rndkey1,$inout0
+	aes${dir}	$rndkey1,$inout1
+	$movkey		($key,%rax),$rndkey1
+	add		\$32,%rax
+	aes${dir}	$rndkey0,$inout0
+	aes${dir}	$rndkey0,$inout1
+	$movkey		-16($key,%rax),$rndkey0
+	jnz		.L${dir}_loop2
+
+	aes${dir}	$rndkey1,$inout0
+	aes${dir}	$rndkey1,$inout1
+	aes${dir}last	$rndkey0,$inout0
+	aes${dir}last	$rndkey0,$inout1
+	ret
+.cfi_endproc
+.size	_aesni_${dir}rypt2,.-_aesni_${dir}rypt2
+___
+}
+sub aesni_generate3 {
+my $dir=shift;
+# As already mentioned it takes in $key and $rounds, which are *not*
+# preserved. $inout[0-2] is cipher/clear text...
+$code.=<<___;
+.type	_aesni_${dir}rypt3,\@abi-omnipotent
+.align	16
+_aesni_${dir}rypt3:
+.cfi_startproc
+	$movkey	($key),$rndkey0
+	shl	\$4,$rounds
+	$movkey	16($key),$rndkey1
+	xorps	$rndkey0,$inout0
+	xorps	$rndkey0,$inout1
+	xorps	$rndkey0,$inout2
+	$movkey	32($key),$rndkey0
+	lea	32($key,$rounds),$key
+	neg	%rax				# $rounds
+	add	\$16,%rax
+
+.L${dir}_loop3:
+	aes${dir}	$rndkey1,$inout0
+	aes${dir}	$rndkey1,$inout1
+	aes${dir}	$rndkey1,$inout2
+	$movkey		($key,%rax),$rndkey1
+	add		\$32,%rax
+	aes${dir}	$rndkey0,$inout0
+	aes${dir}	$rndkey0,$inout1
+	aes${dir}	$rndkey0,$inout2
+	$movkey		-16($key,%rax),$rndkey0
+	jnz		.L${dir}_loop3
+
+	aes${dir}	$rndkey1,$inout0
+	aes${dir}	$rndkey1,$inout1
+	aes${dir}	$rndkey1,$inout2
+	aes${dir}last	$rndkey0,$inout0
+	aes${dir}last	$rndkey0,$inout1
+	aes${dir}last	$rndkey0,$inout2
+	ret
+.cfi_endproc
+.size	_aesni_${dir}rypt3,.-_aesni_${dir}rypt3
+___
+}
+# 4x interleave is implemented to improve small-block performance,
+# most notably [and naturally] 4-block performance, by ~30%. One can
+# argue that 5x should have been implemented as well, but the
+# improvement would be <20%, so it's not worth it...
+sub aesni_generate4 {
+my $dir=shift;
+# As already mentioned it takes in $key and $rounds, which are *not*
+# preserved. $inout[0-3] is cipher/clear text...
+$code.=<<___;
+.type	_aesni_${dir}rypt4,\@abi-omnipotent
+.align	16
+_aesni_${dir}rypt4:
+.cfi_startproc
+	$movkey	($key),$rndkey0
+	shl	\$4,$rounds
+	$movkey	16($key),$rndkey1
+	xorps	$rndkey0,$inout0
+	xorps	$rndkey0,$inout1
+	xorps	$rndkey0,$inout2
+	xorps	$rndkey0,$inout3
+	$movkey	32($key),$rndkey0
+	lea	32($key,$rounds),$key
+	neg	%rax				# $rounds
+	.byte	0x0f,0x1f,0x00
+	add	\$16,%rax
+
+.L${dir}_loop4:
+	aes${dir}	$rndkey1,$inout0
+	aes${dir}	$rndkey1,$inout1
+	aes${dir}	$rndkey1,$inout2
+	aes${dir}	$rndkey1,$inout3
+	$movkey		($key,%rax),$rndkey1
+	add		\$32,%rax
+	aes${dir}	$rndkey0,$inout0
+	aes${dir}	$rndkey0,$inout1
+	aes${dir}	$rndkey0,$inout2
+	aes${dir}	$rndkey0,$inout3
+	$movkey		-16($key,%rax),$rndkey0
+	jnz		.L${dir}_loop4
+
+	aes${dir}	$rndkey1,$inout0
+	aes${dir}	$rndkey1,$inout1
+	aes${dir}	$rndkey1,$inout2
+	aes${dir}	$rndkey1,$inout3
+	aes${dir}last	$rndkey0,$inout0
+	aes${dir}last	$rndkey0,$inout1
+	aes${dir}last	$rndkey0,$inout2
+	aes${dir}last	$rndkey0,$inout3
+	ret
+.cfi_endproc
+.size	_aesni_${dir}rypt4,.-_aesni_${dir}rypt4
+___
+}
+sub aesni_generate6 {
+my $dir=shift;
+# As already mentioned it takes in $key and $rounds, which are *not*
+# preserved. $inout[0-5] is cipher/clear text...
+$code.=<<___;
+.type	_aesni_${dir}rypt6,\@abi-omnipotent
+.align	16
+_aesni_${dir}rypt6:
+.cfi_startproc
+	$movkey		($key),$rndkey0
+	shl		\$4,$rounds
+	$movkey		16($key),$rndkey1
+	xorps		$rndkey0,$inout0
+	pxor		$rndkey0,$inout1
+	pxor		$rndkey0,$inout2
+	aes${dir}	$rndkey1,$inout0
+	lea		32($key,$rounds),$key
+	neg		%rax			# $rounds
+	aes${dir}	$rndkey1,$inout1
+	pxor		$rndkey0,$inout3
+	pxor		$rndkey0,$inout4
+	aes${dir}	$rndkey1,$inout2
+	pxor		$rndkey0,$inout5
+	$movkey		($key,%rax),$rndkey0
+	add		\$16,%rax
+	jmp		.L${dir}_loop6_enter
+.align	16
+.L${dir}_loop6:
+	aes${dir}	$rndkey1,$inout0
+	aes${dir}	$rndkey1,$inout1
+	aes${dir}	$rndkey1,$inout2
+.L${dir}_loop6_enter:
+	aes${dir}	$rndkey1,$inout3
+	aes${dir}	$rndkey1,$inout4
+	aes${dir}	$rndkey1,$inout5
+	$movkey		($key,%rax),$rndkey1
+	add		\$32,%rax
+	aes${dir}	$rndkey0,$inout0
+	aes${dir}	$rndkey0,$inout1
+	aes${dir}	$rndkey0,$inout2
+	aes${dir}	$rndkey0,$inout3
+	aes${dir}	$rndkey0,$inout4
+	aes${dir}	$rndkey0,$inout5
+	$movkey		-16($key,%rax),$rndkey0
+	jnz		.L${dir}_loop6
+
+	aes${dir}	$rndkey1,$inout0
+	aes${dir}	$rndkey1,$inout1
+	aes${dir}	$rndkey1,$inout2
+	aes${dir}	$rndkey1,$inout3
+	aes${dir}	$rndkey1,$inout4
+	aes${dir}	$rndkey1,$inout5
+	aes${dir}last	$rndkey0,$inout0
+	aes${dir}last	$rndkey0,$inout1
+	aes${dir}last	$rndkey0,$inout2
+	aes${dir}last	$rndkey0,$inout3
+	aes${dir}last	$rndkey0,$inout4
+	aes${dir}last	$rndkey0,$inout5
+	ret
+.cfi_endproc
+.size	_aesni_${dir}rypt6,.-_aesni_${dir}rypt6
+___
+}
+sub aesni_generate8 {
+my $dir=shift;
+# As already mentioned it takes in $key and $rounds, which are *not*
+# preserved. $inout[0-7] is cipher/clear text...
+$code.=<<___;
+.type	_aesni_${dir}rypt8,\@abi-omnipotent
+.align	16
+_aesni_${dir}rypt8:
+.cfi_startproc
+	$movkey		($key),$rndkey0
+	shl		\$4,$rounds
+	$movkey		16($key),$rndkey1
+	xorps		$rndkey0,$inout0
+	xorps		$rndkey0,$inout1
+	pxor		$rndkey0,$inout2
+	pxor		$rndkey0,$inout3
+	pxor		$rndkey0,$inout4
+	lea		32($key,$rounds),$key
+	neg		%rax			# $rounds
+	aes${dir}	$rndkey1,$inout0
+	pxor		$rndkey0,$inout5
+	pxor		$rndkey0,$inout6
+	aes${dir}	$rndkey1,$inout1
+	pxor		$rndkey0,$inout7
+	$movkey		($key,%rax),$rndkey0
+	add		\$16,%rax
+	jmp		.L${dir}_loop8_inner
+.align	16
+.L${dir}_loop8:
+	aes${dir}	$rndkey1,$inout0
+	aes${dir}	$rndkey1,$inout1
+.L${dir}_loop8_inner:
+	aes${dir}	$rndkey1,$inout2
+	aes${dir}	$rndkey1,$inout3
+	aes${dir}	$rndkey1,$inout4
+	aes${dir}	$rndkey1,$inout5
+	aes${dir}	$rndkey1,$inout6
+	aes${dir}	$rndkey1,$inout7
+.L${dir}_loop8_enter:
+	$movkey		($key,%rax),$rndkey1
+	add		\$32,%rax
+	aes${dir}	$rndkey0,$inout0
+	aes${dir}	$rndkey0,$inout1
+	aes${dir}	$rndkey0,$inout2
+	aes${dir}	$rndkey0,$inout3
+	aes${dir}	$rndkey0,$inout4
+	aes${dir}	$rndkey0,$inout5
+	aes${dir}	$rndkey0,$inout6
+	aes${dir}	$rndkey0,$inout7
+	$movkey		-16($key,%rax),$rndkey0
+	jnz		.L${dir}_loop8
+
+	aes${dir}	$rndkey1,$inout0
+	aes${dir}	$rndkey1,$inout1
+	aes${dir}	$rndkey1,$inout2
+	aes${dir}	$rndkey1,$inout3
+	aes${dir}	$rndkey1,$inout4
+	aes${dir}	$rndkey1,$inout5
+	aes${dir}	$rndkey1,$inout6
+	aes${dir}	$rndkey1,$inout7
+	aes${dir}last	$rndkey0,$inout0
+	aes${dir}last	$rndkey0,$inout1
+	aes${dir}last	$rndkey0,$inout2
+	aes${dir}last	$rndkey0,$inout3
+	aes${dir}last	$rndkey0,$inout4
+	aes${dir}last	$rndkey0,$inout5
+	aes${dir}last	$rndkey0,$inout6
+	aes${dir}last	$rndkey0,$inout7
+	ret
+.cfi_endproc
+.size	_aesni_${dir}rypt8,.-_aesni_${dir}rypt8
+___
+}
+&aesni_generate2("enc") if ($PREFIX eq "aesni");
+&aesni_generate2("dec");
+&aesni_generate3("enc") if ($PREFIX eq "aesni");
+&aesni_generate3("dec");
+&aesni_generate4("enc") if ($PREFIX eq "aesni");
+&aesni_generate4("dec");
+&aesni_generate6("enc") if ($PREFIX eq "aesni");
+&aesni_generate6("dec");
+&aesni_generate8("enc") if ($PREFIX eq "aesni");
+&aesni_generate8("dec");
+
+if ($PREFIX eq "aesni") {
+########################################################################
+# void aesni_ecb_encrypt (const void *in, void *out,
+#			  size_t length, const AES_KEY *key,
+#			  int enc);
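+#
+# Hypothetical call (the code truncates length to a multiple of 16;
+# enc selects direction):
+#
+#	aesni_ecb_encrypt(in, out, len, &ks_enc, 1);	/* encrypt */
+#	aesni_ecb_encrypt(in, out, len, &ks_dec, 0);	/* decrypt */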
+$code.=<<___;
+.globl	aesni_ecb_encrypt
+.type	aesni_ecb_encrypt,\@function,5
+.align	16
+aesni_ecb_encrypt:
+.cfi_startproc
+___
+$code.=<<___ if ($win64);
+	lea	-0x58(%rsp),%rsp
+	movaps	%xmm6,(%rsp)		# offload $inout4..7
+	movaps	%xmm7,0x10(%rsp)
+	movaps	%xmm8,0x20(%rsp)
+	movaps	%xmm9,0x30(%rsp)
+.Lecb_enc_body:
+___
+$code.=<<___;
+	and	\$-16,$len		# if ($len<16)
+	jz	.Lecb_ret		# return
+
+	mov	240($key),$rounds	# key->rounds
+	$movkey	($key),$rndkey0
+	mov	$key,$key_		# backup $key
+	mov	$rounds,$rnds_		# backup $rounds
+	test	%r8d,%r8d		# 5th argument
+	jz	.Lecb_decrypt
+#--------------------------- ECB ENCRYPT ------------------------------#
+	cmp	\$0x80,$len		# if ($len<8*16)
+	jb	.Lecb_enc_tail		# short input
+
+	movdqu	($inp),$inout0		# load 8 input blocks
+	movdqu	0x10($inp),$inout1
+	movdqu	0x20($inp),$inout2
+	movdqu	0x30($inp),$inout3
+	movdqu	0x40($inp),$inout4
+	movdqu	0x50($inp),$inout5
+	movdqu	0x60($inp),$inout6
+	movdqu	0x70($inp),$inout7
+	lea	0x80($inp),$inp		# $inp+=8*16
+	sub	\$0x80,$len		# $len-=8*16 (can be zero)
+	jmp	.Lecb_enc_loop8_enter
+.align 16
+.Lecb_enc_loop8:
+	movups	$inout0,($out)		# store 8 output blocks
+	mov	$key_,$key		# restore $key
+	movdqu	($inp),$inout0		# load 8 input blocks
+	mov	$rnds_,$rounds		# restore $rounds
+	movups	$inout1,0x10($out)
+	movdqu	0x10($inp),$inout1
+	movups	$inout2,0x20($out)
+	movdqu	0x20($inp),$inout2
+	movups	$inout3,0x30($out)
+	movdqu	0x30($inp),$inout3
+	movups	$inout4,0x40($out)
+	movdqu	0x40($inp),$inout4
+	movups	$inout5,0x50($out)
+	movdqu	0x50($inp),$inout5
+	movups	$inout6,0x60($out)
+	movdqu	0x60($inp),$inout6
+	movups	$inout7,0x70($out)
+	lea	0x80($out),$out		# $out+=8*16
+	movdqu	0x70($inp),$inout7
+	lea	0x80($inp),$inp		# $inp+=8*16
+.Lecb_enc_loop8_enter:
+
+	call	_aesni_encrypt8
+
+	sub	\$0x80,$len
+	jnc	.Lecb_enc_loop8		# loop if $len-=8*16 didn't borrow
+
+	movups	$inout0,($out)		# store 8 output blocks
+	mov	$key_,$key		# restore $key
+	movups	$inout1,0x10($out)
+	mov	$rnds_,$rounds		# restore $rounds
+	movups	$inout2,0x20($out)
+	movups	$inout3,0x30($out)
+	movups	$inout4,0x40($out)
+	movups	$inout5,0x50($out)
+	movups	$inout6,0x60($out)
+	movups	$inout7,0x70($out)
+	lea	0x80($out),$out		# $out+=8*16
+	add	\$0x80,$len		# restore real remaining $len
+	jz	.Lecb_ret		# done if ($len==0)
+
+.Lecb_enc_tail:				# $len is less than 8*16
+	movups	($inp),$inout0
+	cmp	\$0x20,$len
+	jb	.Lecb_enc_one
+	movups	0x10($inp),$inout1
+	je	.Lecb_enc_two
+	movups	0x20($inp),$inout2
+	cmp	\$0x40,$len
+	jb	.Lecb_enc_three
+	movups	0x30($inp),$inout3
+	je	.Lecb_enc_four
+	movups	0x40($inp),$inout4
+	cmp	\$0x60,$len
+	jb	.Lecb_enc_five
+	movups	0x50($inp),$inout5
+	je	.Lecb_enc_six
+	movdqu	0x60($inp),$inout6
+	xorps	$inout7,$inout7
+	call	_aesni_encrypt8
+	movups	$inout0,($out)		# store 7 output blocks
+	movups	$inout1,0x10($out)
+	movups	$inout2,0x20($out)
+	movups	$inout3,0x30($out)
+	movups	$inout4,0x40($out)
+	movups	$inout5,0x50($out)
+	movups	$inout6,0x60($out)
+	jmp	.Lecb_ret
+.align	16
+.Lecb_enc_one:
+___
+	&aesni_generate1("enc",$key,$rounds);
+$code.=<<___;
+	movups	$inout0,($out)		# store one output block
+	jmp	.Lecb_ret
+.align	16
+.Lecb_enc_two:
+	call	_aesni_encrypt2
+	movups	$inout0,($out)		# store 2 output blocks
+	movups	$inout1,0x10($out)
+	jmp	.Lecb_ret
+.align	16
+.Lecb_enc_three:
+	call	_aesni_encrypt3
+	movups	$inout0,($out)		# store 3 output blocks
+	movups	$inout1,0x10($out)
+	movups	$inout2,0x20($out)
+	jmp	.Lecb_ret
+.align	16
+.Lecb_enc_four:
+	call	_aesni_encrypt4
+	movups	$inout0,($out)		# store 4 output blocks
+	movups	$inout1,0x10($out)
+	movups	$inout2,0x20($out)
+	movups	$inout3,0x30($out)
+	jmp	.Lecb_ret
+.align	16
+.Lecb_enc_five:
+	xorps	$inout5,$inout5
+	call	_aesni_encrypt6
+	movups	$inout0,($out)		# store 5 output blocks
+	movups	$inout1,0x10($out)
+	movups	$inout2,0x20($out)
+	movups	$inout3,0x30($out)
+	movups	$inout4,0x40($out)
+	jmp	.Lecb_ret
+.align	16
+.Lecb_enc_six:
+	call	_aesni_encrypt6
+	movups	$inout0,($out)		# store 6 output blocks
+	movups	$inout1,0x10($out)
+	movups	$inout2,0x20($out)
+	movups	$inout3,0x30($out)
+	movups	$inout4,0x40($out)
+	movups	$inout5,0x50($out)
+	jmp	.Lecb_ret
+#--------------------------- ECB DECRYPT ------------------------------#
+.align	16
+.Lecb_decrypt:
+	cmp	\$0x80,$len		# if ($len<8*16)
+	jb	.Lecb_dec_tail		# short input
+
+	movdqu	($inp),$inout0		# load 8 input blocks
+	movdqu	0x10($inp),$inout1
+	movdqu	0x20($inp),$inout2
+	movdqu	0x30($inp),$inout3
+	movdqu	0x40($inp),$inout4
+	movdqu	0x50($inp),$inout5
+	movdqu	0x60($inp),$inout6
+	movdqu	0x70($inp),$inout7
+	lea	0x80($inp),$inp		# $inp+=8*16
+	sub	\$0x80,$len		# $len-=8*16 (can be zero)
+	jmp	.Lecb_dec_loop8_enter
+.align 16
+.Lecb_dec_loop8:
+	movups	$inout0,($out)		# store 8 output blocks
+	mov	$key_,$key		# restore $key
+	movdqu	($inp),$inout0		# load 8 input blocks
+	mov	$rnds_,$rounds		# restore $rounds
+	movups	$inout1,0x10($out)
+	movdqu	0x10($inp),$inout1
+	movups	$inout2,0x20($out)
+	movdqu	0x20($inp),$inout2
+	movups	$inout3,0x30($out)
+	movdqu	0x30($inp),$inout3
+	movups	$inout4,0x40($out)
+	movdqu	0x40($inp),$inout4
+	movups	$inout5,0x50($out)
+	movdqu	0x50($inp),$inout5
+	movups	$inout6,0x60($out)
+	movdqu	0x60($inp),$inout6
+	movups	$inout7,0x70($out)
+	lea	0x80($out),$out		# $out+=8*16
+	movdqu	0x70($inp),$inout7
+	lea	0x80($inp),$inp		# $inp+=8*16
+.Lecb_dec_loop8_enter:
+
+	call	_aesni_decrypt8
+
+	$movkey	($key_),$rndkey0
+	sub	\$0x80,$len
+	jnc	.Lecb_dec_loop8		# loop if $len-=8*16 didn't borrow
+
+	movups	$inout0,($out)		# store 8 output blocks
+	 pxor	$inout0,$inout0		# clear register bank
+	mov	$key_,$key		# restore $key
+	movups	$inout1,0x10($out)
+	 pxor	$inout1,$inout1
+	mov	$rnds_,$rounds		# restore $rounds
+	movups	$inout2,0x20($out)
+	 pxor	$inout2,$inout2
+	movups	$inout3,0x30($out)
+	 pxor	$inout3,$inout3
+	movups	$inout4,0x40($out)
+	 pxor	$inout4,$inout4
+	movups	$inout5,0x50($out)
+	 pxor	$inout5,$inout5
+	movups	$inout6,0x60($out)
+	 pxor	$inout6,$inout6
+	movups	$inout7,0x70($out)
+	 pxor	$inout7,$inout7
+	lea	0x80($out),$out		# $out+=8*16
+	add	\$0x80,$len		# restore real remaining $len
+	jz	.Lecb_ret		# done if ($len==0)
+
+.Lecb_dec_tail:
+	movups	($inp),$inout0
+	cmp	\$0x20,$len
+	jb	.Lecb_dec_one
+	movups	0x10($inp),$inout1
+	je	.Lecb_dec_two
+	movups	0x20($inp),$inout2
+	cmp	\$0x40,$len
+	jb	.Lecb_dec_three
+	movups	0x30($inp),$inout3
+	je	.Lecb_dec_four
+	movups	0x40($inp),$inout4
+	cmp	\$0x60,$len
+	jb	.Lecb_dec_five
+	movups	0x50($inp),$inout5
+	je	.Lecb_dec_six
+	movups	0x60($inp),$inout6
+	$movkey	($key),$rndkey0
+	xorps	$inout7,$inout7
+	call	_aesni_decrypt8
+	movups	$inout0,($out)		# store 7 output blocks
+	 pxor	$inout0,$inout0		# clear register bank
+	movups	$inout1,0x10($out)
+	 pxor	$inout1,$inout1
+	movups	$inout2,0x20($out)
+	 pxor	$inout2,$inout2
+	movups	$inout3,0x30($out)
+	 pxor	$inout3,$inout3
+	movups	$inout4,0x40($out)
+	 pxor	$inout4,$inout4
+	movups	$inout5,0x50($out)
+	 pxor	$inout5,$inout5
+	movups	$inout6,0x60($out)
+	 pxor	$inout6,$inout6
+	 pxor	$inout7,$inout7
+	jmp	.Lecb_ret
+.align	16
+.Lecb_dec_one:
+___
+	&aesni_generate1("dec",$key,$rounds);
+$code.=<<___;
+	movups	$inout0,($out)		# store one output block
+	 pxor	$inout0,$inout0		# clear register bank
+	jmp	.Lecb_ret
+.align	16
+.Lecb_dec_two:
+	call	_aesni_decrypt2
+	movups	$inout0,($out)		# store 2 output blocks
+	 pxor	$inout0,$inout0		# clear register bank
+	movups	$inout1,0x10($out)
+	 pxor	$inout1,$inout1
+	jmp	.Lecb_ret
+.align	16
+.Lecb_dec_three:
+	call	_aesni_decrypt3
+	movups	$inout0,($out)		# store 3 output blocks
+	 pxor	$inout0,$inout0		# clear register bank
+	movups	$inout1,0x10($out)
+	 pxor	$inout1,$inout1
+	movups	$inout2,0x20($out)
+	 pxor	$inout2,$inout2
+	jmp	.Lecb_ret
+.align	16
+.Lecb_dec_four:
+	call	_aesni_decrypt4
+	movups	$inout0,($out)		# store 4 output blocks
+	 pxor	$inout0,$inout0		# clear register bank
+	movups	$inout1,0x10($out)
+	 pxor	$inout1,$inout1
+	movups	$inout2,0x20($out)
+	 pxor	$inout2,$inout2
+	movups	$inout3,0x30($out)
+	 pxor	$inout3,$inout3
+	jmp	.Lecb_ret
+.align	16
+.Lecb_dec_five:
+	xorps	$inout5,$inout5
+	call	_aesni_decrypt6
+	movups	$inout0,($out)		# store 5 output blocks
+	 pxor	$inout0,$inout0		# clear register bank
+	movups	$inout1,0x10($out)
+	 pxor	$inout1,$inout1
+	movups	$inout2,0x20($out)
+	 pxor	$inout2,$inout2
+	movups	$inout3,0x30($out)
+	 pxor	$inout3,$inout3
+	movups	$inout4,0x40($out)
+	 pxor	$inout4,$inout4
+	 pxor	$inout5,$inout5
+	jmp	.Lecb_ret
+.align	16
+.Lecb_dec_six:
+	call	_aesni_decrypt6
+	movups	$inout0,($out)		# store 6 output blocks
+	 pxor	$inout0,$inout0		# clear register bank
+	movups	$inout1,0x10($out)
+	 pxor	$inout1,$inout1
+	movups	$inout2,0x20($out)
+	 pxor	$inout2,$inout2
+	movups	$inout3,0x30($out)
+	 pxor	$inout3,$inout3
+	movups	$inout4,0x40($out)
+	 pxor	$inout4,$inout4
+	movups	$inout5,0x50($out)
+	 pxor	$inout5,$inout5
+
+.Lecb_ret:
+	xorps	$rndkey0,$rndkey0	# %xmm0
+	pxor	$rndkey1,$rndkey1
+___
+$code.=<<___ if ($win64);
+	movaps	(%rsp),%xmm6
+	movaps	%xmm0,(%rsp)		# clear stack
+	movaps	0x10(%rsp),%xmm7
+	movaps	%xmm0,0x10(%rsp)
+	movaps	0x20(%rsp),%xmm8
+	movaps	%xmm0,0x20(%rsp)
+	movaps	0x30(%rsp),%xmm9
+	movaps	%xmm0,0x30(%rsp)
+	lea	0x58(%rsp),%rsp
+.Lecb_enc_ret:
+___
+$code.=<<___;
+	ret
+.cfi_endproc
+.size	aesni_ecb_encrypt,.-aesni_ecb_encrypt
+___
+
+{
+######################################################################
+# void aesni_ccm64_[en|de]crypt_blocks (const void *in, void *out,
+#                         size_t blocks, const AES_KEY *key,
+#                         const char *ivec, char *cmac);
+#
+# Handles only complete blocks; operates on a 64-bit counter and
+# does not update *ivec! Nor does it finalize the CMAC value
+# (see engine/eng_aesni.c for details).
+#
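+# A minimal caller sketch in C (illustrative only; everything except the
+# prototype above is an assumption). The caller owns the counter block and
+# the running CMAC, since this routine updates neither:
+#
+#	AES_KEY ks;
+#	unsigned char iv[16], cmac[16];	/* prepared per CCM, e.g. RFC 3610 */
+#	AES_set_encrypt_key(key_bytes, 128, &ks);
+#	aesni_ccm64_encrypt_blocks(in, out, blocks, &ks, iv, cmac);
+#	/* advance the counter in iv and finalize cmac here */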
+{
+my $cmac="%r9";	# 6th argument
+
+my $increment="%xmm9";
+my $iv="%xmm6";
+my $bswap_mask="%xmm7";
+
+$code.=<<___;
+.globl	aesni_ccm64_encrypt_blocks
+.type	aesni_ccm64_encrypt_blocks,\@function,6
+.align	16
+aesni_ccm64_encrypt_blocks:
+.cfi_startproc
+___
+$code.=<<___ if ($win64);
+	lea	-0x58(%rsp),%rsp
+	movaps	%xmm6,(%rsp)		# $iv
+	movaps	%xmm7,0x10(%rsp)	# $bswap_mask
+	movaps	%xmm8,0x20(%rsp)	# $in0
+	movaps	%xmm9,0x30(%rsp)	# $increment
+.Lccm64_enc_body:
+___
+$code.=<<___;
+	mov	240($key),$rounds		# key->rounds
+	movdqu	($ivp),$iv
+	movdqa	.Lincrement64(%rip),$increment
+	movdqa	.Lbswap_mask(%rip),$bswap_mask
+
+	shl	\$4,$rounds
+	mov	\$16,$rnds_
+	lea	0($key),$key_
+	movdqu	($cmac),$inout1
+	movdqa	$iv,$inout0
+	lea	32($key,$rounds),$key		# end of key schedule
+	pshufb	$bswap_mask,$iv
+	sub	%rax,%r10			# twisted $rounds
+	jmp	.Lccm64_enc_outer
+.align	16
+.Lccm64_enc_outer:
+	$movkey	($key_),$rndkey0
+	mov	%r10,%rax
+	movups	($inp),$in0			# load inp
+
+	xorps	$rndkey0,$inout0		# counter
+	$movkey	16($key_),$rndkey1
+	xorps	$in0,$rndkey0
+	xorps	$rndkey0,$inout1		# cmac^=inp
+	$movkey	32($key_),$rndkey0
+
+.Lccm64_enc2_loop:
+	aesenc	$rndkey1,$inout0
+	aesenc	$rndkey1,$inout1
+	$movkey	($key,%rax),$rndkey1
+	add	\$32,%rax
+	aesenc	$rndkey0,$inout0
+	aesenc	$rndkey0,$inout1
+	$movkey	-16($key,%rax),$rndkey0
+	jnz	.Lccm64_enc2_loop
+	aesenc	$rndkey1,$inout0
+	aesenc	$rndkey1,$inout1
+	paddq	$increment,$iv
+	dec	$len				# $len-- ($len is in blocks)
+	aesenclast	$rndkey0,$inout0
+	aesenclast	$rndkey0,$inout1
+
+	lea	16($inp),$inp
+	xorps	$inout0,$in0			# inp ^= E(iv)
+	movdqa	$iv,$inout0
+	movups	$in0,($out)			# save output
+	pshufb	$bswap_mask,$inout0
+	lea	16($out),$out			# $out+=16
+	jnz	.Lccm64_enc_outer		# loop if ($len!=0)
+
+	 pxor	$rndkey0,$rndkey0		# clear register bank
+	 pxor	$rndkey1,$rndkey1
+	 pxor	$inout0,$inout0
+	movups	$inout1,($cmac)			# store resulting mac
+	 pxor	$inout1,$inout1
+	 pxor	$in0,$in0
+	 pxor	$iv,$iv
+___
+$code.=<<___ if ($win64);
+	movaps	(%rsp),%xmm6
+	movaps	%xmm0,(%rsp)			# clear stack
+	movaps	0x10(%rsp),%xmm7
+	movaps	%xmm0,0x10(%rsp)
+	movaps	0x20(%rsp),%xmm8
+	movaps	%xmm0,0x20(%rsp)
+	movaps	0x30(%rsp),%xmm9
+	movaps	%xmm0,0x30(%rsp)
+	lea	0x58(%rsp),%rsp
+.Lccm64_enc_ret:
+___
+$code.=<<___;
+	ret
+.cfi_endproc
+.size	aesni_ccm64_encrypt_blocks,.-aesni_ccm64_encrypt_blocks
+___
+######################################################################
+$code.=<<___;
+.globl	aesni_ccm64_decrypt_blocks
+.type	aesni_ccm64_decrypt_blocks,\@function,6
+.align	16
+aesni_ccm64_decrypt_blocks:
+.cfi_startproc
+___
+$code.=<<___ if ($win64);
+	lea	-0x58(%rsp),%rsp
+	movaps	%xmm6,(%rsp)		# $iv
+	movaps	%xmm7,0x10(%rsp)	# $bswap_mask
+	movaps	%xmm8,0x20(%rsp)	# $in0
+	movaps	%xmm9,0x30(%rsp)	# $increment
+.Lccm64_dec_body:
+___
+$code.=<<___;
+	mov	240($key),$rounds		# key->rounds
+	movups	($ivp),$iv
+	movdqu	($cmac),$inout1
+	movdqa	.Lincrement64(%rip),$increment
+	movdqa	.Lbswap_mask(%rip),$bswap_mask
+
+	movaps	$iv,$inout0
+	mov	$rounds,$rnds_
+	mov	$key,$key_
+	pshufb	$bswap_mask,$iv
+___
+	&aesni_generate1("enc",$key,$rounds);
+$code.=<<___;
+	shl	\$4,$rnds_
+	mov	\$16,$rounds
+	movups	($inp),$in0			# load inp
+	paddq	$increment,$iv
+	lea	16($inp),$inp			# $inp+=16
+	sub	%r10,%rax			# twisted $rounds
+	lea	32($key_,$rnds_),$key		# end of key schedule
+	mov	%rax,%r10
+	jmp	.Lccm64_dec_outer
+.align	16
+.Lccm64_dec_outer:
+	xorps	$inout0,$in0			# inp ^= E(iv)
+	movdqa	$iv,$inout0
+	movups	$in0,($out)			# save output
+	lea	16($out),$out			# $out+=16
+	pshufb	$bswap_mask,$inout0
+
+	sub	\$1,$len			# $len-- ($len is in blocks)
+	jz	.Lccm64_dec_break		# if ($len==0) break
+
+	$movkey	($key_),$rndkey0
+	mov	%r10,%rax
+	$movkey	16($key_),$rndkey1
+	xorps	$rndkey0,$in0
+	xorps	$rndkey0,$inout0
+	xorps	$in0,$inout1			# cmac^=out
+	$movkey	32($key_),$rndkey0
+	jmp	.Lccm64_dec2_loop
+.align	16
+.Lccm64_dec2_loop:
+	aesenc	$rndkey1,$inout0
+	aesenc	$rndkey1,$inout1
+	$movkey	($key,%rax),$rndkey1
+	add	\$32,%rax
+	aesenc	$rndkey0,$inout0
+	aesenc	$rndkey0,$inout1
+	$movkey	-16($key,%rax),$rndkey0
+	jnz	.Lccm64_dec2_loop
+	movups	($inp),$in0			# load input
+	paddq	$increment,$iv
+	aesenc	$rndkey1,$inout0
+	aesenc	$rndkey1,$inout1
+	aesenclast	$rndkey0,$inout0
+	aesenclast	$rndkey0,$inout1
+	lea	16($inp),$inp			# $inp+=16
+	jmp	.Lccm64_dec_outer
+
+.align	16
+.Lccm64_dec_break:
+	#xorps	$in0,$inout1			# cmac^=out (folded into aesni_generate1 below)
+	mov	240($key_),$rounds
+___
+	&aesni_generate1("enc",$key_,$rounds,$inout1,$in0);
+$code.=<<___;
+	 pxor	$rndkey0,$rndkey0		# clear register bank
+	 pxor	$rndkey1,$rndkey1
+	 pxor	$inout0,$inout0
+	movups	$inout1,($cmac)			# store resulting mac
+	 pxor	$inout1,$inout1
+	 pxor	$in0,$in0
+	 pxor	$iv,$iv
+___
+$code.=<<___ if ($win64);
+	movaps	(%rsp),%xmm6
+	movaps	%xmm0,(%rsp)			# clear stack
+	movaps	0x10(%rsp),%xmm7
+	movaps	%xmm0,0x10(%rsp)
+	movaps	0x20(%rsp),%xmm8
+	movaps	%xmm0,0x20(%rsp)
+	movaps	0x30(%rsp),%xmm9
+	movaps	%xmm0,0x30(%rsp)
+	lea	0x58(%rsp),%rsp
+.Lccm64_dec_ret:
+___
+$code.=<<___;
+	ret
+.cfi_endproc
+.size	aesni_ccm64_decrypt_blocks,.-aesni_ccm64_decrypt_blocks
+___
+}
+######################################################################
+# void aesni_ctr32_encrypt_blocks (const void *in, void *out,
+#                         size_t blocks, const AES_KEY *key,
+#                         const char *ivec);
+#
+# Handles only complete blocks; operates on a 32-bit counter and
+# does not update *ivec! (see crypto/modes/ctr128.c for details)
+#
+# Overhaul based on suggestions from Shay Gueron and Vlad Krasnov,
+# http://rt.openssl.org/Ticket/Display.html?id=3021&user=guest&pass=guest.
+# The key ideas are a full unroll and modulo-scheduled counter
+# calculations with a zero-round key xor.
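+# A rough C model of the counter-block precomputation (illustrative only;
+# load32/store32/bswap32 are assumed helpers): each of the 8 stack slots
+# holds counter_block ^ round[0], so the zero-round AddRoundKey is folded
+# into counter generation and only the big-endian counter word in bytes
+# 12..15 differs between slots:
+#
+#	uint32_t ctr = bswap32(load32(ivec + 12));	/* numeric counter  */
+#	uint32_t k0  = load32(round0_key + 12);		/* last round-0 word */
+#	for (int i = 0; i < 8; i++) {
+#		memcpy(blk[i], iv_xor_round0, 16);	/* iv ^ round[0]    */
+#		store32(blk[i] + 12, bswap32(ctr + i) ^ k0);
+#	}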
+{
+my ($in0,$in1,$in2,$in3,$in4,$in5)=map("%xmm$_",(10..15));
+my ($key0,$ctr)=("%ebp","${ivp}d");
+my $frame_size = 0x80 + ($win64?160:0);
+
+$code.=<<___;
+.globl	aesni_ctr32_encrypt_blocks
+.type	aesni_ctr32_encrypt_blocks,\@function,5
+.align	16
+aesni_ctr32_encrypt_blocks:
+.cfi_startproc
+	cmp	\$1,$len
+	jne	.Lctr32_bulk
+
+	# handle single block without allocating stack frame,
+	# useful when handling edge cases
+	movups	($ivp),$inout0
+	movups	($inp),$inout1
+	mov	240($key),%edx			# key->rounds
+___
+	&aesni_generate1("enc",$key,"%edx");
+$code.=<<___;
+	 pxor	$rndkey0,$rndkey0		# clear register bank
+	 pxor	$rndkey1,$rndkey1
+	xorps	$inout1,$inout0
+	 pxor	$inout1,$inout1
+	movups	$inout0,($out)
+	 xorps	$inout0,$inout0
+	jmp	.Lctr32_epilogue
+
+.align	16
+.Lctr32_bulk:
+	lea	(%rsp),$key_			# use $key_ as frame pointer
+.cfi_def_cfa_register	$key_
+	push	%rbp
+.cfi_push	%rbp
+	sub	\$$frame_size,%rsp
+	and	\$-16,%rsp	# Linux kernel stack can be incorrectly seeded
+___
+$code.=<<___ if ($win64);
+	movaps	%xmm6,-0xa8($key_)		# offload everything
+	movaps	%xmm7,-0x98($key_)
+	movaps	%xmm8,-0x88($key_)
+	movaps	%xmm9,-0x78($key_)
+	movaps	%xmm10,-0x68($key_)
+	movaps	%xmm11,-0x58($key_)
+	movaps	%xmm12,-0x48($key_)
+	movaps	%xmm13,-0x38($key_)
+	movaps	%xmm14,-0x28($key_)
+	movaps	%xmm15,-0x18($key_)
+.Lctr32_body:
+___
+$code.=<<___;
+
+	# 8 16-byte words on top of stack are counter values
+	# xor-ed with zero-round key
+
+	movdqu	($ivp),$inout0
+	movdqu	($key),$rndkey0
+	mov	12($ivp),$ctr			# counter LSB
+	pxor	$rndkey0,$inout0
+	mov	12($key),$key0			# 0-round key LSB
+	movdqa	$inout0,0x00(%rsp)		# populate counter block
+	bswap	$ctr
+	movdqa	$inout0,$inout1
+	movdqa	$inout0,$inout2
+	movdqa	$inout0,$inout3
+	movdqa	$inout0,0x40(%rsp)
+	movdqa	$inout0,0x50(%rsp)
+	movdqa	$inout0,0x60(%rsp)
+	mov	%rdx,%r10			# about to borrow %rdx
+	movdqa	$inout0,0x70(%rsp)
+
+	lea	1($ctr),%rax
+	 lea	2($ctr),%rdx
+	bswap	%eax
+	 bswap	%edx
+	xor	$key0,%eax
+	 xor	$key0,%edx
+	pinsrd	\$3,%eax,$inout1
+	lea	3($ctr),%rax
+	movdqa	$inout1,0x10(%rsp)
+	 pinsrd	\$3,%edx,$inout2
+	bswap	%eax
+	 mov	%r10,%rdx			# restore %rdx
+	 lea	4($ctr),%r10
+	 movdqa	$inout2,0x20(%rsp)
+	xor	$key0,%eax
+	 bswap	%r10d
+	pinsrd	\$3,%eax,$inout3
+	 xor	$key0,%r10d
+	movdqa	$inout3,0x30(%rsp)
+	lea	5($ctr),%r9
+	 mov	%r10d,0x40+12(%rsp)
+	bswap	%r9d
+	 lea	6($ctr),%r10
+	mov	240($key),$rounds		# key->rounds
+	xor	$key0,%r9d
+	 bswap	%r10d
+	mov	%r9d,0x50+12(%rsp)
+	 xor	$key0,%r10d
+	lea	7($ctr),%r9
+	 mov	%r10d,0x60+12(%rsp)
+	bswap	%r9d
+	 mov	OPENSSL_ia32cap_P+4(%rip),%r10d
+	xor	$key0,%r9d
+	 and	\$`1<<26|1<<22`,%r10d		# isolate XSAVE+MOVBE
+	mov	%r9d,0x70+12(%rsp)
+
+	$movkey	0x10($key),$rndkey1
+
+	movdqa	0x40(%rsp),$inout4
+	movdqa	0x50(%rsp),$inout5
+
+	cmp	\$8,$len		# $len is in blocks
+	jb	.Lctr32_tail		# short input if ($len<8)
+
+	sub	\$6,$len		# $len is biased by -6
+	cmp	\$`1<<22`,%r10d		# check for MOVBE without XSAVE
+	je	.Lctr32_6x		# [which denotes Atom Silvermont]
+
+	lea	0x80($key),$key		# size optimization
+	sub	\$2,$len		# $len is biased by -8
+	jmp	.Lctr32_loop8
+
+.align	16
+.Lctr32_6x:
+	shl	\$4,$rounds
+	mov	\$48,$rnds_
+	bswap	$key0
+	lea	32($key,$rounds),$key	# end of key schedule
+	sub	%rax,%r10		# twisted $rounds
+	jmp	.Lctr32_loop6
+
+.align	16
+.Lctr32_loop6:
+	 add	\$6,$ctr		# next counter value
+	$movkey	-48($key,$rnds_),$rndkey0
+	aesenc	$rndkey1,$inout0
+	 mov	$ctr,%eax
+	 xor	$key0,%eax
+	aesenc	$rndkey1,$inout1
+	 movbe	%eax,`0x00+12`(%rsp)	# store next counter value
+	 lea	1($ctr),%eax
+	aesenc	$rndkey1,$inout2
+	 xor	$key0,%eax
+	 movbe	%eax,`0x10+12`(%rsp)
+	aesenc	$rndkey1,$inout3
+	 lea	2($ctr),%eax
+	 xor	$key0,%eax
+	aesenc	$rndkey1,$inout4
+	 movbe	%eax,`0x20+12`(%rsp)
+	 lea	3($ctr),%eax
+	aesenc	$rndkey1,$inout5
+	$movkey	-32($key,$rnds_),$rndkey1
+	 xor	$key0,%eax
+
+	aesenc	$rndkey0,$inout0
+	 movbe	%eax,`0x30+12`(%rsp)
+	 lea	4($ctr),%eax
+	aesenc	$rndkey0,$inout1
+	 xor	$key0,%eax
+	 movbe	%eax,`0x40+12`(%rsp)
+	aesenc	$rndkey0,$inout2
+	 lea	5($ctr),%eax
+	 xor	$key0,%eax
+	aesenc	$rndkey0,$inout3
+	 movbe	%eax,`0x50+12`(%rsp)
+	 mov	%r10,%rax		# mov	$rnds_,$rounds
+	aesenc	$rndkey0,$inout4
+	aesenc	$rndkey0,$inout5
+	$movkey	-16($key,$rnds_),$rndkey0
+
+	call	.Lenc_loop6
+
+	movdqu	($inp),$inout6		# load 6 input blocks
+	movdqu	0x10($inp),$inout7
+	movdqu	0x20($inp),$in0
+	movdqu	0x30($inp),$in1
+	movdqu	0x40($inp),$in2
+	movdqu	0x50($inp),$in3
+	lea	0x60($inp),$inp		# $inp+=6*16
+	$movkey	-64($key,$rnds_),$rndkey1
+	pxor	$inout0,$inout6		# inp^=E(ctr)
+	movaps	0x00(%rsp),$inout0	# load next counter [xor-ed with 0 round]
+	pxor	$inout1,$inout7
+	movaps	0x10(%rsp),$inout1
+	pxor	$inout2,$in0
+	movaps	0x20(%rsp),$inout2
+	pxor	$inout3,$in1
+	movaps	0x30(%rsp),$inout3
+	pxor	$inout4,$in2
+	movaps	0x40(%rsp),$inout4
+	pxor	$inout5,$in3
+	movaps	0x50(%rsp),$inout5
+	movdqu	$inout6,($out)		# store 6 output blocks
+	movdqu	$inout7,0x10($out)
+	movdqu	$in0,0x20($out)
+	movdqu	$in1,0x30($out)
+	movdqu	$in2,0x40($out)
+	movdqu	$in3,0x50($out)
+	lea	0x60($out),$out		# $out+=6*16
+
+	sub	\$6,$len
+	jnc	.Lctr32_loop6		# loop if $len-=6 didn't borrow
+
+	add	\$6,$len		# restore real remaining $len
+	jz	.Lctr32_done		# done if ($len==0)
+
+	lea	-48($rnds_),$rounds
+	lea	-80($key,$rnds_),$key	# restore $key
+	neg	$rounds
+	shr	\$4,$rounds		# restore $rounds
+	jmp	.Lctr32_tail
+
+.align	32
+.Lctr32_loop8:
+	 add		\$8,$ctr		# next counter value
+	movdqa		0x60(%rsp),$inout6
+	aesenc		$rndkey1,$inout0
+	 mov		$ctr,%r9d
+	movdqa		0x70(%rsp),$inout7
+	aesenc		$rndkey1,$inout1
+	 bswap		%r9d
+	$movkey		0x20-0x80($key),$rndkey0
+	aesenc		$rndkey1,$inout2
+	 xor		$key0,%r9d
+	 nop
+	aesenc		$rndkey1,$inout3
+	 mov		%r9d,0x00+12(%rsp)	# store next counter value
+	 lea		1($ctr),%r9
+	aesenc		$rndkey1,$inout4
+	aesenc		$rndkey1,$inout5
+	aesenc		$rndkey1,$inout6
+	aesenc		$rndkey1,$inout7
+	$movkey		0x30-0x80($key),$rndkey1
+___
+for($i=2;$i<8;$i++) {
+my $rndkeyx = ($i&1)?$rndkey1:$rndkey0;
+$code.=<<___;
+	 bswap		%r9d
+	aesenc		$rndkeyx,$inout0
+	aesenc		$rndkeyx,$inout1
+	 xor		$key0,%r9d
+	 .byte		0x66,0x90		# 2-byte nop, alignment filler
+	aesenc		$rndkeyx,$inout2
+	aesenc		$rndkeyx,$inout3
+	 mov		%r9d,`0x10*($i-1)`+12(%rsp)
+	 lea		$i($ctr),%r9
+	aesenc		$rndkeyx,$inout4
+	aesenc		$rndkeyx,$inout5
+	aesenc		$rndkeyx,$inout6
+	aesenc		$rndkeyx,$inout7
+	$movkey		`0x20+0x10*$i`-0x80($key),$rndkeyx
+___
+}
+$code.=<<___;
+	 bswap		%r9d
+	aesenc		$rndkey0,$inout0
+	aesenc		$rndkey0,$inout1
+	aesenc		$rndkey0,$inout2
+	 xor		$key0,%r9d
+	 movdqu		0x00($inp),$in0		# start loading input
+	aesenc		$rndkey0,$inout3
+	 mov		%r9d,0x70+12(%rsp)
+	 cmp		\$11,$rounds
+	aesenc		$rndkey0,$inout4
+	aesenc		$rndkey0,$inout5
+	aesenc		$rndkey0,$inout6
+	aesenc		$rndkey0,$inout7
+	$movkey		0xa0-0x80($key),$rndkey0
+
+	jb		.Lctr32_enc_done
+
+	aesenc		$rndkey1,$inout0
+	aesenc		$rndkey1,$inout1
+	aesenc		$rndkey1,$inout2
+	aesenc		$rndkey1,$inout3
+	aesenc		$rndkey1,$inout4
+	aesenc		$rndkey1,$inout5
+	aesenc		$rndkey1,$inout6
+	aesenc		$rndkey1,$inout7
+	$movkey		0xb0-0x80($key),$rndkey1
+
+	aesenc		$rndkey0,$inout0
+	aesenc		$rndkey0,$inout1
+	aesenc		$rndkey0,$inout2
+	aesenc		$rndkey0,$inout3
+	aesenc		$rndkey0,$inout4
+	aesenc		$rndkey0,$inout5
+	aesenc		$rndkey0,$inout6
+	aesenc		$rndkey0,$inout7
+	$movkey		0xc0-0x80($key),$rndkey0
+	je		.Lctr32_enc_done
+
+	aesenc		$rndkey1,$inout0
+	aesenc		$rndkey1,$inout1
+	aesenc		$rndkey1,$inout2
+	aesenc		$rndkey1,$inout3
+	aesenc		$rndkey1,$inout4
+	aesenc		$rndkey1,$inout5
+	aesenc		$rndkey1,$inout6
+	aesenc		$rndkey1,$inout7
+	$movkey		0xd0-0x80($key),$rndkey1
+
+	aesenc		$rndkey0,$inout0
+	aesenc		$rndkey0,$inout1
+	aesenc		$rndkey0,$inout2
+	aesenc		$rndkey0,$inout3
+	aesenc		$rndkey0,$inout4
+	aesenc		$rndkey0,$inout5
+	aesenc		$rndkey0,$inout6
+	aesenc		$rndkey0,$inout7
+	$movkey		0xe0-0x80($key),$rndkey0
+	jmp		.Lctr32_enc_done
+
+.align	16
+.Lctr32_enc_done:
+	movdqu		0x10($inp),$in1
+	pxor		$rndkey0,$in0		# input^=round[last]
+	movdqu		0x20($inp),$in2
+	pxor		$rndkey0,$in1
+	movdqu		0x30($inp),$in3
+	pxor		$rndkey0,$in2
+	movdqu		0x40($inp),$in4
+	pxor		$rndkey0,$in3
+	movdqu		0x50($inp),$in5
+	pxor		$rndkey0,$in4
+	pxor		$rndkey0,$in5
+	aesenc		$rndkey1,$inout0
+	aesenc		$rndkey1,$inout1
+	aesenc		$rndkey1,$inout2
+	aesenc		$rndkey1,$inout3
+	aesenc		$rndkey1,$inout4
+	aesenc		$rndkey1,$inout5
+	aesenc		$rndkey1,$inout6
+	aesenc		$rndkey1,$inout7
+	movdqu		0x60($inp),$rndkey1	# borrow $rndkey1 for inp[6]
+	lea		0x80($inp),$inp		# $inp+=8*16
+
+	aesenclast	$in0,$inout0		# $inN is inp[N]^round[last]
+	pxor		$rndkey0,$rndkey1	# borrowed $rndkey
+	movdqu		0x70-0x80($inp),$in0
+	aesenclast	$in1,$inout1
+	pxor		$rndkey0,$in0
+	movdqa		0x00(%rsp),$in1		# load next counter block
+	aesenclast	$in2,$inout2
+	aesenclast	$in3,$inout3
+	movdqa		0x10(%rsp),$in2
+	movdqa		0x20(%rsp),$in3
+	aesenclast	$in4,$inout4
+	aesenclast	$in5,$inout5
+	movdqa		0x30(%rsp),$in4
+	movdqa		0x40(%rsp),$in5
+	aesenclast	$rndkey1,$inout6
+	movdqa		0x50(%rsp),$rndkey0
+	$movkey		0x10-0x80($key),$rndkey1	# real 1st-round key
+	aesenclast	$in0,$inout7
+
+	movups		$inout0,($out)		# store 8 output blocks
+	movdqa		$in1,$inout0
+	movups		$inout1,0x10($out)
+	movdqa		$in2,$inout1
+	movups		$inout2,0x20($out)
+	movdqa		$in3,$inout2
+	movups		$inout3,0x30($out)
+	movdqa		$in4,$inout3
+	movups		$inout4,0x40($out)
+	movdqa		$in5,$inout4
+	movups		$inout5,0x50($out)
+	movdqa		$rndkey0,$inout5
+	movups		$inout6,0x60($out)
+	movups		$inout7,0x70($out)
+	lea		0x80($out),$out		# $out+=8*16
+
+	sub	\$8,$len
+	jnc	.Lctr32_loop8			# loop if $len-=8 didn't borrow
+
+	add	\$8,$len			# restore real remaining $len
+	jz	.Lctr32_done			# done if ($len==0)
+	lea	-0x80($key),$key
+
+.Lctr32_tail:
+	# note that at this point $inout0..5 are populated with
+	# counter values xor-ed with 0-round key
+	lea	16($key),$key
+	cmp	\$4,$len
+	jb	.Lctr32_loop3
+	je	.Lctr32_loop4
+
+	# if ($len>4) compute 7 E(counter)
+	shl		\$4,$rounds
+	movdqa		0x60(%rsp),$inout6
+	pxor		$inout7,$inout7
+
+	$movkey		16($key),$rndkey0
+	aesenc		$rndkey1,$inout0
+	aesenc		$rndkey1,$inout1
+	lea		32-16($key,$rounds),$key	# prepare for .Lenc_loop8_enter
+	neg		%rax
+	aesenc		$rndkey1,$inout2
+	add		\$16,%rax		# prepare for .Lenc_loop8_enter
+	 movups		($inp),$in0
+	aesenc		$rndkey1,$inout3
+	aesenc		$rndkey1,$inout4
+	 movups		0x10($inp),$in1		# pre-load input
+	 movups		0x20($inp),$in2
+	aesenc		$rndkey1,$inout5
+	aesenc		$rndkey1,$inout6
+
+	call            .Lenc_loop8_enter
+
+	movdqu	0x30($inp),$in3
+	pxor	$in0,$inout0
+	movdqu	0x40($inp),$in0
+	pxor	$in1,$inout1
+	movdqu	$inout0,($out)			# store output
+	pxor	$in2,$inout2
+	movdqu	$inout1,0x10($out)
+	pxor	$in3,$inout3
+	movdqu	$inout2,0x20($out)
+	pxor	$in0,$inout4
+	movdqu	$inout3,0x30($out)
+	movdqu	$inout4,0x40($out)
+	cmp	\$6,$len
+	jb	.Lctr32_done			# $len was 5, stop storing
+
+	movups	0x50($inp),$in1
+	xorps	$in1,$inout5
+	movups	$inout5,0x50($out)
+	je	.Lctr32_done			# $len was 6, stop storing
+
+	movups	0x60($inp),$in2
+	xorps	$in2,$inout6
+	movups	$inout6,0x60($out)
+	jmp	.Lctr32_done			# $len was 7, stop storing
+
+.align	32
+.Lctr32_loop4:
+	aesenc		$rndkey1,$inout0
+	lea		16($key),$key
+	dec		$rounds
+	aesenc		$rndkey1,$inout1
+	aesenc		$rndkey1,$inout2
+	aesenc		$rndkey1,$inout3
+	$movkey		($key),$rndkey1
+	jnz		.Lctr32_loop4
+	aesenclast	$rndkey1,$inout0
+	aesenclast	$rndkey1,$inout1
+	 movups		($inp),$in0		# load input
+	 movups		0x10($inp),$in1
+	aesenclast	$rndkey1,$inout2
+	aesenclast	$rndkey1,$inout3
+	 movups		0x20($inp),$in2
+	 movups		0x30($inp),$in3
+
+	xorps	$in0,$inout0
+	movups	$inout0,($out)			# store output
+	xorps	$in1,$inout1
+	movups	$inout1,0x10($out)
+	pxor	$in2,$inout2
+	movdqu	$inout2,0x20($out)
+	pxor	$in3,$inout3
+	movdqu	$inout3,0x30($out)
+	jmp	.Lctr32_done			# $len was 4, stop storing
+
+.align	32
+.Lctr32_loop3:
+	aesenc		$rndkey1,$inout0
+	lea		16($key),$key
+	dec		$rounds
+	aesenc		$rndkey1,$inout1
+	aesenc		$rndkey1,$inout2
+	$movkey		($key),$rndkey1
+	jnz		.Lctr32_loop3
+	aesenclast	$rndkey1,$inout0
+	aesenclast	$rndkey1,$inout1
+	aesenclast	$rndkey1,$inout2
+
+	movups	($inp),$in0			# load input
+	xorps	$in0,$inout0
+	movups	$inout0,($out)			# store output
+	cmp	\$2,$len
+	jb	.Lctr32_done			# $len was 1, stop storing
+
+	movups	0x10($inp),$in1
+	xorps	$in1,$inout1
+	movups	$inout1,0x10($out)
+	je	.Lctr32_done			# $len was 2, stop storing
+
+	movups	0x20($inp),$in2
+	xorps	$in2,$inout2
+	movups	$inout2,0x20($out)		# $len was 3, stop storing
+
+.Lctr32_done:
+	xorps	%xmm0,%xmm0			# clear register bank
+	xor	$key0,$key0
+	pxor	%xmm1,%xmm1
+	pxor	%xmm2,%xmm2
+	pxor	%xmm3,%xmm3
+	pxor	%xmm4,%xmm4
+	pxor	%xmm5,%xmm5
+___
+$code.=<<___ if (!$win64);
+	pxor	%xmm6,%xmm6
+	pxor	%xmm7,%xmm7
+	movaps	%xmm0,0x00(%rsp)		# clear stack
+	pxor	%xmm8,%xmm8
+	movaps	%xmm0,0x10(%rsp)
+	pxor	%xmm9,%xmm9
+	movaps	%xmm0,0x20(%rsp)
+	pxor	%xmm10,%xmm10
+	movaps	%xmm0,0x30(%rsp)
+	pxor	%xmm11,%xmm11
+	movaps	%xmm0,0x40(%rsp)
+	pxor	%xmm12,%xmm12
+	movaps	%xmm0,0x50(%rsp)
+	pxor	%xmm13,%xmm13
+	movaps	%xmm0,0x60(%rsp)
+	pxor	%xmm14,%xmm14
+	movaps	%xmm0,0x70(%rsp)
+	pxor	%xmm15,%xmm15
+___
+$code.=<<___ if ($win64);
+	movaps	-0xa8($key_),%xmm6
+	movaps	%xmm0,-0xa8($key_)		# clear stack
+	movaps	-0x98($key_),%xmm7
+	movaps	%xmm0,-0x98($key_)
+	movaps	-0x88($key_),%xmm8
+	movaps	%xmm0,-0x88($key_)
+	movaps	-0x78($key_),%xmm9
+	movaps	%xmm0,-0x78($key_)
+	movaps	-0x68($key_),%xmm10
+	movaps	%xmm0,-0x68($key_)
+	movaps	-0x58($key_),%xmm11
+	movaps	%xmm0,-0x58($key_)
+	movaps	-0x48($key_),%xmm12
+	movaps	%xmm0,-0x48($key_)
+	movaps	-0x38($key_),%xmm13
+	movaps	%xmm0,-0x38($key_)
+	movaps	-0x28($key_),%xmm14
+	movaps	%xmm0,-0x28($key_)
+	movaps	-0x18($key_),%xmm15
+	movaps	%xmm0,-0x18($key_)
+	movaps	%xmm0,0x00(%rsp)
+	movaps	%xmm0,0x10(%rsp)
+	movaps	%xmm0,0x20(%rsp)
+	movaps	%xmm0,0x30(%rsp)
+	movaps	%xmm0,0x40(%rsp)
+	movaps	%xmm0,0x50(%rsp)
+	movaps	%xmm0,0x60(%rsp)
+	movaps	%xmm0,0x70(%rsp)
+___
+$code.=<<___;
+	mov	-8($key_),%rbp
+.cfi_restore	%rbp
+	lea	($key_),%rsp
+.cfi_def_cfa_register	%rsp
+.Lctr32_epilogue:
+	ret
+.cfi_endproc
+.size	aesni_ctr32_encrypt_blocks,.-aesni_ctr32_encrypt_blocks
+___
+}
+
+######################################################################
+# void aesni_xts_[en|de]crypt(const char *inp,char *out,size_t len,
+#	const AES_KEY *key1, const AES_KEY *key2,
+#	const unsigned char iv[16]);
+#
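+# High-level model of what follows (C-like sketch, illustrative only;
+# xor16 and gf128_mul_alpha are assumed helpers, the latter spelled out
+# scalar-wise near the tweak-computation loop below): key2 turns the input
+# IV into the initial tweak, key1 encrypts the data, and the tweak is
+# multiplied by alpha in GF(2^128) between consecutive blocks:
+#
+#	AES_encrypt(iv, T, key2);		/* initial tweak */
+#	for (each 16-byte block) {
+#		xor16(tmp, P, T);
+#		AES_encrypt(tmp, tmp, key1);
+#		xor16(C, tmp, T);
+#		gf128_mul_alpha(T);		/* next tweak    */
+#	}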
+{
+my @tweak=map("%xmm$_",(10..15));
+my ($twmask,$twres,$twtmp)=("%xmm8","%xmm9",@tweak[4]);
+my ($key2,$ivp,$len_)=("%r8","%r9","%r9");
+my $frame_size = 0x70 + ($win64?160:0);
+my $key_ = "%rbp";	# override so that we can use %r11 as FP
+
+$code.=<<___;
+.globl	aesni_xts_encrypt
+.type	aesni_xts_encrypt,\@function,6
+.align	16
+aesni_xts_encrypt:
+.cfi_startproc
+	lea	(%rsp),%r11			# frame pointer
+.cfi_def_cfa_register	%r11
+	push	%rbp
+.cfi_push	%rbp
+	sub	\$$frame_size,%rsp
+	and	\$-16,%rsp	# Linux kernel stack can be incorrectly seeded
+___
+$code.=<<___ if ($win64);
+	movaps	%xmm6,-0xa8(%r11)		# offload everything
+	movaps	%xmm7,-0x98(%r11)
+	movaps	%xmm8,-0x88(%r11)
+	movaps	%xmm9,-0x78(%r11)
+	movaps	%xmm10,-0x68(%r11)
+	movaps	%xmm11,-0x58(%r11)
+	movaps	%xmm12,-0x48(%r11)
+	movaps	%xmm13,-0x38(%r11)
+	movaps	%xmm14,-0x28(%r11)
+	movaps	%xmm15,-0x18(%r11)
+.Lxts_enc_body:
+___
+$code.=<<___;
+	movups	($ivp),$inout0			# load clear-text tweak
+	mov	240(%r8),$rounds		# key2->rounds
+	mov	240($key),$rnds_		# key1->rounds
+___
+	# generate the tweak
+	&aesni_generate1("enc",$key2,$rounds,$inout0);
+$code.=<<___;
+	$movkey	($key),$rndkey0			# zero round key
+	mov	$key,$key_			# backup $key
+	mov	$rnds_,$rounds			# backup $rounds
+	shl	\$4,$rnds_
+	mov	$len,$len_			# backup $len
+	and	\$-16,$len
+
+	$movkey	16($key,$rnds_),$rndkey1	# last round key
+
+	movdqa	.Lxts_magic(%rip),$twmask
+	movdqa	$inout0,@tweak[5]
+	pshufd	\$0x5f,$inout0,$twres
+	pxor	$rndkey0,$rndkey1
+___
+    # alternative tweak calculation algorithm is based on suggestions
+    # by Shay Gueron. psrad doesn't conflict with AES-NI instructions
+    # and should help in the future...
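+    # scalar equivalent of one tweak update (C sketch, assumed layout:
+    # t[0] holds the low 64 bits of the tweak, t[1] the high 64 bits):
+    #
+    #	uint64_t c127 = (uint64_t)((int64_t)t[1] >> 63);  /* sign spread  */
+    #	uint64_t c63  = (uint64_t)((int64_t)t[0] >> 63);
+    #	t[1] = (t[1] << 1) ^ (c63  & 1);	/* carry across the halves */
+    #	t[0] = (t[0] << 1) ^ (c127 & 0x87);	/* reduce by x^128 poly    */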
+    for ($i=0;$i<4;$i++) {
+    $code.=<<___;
+	movdqa	$twres,$twtmp
+	paddd	$twres,$twres
+	movdqa	@tweak[5],@tweak[$i]
+	psrad	\$31,$twtmp			# broadcast upper bits
+	paddq	@tweak[5],@tweak[5]
+	pand	$twmask,$twtmp
+	pxor	$rndkey0,@tweak[$i]
+	pxor	$twtmp,@tweak[5]
+___
+    }
+$code.=<<___;
+	movdqa	@tweak[5],@tweak[4]
+	psrad	\$31,$twres
+	paddq	@tweak[5],@tweak[5]
+	pand	$twmask,$twres
+	pxor	$rndkey0,@tweak[4]
+	pxor	$twres,@tweak[5]
+	movaps	$rndkey1,0x60(%rsp)		# save round[0]^round[last]
+
+	sub	\$16*6,$len
+	jc	.Lxts_enc_short			# if $len-=6*16 borrowed
+
+	mov	\$16+96,$rounds
+	lea	32($key_,$rnds_),$key		# end of key schedule
+	sub	%r10,%rax			# twisted $rounds
+	$movkey	16($key_),$rndkey1
+	mov	%rax,%r10			# backup twisted $rounds
+	lea	.Lxts_magic(%rip),%r8
+	jmp	.Lxts_enc_grandloop
+
+.align	32
+.Lxts_enc_grandloop:
+	movdqu	`16*0`($inp),$inout0		# load input
+	movdqa	$rndkey0,$twmask
+	movdqu	`16*1`($inp),$inout1
+	pxor	@tweak[0],$inout0		# input^=tweak^round[0]
+	movdqu	`16*2`($inp),$inout2
+	pxor	@tweak[1],$inout1
+	 aesenc		$rndkey1,$inout0
+	movdqu	`16*3`($inp),$inout3
+	pxor	@tweak[2],$inout2
+	 aesenc		$rndkey1,$inout1
+	movdqu	`16*4`($inp),$inout4
+	pxor	@tweak[3],$inout3
+	 aesenc		$rndkey1,$inout2
+	movdqu	`16*5`($inp),$inout5
+	pxor	@tweak[5],$twmask		# round[0]^=tweak[5]
+	 movdqa	0x60(%rsp),$twres		# load round[0]^round[last]
+	pxor	@tweak[4],$inout4
+	 aesenc		$rndkey1,$inout3
+	$movkey	32($key_),$rndkey0
+	lea	`16*6`($inp),$inp
+	pxor	$twmask,$inout5
+
+	 pxor	$twres,@tweak[0]		# calculate tweaks^round[last]
+	aesenc		$rndkey1,$inout4
+	 pxor	$twres,@tweak[1]
+	 movdqa	@tweak[0],`16*0`(%rsp)		# put aside tweaks^round[last]
+	aesenc		$rndkey1,$inout5
+	$movkey		48($key_),$rndkey1
+	 pxor	$twres,@tweak[2]
+
+	aesenc		$rndkey0,$inout0
+	 pxor	$twres,@tweak[3]
+	 movdqa	@tweak[1],`16*1`(%rsp)
+	aesenc		$rndkey0,$inout1
+	 pxor	$twres,@tweak[4]
+	 movdqa	@tweak[2],`16*2`(%rsp)
+	aesenc		$rndkey0,$inout2
+	aesenc		$rndkey0,$inout3
+	 pxor	$twres,$twmask
+	 movdqa	@tweak[4],`16*4`(%rsp)
+	aesenc		$rndkey0,$inout4
+	aesenc		$rndkey0,$inout5
+	$movkey		64($key_),$rndkey0
+	 movdqa	$twmask,`16*5`(%rsp)
+	pshufd	\$0x5f,@tweak[5],$twres
+	jmp	.Lxts_enc_loop6
+.align	32
+.Lxts_enc_loop6:
+	aesenc		$rndkey1,$inout0
+	aesenc		$rndkey1,$inout1
+	aesenc		$rndkey1,$inout2
+	aesenc		$rndkey1,$inout3
+	aesenc		$rndkey1,$inout4
+	aesenc		$rndkey1,$inout5
+	$movkey		-64($key,%rax),$rndkey1
+	add		\$32,%rax
+
+	aesenc		$rndkey0,$inout0
+	aesenc		$rndkey0,$inout1
+	aesenc		$rndkey0,$inout2
+	aesenc		$rndkey0,$inout3
+	aesenc		$rndkey0,$inout4
+	aesenc		$rndkey0,$inout5
+	$movkey		-80($key,%rax),$rndkey0
+	jnz		.Lxts_enc_loop6
+
+	movdqa	(%r8),$twmask			# start calculating next tweak
+	movdqa	$twres,$twtmp
+	paddd	$twres,$twres
+	 aesenc		$rndkey1,$inout0
+	paddq	@tweak[5],@tweak[5]
+	psrad	\$31,$twtmp
+	 aesenc		$rndkey1,$inout1
+	pand	$twmask,$twtmp
+	$movkey	($key_),@tweak[0]		# load round[0]
+	 aesenc		$rndkey1,$inout2
+	 aesenc		$rndkey1,$inout3
+	 aesenc		$rndkey1,$inout4
+	pxor	$twtmp,@tweak[5]
+	movaps	@tweak[0],@tweak[1]		# copy round[0]
+	 aesenc		$rndkey1,$inout5
+	 $movkey	-64($key),$rndkey1
+
+	movdqa	$twres,$twtmp
+	 aesenc		$rndkey0,$inout0
+	paddd	$twres,$twres
+	pxor	@tweak[5],@tweak[0]
+	 aesenc		$rndkey0,$inout1
+	psrad	\$31,$twtmp
+	paddq	@tweak[5],@tweak[5]
+	 aesenc		$rndkey0,$inout2
+	 aesenc		$rndkey0,$inout3
+	pand	$twmask,$twtmp
+	movaps	@tweak[1],@tweak[2]
+	 aesenc		$rndkey0,$inout4
+	pxor	$twtmp,@tweak[5]
+	movdqa	$twres,$twtmp
+	 aesenc		$rndkey0,$inout5
+	 $movkey	-48($key),$rndkey0
+
+	paddd	$twres,$twres
+	 aesenc		$rndkey1,$inout0
+	pxor	@tweak[5],@tweak[1]
+	psrad	\$31,$twtmp
+	 aesenc		$rndkey1,$inout1
+	paddq	@tweak[5],@tweak[5]
+	pand	$twmask,$twtmp
+	 aesenc		$rndkey1,$inout2
+	 aesenc		$rndkey1,$inout3
+	 movdqa	@tweak[3],`16*3`(%rsp)
+	pxor	$twtmp,@tweak[5]
+	 aesenc		$rndkey1,$inout4
+	movaps	@tweak[2],@tweak[3]
+	movdqa	$twres,$twtmp
+	 aesenc		$rndkey1,$inout5
+	 $movkey	-32($key),$rndkey1
+
+	paddd	$twres,$twres
+	 aesenc		$rndkey0,$inout0
+	pxor	@tweak[5],@tweak[2]
+	psrad	\$31,$twtmp
+	 aesenc		$rndkey0,$inout1
+	paddq	@tweak[5],@tweak[5]
+	pand	$twmask,$twtmp
+	 aesenc		$rndkey0,$inout2
+	 aesenc		$rndkey0,$inout3
+	 aesenc		$rndkey0,$inout4
+	pxor	$twtmp,@tweak[5]
+	movaps	@tweak[3],@tweak[4]
+	 aesenc		$rndkey0,$inout5
+
+	movdqa	$twres,$rndkey0
+	paddd	$twres,$twres
+	 aesenc		$rndkey1,$inout0
+	pxor	@tweak[5],@tweak[3]
+	psrad	\$31,$rndkey0
+	 aesenc		$rndkey1,$inout1
+	paddq	@tweak[5],@tweak[5]
+	pand	$twmask,$rndkey0
+	 aesenc		$rndkey1,$inout2
+	 aesenc		$rndkey1,$inout3
+	pxor	$rndkey0,@tweak[5]
+	$movkey		($key_),$rndkey0
+	 aesenc		$rndkey1,$inout4
+	 aesenc		$rndkey1,$inout5
+	$movkey		16($key_),$rndkey1
+
+	pxor	@tweak[5],@tweak[4]
+	 aesenclast	`16*0`(%rsp),$inout0
+	psrad	\$31,$twres
+	paddq	@tweak[5],@tweak[5]
+	 aesenclast	`16*1`(%rsp),$inout1
+	 aesenclast	`16*2`(%rsp),$inout2
+	pand	$twmask,$twres
+	mov	%r10,%rax			# restore $rounds
+	 aesenclast	`16*3`(%rsp),$inout3
+	 aesenclast	`16*4`(%rsp),$inout4
+	 aesenclast	`16*5`(%rsp),$inout5
+	pxor	$twres,@tweak[5]
+
+	lea	`16*6`($out),$out		# $out+=6*16
+	movups	$inout0,`-16*6`($out)		# store 6 output blocks
+	movups	$inout1,`-16*5`($out)
+	movups	$inout2,`-16*4`($out)
+	movups	$inout3,`-16*3`($out)
+	movups	$inout4,`-16*2`($out)
+	movups	$inout5,`-16*1`($out)
+	sub	\$16*6,$len
+	jnc	.Lxts_enc_grandloop		# loop if $len-=6*16 didn't borrow
+
+	mov	\$16+96,$rounds
+	sub	$rnds_,$rounds
+	mov	$key_,$key			# restore $key
+	shr	\$4,$rounds			# restore original value
+
+.Lxts_enc_short:
+	# at this point @tweak[0..5] are populated with tweak values
+	mov	$rounds,$rnds_			# backup $rounds
+	pxor	$rndkey0,@tweak[0]
+	add	\$16*6,$len			# restore real remaining $len
+	jz	.Lxts_enc_done			# done if ($len==0)
+
+	pxor	$rndkey0,@tweak[1]
+	cmp	\$0x20,$len
+	jb	.Lxts_enc_one			# $len is 1*16
+	pxor	$rndkey0,@tweak[2]
+	je	.Lxts_enc_two			# $len is 2*16
+
+	pxor	$rndkey0,@tweak[3]
+	cmp	\$0x40,$len
+	jb	.Lxts_enc_three			# $len is 3*16
+	pxor	$rndkey0,@tweak[4]
+	je	.Lxts_enc_four			# $len is 4*16
+
+	movdqu	($inp),$inout0			# $len is 5*16
+	movdqu	16*1($inp),$inout1
+	movdqu	16*2($inp),$inout2
+	pxor	@tweak[0],$inout0
+	movdqu	16*3($inp),$inout3
+	pxor	@tweak[1],$inout1
+	movdqu	16*4($inp),$inout4
+	lea	16*5($inp),$inp			# $inp+=5*16
+	pxor	@tweak[2],$inout2
+	pxor	@tweak[3],$inout3
+	pxor	@tweak[4],$inout4
+	pxor	$inout5,$inout5
+
+	call	_aesni_encrypt6
+
+	xorps	@tweak[0],$inout0
+	movdqa	@tweak[5],@tweak[0]
+	xorps	@tweak[1],$inout1
+	xorps	@tweak[2],$inout2
+	movdqu	$inout0,($out)			# store 5 output blocks
+	xorps	@tweak[3],$inout3
+	movdqu	$inout1,16*1($out)
+	xorps	@tweak[4],$inout4
+	movdqu	$inout2,16*2($out)
+	movdqu	$inout3,16*3($out)
+	movdqu	$inout4,16*4($out)
+	lea	16*5($out),$out			# $out+=5*16
+	jmp	.Lxts_enc_done
+
+.align	16
+.Lxts_enc_one:
+	movups	($inp),$inout0
+	lea	16*1($inp),$inp			# inp+=1*16
+	xorps	@tweak[0],$inout0
+___
+	&aesni_generate1("enc",$key,$rounds);
+$code.=<<___;
+	xorps	@tweak[0],$inout0
+	movdqa	@tweak[1],@tweak[0]
+	movups	$inout0,($out)			# store one output block
+	lea	16*1($out),$out			# $out+=1*16
+	jmp	.Lxts_enc_done
+
+.align	16
+.Lxts_enc_two:
+	movups	($inp),$inout0
+	movups	16($inp),$inout1
+	lea	32($inp),$inp			# $inp+=2*16
+	xorps	@tweak[0],$inout0
+	xorps	@tweak[1],$inout1
+
+	call	_aesni_encrypt2
+
+	xorps	@tweak[0],$inout0
+	movdqa	@tweak[2],@tweak[0]
+	xorps	@tweak[1],$inout1
+	movups	$inout0,($out)			# store 2 output blocks
+	movups	$inout1,16*1($out)
+	lea	16*2($out),$out			# $out+=2*16
+	jmp	.Lxts_enc_done
+
+.align	16
+.Lxts_enc_three:
+	movups	($inp),$inout0
+	movups	16*1($inp),$inout1
+	movups	16*2($inp),$inout2
+	lea	16*3($inp),$inp			# $inp+=3*16
+	xorps	@tweak[0],$inout0
+	xorps	@tweak[1],$inout1
+	xorps	@tweak[2],$inout2
+
+	call	_aesni_encrypt3
+
+	xorps	@tweak[0],$inout0
+	movdqa	@tweak[3],@tweak[0]
+	xorps	@tweak[1],$inout1
+	xorps	@tweak[2],$inout2
+	movups	$inout0,($out)			# store 3 output blocks
+	movups	$inout1,16*1($out)
+	movups	$inout2,16*2($out)
+	lea	16*3($out),$out			# $out+=3*16
+	jmp	.Lxts_enc_done
+
+.align	16
+.Lxts_enc_four:
+	movups	($inp),$inout0
+	movups	16*1($inp),$inout1
+	movups	16*2($inp),$inout2
+	xorps	@tweak[0],$inout0
+	movups	16*3($inp),$inout3
+	lea	16*4($inp),$inp			# $inp+=4*16
+	xorps	@tweak[1],$inout1
+	xorps	@tweak[2],$inout2
+	xorps	@tweak[3],$inout3
+
+	call	_aesni_encrypt4
+
+	pxor	@tweak[0],$inout0
+	movdqa	@tweak[4],@tweak[0]
+	pxor	@tweak[1],$inout1
+	pxor	@tweak[2],$inout2
+	movdqu	$inout0,($out)			# store 4 output blocks
+	pxor	@tweak[3],$inout3
+	movdqu	$inout1,16*1($out)
+	movdqu	$inout2,16*2($out)
+	movdqu	$inout3,16*3($out)
+	lea	16*4($out),$out			# $out+=4*16
+	jmp	.Lxts_enc_done
+
+.align	16
+.Lxts_enc_done:
+	and	\$15,$len_			# see if $len%16 is 0
+	jz	.Lxts_enc_ret
+	mov	$len_,$len
+
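+	# ciphertext stealing, modeled in C (illustrative only): the partial
+	# tail swaps bytes with the previous ciphertext block, and the patched
+	# next-to-last block is re-encrypted with @tweak[0] afterwards:
+	#
+	#	for (i = 0; i < len % 16; i++) {
+	#		out[i]      = out[i - 16];	/* steal ciphertext byte */
+	#		out[i - 16] = in[i];		/* plant plaintext byte  */
+	#	}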
+.Lxts_enc_steal:
+	movzb	($inp),%eax			# borrow $rounds ...
+	movzb	-16($out),%ecx			# ... and $key
+	lea	1($inp),$inp
+	mov	%al,-16($out)
+	mov	%cl,0($out)
+	lea	1($out),$out
+	sub	\$1,$len
+	jnz	.Lxts_enc_steal
+
+	sub	$len_,$out			# rewind $out
+	mov	$key_,$key			# restore $key
+	mov	$rnds_,$rounds			# restore $rounds
+
+	movups	-16($out),$inout0
+	xorps	@tweak[0],$inout0
+___
+	&aesni_generate1("enc",$key,$rounds);
+$code.=<<___;
+	xorps	@tweak[0],$inout0
+	movups	$inout0,-16($out)
+
+.Lxts_enc_ret:
+	xorps	%xmm0,%xmm0			# clear register bank
+	pxor	%xmm1,%xmm1
+	pxor	%xmm2,%xmm2
+	pxor	%xmm3,%xmm3
+	pxor	%xmm4,%xmm4
+	pxor	%xmm5,%xmm5
+___
+$code.=<<___ if (!$win64);
+	pxor	%xmm6,%xmm6
+	pxor	%xmm7,%xmm7
+	movaps	%xmm0,0x00(%rsp)		# clear stack
+	pxor	%xmm8,%xmm8
+	movaps	%xmm0,0x10(%rsp)
+	pxor	%xmm9,%xmm9
+	movaps	%xmm0,0x20(%rsp)
+	pxor	%xmm10,%xmm10
+	movaps	%xmm0,0x30(%rsp)
+	pxor	%xmm11,%xmm11
+	movaps	%xmm0,0x40(%rsp)
+	pxor	%xmm12,%xmm12
+	movaps	%xmm0,0x50(%rsp)
+	pxor	%xmm13,%xmm13
+	movaps	%xmm0,0x60(%rsp)
+	pxor	%xmm14,%xmm14
+	pxor	%xmm15,%xmm15
+___
+$code.=<<___ if ($win64);
+	movaps	-0xa8(%r11),%xmm6
+	movaps	%xmm0,-0xa8(%r11)		# clear stack
+	movaps	-0x98(%r11),%xmm7
+	movaps	%xmm0,-0x98(%r11)
+	movaps	-0x88(%r11),%xmm8
+	movaps	%xmm0,-0x88(%r11)
+	movaps	-0x78(%r11),%xmm9
+	movaps	%xmm0,-0x78(%r11)
+	movaps	-0x68(%r11),%xmm10
+	movaps	%xmm0,-0x68(%r11)
+	movaps	-0x58(%r11),%xmm11
+	movaps	%xmm0,-0x58(%r11)
+	movaps	-0x48(%r11),%xmm12
+	movaps	%xmm0,-0x48(%r11)
+	movaps	-0x38(%r11),%xmm13
+	movaps	%xmm0,-0x38(%r11)
+	movaps	-0x28(%r11),%xmm14
+	movaps	%xmm0,-0x28(%r11)
+	movaps	-0x18(%r11),%xmm15
+	movaps	%xmm0,-0x18(%r11)
+	movaps	%xmm0,0x00(%rsp)
+	movaps	%xmm0,0x10(%rsp)
+	movaps	%xmm0,0x20(%rsp)
+	movaps	%xmm0,0x30(%rsp)
+	movaps	%xmm0,0x40(%rsp)
+	movaps	%xmm0,0x50(%rsp)
+	movaps	%xmm0,0x60(%rsp)
+___
+$code.=<<___;
+	mov	-8(%r11),%rbp
+.cfi_restore	%rbp
+	lea	(%r11),%rsp
+.cfi_def_cfa_register	%rsp
+.Lxts_enc_epilogue:
+	ret
+.cfi_endproc
+.size	aesni_xts_encrypt,.-aesni_xts_encrypt
+___
+
+$code.=<<___;
+.globl	aesni_xts_decrypt
+.type	aesni_xts_decrypt,\@function,6
+.align	16
+aesni_xts_decrypt:
+.cfi_startproc
+	lea	(%rsp),%r11			# frame pointer
+.cfi_def_cfa_register	%r11
+	push	%rbp
+.cfi_push	%rbp
+	sub	\$$frame_size,%rsp
+	and	\$-16,%rsp	# Linux kernel stack can be incorrectly seeded
+___
+$code.=<<___ if ($win64);
+	movaps	%xmm6,-0xa8(%r11)		# offload everything
+	movaps	%xmm7,-0x98(%r11)
+	movaps	%xmm8,-0x88(%r11)
+	movaps	%xmm9,-0x78(%r11)
+	movaps	%xmm10,-0x68(%r11)
+	movaps	%xmm11,-0x58(%r11)
+	movaps	%xmm12,-0x48(%r11)
+	movaps	%xmm13,-0x38(%r11)
+	movaps	%xmm14,-0x28(%r11)
+	movaps	%xmm15,-0x18(%r11)
+.Lxts_dec_body:
+___
+$code.=<<___;
+	movups	($ivp),$inout0			# load clear-text tweak
+	mov	240($key2),$rounds		# key2->rounds
+	mov	240($key),$rnds_		# key1->rounds
+___
+	# generate the tweak
+	&aesni_generate1("enc",$key2,$rounds,$inout0);
+$code.=<<___;
+	xor	%eax,%eax			# if ($len%16) len-=16;
+	test	\$15,$len
+	setnz	%al
+	shl	\$4,%rax
+	sub	%rax,$len
+
+	$movkey	($key),$rndkey0			# zero round key
+	mov	$key,$key_			# backup $key
+	mov	$rnds_,$rounds			# backup $rounds
+	shl	\$4,$rnds_
+	mov	$len,$len_			# backup $len
+	and	\$-16,$len
+
+	$movkey	16($key,$rnds_),$rndkey1	# last round key
+
+	movdqa	.Lxts_magic(%rip),$twmask
+	movdqa	$inout0,@tweak[5]
+	pshufd	\$0x5f,$inout0,$twres
+	pxor	$rndkey0,$rndkey1
+___
+    for ($i=0;$i<4;$i++) {
+    $code.=<<___;
+	movdqa	$twres,$twtmp
+	paddd	$twres,$twres
+	movdqa	@tweak[5],@tweak[$i]
+	psrad	\$31,$twtmp			# broadcast upper bits
+	paddq	@tweak[5],@tweak[5]
+	pand	$twmask,$twtmp
+	pxor	$rndkey0,@tweak[$i]
+	pxor	$twtmp,@tweak[5]
+___
+    }
+$code.=<<___;
+	movdqa	@tweak[5],@tweak[4]
+	psrad	\$31,$twres
+	paddq	@tweak[5],@tweak[5]
+	pand	$twmask,$twres
+	pxor	$rndkey0,@tweak[4]
+	pxor	$twres,@tweak[5]
+	movaps	$rndkey1,0x60(%rsp)		# save round[0]^round[last]
+
+	sub	\$16*6,$len
+	jc	.Lxts_dec_short			# if $len-=6*16 borrowed
+
+	mov	\$16+96,$rounds
+	lea	32($key_,$rnds_),$key		# end of key schedule
+	sub	%r10,%rax			# twisted $rounds
+	$movkey	16($key_),$rndkey1
+	mov	%rax,%r10			# backup twisted $rounds
+	lea	.Lxts_magic(%rip),%r8
+	jmp	.Lxts_dec_grandloop
+
+.align	32
+.Lxts_dec_grandloop:
+	movdqu	`16*0`($inp),$inout0		# load input
+	movdqa	$rndkey0,$twmask
+	movdqu	`16*1`($inp),$inout1
+	pxor	@tweak[0],$inout0		# input^=tweak^round[0]
+	movdqu	`16*2`($inp),$inout2
+	pxor	@tweak[1],$inout1
+	 aesdec		$rndkey1,$inout0
+	movdqu	`16*3`($inp),$inout3
+	pxor	@tweak[2],$inout2
+	 aesdec		$rndkey1,$inout1
+	movdqu	`16*4`($inp),$inout4
+	pxor	@tweak[3],$inout3
+	 aesdec		$rndkey1,$inout2
+	movdqu	`16*5`($inp),$inout5
+	pxor	@tweak[5],$twmask		# round[0]^=tweak[5]
+	 movdqa	0x60(%rsp),$twres		# load round[0]^round[last]
+	pxor	@tweak[4],$inout4
+	 aesdec		$rndkey1,$inout3
+	$movkey	32($key_),$rndkey0
+	lea	`16*6`($inp),$inp
+	pxor	$twmask,$inout5
+
+	 pxor	$twres,@tweak[0]		# calculate tweaks^round[last]
+	aesdec		$rndkey1,$inout4
+	 pxor	$twres,@tweak[1]
+	 movdqa	@tweak[0],`16*0`(%rsp)		# put aside tweaks^round[last]
+	aesdec		$rndkey1,$inout5
+	$movkey		48($key_),$rndkey1
+	 pxor	$twres,@tweak[2]
+
+	aesdec		$rndkey0,$inout0
+	 pxor	$twres,@tweak[3]
+	 movdqa	@tweak[1],`16*1`(%rsp)
+	aesdec		$rndkey0,$inout1
+	 pxor	$twres,@tweak[4]
+	 movdqa	@tweak[2],`16*2`(%rsp)
+	aesdec		$rndkey0,$inout2
+	aesdec		$rndkey0,$inout3
+	 pxor	$twres,$twmask
+	 movdqa	@tweak[4],`16*4`(%rsp)
+	aesdec		$rndkey0,$inout4
+	aesdec		$rndkey0,$inout5
+	$movkey		64($key_),$rndkey0
+	 movdqa	$twmask,`16*5`(%rsp)
+	pshufd	\$0x5f,@tweak[5],$twres
+	jmp	.Lxts_dec_loop6
+.align	32
+.Lxts_dec_loop6:
+	aesdec		$rndkey1,$inout0
+	aesdec		$rndkey1,$inout1
+	aesdec		$rndkey1,$inout2
+	aesdec		$rndkey1,$inout3
+	aesdec		$rndkey1,$inout4
+	aesdec		$rndkey1,$inout5
+	$movkey		-64($key,%rax),$rndkey1
+	add		\$32,%rax
+
+	aesdec		$rndkey0,$inout0
+	aesdec		$rndkey0,$inout1
+	aesdec		$rndkey0,$inout2
+	aesdec		$rndkey0,$inout3
+	aesdec		$rndkey0,$inout4
+	aesdec		$rndkey0,$inout5
+	$movkey		-80($key,%rax),$rndkey0
+	jnz		.Lxts_dec_loop6
+
+	movdqa	(%r8),$twmask			# start calculating next tweak
+	movdqa	$twres,$twtmp
+	paddd	$twres,$twres
+	 aesdec		$rndkey1,$inout0
+	paddq	@tweak[5],@tweak[5]
+	psrad	\$31,$twtmp
+	 aesdec		$rndkey1,$inout1
+	pand	$twmask,$twtmp
+	$movkey	($key_),@tweak[0]		# load round[0]
+	 aesdec		$rndkey1,$inout2
+	 aesdec		$rndkey1,$inout3
+	 aesdec		$rndkey1,$inout4
+	pxor	$twtmp,@tweak[5]
+	movaps	@tweak[0],@tweak[1]		# copy round[0]
+	 aesdec		$rndkey1,$inout5
+	 $movkey	-64($key),$rndkey1
+
+	movdqa	$twres,$twtmp
+	 aesdec		$rndkey0,$inout0
+	paddd	$twres,$twres
+	pxor	@tweak[5],@tweak[0]
+	 aesdec		$rndkey0,$inout1
+	psrad	\$31,$twtmp
+	paddq	@tweak[5],@tweak[5]
+	 aesdec		$rndkey0,$inout2
+	 aesdec		$rndkey0,$inout3
+	pand	$twmask,$twtmp
+	movaps	@tweak[1],@tweak[2]
+	 aesdec		$rndkey0,$inout4
+	pxor	$twtmp,@tweak[5]
+	movdqa	$twres,$twtmp
+	 aesdec		$rndkey0,$inout5
+	 $movkey	-48($key),$rndkey0
+
+	paddd	$twres,$twres
+	 aesdec		$rndkey1,$inout0
+	pxor	@tweak[5],@tweak[1]
+	psrad	\$31,$twtmp
+	 aesdec		$rndkey1,$inout1
+	paddq	@tweak[5],@tweak[5]
+	pand	$twmask,$twtmp
+	 aesdec		$rndkey1,$inout2
+	 aesdec		$rndkey1,$inout3
+	 movdqa	@tweak[3],`16*3`(%rsp)
+	pxor	$twtmp,@tweak[5]
+	 aesdec		$rndkey1,$inout4
+	movaps	@tweak[2],@tweak[3]
+	movdqa	$twres,$twtmp
+	 aesdec		$rndkey1,$inout5
+	 $movkey	-32($key),$rndkey1
+
+	paddd	$twres,$twres
+	 aesdec		$rndkey0,$inout0
+	pxor	@tweak[5],@tweak[2]
+	psrad	\$31,$twtmp
+	 aesdec		$rndkey0,$inout1
+	paddq	@tweak[5],@tweak[5]
+	pand	$twmask,$twtmp
+	 aesdec		$rndkey0,$inout2
+	 aesdec		$rndkey0,$inout3
+	 aesdec		$rndkey0,$inout4
+	pxor	$twtmp,@tweak[5]
+	movaps	@tweak[3],@tweak[4]
+	 aesdec		$rndkey0,$inout5
+
+	movdqa	$twres,$rndkey0
+	paddd	$twres,$twres
+	 aesdec		$rndkey1,$inout0
+	pxor	@tweak[5],@tweak[3]
+	psrad	\$31,$rndkey0
+	 aesdec		$rndkey1,$inout1
+	paddq	@tweak[5],@tweak[5]
+	pand	$twmask,$rndkey0
+	 aesdec		$rndkey1,$inout2
+	 aesdec		$rndkey1,$inout3
+	pxor	$rndkey0,@tweak[5]
+	$movkey		($key_),$rndkey0
+	 aesdec		$rndkey1,$inout4
+	 aesdec		$rndkey1,$inout5
+	$movkey		16($key_),$rndkey1
+
+	pxor	@tweak[5],@tweak[4]
+	 aesdeclast	`16*0`(%rsp),$inout0
+	psrad	\$31,$twres
+	paddq	@tweak[5],@tweak[5]
+	 aesdeclast	`16*1`(%rsp),$inout1
+	 aesdeclast	`16*2`(%rsp),$inout2
+	pand	$twmask,$twres
+	mov	%r10,%rax			# restore $rounds
+	 aesdeclast	`16*3`(%rsp),$inout3
+	 aesdeclast	`16*4`(%rsp),$inout4
+	 aesdeclast	`16*5`(%rsp),$inout5
+	pxor	$twres,@tweak[5]
+
+	lea	`16*6`($out),$out		# $out+=6*16
+	movups	$inout0,`-16*6`($out)		# store 6 output blocks
+	movups	$inout1,`-16*5`($out)
+	movups	$inout2,`-16*4`($out)
+	movups	$inout3,`-16*3`($out)
+	movups	$inout4,`-16*2`($out)
+	movups	$inout5,`-16*1`($out)
+	sub	\$16*6,$len
+	jnc	.Lxts_dec_grandloop		# loop if $len-=6*16 didn't borrow
+
+	mov	\$16+96,$rounds
+	sub	$rnds_,$rounds
+	mov	$key_,$key			# restore $key
+	shr	\$4,$rounds			# restore original value
+
+.Lxts_dec_short:
+	# at this point @tweak[0..5] are populated with tweak values
+	mov	$rounds,$rnds_			# backup $rounds
+	pxor	$rndkey0,@tweak[0]
+	pxor	$rndkey0,@tweak[1]
+	add	\$16*6,$len			# restore real remaining $len
+	jz	.Lxts_dec_done			# done if ($len==0)
+
+	pxor	$rndkey0,@tweak[2]
+	cmp	\$0x20,$len
+	jb	.Lxts_dec_one			# $len is 1*16
+	pxor	$rndkey0,@tweak[3]
+	je	.Lxts_dec_two			# $len is 2*16
+
+	pxor	$rndkey0,@tweak[4]
+	cmp	\$0x40,$len
+	jb	.Lxts_dec_three			# $len is 3*16
+	je	.Lxts_dec_four			# $len is 4*16
+
+	movdqu	($inp),$inout0			# $len is 5*16
+	movdqu	16*1($inp),$inout1
+	movdqu	16*2($inp),$inout2
+	pxor	@tweak[0],$inout0
+	movdqu	16*3($inp),$inout3
+	pxor	@tweak[1],$inout1
+	movdqu	16*4($inp),$inout4
+	lea	16*5($inp),$inp			# $inp+=5*16
+	pxor	@tweak[2],$inout2
+	pxor	@tweak[3],$inout3
+	pxor	@tweak[4],$inout4
+
+	call	_aesni_decrypt6
+
+	xorps	@tweak[0],$inout0
+	xorps	@tweak[1],$inout1
+	xorps	@tweak[2],$inout2
+	movdqu	$inout0,($out)			# store 5 output blocks
+	xorps	@tweak[3],$inout3
+	movdqu	$inout1,16*1($out)
+	xorps	@tweak[4],$inout4
+	movdqu	$inout2,16*2($out)
+	 pxor		$twtmp,$twtmp
+	movdqu	$inout3,16*3($out)
+	 pcmpgtd	@tweak[5],$twtmp
+	movdqu	$inout4,16*4($out)
+	lea	16*5($out),$out			# $out+=5*16
+	 pshufd		\$0x13,$twtmp,@tweak[1]	# $twres
+	and	\$15,$len_
+	jz	.Lxts_dec_ret
+
+	movdqa	@tweak[5],@tweak[0]
+	paddq	@tweak[5],@tweak[5]		# psllq 1,$tweak
+	pand	$twmask,@tweak[1]		# isolate carry and residue
+	pxor	@tweak[5],@tweak[1]
+	jmp	.Lxts_dec_done2
+
+.align	16
+.Lxts_dec_one:
+	movups	($inp),$inout0
+	lea	16*1($inp),$inp			# $inp+=1*16
+	xorps	@tweak[0],$inout0
+___
+	&aesni_generate1("dec",$key,$rounds);
+$code.=<<___;
+	xorps	@tweak[0],$inout0
+	movdqa	@tweak[1],@tweak[0]
+	movups	$inout0,($out)			# store one output block
+	movdqa	@tweak[2],@tweak[1]
+	lea	16*1($out),$out			# $out+=1*16
+	jmp	.Lxts_dec_done
+
+.align	16
+.Lxts_dec_two:
+	movups	($inp),$inout0
+	movups	16($inp),$inout1
+	lea	32($inp),$inp			# $inp+=2*16
+	xorps	@tweak[0],$inout0
+	xorps	@tweak[1],$inout1
+
+	call	_aesni_decrypt2
+
+	xorps	@tweak[0],$inout0
+	movdqa	@tweak[2],@tweak[0]
+	xorps	@tweak[1],$inout1
+	movdqa	@tweak[3],@tweak[1]
+	movups	$inout0,($out)			# store 2 output blocks
+	movups	$inout1,16*1($out)
+	lea	16*2($out),$out			# $out+=2*16
+	jmp	.Lxts_dec_done
+
+.align	16
+.Lxts_dec_three:
+	movups	($inp),$inout0
+	movups	16*1($inp),$inout1
+	movups	16*2($inp),$inout2
+	lea	16*3($inp),$inp			# $inp+=3*16
+	xorps	@tweak[0],$inout0
+	xorps	@tweak[1],$inout1
+	xorps	@tweak[2],$inout2
+
+	call	_aesni_decrypt3
+
+	xorps	@tweak[0],$inout0
+	movdqa	@tweak[3],@tweak[0]
+	xorps	@tweak[1],$inout1
+	movdqa	@tweak[4],@tweak[1]
+	xorps	@tweak[2],$inout2
+	movups	$inout0,($out)			# store 3 output blocks
+	movups	$inout1,16*1($out)
+	movups	$inout2,16*2($out)
+	lea	16*3($out),$out			# $out+=3*16
+	jmp	.Lxts_dec_done
+
+.align	16
+.Lxts_dec_four:
+	movups	($inp),$inout0
+	movups	16*1($inp),$inout1
+	movups	16*2($inp),$inout2
+	xorps	@tweak[0],$inout0
+	movups	16*3($inp),$inout3
+	lea	16*4($inp),$inp			# $inp+=4*16
+	xorps	@tweak[1],$inout1
+	xorps	@tweak[2],$inout2
+	xorps	@tweak[3],$inout3
+
+	call	_aesni_decrypt4
+
+	pxor	@tweak[0],$inout0
+	movdqa	@tweak[4],@tweak[0]
+	pxor	@tweak[1],$inout1
+	movdqa	@tweak[5],@tweak[1]
+	pxor	@tweak[2],$inout2
+	movdqu	$inout0,($out)			# store 4 output blocks
+	pxor	@tweak[3],$inout3
+	movdqu	$inout1,16*1($out)
+	movdqu	$inout2,16*2($out)
+	movdqu	$inout3,16*3($out)
+	lea	16*4($out),$out			# $out+=4*16
+	jmp	.Lxts_dec_done
+
+.align	16
+.Lxts_dec_done:
+	and	\$15,$len_			# see if $len%16 is 0
+	jz	.Lxts_dec_ret
+.Lxts_dec_done2:
+	mov	$len_,$len
+	mov	$key_,$key			# restore $key
+	mov	$rnds_,$rounds			# restore $rounds
+
+	movups	($inp),$inout0
+	xorps	@tweak[1],$inout0
+___
+	&aesni_generate1("dec",$key,$rounds);
+$code.=<<___;
+	xorps	@tweak[1],$inout0
+	movups	$inout0,($out)
+
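+	# decrypt-side stealing mirrors the loop in aesni_xts_encrypt, except
+	# that the next-to-last block was just decrypted with @tweak[1]; the
+	# reassembled final block is then decrypted with @tweak[0] below.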
+.Lxts_dec_steal:
+	movzb	16($inp),%eax			# borrow $rounds ...
+	movzb	($out),%ecx			# ... and $key
+	lea	1($inp),$inp
+	mov	%al,($out)
+	mov	%cl,16($out)
+	lea	1($out),$out
+	sub	\$1,$len
+	jnz	.Lxts_dec_steal
+
+	sub	$len_,$out			# rewind $out
+	mov	$key_,$key			# restore $key
+	mov	$rnds_,$rounds			# restore $rounds
+
+	movups	($out),$inout0
+	xorps	@tweak[0],$inout0
+___
+	&aesni_generate1("dec",$key,$rounds);
+$code.=<<___;
+	xorps	@tweak[0],$inout0
+	movups	$inout0,($out)
+
+.Lxts_dec_ret:
+	xorps	%xmm0,%xmm0			# clear register bank
+	pxor	%xmm1,%xmm1
+	pxor	%xmm2,%xmm2
+	pxor	%xmm3,%xmm3
+	pxor	%xmm4,%xmm4
+	pxor	%xmm5,%xmm5
+___
+$code.=<<___ if (!$win64);
+	pxor	%xmm6,%xmm6
+	pxor	%xmm7,%xmm7
+	movaps	%xmm0,0x00(%rsp)		# clear stack
+	pxor	%xmm8,%xmm8
+	movaps	%xmm0,0x10(%rsp)
+	pxor	%xmm9,%xmm9
+	movaps	%xmm0,0x20(%rsp)
+	pxor	%xmm10,%xmm10
+	movaps	%xmm0,0x30(%rsp)
+	pxor	%xmm11,%xmm11
+	movaps	%xmm0,0x40(%rsp)
+	pxor	%xmm12,%xmm12
+	movaps	%xmm0,0x50(%rsp)
+	pxor	%xmm13,%xmm13
+	movaps	%xmm0,0x60(%rsp)
+	pxor	%xmm14,%xmm14
+	pxor	%xmm15,%xmm15
+___
+$code.=<<___ if ($win64);
+	movaps	-0xa8(%r11),%xmm6
+	movaps	%xmm0,-0xa8(%r11)		# clear stack
+	movaps	-0x98(%r11),%xmm7
+	movaps	%xmm0,-0x98(%r11)
+	movaps	-0x88(%r11),%xmm8
+	movaps	%xmm0,-0x88(%r11)
+	movaps	-0x78(%r11),%xmm9
+	movaps	%xmm0,-0x78(%r11)
+	movaps	-0x68(%r11),%xmm10
+	movaps	%xmm0,-0x68(%r11)
+	movaps	-0x58(%r11),%xmm11
+	movaps	%xmm0,-0x58(%r11)
+	movaps	-0x48(%r11),%xmm12
+	movaps	%xmm0,-0x48(%r11)
+	movaps	-0x38(%r11),%xmm13
+	movaps	%xmm0,-0x38(%r11)
+	movaps	-0x28(%r11),%xmm14
+	movaps	%xmm0,-0x28(%r11)
+	movaps	-0x18(%r11),%xmm15
+	movaps	%xmm0,-0x18(%r11)
+	movaps	%xmm0,0x00(%rsp)
+	movaps	%xmm0,0x10(%rsp)
+	movaps	%xmm0,0x20(%rsp)
+	movaps	%xmm0,0x30(%rsp)
+	movaps	%xmm0,0x40(%rsp)
+	movaps	%xmm0,0x50(%rsp)
+	movaps	%xmm0,0x60(%rsp)
+___
+$code.=<<___;
+	mov	-8(%r11),%rbp
+.cfi_restore	%rbp
+	lea	(%r11),%rsp
+.cfi_def_cfa_register	%rsp
+.Lxts_dec_epilogue:
+	ret
+.cfi_endproc
+.size	aesni_xts_decrypt,.-aesni_xts_decrypt
+___
+}
+
+######################################################################
+# void aesni_ocb_[en|de]crypt(const char *inp, char *out, size_t blocks,
+#	const AES_KEY *key, unsigned int start_block_num,
+#	unsigned char offset_i[16], const unsigned char L_[][16],
+#	unsigned char checksum[16]);
+#
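+# Offset bookkeeping in C-like form (illustrative only; xor16 and ntz are
+# assumed helpers, ntz being count-trailing-zeros to match the bsf
+# instructions below): for block number i, offset_i = offset_{i-1} ^
+# L_[ntz(i)], and the plaintext is folded into the checksum before
+# whitening:
+#
+#	for (i = start_block_num; blocks--; i++) {
+#		xor16(offset, L_[ntz(i)]);
+#		xor16(checksum, in);		/* checksum ^= P_i       */
+#		xor16(tmp, in, offset);
+#		AES_encrypt(tmp, tmp, key);
+#		xor16(out, tmp, offset);	/* C_i = E(P_i^O_i)^O_i  */
+#	}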
+{
+my @offset=map("%xmm$_",(10..15));
+my ($checksum,$rndkey0l)=("%xmm8","%xmm9");
+my ($block_num,$offset_p)=("%r8","%r9");		# 5th and 6th arguments
+my ($L_p,$checksum_p) = ("%rbx","%rbp");
+my ($i1,$i3,$i5) = ("%r12","%r13","%r14");
+my $seventh_arg = $win64 ? 56 : 8;
+my $blocks = $len;
+
+$code.=<<___;
+.globl	aesni_ocb_encrypt
+.type	aesni_ocb_encrypt,\@function,6
+.align	32
+aesni_ocb_encrypt:
+.cfi_startproc
+	lea	(%rsp),%rax
+	push	%rbx
+.cfi_push	%rbx
+	push	%rbp
+.cfi_push	%rbp
+	push	%r12
+.cfi_push	%r12
+	push	%r13
+.cfi_push	%r13
+	push	%r14
+.cfi_push	%r14
+___
+$code.=<<___ if ($win64);
+	lea	-0xa0(%rsp),%rsp
+	movaps	%xmm6,0x00(%rsp)		# offload everything
+	movaps	%xmm7,0x10(%rsp)
+	movaps	%xmm8,0x20(%rsp)
+	movaps	%xmm9,0x30(%rsp)
+	movaps	%xmm10,0x40(%rsp)
+	movaps	%xmm11,0x50(%rsp)
+	movaps	%xmm12,0x60(%rsp)
+	movaps	%xmm13,0x70(%rsp)
+	movaps	%xmm14,0x80(%rsp)
+	movaps	%xmm15,0x90(%rsp)
+.Locb_enc_body:
+___
+$code.=<<___;
+	mov	$seventh_arg(%rax),$L_p		# 7th argument
+	mov	$seventh_arg+8(%rax),$checksum_p	# 8th argument
+
+	mov	240($key),$rnds_
+	mov	$key,$key_
+	shl	\$4,$rnds_
+	$movkey	($key),$rndkey0l		# round[0]
+	$movkey	16($key,$rnds_),$rndkey1	# round[last]
+
+	movdqu	($offset_p),@offset[5]		# load last offset_i
+	pxor	$rndkey1,$rndkey0l		# round[0] ^ round[last]
+	pxor	$rndkey1,@offset[5]		# offset_i ^ round[last]
+
+	mov	\$16+32,$rounds
+	lea	32($key_,$rnds_),$key
+	$movkey	16($key_),$rndkey1		# round[1]
+	sub	%r10,%rax			# twisted $rounds
+	mov	%rax,%r10			# backup twisted $rounds
+
+	movdqu	($L_p),@offset[0]		# L_0 for all odd-numbered blocks
+	movdqu	($checksum_p),$checksum		# load checksum
+
+	test	\$1,$block_num			# is first block number odd?
+	jnz	.Locb_enc_odd
+
+	bsf	$block_num,$i1
+	add	\$1,$block_num
+	shl	\$4,$i1
+	movdqu	($L_p,$i1),$inout5		# borrow
+	movdqu	($inp),$inout0
+	lea	16($inp),$inp
+
+	call	__ocb_encrypt1
+
+	movdqa	$inout5,@offset[5]
+	movups	$inout0,($out)
+	lea	16($out),$out
+	sub	\$1,$blocks
+	jz	.Locb_enc_done
+
+.Locb_enc_odd:
+	lea	1($block_num),$i1		# even-numbered blocks
+	lea	3($block_num),$i3
+	lea	5($block_num),$i5
+	lea	6($block_num),$block_num
+	bsf	$i1,$i1				# ntz(block)
+	bsf	$i3,$i3
+	bsf	$i5,$i5
+	shl	\$4,$i1				# ntz(block) -> table offset
+	shl	\$4,$i3
+	shl	\$4,$i5
+
+	sub	\$6,$blocks
+	jc	.Locb_enc_short
+	jmp	.Locb_enc_grandloop
+
+.align	32
+.Locb_enc_grandloop:
+	movdqu	`16*0`($inp),$inout0		# load input
+	movdqu	`16*1`($inp),$inout1
+	movdqu	`16*2`($inp),$inout2
+	movdqu	`16*3`($inp),$inout3
+	movdqu	`16*4`($inp),$inout4
+	movdqu	`16*5`($inp),$inout5
+	lea	`16*6`($inp),$inp
+
+	call	__ocb_encrypt6
+
+	movups	$inout0,`16*0`($out)		# store output
+	movups	$inout1,`16*1`($out)
+	movups	$inout2,`16*2`($out)
+	movups	$inout3,`16*3`($out)
+	movups	$inout4,`16*4`($out)
+	movups	$inout5,`16*5`($out)
+	lea	`16*6`($out),$out
+	sub	\$6,$blocks
+	jnc	.Locb_enc_grandloop
+
+.Locb_enc_short:
+	add	\$6,$blocks
+	jz	.Locb_enc_done
+
+	movdqu	`16*0`($inp),$inout0
+	cmp	\$2,$blocks
+	jb	.Locb_enc_one
+	movdqu	`16*1`($inp),$inout1
+	je	.Locb_enc_two
+
+	movdqu	`16*2`($inp),$inout2
+	cmp	\$4,$blocks
+	jb	.Locb_enc_three
+	movdqu	`16*3`($inp),$inout3
+	je	.Locb_enc_four
+
+	movdqu	`16*4`($inp),$inout4
+	pxor	$inout5,$inout5
+
+	call	__ocb_encrypt6
+
+	movdqa	@offset[4],@offset[5]
+	movups	$inout0,`16*0`($out)
+	movups	$inout1,`16*1`($out)
+	movups	$inout2,`16*2`($out)
+	movups	$inout3,`16*3`($out)
+	movups	$inout4,`16*4`($out)
+
+	jmp	.Locb_enc_done
+
+.align	16
+.Locb_enc_one:
+	movdqa	@offset[0],$inout5		# borrow
+
+	call	__ocb_encrypt1
+
+	movdqa	$inout5,@offset[5]
+	movups	$inout0,`16*0`($out)
+	jmp	.Locb_enc_done
+
+.align	16
+.Locb_enc_two:
+	pxor	$inout2,$inout2
+	pxor	$inout3,$inout3
+
+	call	__ocb_encrypt4
+
+	movdqa	@offset[1],@offset[5]
+	movups	$inout0,`16*0`($out)
+	movups	$inout1,`16*1`($out)
+
+	jmp	.Locb_enc_done
+
+.align	16
+.Locb_enc_three:
+	pxor	$inout3,$inout3
+
+	call	__ocb_encrypt4
+
+	movdqa	@offset[2],@offset[5]
+	movups	$inout0,`16*0`($out)
+	movups	$inout1,`16*1`($out)
+	movups	$inout2,`16*2`($out)
+
+	jmp	.Locb_enc_done
+
+.align	16
+.Locb_enc_four:
+	call	__ocb_encrypt4
+
+	movdqa	@offset[3],@offset[5]
+	movups	$inout0,`16*0`($out)
+	movups	$inout1,`16*1`($out)
+	movups	$inout2,`16*2`($out)
+	movups	$inout3,`16*3`($out)
+
+.Locb_enc_done:
+	pxor	$rndkey0,@offset[5]		# "remove" round[last]
+	movdqu	$checksum,($checksum_p)		# store checksum
+	movdqu	@offset[5],($offset_p)		# store last offset_i
+
+	xorps	%xmm0,%xmm0			# clear register bank
+	pxor	%xmm1,%xmm1
+	pxor	%xmm2,%xmm2
+	pxor	%xmm3,%xmm3
+	pxor	%xmm4,%xmm4
+	pxor	%xmm5,%xmm5
+___
+$code.=<<___ if (!$win64);
+	pxor	%xmm6,%xmm6
+	pxor	%xmm7,%xmm7
+	pxor	%xmm8,%xmm8
+	pxor	%xmm9,%xmm9
+	pxor	%xmm10,%xmm10
+	pxor	%xmm11,%xmm11
+	pxor	%xmm12,%xmm12
+	pxor	%xmm13,%xmm13
+	pxor	%xmm14,%xmm14
+	pxor	%xmm15,%xmm15
+	lea	0x28(%rsp),%rax
+.cfi_def_cfa	%rax,8
+___
+$code.=<<___ if ($win64);
+	movaps	0x00(%rsp),%xmm6
+	movaps	%xmm0,0x00(%rsp)		# clear stack
+	movaps	0x10(%rsp),%xmm7
+	movaps	%xmm0,0x10(%rsp)
+	movaps	0x20(%rsp),%xmm8
+	movaps	%xmm0,0x20(%rsp)
+	movaps	0x30(%rsp),%xmm9
+	movaps	%xmm0,0x30(%rsp)
+	movaps	0x40(%rsp),%xmm10
+	movaps	%xmm0,0x40(%rsp)
+	movaps	0x50(%rsp),%xmm11
+	movaps	%xmm0,0x50(%rsp)
+	movaps	0x60(%rsp),%xmm12
+	movaps	%xmm0,0x60(%rsp)
+	movaps	0x70(%rsp),%xmm13
+	movaps	%xmm0,0x70(%rsp)
+	movaps	0x80(%rsp),%xmm14
+	movaps	%xmm0,0x80(%rsp)
+	movaps	0x90(%rsp),%xmm15
+	movaps	%xmm0,0x90(%rsp)
+	lea	0xa0+0x28(%rsp),%rax
+.Locb_enc_pop:
+___
+$code.=<<___;
+	mov	-40(%rax),%r14
+.cfi_restore	%r14
+	mov	-32(%rax),%r13
+.cfi_restore	%r13
+	mov	-24(%rax),%r12
+.cfi_restore	%r12
+	mov	-16(%rax),%rbp
+.cfi_restore	%rbp
+	mov	-8(%rax),%rbx
+.cfi_restore	%rbx
+	lea	(%rax),%rsp
+.cfi_def_cfa_register	%rsp
+.Locb_enc_epilogue:
+	ret
+.cfi_endproc
+.size	aesni_ocb_encrypt,.-aesni_ocb_encrypt
+
+.type	__ocb_encrypt6,\@abi-omnipotent
+.align	32
+__ocb_encrypt6:
+.cfi_startproc
+	 pxor		$rndkey0l,@offset[5]	# offset_i ^ round[0]
+	 movdqu		($L_p,$i1),@offset[1]
+	 movdqa		@offset[0],@offset[2]
+	 movdqu		($L_p,$i3),@offset[3]
+	 movdqa		@offset[0],@offset[4]
+	 pxor		@offset[5],@offset[0]
+	 movdqu		($L_p,$i5),@offset[5]
+	 pxor		@offset[0],@offset[1]
+	pxor		$inout0,$checksum	# accumulate checksum
+	pxor		@offset[0],$inout0	# input ^ round[0] ^ offset_i
+	 pxor		@offset[1],@offset[2]
+	pxor		$inout1,$checksum
+	pxor		@offset[1],$inout1
+	 pxor		@offset[2],@offset[3]
+	pxor		$inout2,$checksum
+	pxor		@offset[2],$inout2
+	 pxor		@offset[3],@offset[4]
+	pxor		$inout3,$checksum
+	pxor		@offset[3],$inout3
+	 pxor		@offset[4],@offset[5]
+	pxor		$inout4,$checksum
+	pxor		@offset[4],$inout4
+	pxor		$inout5,$checksum
+	pxor		@offset[5],$inout5
+	$movkey		32($key_),$rndkey0
+
+	lea		1($block_num),$i1	# even-numbered blocks
+	lea		3($block_num),$i3
+	lea		5($block_num),$i5
+	add		\$6,$block_num
+	 pxor		$rndkey0l,@offset[0]	# offset_i ^ round[last]
+	bsf		$i1,$i1			# ntz(block)
+	bsf		$i3,$i3
+	bsf		$i5,$i5
+
+	aesenc		$rndkey1,$inout0
+	aesenc		$rndkey1,$inout1
+	aesenc		$rndkey1,$inout2
+	aesenc		$rndkey1,$inout3
+	 pxor		$rndkey0l,@offset[1]
+	 pxor		$rndkey0l,@offset[2]
+	aesenc		$rndkey1,$inout4
+	 pxor		$rndkey0l,@offset[3]
+	 pxor		$rndkey0l,@offset[4]
+	aesenc		$rndkey1,$inout5
+	$movkey		48($key_),$rndkey1
+	 pxor		$rndkey0l,@offset[5]
+
+	aesenc		$rndkey0,$inout0
+	aesenc		$rndkey0,$inout1
+	aesenc		$rndkey0,$inout2
+	aesenc		$rndkey0,$inout3
+	aesenc		$rndkey0,$inout4
+	aesenc		$rndkey0,$inout5
+	$movkey		64($key_),$rndkey0
+	shl		\$4,$i1			# ntz(block) -> table offset
+	shl		\$4,$i3
+	jmp		.Locb_enc_loop6
+
+.align	32
+.Locb_enc_loop6:
+	aesenc		$rndkey1,$inout0
+	aesenc		$rndkey1,$inout1
+	aesenc		$rndkey1,$inout2
+	aesenc		$rndkey1,$inout3
+	aesenc		$rndkey1,$inout4
+	aesenc		$rndkey1,$inout5
+	$movkey		($key,%rax),$rndkey1
+	add		\$32,%rax
+
+	aesenc		$rndkey0,$inout0
+	aesenc		$rndkey0,$inout1
+	aesenc		$rndkey0,$inout2
+	aesenc		$rndkey0,$inout3
+	aesenc		$rndkey0,$inout4
+	aesenc		$rndkey0,$inout5
+	$movkey		-16($key,%rax),$rndkey0
+	jnz		.Locb_enc_loop6
+
+	aesenc		$rndkey1,$inout0
+	aesenc		$rndkey1,$inout1
+	aesenc		$rndkey1,$inout2
+	aesenc		$rndkey1,$inout3
+	aesenc		$rndkey1,$inout4
+	aesenc		$rndkey1,$inout5
+	$movkey		16($key_),$rndkey1
+	shl		\$4,$i5
+
+	aesenclast	@offset[0],$inout0
+	movdqu		($L_p),@offset[0]	# L_0 for all odd-numbered blocks
+	mov		%r10,%rax		# restore twisted rounds
+	aesenclast	@offset[1],$inout1
+	aesenclast	@offset[2],$inout2
+	aesenclast	@offset[3],$inout3
+	aesenclast	@offset[4],$inout4
+	aesenclast	@offset[5],$inout5
+	ret
+.cfi_endproc
+.size	__ocb_encrypt6,.-__ocb_encrypt6
+
+.type	__ocb_encrypt4,\@abi-omnipotent
+.align	32
+__ocb_encrypt4:
+.cfi_startproc
+	 pxor		$rndkey0l,@offset[5]	# offset_i ^ round[0]
+	 movdqu		($L_p,$i1),@offset[1]
+	 movdqa		@offset[0],@offset[2]
+	 movdqu		($L_p,$i3),@offset[3]
+	 pxor		@offset[5],@offset[0]
+	 pxor		@offset[0],@offset[1]
+	pxor		$inout0,$checksum	# accumulate checksum
+	pxor		@offset[0],$inout0	# input ^ round[0] ^ offset_i
+	 pxor		@offset[1],@offset[2]
+	pxor		$inout1,$checksum
+	pxor		@offset[1],$inout1
+	 pxor		@offset[2],@offset[3]
+	pxor		$inout2,$checksum
+	pxor		@offset[2],$inout2
+	pxor		$inout3,$checksum
+	pxor		@offset[3],$inout3
+	$movkey		32($key_),$rndkey0
+
+	 pxor		$rndkey0l,@offset[0]	# offset_i ^ round[last]
+	 pxor		$rndkey0l,@offset[1]
+	 pxor		$rndkey0l,@offset[2]
+	 pxor		$rndkey0l,@offset[3]
+
+	aesenc		$rndkey1,$inout0
+	aesenc		$rndkey1,$inout1
+	aesenc		$rndkey1,$inout2
+	aesenc		$rndkey1,$inout3
+	$movkey		48($key_),$rndkey1
+
+	aesenc		$rndkey0,$inout0
+	aesenc		$rndkey0,$inout1
+	aesenc		$rndkey0,$inout2
+	aesenc		$rndkey0,$inout3
+	$movkey		64($key_),$rndkey0
+	jmp		.Locb_enc_loop4
+
+.align	32
+.Locb_enc_loop4:
+	aesenc		$rndkey1,$inout0
+	aesenc		$rndkey1,$inout1
+	aesenc		$rndkey1,$inout2
+	aesenc		$rndkey1,$inout3
+	$movkey		($key,%rax),$rndkey1
+	add		\$32,%rax
+
+	aesenc		$rndkey0,$inout0
+	aesenc		$rndkey0,$inout1
+	aesenc		$rndkey0,$inout2
+	aesenc		$rndkey0,$inout3
+	$movkey		-16($key,%rax),$rndkey0
+	jnz		.Locb_enc_loop4
+
+	aesenc		$rndkey1,$inout0
+	aesenc		$rndkey1,$inout1
+	aesenc		$rndkey1,$inout2
+	aesenc		$rndkey1,$inout3
+	$movkey		16($key_),$rndkey1
+	mov		%r10,%rax		# restore twisted rounds
+
+	aesenclast	@offset[0],$inout0
+	aesenclast	@offset[1],$inout1
+	aesenclast	@offset[2],$inout2
+	aesenclast	@offset[3],$inout3
+	ret
+.cfi_endproc
+.size	__ocb_encrypt4,.-__ocb_encrypt4
+
+.type	__ocb_encrypt1,\@abi-omnipotent
+.align	32
+__ocb_encrypt1:
+.cfi_startproc
+	 pxor		@offset[5],$inout5	# offset_i
+	 pxor		$rndkey0l,$inout5	# offset_i ^ round[0]
+	pxor		$inout0,$checksum	# accumulate checksum
+	pxor		$inout5,$inout0		# input ^ round[0] ^ offset_i
+	$movkey		32($key_),$rndkey0
+
+	aesenc		$rndkey1,$inout0
+	$movkey		48($key_),$rndkey1
+	pxor		$rndkey0l,$inout5	# offset_i ^ round[last]
+
+	aesenc		$rndkey0,$inout0
+	$movkey		64($key_),$rndkey0
+	jmp		.Locb_enc_loop1
+
+.align	32
+.Locb_enc_loop1:
+	aesenc		$rndkey1,$inout0
+	$movkey		($key,%rax),$rndkey1
+	add		\$32,%rax
+
+	aesenc		$rndkey0,$inout0
+	$movkey		-16($key,%rax),$rndkey0
+	jnz		.Locb_enc_loop1
+
+	aesenc		$rndkey1,$inout0
+	$movkey		16($key_),$rndkey1	# redundant in tail
+	mov		%r10,%rax		# restore twisted rounds
+
+	aesenclast	$inout5,$inout0
+	ret
+.cfi_endproc
+.size	__ocb_encrypt1,.-__ocb_encrypt1
+
+.globl	aesni_ocb_decrypt
+.type	aesni_ocb_decrypt,\@function,6
+.align	32
+aesni_ocb_decrypt:
+.cfi_startproc
+	lea	(%rsp),%rax
+	push	%rbx
+.cfi_push	%rbx
+	push	%rbp
+.cfi_push	%rbp
+	push	%r12
+.cfi_push	%r12
+	push	%r13
+.cfi_push	%r13
+	push	%r14
+.cfi_push	%r14
+___
+$code.=<<___ if ($win64);
+	lea	-0xa0(%rsp),%rsp
+	movaps	%xmm6,0x00(%rsp)		# offload everything
+	movaps	%xmm7,0x10(%rsp)
+	movaps	%xmm8,0x20(%rsp)
+	movaps	%xmm9,0x30(%rsp)
+	movaps	%xmm10,0x40(%rsp)
+	movaps	%xmm11,0x50(%rsp)
+	movaps	%xmm12,0x60(%rsp)
+	movaps	%xmm13,0x70(%rsp)
+	movaps	%xmm14,0x80(%rsp)
+	movaps	%xmm15,0x90(%rsp)
+.Locb_dec_body:
+___
+$code.=<<___;
+	mov	$seventh_arg(%rax),$L_p		# 7th argument
+	mov	$seventh_arg+8(%rax),$checksum_p# 8th argument
+
+	mov	240($key),$rnds_
+	mov	$key,$key_
+	shl	\$4,$rnds_
+	$movkey	($key),$rndkey0l		# round[0]
+	$movkey	16($key,$rnds_),$rndkey1	# round[last]
+
+	movdqu	($offset_p),@offset[5]		# load last offset_i
+	pxor	$rndkey1,$rndkey0l		# round[0] ^ round[last]
+	pxor	$rndkey1,@offset[5]		# offset_i ^ round[last]
+
+	mov	\$16+32,$rounds
+	lea	32($key_,$rnds_),$key
+	$movkey	16($key_),$rndkey1		# round[1]
+	sub	%r10,%rax			# twisted $rounds
+	mov	%rax,%r10			# backup twisted $rounds
+
+	movdqu	($L_p),@offset[0]		# L_0 for all odd-numbered blocks
+	movdqu	($checksum_p),$checksum		# load checksum
+
+	test	\$1,$block_num			# is first block number odd?
+	jnz	.Locb_dec_odd
+
+	bsf	$block_num,$i1
+	add	\$1,$block_num
+	shl	\$4,$i1
+	movdqu	($L_p,$i1),$inout5		# borrow
+	movdqu	($inp),$inout0
+	lea	16($inp),$inp
+
+	call	__ocb_decrypt1
+
+	movdqa	$inout5,@offset[5]
+	movups	$inout0,($out)
+	xorps	$inout0,$checksum		# accumulate checksum
+	lea	16($out),$out
+	sub	\$1,$blocks
+	jz	.Locb_dec_done
+
+.Locb_dec_odd:
+	lea	1($block_num),$i1		# even-numbered blocks
+	lea	3($block_num),$i3
+	lea	5($block_num),$i5
+	lea	6($block_num),$block_num
+	bsf	$i1,$i1				# ntz(block)
+	bsf	$i3,$i3
+	bsf	$i5,$i5
+	shl	\$4,$i1				# ntz(block) -> table offset
+	shl	\$4,$i3
+	shl	\$4,$i5
+
+	sub	\$6,$blocks
+	jc	.Locb_dec_short
+	jmp	.Locb_dec_grandloop
+
+.align	32
+.Locb_dec_grandloop:
+	movdqu	`16*0`($inp),$inout0		# load input
+	movdqu	`16*1`($inp),$inout1
+	movdqu	`16*2`($inp),$inout2
+	movdqu	`16*3`($inp),$inout3
+	movdqu	`16*4`($inp),$inout4
+	movdqu	`16*5`($inp),$inout5
+	lea	`16*6`($inp),$inp
+
+	call	__ocb_decrypt6
+
+	movups	$inout0,`16*0`($out)		# store output
+	pxor	$inout0,$checksum		# accumulate checksum
+	movups	$inout1,`16*1`($out)
+	pxor	$inout1,$checksum
+	movups	$inout2,`16*2`($out)
+	pxor	$inout2,$checksum
+	movups	$inout3,`16*3`($out)
+	pxor	$inout3,$checksum
+	movups	$inout4,`16*4`($out)
+	pxor	$inout4,$checksum
+	movups	$inout5,`16*5`($out)
+	pxor	$inout5,$checksum
+	lea	`16*6`($out),$out
+	sub	\$6,$blocks
+	jnc	.Locb_dec_grandloop
+
+.Locb_dec_short:
+	add	\$6,$blocks
+	jz	.Locb_dec_done
+
+	movdqu	`16*0`($inp),$inout0
+	cmp	\$2,$blocks
+	jb	.Locb_dec_one
+	movdqu	`16*1`($inp),$inout1
+	je	.Locb_dec_two
+
+	movdqu	`16*2`($inp),$inout2
+	cmp	\$4,$blocks
+	jb	.Locb_dec_three
+	movdqu	`16*3`($inp),$inout3
+	je	.Locb_dec_four
+
+	movdqu	`16*4`($inp),$inout4
+	pxor	$inout5,$inout5
+
+	call	__ocb_decrypt6
+
+	movdqa	@offset[4],@offset[5]
+	movups	$inout0,`16*0`($out)		# store output
+	pxor	$inout0,$checksum		# accumulate checksum
+	movups	$inout1,`16*1`($out)
+	pxor	$inout1,$checksum
+	movups	$inout2,`16*2`($out)
+	pxor	$inout2,$checksum
+	movups	$inout3,`16*3`($out)
+	pxor	$inout3,$checksum
+	movups	$inout4,`16*4`($out)
+	pxor	$inout4,$checksum
+
+	jmp	.Locb_dec_done
+
+.align	16
+.Locb_dec_one:
+	movdqa	@offset[0],$inout5		# borrow
+
+	call	__ocb_decrypt1
+
+	movdqa	$inout5,@offset[5]
+	movups	$inout0,`16*0`($out)		# store output
+	xorps	$inout0,$checksum		# accumulate checksum
+	jmp	.Locb_dec_done
+
+.align	16
+.Locb_dec_two:
+	pxor	$inout2,$inout2
+	pxor	$inout3,$inout3
+
+	call	__ocb_decrypt4
+
+	movdqa	@offset[1],@offset[5]
+	movups	$inout0,`16*0`($out)		# store output
+	xorps	$inout0,$checksum		# accumulate checksum
+	movups	$inout1,`16*1`($out)
+	xorps	$inout1,$checksum
+
+	jmp	.Locb_dec_done
+
+.align	16
+.Locb_dec_three:
+	pxor	$inout3,$inout3
+
+	call	__ocb_decrypt4
+
+	movdqa	@offset[2],@offset[5]
+	movups	$inout0,`16*0`($out)		# store output
+	xorps	$inout0,$checksum		# accumulate checksum
+	movups	$inout1,`16*1`($out)
+	xorps	$inout1,$checksum
+	movups	$inout2,`16*2`($out)
+	xorps	$inout2,$checksum
+
+	jmp	.Locb_dec_done
+
+.align	16
+.Locb_dec_four:
+	call	__ocb_decrypt4
+
+	movdqa	@offset[3],@offset[5]
+	movups	$inout0,`16*0`($out)		# store output
+	pxor	$inout0,$checksum		# accumulate checksum
+	movups	$inout1,`16*1`($out)
+	pxor	$inout1,$checksum
+	movups	$inout2,`16*2`($out)
+	pxor	$inout2,$checksum
+	movups	$inout3,`16*3`($out)
+	pxor	$inout3,$checksum
+
+.Locb_dec_done:
+	pxor	$rndkey0,@offset[5]		# "remove" round[last]
+	movdqu	$checksum,($checksum_p)		# store checksum
+	movdqu	@offset[5],($offset_p)		# store last offset_i
+
+	xorps	%xmm0,%xmm0			# clear register bank
+	pxor	%xmm1,%xmm1
+	pxor	%xmm2,%xmm2
+	pxor	%xmm3,%xmm3
+	pxor	%xmm4,%xmm4
+	pxor	%xmm5,%xmm5
+___
+$code.=<<___ if (!$win64);
+	pxor	%xmm6,%xmm6
+	pxor	%xmm7,%xmm7
+	pxor	%xmm8,%xmm8
+	pxor	%xmm9,%xmm9
+	pxor	%xmm10,%xmm10
+	pxor	%xmm11,%xmm11
+	pxor	%xmm12,%xmm12
+	pxor	%xmm13,%xmm13
+	pxor	%xmm14,%xmm14
+	pxor	%xmm15,%xmm15
+	lea	0x28(%rsp),%rax
+.cfi_def_cfa	%rax,8
+___
+$code.=<<___ if ($win64);
+	movaps	0x00(%rsp),%xmm6
+	movaps	%xmm0,0x00(%rsp)		# clear stack
+	movaps	0x10(%rsp),%xmm7
+	movaps	%xmm0,0x10(%rsp)
+	movaps	0x20(%rsp),%xmm8
+	movaps	%xmm0,0x20(%rsp)
+	movaps	0x30(%rsp),%xmm9
+	movaps	%xmm0,0x30(%rsp)
+	movaps	0x40(%rsp),%xmm10
+	movaps	%xmm0,0x40(%rsp)
+	movaps	0x50(%rsp),%xmm11
+	movaps	%xmm0,0x50(%rsp)
+	movaps	0x60(%rsp),%xmm12
+	movaps	%xmm0,0x60(%rsp)
+	movaps	0x70(%rsp),%xmm13
+	movaps	%xmm0,0x70(%rsp)
+	movaps	0x80(%rsp),%xmm14
+	movaps	%xmm0,0x80(%rsp)
+	movaps	0x90(%rsp),%xmm15
+	movaps	%xmm0,0x90(%rsp)
+	lea	0xa0+0x28(%rsp),%rax
+.Locb_dec_pop:
+___
+$code.=<<___;
+	mov	-40(%rax),%r14
+.cfi_restore	%r14
+	mov	-32(%rax),%r13
+.cfi_restore	%r13
+	mov	-24(%rax),%r12
+.cfi_restore	%r12
+	mov	-16(%rax),%rbp
+.cfi_restore	%rbp
+	mov	-8(%rax),%rbx
+.cfi_restore	%rbx
+	lea	(%rax),%rsp
+.cfi_def_cfa_register	%rsp
+.Locb_dec_epilogue:
+	ret
+.cfi_endproc
+.size	aesni_ocb_decrypt,.-aesni_ocb_decrypt
+
+.type	__ocb_decrypt6,\@abi-omnipotent
+.align	32
+__ocb_decrypt6:
+.cfi_startproc
+	 pxor		$rndkey0l,@offset[5]	# offset_i ^ round[0]
+	 movdqu		($L_p,$i1),@offset[1]
+	 movdqa		@offset[0],@offset[2]
+	 movdqu		($L_p,$i3),@offset[3]
+	 movdqa		@offset[0],@offset[4]
+	 pxor		@offset[5],@offset[0]
+	 movdqu		($L_p,$i5),@offset[5]
+	 pxor		@offset[0],@offset[1]
+	pxor		@offset[0],$inout0	# input ^ round[0] ^ offset_i
+	 pxor		@offset[1],@offset[2]
+	pxor		@offset[1],$inout1
+	 pxor		@offset[2],@offset[3]
+	pxor		@offset[2],$inout2
+	 pxor		@offset[3],@offset[4]
+	pxor		@offset[3],$inout3
+	 pxor		@offset[4],@offset[5]
+	pxor		@offset[4],$inout4
+	pxor		@offset[5],$inout5
+	$movkey		32($key_),$rndkey0
+
+	lea		1($block_num),$i1	# even-numbered blocks
+	lea		3($block_num),$i3
+	lea		5($block_num),$i5
+	add		\$6,$block_num
+	 pxor		$rndkey0l,@offset[0]	# offset_i ^ round[last]
+	bsf		$i1,$i1			# ntz(block)
+	bsf		$i3,$i3
+	bsf		$i5,$i5
+
+	aesdec		$rndkey1,$inout0
+	aesdec		$rndkey1,$inout1
+	aesdec		$rndkey1,$inout2
+	aesdec		$rndkey1,$inout3
+	 pxor		$rndkey0l,@offset[1]
+	 pxor		$rndkey0l,@offset[2]
+	aesdec		$rndkey1,$inout4
+	 pxor		$rndkey0l,@offset[3]
+	 pxor		$rndkey0l,@offset[4]
+	aesdec		$rndkey1,$inout5
+	$movkey		48($key_),$rndkey1
+	 pxor		$rndkey0l,@offset[5]
+
+	aesdec		$rndkey0,$inout0
+	aesdec		$rndkey0,$inout1
+	aesdec		$rndkey0,$inout2
+	aesdec		$rndkey0,$inout3
+	aesdec		$rndkey0,$inout4
+	aesdec		$rndkey0,$inout5
+	$movkey		64($key_),$rndkey0
+	shl		\$4,$i1			# ntz(block) -> table offset
+	shl		\$4,$i3
+	jmp		.Locb_dec_loop6
+
+.align	32
+.Locb_dec_loop6:
+	aesdec		$rndkey1,$inout0
+	aesdec		$rndkey1,$inout1
+	aesdec		$rndkey1,$inout2
+	aesdec		$rndkey1,$inout3
+	aesdec		$rndkey1,$inout4
+	aesdec		$rndkey1,$inout5
+	$movkey		($key,%rax),$rndkey1
+	add		\$32,%rax
+
+	aesdec		$rndkey0,$inout0
+	aesdec		$rndkey0,$inout1
+	aesdec		$rndkey0,$inout2
+	aesdec		$rndkey0,$inout3
+	aesdec		$rndkey0,$inout4
+	aesdec		$rndkey0,$inout5
+	$movkey		-16($key,%rax),$rndkey0
+	jnz		.Locb_dec_loop6
+
+	aesdec		$rndkey1,$inout0
+	aesdec		$rndkey1,$inout1
+	aesdec		$rndkey1,$inout2
+	aesdec		$rndkey1,$inout3
+	aesdec		$rndkey1,$inout4
+	aesdec		$rndkey1,$inout5
+	$movkey		16($key_),$rndkey1
+	shl		\$4,$i5
+
+	aesdeclast	@offset[0],$inout0
+	movdqu		($L_p),@offset[0]	# L_0 for all odd-numbered blocks
+	mov		%r10,%rax		# restore twisted rounds
+	aesdeclast	@offset[1],$inout1
+	aesdeclast	@offset[2],$inout2
+	aesdeclast	@offset[3],$inout3
+	aesdeclast	@offset[4],$inout4
+	aesdeclast	@offset[5],$inout5
+	ret
+.cfi_endproc
+.size	__ocb_decrypt6,.-__ocb_decrypt6
+
+.type	__ocb_decrypt4,\@abi-omnipotent
+.align	32
+__ocb_decrypt4:
+.cfi_startproc
+	 pxor		$rndkey0l,@offset[5]	# offset_i ^ round[0]
+	 movdqu		($L_p,$i1),@offset[1]
+	 movdqa		@offset[0],@offset[2]
+	 movdqu		($L_p,$i3),@offset[3]
+	 pxor		@offset[5],@offset[0]
+	 pxor		@offset[0],@offset[1]
+	pxor		@offset[0],$inout0	# input ^ round[0] ^ offset_i
+	 pxor		@offset[1],@offset[2]
+	pxor		@offset[1],$inout1
+	 pxor		@offset[2],@offset[3]
+	pxor		@offset[2],$inout2
+	pxor		@offset[3],$inout3
+	$movkey		32($key_),$rndkey0
+
+	 pxor		$rndkey0l,@offset[0]	# offset_i ^ round[last]
+	 pxor		$rndkey0l,@offset[1]
+	 pxor		$rndkey0l,@offset[2]
+	 pxor		$rndkey0l,@offset[3]
+
+	aesdec		$rndkey1,$inout0
+	aesdec		$rndkey1,$inout1
+	aesdec		$rndkey1,$inout2
+	aesdec		$rndkey1,$inout3
+	$movkey		48($key_),$rndkey1
+
+	aesdec		$rndkey0,$inout0
+	aesdec		$rndkey0,$inout1
+	aesdec		$rndkey0,$inout2
+	aesdec		$rndkey0,$inout3
+	$movkey		64($key_),$rndkey0
+	jmp		.Locb_dec_loop4
+
+.align	32
+.Locb_dec_loop4:
+	aesdec		$rndkey1,$inout0
+	aesdec		$rndkey1,$inout1
+	aesdec		$rndkey1,$inout2
+	aesdec		$rndkey1,$inout3
+	$movkey		($key,%rax),$rndkey1
+	add		\$32,%rax
+
+	aesdec		$rndkey0,$inout0
+	aesdec		$rndkey0,$inout1
+	aesdec		$rndkey0,$inout2
+	aesdec		$rndkey0,$inout3
+	$movkey		-16($key,%rax),$rndkey0
+	jnz		.Locb_dec_loop4
+
+	aesdec		$rndkey1,$inout0
+	aesdec		$rndkey1,$inout1
+	aesdec		$rndkey1,$inout2
+	aesdec		$rndkey1,$inout3
+	$movkey		16($key_),$rndkey1
+	mov		%r10,%rax		# restore twisted rounds
+
+	aesdeclast	@offset[0],$inout0
+	aesdeclast	@offset[1],$inout1
+	aesdeclast	@offset[2],$inout2
+	aesdeclast	@offset[3],$inout3
+	ret
+.cfi_endproc
+.size	__ocb_decrypt4,.-__ocb_decrypt4
+
+.type	__ocb_decrypt1,\@abi-omnipotent
+.align	32
+__ocb_decrypt1:
+.cfi_startproc
+	 pxor		@offset[5],$inout5	# offset_i
+	 pxor		$rndkey0l,$inout5	# offset_i ^ round[0]
+	pxor		$inout5,$inout0		# input ^ round[0] ^ offset_i
+	$movkey		32($key_),$rndkey0
+
+	aesdec		$rndkey1,$inout0
+	$movkey		48($key_),$rndkey1
+	pxor		$rndkey0l,$inout5	# offset_i ^ round[last]
+
+	aesdec		$rndkey0,$inout0
+	$movkey		64($key_),$rndkey0
+	jmp		.Locb_dec_loop1
+
+.align	32
+.Locb_dec_loop1:
+	aesdec		$rndkey1,$inout0
+	$movkey		($key,%rax),$rndkey1
+	add		\$32,%rax
+
+	aesdec		$rndkey0,$inout0
+	$movkey		-16($key,%rax),$rndkey0
+	jnz		.Locb_dec_loop1
+
+	aesdec		$rndkey1,$inout0
+	$movkey		16($key_),$rndkey1	# redundant in tail
+	mov		%r10,%rax		# restore twisted rounds
+
+	aesdeclast	$inout5,$inout0
+	ret
+.cfi_endproc
+.size	__ocb_decrypt1,.-__ocb_decrypt1
+___
+} }}
+
+########################################################################
+# void $PREFIX_cbc_encrypt (const void *inp, void *out,
+#			    size_t length, const AES_KEY *key,
+#			    unsigned char *ivp, const int enc);
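+#
+# A minimal C-side usage sketch (hypothetical caller; assumes the "aesni"
+# prefix and error-checked key setup):
+#
+#	unsigned char iv[16] = {0};		/* fresh IV per message */
+#	AES_KEY ks;
+#	aesni_set_encrypt_key(key, 128, &ks);
+#	aesni_cbc_encrypt(pt, ct, len, &ks, iv, 1);	/* enc=1: encrypt */
+#
+# On return ivp (here iv) holds the last ciphertext block, ready for
+# chaining into the next call.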
+{
+my $frame_size = 0x10 + ($win64?0xa0:0);	# used in decrypt
+my ($iv,$in0,$in1,$in2,$in3,$in4)=map("%xmm$_",(10..15));
+
+$code.=<<___;
+.globl	${PREFIX}_cbc_encrypt
+.type	${PREFIX}_cbc_encrypt,\@function,6
+.align	16
+${PREFIX}_cbc_encrypt:
+.cfi_startproc
+	test	$len,$len		# check length
+	jz	.Lcbc_ret
+
+	mov	240($key),$rnds_	# key->rounds
+	mov	$key,$key_		# backup $key
+	test	%r9d,%r9d		# 6th argument
+	jz	.Lcbc_decrypt
+#--------------------------- CBC ENCRYPT ------------------------------#
+	movups	($ivp),$inout0		# load iv as initial state
+	mov	$rnds_,$rounds
+	cmp	\$16,$len
+	jb	.Lcbc_enc_tail
+	sub	\$16,$len
+	jmp	.Lcbc_enc_loop
+.align	16
+.Lcbc_enc_loop:
+	movups	($inp),$inout1		# load input
+	lea	16($inp),$inp
+	#xorps	$inout1,$inout0
+___
+	&aesni_generate1("enc",$key,$rounds,$inout0,$inout1);
+$code.=<<___;
+	mov	$rnds_,$rounds		# restore $rounds
+	mov	$key_,$key		# restore $key
+	movups	$inout0,0($out)		# store output
+	lea	16($out),$out
+	sub	\$16,$len
+	jnc	.Lcbc_enc_loop
+	add	\$16,$len
+	jnz	.Lcbc_enc_tail
+	 pxor	$rndkey0,$rndkey0	# clear register bank
+	 pxor	$rndkey1,$rndkey1
+	movups	$inout0,($ivp)
+	 pxor	$inout0,$inout0
+	 pxor	$inout1,$inout1
+	jmp	.Lcbc_ret
+
+.Lcbc_enc_tail:
+	mov	$len,%rcx	# zaps $key
+	xchg	$inp,$out	# $inp is %rsi and $out is %rdi now
+	.long	0x9066A4F3	# rep movsb
+	mov	\$16,%ecx	# zero tail
+	sub	$len,%rcx
+	xor	%eax,%eax
+	.long	0x9066AAF3	# rep stosb
+	lea	-16(%rdi),%rdi	# rewind $out by 1 block
+	mov	$rnds_,$rounds	# restore $rounds
+	mov	%rdi,%rsi	# $inp and $out are the same
+	mov	$key_,$key	# restore $key
+	xor	$len,$len	# len=16
+	jmp	.Lcbc_enc_loop	# one more spin
+#--------------------------- CBC DECRYPT ------------------------------#
+.align	16
+.Lcbc_decrypt:
+	cmp	\$16,$len
+	jne	.Lcbc_decrypt_bulk
+
+	# handle single block without allocating stack frame,
+	# useful in ciphertext stealing mode
+	movdqu	($inp),$inout0		# load input
+	movdqu	($ivp),$inout1		# load iv
+	movdqa	$inout0,$inout2		# future iv
+___
+	&aesni_generate1("dec",$key,$rnds_);
+$code.=<<___;
+	 pxor	$rndkey0,$rndkey0	# clear register bank
+	 pxor	$rndkey1,$rndkey1
+	movdqu	$inout2,($ivp)		# store iv
+	xorps	$inout1,$inout0		# ^=iv
+	 pxor	$inout1,$inout1
+	movups	$inout0,($out)		# store output
+	 pxor	$inout0,$inout0
+	jmp	.Lcbc_ret
+.align	16
+.Lcbc_decrypt_bulk:
+	lea	(%rsp),%r11		# frame pointer
+.cfi_def_cfa_register	%r11
+	push	%rbp
+.cfi_push	%rbp
+	sub	\$$frame_size,%rsp
+	and	\$-16,%rsp	# Linux kernel stack can be incorrectly seeded
+___
+$code.=<<___ if ($win64);
+	movaps	%xmm6,0x10(%rsp)
+	movaps	%xmm7,0x20(%rsp)
+	movaps	%xmm8,0x30(%rsp)
+	movaps	%xmm9,0x40(%rsp)
+	movaps	%xmm10,0x50(%rsp)
+	movaps	%xmm11,0x60(%rsp)
+	movaps	%xmm12,0x70(%rsp)
+	movaps	%xmm13,0x80(%rsp)
+	movaps	%xmm14,0x90(%rsp)
+	movaps	%xmm15,0xa0(%rsp)
+.Lcbc_decrypt_body:
+___
+
+my $inp_=$key_="%rbp";			# reassign $key_
+
+$code.=<<___;
+	mov	$key,$key_		# [re-]backup $key [after reassignment]
+	movups	($ivp),$iv
+	mov	$rnds_,$rounds
+	cmp	\$0x50,$len
+	jbe	.Lcbc_dec_tail
+
+	$movkey	($key),$rndkey0
+	movdqu	0x00($inp),$inout0	# load input
+	movdqu	0x10($inp),$inout1
+	movdqa	$inout0,$in0
+	movdqu	0x20($inp),$inout2
+	movdqa	$inout1,$in1
+	movdqu	0x30($inp),$inout3
+	movdqa	$inout2,$in2
+	movdqu	0x40($inp),$inout4
+	movdqa	$inout3,$in3
+	movdqu	0x50($inp),$inout5
+	movdqa	$inout4,$in4
+	mov	OPENSSL_ia32cap_P+4(%rip),%r9d
+	cmp	\$0x70,$len
+	jbe	.Lcbc_dec_six_or_seven
+
+	and	\$`1<<26|1<<22`,%r9d	# isolate XSAVE+MOVBE
+	sub	\$0x50,$len		# $len is biased by -5*16
+	cmp	\$`1<<22`,%r9d		# check for MOVBE without XSAVE
+	je	.Lcbc_dec_loop6_enter	# [which denotes Atom Silvermont]
+	sub	\$0x20,$len		# $len is biased by -7*16
+	lea	0x70($key),$key		# size optimization
+	jmp	.Lcbc_dec_loop8_enter
+.align	16
+.Lcbc_dec_loop8:
+	movups	$inout7,($out)
+	lea	0x10($out),$out
+.Lcbc_dec_loop8_enter:
+	movdqu		0x60($inp),$inout6
+	pxor		$rndkey0,$inout0
+	movdqu		0x70($inp),$inout7
+	pxor		$rndkey0,$inout1
+	$movkey		0x10-0x70($key),$rndkey1
+	pxor		$rndkey0,$inout2
+	mov		\$-1,$inp_
+	cmp		\$0x70,$len	# are there at least 0x60 bytes ahead?
+	pxor		$rndkey0,$inout3
+	pxor		$rndkey0,$inout4
+	pxor		$rndkey0,$inout5
+	pxor		$rndkey0,$inout6
+
+	aesdec		$rndkey1,$inout0
+	pxor		$rndkey0,$inout7
+	$movkey		0x20-0x70($key),$rndkey0
+	aesdec		$rndkey1,$inout1
+	aesdec		$rndkey1,$inout2
+	aesdec		$rndkey1,$inout3
+	aesdec		$rndkey1,$inout4
+	aesdec		$rndkey1,$inout5
+	aesdec		$rndkey1,$inout6
+	adc		\$0,$inp_
+	and		\$128,$inp_
+	aesdec		$rndkey1,$inout7
+	add		$inp,$inp_
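+	# ($inp_ was seeded with -1 above; the adc/and pair turns it into
+	#  $inp+0x80 when $len>=0x70, i.e. when a full next batch lies
+	#  ahead, and into $inp itself otherwise, so the $in0-$in4 reloads
+	#  below pick up the proper "previous ciphertext" blocks)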
+	$movkey		0x30-0x70($key),$rndkey1
+___
+for($i=1;$i<12;$i++) {
+my $rndkeyx = ($i&1)?$rndkey0:$rndkey1;
+$code.=<<___	if ($i==7);
+	cmp		\$11,$rounds
+___
+$code.=<<___;
+	aesdec		$rndkeyx,$inout0
+	aesdec		$rndkeyx,$inout1
+	aesdec		$rndkeyx,$inout2
+	aesdec		$rndkeyx,$inout3
+	aesdec		$rndkeyx,$inout4
+	aesdec		$rndkeyx,$inout5
+	aesdec		$rndkeyx,$inout6
+	aesdec		$rndkeyx,$inout7
+	$movkey		`0x30+0x10*$i`-0x70($key),$rndkeyx
+___
+$code.=<<___	if ($i<6 || (!($i&1) && $i>7));
+	nop
+___
+$code.=<<___	if ($i==7);
+	jb		.Lcbc_dec_done
+___
+$code.=<<___	if ($i==9);
+	je		.Lcbc_dec_done
+___
+$code.=<<___	if ($i==11);
+	jmp		.Lcbc_dec_done
+___
+}
+$code.=<<___;
+.align	16
+.Lcbc_dec_done:
+	aesdec		$rndkey1,$inout0
+	aesdec		$rndkey1,$inout1
+	pxor		$rndkey0,$iv
+	pxor		$rndkey0,$in0
+	aesdec		$rndkey1,$inout2
+	aesdec		$rndkey1,$inout3
+	pxor		$rndkey0,$in1
+	pxor		$rndkey0,$in2
+	aesdec		$rndkey1,$inout4
+	aesdec		$rndkey1,$inout5
+	pxor		$rndkey0,$in3
+	pxor		$rndkey0,$in4
+	aesdec		$rndkey1,$inout6
+	aesdec		$rndkey1,$inout7
+	movdqu		0x50($inp),$rndkey1
+
+	aesdeclast	$iv,$inout0
+	movdqu		0x60($inp),$iv		# borrow $iv
+	pxor		$rndkey0,$rndkey1
+	aesdeclast	$in0,$inout1
+	pxor		$rndkey0,$iv
+	movdqu		0x70($inp),$rndkey0	# next IV
+	aesdeclast	$in1,$inout2
+	lea		0x80($inp),$inp
+	movdqu		0x00($inp_),$in0
+	aesdeclast	$in2,$inout3
+	aesdeclast	$in3,$inout4
+	movdqu		0x10($inp_),$in1
+	movdqu		0x20($inp_),$in2
+	aesdeclast	$in4,$inout5
+	aesdeclast	$rndkey1,$inout6
+	movdqu		0x30($inp_),$in3
+	movdqu		0x40($inp_),$in4
+	aesdeclast	$iv,$inout7
+	movdqa		$rndkey0,$iv		# return $iv
+	movdqu		0x50($inp_),$rndkey1
+	$movkey		-0x70($key),$rndkey0
+
+	movups		$inout0,($out)		# store output
+	movdqa		$in0,$inout0
+	movups		$inout1,0x10($out)
+	movdqa		$in1,$inout1
+	movups		$inout2,0x20($out)
+	movdqa		$in2,$inout2
+	movups		$inout3,0x30($out)
+	movdqa		$in3,$inout3
+	movups		$inout4,0x40($out)
+	movdqa		$in4,$inout4
+	movups		$inout5,0x50($out)
+	movdqa		$rndkey1,$inout5
+	movups		$inout6,0x60($out)
+	lea		0x70($out),$out
+
+	sub	\$0x80,$len
+	ja	.Lcbc_dec_loop8
+
+	movaps	$inout7,$inout0
+	lea	-0x70($key),$key
+	add	\$0x70,$len
+	jle	.Lcbc_dec_clear_tail_collected
+	movups	$inout7,($out)
+	lea	0x10($out),$out
+	cmp	\$0x50,$len
+	jbe	.Lcbc_dec_tail
+
+	movaps	$in0,$inout0
+.Lcbc_dec_six_or_seven:
+	cmp	\$0x60,$len
+	ja	.Lcbc_dec_seven
+
+	movaps	$inout5,$inout6
+	call	_aesni_decrypt6
+	pxor	$iv,$inout0		# ^= IV
+	movaps	$inout6,$iv
+	pxor	$in0,$inout1
+	movdqu	$inout0,($out)
+	pxor	$in1,$inout2
+	movdqu	$inout1,0x10($out)
+	 pxor	$inout1,$inout1		# clear register bank
+	pxor	$in2,$inout3
+	movdqu	$inout2,0x20($out)
+	 pxor	$inout2,$inout2
+	pxor	$in3,$inout4
+	movdqu	$inout3,0x30($out)
+	 pxor	$inout3,$inout3
+	pxor	$in4,$inout5
+	movdqu	$inout4,0x40($out)
+	 pxor	$inout4,$inout4
+	lea	0x50($out),$out
+	movdqa	$inout5,$inout0
+	 pxor	$inout5,$inout5
+	jmp	.Lcbc_dec_tail_collected
+
+.align	16
+.Lcbc_dec_seven:
+	movups	0x60($inp),$inout6
+	xorps	$inout7,$inout7
+	call	_aesni_decrypt8
+	movups	0x50($inp),$inout7
+	pxor	$iv,$inout0		# ^= IV
+	movups	0x60($inp),$iv
+	pxor	$in0,$inout1
+	movdqu	$inout0,($out)
+	pxor	$in1,$inout2
+	movdqu	$inout1,0x10($out)
+	 pxor	$inout1,$inout1		# clear register bank
+	pxor	$in2,$inout3
+	movdqu	$inout2,0x20($out)
+	 pxor	$inout2,$inout2
+	pxor	$in3,$inout4
+	movdqu	$inout3,0x30($out)
+	 pxor	$inout3,$inout3
+	pxor	$in4,$inout5
+	movdqu	$inout4,0x40($out)
+	 pxor	$inout4,$inout4
+	pxor	$inout7,$inout6
+	movdqu	$inout5,0x50($out)
+	 pxor	$inout5,$inout5
+	lea	0x60($out),$out
+	movdqa	$inout6,$inout0
+	 pxor	$inout6,$inout6
+	 pxor	$inout7,$inout7
+	jmp	.Lcbc_dec_tail_collected
+
+.align	16
+.Lcbc_dec_loop6:
+	movups	$inout5,($out)
+	lea	0x10($out),$out
+	movdqu	0x00($inp),$inout0	# load input
+	movdqu	0x10($inp),$inout1
+	movdqa	$inout0,$in0
+	movdqu	0x20($inp),$inout2
+	movdqa	$inout1,$in1
+	movdqu	0x30($inp),$inout3
+	movdqa	$inout2,$in2
+	movdqu	0x40($inp),$inout4
+	movdqa	$inout3,$in3
+	movdqu	0x50($inp),$inout5
+	movdqa	$inout4,$in4
+.Lcbc_dec_loop6_enter:
+	lea	0x60($inp),$inp
+	movdqa	$inout5,$inout6
+
+	call	_aesni_decrypt6
+
+	pxor	$iv,$inout0		# ^= IV
+	movdqa	$inout6,$iv
+	pxor	$in0,$inout1
+	movdqu	$inout0,($out)
+	pxor	$in1,$inout2
+	movdqu	$inout1,0x10($out)
+	pxor	$in2,$inout3
+	movdqu	$inout2,0x20($out)
+	pxor	$in3,$inout4
+	mov	$key_,$key
+	movdqu	$inout3,0x30($out)
+	pxor	$in4,$inout5
+	mov	$rnds_,$rounds
+	movdqu	$inout4,0x40($out)
+	lea	0x50($out),$out
+	sub	\$0x60,$len
+	ja	.Lcbc_dec_loop6
+
+	movdqa	$inout5,$inout0
+	add	\$0x50,$len
+	jle	.Lcbc_dec_clear_tail_collected
+	movups	$inout5,($out)
+	lea	0x10($out),$out
+
+.Lcbc_dec_tail:
+	movups	($inp),$inout0
+	sub	\$0x10,$len
+	jbe	.Lcbc_dec_one		# $len is 1*16 or less
+
+	movups	0x10($inp),$inout1
+	movaps	$inout0,$in0
+	sub	\$0x10,$len
+	jbe	.Lcbc_dec_two		# $len is 2*16 or less
+
+	movups	0x20($inp),$inout2
+	movaps	$inout1,$in1
+	sub	\$0x10,$len
+	jbe	.Lcbc_dec_three		# $len is 3*16 or less
+
+	movups	0x30($inp),$inout3
+	movaps	$inout2,$in2
+	sub	\$0x10,$len
+	jbe	.Lcbc_dec_four		# $len is 4*16 or less
+
+	movups	0x40($inp),$inout4	# $len is 5*16 or less
+	movaps	$inout3,$in3
+	movaps	$inout4,$in4
+	xorps	$inout5,$inout5
+	call	_aesni_decrypt6
+	pxor	$iv,$inout0
+	movaps	$in4,$iv
+	pxor	$in0,$inout1
+	movdqu	$inout0,($out)
+	pxor	$in1,$inout2
+	movdqu	$inout1,0x10($out)
+	 pxor	$inout1,$inout1		# clear register bank
+	pxor	$in2,$inout3
+	movdqu	$inout2,0x20($out)
+	 pxor	$inout2,$inout2
+	pxor	$in3,$inout4
+	movdqu	$inout3,0x30($out)
+	 pxor	$inout3,$inout3
+	lea	0x40($out),$out
+	movdqa	$inout4,$inout0
+	 pxor	$inout4,$inout4
+	 pxor	$inout5,$inout5
+	sub	\$0x10,$len
+	jmp	.Lcbc_dec_tail_collected
+
+.align	16
+.Lcbc_dec_one:
+	movaps	$inout0,$in0
+___
+	&aesni_generate1("dec",$key,$rounds);
+$code.=<<___;
+	xorps	$iv,$inout0
+	movaps	$in0,$iv
+	jmp	.Lcbc_dec_tail_collected
+.align	16
+.Lcbc_dec_two:
+	movaps	$inout1,$in1
+	call	_aesni_decrypt2
+	pxor	$iv,$inout0
+	movaps	$in1,$iv
+	pxor	$in0,$inout1
+	movdqu	$inout0,($out)
+	movdqa	$inout1,$inout0
+	 pxor	$inout1,$inout1		# clear register bank
+	lea	0x10($out),$out
+	jmp	.Lcbc_dec_tail_collected
+.align	16
+.Lcbc_dec_three:
+	movaps	$inout2,$in2
+	call	_aesni_decrypt3
+	pxor	$iv,$inout0
+	movaps	$in2,$iv
+	pxor	$in0,$inout1
+	movdqu	$inout0,($out)
+	pxor	$in1,$inout2
+	movdqu	$inout1,0x10($out)
+	 pxor	$inout1,$inout1		# clear register bank
+	movdqa	$inout2,$inout0
+	 pxor	$inout2,$inout2
+	lea	0x20($out),$out
+	jmp	.Lcbc_dec_tail_collected
+.align	16
+.Lcbc_dec_four:
+	movaps	$inout3,$in3
+	call	_aesni_decrypt4
+	pxor	$iv,$inout0
+	movaps	$in3,$iv
+	pxor	$in0,$inout1
+	movdqu	$inout0,($out)
+	pxor	$in1,$inout2
+	movdqu	$inout1,0x10($out)
+	 pxor	$inout1,$inout1		# clear register bank
+	pxor	$in2,$inout3
+	movdqu	$inout2,0x20($out)
+	 pxor	$inout2,$inout2
+	movdqa	$inout3,$inout0
+	 pxor	$inout3,$inout3
+	lea	0x30($out),$out
+	jmp	.Lcbc_dec_tail_collected
+
+.align	16
+.Lcbc_dec_clear_tail_collected:
+	pxor	$inout1,$inout1		# clear register bank
+	pxor	$inout2,$inout2
+	pxor	$inout3,$inout3
+___
+$code.=<<___ if (!$win64);
+	pxor	$inout4,$inout4		# %xmm6..9
+	pxor	$inout5,$inout5
+	pxor	$inout6,$inout6
+	pxor	$inout7,$inout7
+___
+$code.=<<___;
+.Lcbc_dec_tail_collected:
+	movups	$iv,($ivp)
+	and	\$15,$len
+	jnz	.Lcbc_dec_tail_partial
+	movups	$inout0,($out)
+	pxor	$inout0,$inout0
+	jmp	.Lcbc_dec_ret
+.align	16
+.Lcbc_dec_tail_partial:
+	movaps	$inout0,(%rsp)
+	pxor	$inout0,$inout0
+	mov	\$16,%rcx
+	mov	$out,%rdi
+	sub	$len,%rcx
+	lea	(%rsp),%rsi
+	.long	0x9066A4F3		# rep movsb
+	movdqa	$inout0,(%rsp)
+
+.Lcbc_dec_ret:
+	xorps	$rndkey0,$rndkey0	# %xmm0
+	pxor	$rndkey1,$rndkey1
+___
+$code.=<<___ if ($win64);
+	movaps	0x10(%rsp),%xmm6
+	movaps	%xmm0,0x10(%rsp)	# clear stack
+	movaps	0x20(%rsp),%xmm7
+	movaps	%xmm0,0x20(%rsp)
+	movaps	0x30(%rsp),%xmm8
+	movaps	%xmm0,0x30(%rsp)
+	movaps	0x40(%rsp),%xmm9
+	movaps	%xmm0,0x40(%rsp)
+	movaps	0x50(%rsp),%xmm10
+	movaps	%xmm0,0x50(%rsp)
+	movaps	0x60(%rsp),%xmm11
+	movaps	%xmm0,0x60(%rsp)
+	movaps	0x70(%rsp),%xmm12
+	movaps	%xmm0,0x70(%rsp)
+	movaps	0x80(%rsp),%xmm13
+	movaps	%xmm0,0x80(%rsp)
+	movaps	0x90(%rsp),%xmm14
+	movaps	%xmm0,0x90(%rsp)
+	movaps	0xa0(%rsp),%xmm15
+	movaps	%xmm0,0xa0(%rsp)
+___
+$code.=<<___;
+	mov	-8(%r11),%rbp
+.cfi_restore	%rbp
+	lea	(%r11),%rsp
+.cfi_def_cfa_register	%rsp
+.Lcbc_ret:
+	ret
+.cfi_endproc
+.size	${PREFIX}_cbc_encrypt,.-${PREFIX}_cbc_encrypt
+___
+} 
+# int ${PREFIX}_set_decrypt_key(const unsigned char *inp,
+#				int bits, AES_KEY *key)
+#
+# input:	$inp	user-supplied key
+#		$bits	$inp length in bits
+#		$key	pointer to key schedule
+# output:	%eax	0 denoting success, -1 or -2 - failure (see C)
+#		*$key	key schedule
+#
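+# In outline (a sketch, not the code below verbatim): the encrypt
+# schedule is built first, then converted for the Equivalent Inverse
+# Cipher by
+#
+#	reversing the order of the round keys, and
+#	applying aesimc (InvMixColumns) to every round key except the
+#	outermost two.
+#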
+{ my ($inp,$bits,$key) = @_4args;
+  $bits =~ s/%r/%e/;
+
+$code.=<<___;
+.globl	${PREFIX}_set_decrypt_key
+.type	${PREFIX}_set_decrypt_key,\@abi-omnipotent
+.align	16
+${PREFIX}_set_decrypt_key:
+.cfi_startproc
+	.byte	0x48,0x83,0xEC,0x08	# sub rsp,8
+.cfi_adjust_cfa_offset	8
+	call	__aesni_set_encrypt_key
+	shl	\$4,$bits		# rounds-1 after __aesni_set_encrypt_key
+	test	%eax,%eax
+	jnz	.Ldec_key_ret
+	lea	16($key,$bits),$inp	# points at the end of key schedule
+
+	$movkey	($key),%xmm0		# just swap
+	$movkey	($inp),%xmm1
+	$movkey	%xmm0,($inp)
+	$movkey	%xmm1,($key)
+	lea	16($key),$key
+	lea	-16($inp),$inp
+
+.Ldec_key_inverse:
+	$movkey	($key),%xmm0		# swap and inverse
+	$movkey	($inp),%xmm1
+	aesimc	%xmm0,%xmm0
+	aesimc	%xmm1,%xmm1
+	lea	16($key),$key
+	lea	-16($inp),$inp
+	$movkey	%xmm0,16($inp)
+	$movkey	%xmm1,-16($key)
+	cmp	$key,$inp
+	ja	.Ldec_key_inverse
+
+	$movkey	($key),%xmm0		# inverse middle
+	aesimc	%xmm0,%xmm0
+	pxor	%xmm1,%xmm1
+	$movkey	%xmm0,($inp)
+	pxor	%xmm0,%xmm0
+.Ldec_key_ret:
+	add	\$8,%rsp
+.cfi_adjust_cfa_offset	-8
+	ret
+.cfi_endproc
+.LSEH_end_set_decrypt_key:
+.size	${PREFIX}_set_decrypt_key,.-${PREFIX}_set_decrypt_key
+___
+
+# This is based on a submission from Intel by
+#	Huang Ying
+#	Vinodh Gopal
+#	Kahraman Akdemir
+#
+# Aggressively optimized with respect to aeskeygenassist's critical path,
+# and confined to %xmm0-5 to meet the Win64 ABI requirement.
+#
+# int ${PREFIX}_set_encrypt_key(const unsigned char *inp,
+#				int bits, AES_KEY * const key);
+#
+# input:	$inp	user-supplied key
+#		$bits	$inp length in bits
+#		$key	pointer to key schedule
+# output:	%eax	0 denoting success, -1 or -2 - failure (see C)
+#		$bits	rounds-1 (used in aesni_set_decrypt_key)
+#		*$key	key schedule
+#		$key	pointer to key schedule (used in
+#			aesni_set_decrypt_key)
+#
+# The subroutine is frame-less, which means that only volatile registers
+# are used. Note that it's declared "abi-omnipotent", which means that
+# the set of volatile registers is smaller on Windows.
+#
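+# For reference, one AES-128 expansion step as implemented by
+# .Lkey_expansion_128 below is, in FIPS-197 notation (w[] being 32-bit
+# words; a sketch of the recurrence, not the exact SIMD data flow):
+#
+#	t       = SubWord(RotWord(w[4i+3])) ^ rcon	# aeskeygenassist
+#	w[4i+4] = w[4i+0] ^ t
+#	w[4i+5] = w[4i+1] ^ w[4i+4]
+#	w[4i+6] = w[4i+2] ^ w[4i+5]
+#	w[4i+7] = w[4i+3] ^ w[4i+6]
+#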
+$code.=<<___;
+.globl	${PREFIX}_set_encrypt_key
+.type	${PREFIX}_set_encrypt_key,\@abi-omnipotent
+.align	16
+${PREFIX}_set_encrypt_key:
+__aesni_set_encrypt_key:
+.cfi_startproc
+	.byte	0x48,0x83,0xEC,0x08	# sub rsp,8
+.cfi_adjust_cfa_offset	8
+	mov	\$-1,%rax
+	test	$inp,$inp
+	jz	.Lenc_key_ret
+	test	$key,$key
+	jz	.Lenc_key_ret
+
+	mov	\$`1<<28|1<<11`,%r10d	# AVX and XOP bits
+	movups	($inp),%xmm0		# pull first 128 bits of *userKey
+	xorps	%xmm4,%xmm4		# low dword of xmm4 is assumed 0
+	and	OPENSSL_ia32cap_P+4(%rip),%r10d
+	lea	16($key),%rax		# %rax is used as modifiable copy of $key
+	cmp	\$256,$bits
+	je	.L14rounds
+	cmp	\$192,$bits
+	je	.L12rounds
+	cmp	\$128,$bits
+	jne	.Lbad_keybits
+
+.L10rounds:
+	mov	\$9,$bits			# 10 rounds for 128-bit key
+	cmp	\$`1<<28`,%r10d			# AVX, but no XOP
+	je	.L10rounds_alt
+
+	$movkey	%xmm0,($key)			# round 0
+	aeskeygenassist	\$0x1,%xmm0,%xmm1	# round 1
+	call		.Lkey_expansion_128_cold
+	aeskeygenassist	\$0x2,%xmm0,%xmm1	# round 2
+	call		.Lkey_expansion_128
+	aeskeygenassist	\$0x4,%xmm0,%xmm1	# round 3
+	call		.Lkey_expansion_128
+	aeskeygenassist	\$0x8,%xmm0,%xmm1	# round 4
+	call		.Lkey_expansion_128
+	aeskeygenassist	\$0x10,%xmm0,%xmm1	# round 5
+	call		.Lkey_expansion_128
+	aeskeygenassist	\$0x20,%xmm0,%xmm1	# round 6
+	call		.Lkey_expansion_128
+	aeskeygenassist	\$0x40,%xmm0,%xmm1	# round 7
+	call		.Lkey_expansion_128
+	aeskeygenassist	\$0x80,%xmm0,%xmm1	# round 8
+	call		.Lkey_expansion_128
+	aeskeygenassist	\$0x1b,%xmm0,%xmm1	# round 9
+	call		.Lkey_expansion_128
+	aeskeygenassist	\$0x36,%xmm0,%xmm1	# round 10
+	call		.Lkey_expansion_128
+	$movkey	%xmm0,(%rax)
+	mov	$bits,80(%rax)	# 240(%rdx)
+	xor	%eax,%eax
+	jmp	.Lenc_key_ret
+
+.align	16
+.L10rounds_alt:
+	movdqa	.Lkey_rotate(%rip),%xmm5
+	mov	\$8,%r10d
+	movdqa	.Lkey_rcon1(%rip),%xmm4
+	movdqa	%xmm0,%xmm2
+	movdqu	%xmm0,($key)
+	jmp	.Loop_key128
+
+.align	16
+.Loop_key128:
+	pshufb		%xmm5,%xmm0
+	aesenclast	%xmm4,%xmm0
+	pslld		\$1,%xmm4
+	lea		16(%rax),%rax
+
+	movdqa		%xmm2,%xmm3
+	pslldq		\$4,%xmm2
+	pxor		%xmm2,%xmm3
+	pslldq		\$4,%xmm2
+	pxor		%xmm2,%xmm3
+	pslldq		\$4,%xmm2
+	pxor		%xmm3,%xmm2
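+	# (the pslldq/pxor ladder above computes the running XOR of the
+	#  four previous round-key words, i.e. w[i] ^= w[i-1] per lane)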
+
+	pxor		%xmm2,%xmm0
+	movdqu		%xmm0,-16(%rax)
+	movdqa		%xmm0,%xmm2
+
+	dec	%r10d
+	jnz	.Loop_key128
+
+	movdqa		.Lkey_rcon1b(%rip),%xmm4
+
+	pshufb		%xmm5,%xmm0
+	aesenclast	%xmm4,%xmm0
+	pslld		\$1,%xmm4
+
+	movdqa		%xmm2,%xmm3
+	pslldq		\$4,%xmm2
+	pxor		%xmm2,%xmm3
+	pslldq		\$4,%xmm2
+	pxor		%xmm2,%xmm3
+	pslldq		\$4,%xmm2
+	pxor		%xmm3,%xmm2
+
+	pxor		%xmm2,%xmm0
+	movdqu		%xmm0,(%rax)
+
+	movdqa		%xmm0,%xmm2
+	pshufb		%xmm5,%xmm0
+	aesenclast	%xmm4,%xmm0
+
+	movdqa		%xmm2,%xmm3
+	pslldq		\$4,%xmm2
+	pxor		%xmm2,%xmm3
+	pslldq		\$4,%xmm2
+	pxor		%xmm2,%xmm3
+	pslldq		\$4,%xmm2
+	pxor		%xmm3,%xmm2
+
+	pxor		%xmm2,%xmm0
+	movdqu		%xmm0,16(%rax)
+
+	mov	$bits,96(%rax)	# 240($key)
+	xor	%eax,%eax
+	jmp	.Lenc_key_ret
+
+.align	16
+.L12rounds:
+	movq	16($inp),%xmm2			# remaining 1/3 of *userKey
+	mov	\$11,$bits			# 12 rounds for 192
+	cmp	\$`1<<28`,%r10d			# AVX, but no XOP
+	je	.L12rounds_alt
+
+	$movkey	%xmm0,($key)			# round 0
+	aeskeygenassist	\$0x1,%xmm2,%xmm1	# round 1,2
+	call		.Lkey_expansion_192a_cold
+	aeskeygenassist	\$0x2,%xmm2,%xmm1	# round 2,3
+	call		.Lkey_expansion_192b
+	aeskeygenassist	\$0x4,%xmm2,%xmm1	# round 4,5
+	call		.Lkey_expansion_192a
+	aeskeygenassist	\$0x8,%xmm2,%xmm1	# round 5,6
+	call		.Lkey_expansion_192b
+	aeskeygenassist	\$0x10,%xmm2,%xmm1	# round 7,8
+	call		.Lkey_expansion_192a
+	aeskeygenassist	\$0x20,%xmm2,%xmm1	# round 8,9
+	call		.Lkey_expansion_192b
+	aeskeygenassist	\$0x40,%xmm2,%xmm1	# round 10,11
+	call		.Lkey_expansion_192a
+	aeskeygenassist	\$0x80,%xmm2,%xmm1	# round 11,12
+	call		.Lkey_expansion_192b
+	$movkey	%xmm0,(%rax)
+	mov	$bits,48(%rax)	# 240(%rdx)
+	xor	%rax, %rax
+	jmp	.Lenc_key_ret
+
+.align	16
+.L12rounds_alt:
+	movdqa	.Lkey_rotate192(%rip),%xmm5
+	movdqa	.Lkey_rcon1(%rip),%xmm4
+	mov	\$8,%r10d
+	movdqu	%xmm0,($key)
+	jmp	.Loop_key192
+
+.align	16
+.Loop_key192:
+	movq		%xmm2,0(%rax)
+	movdqa		%xmm2,%xmm1
+	pshufb		%xmm5,%xmm2
+	aesenclast	%xmm4,%xmm2
+	pslld		\$1, %xmm4
+	lea		24(%rax),%rax
+
+	movdqa		%xmm0,%xmm3
+	pslldq		\$4,%xmm0
+	pxor		%xmm0,%xmm3
+	pslldq		\$4,%xmm0
+	pxor		%xmm0,%xmm3
+	pslldq		\$4,%xmm0
+	pxor		%xmm3,%xmm0
+
+	pshufd		\$0xff,%xmm0,%xmm3
+	pxor		%xmm1,%xmm3
+	pslldq		\$4,%xmm1
+	pxor		%xmm1,%xmm3
+
+	pxor		%xmm2,%xmm0
+	pxor		%xmm3,%xmm2
+	movdqu		%xmm0,-16(%rax)
+
+	dec	%r10d
+	jnz	.Loop_key192
+
+	mov	$bits,32(%rax)	# 240($key)
+	xor	%eax,%eax
+	jmp	.Lenc_key_ret
+
+.align	16
+.L14rounds:
+	movups	16($inp),%xmm2			# remaining half of *userKey
+	mov	\$13,$bits			# 14 rounds for 256
+	lea	16(%rax),%rax
+	cmp	\$`1<<28`,%r10d			# AVX, but no XOP
+	je	.L14rounds_alt
+
+	$movkey	%xmm0,($key)			# round 0
+	$movkey	%xmm2,16($key)			# round 1
+	aeskeygenassist	\$0x1,%xmm2,%xmm1	# round 2
+	call		.Lkey_expansion_256a_cold
+	aeskeygenassist	\$0x1,%xmm0,%xmm1	# round 3
+	call		.Lkey_expansion_256b
+	aeskeygenassist	\$0x2,%xmm2,%xmm1	# round 4
+	call		.Lkey_expansion_256a
+	aeskeygenassist	\$0x2,%xmm0,%xmm1	# round 5
+	call		.Lkey_expansion_256b
+	aeskeygenassist	\$0x4,%xmm2,%xmm1	# round 6
+	call		.Lkey_expansion_256a
+	aeskeygenassist	\$0x4,%xmm0,%xmm1	# round 7
+	call		.Lkey_expansion_256b
+	aeskeygenassist	\$0x8,%xmm2,%xmm1	# round 8
+	call		.Lkey_expansion_256a
+	aeskeygenassist	\$0x8,%xmm0,%xmm1	# round 9
+	call		.Lkey_expansion_256b
+	aeskeygenassist	\$0x10,%xmm2,%xmm1	# round 10
+	call		.Lkey_expansion_256a
+	aeskeygenassist	\$0x10,%xmm0,%xmm1	# round 11
+	call		.Lkey_expansion_256b
+	aeskeygenassist	\$0x20,%xmm2,%xmm1	# round 12
+	call		.Lkey_expansion_256a
+	aeskeygenassist	\$0x20,%xmm0,%xmm1	# round 13
+	call		.Lkey_expansion_256b
+	aeskeygenassist	\$0x40,%xmm2,%xmm1	# round 14
+	call		.Lkey_expansion_256a
+	$movkey	%xmm0,(%rax)
+	mov	$bits,16(%rax)	# 240(%rdx)
+	xor	%rax,%rax
+	jmp	.Lenc_key_ret
+
+.align	16
+.L14rounds_alt:
+	movdqa	.Lkey_rotate(%rip),%xmm5
+	movdqa	.Lkey_rcon1(%rip),%xmm4
+	mov	\$7,%r10d
+	movdqu	%xmm0,0($key)
+	movdqa	%xmm2,%xmm1
+	movdqu	%xmm2,16($key)
+	jmp	.Loop_key256
+
+.align	16
+.Loop_key256:
+	pshufb		%xmm5,%xmm2
+	aesenclast	%xmm4,%xmm2
+
+	movdqa		%xmm0,%xmm3
+	pslldq		\$4,%xmm0
+	pxor		%xmm0,%xmm3
+	pslldq		\$4,%xmm0
+	pxor		%xmm0,%xmm3
+	pslldq		\$4,%xmm0
+	pxor		%xmm3,%xmm0
+	pslld		\$1,%xmm4
+
+	pxor		%xmm2,%xmm0
+	movdqu		%xmm0,(%rax)
+
+	dec	%r10d
+	jz	.Ldone_key256
+
+	pshufd		\$0xff,%xmm0,%xmm2
+	pxor		%xmm3,%xmm3
+	aesenclast	%xmm3,%xmm2
+
+	movdqa		%xmm1,%xmm3
+	pslldq		\$4,%xmm1
+	pxor		%xmm1,%xmm3
+	pslldq		\$4,%xmm1
+	pxor		%xmm1,%xmm3
+	pslldq		\$4,%xmm1
+	pxor		%xmm3,%xmm1
+
+	pxor		%xmm1,%xmm2
+	movdqu		%xmm2,16(%rax)
+	lea		32(%rax),%rax
+	movdqa		%xmm2,%xmm1
+
+	jmp	.Loop_key256
+
+.Ldone_key256:
+	mov	$bits,16(%rax)	# 240($key)
+	xor	%eax,%eax
+	jmp	.Lenc_key_ret
+
+.align	16
+.Lbad_keybits:
+	mov	\$-2,%rax
+.Lenc_key_ret:
+	pxor	%xmm0,%xmm0
+	pxor	%xmm1,%xmm1
+	pxor	%xmm2,%xmm2
+	pxor	%xmm3,%xmm3
+	pxor	%xmm4,%xmm4
+	pxor	%xmm5,%xmm5
+	add	\$8,%rsp
+.cfi_adjust_cfa_offset	-8
+	ret
+.LSEH_end_set_encrypt_key:
+
+.align	16
+.Lkey_expansion_128:
+	$movkey	%xmm0,(%rax)
+	lea	16(%rax),%rax
+.Lkey_expansion_128_cold:
+	shufps	\$0b00010000,%xmm0,%xmm4
+	xorps	%xmm4, %xmm0
+	shufps	\$0b10001100,%xmm0,%xmm4
+	xorps	%xmm4, %xmm0
+	shufps	\$0b11111111,%xmm1,%xmm1	# critical path
+	xorps	%xmm1,%xmm0
+	ret
+
+.align 16
+.Lkey_expansion_192a:
+	$movkey	%xmm0,(%rax)
+	lea	16(%rax),%rax
+.Lkey_expansion_192a_cold:
+	movaps	%xmm2, %xmm5
+.Lkey_expansion_192b_warm:
+	shufps	\$0b00010000,%xmm0,%xmm4
+	movdqa	%xmm2,%xmm3
+	xorps	%xmm4,%xmm0
+	shufps	\$0b10001100,%xmm0,%xmm4
+	pslldq	\$4,%xmm3
+	xorps	%xmm4,%xmm0
+	pshufd	\$0b01010101,%xmm1,%xmm1	# critical path
+	pxor	%xmm3,%xmm2
+	pxor	%xmm1,%xmm0
+	pshufd	\$0b11111111,%xmm0,%xmm3
+	pxor	%xmm3,%xmm2
+	ret
+
+.align 16
+.Lkey_expansion_192b:
+	movaps	%xmm0,%xmm3
+	shufps	\$0b01000100,%xmm0,%xmm5
+	$movkey	%xmm5,(%rax)
+	shufps	\$0b01001110,%xmm2,%xmm3
+	$movkey	%xmm3,16(%rax)
+	lea	32(%rax),%rax
+	jmp	.Lkey_expansion_192b_warm
+
+.align	16
+.Lkey_expansion_256a:
+	$movkey	%xmm2,(%rax)
+	lea	16(%rax),%rax
+.Lkey_expansion_256a_cold:
+	shufps	\$0b00010000,%xmm0,%xmm4
+	xorps	%xmm4,%xmm0
+	shufps	\$0b10001100,%xmm0,%xmm4
+	xorps	%xmm4,%xmm0
+	shufps	\$0b11111111,%xmm1,%xmm1	# critical path
+	xorps	%xmm1,%xmm0
+	ret
+
+.align 16
+.Lkey_expansion_256b:
+	$movkey	%xmm0,(%rax)
+	lea	16(%rax),%rax
+
+	shufps	\$0b00010000,%xmm2,%xmm4
+	xorps	%xmm4,%xmm2
+	shufps	\$0b10001100,%xmm2,%xmm4
+	xorps	%xmm4,%xmm2
+	shufps	\$0b10101010,%xmm1,%xmm1	# critical path
+	xorps	%xmm1,%xmm2
+	ret
+.cfi_endproc
+.size	${PREFIX}_set_encrypt_key,.-${PREFIX}_set_encrypt_key
+.size	__aesni_set_encrypt_key,.-__aesni_set_encrypt_key
+___
+}
+
+$code.=<<___;
+.align	64
+.Lbswap_mask:
+	.byte	15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0
+.Lincrement32:
+	.long	6,6,6,0
+.Lincrement64:
+	.long	1,0,0,0
+.Lxts_magic:
+	.long	0x87,0,1,0
+.Lincrement1:
+	.byte	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1
+.Lkey_rotate:
+	.long	0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d
+.Lkey_rotate192:
+	.long	0x04070605,0x04070605,0x04070605,0x04070605
+.Lkey_rcon1:
+	.long	1,1,1,1
+.Lkey_rcon1b:
+	.long	0x1b,0x1b,0x1b,0x1b
+
+.asciz  "AES for Intel AES-NI, CRYPTOGAMS by <appro\@openssl.org>"
+.align	64
+___
+
+# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
+#		CONTEXT *context,DISPATCHER_CONTEXT *disp)
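+#
+# Each handler below restores the non-volatile state its frame owned at
+# the point of an asynchronous exception: it locates the %xmm save area
+# via the HandlerData labels, copies it back into CONTEXT, recovers the
+# saved GPRs, and defers to RtlVirtualUnwind, returning
+# ExceptionContinueSearch.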
+if ($win64) {
+$rec="%rcx";
+$frame="%rdx";
+$context="%r8";
+$disp="%r9";
+
+$code.=<<___;
+.extern	__imp_RtlVirtualUnwind
+___
+$code.=<<___ if ($PREFIX eq "aesni");
+.type	ecb_ccm64_se_handler,\@abi-omnipotent
+.align	16
+ecb_ccm64_se_handler:
+	push	%rsi
+	push	%rdi
+	push	%rbx
+	push	%rbp
+	push	%r12
+	push	%r13
+	push	%r14
+	push	%r15
+	pushfq
+	sub	\$64,%rsp
+
+	mov	120($context),%rax	# pull context->Rax
+	mov	248($context),%rbx	# pull context->Rip
+
+	mov	8($disp),%rsi		# disp->ImageBase
+	mov	56($disp),%r11		# disp->HandlerData
+
+	mov	0(%r11),%r10d		# HandlerData[0]
+	lea	(%rsi,%r10),%r10	# prologue label
+	cmp	%r10,%rbx		# context->Rip<prologue label
+	jb	.Lcommon_seh_tail
+
+	mov	152($context),%rax	# pull context->Rsp
+
+	mov	4(%r11),%r10d		# HandlerData[1]
+	lea	(%rsi,%r10),%r10	# epilogue label
+	cmp	%r10,%rbx		# context->Rip>=epilogue label
+	jae	.Lcommon_seh_tail
+
+	lea	0(%rax),%rsi		# %xmm save area
+	lea	512($context),%rdi	# &context.Xmm6
+	mov	\$8,%ecx		# 4*sizeof(%xmm0)/sizeof(%rax)
+	.long	0xa548f3fc		# cld; rep movsq
+	lea	0x58(%rax),%rax		# adjust stack pointer
+
+	jmp	.Lcommon_seh_tail
+.size	ecb_ccm64_se_handler,.-ecb_ccm64_se_handler
+
+.type	ctr_xts_se_handler,\@abi-omnipotent
+.align	16
+ctr_xts_se_handler:
+	push	%rsi
+	push	%rdi
+	push	%rbx
+	push	%rbp
+	push	%r12
+	push	%r13
+	push	%r14
+	push	%r15
+	pushfq
+	sub	\$64,%rsp
+
+	mov	120($context),%rax	# pull context->Rax
+	mov	248($context),%rbx	# pull context->Rip
+
+	mov	8($disp),%rsi		# disp->ImageBase
+	mov	56($disp),%r11		# disp->HandlerData
+
+	mov	0(%r11),%r10d		# HandlerData[0]
+	lea	(%rsi,%r10),%r10	# prologue label
+	cmp	%r10,%rbx		# context->Rip<prologue label
+	jb	.Lcommon_seh_tail
+
+	mov	152($context),%rax	# pull context->Rsp
+
+	mov	4(%r11),%r10d		# HandlerData[1]
+	lea	(%rsi,%r10),%r10	# epilogue label
+	cmp	%r10,%rbx		# context->Rip>=epilogue label
+	jae	.Lcommon_seh_tail
+
+	mov	208($context),%rax	# pull context->R11
+
+	lea	-0xa8(%rax),%rsi	# %xmm save area
+	lea	512($context),%rdi	# & context.Xmm6
+	mov	\$20,%ecx		# 10*sizeof(%xmm0)/sizeof(%rax)
+	.long	0xa548f3fc		# cld; rep movsq
+
+	mov	-8(%rax),%rbp		# restore saved %rbp
+	mov	%rbp,160($context)	# restore context->Rbp
+	jmp	.Lcommon_seh_tail
+.size	ctr_xts_se_handler,.-ctr_xts_se_handler
+
+.type	ocb_se_handler,\@abi-omnipotent
+.align	16
+ocb_se_handler:
+	push	%rsi
+	push	%rdi
+	push	%rbx
+	push	%rbp
+	push	%r12
+	push	%r13
+	push	%r14
+	push	%r15
+	pushfq
+	sub	\$64,%rsp
+
+	mov	120($context),%rax	# pull context->Rax
+	mov	248($context),%rbx	# pull context->Rip
+
+	mov	8($disp),%rsi		# disp->ImageBase
+	mov	56($disp),%r11		# disp->HandlerData
+
+	mov	0(%r11),%r10d		# HandlerData[0]
+	lea	(%rsi,%r10),%r10	# prologue label
+	cmp	%r10,%rbx		# context->Rip<prologue label
+	jb	.Lcommon_seh_tail
+
+	mov	4(%r11),%r10d		# HandlerData[1]
+	lea	(%rsi,%r10),%r10	# epilogue label
+	cmp	%r10,%rbx		# context->Rip>=epilogue label
+	jae	.Lcommon_seh_tail
+
+	mov	8(%r11),%r10d		# HandlerData[2]
+	lea	(%rsi,%r10),%r10
+	cmp	%r10,%rbx		# context->Rip>=pop label
+	jae	.Locb_no_xmm
+
+	mov	152($context),%rax	# pull context->Rsp
+
+	lea	(%rax),%rsi		# %xmm save area
+	lea	512($context),%rdi	# & context.Xmm6
+	mov	\$20,%ecx		# 10*sizeof(%xmm0)/sizeof(%rax)
+	.long	0xa548f3fc		# cld; rep movsq
+	lea	0xa0+0x28(%rax),%rax
+
+.Locb_no_xmm:
+	mov	-8(%rax),%rbx
+	mov	-16(%rax),%rbp
+	mov	-24(%rax),%r12
+	mov	-32(%rax),%r13
+	mov	-40(%rax),%r14
+
+	mov	%rbx,144($context)	# restore context->Rbx
+	mov	%rbp,160($context)	# restore context->Rbp
+	mov	%r12,216($context)	# restore context->R12
+	mov	%r13,224($context)	# restore context->R13
+	mov	%r14,232($context)	# restore context->R14
+
+	jmp	.Lcommon_seh_tail
+.size	ocb_se_handler,.-ocb_se_handler
+___
+$code.=<<___;
+.type	cbc_se_handler,\@abi-omnipotent
+.align	16
+cbc_se_handler:
+	push	%rsi
+	push	%rdi
+	push	%rbx
+	push	%rbp
+	push	%r12
+	push	%r13
+	push	%r14
+	push	%r15
+	pushfq
+	sub	\$64,%rsp
+
+	mov	152($context),%rax	# pull context->Rsp
+	mov	248($context),%rbx	# pull context->Rip
+
+	lea	.Lcbc_decrypt_bulk(%rip),%r10
+	cmp	%r10,%rbx		# context->Rip<"prologue" label
+	jb	.Lcommon_seh_tail
+
+	mov	120($context),%rax	# pull context->Rax
+
+	lea	.Lcbc_decrypt_body(%rip),%r10
+	cmp	%r10,%rbx		# context->Rip<cbc_decrypt_body
+	jb	.Lcommon_seh_tail
+
+	mov	152($context),%rax	# pull context->Rsp
+
+	lea	.Lcbc_ret(%rip),%r10
+	cmp	%r10,%rbx		# context->Rip>="epilogue" label
+	jae	.Lcommon_seh_tail
+
+	lea	16(%rax),%rsi		# %xmm save area
+	lea	512($context),%rdi	# &context.Xmm6
+	mov	\$20,%ecx		# 10*sizeof(%xmm0)/sizeof(%rax)
+	.long	0xa548f3fc		# cld; rep movsq
+
+	mov	208($context),%rax	# pull context->R11
+
+	mov	-8(%rax),%rbp		# restore saved %rbp
+	mov	%rbp,160($context)	# restore context->Rbp
+
+.Lcommon_seh_tail:
+	mov	8(%rax),%rdi
+	mov	16(%rax),%rsi
+	mov	%rax,152($context)	# restore context->Rsp
+	mov	%rsi,168($context)	# restore context->Rsi
+	mov	%rdi,176($context)	# restore context->Rdi
+
+	mov	40($disp),%rdi		# disp->ContextRecord
+	mov	$context,%rsi		# context
+	mov	\$154,%ecx		# sizeof(CONTEXT) in quadwords
+	.long	0xa548f3fc		# cld; rep movsq
+
+	mov	$disp,%rsi
+	xor	%rcx,%rcx		# arg1, UNW_FLAG_NHANDLER
+	mov	8(%rsi),%rdx		# arg2, disp->ImageBase
+	mov	0(%rsi),%r8		# arg3, disp->ControlPc
+	mov	16(%rsi),%r9		# arg4, disp->FunctionEntry
+	mov	40(%rsi),%r10		# disp->ContextRecord
+	lea	56(%rsi),%r11		# &disp->HandlerData
+	lea	24(%rsi),%r12		# &disp->EstablisherFrame
+	mov	%r10,32(%rsp)		# arg5
+	mov	%r11,40(%rsp)		# arg6
+	mov	%r12,48(%rsp)		# arg7
+	mov	%rcx,56(%rsp)		# arg8, (NULL)
+	call	*__imp_RtlVirtualUnwind(%rip)
+
+	mov	\$1,%eax		# ExceptionContinueSearch
+	add	\$64,%rsp
+	popfq
+	pop	%r15
+	pop	%r14
+	pop	%r13
+	pop	%r12
+	pop	%rbp
+	pop	%rbx
+	pop	%rdi
+	pop	%rsi
+	ret
+.size	cbc_se_handler,.-cbc_se_handler
+
+.section	.pdata
+.align	4
+___
+$code.=<<___ if ($PREFIX eq "aesni");
+	.rva	.LSEH_begin_aesni_ecb_encrypt
+	.rva	.LSEH_end_aesni_ecb_encrypt
+	.rva	.LSEH_info_ecb
+
+	.rva	.LSEH_begin_aesni_ccm64_encrypt_blocks
+	.rva	.LSEH_end_aesni_ccm64_encrypt_blocks
+	.rva	.LSEH_info_ccm64_enc
+
+	.rva	.LSEH_begin_aesni_ccm64_decrypt_blocks
+	.rva	.LSEH_end_aesni_ccm64_decrypt_blocks
+	.rva	.LSEH_info_ccm64_dec
+
+	.rva	.LSEH_begin_aesni_ctr32_encrypt_blocks
+	.rva	.LSEH_end_aesni_ctr32_encrypt_blocks
+	.rva	.LSEH_info_ctr32
+
+	.rva	.LSEH_begin_aesni_xts_encrypt
+	.rva	.LSEH_end_aesni_xts_encrypt
+	.rva	.LSEH_info_xts_enc
+
+	.rva	.LSEH_begin_aesni_xts_decrypt
+	.rva	.LSEH_end_aesni_xts_decrypt
+	.rva	.LSEH_info_xts_dec
+
+	.rva	.LSEH_begin_aesni_ocb_encrypt
+	.rva	.LSEH_end_aesni_ocb_encrypt
+	.rva	.LSEH_info_ocb_enc
+
+	.rva	.LSEH_begin_aesni_ocb_decrypt
+	.rva	.LSEH_end_aesni_ocb_decrypt
+	.rva	.LSEH_info_ocb_dec
+___
+$code.=<<___;
+	.rva	.LSEH_begin_${PREFIX}_cbc_encrypt
+	.rva	.LSEH_end_${PREFIX}_cbc_encrypt
+	.rva	.LSEH_info_cbc
+
+	.rva	${PREFIX}_set_decrypt_key
+	.rva	.LSEH_end_set_decrypt_key
+	.rva	.LSEH_info_key
+
+	.rva	${PREFIX}_set_encrypt_key
+	.rva	.LSEH_end_set_encrypt_key
+	.rva	.LSEH_info_key
+.section	.xdata
+.align	8
+___
+$code.=<<___ if ($PREFIX eq "aesni");
+.LSEH_info_ecb:
+	.byte	9,0,0,0
+	.rva	ecb_ccm64_se_handler
+	.rva	.Lecb_enc_body,.Lecb_enc_ret		# HandlerData[]
+.LSEH_info_ccm64_enc:
+	.byte	9,0,0,0
+	.rva	ecb_ccm64_se_handler
+	.rva	.Lccm64_enc_body,.Lccm64_enc_ret	# HandlerData[]
+.LSEH_info_ccm64_dec:
+	.byte	9,0,0,0
+	.rva	ecb_ccm64_se_handler
+	.rva	.Lccm64_dec_body,.Lccm64_dec_ret	# HandlerData[]
+.LSEH_info_ctr32:
+	.byte	9,0,0,0
+	.rva	ctr_xts_se_handler
+	.rva	.Lctr32_body,.Lctr32_epilogue		# HandlerData[]
+.LSEH_info_xts_enc:
+	.byte	9,0,0,0
+	.rva	ctr_xts_se_handler
+	.rva	.Lxts_enc_body,.Lxts_enc_epilogue	# HandlerData[]
+.LSEH_info_xts_dec:
+	.byte	9,0,0,0
+	.rva	ctr_xts_se_handler
+	.rva	.Lxts_dec_body,.Lxts_dec_epilogue	# HandlerData[]
+.LSEH_info_ocb_enc:
+	.byte	9,0,0,0
+	.rva	ocb_se_handler
+	.rva	.Locb_enc_body,.Locb_enc_epilogue	# HandlerData[]
+	.rva	.Locb_enc_pop
+	.long	0
+.LSEH_info_ocb_dec:
+	.byte	9,0,0,0
+	.rva	ocb_se_handler
+	.rva	.Locb_dec_body,.Locb_dec_epilogue	# HandlerData[]
+	.rva	.Locb_dec_pop
+	.long	0
+___
+$code.=<<___;
+.LSEH_info_cbc:
+	.byte	9,0,0,0
+	.rva	cbc_se_handler
+.LSEH_info_key:
+	.byte	0x01,0x04,0x01,0x00
+	.byte	0x04,0x02,0x00,0x00	# sub rsp,8
+___
+}
+
+sub rex {
+  local *opcode=shift;
+  my ($dst,$src)=@_;
+  my $rex=0;
+
+    $rex|=0x04			if($dst>=8);
+    $rex|=0x01			if($src>=8);
+    push @opcode,$rex|0x40	if($rex);
+}
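+
+# Example: for "aesenc %xmm10,%xmm9" the assembler below calls
+# rex(\@opcode,9,10), which pushes the prefix 0x45
+# (0x40 | REX.R=0x04 for the high destination | REX.B=0x01 for the
+# high source).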
+
+sub aesni {
+  my $line=shift;
+  my @opcode=(0x66);
+
+    if ($line=~/(aeskeygenassist)\s+\$([x0-9a-f]+),\s*%xmm([0-9]+),\s*%xmm([0-9]+)/) {
+	rex(\@opcode,$4,$3);
+	push @opcode,0x0f,0x3a,0xdf;
+	push @opcode,0xc0|($3&7)|(($4&7)<<3);	# ModR/M
+	my $c=$2;
+	push @opcode,$c=~/^0/?oct($c):$c;
+	return ".byte\t".join(',',@opcode);
+    }
+    elsif ($line=~/(aes[a-z]+)\s+%xmm([0-9]+),\s*%xmm([0-9]+)/) {
+	my %opcodelet = (
+		"aesimc" => 0xdb,
+		"aesenc" => 0xdc,	"aesenclast" => 0xdd,
+		"aesdec" => 0xde,	"aesdeclast" => 0xdf
+	);
+	return undef if (!defined($opcodelet{$1}));
+	rex(\@opcode,$3,$2);
+	push @opcode,0x0f,0x38,$opcodelet{$1};
+	push @opcode,0xc0|($2&7)|(($3&7)<<3);	# ModR/M
+	return ".byte\t".join(',',@opcode);
+    }
+    elsif ($line=~/(aes[a-z]+)\s+([0x1-9a-fA-F]*)\(%rsp\),\s*%xmm([0-9]+)/) {
+	my %opcodelet = (
+		"aesenc" => 0xdc,	"aesenclast" => 0xdd,
+		"aesdec" => 0xde,	"aesdeclast" => 0xdf
+	);
+	return undef if (!defined($opcodelet{$1}));
+	my $off = $2;
+	push @opcode,0x44 if ($3>=8);
+	push @opcode,0x0f,0x38,$opcodelet{$1};
+	push @opcode,0x44|(($3&7)<<3),0x24;	# ModR/M
+	push @opcode,($off=~/^0/?oct($off):$off)&0xff;
+	return ".byte\t".join(',',@opcode);
+    }
+    return $line;
+}
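+
+# For instance "aesenc %xmm1,%xmm0" comes back as
+# ".byte 0x66,0x0f,0x38,0xdc,0xc1" (ModR/M 0xc1: reg=%xmm0, r/m=%xmm1),
+# which lets assemblers that predate AES-NI build this module.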
+
+sub movbe {
+	".byte	0x0f,0x38,0xf1,0x44,0x24,".shift;
+}
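+
+# movbe(16), for example, yields the encoding of "movbe %eax,16(%rsp)":
+# 0f 38 f1 /r with a %rsp-based SIB byte and the decimal disp8 appended.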
+
+$code =~ s/\`([^\`]*)\`/eval($1)/gem;
+$code =~ s/\b(aes.*%xmm[0-9]+).*$/aesni($1)/gem;
+#$code =~ s/\bmovbe\s+%eax/bswap %eax; mov %eax/gm;	# debugging artefact
+$code =~ s/\bmovbe\s+%eax,\s*([0-9]+)\(%rsp\)/movbe($1)/gem;
+
+print $code;
+
+close STDOUT or die "error closing STDOUT: $!";
diff --git a/ap/lib/libssl/openssl-1.1.1o/crypto/aes/asm/aesp8-ppc.pl b/ap/lib/libssl/openssl-1.1.1o/crypto/aes/asm/aesp8-ppc.pl
new file mode 100755
index 0000000..22a538f
--- /dev/null
+++ b/ap/lib/libssl/openssl-1.1.1o/crypto/aes/asm/aesp8-ppc.pl
@@ -0,0 +1,3807 @@
+#! /usr/bin/env perl
+# Copyright 2014-2020 The OpenSSL Project Authors. All Rights Reserved.
+#
+# Licensed under the OpenSSL license (the "License").  You may not use
+# this file except in compliance with the License.  You can obtain a copy
+# in the file LICENSE in the source distribution or at
+# https://www.openssl.org/source/license.html
+
+#
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+#
+# This module implements support for AES instructions as per PowerISA
+# specification version 2.07, first implemented by the POWER8 processor.
+# The module is endian-agnostic in the sense that it supports both big-
+# and little-endian cases. Data alignment in parallelizable modes is
+# handled with VSX loads and stores, which implies the MSR.VSX flag being
+# set. It should also be noted that the ISA specification doesn't prohibit
+# alignment exceptions for these instructions on page boundaries.
+# Initially alignment was handled in a pure AltiVec/VMX way [with data
+# aligned programmatically, which in turn guarantees exception-
+# free execution], but that turned out to hamper performance when vcipher
+# instructions are interleaved. It's reckoned that occasional
+# misalignment penalties at page boundaries are on average lower
+# than the additional overhead of the pure AltiVec approach.
+#
+# May 2016
+#
+# Added XTS subroutine; a 9x improvement on little-endian and a 12x
+# improvement on big-endian systems was measured.
+#
+######################################################################
+# Current large-block performance in cycles per byte processed with
+# a 128-bit key (lower is better).
+#
+#		CBC en-/decrypt	CTR	XTS
+# POWER8[le]	3.96/0.72	0.74	1.1
+# POWER8[be]	3.75/0.65	0.66	1.0
+# POWER9[le]	4.02/0.86	0.84	1.05
+# POWER9[be]	3.99/0.78	0.79	0.97
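+#
+# The script is driven the usual perlasm way; an assumed invocation is
+#
+#	perl aesp8-ppc.pl linux64le aesp8-ppc.s
+#
+# where the flavour selects word size and endianness.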
+
+$flavour = shift;
+
+if ($flavour =~ /64/) {
+	$SIZE_T	=8;
+	$LRSAVE	=2*$SIZE_T;
+	$STU	="stdu";
+	$POP	="ld";
+	$PUSH	="std";
+	$UCMP	="cmpld";
+	$SHL	="sldi";
+} elsif ($flavour =~ /32/) {
+	$SIZE_T	=4;
+	$LRSAVE	=$SIZE_T;
+	$STU	="stwu";
+	$POP	="lwz";
+	$PUSH	="stw";
+	$UCMP	="cmplw";
+	$SHL	="slwi";
+} else { die "nonsense $flavour"; }
+
+$LITTLE_ENDIAN = ($flavour=~/le$/) ? $SIZE_T : 0;
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+( $xlate="${dir}ppc-xlate.pl" and -f $xlate ) or
+( $xlate="${dir}../../perlasm/ppc-xlate.pl" and -f $xlate) or
+die "can't locate ppc-xlate.pl";
+
+open STDOUT,"| $^X $xlate $flavour ".shift || die "can't call $xlate: $!";
+
+$FRAME=8*$SIZE_T;
+$prefix="aes_p8";
+
+$sp="r1";
+$vrsave="r12";
+
+#########################################################################
+{{{	# Key setup procedures						#
+my ($inp,$bits,$out,$ptr,$cnt,$rounds)=map("r$_",(3..8));
+my ($zero,$in0,$in1,$key,$rcon,$mask,$tmp)=map("v$_",(0..6));
+my ($stage,$outperm,$outmask,$outhead,$outtail)=map("v$_",(7..11));
+
+$code.=<<___;
+.machine	"any"
+
+.text
+
+.align	7
+rcon:
+.long	0x01000000, 0x01000000, 0x01000000, 0x01000000	?rev
+.long	0x1b000000, 0x1b000000, 0x1b000000, 0x1b000000	?rev
+.long	0x0d0e0f0c, 0x0d0e0f0c, 0x0d0e0f0c, 0x0d0e0f0c	?rev
+.long	0,0,0,0						?asis
+Lconsts:
+	mflr	r0
+	bcl	20,31,\$+4
+	mflr	$ptr	 #vvvvv distance between . and rcon
+	addi	$ptr,$ptr,-0x48
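+				# (LR points at the mflr above, which sits
+				#  0x48 bytes past rcon: 0x40 of table plus
+				#  two instructions)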
+	mtlr	r0
+	blr
+	.long	0
+	.byte	0,12,0x14,0,0,0,0,0
+.asciz	"AES for PowerISA 2.07, CRYPTOGAMS by <appro\@openssl.org>"
+
+.globl	.${prefix}_set_encrypt_key
+.align	5
+.${prefix}_set_encrypt_key:
+Lset_encrypt_key:
+	mflr		r11
+	$PUSH		r11,$LRSAVE($sp)
+
+	li		$ptr,-1
+	${UCMP}i	$inp,0
+	beq-		Lenc_key_abort		# if ($inp==0) return -1;
+	${UCMP}i	$out,0
+	beq-		Lenc_key_abort		# if ($out==0) return -1;
+	li		$ptr,-2
+	cmpwi		$bits,128
+	blt-		Lenc_key_abort
+	cmpwi		$bits,256
+	bgt-		Lenc_key_abort
+	andi.		r0,$bits,0x3f
+	bne-		Lenc_key_abort
+
+	lis		r0,0xfff0
+	mfspr		$vrsave,256
+	mtspr		256,r0
+
+	bl		Lconsts
+	mtlr		r11
+
+	neg		r9,$inp
+	lvx		$in0,0,$inp
+	addi		$inp,$inp,15		# 15 is not a typo
+	lvsr		$key,0,r9		# borrow $key
+	li		r8,0x20
+	cmpwi		$bits,192
+	lvx		$in1,0,$inp
+	le?vspltisb	$mask,0x0f		# borrow $mask
+	lvx		$rcon,0,$ptr
+	le?vxor		$key,$key,$mask		# adjust for byte swap
+	lvx		$mask,r8,$ptr
+	addi		$ptr,$ptr,0x10
+	vperm		$in0,$in0,$in1,$key	# align [and byte swap in LE]
+	li		$cnt,8
+	vxor		$zero,$zero,$zero
+	mtctr		$cnt
+
+	?lvsr		$outperm,0,$out
+	vspltisb	$outmask,-1
+	lvx		$outhead,0,$out
+	?vperm		$outmask,$zero,$outmask,$outperm
+
+	blt		Loop128
+	addi		$inp,$inp,8
+	beq		L192
+	addi		$inp,$inp,8
+	b		L256
+
+.align	4
+Loop128:
+	vperm		$key,$in0,$in0,$mask	# rotate-n-splat
+	vsldoi		$tmp,$zero,$in0,12	# >>32
+	 vperm		$outtail,$in0,$in0,$outperm	# rotate
+	 vsel		$stage,$outhead,$outtail,$outmask
+	 vmr		$outhead,$outtail
+	vcipherlast	$key,$key,$rcon
+	 stvx		$stage,0,$out
+	 addi		$out,$out,16
+
+	vxor		$in0,$in0,$tmp
+	vsldoi		$tmp,$zero,$tmp,12	# >>32
+	vxor		$in0,$in0,$tmp
+	vsldoi		$tmp,$zero,$tmp,12	# >>32
+	vxor		$in0,$in0,$tmp
+	 vadduwm	$rcon,$rcon,$rcon
+	vxor		$in0,$in0,$key
+	bdnz		Loop128
+
+	lvx		$rcon,0,$ptr		# last two round keys
+
+	vperm		$key,$in0,$in0,$mask	# rotate-n-splat
+	vsldoi		$tmp,$zero,$in0,12	# >>32
+	 vperm		$outtail,$in0,$in0,$outperm	# rotate
+	 vsel		$stage,$outhead,$outtail,$outmask
+	 vmr		$outhead,$outtail
+	vcipherlast	$key,$key,$rcon
+	 stvx		$stage,0,$out
+	 addi		$out,$out,16
+
+	vxor		$in0,$in0,$tmp
+	vsldoi		$tmp,$zero,$tmp,12	# >>32
+	vxor		$in0,$in0,$tmp
+	vsldoi		$tmp,$zero,$tmp,12	# >>32
+	vxor		$in0,$in0,$tmp
+	 vadduwm	$rcon,$rcon,$rcon
+	vxor		$in0,$in0,$key
+
+	vperm		$key,$in0,$in0,$mask	# rotate-n-splat
+	vsldoi		$tmp,$zero,$in0,12	# >>32
+	 vperm		$outtail,$in0,$in0,$outperm	# rotate
+	 vsel		$stage,$outhead,$outtail,$outmask
+	 vmr		$outhead,$outtail
+	vcipherlast	$key,$key,$rcon
+	 stvx		$stage,0,$out
+	 addi		$out,$out,16
+
+	vxor		$in0,$in0,$tmp
+	vsldoi		$tmp,$zero,$tmp,12	# >>32
+	vxor		$in0,$in0,$tmp
+	vsldoi		$tmp,$zero,$tmp,12	# >>32
+	vxor		$in0,$in0,$tmp
+	vxor		$in0,$in0,$key
+	 vperm		$outtail,$in0,$in0,$outperm	# rotate
+	 vsel		$stage,$outhead,$outtail,$outmask
+	 vmr		$outhead,$outtail
+	 stvx		$stage,0,$out
+
+	addi		$inp,$out,15		# 15 is not a typo
+	addi		$out,$out,0x50
+
+	li		$rounds,10
+	b		Ldone
+
+.align	4
+L192:
+	lvx		$tmp,0,$inp
+	li		$cnt,4
+	 vperm		$outtail,$in0,$in0,$outperm	# rotate
+	 vsel		$stage,$outhead,$outtail,$outmask
+	 vmr		$outhead,$outtail
+	 stvx		$stage,0,$out
+	 addi		$out,$out,16
+	vperm		$in1,$in1,$tmp,$key	# align [and byte swap in LE]
+	vspltisb	$key,8			# borrow $key
+	mtctr		$cnt
+	vsububm		$mask,$mask,$key	# adjust the mask
+
+Loop192:
+	vperm		$key,$in1,$in1,$mask	# rotate-n-splat
+	vsldoi		$tmp,$zero,$in0,12	# >>32
+	vcipherlast	$key,$key,$rcon
+
+	vxor		$in0,$in0,$tmp
+	vsldoi		$tmp,$zero,$tmp,12	# >>32
+	vxor		$in0,$in0,$tmp
+	vsldoi		$tmp,$zero,$tmp,12	# >>32
+	vxor		$in0,$in0,$tmp
+
+	 vsldoi		$stage,$zero,$in1,8
+	vspltw		$tmp,$in0,3
+	vxor		$tmp,$tmp,$in1
+	vsldoi		$in1,$zero,$in1,12	# >>32
+	 vadduwm	$rcon,$rcon,$rcon
+	vxor		$in1,$in1,$tmp
+	vxor		$in0,$in0,$key
+	vxor		$in1,$in1,$key
+	 vsldoi		$stage,$stage,$in0,8
+
+	vperm		$key,$in1,$in1,$mask	# rotate-n-splat
+	vsldoi		$tmp,$zero,$in0,12	# >>32
+	 vperm		$outtail,$stage,$stage,$outperm	# rotate
+	 vsel		$stage,$outhead,$outtail,$outmask
+	 vmr		$outhead,$outtail
+	vcipherlast	$key,$key,$rcon
+	 stvx		$stage,0,$out
+	 addi		$out,$out,16
+
+	 vsldoi		$stage,$in0,$in1,8
+	vxor		$in0,$in0,$tmp
+	vsldoi		$tmp,$zero,$tmp,12	# >>32
+	 vperm		$outtail,$stage,$stage,$outperm	# rotate
+	 vsel		$stage,$outhead,$outtail,$outmask
+	 vmr		$outhead,$outtail
+	vxor		$in0,$in0,$tmp
+	vsldoi		$tmp,$zero,$tmp,12	# >>32
+	vxor		$in0,$in0,$tmp
+	 stvx		$stage,0,$out
+	 addi		$out,$out,16
+
+	vspltw		$tmp,$in0,3
+	vxor		$tmp,$tmp,$in1
+	vsldoi		$in1,$zero,$in1,12	# >>32
+	 vadduwm	$rcon,$rcon,$rcon
+	vxor		$in1,$in1,$tmp
+	vxor		$in0,$in0,$key
+	vxor		$in1,$in1,$key
+	 vperm		$outtail,$in0,$in0,$outperm	# rotate
+	 vsel		$stage,$outhead,$outtail,$outmask
+	 vmr		$outhead,$outtail
+	 stvx		$stage,0,$out
+	 addi		$inp,$out,15		# 15 is not a typo
+	 addi		$out,$out,16
+	bdnz		Loop192
+
+	li		$rounds,12
+	addi		$out,$out,0x20
+	b		Ldone
+
+.align	4
+L256:
+	lvx		$tmp,0,$inp
+	li		$cnt,7
+	li		$rounds,14
+	 vperm		$outtail,$in0,$in0,$outperm	# rotate
+	 vsel		$stage,$outhead,$outtail,$outmask
+	 vmr		$outhead,$outtail
+	 stvx		$stage,0,$out
+	 addi		$out,$out,16
+	vperm		$in1,$in1,$tmp,$key	# align [and byte swap in LE]
+	mtctr		$cnt
+
+Loop256:
+	vperm		$key,$in1,$in1,$mask	# rotate-n-splat
+	vsldoi		$tmp,$zero,$in0,12	# >>32
+	 vperm		$outtail,$in1,$in1,$outperm	# rotate
+	 vsel		$stage,$outhead,$outtail,$outmask
+	 vmr		$outhead,$outtail
+	vcipherlast	$key,$key,$rcon
+	 stvx		$stage,0,$out
+	 addi		$out,$out,16
+
+	vxor		$in0,$in0,$tmp
+	vsldoi		$tmp,$zero,$tmp,12	# >>32
+	vxor		$in0,$in0,$tmp
+	vsldoi		$tmp,$zero,$tmp,12	# >>32
+	vxor		$in0,$in0,$tmp
+	 vadduwm	$rcon,$rcon,$rcon
+	vxor		$in0,$in0,$key
+	 vperm		$outtail,$in0,$in0,$outperm	# rotate
+	 vsel		$stage,$outhead,$outtail,$outmask
+	 vmr		$outhead,$outtail
+	 stvx		$stage,0,$out
+	 addi		$inp,$out,15		# 15 is not a typo
+	 addi		$out,$out,16
+	bdz		Ldone
+
+	vspltw		$key,$in0,3		# just splat
+	vsldoi		$tmp,$zero,$in1,12	# >>32
+	vsbox		$key,$key
+
+	vxor		$in1,$in1,$tmp
+	vsldoi		$tmp,$zero,$tmp,12	# >>32
+	vxor		$in1,$in1,$tmp
+	vsldoi		$tmp,$zero,$tmp,12	# >>32
+	vxor		$in1,$in1,$tmp
+
+	vxor		$in1,$in1,$key
+	b		Loop256
+
+.align	4
+Ldone:
+	lvx		$in1,0,$inp		# redundant in aligned case
+	vsel		$in1,$outhead,$in1,$outmask
+	stvx		$in1,0,$inp
+	li		$ptr,0
+	mtspr		256,$vrsave
+	stw		$rounds,0($out)
+
+Lenc_key_abort:
+	mr		r3,$ptr
+	blr
+	.long		0
+	.byte		0,12,0x14,1,0,0,3,0
+	.long		0
+.size	.${prefix}_set_encrypt_key,.-.${prefix}_set_encrypt_key
+
+.globl	.${prefix}_set_decrypt_key
+.align	5
+.${prefix}_set_decrypt_key:
+	$STU		$sp,-$FRAME($sp)
+	mflr		r10
+	$PUSH		r10,$FRAME+$LRSAVE($sp)
+	bl		Lset_encrypt_key
+	mtlr		r10
+
+	cmpwi		r3,0
+	bne-		Ldec_key_abort
+
+	slwi		$cnt,$rounds,4
+	subi		$inp,$out,240		# first round key
+	srwi		$rounds,$rounds,1
+	add		$out,$inp,$cnt		# last round key
+	mtctr		$rounds
+
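+# The decrypt key schedule is the encrypt schedule in reverse: swap
+# 16-byte round keys from both ends until the pointers meet.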
+Ldeckey:
+	lwz		r0, 0($inp)
+	lwz		r6, 4($inp)
+	lwz		r7, 8($inp)
+	lwz		r8, 12($inp)
+	addi		$inp,$inp,16
+	lwz		r9, 0($out)
+	lwz		r10,4($out)
+	lwz		r11,8($out)
+	lwz		r12,12($out)
+	stw		r0, 0($out)
+	stw		r6, 4($out)
+	stw		r7, 8($out)
+	stw		r8, 12($out)
+	subi		$out,$out,16
+	stw		r9, -16($inp)
+	stw		r10,-12($inp)
+	stw		r11,-8($inp)
+	stw		r12,-4($inp)
+	bdnz		Ldeckey
+
+	xor		r3,r3,r3		# return value
+Ldec_key_abort:
+	addi		$sp,$sp,$FRAME
+	blr
+	.long		0
+	.byte		0,12,4,1,0x80,0,3,0
+	.long		0
+.size	.${prefix}_set_decrypt_key,.-.${prefix}_set_decrypt_key
+___
+}}}
+#########################################################################
+{{{	# Single block en- and decrypt procedures			#
+sub gen_block () {
+my $dir = shift;
+my $n   = $dir eq "de" ? "n" : "";
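+# $n is "n" for the decrypt flavour, turning vcipher/vcipherlast into
+# vncipher/vncipherlast in the template below.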
+my ($inp,$out,$key,$rounds,$idx)=map("r$_",(3..7));
+
+$code.=<<___;
+.globl	.${prefix}_${dir}crypt
+.align	5
+.${prefix}_${dir}crypt:
+	lwz		$rounds,240($key)
+	lis		r0,0xfc00
+	mfspr		$vrsave,256
+	li		$idx,15			# 15 is not a typo
+	mtspr		256,r0
+
+	lvx		v0,0,$inp
+	neg		r11,$out
+	lvx		v1,$idx,$inp
+	lvsl		v2,0,$inp		# inpperm
+	le?vspltisb	v4,0x0f
+	?lvsl		v3,0,r11		# outperm
+	le?vxor		v2,v2,v4
+	li		$idx,16
+	vperm		v0,v0,v1,v2		# align [and byte swap in LE]
+	lvx		v1,0,$key
+	?lvsl		v5,0,$key		# keyperm
+	srwi		$rounds,$rounds,1
+	lvx		v2,$idx,$key
+	addi		$idx,$idx,16
+	subi		$rounds,$rounds,1
+	?vperm		v1,v1,v2,v5		# align round key
+
+	vxor		v0,v0,v1
+	lvx		v1,$idx,$key
+	addi		$idx,$idx,16
+	mtctr		$rounds
+
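+# The counter was set to rounds/2-1: each iteration applies two AES
+# rounds, and the final round pair runs after the loop. The ?-prefixed
+# permutes realign the key schedule for an unaligned key and are
+# endian-adjusted by the script's post-processing pass.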
+Loop_${dir}c:
+	?vperm		v2,v2,v1,v5
+	v${n}cipher	v0,v0,v2
+	lvx		v2,$idx,$key
+	addi		$idx,$idx,16
+	?vperm		v1,v1,v2,v5
+	v${n}cipher	v0,v0,v1
+	lvx		v1,$idx,$key
+	addi		$idx,$idx,16
+	bdnz		Loop_${dir}c
+
+	?vperm		v2,v2,v1,v5
+	v${n}cipher	v0,v0,v2
+	lvx		v2,$idx,$key
+	?vperm		v1,v1,v2,v5
+	v${n}cipherlast	v0,v0,v1
+
+	vspltisb	v2,-1
+	vxor		v1,v1,v1
+	li		$idx,15			# 15 is not a typo
+	?vperm		v2,v1,v2,v3		# outmask
+	le?vxor		v3,v3,v4
+	lvx		v1,0,$out		# outhead
+	vperm		v0,v0,v0,v3		# rotate [and byte swap in LE]
+	vsel		v1,v1,v0,v2
+	lvx		v4,$idx,$out
+	stvx		v1,0,$out
+	vsel		v0,v0,v4,v2
+	stvx		v0,$idx,$out
+
+	mtspr		256,$vrsave
+	blr
+	.long		0
+	.byte		0,12,0x14,0,0,0,3,0
+	.long		0
+.size	.${prefix}_${dir}crypt,.-.${prefix}_${dir}crypt
+___
+}
+&gen_block("en");
+&gen_block("de");
+}}}
+#########################################################################
+{{{	# CBC en- and decrypt procedures				#
+my ($inp,$out,$len,$key,$ivp,$enc,$rounds,$idx)=map("r$_",(3..10));
+my ($rndkey0,$rndkey1,$inout,$tmp)=		map("v$_",(0..3));
+my ($ivec,$inptail,$inpperm,$outhead,$outperm,$outmask,$keyperm)=
+						map("v$_",(4..10));
+$code.=<<___;
+.globl	.${prefix}_cbc_encrypt
+.align	5
+.${prefix}_cbc_encrypt:
+	${UCMP}i	$len,16
+	bltlr-
+
+	cmpwi		$enc,0			# test direction
+	lis		r0,0xffe0
+	mfspr		$vrsave,256
+	mtspr		256,r0
+
+	li		$idx,15
+	vxor		$rndkey0,$rndkey0,$rndkey0
+	le?vspltisb	$tmp,0x0f
+
+	lvx		$ivec,0,$ivp		# load [unaligned] iv
+	lvsl		$inpperm,0,$ivp
+	lvx		$inptail,$idx,$ivp
+	le?vxor		$inpperm,$inpperm,$tmp
+	vperm		$ivec,$ivec,$inptail,$inpperm
+
+	neg		r11,$inp
+	?lvsl		$keyperm,0,$key		# prepare for unaligned key
+	lwz		$rounds,240($key)
+
+	lvsr		$inpperm,0,r11		# prepare for unaligned load
+	lvx		$inptail,0,$inp
+	addi		$inp,$inp,15		# 15 is not a typo
+	le?vxor		$inpperm,$inpperm,$tmp
+
+	?lvsr		$outperm,0,$out		# prepare for unaligned store
+	vspltisb	$outmask,-1
+	lvx		$outhead,0,$out
+	?vperm		$outmask,$rndkey0,$outmask,$outperm
+	le?vxor		$outperm,$outperm,$tmp
+
+	srwi		$rounds,$rounds,1
+	li		$idx,16
+	subi		$rounds,$rounds,1
+	beq		Lcbc_dec
+
+Lcbc_enc:
+	vmr		$inout,$inptail
+	lvx		$inptail,0,$inp
+	addi		$inp,$inp,16
+	mtctr		$rounds
+	subi		$len,$len,16		# len-=16
+
+	lvx		$rndkey0,0,$key
+	 vperm		$inout,$inout,$inptail,$inpperm
+	lvx		$rndkey1,$idx,$key
+	addi		$idx,$idx,16
+	?vperm		$rndkey0,$rndkey0,$rndkey1,$keyperm
+	vxor		$inout,$inout,$rndkey0
+	lvx		$rndkey0,$idx,$key
+	addi		$idx,$idx,16
+	vxor		$inout,$inout,$ivec
+
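+# CBC chaining: $ivec (IV or previous ciphertext block) was xored into
+# the plaintext above; each loop iteration applies two AES rounds.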
+Loop_cbc_enc:
+	?vperm		$rndkey1,$rndkey1,$rndkey0,$keyperm
+	vcipher		$inout,$inout,$rndkey1
+	lvx		$rndkey1,$idx,$key
+	addi		$idx,$idx,16
+	?vperm		$rndkey0,$rndkey0,$rndkey1,$keyperm
+	vcipher		$inout,$inout,$rndkey0
+	lvx		$rndkey0,$idx,$key
+	addi		$idx,$idx,16
+	bdnz		Loop_cbc_enc
+
+	?vperm		$rndkey1,$rndkey1,$rndkey0,$keyperm
+	vcipher		$inout,$inout,$rndkey1
+	lvx		$rndkey1,$idx,$key
+	li		$idx,16
+	?vperm		$rndkey0,$rndkey0,$rndkey1,$keyperm
+	vcipherlast	$ivec,$inout,$rndkey0
+	${UCMP}i	$len,16
+
+	vperm		$tmp,$ivec,$ivec,$outperm
+	vsel		$inout,$outhead,$tmp,$outmask
+	vmr		$outhead,$tmp
+	stvx		$inout,0,$out
+	addi		$out,$out,16
+	bge		Lcbc_enc
+
+	b		Lcbc_done
+
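+# Lengths of 128 bytes or more take the 8x path, which decrypts eight
+# blocks in parallel to keep the vncipher pipeline full.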
+.align	4
+Lcbc_dec:
+	${UCMP}i	$len,128
+	bge		_aesp8_cbc_decrypt8x
+	vmr		$tmp,$inptail
+	lvx		$inptail,0,$inp
+	addi		$inp,$inp,16
+	mtctr		$rounds
+	subi		$len,$len,16		# len-=16
+
+	lvx		$rndkey0,0,$key
+	 vperm		$tmp,$tmp,$inptail,$inpperm
+	lvx		$rndkey1,$idx,$key
+	addi		$idx,$idx,16
+	?vperm		$rndkey0,$rndkey0,$rndkey1,$keyperm
+	vxor		$inout,$tmp,$rndkey0
+	lvx		$rndkey0,$idx,$key
+	addi		$idx,$idx,16
+
+Loop_cbc_dec:
+	?vperm		$rndkey1,$rndkey1,$rndkey0,$keyperm
+	vncipher	$inout,$inout,$rndkey1
+	lvx		$rndkey1,$idx,$key
+	addi		$idx,$idx,16
+	?vperm		$rndkey0,$rndkey0,$rndkey1,$keyperm
+	vncipher	$inout,$inout,$rndkey0
+	lvx		$rndkey0,$idx,$key
+	addi		$idx,$idx,16
+	bdnz		Loop_cbc_dec
+
+	?vperm		$rndkey1,$rndkey1,$rndkey0,$keyperm
+	vncipher	$inout,$inout,$rndkey1
+	lvx		$rndkey1,$idx,$key
+	li		$idx,16
+	?vperm		$rndkey0,$rndkey0,$rndkey1,$keyperm
+	vncipherlast	$inout,$inout,$rndkey0
+	${UCMP}i	$len,16
+
+	vxor		$inout,$inout,$ivec
+	vmr		$ivec,$tmp
+	vperm		$tmp,$inout,$inout,$outperm
+	vsel		$inout,$outhead,$tmp,$outmask
+	vmr		$outhead,$tmp
+	stvx		$inout,0,$out
+	addi		$out,$out,16
+	bge		Lcbc_dec
+
+Lcbc_done:
+	addi		$out,$out,-1
+	lvx		$inout,0,$out		# redundant in aligned case
+	vsel		$inout,$outhead,$inout,$outmask
+	stvx		$inout,0,$out
+
+	neg		$enc,$ivp		# write [unaligned] iv
+	li		$idx,15			# 15 is not a typo
+	vxor		$rndkey0,$rndkey0,$rndkey0
+	vspltisb	$outmask,-1
+	le?vspltisb	$tmp,0x0f
+	?lvsl		$outperm,0,$enc
+	?vperm		$outmask,$rndkey0,$outmask,$outperm
+	le?vxor		$outperm,$outperm,$tmp
+	lvx		$outhead,0,$ivp
+	vperm		$ivec,$ivec,$ivec,$outperm
+	vsel		$inout,$outhead,$ivec,$outmask
+	lvx		$inptail,$idx,$ivp
+	stvx		$inout,0,$ivp
+	vsel		$inout,$ivec,$inptail,$outmask
+	stvx		$inout,$idx,$ivp
+
+	mtspr		256,$vrsave
+	blr
+	.long		0
+	.byte		0,12,0x14,0,0,0,6,0
+	.long		0
+___
+#########################################################################
+{{	# Optimized CBC decrypt procedure				#
+my $key_="r11";
+my ($x00,$x10,$x20,$x30,$x40,$x50,$x60,$x70)=map("r$_",(0,8,26..31));
+    $x00=0 if ($flavour =~ /osx/);
+my ($in0, $in1, $in2, $in3, $in4, $in5, $in6, $in7 )=map("v$_",(0..3,10..13));
+my ($out0,$out1,$out2,$out3,$out4,$out5,$out6,$out7)=map("v$_",(14..21));
+my $rndkey0="v23";	# v24-v25 rotating buffer for first round keys
+			# v26-v31 last 6 round keys
+my ($tmp,$keyperm)=($in3,$in4);	# aliases with "caller", redundant assignment
+
+$code.=<<___;
+.align	5
+_aesp8_cbc_decrypt8x:
+	$STU		$sp,-`($FRAME+21*16+6*$SIZE_T)`($sp)
+	li		r10,`$FRAME+8*16+15`
+	li		r11,`$FRAME+8*16+31`
+	stvx		v20,r10,$sp		# ABI says so
+	addi		r10,r10,32
+	stvx		v21,r11,$sp
+	addi		r11,r11,32
+	stvx		v22,r10,$sp
+	addi		r10,r10,32
+	stvx		v23,r11,$sp
+	addi		r11,r11,32
+	stvx		v24,r10,$sp
+	addi		r10,r10,32
+	stvx		v25,r11,$sp
+	addi		r11,r11,32
+	stvx		v26,r10,$sp
+	addi		r10,r10,32
+	stvx		v27,r11,$sp
+	addi		r11,r11,32
+	stvx		v28,r10,$sp
+	addi		r10,r10,32
+	stvx		v29,r11,$sp
+	addi		r11,r11,32
+	stvx		v30,r10,$sp
+	stvx		v31,r11,$sp
+	li		r0,-1
+	stw		$vrsave,`$FRAME+21*16-4`($sp)	# save vrsave
+	li		$x10,0x10
+	$PUSH		r26,`$FRAME+21*16+0*$SIZE_T`($sp)
+	li		$x20,0x20
+	$PUSH		r27,`$FRAME+21*16+1*$SIZE_T`($sp)
+	li		$x30,0x30
+	$PUSH		r28,`$FRAME+21*16+2*$SIZE_T`($sp)
+	li		$x40,0x40
+	$PUSH		r29,`$FRAME+21*16+3*$SIZE_T`($sp)
+	li		$x50,0x50
+	$PUSH		r30,`$FRAME+21*16+4*$SIZE_T`($sp)
+	li		$x60,0x60
+	$PUSH		r31,`$FRAME+21*16+5*$SIZE_T`($sp)
+	li		$x70,0x70
+	mtspr		256,r0
+
+	subi		$rounds,$rounds,3	# -4 in total
+	subi		$len,$len,128		# bias
+
+	lvx		$rndkey0,$x00,$key	# load key schedule
+	lvx		v30,$x10,$key
+	addi		$key,$key,0x20
+	lvx		v31,$x00,$key
+	?vperm		$rndkey0,$rndkey0,v30,$keyperm
+	addi		$key_,$sp,$FRAME+15
+	mtctr		$rounds
+
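+# Align the round keys once with ?vperm and spill them to a scratch area
+# on the stack so the hot loops can fetch them with plain aligned lvx.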
+Load_cbc_dec_key:
+	?vperm		v24,v30,v31,$keyperm
+	lvx		v30,$x10,$key
+	addi		$key,$key,0x20
+	stvx		v24,$x00,$key_		# off-load round[1]
+	?vperm		v25,v31,v30,$keyperm
+	lvx		v31,$x00,$key
+	stvx		v25,$x10,$key_		# off-load round[2]
+	addi		$key_,$key_,0x20
+	bdnz		Load_cbc_dec_key
+
+	lvx		v26,$x10,$key
+	?vperm		v24,v30,v31,$keyperm
+	lvx		v27,$x20,$key
+	stvx		v24,$x00,$key_		# off-load round[3]
+	?vperm		v25,v31,v26,$keyperm
+	lvx		v28,$x30,$key
+	stvx		v25,$x10,$key_		# off-load round[4]
+	addi		$key_,$sp,$FRAME+15	# rewind $key_
+	?vperm		v26,v26,v27,$keyperm
+	lvx		v29,$x40,$key
+	?vperm		v27,v27,v28,$keyperm
+	lvx		v30,$x50,$key
+	?vperm		v28,v28,v29,$keyperm
+	lvx		v31,$x60,$key
+	?vperm		v29,v29,v30,$keyperm
+	lvx		$out0,$x70,$key		# borrow $out0
+	?vperm		v30,v30,v31,$keyperm
+	lvx		v24,$x00,$key_		# pre-load round[1]
+	?vperm		v31,v31,$out0,$keyperm
+	lvx		v25,$x10,$key_		# pre-load round[2]
+
+	#lvx		$inptail,0,$inp		# "caller" already did this
+	#addi		$inp,$inp,15		# 15 is not a typo
+	subi		$inp,$inp,15		# undo "caller"
+
+	 le?li		$idx,8
+	lvx_u		$in0,$x00,$inp		# load first 8 "words"
+	 le?lvsl	$inpperm,0,$idx
+	 le?vspltisb	$tmp,0x0f
+	lvx_u		$in1,$x10,$inp
+	 le?vxor	$inpperm,$inpperm,$tmp	# transform for lvx_u/stvx_u
+	lvx_u		$in2,$x20,$inp
+	 le?vperm	$in0,$in0,$in0,$inpperm
+	lvx_u		$in3,$x30,$inp
+	 le?vperm	$in1,$in1,$in1,$inpperm
+	lvx_u		$in4,$x40,$inp
+	 le?vperm	$in2,$in2,$in2,$inpperm
+	vxor		$out0,$in0,$rndkey0
+	lvx_u		$in5,$x50,$inp
+	 le?vperm	$in3,$in3,$in3,$inpperm
+	vxor		$out1,$in1,$rndkey0
+	lvx_u		$in6,$x60,$inp
+	 le?vperm	$in4,$in4,$in4,$inpperm
+	vxor		$out2,$in2,$rndkey0
+	lvx_u		$in7,$x70,$inp
+	addi		$inp,$inp,0x80
+	 le?vperm	$in5,$in5,$in5,$inpperm
+	vxor		$out3,$in3,$rndkey0
+	 le?vperm	$in6,$in6,$in6,$inpperm
+	vxor		$out4,$in4,$rndkey0
+	 le?vperm	$in7,$in7,$in7,$inpperm
+	vxor		$out5,$in5,$rndkey0
+	vxor		$out6,$in6,$rndkey0
+	vxor		$out7,$in7,$rndkey0
+
+	mtctr		$rounds
+	b		Loop_cbc_dec8x
+.align	5
+Loop_cbc_dec8x:
+	vncipher	$out0,$out0,v24
+	vncipher	$out1,$out1,v24
+	vncipher	$out2,$out2,v24
+	vncipher	$out3,$out3,v24
+	vncipher	$out4,$out4,v24
+	vncipher	$out5,$out5,v24
+	vncipher	$out6,$out6,v24
+	vncipher	$out7,$out7,v24
+	lvx		v24,$x20,$key_		# round[3]
+	addi		$key_,$key_,0x20
+
+	vncipher	$out0,$out0,v25
+	vncipher	$out1,$out1,v25
+	vncipher	$out2,$out2,v25
+	vncipher	$out3,$out3,v25
+	vncipher	$out4,$out4,v25
+	vncipher	$out5,$out5,v25
+	vncipher	$out6,$out6,v25
+	vncipher	$out7,$out7,v25
+	lvx		v25,$x10,$key_		# round[4]
+	bdnz		Loop_cbc_dec8x
+
+	subic		$len,$len,128		# $len-=128
+	vncipher	$out0,$out0,v24
+	vncipher	$out1,$out1,v24
+	vncipher	$out2,$out2,v24
+	vncipher	$out3,$out3,v24
+	vncipher	$out4,$out4,v24
+	vncipher	$out5,$out5,v24
+	vncipher	$out6,$out6,v24
+	vncipher	$out7,$out7,v24
+
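+# subic cleared CA iff $len-=128 borrowed, so subfe leaves r0 = -1 on
+# borrow and 0 otherwise; the and/add below use it to rewind $inp on the
+# final pass so the tail blocks are reloaded.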
+	subfe.		r0,r0,r0		# borrow?-1:0
+	vncipher	$out0,$out0,v25
+	vncipher	$out1,$out1,v25
+	vncipher	$out2,$out2,v25
+	vncipher	$out3,$out3,v25
+	vncipher	$out4,$out4,v25
+	vncipher	$out5,$out5,v25
+	vncipher	$out6,$out6,v25
+	vncipher	$out7,$out7,v25
+
+	and		r0,r0,$len
+	vncipher	$out0,$out0,v26
+	vncipher	$out1,$out1,v26
+	vncipher	$out2,$out2,v26
+	vncipher	$out3,$out3,v26
+	vncipher	$out4,$out4,v26
+	vncipher	$out5,$out5,v26
+	vncipher	$out6,$out6,v26
+	vncipher	$out7,$out7,v26
+
+	add		$inp,$inp,r0		# $inp is adjusted in such
+						# way that at exit from the
+						# loop inX-in7 are loaded
+						# with last "words"
+	vncipher	$out0,$out0,v27
+	vncipher	$out1,$out1,v27
+	vncipher	$out2,$out2,v27
+	vncipher	$out3,$out3,v27
+	vncipher	$out4,$out4,v27
+	vncipher	$out5,$out5,v27
+	vncipher	$out6,$out6,v27
+	vncipher	$out7,$out7,v27
+
+	addi		$key_,$sp,$FRAME+15	# rewind $key_
+	vncipher	$out0,$out0,v28
+	vncipher	$out1,$out1,v28
+	vncipher	$out2,$out2,v28
+	vncipher	$out3,$out3,v28
+	vncipher	$out4,$out4,v28
+	vncipher	$out5,$out5,v28
+	vncipher	$out6,$out6,v28
+	vncipher	$out7,$out7,v28
+	lvx		v24,$x00,$key_		# re-pre-load round[1]
+
+	vncipher	$out0,$out0,v29
+	vncipher	$out1,$out1,v29
+	vncipher	$out2,$out2,v29
+	vncipher	$out3,$out3,v29
+	vncipher	$out4,$out4,v29
+	vncipher	$out5,$out5,v29
+	vncipher	$out6,$out6,v29
+	vncipher	$out7,$out7,v29
+	lvx		v25,$x10,$key_		# re-pre-load round[2]
+
+	vncipher	$out0,$out0,v30
+	 vxor		$ivec,$ivec,v31		# xor with last round key
+	vncipher	$out1,$out1,v30
+	 vxor		$in0,$in0,v31
+	vncipher	$out2,$out2,v30
+	 vxor		$in1,$in1,v31
+	vncipher	$out3,$out3,v30
+	 vxor		$in2,$in2,v31
+	vncipher	$out4,$out4,v30
+	 vxor		$in3,$in3,v31
+	vncipher	$out5,$out5,v30
+	 vxor		$in4,$in4,v31
+	vncipher	$out6,$out6,v30
+	 vxor		$in5,$in5,v31
+	vncipher	$out7,$out7,v30
+	 vxor		$in6,$in6,v31
+
+	vncipherlast	$out0,$out0,$ivec
+	vncipherlast	$out1,$out1,$in0
+	 lvx_u		$in0,$x00,$inp		# load next input block
+	vncipherlast	$out2,$out2,$in1
+	 lvx_u		$in1,$x10,$inp
+	vncipherlast	$out3,$out3,$in2
+	 le?vperm	$in0,$in0,$in0,$inpperm
+	 lvx_u		$in2,$x20,$inp
+	vncipherlast	$out4,$out4,$in3
+	 le?vperm	$in1,$in1,$in1,$inpperm
+	 lvx_u		$in3,$x30,$inp
+	vncipherlast	$out5,$out5,$in4
+	 le?vperm	$in2,$in2,$in2,$inpperm
+	 lvx_u		$in4,$x40,$inp
+	vncipherlast	$out6,$out6,$in5
+	 le?vperm	$in3,$in3,$in3,$inpperm
+	 lvx_u		$in5,$x50,$inp
+	vncipherlast	$out7,$out7,$in6
+	 le?vperm	$in4,$in4,$in4,$inpperm
+	 lvx_u		$in6,$x60,$inp
+	vmr		$ivec,$in7
+	 le?vperm	$in5,$in5,$in5,$inpperm
+	 lvx_u		$in7,$x70,$inp
+	 addi		$inp,$inp,0x80
+
+	le?vperm	$out0,$out0,$out0,$inpperm
+	le?vperm	$out1,$out1,$out1,$inpperm
+	stvx_u		$out0,$x00,$out
+	 le?vperm	$in6,$in6,$in6,$inpperm
+	 vxor		$out0,$in0,$rndkey0
+	le?vperm	$out2,$out2,$out2,$inpperm
+	stvx_u		$out1,$x10,$out
+	 le?vperm	$in7,$in7,$in7,$inpperm
+	 vxor		$out1,$in1,$rndkey0
+	le?vperm	$out3,$out3,$out3,$inpperm
+	stvx_u		$out2,$x20,$out
+	 vxor		$out2,$in2,$rndkey0
+	le?vperm	$out4,$out4,$out4,$inpperm
+	stvx_u		$out3,$x30,$out
+	 vxor		$out3,$in3,$rndkey0
+	le?vperm	$out5,$out5,$out5,$inpperm
+	stvx_u		$out4,$x40,$out
+	 vxor		$out4,$in4,$rndkey0
+	le?vperm	$out6,$out6,$out6,$inpperm
+	stvx_u		$out5,$x50,$out
+	 vxor		$out5,$in5,$rndkey0
+	le?vperm	$out7,$out7,$out7,$inpperm
+	stvx_u		$out6,$x60,$out
+	 vxor		$out6,$in6,$rndkey0
+	stvx_u		$out7,$x70,$out
+	addi		$out,$out,0x80
+	 vxor		$out7,$in7,$rndkey0
+
+	mtctr		$rounds
+	beq		Loop_cbc_dec8x		# did $len-=128 borrow?
+
+	addic.		$len,$len,128
+	beq		Lcbc_dec8x_done
+	nop
+	nop
+
+Loop_cbc_dec8x_tail:				# up to 7 "words" tail...
+	vncipher	$out1,$out1,v24
+	vncipher	$out2,$out2,v24
+	vncipher	$out3,$out3,v24
+	vncipher	$out4,$out4,v24
+	vncipher	$out5,$out5,v24
+	vncipher	$out6,$out6,v24
+	vncipher	$out7,$out7,v24
+	lvx		v24,$x20,$key_		# round[3]
+	addi		$key_,$key_,0x20
+
+	vncipher	$out1,$out1,v25
+	vncipher	$out2,$out2,v25
+	vncipher	$out3,$out3,v25
+	vncipher	$out4,$out4,v25
+	vncipher	$out5,$out5,v25
+	vncipher	$out6,$out6,v25
+	vncipher	$out7,$out7,v25
+	lvx		v25,$x10,$key_		# round[4]
+	bdnz		Loop_cbc_dec8x_tail
+
+	vncipher	$out1,$out1,v24
+	vncipher	$out2,$out2,v24
+	vncipher	$out3,$out3,v24
+	vncipher	$out4,$out4,v24
+	vncipher	$out5,$out5,v24
+	vncipher	$out6,$out6,v24
+	vncipher	$out7,$out7,v24
+
+	vncipher	$out1,$out1,v25
+	vncipher	$out2,$out2,v25
+	vncipher	$out3,$out3,v25
+	vncipher	$out4,$out4,v25
+	vncipher	$out5,$out5,v25
+	vncipher	$out6,$out6,v25
+	vncipher	$out7,$out7,v25
+
+	vncipher	$out1,$out1,v26
+	vncipher	$out2,$out2,v26
+	vncipher	$out3,$out3,v26
+	vncipher	$out4,$out4,v26
+	vncipher	$out5,$out5,v26
+	vncipher	$out6,$out6,v26
+	vncipher	$out7,$out7,v26
+
+	vncipher	$out1,$out1,v27
+	vncipher	$out2,$out2,v27
+	vncipher	$out3,$out3,v27
+	vncipher	$out4,$out4,v27
+	vncipher	$out5,$out5,v27
+	vncipher	$out6,$out6,v27
+	vncipher	$out7,$out7,v27
+
+	vncipher	$out1,$out1,v28
+	vncipher	$out2,$out2,v28
+	vncipher	$out3,$out3,v28
+	vncipher	$out4,$out4,v28
+	vncipher	$out5,$out5,v28
+	vncipher	$out6,$out6,v28
+	vncipher	$out7,$out7,v28
+
+	vncipher	$out1,$out1,v29
+	vncipher	$out2,$out2,v29
+	vncipher	$out3,$out3,v29
+	vncipher	$out4,$out4,v29
+	vncipher	$out5,$out5,v29
+	vncipher	$out6,$out6,v29
+	vncipher	$out7,$out7,v29
+
+	vncipher	$out1,$out1,v30
+	 vxor		$ivec,$ivec,v31		# last round key
+	vncipher	$out2,$out2,v30
+	 vxor		$in1,$in1,v31
+	vncipher	$out3,$out3,v30
+	 vxor		$in2,$in2,v31
+	vncipher	$out4,$out4,v30
+	 vxor		$in3,$in3,v31
+	vncipher	$out5,$out5,v30
+	 vxor		$in4,$in4,v31
+	vncipher	$out6,$out6,v30
+	 vxor		$in5,$in5,v31
+	vncipher	$out7,$out7,v30
+	 vxor		$in6,$in6,v31
+
+	cmplwi		$len,32			# switch($len)
+	blt		Lcbc_dec8x_one
+	nop
+	beq		Lcbc_dec8x_two
+	cmplwi		$len,64
+	blt		Lcbc_dec8x_three
+	nop
+	beq		Lcbc_dec8x_four
+	cmplwi		$len,96
+	blt		Lcbc_dec8x_five
+	nop
+	beq		Lcbc_dec8x_six
+
+Lcbc_dec8x_seven:
+	vncipherlast	$out1,$out1,$ivec
+	vncipherlast	$out2,$out2,$in1
+	vncipherlast	$out3,$out3,$in2
+	vncipherlast	$out4,$out4,$in3
+	vncipherlast	$out5,$out5,$in4
+	vncipherlast	$out6,$out6,$in5
+	vncipherlast	$out7,$out7,$in6
+	vmr		$ivec,$in7
+
+	le?vperm	$out1,$out1,$out1,$inpperm
+	le?vperm	$out2,$out2,$out2,$inpperm
+	stvx_u		$out1,$x00,$out
+	le?vperm	$out3,$out3,$out3,$inpperm
+	stvx_u		$out2,$x10,$out
+	le?vperm	$out4,$out4,$out4,$inpperm
+	stvx_u		$out3,$x20,$out
+	le?vperm	$out5,$out5,$out5,$inpperm
+	stvx_u		$out4,$x30,$out
+	le?vperm	$out6,$out6,$out6,$inpperm
+	stvx_u		$out5,$x40,$out
+	le?vperm	$out7,$out7,$out7,$inpperm
+	stvx_u		$out6,$x50,$out
+	stvx_u		$out7,$x60,$out
+	addi		$out,$out,0x70
+	b		Lcbc_dec8x_done
+
+.align	5
+Lcbc_dec8x_six:
+	vncipherlast	$out2,$out2,$ivec
+	vncipherlast	$out3,$out3,$in2
+	vncipherlast	$out4,$out4,$in3
+	vncipherlast	$out5,$out5,$in4
+	vncipherlast	$out6,$out6,$in5
+	vncipherlast	$out7,$out7,$in6
+	vmr		$ivec,$in7
+
+	le?vperm	$out2,$out2,$out2,$inpperm
+	le?vperm	$out3,$out3,$out3,$inpperm
+	stvx_u		$out2,$x00,$out
+	le?vperm	$out4,$out4,$out4,$inpperm
+	stvx_u		$out3,$x10,$out
+	le?vperm	$out5,$out5,$out5,$inpperm
+	stvx_u		$out4,$x20,$out
+	le?vperm	$out6,$out6,$out6,$inpperm
+	stvx_u		$out5,$x30,$out
+	le?vperm	$out7,$out7,$out7,$inpperm
+	stvx_u		$out6,$x40,$out
+	stvx_u		$out7,$x50,$out
+	addi		$out,$out,0x60
+	b		Lcbc_dec8x_done
+
+.align	5
+Lcbc_dec8x_five:
+	vncipherlast	$out3,$out3,$ivec
+	vncipherlast	$out4,$out4,$in3
+	vncipherlast	$out5,$out5,$in4
+	vncipherlast	$out6,$out6,$in5
+	vncipherlast	$out7,$out7,$in6
+	vmr		$ivec,$in7
+
+	le?vperm	$out3,$out3,$out3,$inpperm
+	le?vperm	$out4,$out4,$out4,$inpperm
+	stvx_u		$out3,$x00,$out
+	le?vperm	$out5,$out5,$out5,$inpperm
+	stvx_u		$out4,$x10,$out
+	le?vperm	$out6,$out6,$out6,$inpperm
+	stvx_u		$out5,$x20,$out
+	le?vperm	$out7,$out7,$out7,$inpperm
+	stvx_u		$out6,$x30,$out
+	stvx_u		$out7,$x40,$out
+	addi		$out,$out,0x50
+	b		Lcbc_dec8x_done
+
+.align	5
+Lcbc_dec8x_four:
+	vncipherlast	$out4,$out4,$ivec
+	vncipherlast	$out5,$out5,$in4
+	vncipherlast	$out6,$out6,$in5
+	vncipherlast	$out7,$out7,$in6
+	vmr		$ivec,$in7
+
+	le?vperm	$out4,$out4,$out4,$inpperm
+	le?vperm	$out5,$out5,$out5,$inpperm
+	stvx_u		$out4,$x00,$out
+	le?vperm	$out6,$out6,$out6,$inpperm
+	stvx_u		$out5,$x10,$out
+	le?vperm	$out7,$out7,$out7,$inpperm
+	stvx_u		$out6,$x20,$out
+	stvx_u		$out7,$x30,$out
+	addi		$out,$out,0x40
+	b		Lcbc_dec8x_done
+
+.align	5
+Lcbc_dec8x_three:
+	vncipherlast	$out5,$out5,$ivec
+	vncipherlast	$out6,$out6,$in5
+	vncipherlast	$out7,$out7,$in6
+	vmr		$ivec,$in7
+
+	le?vperm	$out5,$out5,$out5,$inpperm
+	le?vperm	$out6,$out6,$out6,$inpperm
+	stvx_u		$out5,$x00,$out
+	le?vperm	$out7,$out7,$out7,$inpperm
+	stvx_u		$out6,$x10,$out
+	stvx_u		$out7,$x20,$out
+	addi		$out,$out,0x30
+	b		Lcbc_dec8x_done
+
+.align	5
+Lcbc_dec8x_two:
+	vncipherlast	$out6,$out6,$ivec
+	vncipherlast	$out7,$out7,$in6
+	vmr		$ivec,$in7
+
+	le?vperm	$out6,$out6,$out6,$inpperm
+	le?vperm	$out7,$out7,$out7,$inpperm
+	stvx_u		$out6,$x00,$out
+	stvx_u		$out7,$x10,$out
+	addi		$out,$out,0x20
+	b		Lcbc_dec8x_done
+
+.align	5
+Lcbc_dec8x_one:
+	vncipherlast	$out7,$out7,$ivec
+	vmr		$ivec,$in7
+
+	le?vperm	$out7,$out7,$out7,$inpperm
+	stvx_u		$out7,0,$out
+	addi		$out,$out,0x10
+
+Lcbc_dec8x_done:
+	le?vperm	$ivec,$ivec,$ivec,$inpperm
+	stvx_u		$ivec,0,$ivp		# write [unaligned] iv
+
+	li		r10,`$FRAME+15`
+	li		r11,`$FRAME+31`
+	stvx		$inpperm,r10,$sp	# wipe copies of round keys
+	addi		r10,r10,32
+	stvx		$inpperm,r11,$sp
+	addi		r11,r11,32
+	stvx		$inpperm,r10,$sp
+	addi		r10,r10,32
+	stvx		$inpperm,r11,$sp
+	addi		r11,r11,32
+	stvx		$inpperm,r10,$sp
+	addi		r10,r10,32
+	stvx		$inpperm,r11,$sp
+	addi		r11,r11,32
+	stvx		$inpperm,r10,$sp
+	addi		r10,r10,32
+	stvx		$inpperm,r11,$sp
+	addi		r11,r11,32
+
+	mtspr		256,$vrsave
+	lvx		v20,r10,$sp		# ABI says so
+	addi		r10,r10,32
+	lvx		v21,r11,$sp
+	addi		r11,r11,32
+	lvx		v22,r10,$sp
+	addi		r10,r10,32
+	lvx		v23,r11,$sp
+	addi		r11,r11,32
+	lvx		v24,r10,$sp
+	addi		r10,r10,32
+	lvx		v25,r11,$sp
+	addi		r11,r11,32
+	lvx		v26,r10,$sp
+	addi		r10,r10,32
+	lvx		v27,r11,$sp
+	addi		r11,r11,32
+	lvx		v28,r10,$sp
+	addi		r10,r10,32
+	lvx		v29,r11,$sp
+	addi		r11,r11,32
+	lvx		v30,r10,$sp
+	lvx		v31,r11,$sp
+	$POP		r26,`$FRAME+21*16+0*$SIZE_T`($sp)
+	$POP		r27,`$FRAME+21*16+1*$SIZE_T`($sp)
+	$POP		r28,`$FRAME+21*16+2*$SIZE_T`($sp)
+	$POP		r29,`$FRAME+21*16+3*$SIZE_T`($sp)
+	$POP		r30,`$FRAME+21*16+4*$SIZE_T`($sp)
+	$POP		r31,`$FRAME+21*16+5*$SIZE_T`($sp)
+	addi		$sp,$sp,`$FRAME+21*16+6*$SIZE_T`
+	blr
+	.long		0
+	.byte		0,12,0x04,0,0x80,6,6,0
+	.long		0
+.size	.${prefix}_cbc_encrypt,.-.${prefix}_cbc_encrypt
+___
+}}	}}}
+
+#########################################################################
+{{{	# CTR procedure[s]						#
+my ($inp,$out,$len,$key,$ivp,$x10,$rounds,$idx)=map("r$_",(3..10));
+my ($rndkey0,$rndkey1,$inout,$tmp)=		map("v$_",(0..3));
+my ($ivec,$inptail,$inpperm,$outhead,$outperm,$outmask,$keyperm,$one)=
+						map("v$_",(4..11));
+my $dat=$tmp;
+
+$code.=<<___;
+.globl	.${prefix}_ctr32_encrypt_blocks
+.align	5
+.${prefix}_ctr32_encrypt_blocks:
+	${UCMP}i	$len,1
+	bltlr-
+
+	lis		r0,0xfff0
+	mfspr		$vrsave,256
+	mtspr		256,r0
+
+	li		$idx,15
+	vxor		$rndkey0,$rndkey0,$rndkey0
+	le?vspltisb	$tmp,0x0f
+
+	lvx		$ivec,0,$ivp		# load [unaligned] iv
+	lvsl		$inpperm,0,$ivp
+	lvx		$inptail,$idx,$ivp
+	 vspltisb	$one,1
+	le?vxor		$inpperm,$inpperm,$tmp
+	vperm		$ivec,$ivec,$inptail,$inpperm
+	 vsldoi		$one,$rndkey0,$one,1
+
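+# $one is {0,...,0,1}, so vadduwm increments only the last 32-bit word
+# of the counter block, giving the ctr32 semantics of this entry point.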
+	neg		r11,$inp
+	?lvsl		$keyperm,0,$key		# prepare for unaligned key
+	lwz		$rounds,240($key)
+
+	lvsr		$inpperm,0,r11		# prepare for unaligned load
+	lvx		$inptail,0,$inp
+	addi		$inp,$inp,15		# 15 is not a typo
+	le?vxor		$inpperm,$inpperm,$tmp
+
+	srwi		$rounds,$rounds,1
+	li		$idx,16
+	subi		$rounds,$rounds,1
+
+	${UCMP}i	$len,8
+	bge		_aesp8_ctr32_encrypt8x
+
+	?lvsr		$outperm,0,$out		# prepare for unaligned store
+	vspltisb	$outmask,-1
+	lvx		$outhead,0,$out
+	?vperm		$outmask,$rndkey0,$outmask,$outperm
+	le?vxor		$outperm,$outperm,$tmp
+
+	lvx		$rndkey0,0,$key
+	mtctr		$rounds
+	lvx		$rndkey1,$idx,$key
+	addi		$idx,$idx,16
+	?vperm		$rndkey0,$rndkey0,$rndkey1,$keyperm
+	vxor		$inout,$ivec,$rndkey0
+	lvx		$rndkey0,$idx,$key
+	addi		$idx,$idx,16
+	b		Loop_ctr32_enc
+
+.align	5
+Loop_ctr32_enc:
+	?vperm		$rndkey1,$rndkey1,$rndkey0,$keyperm
+	vcipher		$inout,$inout,$rndkey1
+	lvx		$rndkey1,$idx,$key
+	addi		$idx,$idx,16
+	?vperm		$rndkey0,$rndkey0,$rndkey1,$keyperm
+	vcipher		$inout,$inout,$rndkey0
+	lvx		$rndkey0,$idx,$key
+	addi		$idx,$idx,16
+	bdnz		Loop_ctr32_enc
+
+	vadduwm		$ivec,$ivec,$one
+	 vmr		$dat,$inptail
+	 lvx		$inptail,0,$inp
+	 addi		$inp,$inp,16
+	 subic.		$len,$len,1		# blocks--
+
+	?vperm		$rndkey1,$rndkey1,$rndkey0,$keyperm
+	vcipher		$inout,$inout,$rndkey1
+	lvx		$rndkey1,$idx,$key
+	 vperm		$dat,$dat,$inptail,$inpperm
+	 li		$idx,16
+	?vperm		$rndkey1,$rndkey0,$rndkey1,$keyperm
+	 lvx		$rndkey0,0,$key
+	vxor		$dat,$dat,$rndkey1	# last round key
+	vcipherlast	$inout,$inout,$dat
+
+	 lvx		$rndkey1,$idx,$key
+	 addi		$idx,$idx,16
+	vperm		$inout,$inout,$inout,$outperm
+	vsel		$dat,$outhead,$inout,$outmask
+	 mtctr		$rounds
+	 ?vperm		$rndkey0,$rndkey0,$rndkey1,$keyperm
+	vmr		$outhead,$inout
+	 vxor		$inout,$ivec,$rndkey0
+	 lvx		$rndkey0,$idx,$key
+	 addi		$idx,$idx,16
+	stvx		$dat,0,$out
+	addi		$out,$out,16
+	bne		Loop_ctr32_enc
+
+	addi		$out,$out,-1
+	lvx		$inout,0,$out		# redundant in aligned case
+	vsel		$inout,$outhead,$inout,$outmask
+	stvx		$inout,0,$out
+
+	mtspr		256,$vrsave
+	blr
+	.long		0
+	.byte		0,12,0x14,0,0,0,6,0
+	.long		0
+___
+#########################################################################
+{{	# Optimized CTR procedure					#
+my $key_="r11";
+my ($x00,$x10,$x20,$x30,$x40,$x50,$x60,$x70)=map("r$_",(0,8,26..31));
+    $x00=0 if ($flavour =~ /osx/);
+my ($in0, $in1, $in2, $in3, $in4, $in5, $in6, $in7 )=map("v$_",(0..3,10,12..14));
+my ($out0,$out1,$out2,$out3,$out4,$out5,$out6,$out7)=map("v$_",(15..22));
+my $rndkey0="v23";	# v24-v25 rotating buffer for first round keys
+			# v26-v31 last 6 round keys
+my ($tmp,$keyperm)=($in3,$in4);	# aliases with "caller", redundant assignment
+my ($two,$three,$four)=($outhead,$outperm,$outmask);
+
+$code.=<<___;
+.align	5
+_aesp8_ctr32_encrypt8x:
+	$STU		$sp,-`($FRAME+21*16+6*$SIZE_T)`($sp)
+	li		r10,`$FRAME+8*16+15`
+	li		r11,`$FRAME+8*16+31`
+	stvx		v20,r10,$sp		# ABI says so
+	addi		r10,r10,32
+	stvx		v21,r11,$sp
+	addi		r11,r11,32
+	stvx		v22,r10,$sp
+	addi		r10,r10,32
+	stvx		v23,r11,$sp
+	addi		r11,r11,32
+	stvx		v24,r10,$sp
+	addi		r10,r10,32
+	stvx		v25,r11,$sp
+	addi		r11,r11,32
+	stvx		v26,r10,$sp
+	addi		r10,r10,32
+	stvx		v27,r11,$sp
+	addi		r11,r11,32
+	stvx		v28,r10,$sp
+	addi		r10,r10,32
+	stvx		v29,r11,$sp
+	addi		r11,r11,32
+	stvx		v30,r10,$sp
+	stvx		v31,r11,$sp
+	li		r0,-1
+	stw		$vrsave,`$FRAME+21*16-4`($sp)	# save vrsave
+	li		$x10,0x10
+	$PUSH		r26,`$FRAME+21*16+0*$SIZE_T`($sp)
+	li		$x20,0x20
+	$PUSH		r27,`$FRAME+21*16+1*$SIZE_T`($sp)
+	li		$x30,0x30
+	$PUSH		r28,`$FRAME+21*16+2*$SIZE_T`($sp)
+	li		$x40,0x40
+	$PUSH		r29,`$FRAME+21*16+3*$SIZE_T`($sp)
+	li		$x50,0x50
+	$PUSH		r30,`$FRAME+21*16+4*$SIZE_T`($sp)
+	li		$x60,0x60
+	$PUSH		r31,`$FRAME+21*16+5*$SIZE_T`($sp)
+	li		$x70,0x70
+	mtspr		256,r0
+
+	subi		$rounds,$rounds,3	# -4 in total
+
+	lvx		$rndkey0,$x00,$key	# load key schedule
+	lvx		v30,$x10,$key
+	addi		$key,$key,0x20
+	lvx		v31,$x00,$key
+	?vperm		$rndkey0,$rndkey0,v30,$keyperm
+	addi		$key_,$sp,$FRAME+15
+	mtctr		$rounds
+
+Load_ctr32_enc_key:
+	?vperm		v24,v30,v31,$keyperm
+	lvx		v30,$x10,$key
+	addi		$key,$key,0x20
+	stvx		v24,$x00,$key_		# off-load round[1]
+	?vperm		v25,v31,v30,$keyperm
+	lvx		v31,$x00,$key
+	stvx		v25,$x10,$key_		# off-load round[2]
+	addi		$key_,$key_,0x20
+	bdnz		Load_ctr32_enc_key
+
+	lvx		v26,$x10,$key
+	?vperm		v24,v30,v31,$keyperm
+	lvx		v27,$x20,$key
+	stvx		v24,$x00,$key_		# off-load round[3]
+	?vperm		v25,v31,v26,$keyperm
+	lvx		v28,$x30,$key
+	stvx		v25,$x10,$key_		# off-load round[4]
+	addi		$key_,$sp,$FRAME+15	# rewind $key_
+	?vperm		v26,v26,v27,$keyperm
+	lvx		v29,$x40,$key
+	?vperm		v27,v27,v28,$keyperm
+	lvx		v30,$x50,$key
+	?vperm		v28,v28,v29,$keyperm
+	lvx		v31,$x60,$key
+	?vperm		v29,v29,v30,$keyperm
+	lvx		$out0,$x70,$key		# borrow $out0
+	?vperm		v30,v30,v31,$keyperm
+	lvx		v24,$x00,$key_		# pre-load round[1]
+	?vperm		v31,v31,$out0,$keyperm
+	lvx		v25,$x10,$key_		# pre-load round[2]
+
+	vadduwm		$two,$one,$one
+	subi		$inp,$inp,15		# undo "caller"
+	$SHL		$len,$len,4
+
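+# Materialize eight consecutive counter values, pre-xored with round key
+# zero, and advance $ivec by eight for the next batch.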
+	vadduwm		$out1,$ivec,$one	# counter values ...
+	vadduwm		$out2,$ivec,$two
+	vxor		$out0,$ivec,$rndkey0	# ... xored with rndkey[0]
+	 le?li		$idx,8
+	vadduwm		$out3,$out1,$two
+	vxor		$out1,$out1,$rndkey0
+	 le?lvsl	$inpperm,0,$idx
+	vadduwm		$out4,$out2,$two
+	vxor		$out2,$out2,$rndkey0
+	 le?vspltisb	$tmp,0x0f
+	vadduwm		$out5,$out3,$two
+	vxor		$out3,$out3,$rndkey0
+	 le?vxor	$inpperm,$inpperm,$tmp	# transform for lvx_u/stvx_u
+	vadduwm		$out6,$out4,$two
+	vxor		$out4,$out4,$rndkey0
+	vadduwm		$out7,$out5,$two
+	vxor		$out5,$out5,$rndkey0
+	vadduwm		$ivec,$out6,$two	# next counter value
+	vxor		$out6,$out6,$rndkey0
+	vxor		$out7,$out7,$rndkey0
+
+	mtctr		$rounds
+	b		Loop_ctr32_enc8x
+.align	5
+Loop_ctr32_enc8x:
+	vcipher 	$out0,$out0,v24
+	vcipher 	$out1,$out1,v24
+	vcipher 	$out2,$out2,v24
+	vcipher 	$out3,$out3,v24
+	vcipher 	$out4,$out4,v24
+	vcipher 	$out5,$out5,v24
+	vcipher 	$out6,$out6,v24
+	vcipher 	$out7,$out7,v24
+Loop_ctr32_enc8x_middle:
+	lvx		v24,$x20,$key_		# round[3]
+	addi		$key_,$key_,0x20
+
+	vcipher 	$out0,$out0,v25
+	vcipher 	$out1,$out1,v25
+	vcipher 	$out2,$out2,v25
+	vcipher 	$out3,$out3,v25
+	vcipher 	$out4,$out4,v25
+	vcipher 	$out5,$out5,v25
+	vcipher 	$out6,$out6,v25
+	vcipher 	$out7,$out7,v25
+	lvx		v25,$x10,$key_		# round[4]
+	bdnz		Loop_ctr32_enc8x
+
+	subic		r11,$len,256		# $len-256, borrow $key_
+	vcipher 	$out0,$out0,v24
+	vcipher 	$out1,$out1,v24
+	vcipher 	$out2,$out2,v24
+	vcipher 	$out3,$out3,v24
+	vcipher 	$out4,$out4,v24
+	vcipher 	$out5,$out5,v24
+	vcipher 	$out6,$out6,v24
+	vcipher 	$out7,$out7,v24
+
+	subfe		r0,r0,r0		# borrow?-1:0
+	vcipher 	$out0,$out0,v25
+	vcipher 	$out1,$out1,v25
+	vcipher 	$out2,$out2,v25
+	vcipher 	$out3,$out3,v25
+	vcipher 	$out4,$out4,v25
+	vcipher		$out5,$out5,v25
+	vcipher		$out6,$out6,v25
+	vcipher		$out7,$out7,v25
+
+	and		r0,r0,r11
+	addi		$key_,$sp,$FRAME+15	# rewind $key_
+	vcipher		$out0,$out0,v26
+	vcipher		$out1,$out1,v26
+	vcipher		$out2,$out2,v26
+	vcipher		$out3,$out3,v26
+	vcipher		$out4,$out4,v26
+	vcipher		$out5,$out5,v26
+	vcipher		$out6,$out6,v26
+	vcipher		$out7,$out7,v26
+	lvx		v24,$x00,$key_		# re-pre-load round[1]
+
+	subic		$len,$len,129		# $len-=129
+	vcipher		$out0,$out0,v27
+	addi		$len,$len,1		# $len-=128 really
+	vcipher		$out1,$out1,v27
+	vcipher		$out2,$out2,v27
+	vcipher		$out3,$out3,v27
+	vcipher		$out4,$out4,v27
+	vcipher		$out5,$out5,v27
+	vcipher		$out6,$out6,v27
+	vcipher		$out7,$out7,v27
+	lvx		v25,$x10,$key_		# re-pre-load round[2]
+
+	vcipher		$out0,$out0,v28
+	 lvx_u		$in0,$x00,$inp		# load input
+	vcipher		$out1,$out1,v28
+	 lvx_u		$in1,$x10,$inp
+	vcipher		$out2,$out2,v28
+	 lvx_u		$in2,$x20,$inp
+	vcipher		$out3,$out3,v28
+	 lvx_u		$in3,$x30,$inp
+	vcipher		$out4,$out4,v28
+	 lvx_u		$in4,$x40,$inp
+	vcipher		$out5,$out5,v28
+	 lvx_u		$in5,$x50,$inp
+	vcipher		$out6,$out6,v28
+	 lvx_u		$in6,$x60,$inp
+	vcipher		$out7,$out7,v28
+	 lvx_u		$in7,$x70,$inp
+	 addi		$inp,$inp,0x80
+
+	vcipher		$out0,$out0,v29
+	 le?vperm	$in0,$in0,$in0,$inpperm
+	vcipher		$out1,$out1,v29
+	 le?vperm	$in1,$in1,$in1,$inpperm
+	vcipher		$out2,$out2,v29
+	 le?vperm	$in2,$in2,$in2,$inpperm
+	vcipher		$out3,$out3,v29
+	 le?vperm	$in3,$in3,$in3,$inpperm
+	vcipher		$out4,$out4,v29
+	 le?vperm	$in4,$in4,$in4,$inpperm
+	vcipher		$out5,$out5,v29
+	 le?vperm	$in5,$in5,$in5,$inpperm
+	vcipher		$out6,$out6,v29
+	 le?vperm	$in6,$in6,$in6,$inpperm
+	vcipher		$out7,$out7,v29
+	 le?vperm	$in7,$in7,$in7,$inpperm
+
+	add		$inp,$inp,r0		# $inp is adjusted in such
+						# way that at exit from the
+						# loop inX-in7 are loaded
+						# with last "words"
+	subfe.		r0,r0,r0		# borrow?-1:0
+	vcipher		$out0,$out0,v30
+	 vxor		$in0,$in0,v31		# xor with last round key
+	vcipher		$out1,$out1,v30
+	 vxor		$in1,$in1,v31
+	vcipher		$out2,$out2,v30
+	 vxor		$in2,$in2,v31
+	vcipher		$out3,$out3,v30
+	 vxor		$in3,$in3,v31
+	vcipher		$out4,$out4,v30
+	 vxor		$in4,$in4,v31
+	vcipher		$out5,$out5,v30
+	 vxor		$in5,$in5,v31
+	vcipher		$out6,$out6,v30
+	 vxor		$in6,$in6,v31
+	vcipher		$out7,$out7,v30
+	 vxor		$in7,$in7,v31
+
+	bne		Lctr32_enc8x_break	# did $len-129 borrow?
+
+	vcipherlast	$in0,$out0,$in0
+	vcipherlast	$in1,$out1,$in1
+	 vadduwm	$out1,$ivec,$one	# counter values ...
+	vcipherlast	$in2,$out2,$in2
+	 vadduwm	$out2,$ivec,$two
+	 vxor		$out0,$ivec,$rndkey0	# ... xored with rndkey[0]
+	vcipherlast	$in3,$out3,$in3
+	 vadduwm	$out3,$out1,$two
+	 vxor		$out1,$out1,$rndkey0
+	vcipherlast	$in4,$out4,$in4
+	 vadduwm	$out4,$out2,$two
+	 vxor		$out2,$out2,$rndkey0
+	vcipherlast	$in5,$out5,$in5
+	 vadduwm	$out5,$out3,$two
+	 vxor		$out3,$out3,$rndkey0
+	vcipherlast	$in6,$out6,$in6
+	 vadduwm	$out6,$out4,$two
+	 vxor		$out4,$out4,$rndkey0
+	vcipherlast	$in7,$out7,$in7
+	 vadduwm	$out7,$out5,$two
+	 vxor		$out5,$out5,$rndkey0
+	le?vperm	$in0,$in0,$in0,$inpperm
+	 vadduwm	$ivec,$out6,$two	# next counter value
+	 vxor		$out6,$out6,$rndkey0
+	le?vperm	$in1,$in1,$in1,$inpperm
+	 vxor		$out7,$out7,$rndkey0
+	mtctr		$rounds
+
+	 vcipher	$out0,$out0,v24
+	stvx_u		$in0,$x00,$out
+	le?vperm	$in2,$in2,$in2,$inpperm
+	 vcipher	$out1,$out1,v24
+	stvx_u		$in1,$x10,$out
+	le?vperm	$in3,$in3,$in3,$inpperm
+	 vcipher	$out2,$out2,v24
+	stvx_u		$in2,$x20,$out
+	le?vperm	$in4,$in4,$in4,$inpperm
+	 vcipher	$out3,$out3,v24
+	stvx_u		$in3,$x30,$out
+	le?vperm	$in5,$in5,$in5,$inpperm
+	 vcipher	$out4,$out4,v24
+	stvx_u		$in4,$x40,$out
+	le?vperm	$in6,$in6,$in6,$inpperm
+	 vcipher	$out5,$out5,v24
+	stvx_u		$in5,$x50,$out
+	le?vperm	$in7,$in7,$in7,$inpperm
+	 vcipher	$out6,$out6,v24
+	stvx_u		$in6,$x60,$out
+	 vcipher	$out7,$out7,v24
+	stvx_u		$in7,$x70,$out
+	addi		$out,$out,0x80
+
+	b		Loop_ctr32_enc8x_middle
+
+.align	5
+Lctr32_enc8x_break:
+	cmpwi		$len,-0x60
+	blt		Lctr32_enc8x_one
+	nop
+	beq		Lctr32_enc8x_two
+	cmpwi		$len,-0x40
+	blt		Lctr32_enc8x_three
+	nop
+	beq		Lctr32_enc8x_four
+	cmpwi		$len,-0x20
+	blt		Lctr32_enc8x_five
+	nop
+	beq		Lctr32_enc8x_six
+	cmpwi		$len,0x00
+	blt		Lctr32_enc8x_seven
+
+Lctr32_enc8x_eight:
+	vcipherlast	$out0,$out0,$in0
+	vcipherlast	$out1,$out1,$in1
+	vcipherlast	$out2,$out2,$in2
+	vcipherlast	$out3,$out3,$in3
+	vcipherlast	$out4,$out4,$in4
+	vcipherlast	$out5,$out5,$in5
+	vcipherlast	$out6,$out6,$in6
+	vcipherlast	$out7,$out7,$in7
+
+	le?vperm	$out0,$out0,$out0,$inpperm
+	le?vperm	$out1,$out1,$out1,$inpperm
+	stvx_u		$out0,$x00,$out
+	le?vperm	$out2,$out2,$out2,$inpperm
+	stvx_u		$out1,$x10,$out
+	le?vperm	$out3,$out3,$out3,$inpperm
+	stvx_u		$out2,$x20,$out
+	le?vperm	$out4,$out4,$out4,$inpperm
+	stvx_u		$out3,$x30,$out
+	le?vperm	$out5,$out5,$out5,$inpperm
+	stvx_u		$out4,$x40,$out
+	le?vperm	$out6,$out6,$out6,$inpperm
+	stvx_u		$out5,$x50,$out
+	le?vperm	$out7,$out7,$out7,$inpperm
+	stvx_u		$out6,$x60,$out
+	stvx_u		$out7,$x70,$out
+	addi		$out,$out,0x80
+	b		Lctr32_enc8x_done
+
+.align	5
+Lctr32_enc8x_seven:
+	vcipherlast	$out0,$out0,$in1
+	vcipherlast	$out1,$out1,$in2
+	vcipherlast	$out2,$out2,$in3
+	vcipherlast	$out3,$out3,$in4
+	vcipherlast	$out4,$out4,$in5
+	vcipherlast	$out5,$out5,$in6
+	vcipherlast	$out6,$out6,$in7
+
+	le?vperm	$out0,$out0,$out0,$inpperm
+	le?vperm	$out1,$out1,$out1,$inpperm
+	stvx_u		$out0,$x00,$out
+	le?vperm	$out2,$out2,$out2,$inpperm
+	stvx_u		$out1,$x10,$out
+	le?vperm	$out3,$out3,$out3,$inpperm
+	stvx_u		$out2,$x20,$out
+	le?vperm	$out4,$out4,$out4,$inpperm
+	stvx_u		$out3,$x30,$out
+	le?vperm	$out5,$out5,$out5,$inpperm
+	stvx_u		$out4,$x40,$out
+	le?vperm	$out6,$out6,$out6,$inpperm
+	stvx_u		$out5,$x50,$out
+	stvx_u		$out6,$x60,$out
+	addi		$out,$out,0x70
+	b		Lctr32_enc8x_done
+
+.align	5
+Lctr32_enc8x_six:
+	vcipherlast	$out0,$out0,$in2
+	vcipherlast	$out1,$out1,$in3
+	vcipherlast	$out2,$out2,$in4
+	vcipherlast	$out3,$out3,$in5
+	vcipherlast	$out4,$out4,$in6
+	vcipherlast	$out5,$out5,$in7
+
+	le?vperm	$out0,$out0,$out0,$inpperm
+	le?vperm	$out1,$out1,$out1,$inpperm
+	stvx_u		$out0,$x00,$out
+	le?vperm	$out2,$out2,$out2,$inpperm
+	stvx_u		$out1,$x10,$out
+	le?vperm	$out3,$out3,$out3,$inpperm
+	stvx_u		$out2,$x20,$out
+	le?vperm	$out4,$out4,$out4,$inpperm
+	stvx_u		$out3,$x30,$out
+	le?vperm	$out5,$out5,$out5,$inpperm
+	stvx_u		$out4,$x40,$out
+	stvx_u		$out5,$x50,$out
+	addi		$out,$out,0x60
+	b		Lctr32_enc8x_done
+
+.align	5
+Lctr32_enc8x_five:
+	vcipherlast	$out0,$out0,$in3
+	vcipherlast	$out1,$out1,$in4
+	vcipherlast	$out2,$out2,$in5
+	vcipherlast	$out3,$out3,$in6
+	vcipherlast	$out4,$out4,$in7
+
+	le?vperm	$out0,$out0,$out0,$inpperm
+	le?vperm	$out1,$out1,$out1,$inpperm
+	stvx_u		$out0,$x00,$out
+	le?vperm	$out2,$out2,$out2,$inpperm
+	stvx_u		$out1,$x10,$out
+	le?vperm	$out3,$out3,$out3,$inpperm
+	stvx_u		$out2,$x20,$out
+	le?vperm	$out4,$out4,$out4,$inpperm
+	stvx_u		$out3,$x30,$out
+	stvx_u		$out4,$x40,$out
+	addi		$out,$out,0x50
+	b		Lctr32_enc8x_done
+
+.align	5
+Lctr32_enc8x_four:
+	vcipherlast	$out0,$out0,$in4
+	vcipherlast	$out1,$out1,$in5
+	vcipherlast	$out2,$out2,$in6
+	vcipherlast	$out3,$out3,$in7
+
+	le?vperm	$out0,$out0,$out0,$inpperm
+	le?vperm	$out1,$out1,$out1,$inpperm
+	stvx_u		$out0,$x00,$out
+	le?vperm	$out2,$out2,$out2,$inpperm
+	stvx_u		$out1,$x10,$out
+	le?vperm	$out3,$out3,$out3,$inpperm
+	stvx_u		$out2,$x20,$out
+	stvx_u		$out3,$x30,$out
+	addi		$out,$out,0x40
+	b		Lctr32_enc8x_done
+
+.align	5
+Lctr32_enc8x_three:
+	vcipherlast	$out0,$out0,$in5
+	vcipherlast	$out1,$out1,$in6
+	vcipherlast	$out2,$out2,$in7
+
+	le?vperm	$out0,$out0,$out0,$inpperm
+	le?vperm	$out1,$out1,$out1,$inpperm
+	stvx_u		$out0,$x00,$out
+	le?vperm	$out2,$out2,$out2,$inpperm
+	stvx_u		$out1,$x10,$out
+	stvx_u		$out2,$x20,$out
+	addi		$out,$out,0x30
+	b		Lctr32_enc8x_done
+
+.align	5
+Lctr32_enc8x_two:
+	vcipherlast	$out0,$out0,$in6
+	vcipherlast	$out1,$out1,$in7
+
+	le?vperm	$out0,$out0,$out0,$inpperm
+	le?vperm	$out1,$out1,$out1,$inpperm
+	stvx_u		$out0,$x00,$out
+	stvx_u		$out1,$x10,$out
+	addi		$out,$out,0x20
+	b		Lctr32_enc8x_done
+
+.align	5
+Lctr32_enc8x_one:
+	vcipherlast	$out0,$out0,$in7
+
+	le?vperm	$out0,$out0,$out0,$inpperm
+	stvx_u		$out0,0,$out
+	addi		$out,$out,0x10
+
+Lctr32_enc8x_done:
+	li		r10,`$FRAME+15`
+	li		r11,`$FRAME+31`
+	stvx		$inpperm,r10,$sp	# wipe copies of round keys
+	addi		r10,r10,32
+	stvx		$inpperm,r11,$sp
+	addi		r11,r11,32
+	stvx		$inpperm,r10,$sp
+	addi		r10,r10,32
+	stvx		$inpperm,r11,$sp
+	addi		r11,r11,32
+	stvx		$inpperm,r10,$sp
+	addi		r10,r10,32
+	stvx		$inpperm,r11,$sp
+	addi		r11,r11,32
+	stvx		$inpperm,r10,$sp
+	addi		r10,r10,32
+	stvx		$inpperm,r11,$sp
+	addi		r11,r11,32
+
+	mtspr		256,$vrsave
+	lvx		v20,r10,$sp		# ABI says so
+	addi		r10,r10,32
+	lvx		v21,r11,$sp
+	addi		r11,r11,32
+	lvx		v22,r10,$sp
+	addi		r10,r10,32
+	lvx		v23,r11,$sp
+	addi		r11,r11,32
+	lvx		v24,r10,$sp
+	addi		r10,r10,32
+	lvx		v25,r11,$sp
+	addi		r11,r11,32
+	lvx		v26,r10,$sp
+	addi		r10,r10,32
+	lvx		v27,r11,$sp
+	addi		r11,r11,32
+	lvx		v28,r10,$sp
+	addi		r10,r10,32
+	lvx		v29,r11,$sp
+	addi		r11,r11,32
+	lvx		v30,r10,$sp
+	lvx		v31,r11,$sp
+	$POP		r26,`$FRAME+21*16+0*$SIZE_T`($sp)
+	$POP		r27,`$FRAME+21*16+1*$SIZE_T`($sp)
+	$POP		r28,`$FRAME+21*16+2*$SIZE_T`($sp)
+	$POP		r29,`$FRAME+21*16+3*$SIZE_T`($sp)
+	$POP		r30,`$FRAME+21*16+4*$SIZE_T`($sp)
+	$POP		r31,`$FRAME+21*16+5*$SIZE_T`($sp)
+	addi		$sp,$sp,`$FRAME+21*16+6*$SIZE_T`
+	blr
+	.long		0
+	.byte		0,12,0x04,0,0x80,6,6,0
+	.long		0
+.size	.${prefix}_ctr32_encrypt_blocks,.-.${prefix}_ctr32_encrypt_blocks
+___
+}}	}}}
+
+#########################################################################
+{{{	# XTS procedures						#
+# int aes_p8_xts_[en|de]crypt(const char *inp, char *out, size_t len,	#
+#                             const AES_KEY *key1, const AES_KEY *key2,	#
+#                             [const] unsigned char iv[16]);		#
+# If $key2 is NULL, a "tweak chaining" mode is engaged: the input tweak	#
+# value is assumed to be encrypted already, and the last tweak value,	#
+# suitable for a consecutive call on the same chunk of data, is written	#
+# back to the original buffer. In addition, in "tweak chaining" mode	#
+# only complete input blocks are processed.				#
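+# Both entry points return 0 in r3 on success, or -1 when len is less	#
+# than one 16-byte block (r3 is preset before the early bltlr- exit).	#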
+
+my ($inp,$out,$len,$key1,$key2,$ivp,$rounds,$idx) =	map("r$_",(3..10));
+my ($rndkey0,$rndkey1,$inout) =				map("v$_",(0..2));
+my ($output,$inptail,$inpperm,$leperm,$keyperm) =	map("v$_",(3..7));
+my ($tweak,$seven,$eighty7,$tmp,$tweak1) =		map("v$_",(8..12));
+my $taillen = $key2;
+
+   ($inp,$idx) = ($idx,$inp);				# reassign
+
+$code.=<<___;
+.globl	.${prefix}_xts_encrypt
+.align	5
+.${prefix}_xts_encrypt:
+	mr		$inp,r3				# reassign
+	li		r3,-1
+	${UCMP}i	$len,16
+	bltlr-
+
+	lis		r0,0xfff0
+	mfspr		r12,256				# save vrsave
+	li		r11,0
+	mtspr		256,r0
+
+	vspltisb	$seven,0x07			# 0x070707..07
+	le?lvsl		$leperm,r11,r11
+	le?vspltisb	$tmp,0x0f
+	le?vxor		$leperm,$leperm,$seven
+
+	li		$idx,15
+	lvx		$tweak,0,$ivp			# load [unaligned] iv
+	lvsl		$inpperm,0,$ivp
+	lvx		$inptail,$idx,$ivp
+	le?vxor		$inpperm,$inpperm,$tmp
+	vperm		$tweak,$tweak,$inptail,$inpperm
+
+	neg		r11,$inp
+	lvsr		$inpperm,0,r11			# prepare for unaligned load
+	lvx		$inout,0,$inp
+	addi		$inp,$inp,15			# 15 is not a typo
+	le?vxor		$inpperm,$inpperm,$tmp
+
+	${UCMP}i	$key2,0				# key2==NULL?
+	beq		Lxts_enc_no_key2
+
+	?lvsl		$keyperm,0,$key2		# prepare for unaligned key
+	lwz		$rounds,240($key2)
+	srwi		$rounds,$rounds,1
+	subi		$rounds,$rounds,1
+	li		$idx,16
+
+	lvx		$rndkey0,0,$key2
+	lvx		$rndkey1,$idx,$key2
+	addi		$idx,$idx,16
+	?vperm		$rndkey0,$rndkey0,$rndkey1,$keyperm
+	vxor		$tweak,$tweak,$rndkey0
+	lvx		$rndkey0,$idx,$key2
+	addi		$idx,$idx,16
+	mtctr		$rounds
+
+Ltweak_xts_enc:
+	?vperm		$rndkey1,$rndkey1,$rndkey0,$keyperm
+	vcipher		$tweak,$tweak,$rndkey1
+	lvx		$rndkey1,$idx,$key2
+	addi		$idx,$idx,16
+	?vperm		$rndkey0,$rndkey0,$rndkey1,$keyperm
+	vcipher		$tweak,$tweak,$rndkey0
+	lvx		$rndkey0,$idx,$key2
+	addi		$idx,$idx,16
+	bdnz		Ltweak_xts_enc
+
+	?vperm		$rndkey1,$rndkey1,$rndkey0,$keyperm
+	vcipher		$tweak,$tweak,$rndkey1
+	lvx		$rndkey1,$idx,$key2
+	?vperm		$rndkey0,$rndkey0,$rndkey1,$keyperm
+	vcipherlast	$tweak,$tweak,$rndkey0
+
+	li		$ivp,0				# don't chain the tweak
+	b		Lxts_enc
+
+Lxts_enc_no_key2:
+	li		$idx,-16
+	and		$len,$len,$idx			# in "tweak chaining"
+							# mode only complete
+							# blocks are processed
+Lxts_enc:
+	lvx		$inptail,0,$inp
+	addi		$inp,$inp,16
+
+	?lvsl		$keyperm,0,$key1		# prepare for unaligned key
+	lwz		$rounds,240($key1)
+	srwi		$rounds,$rounds,1
+	subi		$rounds,$rounds,1
+	li		$idx,16
+
+	vslb		$eighty7,$seven,$seven		# 0x808080..80
+	vor		$eighty7,$eighty7,$seven	# 0x878787..87
+	vspltisb	$tmp,1				# 0x010101..01
+	vsldoi		$eighty7,$eighty7,$tmp,15	# 0x870101..01
+
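+# The vsrab/vaddubm/vsldoi/vand/vxor sequences below use $eighty7 to
+# multiply the tweak by x in GF(2^128) modulo x^128+x^7+x^2+x+1, the
+# standard XTS tweak update (0x87 is the reduction byte).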
+	${UCMP}i	$len,96
+	bge		_aesp8_xts_encrypt6x
+
+	andi.		$taillen,$len,15
+	subic		r0,$len,32
+	subi		$taillen,$taillen,16
+	subfe		r0,r0,r0
+	and		r0,r0,$taillen
+	add		$inp,$inp,r0
+
+	lvx		$rndkey0,0,$key1
+	lvx		$rndkey1,$idx,$key1
+	addi		$idx,$idx,16
+	vperm		$inout,$inout,$inptail,$inpperm
+	?vperm		$rndkey0,$rndkey0,$rndkey1,$keyperm
+	vxor		$inout,$inout,$tweak
+	vxor		$inout,$inout,$rndkey0
+	lvx		$rndkey0,$idx,$key1
+	addi		$idx,$idx,16
+	mtctr		$rounds
+	b		Loop_xts_enc
+
+.align	5
+Loop_xts_enc:
+	?vperm		$rndkey1,$rndkey1,$rndkey0,$keyperm
+	vcipher		$inout,$inout,$rndkey1
+	lvx		$rndkey1,$idx,$key1
+	addi		$idx,$idx,16
+	?vperm		$rndkey0,$rndkey0,$rndkey1,$keyperm
+	vcipher		$inout,$inout,$rndkey0
+	lvx		$rndkey0,$idx,$key1
+	addi		$idx,$idx,16
+	bdnz		Loop_xts_enc
+
+	?vperm		$rndkey1,$rndkey1,$rndkey0,$keyperm
+	vcipher		$inout,$inout,$rndkey1
+	lvx		$rndkey1,$idx,$key1
+	li		$idx,16
+	?vperm		$rndkey0,$rndkey0,$rndkey1,$keyperm
+	vxor		$rndkey0,$rndkey0,$tweak
+	vcipherlast	$output,$inout,$rndkey0
+
+	le?vperm	$tmp,$output,$output,$leperm
+	be?nop
+	le?stvx_u	$tmp,0,$out
+	be?stvx_u	$output,0,$out
+	addi		$out,$out,16
+
+	subic.		$len,$len,16
+	beq		Lxts_enc_done
+
+	vmr		$inout,$inptail
+	lvx		$inptail,0,$inp
+	addi		$inp,$inp,16
+	lvx		$rndkey0,0,$key1
+	lvx		$rndkey1,$idx,$key1
+	addi		$idx,$idx,16
+
+	subic		r0,$len,32
+	subfe		r0,r0,r0
+	and		r0,r0,$taillen
+	add		$inp,$inp,r0
+
+	vsrab		$tmp,$tweak,$seven		# next tweak value
+	vaddubm		$tweak,$tweak,$tweak
+	vsldoi		$tmp,$tmp,$tmp,15
+	vand		$tmp,$tmp,$eighty7
+	vxor		$tweak,$tweak,$tmp
+
+	vperm		$inout,$inout,$inptail,$inpperm
+	?vperm		$rndkey0,$rndkey0,$rndkey1,$keyperm
+	vxor		$inout,$inout,$tweak
+	vxor		$output,$output,$rndkey0	# just in case $len<16
+	vxor		$inout,$inout,$rndkey0
+	lvx		$rndkey0,$idx,$key1
+	addi		$idx,$idx,16
+
+	mtctr		$rounds
+	${UCMP}i	$len,16
+	bge		Loop_xts_enc
+
+	vxor		$output,$output,$tweak
+	lvsr		$inpperm,0,$len			# $inpperm is no longer needed
+	vxor		$inptail,$inptail,$inptail	# $inptail is no longer needed
+	vspltisb	$tmp,-1
+	vperm		$inptail,$inptail,$tmp,$inpperm
+	vsel		$inout,$inout,$output,$inptail
+
+	subi		r11,$out,17
+	subi		$out,$out,16
+	mtctr		$len
+	li		$len,16
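+# Ciphertext stealing: move bytes of the last full ciphertext block up
+# to serve as the short tail, then branch back to encrypt the merged
+# final block once more.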
+Loop_xts_enc_steal:
+	lbzu		r0,1(r11)
+	stb		r0,16(r11)
+	bdnz		Loop_xts_enc_steal
+
+	mtctr		$rounds
+	b		Loop_xts_enc			# one more time...
+
+Lxts_enc_done:
+	${UCMP}i	$ivp,0
+	beq		Lxts_enc_ret
+
+	vsrab		$tmp,$tweak,$seven		# next tweak value
+	vaddubm		$tweak,$tweak,$tweak
+	vsldoi		$tmp,$tmp,$tmp,15
+	vand		$tmp,$tmp,$eighty7
+	vxor		$tweak,$tweak,$tmp
+
+	le?vperm	$tweak,$tweak,$tweak,$leperm
+	stvx_u		$tweak,0,$ivp
+
+Lxts_enc_ret:
+	mtspr		256,r12				# restore vrsave
+	li		r3,0
+	blr
+	.long		0
+	.byte		0,12,0x04,0,0x80,6,6,0
+	.long		0
+.size	.${prefix}_xts_encrypt,.-.${prefix}_xts_encrypt
+
+.globl	.${prefix}_xts_decrypt
+.align	5
+.${prefix}_xts_decrypt:
+	mr		$inp,r3				# reassign
+	li		r3,-1
+	${UCMP}i	$len,16
+	bltlr-
+
+	lis		r0,0xfff8
+	mfspr		r12,256				# save vrsave
+	li		r11,0
+	mtspr		256,r0
+
+	andi.		r0,$len,15
+	neg		r0,r0
+	andi.		r0,r0,16
+	sub		$len,$len,r0
+
+	vspltisb	$seven,0x07			# 0x070707..07
+	le?lvsl		$leperm,r11,r11
+	le?vspltisb	$tmp,0x0f
+	le?vxor		$leperm,$leperm,$seven
+
+	li		$idx,15
+	lvx		$tweak,0,$ivp			# load [unaligned] iv
+	lvsl		$inpperm,0,$ivp
+	lvx		$inptail,$idx,$ivp
+	le?vxor		$inpperm,$inpperm,$tmp
+	vperm		$tweak,$tweak,$inptail,$inpperm
+
+	neg		r11,$inp
+	lvsr		$inpperm,0,r11			# prepare for unaligned load
+	lvx		$inout,0,$inp
+	addi		$inp,$inp,15			# 15 is not a typo
+	le?vxor		$inpperm,$inpperm,$tmp
+
+	${UCMP}i	$key2,0				# key2==NULL?
+	beq		Lxts_dec_no_key2
+
+	?lvsl		$keyperm,0,$key2		# prepare for unaligned key
+	lwz		$rounds,240($key2)
+	srwi		$rounds,$rounds,1
+	subi		$rounds,$rounds,1
+	li		$idx,16
+
+	lvx		$rndkey0,0,$key2
+	lvx		$rndkey1,$idx,$key2
+	addi		$idx,$idx,16
+	?vperm		$rndkey0,$rndkey0,$rndkey1,$keyperm
+	vxor		$tweak,$tweak,$rndkey0
+	lvx		$rndkey0,$idx,$key2
+	addi		$idx,$idx,16
+	mtctr		$rounds
+
+Ltweak_xts_dec:
+	?vperm		$rndkey1,$rndkey1,$rndkey0,$keyperm
+	vcipher		$tweak,$tweak,$rndkey1
+	lvx		$rndkey1,$idx,$key2
+	addi		$idx,$idx,16
+	?vperm		$rndkey0,$rndkey0,$rndkey1,$keyperm
+	vcipher		$tweak,$tweak,$rndkey0
+	lvx		$rndkey0,$idx,$key2
+	addi		$idx,$idx,16
+	bdnz		Ltweak_xts_dec
+
+	?vperm		$rndkey1,$rndkey1,$rndkey0,$keyperm
+	vcipher		$tweak,$tweak,$rndkey1
+	lvx		$rndkey1,$idx,$key2
+	?vperm		$rndkey0,$rndkey0,$rndkey1,$keyperm
+	vcipherlast	$tweak,$tweak,$rndkey0
+
+	li		$ivp,0				# don't chain the tweak
+	b		Lxts_dec
+
+Lxts_dec_no_key2:
+	neg		$idx,$len
+	andi.		$idx,$idx,15
+	add		$len,$len,$idx			# in "tweak chaining"
+							# mode only complete
+							# blocks are processed
+Lxts_dec:
+	lvx		$inptail,0,$inp
+	addi		$inp,$inp,16
+
+	?lvsl		$keyperm,0,$key1		# prepare for unaligned key
+	lwz		$rounds,240($key1)
+	srwi		$rounds,$rounds,1
+	subi		$rounds,$rounds,1
+	li		$idx,16
+
+	vslb		$eighty7,$seven,$seven		# 0x808080..80
+	vor		$eighty7,$eighty7,$seven	# 0x878787..87
+	vspltisb	$tmp,1				# 0x010101..01
+	vsldoi		$eighty7,$eighty7,$tmp,15	# 0x870101..01
+
+	${UCMP}i	$len,96
+	bge		_aesp8_xts_decrypt6x
+
+	lvx		$rndkey0,0,$key1
+	lvx		$rndkey1,$idx,$key1
+	addi		$idx,$idx,16
+	vperm		$inout,$inout,$inptail,$inpperm
+	?vperm		$rndkey0,$rndkey0,$rndkey1,$keyperm
+	vxor		$inout,$inout,$tweak
+	vxor		$inout,$inout,$rndkey0
+	lvx		$rndkey0,$idx,$key1
+	addi		$idx,$idx,16
+	mtctr		$rounds
+
+	${UCMP}i	$len,16
+	blt		Ltail_xts_dec
+	be?b		Loop_xts_dec
+
+.align	5
+Loop_xts_dec:
+	?vperm		$rndkey1,$rndkey1,$rndkey0,$keyperm
+	vncipher	$inout,$inout,$rndkey1
+	lvx		$rndkey1,$idx,$key1
+	addi		$idx,$idx,16
+	?vperm		$rndkey0,$rndkey0,$rndkey1,$keyperm
+	vncipher	$inout,$inout,$rndkey0
+	lvx		$rndkey0,$idx,$key1
+	addi		$idx,$idx,16
+	bdnz		Loop_xts_dec
+
+	?vperm		$rndkey1,$rndkey1,$rndkey0,$keyperm
+	vncipher	$inout,$inout,$rndkey1
+	lvx		$rndkey1,$idx,$key1
+	li		$idx,16
+	?vperm		$rndkey0,$rndkey0,$rndkey1,$keyperm
+	vxor		$rndkey0,$rndkey0,$tweak
+	vncipherlast	$output,$inout,$rndkey0
+
+	le?vperm	$tmp,$output,$output,$leperm
+	be?nop
+	le?stvx_u	$tmp,0,$out
+	be?stvx_u	$output,0,$out
+	addi		$out,$out,16
+
+	subic.		$len,$len,16
+	beq		Lxts_dec_done
+
+	vmr		$inout,$inptail
+	lvx		$inptail,0,$inp
+	addi		$inp,$inp,16
+	lvx		$rndkey0,0,$key1
+	lvx		$rndkey1,$idx,$key1
+	addi		$idx,$idx,16
+
+	vsrab		$tmp,$tweak,$seven		# next tweak value
+	vaddubm		$tweak,$tweak,$tweak
+	vsldoi		$tmp,$tmp,$tmp,15
+	vand		$tmp,$tmp,$eighty7
+	vxor		$tweak,$tweak,$tmp
+
+	vperm		$inout,$inout,$inptail,$inpperm
+	?vperm		$rndkey0,$rndkey0,$rndkey1,$keyperm
+	vxor		$inout,$inout,$tweak
+	vxor		$inout,$inout,$rndkey0
+	lvx		$rndkey0,$idx,$key1
+	addi		$idx,$idx,16
+
+	mtctr		$rounds
+	${UCMP}i	$len,16
+	bge		Loop_xts_dec
+
+Ltail_xts_dec:
+	vsrab		$tmp,$tweak,$seven		# next tweak value
+	vaddubm		$tweak1,$tweak,$tweak
+	vsldoi		$tmp,$tmp,$tmp,15
+	vand		$tmp,$tmp,$eighty7
+	vxor		$tweak1,$tweak1,$tmp
+
+	subi		$inp,$inp,16
+	add		$inp,$inp,$len
+
+	vxor		$inout,$inout,$tweak		# :-(
+	vxor		$inout,$inout,$tweak1		# :-)
+
+Loop_xts_dec_short:
+	?vperm		$rndkey1,$rndkey1,$rndkey0,$keyperm
+	vncipher	$inout,$inout,$rndkey1
+	lvx		$rndkey1,$idx,$key1
+	addi		$idx,$idx,16
+	?vperm		$rndkey0,$rndkey0,$rndkey1,$keyperm
+	vncipher	$inout,$inout,$rndkey0
+	lvx		$rndkey0,$idx,$key1
+	addi		$idx,$idx,16
+	bdnz		Loop_xts_dec_short
+
+	?vperm		$rndkey1,$rndkey1,$rndkey0,$keyperm
+	vncipher	$inout,$inout,$rndkey1
+	lvx		$rndkey1,$idx,$key1
+	li		$idx,16
+	?vperm		$rndkey0,$rndkey0,$rndkey1,$keyperm
+	vxor		$rndkey0,$rndkey0,$tweak1
+	vncipherlast	$output,$inout,$rndkey0
+
+	le?vperm	$tmp,$output,$output,$leperm
+	be?nop
+	le?stvx_u	$tmp,0,$out
+	be?stvx_u	$output,0,$out
+
+	vmr		$inout,$inptail
+	lvx		$inptail,0,$inp
+	#addi		$inp,$inp,16
+	lvx		$rndkey0,0,$key1
+	lvx		$rndkey1,$idx,$key1
+	addi		$idx,$idx,16
+	vperm		$inout,$inout,$inptail,$inpperm
+	?vperm		$rndkey0,$rndkey0,$rndkey1,$keyperm
+
+	lvsr		$inpperm,0,$len			# $inpperm is no longer needed
+	vxor		$inptail,$inptail,$inptail	# $inptail is no longer needed
+	vspltisb	$tmp,-1
+	vperm		$inptail,$inptail,$tmp,$inpperm
+	vsel		$inout,$inout,$output,$inptail
+
+	vxor		$rndkey0,$rndkey0,$tweak
+	vxor		$inout,$inout,$rndkey0
+	lvx		$rndkey0,$idx,$key1
+	addi		$idx,$idx,16
+
+	subi		r11,$out,1
+	mtctr		$len
+	li		$len,16
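+# Ciphertext stealing, decrypt side: splice the stolen bytes into place,
+# then branch back to run the decrypt loop once more on the final block.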
+Loop_xts_dec_steal:
+	lbzu		r0,1(r11)
+	stb		r0,16(r11)
+	bdnz		Loop_xts_dec_steal
+
+	mtctr		$rounds
+	b		Loop_xts_dec			# one more time...
+
+Lxts_dec_done:
+	${UCMP}i	$ivp,0
+	beq		Lxts_dec_ret
+
+	vsrab		$tmp,$tweak,$seven		# next tweak value
+	vaddubm		$tweak,$tweak,$tweak
+	vsldoi		$tmp,$tmp,$tmp,15
+	vand		$tmp,$tmp,$eighty7
+	vxor		$tweak,$tweak,$tmp
+
+	le?vperm	$tweak,$tweak,$tweak,$leperm
+	stvx_u		$tweak,0,$ivp
+
+Lxts_dec_ret:
+	mtspr		256,r12				# restore vrsave
+	li		r3,0
+	blr
+	.long		0
+	.byte		0,12,0x04,0,0x80,6,6,0
+	.long		0
+.size	.${prefix}_xts_decrypt,.-.${prefix}_xts_decrypt
+___
+#########################################################################
+{{	# Optimized XTS procedures					#
+my $key_=$key2;
+my ($x00,$x10,$x20,$x30,$x40,$x50,$x60,$x70)=map("r$_",(0,3,26..31));
+    $x00=0 if ($flavour =~ /osx/);
+my ($in0,  $in1,  $in2,  $in3,  $in4,  $in5 )=map("v$_",(0..5));
+my ($out0, $out1, $out2, $out3, $out4, $out5)=map("v$_",(7,12..16));
+my ($twk0, $twk1, $twk2, $twk3, $twk4, $twk5)=map("v$_",(17..22));
+my $rndkey0="v23";	# v24-v25 rotating buffer for first round keys
+			# v26-v31 last 6 round keys
+my ($keyperm)=($out0);	# aliases with "caller", redundant assignment
+my $taillen=$x70;
+
+$code.=<<___;
+.align	5
+_aesp8_xts_encrypt6x:
+	$STU		$sp,-`($FRAME+21*16+6*$SIZE_T)`($sp)
+	mflr		r11
+	li		r7,`$FRAME+8*16+15`
+	li		r3,`$FRAME+8*16+31`
+	$PUSH		r11,`$FRAME+21*16+6*$SIZE_T+$LRSAVE`($sp)
+	stvx		v20,r7,$sp		# ABI says so
+	addi		r7,r7,32
+	stvx		v21,r3,$sp
+	addi		r3,r3,32
+	stvx		v22,r7,$sp
+	addi		r7,r7,32
+	stvx		v23,r3,$sp
+	addi		r3,r3,32
+	stvx		v24,r7,$sp
+	addi		r7,r7,32
+	stvx		v25,r3,$sp
+	addi		r3,r3,32
+	stvx		v26,r7,$sp
+	addi		r7,r7,32
+	stvx		v27,r3,$sp
+	addi		r3,r3,32
+	stvx		v28,r7,$sp
+	addi		r7,r7,32
+	stvx		v29,r3,$sp
+	addi		r3,r3,32
+	stvx		v30,r7,$sp
+	stvx		v31,r3,$sp
+	li		r0,-1
+	stw		$vrsave,`$FRAME+21*16-4`($sp)	# save vrsave
+	li		$x10,0x10
+	$PUSH		r26,`$FRAME+21*16+0*$SIZE_T`($sp)
+	li		$x20,0x20
+	$PUSH		r27,`$FRAME+21*16+1*$SIZE_T`($sp)
+	li		$x30,0x30
+	$PUSH		r28,`$FRAME+21*16+2*$SIZE_T`($sp)
+	li		$x40,0x40
+	$PUSH		r29,`$FRAME+21*16+3*$SIZE_T`($sp)
+	li		$x50,0x50
+	$PUSH		r30,`$FRAME+21*16+4*$SIZE_T`($sp)
+	li		$x60,0x60
+	$PUSH		r31,`$FRAME+21*16+5*$SIZE_T`($sp)
+	li		$x70,0x70
+	mtspr		256,r0
+
+	subi		$rounds,$rounds,3	# -4 in total
+
+	lvx		$rndkey0,$x00,$key1	# load key schedule
+	lvx		v30,$x10,$key1
+	addi		$key1,$key1,0x20
+	lvx		v31,$x00,$key1
+	?vperm		$rndkey0,$rndkey0,v30,$keyperm
+	addi		$key_,$sp,$FRAME+15
+	mtctr		$rounds
+
+Load_xts_enc_key:
+	?vperm		v24,v30,v31,$keyperm
+	lvx		v30,$x10,$key1
+	addi		$key1,$key1,0x20
+	stvx		v24,$x00,$key_		# off-load round[1]
+	?vperm		v25,v31,v30,$keyperm
+	lvx		v31,$x00,$key1
+	stvx		v25,$x10,$key_		# off-load round[2]
+	addi		$key_,$key_,0x20
+	bdnz		Load_xts_enc_key
+
+	lvx		v26,$x10,$key1
+	?vperm		v24,v30,v31,$keyperm
+	lvx		v27,$x20,$key1
+	stvx		v24,$x00,$key_		# off-load round[3]
+	?vperm		v25,v31,v26,$keyperm
+	lvx		v28,$x30,$key1
+	stvx		v25,$x10,$key_		# off-load round[4]
+	addi		$key_,$sp,$FRAME+15	# rewind $key_
+	?vperm		v26,v26,v27,$keyperm
+	lvx		v29,$x40,$key1
+	?vperm		v27,v27,v28,$keyperm
+	lvx		v30,$x50,$key1
+	?vperm		v28,v28,v29,$keyperm
+	lvx		v31,$x60,$key1
+	?vperm		v29,v29,v30,$keyperm
+	lvx		$twk5,$x70,$key1	# borrow $twk5
+	?vperm		v30,v30,v31,$keyperm
+	lvx		v24,$x00,$key_		# pre-load round[1]
+	?vperm		v31,v31,$twk5,$keyperm
+	lvx		v25,$x10,$key_		# pre-load round[2]
+
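+# Precompute six tweak values (twk0..twk5), advancing the tweak by x in
+# GF(2^128) each time, and pre-xor them into the six input blocks as
+# they are loaded.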
+	 vperm		$in0,$inout,$inptail,$inpperm
+	 subi		$inp,$inp,31		# undo "caller"
+	vxor		$twk0,$tweak,$rndkey0
+	vsrab		$tmp,$tweak,$seven	# next tweak value
+	vaddubm		$tweak,$tweak,$tweak
+	vsldoi		$tmp,$tmp,$tmp,15
+	vand		$tmp,$tmp,$eighty7
+	 vxor		$out0,$in0,$twk0
+	vxor		$tweak,$tweak,$tmp
+
+	 lvx_u		$in1,$x10,$inp
+	vxor		$twk1,$tweak,$rndkey0
+	vsrab		$tmp,$tweak,$seven	# next tweak value
+	vaddubm		$tweak,$tweak,$tweak
+	vsldoi		$tmp,$tmp,$tmp,15
+	 le?vperm	$in1,$in1,$in1,$leperm
+	vand		$tmp,$tmp,$eighty7
+	 vxor		$out1,$in1,$twk1
+	vxor		$tweak,$tweak,$tmp
+
+	 lvx_u		$in2,$x20,$inp
+	 andi.		$taillen,$len,15
+	vxor		$twk2,$tweak,$rndkey0
+	vsrab		$tmp,$tweak,$seven	# next tweak value
+	vaddubm		$tweak,$tweak,$tweak
+	vsldoi		$tmp,$tmp,$tmp,15
+	 le?vperm	$in2,$in2,$in2,$leperm
+	vand		$tmp,$tmp,$eighty7
+	 vxor		$out2,$in2,$twk2
+	vxor		$tweak,$tweak,$tmp
+
+	 lvx_u		$in3,$x30,$inp
+	 sub		$len,$len,$taillen
+	vxor		$twk3,$tweak,$rndkey0
+	vsrab		$tmp,$tweak,$seven	# next tweak value
+	vaddubm		$tweak,$tweak,$tweak
+	vsldoi		$tmp,$tmp,$tmp,15
+	 le?vperm	$in3,$in3,$in3,$leperm
+	vand		$tmp,$tmp,$eighty7
+	 vxor		$out3,$in3,$twk3
+	vxor		$tweak,$tweak,$tmp
+
+	 lvx_u		$in4,$x40,$inp
+	 subi		$len,$len,0x60
+	vxor		$twk4,$tweak,$rndkey0
+	vsrab		$tmp,$tweak,$seven	# next tweak value
+	vaddubm		$tweak,$tweak,$tweak
+	vsldoi		$tmp,$tmp,$tmp,15
+	 le?vperm	$in4,$in4,$in4,$leperm
+	vand		$tmp,$tmp,$eighty7
+	 vxor		$out4,$in4,$twk4
+	vxor		$tweak,$tweak,$tmp
+
+	 lvx_u		$in5,$x50,$inp
+	 addi		$inp,$inp,0x60
+	vxor		$twk5,$tweak,$rndkey0
+	vsrab		$tmp,$tweak,$seven	# next tweak value
+	vaddubm		$tweak,$tweak,$tweak
+	vsldoi		$tmp,$tmp,$tmp,15
+	 le?vperm	$in5,$in5,$in5,$leperm
+	vand		$tmp,$tmp,$eighty7
+	 vxor		$out5,$in5,$twk5
+	vxor		$tweak,$tweak,$tmp
+
+	vxor		v31,v31,$rndkey0
+	mtctr		$rounds
+	b		Loop_xts_enc6x
+
+.align	5
+Loop_xts_enc6x:
+	vcipher		$out0,$out0,v24
+	vcipher		$out1,$out1,v24
+	vcipher		$out2,$out2,v24
+	vcipher		$out3,$out3,v24
+	vcipher		$out4,$out4,v24
+	vcipher		$out5,$out5,v24
+	lvx		v24,$x20,$key_		# round[3]
+	addi		$key_,$key_,0x20
+
+	vcipher		$out0,$out0,v25
+	vcipher		$out1,$out1,v25
+	vcipher		$out2,$out2,v25
+	vcipher		$out3,$out3,v25
+	vcipher		$out4,$out4,v25
+	vcipher		$out5,$out5,v25
+	lvx		v25,$x10,$key_		# round[4]
+	bdnz		Loop_xts_enc6x
+
+	subic		$len,$len,96		# $len-=96
+	 vxor		$in0,$twk0,v31		# xor with last round key
+	vcipher		$out0,$out0,v24
+	vcipher		$out1,$out1,v24
+	 vsrab		$tmp,$tweak,$seven	# next tweak value
+	 vxor		$twk0,$tweak,$rndkey0
+	 vaddubm	$tweak,$tweak,$tweak
+	vcipher		$out2,$out2,v24
+	vcipher		$out3,$out3,v24
+	 vsldoi		$tmp,$tmp,$tmp,15
+	vcipher		$out4,$out4,v24
+	vcipher		$out5,$out5,v24
+
+	subfe.		r0,r0,r0		# borrow?-1:0
+	 vand		$tmp,$tmp,$eighty7
+	vcipher		$out0,$out0,v25
+	vcipher		$out1,$out1,v25
+	 vxor		$tweak,$tweak,$tmp
+	vcipher		$out2,$out2,v25
+	vcipher		$out3,$out3,v25
+	 vxor		$in1,$twk1,v31
+	 vsrab		$tmp,$tweak,$seven	# next tweak value
+	 vxor		$twk1,$tweak,$rndkey0
+	vcipher		$out4,$out4,v25
+	vcipher		$out5,$out5,v25
+
+	and		r0,r0,$len
+	 vaddubm	$tweak,$tweak,$tweak
+	 vsldoi		$tmp,$tmp,$tmp,15
+	vcipher		$out0,$out0,v26
+	vcipher		$out1,$out1,v26
+	 vand		$tmp,$tmp,$eighty7
+	vcipher		$out2,$out2,v26
+	vcipher		$out3,$out3,v26
+	 vxor		$tweak,$tweak,$tmp
+	vcipher		$out4,$out4,v26
+	vcipher		$out5,$out5,v26
+
+	add		$inp,$inp,r0		# $inp is adjusted in such a
+						# way that at exit from the
+						# loop inX-in5 are loaded
+						# with the last "words"
+	 vxor		$in2,$twk2,v31
+	 vsrab		$tmp,$tweak,$seven	# next tweak value
+	 vxor		$twk2,$tweak,$rndkey0
+	 vaddubm	$tweak,$tweak,$tweak
+	vcipher		$out0,$out0,v27
+	vcipher		$out1,$out1,v27
+	 vsldoi		$tmp,$tmp,$tmp,15
+	vcipher		$out2,$out2,v27
+	vcipher		$out3,$out3,v27
+	 vand		$tmp,$tmp,$eighty7
+	vcipher		$out4,$out4,v27
+	vcipher		$out5,$out5,v27
+
+	addi		$key_,$sp,$FRAME+15	# rewind $key_
+	 vxor		$tweak,$tweak,$tmp
+	vcipher		$out0,$out0,v28
+	vcipher		$out1,$out1,v28
+	 vxor		$in3,$twk3,v31
+	 vsrab		$tmp,$tweak,$seven	# next tweak value
+	 vxor		$twk3,$tweak,$rndkey0
+	vcipher		$out2,$out2,v28
+	vcipher		$out3,$out3,v28
+	 vaddubm	$tweak,$tweak,$tweak
+	 vsldoi		$tmp,$tmp,$tmp,15
+	vcipher		$out4,$out4,v28
+	vcipher		$out5,$out5,v28
+	lvx		v24,$x00,$key_		# re-pre-load round[1]
+	 vand		$tmp,$tmp,$eighty7
+
+	vcipher		$out0,$out0,v29
+	vcipher		$out1,$out1,v29
+	 vxor		$tweak,$tweak,$tmp
+	vcipher		$out2,$out2,v29
+	vcipher		$out3,$out3,v29
+	 vxor		$in4,$twk4,v31
+	 vsrab		$tmp,$tweak,$seven	# next tweak value
+	 vxor		$twk4,$tweak,$rndkey0
+	vcipher		$out4,$out4,v29
+	vcipher		$out5,$out5,v29
+	lvx		v25,$x10,$key_		# re-pre-load round[2]
+	 vaddubm	$tweak,$tweak,$tweak
+	 vsldoi		$tmp,$tmp,$tmp,15
+
+	vcipher		$out0,$out0,v30
+	vcipher		$out1,$out1,v30
+	 vand		$tmp,$tmp,$eighty7
+	vcipher		$out2,$out2,v30
+	vcipher		$out3,$out3,v30
+	 vxor		$tweak,$tweak,$tmp
+	vcipher		$out4,$out4,v30
+	vcipher		$out5,$out5,v30
+	 vxor		$in5,$twk5,v31
+	 vsrab		$tmp,$tweak,$seven	# next tweak value
+	 vxor		$twk5,$tweak,$rndkey0
+
+	vcipherlast	$out0,$out0,$in0
+	 lvx_u		$in0,$x00,$inp		# load next input block
+	 vaddubm	$tweak,$tweak,$tweak
+	 vsldoi		$tmp,$tmp,$tmp,15
+	vcipherlast	$out1,$out1,$in1
+	 lvx_u		$in1,$x10,$inp
+	vcipherlast	$out2,$out2,$in2
+	 le?vperm	$in0,$in0,$in0,$leperm
+	 lvx_u		$in2,$x20,$inp
+	 vand		$tmp,$tmp,$eighty7
+	vcipherlast	$out3,$out3,$in3
+	 le?vperm	$in1,$in1,$in1,$leperm
+	 lvx_u		$in3,$x30,$inp
+	vcipherlast	$out4,$out4,$in4
+	 le?vperm	$in2,$in2,$in2,$leperm
+	 lvx_u		$in4,$x40,$inp
+	 vxor		$tweak,$tweak,$tmp
+	vcipherlast	$tmp,$out5,$in5		# last block might be needed
+						# in stealing mode
+	 le?vperm	$in3,$in3,$in3,$leperm
+	 lvx_u		$in5,$x50,$inp
+	 addi		$inp,$inp,0x60
+	 le?vperm	$in4,$in4,$in4,$leperm
+	 le?vperm	$in5,$in5,$in5,$leperm
+
+	le?vperm	$out0,$out0,$out0,$leperm
+	le?vperm	$out1,$out1,$out1,$leperm
+	stvx_u		$out0,$x00,$out		# store output
+	 vxor		$out0,$in0,$twk0
+	le?vperm	$out2,$out2,$out2,$leperm
+	stvx_u		$out1,$x10,$out
+	 vxor		$out1,$in1,$twk1
+	le?vperm	$out3,$out3,$out3,$leperm
+	stvx_u		$out2,$x20,$out
+	 vxor		$out2,$in2,$twk2
+	le?vperm	$out4,$out4,$out4,$leperm
+	stvx_u		$out3,$x30,$out
+	 vxor		$out3,$in3,$twk3
+	le?vperm	$out5,$tmp,$tmp,$leperm
+	stvx_u		$out4,$x40,$out
+	 vxor		$out4,$in4,$twk4
+	le?stvx_u	$out5,$x50,$out
+	be?stvx_u	$tmp, $x50,$out
+	 vxor		$out5,$in5,$twk5
+	addi		$out,$out,0x60
+
+	mtctr		$rounds
+	beq		Loop_xts_enc6x		# did $len-=96 borrow?
+
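+	# $len-=96 borrowed, so fewer than six blocks remain: restore
+	# $len and dispatch on the number of whole blocks left (any
+	# partial tail is handled via ciphertext stealing)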
+	addic.		$len,$len,0x60
+	beq		Lxts_enc6x_zero
+	cmpwi		$len,0x20
+	blt		Lxts_enc6x_one
+	nop
+	beq		Lxts_enc6x_two
+	cmpwi		$len,0x40
+	blt		Lxts_enc6x_three
+	nop
+	beq		Lxts_enc6x_four
+
+Lxts_enc6x_five:
+	vxor		$out0,$in1,$twk0
+	vxor		$out1,$in2,$twk1
+	vxor		$out2,$in3,$twk2
+	vxor		$out3,$in4,$twk3
+	vxor		$out4,$in5,$twk4
+
+	bl		_aesp8_xts_enc5x
+
+	le?vperm	$out0,$out0,$out0,$leperm
+	vmr		$twk0,$twk5		# unused tweak
+	le?vperm	$out1,$out1,$out1,$leperm
+	stvx_u		$out0,$x00,$out		# store output
+	le?vperm	$out2,$out2,$out2,$leperm
+	stvx_u		$out1,$x10,$out
+	le?vperm	$out3,$out3,$out3,$leperm
+	stvx_u		$out2,$x20,$out
+	vxor		$tmp,$out4,$twk5	# last block prep for stealing
+	le?vperm	$out4,$out4,$out4,$leperm
+	stvx_u		$out3,$x30,$out
+	stvx_u		$out4,$x40,$out
+	addi		$out,$out,0x50
+	bne		Lxts_enc6x_steal
+	b		Lxts_enc6x_done
+
+.align	4
+Lxts_enc6x_four:
+	vxor		$out0,$in2,$twk0
+	vxor		$out1,$in3,$twk1
+	vxor		$out2,$in4,$twk2
+	vxor		$out3,$in5,$twk3
+	vxor		$out4,$out4,$out4
+
+	bl		_aesp8_xts_enc5x
+
+	le?vperm	$out0,$out0,$out0,$leperm
+	vmr		$twk0,$twk4		# unused tweak
+	le?vperm	$out1,$out1,$out1,$leperm
+	stvx_u		$out0,$x00,$out		# store output
+	le?vperm	$out2,$out2,$out2,$leperm
+	stvx_u		$out1,$x10,$out
+	vxor		$tmp,$out3,$twk4	# last block prep for stealing
+	le?vperm	$out3,$out3,$out3,$leperm
+	stvx_u		$out2,$x20,$out
+	stvx_u		$out3,$x30,$out
+	addi		$out,$out,0x40
+	bne		Lxts_enc6x_steal
+	b		Lxts_enc6x_done
+
+.align	4
+Lxts_enc6x_three:
+	vxor		$out0,$in3,$twk0
+	vxor		$out1,$in4,$twk1
+	vxor		$out2,$in5,$twk2
+	vxor		$out3,$out3,$out3
+	vxor		$out4,$out4,$out4
+
+	bl		_aesp8_xts_enc5x
+
+	le?vperm	$out0,$out0,$out0,$leperm
+	vmr		$twk0,$twk3		# unused tweak
+	le?vperm	$out1,$out1,$out1,$leperm
+	stvx_u		$out0,$x00,$out		# store output
+	vxor		$tmp,$out2,$twk3	# last block prep for stealing
+	le?vperm	$out2,$out2,$out2,$leperm
+	stvx_u		$out1,$x10,$out
+	stvx_u		$out2,$x20,$out
+	addi		$out,$out,0x30
+	bne		Lxts_enc6x_steal
+	b		Lxts_enc6x_done
+
+.align	4
+Lxts_enc6x_two:
+	vxor		$out0,$in4,$twk0
+	vxor		$out1,$in5,$twk1
+	vxor		$out2,$out2,$out2
+	vxor		$out3,$out3,$out3
+	vxor		$out4,$out4,$out4
+
+	bl		_aesp8_xts_enc5x
+
+	le?vperm	$out0,$out0,$out0,$leperm
+	vmr		$twk0,$twk2		# unused tweak
+	vxor		$tmp,$out1,$twk2	# last block prep for stealing
+	le?vperm	$out1,$out1,$out1,$leperm
+	stvx_u		$out0,$x00,$out		# store output
+	stvx_u		$out1,$x10,$out
+	addi		$out,$out,0x20
+	bne		Lxts_enc6x_steal
+	b		Lxts_enc6x_done
+
+.align	4
+Lxts_enc6x_one:
+	vxor		$out0,$in5,$twk0
+	nop
+Loop_xts_enc1x:
+	vcipher		$out0,$out0,v24
+	lvx		v24,$x20,$key_		# round[3]
+	addi		$key_,$key_,0x20
+
+	vcipher		$out0,$out0,v25
+	lvx		v25,$x10,$key_		# round[4]
+	bdnz		Loop_xts_enc1x
+
+	add		$inp,$inp,$taillen
+	cmpwi		$taillen,0
+	vcipher		$out0,$out0,v24
+
+	subi		$inp,$inp,16
+	vcipher		$out0,$out0,v25
+
+	lvsr		$inpperm,0,$taillen
+	vcipher		$out0,$out0,v26
+
+	lvx_u		$in0,0,$inp
+	vcipher		$out0,$out0,v27
+
+	addi		$key_,$sp,$FRAME+15	# rewind $key_
+	vcipher		$out0,$out0,v28
+	lvx		v24,$x00,$key_		# re-pre-load round[1]
+
+	vcipher		$out0,$out0,v29
+	lvx		v25,$x10,$key_		# re-pre-load round[2]
+	 vxor		$twk0,$twk0,v31
+
+	le?vperm	$in0,$in0,$in0,$leperm
+	vcipher		$out0,$out0,v30
+
+	vperm		$in0,$in0,$in0,$inpperm
+	vcipherlast	$out0,$out0,$twk0
+
+	vmr		$twk0,$twk1		# unused tweak
+	vxor		$tmp,$out0,$twk1	# last block prep for stealing
+	le?vperm	$out0,$out0,$out0,$leperm
+	stvx_u		$out0,$x00,$out		# store output
+	addi		$out,$out,0x10
+	bne		Lxts_enc6x_steal
+	b		Lxts_enc6x_done
+
+.align	4
+Lxts_enc6x_zero:
+	cmpwi		$taillen,0
+	beq		Lxts_enc6x_done
+
+	add		$inp,$inp,$taillen
+	subi		$inp,$inp,16
+	lvx_u		$in0,0,$inp
+	lvsr		$inpperm,0,$taillen	# $in5 is no more
+	le?vperm	$in0,$in0,$in0,$leperm
+	vperm		$in0,$in0,$in0,$inpperm
+	vxor		$tmp,$tmp,$twk0
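+	# Ciphertext stealing: the partial tail is padded with bytes of
+	# the last full ciphertext block (vsel below), the stolen
+	# ciphertext bytes are copied out as the short final block by
+	# the lbzu/stb loop, and one more pass through Loop_xts_enc1x
+	# re-encrypts the combined block over the last full one.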
+Lxts_enc6x_steal:
+	vxor		$in0,$in0,$twk0
+	vxor		$out0,$out0,$out0
+	vspltisb	$out1,-1
+	vperm		$out0,$out0,$out1,$inpperm
+	vsel		$out0,$in0,$tmp,$out0	# $tmp is last block, remember?
+
+	subi		r30,$out,17
+	subi		$out,$out,16
+	mtctr		$taillen
+Loop_xts_enc6x_steal:
+	lbzu		r0,1(r30)
+	stb		r0,16(r30)
+	bdnz		Loop_xts_enc6x_steal
+
+	li		$taillen,0
+	mtctr		$rounds
+	b		Loop_xts_enc1x		# one more time...
+
+.align	4
+Lxts_enc6x_done:
+	${UCMP}i	$ivp,0
+	beq		Lxts_enc6x_ret
+
+	vxor		$tweak,$twk0,$rndkey0
+	le?vperm	$tweak,$tweak,$tweak,$leperm
+	stvx_u		$tweak,0,$ivp
+
+Lxts_enc6x_ret:
+	mtlr		r11
+	li		r10,`$FRAME+15`
+	li		r11,`$FRAME+31`
+	stvx		$seven,r10,$sp		# wipe copies of round keys
+	addi		r10,r10,32
+	stvx		$seven,r11,$sp
+	addi		r11,r11,32
+	stvx		$seven,r10,$sp
+	addi		r10,r10,32
+	stvx		$seven,r11,$sp
+	addi		r11,r11,32
+	stvx		$seven,r10,$sp
+	addi		r10,r10,32
+	stvx		$seven,r11,$sp
+	addi		r11,r11,32
+	stvx		$seven,r10,$sp
+	addi		r10,r10,32
+	stvx		$seven,r11,$sp
+	addi		r11,r11,32
+
+	mtspr		256,$vrsave
+	lvx		v20,r10,$sp		# ABI says so
+	addi		r10,r10,32
+	lvx		v21,r11,$sp
+	addi		r11,r11,32
+	lvx		v22,r10,$sp
+	addi		r10,r10,32
+	lvx		v23,r11,$sp
+	addi		r11,r11,32
+	lvx		v24,r10,$sp
+	addi		r10,r10,32
+	lvx		v25,r11,$sp
+	addi		r11,r11,32
+	lvx		v26,r10,$sp
+	addi		r10,r10,32
+	lvx		v27,r11,$sp
+	addi		r11,r11,32
+	lvx		v28,r10,$sp
+	addi		r10,r10,32
+	lvx		v29,r11,$sp
+	addi		r11,r11,32
+	lvx		v30,r10,$sp
+	lvx		v31,r11,$sp
+	$POP		r26,`$FRAME+21*16+0*$SIZE_T`($sp)
+	$POP		r27,`$FRAME+21*16+1*$SIZE_T`($sp)
+	$POP		r28,`$FRAME+21*16+2*$SIZE_T`($sp)
+	$POP		r29,`$FRAME+21*16+3*$SIZE_T`($sp)
+	$POP		r30,`$FRAME+21*16+4*$SIZE_T`($sp)
+	$POP		r31,`$FRAME+21*16+5*$SIZE_T`($sp)
+	addi		$sp,$sp,`$FRAME+21*16+6*$SIZE_T`
+	blr
+	.long		0
+	.byte		0,12,0x04,1,0x80,6,6,0
+	.long		0
+
+.align	5
+_aesp8_xts_enc5x:
+	vcipher		$out0,$out0,v24
+	vcipher		$out1,$out1,v24
+	vcipher		$out2,$out2,v24
+	vcipher		$out3,$out3,v24
+	vcipher		$out4,$out4,v24
+	lvx		v24,$x20,$key_		# round[3]
+	addi		$key_,$key_,0x20
+
+	vcipher		$out0,$out0,v25
+	vcipher		$out1,$out1,v25
+	vcipher		$out2,$out2,v25
+	vcipher		$out3,$out3,v25
+	vcipher		$out4,$out4,v25
+	lvx		v25,$x10,$key_		# round[4]
+	bdnz		_aesp8_xts_enc5x
+
+	add		$inp,$inp,$taillen
+	cmpwi		$taillen,0
+	vcipher		$out0,$out0,v24
+	vcipher		$out1,$out1,v24
+	vcipher		$out2,$out2,v24
+	vcipher		$out3,$out3,v24
+	vcipher		$out4,$out4,v24
+
+	subi		$inp,$inp,16
+	vcipher		$out0,$out0,v25
+	vcipher		$out1,$out1,v25
+	vcipher		$out2,$out2,v25
+	vcipher		$out3,$out3,v25
+	vcipher		$out4,$out4,v25
+	 vxor		$twk0,$twk0,v31
+
+	vcipher		$out0,$out0,v26
+	lvsr		$inpperm,0,$taillen	# $in5 is no more
+	vcipher		$out1,$out1,v26
+	vcipher		$out2,$out2,v26
+	vcipher		$out3,$out3,v26
+	vcipher		$out4,$out4,v26
+	 vxor		$in1,$twk1,v31
+
+	vcipher		$out0,$out0,v27
+	lvx_u		$in0,0,$inp
+	vcipher		$out1,$out1,v27
+	vcipher		$out2,$out2,v27
+	vcipher		$out3,$out3,v27
+	vcipher		$out4,$out4,v27
+	 vxor		$in2,$twk2,v31
+
+	addi		$key_,$sp,$FRAME+15	# rewind $key_
+	vcipher		$out0,$out0,v28
+	vcipher		$out1,$out1,v28
+	vcipher		$out2,$out2,v28
+	vcipher		$out3,$out3,v28
+	vcipher		$out4,$out4,v28
+	lvx		v24,$x00,$key_		# re-pre-load round[1]
+	 vxor		$in3,$twk3,v31
+
+	vcipher		$out0,$out0,v29
+	le?vperm	$in0,$in0,$in0,$leperm
+	vcipher		$out1,$out1,v29
+	vcipher		$out2,$out2,v29
+	vcipher		$out3,$out3,v29
+	vcipher		$out4,$out4,v29
+	lvx		v25,$x10,$key_		# re-pre-load round[2]
+	 vxor		$in4,$twk4,v31
+
+	vcipher		$out0,$out0,v30
+	vperm		$in0,$in0,$in0,$inpperm
+	vcipher		$out1,$out1,v30
+	vcipher		$out2,$out2,v30
+	vcipher		$out3,$out3,v30
+	vcipher		$out4,$out4,v30
+
+	vcipherlast	$out0,$out0,$twk0
+	vcipherlast	$out1,$out1,$in1
+	vcipherlast	$out2,$out2,$in2
+	vcipherlast	$out3,$out3,$in3
+	vcipherlast	$out4,$out4,$in4
+	blr
+	.long		0
+	.byte		0,12,0x14,0,0,0,0,0
+
+.align	5
+_aesp8_xts_decrypt6x:
+	$STU		$sp,-`($FRAME+21*16+6*$SIZE_T)`($sp)
+	mflr		r11
+	li		r7,`$FRAME+8*16+15`
+	li		r3,`$FRAME+8*16+31`
+	$PUSH		r11,`$FRAME+21*16+6*$SIZE_T+$LRSAVE`($sp)
+	stvx		v20,r7,$sp		# ABI says so
+	addi		r7,r7,32
+	stvx		v21,r3,$sp
+	addi		r3,r3,32
+	stvx		v22,r7,$sp
+	addi		r7,r7,32
+	stvx		v23,r3,$sp
+	addi		r3,r3,32
+	stvx		v24,r7,$sp
+	addi		r7,r7,32
+	stvx		v25,r3,$sp
+	addi		r3,r3,32
+	stvx		v26,r7,$sp
+	addi		r7,r7,32
+	stvx		v27,r3,$sp
+	addi		r3,r3,32
+	stvx		v28,r7,$sp
+	addi		r7,r7,32
+	stvx		v29,r3,$sp
+	addi		r3,r3,32
+	stvx		v30,r7,$sp
+	stvx		v31,r3,$sp
+	li		r0,-1
+	stw		$vrsave,`$FRAME+21*16-4`($sp)	# save vrsave
+	li		$x10,0x10
+	$PUSH		r26,`$FRAME+21*16+0*$SIZE_T`($sp)
+	li		$x20,0x20
+	$PUSH		r27,`$FRAME+21*16+1*$SIZE_T`($sp)
+	li		$x30,0x30
+	$PUSH		r28,`$FRAME+21*16+2*$SIZE_T`($sp)
+	li		$x40,0x40
+	$PUSH		r29,`$FRAME+21*16+3*$SIZE_T`($sp)
+	li		$x50,0x50
+	$PUSH		r30,`$FRAME+21*16+4*$SIZE_T`($sp)
+	li		$x60,0x60
+	$PUSH		r31,`$FRAME+21*16+5*$SIZE_T`($sp)
+	li		$x70,0x70
+	mtspr		256,r0
+
+	subi		$rounds,$rounds,3	# -4 in total
+
+	lvx		$rndkey0,$x00,$key1	# load key schedule
+	lvx		v30,$x10,$key1
+	addi		$key1,$key1,0x20
+	lvx		v31,$x00,$key1
+	?vperm		$rndkey0,$rndkey0,v30,$keyperm
+	addi		$key_,$sp,$FRAME+15
+	mtctr		$rounds
+
+Load_xts_dec_key:
+	?vperm		v24,v30,v31,$keyperm
+	lvx		v30,$x10,$key1
+	addi		$key1,$key1,0x20
+	stvx		v24,$x00,$key_		# off-load round[1]
+	?vperm		v25,v31,v30,$keyperm
+	lvx		v31,$x00,$key1
+	stvx		v25,$x10,$key_		# off-load round[2]
+	addi		$key_,$key_,0x20
+	bdnz		Load_xts_dec_key
+
+	lvx		v26,$x10,$key1
+	?vperm		v24,v30,v31,$keyperm
+	lvx		v27,$x20,$key1
+	stvx		v24,$x00,$key_		# off-load round[3]
+	?vperm		v25,v31,v26,$keyperm
+	lvx		v28,$x30,$key1
+	stvx		v25,$x10,$key_		# off-load round[4]
+	addi		$key_,$sp,$FRAME+15	# rewind $key_
+	?vperm		v26,v26,v27,$keyperm
+	lvx		v29,$x40,$key1
+	?vperm		v27,v27,v28,$keyperm
+	lvx		v30,$x50,$key1
+	?vperm		v28,v28,v29,$keyperm
+	lvx		v31,$x60,$key1
+	?vperm		v29,v29,v30,$keyperm
+	lvx		$twk5,$x70,$key1	# borrow $twk5
+	?vperm		v30,v30,v31,$keyperm
+	lvx		v24,$x00,$key_		# pre-load round[1]
+	?vperm		v31,v31,$twk5,$keyperm
+	lvx		v25,$x10,$key_		# pre-load round[2]
+
+	 vperm		$in0,$inout,$inptail,$inpperm
+	 subi		$inp,$inp,31		# undo "caller"
+	vxor		$twk0,$tweak,$rndkey0
+	vsrab		$tmp,$tweak,$seven	# next tweak value
+	vaddubm		$tweak,$tweak,$tweak
+	vsldoi		$tmp,$tmp,$tmp,15
+	vand		$tmp,$tmp,$eighty7
+	 vxor		$out0,$in0,$twk0
+	vxor		$tweak,$tweak,$tmp
+
+	 lvx_u		$in1,$x10,$inp
+	vxor		$twk1,$tweak,$rndkey0
+	vsrab		$tmp,$tweak,$seven	# next tweak value
+	vaddubm		$tweak,$tweak,$tweak
+	vsldoi		$tmp,$tmp,$tmp,15
+	 le?vperm	$in1,$in1,$in1,$leperm
+	vand		$tmp,$tmp,$eighty7
+	 vxor		$out1,$in1,$twk1
+	vxor		$tweak,$tweak,$tmp
+
+	 lvx_u		$in2,$x20,$inp
+	 andi.		$taillen,$len,15
+	vxor		$twk2,$tweak,$rndkey0
+	vsrab		$tmp,$tweak,$seven	# next tweak value
+	vaddubm		$tweak,$tweak,$tweak
+	vsldoi		$tmp,$tmp,$tmp,15
+	 le?vperm	$in2,$in2,$in2,$leperm
+	vand		$tmp,$tmp,$eighty7
+	 vxor		$out2,$in2,$twk2
+	vxor		$tweak,$tweak,$tmp
+
+	 lvx_u		$in3,$x30,$inp
+	 sub		$len,$len,$taillen
+	vxor		$twk3,$tweak,$rndkey0
+	vsrab		$tmp,$tweak,$seven	# next tweak value
+	vaddubm		$tweak,$tweak,$tweak
+	vsldoi		$tmp,$tmp,$tmp,15
+	 le?vperm	$in3,$in3,$in3,$leperm
+	vand		$tmp,$tmp,$eighty7
+	 vxor		$out3,$in3,$twk3
+	vxor		$tweak,$tweak,$tmp
+
+	 lvx_u		$in4,$x40,$inp
+	 subi		$len,$len,0x60
+	vxor		$twk4,$tweak,$rndkey0
+	vsrab		$tmp,$tweak,$seven	# next tweak value
+	vaddubm		$tweak,$tweak,$tweak
+	vsldoi		$tmp,$tmp,$tmp,15
+	 le?vperm	$in4,$in4,$in4,$leperm
+	vand		$tmp,$tmp,$eighty7
+	 vxor		$out4,$in4,$twk4
+	vxor		$tweak,$tweak,$tmp
+
+	 lvx_u		$in5,$x50,$inp
+	 addi		$inp,$inp,0x60
+	vxor		$twk5,$tweak,$rndkey0
+	vsrab		$tmp,$tweak,$seven	# next tweak value
+	vaddubm		$tweak,$tweak,$tweak
+	vsldoi		$tmp,$tmp,$tmp,15
+	 le?vperm	$in5,$in5,$in5,$leperm
+	vand		$tmp,$tmp,$eighty7
+	 vxor		$out5,$in5,$twk5
+	vxor		$tweak,$tweak,$tmp
+
+	vxor		v31,v31,$rndkey0
+	mtctr		$rounds
+	b		Loop_xts_dec6x
+
+.align	5
+Loop_xts_dec6x:
+	vncipher	$out0,$out0,v24
+	vncipher	$out1,$out1,v24
+	vncipher	$out2,$out2,v24
+	vncipher	$out3,$out3,v24
+	vncipher	$out4,$out4,v24
+	vncipher	$out5,$out5,v24
+	lvx		v24,$x20,$key_		# round[3]
+	addi		$key_,$key_,0x20
+
+	vncipher	$out0,$out0,v25
+	vncipher	$out1,$out1,v25
+	vncipher	$out2,$out2,v25
+	vncipher	$out3,$out3,v25
+	vncipher	$out4,$out4,v25
+	vncipher	$out5,$out5,v25
+	lvx		v25,$x10,$key_		# round[4]
+	bdnz		Loop_xts_dec6x
+
+	subic		$len,$len,96		# $len-=96
+	 vxor		$in0,$twk0,v31		# xor with last round key
+	vncipher	$out0,$out0,v24
+	vncipher	$out1,$out1,v24
+	 vsrab		$tmp,$tweak,$seven	# next tweak value
+	 vxor		$twk0,$tweak,$rndkey0
+	 vaddubm	$tweak,$tweak,$tweak
+	vncipher	$out2,$out2,v24
+	vncipher	$out3,$out3,v24
+	 vsldoi		$tmp,$tmp,$tmp,15
+	vncipher	$out4,$out4,v24
+	vncipher	$out5,$out5,v24
+
+	subfe.		r0,r0,r0		# borrow?-1:0
+	 vand		$tmp,$tmp,$eighty7
+	vncipher	$out0,$out0,v25
+	vncipher	$out1,$out1,v25
+	 vxor		$tweak,$tweak,$tmp
+	vncipher	$out2,$out2,v25
+	vncipher	$out3,$out3,v25
+	 vxor		$in1,$twk1,v31
+	 vsrab		$tmp,$tweak,$seven	# next tweak value
+	 vxor		$twk1,$tweak,$rndkey0
+	vncipher	$out4,$out4,v25
+	vncipher	$out5,$out5,v25
+
+	and		r0,r0,$len
+	 vaddubm	$tweak,$tweak,$tweak
+	 vsldoi		$tmp,$tmp,$tmp,15
+	vncipher	$out0,$out0,v26
+	vncipher	$out1,$out1,v26
+	 vand		$tmp,$tmp,$eighty7
+	vncipher	$out2,$out2,v26
+	vncipher	$out3,$out3,v26
+	 vxor		$tweak,$tweak,$tmp
+	vncipher	$out4,$out4,v26
+	vncipher	$out5,$out5,v26
+
+	add		$inp,$inp,r0		# $inp is adjusted in such a
+						# way that at exit from the
+						# loop inX-in5 are loaded
+						# with the last "words"
+	 vxor		$in2,$twk2,v31
+	 vsrab		$tmp,$tweak,$seven	# next tweak value
+	 vxor		$twk2,$tweak,$rndkey0
+	 vaddubm	$tweak,$tweak,$tweak
+	vncipher	$out0,$out0,v27
+	vncipher	$out1,$out1,v27
+	 vsldoi		$tmp,$tmp,$tmp,15
+	vncipher	$out2,$out2,v27
+	vncipher	$out3,$out3,v27
+	 vand		$tmp,$tmp,$eighty7
+	vncipher	$out4,$out4,v27
+	vncipher	$out5,$out5,v27
+
+	addi		$key_,$sp,$FRAME+15	# rewind $key_
+	 vxor		$tweak,$tweak,$tmp
+	vncipher	$out0,$out0,v28
+	vncipher	$out1,$out1,v28
+	 vxor		$in3,$twk3,v31
+	 vsrab		$tmp,$tweak,$seven	# next tweak value
+	 vxor		$twk3,$tweak,$rndkey0
+	vncipher	$out2,$out2,v28
+	vncipher	$out3,$out3,v28
+	 vaddubm	$tweak,$tweak,$tweak
+	 vsldoi		$tmp,$tmp,$tmp,15
+	vncipher	$out4,$out4,v28
+	vncipher	$out5,$out5,v28
+	lvx		v24,$x00,$key_		# re-pre-load round[1]
+	 vand		$tmp,$tmp,$eighty7
+
+	vncipher	$out0,$out0,v29
+	vncipher	$out1,$out1,v29
+	 vxor		$tweak,$tweak,$tmp
+	vncipher	$out2,$out2,v29
+	vncipher	$out3,$out3,v29
+	 vxor		$in4,$twk4,v31
+	 vsrab		$tmp,$tweak,$seven	# next tweak value
+	 vxor		$twk4,$tweak,$rndkey0
+	vncipher	$out4,$out4,v29
+	vncipher	$out5,$out5,v29
+	lvx		v25,$x10,$key_		# re-pre-load round[2]
+	 vaddubm	$tweak,$tweak,$tweak
+	 vsldoi		$tmp,$tmp,$tmp,15
+
+	vncipher	$out0,$out0,v30
+	vncipher	$out1,$out1,v30
+	 vand		$tmp,$tmp,$eighty7
+	vncipher	$out2,$out2,v30
+	vncipher	$out3,$out3,v30
+	 vxor		$tweak,$tweak,$tmp
+	vncipher	$out4,$out4,v30
+	vncipher	$out5,$out5,v30
+	 vxor		$in5,$twk5,v31
+	 vsrab		$tmp,$tweak,$seven	# next tweak value
+	 vxor		$twk5,$tweak,$rndkey0
+
+	vncipherlast	$out0,$out0,$in0
+	 lvx_u		$in0,$x00,$inp		# load next input block
+	 vaddubm	$tweak,$tweak,$tweak
+	 vsldoi		$tmp,$tmp,$tmp,15
+	vncipherlast	$out1,$out1,$in1
+	 lvx_u		$in1,$x10,$inp
+	vncipherlast	$out2,$out2,$in2
+	 le?vperm	$in0,$in0,$in0,$leperm
+	 lvx_u		$in2,$x20,$inp
+	 vand		$tmp,$tmp,$eighty7
+	vncipherlast	$out3,$out3,$in3
+	 le?vperm	$in1,$in1,$in1,$leperm
+	 lvx_u		$in3,$x30,$inp
+	vncipherlast	$out4,$out4,$in4
+	 le?vperm	$in2,$in2,$in2,$leperm
+	 lvx_u		$in4,$x40,$inp
+	 vxor		$tweak,$tweak,$tmp
+	vncipherlast	$out5,$out5,$in5
+	 le?vperm	$in3,$in3,$in3,$leperm
+	 lvx_u		$in5,$x50,$inp
+	 addi		$inp,$inp,0x60
+	 le?vperm	$in4,$in4,$in4,$leperm
+	 le?vperm	$in5,$in5,$in5,$leperm
+
+	le?vperm	$out0,$out0,$out0,$leperm
+	le?vperm	$out1,$out1,$out1,$leperm
+	stvx_u		$out0,$x00,$out		# store output
+	 vxor		$out0,$in0,$twk0
+	le?vperm	$out2,$out2,$out2,$leperm
+	stvx_u		$out1,$x10,$out
+	 vxor		$out1,$in1,$twk1
+	le?vperm	$out3,$out3,$out3,$leperm
+	stvx_u		$out2,$x20,$out
+	 vxor		$out2,$in2,$twk2
+	le?vperm	$out4,$out4,$out4,$leperm
+	stvx_u		$out3,$x30,$out
+	 vxor		$out3,$in3,$twk3
+	le?vperm	$out5,$out5,$out5,$leperm
+	stvx_u		$out4,$x40,$out
+	 vxor		$out4,$in4,$twk4
+	stvx_u		$out5,$x50,$out
+	 vxor		$out5,$in5,$twk5
+	addi		$out,$out,0x60
+
+	mtctr		$rounds
+	beq		Loop_xts_dec6x		# did $len-=96 borrow?
+
+	addic.		$len,$len,0x60
+	beq		Lxts_dec6x_zero
+	cmpwi		$len,0x20
+	blt		Lxts_dec6x_one
+	nop
+	beq		Lxts_dec6x_two
+	cmpwi		$len,0x40
+	blt		Lxts_dec6x_three
+	nop
+	beq		Lxts_dec6x_four
+
+Lxts_dec6x_five:
+	vxor		$out0,$in1,$twk0
+	vxor		$out1,$in2,$twk1
+	vxor		$out2,$in3,$twk2
+	vxor		$out3,$in4,$twk3
+	vxor		$out4,$in5,$twk4
+
+	bl		_aesp8_xts_dec5x
+
+	le?vperm	$out0,$out0,$out0,$leperm
+	vmr		$twk0,$twk5		# unused tweak
+	vxor		$twk1,$tweak,$rndkey0
+	le?vperm	$out1,$out1,$out1,$leperm
+	stvx_u		$out0,$x00,$out		# store output
+	vxor		$out0,$in0,$twk1
+	le?vperm	$out2,$out2,$out2,$leperm
+	stvx_u		$out1,$x10,$out
+	le?vperm	$out3,$out3,$out3,$leperm
+	stvx_u		$out2,$x20,$out
+	le?vperm	$out4,$out4,$out4,$leperm
+	stvx_u		$out3,$x30,$out
+	stvx_u		$out4,$x40,$out
+	addi		$out,$out,0x50
+	bne		Lxts_dec6x_steal
+	b		Lxts_dec6x_done
+
+.align	4
+Lxts_dec6x_four:
+	vxor		$out0,$in2,$twk0
+	vxor		$out1,$in3,$twk1
+	vxor		$out2,$in4,$twk2
+	vxor		$out3,$in5,$twk3
+	vxor		$out4,$out4,$out4
+
+	bl		_aesp8_xts_dec5x
+
+	le?vperm	$out0,$out0,$out0,$leperm
+	vmr		$twk0,$twk4		# unused tweak
+	vmr		$twk1,$twk5
+	le?vperm	$out1,$out1,$out1,$leperm
+	stvx_u		$out0,$x00,$out		# store output
+	vxor		$out0,$in0,$twk5
+	le?vperm	$out2,$out2,$out2,$leperm
+	stvx_u		$out1,$x10,$out
+	le?vperm	$out3,$out3,$out3,$leperm
+	stvx_u		$out2,$x20,$out
+	stvx_u		$out3,$x30,$out
+	addi		$out,$out,0x40
+	bne		Lxts_dec6x_steal
+	b		Lxts_dec6x_done
+
+.align	4
+Lxts_dec6x_three:
+	vxor		$out0,$in3,$twk0
+	vxor		$out1,$in4,$twk1
+	vxor		$out2,$in5,$twk2
+	vxor		$out3,$out3,$out3
+	vxor		$out4,$out4,$out4
+
+	bl		_aesp8_xts_dec5x
+
+	le?vperm	$out0,$out0,$out0,$leperm
+	vmr		$twk0,$twk3		# unused tweak
+	vmr		$twk1,$twk4
+	le?vperm	$out1,$out1,$out1,$leperm
+	stvx_u		$out0,$x00,$out		# store output
+	vxor		$out0,$in0,$twk4
+	le?vperm	$out2,$out2,$out2,$leperm
+	stvx_u		$out1,$x10,$out
+	stvx_u		$out2,$x20,$out
+	addi		$out,$out,0x30
+	bne		Lxts_dec6x_steal
+	b		Lxts_dec6x_done
+
+.align	4
+Lxts_dec6x_two:
+	vxor		$out0,$in4,$twk0
+	vxor		$out1,$in5,$twk1
+	vxor		$out2,$out2,$out2
+	vxor		$out3,$out3,$out3
+	vxor		$out4,$out4,$out4
+
+	bl		_aesp8_xts_dec5x
+
+	le?vperm	$out0,$out0,$out0,$leperm
+	vmr		$twk0,$twk2		# unused tweak
+	vmr		$twk1,$twk3
+	le?vperm	$out1,$out1,$out1,$leperm
+	stvx_u		$out0,$x00,$out		# store output
+	vxor		$out0,$in0,$twk3
+	stvx_u		$out1,$x10,$out
+	addi		$out,$out,0x20
+	bne		Lxts_dec6x_steal
+	b		Lxts_dec6x_done
+
+.align	4
+Lxts_dec6x_one:
+	vxor		$out0,$in5,$twk0
+	nop
+Loop_xts_dec1x:
+	vncipher	$out0,$out0,v24
+	lvx		v24,$x20,$key_		# round[3]
+	addi		$key_,$key_,0x20
+
+	vncipher	$out0,$out0,v25
+	lvx		v25,$x10,$key_		# round[4]
+	bdnz		Loop_xts_dec1x
+
+	subi		r0,$taillen,1
+	vncipher	$out0,$out0,v24
+
+	andi.		r0,r0,16
+	cmpwi		$taillen,0
+	vncipher	$out0,$out0,v25
+
+	sub		$inp,$inp,r0
+	vncipher	$out0,$out0,v26
+
+	lvx_u		$in0,0,$inp
+	vncipher	$out0,$out0,v27
+
+	addi		$key_,$sp,$FRAME+15	# rewind $key_
+	vncipher	$out0,$out0,v28
+	lvx		v24,$x00,$key_		# re-pre-load round[1]
+
+	vncipher	$out0,$out0,v29
+	lvx		v25,$x10,$key_		# re-pre-load round[2]
+	 vxor		$twk0,$twk0,v31
+
+	le?vperm	$in0,$in0,$in0,$leperm
+	vncipher	$out0,$out0,v30
+
+	mtctr		$rounds
+	vncipherlast	$out0,$out0,$twk0
+
+	vmr		$twk0,$twk1		# unused tweak
+	vmr		$twk1,$twk2
+	le?vperm	$out0,$out0,$out0,$leperm
+	stvx_u		$out0,$x00,$out		# store output
+	addi		$out,$out,0x10
+	vxor		$out0,$in0,$twk2
+	bne		Lxts_dec6x_steal
+	b		Lxts_dec6x_done
+
+.align	4
+Lxts_dec6x_zero:
+	cmpwi		$taillen,0
+	beq		Lxts_dec6x_done
+
+	lvx_u		$in0,0,$inp
+	le?vperm	$in0,$in0,$in0,$leperm
+	vxor		$out0,$in0,$twk1
+Lxts_dec6x_steal:
+	vncipher	$out0,$out0,v24
+	lvx		v24,$x20,$key_		# round[3]
+	addi		$key_,$key_,0x20
+
+	vncipher	$out0,$out0,v25
+	lvx		v25,$x10,$key_		# round[4]
+	bdnz		Lxts_dec6x_steal
+
+	add		$inp,$inp,$taillen
+	vncipher	$out0,$out0,v24
+
+	cmpwi		$taillen,0
+	vncipher	$out0,$out0,v25
+
+	lvx_u		$in0,0,$inp
+	vncipher	$out0,$out0,v26
+
+	lvsr		$inpperm,0,$taillen	# $in5 is no more
+	vncipher	$out0,$out0,v27
+
+	addi		$key_,$sp,$FRAME+15	# rewind $key_
+	vncipher	$out0,$out0,v28
+	lvx		v24,$x00,$key_		# re-pre-load round[1]
+
+	vncipher	$out0,$out0,v29
+	lvx		v25,$x10,$key_		# re-pre-load round[2]
+	 vxor		$twk1,$twk1,v31
+
+	le?vperm	$in0,$in0,$in0,$leperm
+	vncipher	$out0,$out0,v30
+
+	vperm		$in0,$in0,$in0,$inpperm
+	vncipherlast	$tmp,$out0,$twk1
+
+	le?vperm	$out0,$tmp,$tmp,$leperm
+	le?stvx_u	$out0,0,$out
+	be?stvx_u	$tmp,0,$out
+
+	vxor		$out0,$out0,$out0
+	vspltisb	$out1,-1
+	vperm		$out0,$out0,$out1,$inpperm
+	vsel		$out0,$in0,$tmp,$out0
+	vxor		$out0,$out0,$twk0
+
+	subi		r30,$out,1
+	mtctr		$taillen
+Loop_xts_dec6x_steal:
+	lbzu		r0,1(r30)
+	stb		r0,16(r30)
+	bdnz		Loop_xts_dec6x_steal
+
+	li		$taillen,0
+	mtctr		$rounds
+	b		Loop_xts_dec1x		# one more time...
+
+.align	4
+Lxts_dec6x_done:
+	${UCMP}i	$ivp,0
+	beq		Lxts_dec6x_ret
+
+	vxor		$tweak,$twk0,$rndkey0
+	le?vperm	$tweak,$tweak,$tweak,$leperm
+	stvx_u		$tweak,0,$ivp
+
+Lxts_dec6x_ret:
+	mtlr		r11
+	li		r10,`$FRAME+15`
+	li		r11,`$FRAME+31`
+	stvx		$seven,r10,$sp		# wipe copies of round keys
+	addi		r10,r10,32
+	stvx		$seven,r11,$sp
+	addi		r11,r11,32
+	stvx		$seven,r10,$sp
+	addi		r10,r10,32
+	stvx		$seven,r11,$sp
+	addi		r11,r11,32
+	stvx		$seven,r10,$sp
+	addi		r10,r10,32
+	stvx		$seven,r11,$sp
+	addi		r11,r11,32
+	stvx		$seven,r10,$sp
+	addi		r10,r10,32
+	stvx		$seven,r11,$sp
+	addi		r11,r11,32
+
+	mtspr		256,$vrsave
+	lvx		v20,r10,$sp		# ABI says so
+	addi		r10,r10,32
+	lvx		v21,r11,$sp
+	addi		r11,r11,32
+	lvx		v22,r10,$sp
+	addi		r10,r10,32
+	lvx		v23,r11,$sp
+	addi		r11,r11,32
+	lvx		v24,r10,$sp
+	addi		r10,r10,32
+	lvx		v25,r11,$sp
+	addi		r11,r11,32
+	lvx		v26,r10,$sp
+	addi		r10,r10,32
+	lvx		v27,r11,$sp
+	addi		r11,r11,32
+	lvx		v28,r10,$sp
+	addi		r10,r10,32
+	lvx		v29,r11,$sp
+	addi		r11,r11,32
+	lvx		v30,r10,$sp
+	lvx		v31,r11,$sp
+	$POP		r26,`$FRAME+21*16+0*$SIZE_T`($sp)
+	$POP		r27,`$FRAME+21*16+1*$SIZE_T`($sp)
+	$POP		r28,`$FRAME+21*16+2*$SIZE_T`($sp)
+	$POP		r29,`$FRAME+21*16+3*$SIZE_T`($sp)
+	$POP		r30,`$FRAME+21*16+4*$SIZE_T`($sp)
+	$POP		r31,`$FRAME+21*16+5*$SIZE_T`($sp)
+	addi		$sp,$sp,`$FRAME+21*16+6*$SIZE_T`
+	blr
+	.long		0
+	.byte		0,12,0x04,1,0x80,6,6,0
+	.long		0
+
+.align	5
+_aesp8_xts_dec5x:
+	vncipher	$out0,$out0,v24
+	vncipher	$out1,$out1,v24
+	vncipher	$out2,$out2,v24
+	vncipher	$out3,$out3,v24
+	vncipher	$out4,$out4,v24
+	lvx		v24,$x20,$key_		# round[3]
+	addi		$key_,$key_,0x20
+
+	vncipher	$out0,$out0,v25
+	vncipher	$out1,$out1,v25
+	vncipher	$out2,$out2,v25
+	vncipher	$out3,$out3,v25
+	vncipher	$out4,$out4,v25
+	lvx		v25,$x10,$key_		# round[4]
+	bdnz		_aesp8_xts_dec5x
+
+	subi		r0,$taillen,1
+	vncipher	$out0,$out0,v24
+	vncipher	$out1,$out1,v24
+	vncipher	$out2,$out2,v24
+	vncipher	$out3,$out3,v24
+	vncipher	$out4,$out4,v24
+
+	andi.		r0,r0,16
+	cmpwi		$taillen,0
+	vncipher	$out0,$out0,v25
+	vncipher	$out1,$out1,v25
+	vncipher	$out2,$out2,v25
+	vncipher	$out3,$out3,v25
+	vncipher	$out4,$out4,v25
+	 vxor		$twk0,$twk0,v31
+
+	sub		$inp,$inp,r0
+	vncipher	$out0,$out0,v26
+	vncipher	$out1,$out1,v26
+	vncipher	$out2,$out2,v26
+	vncipher	$out3,$out3,v26
+	vncipher	$out4,$out4,v26
+	 vxor		$in1,$twk1,v31
+
+	vncipher	$out0,$out0,v27
+	lvx_u		$in0,0,$inp
+	vncipher	$out1,$out1,v27
+	vncipher	$out2,$out2,v27
+	vncipher	$out3,$out3,v27
+	vncipher	$out4,$out4,v27
+	 vxor		$in2,$twk2,v31
+
+	addi		$key_,$sp,$FRAME+15	# rewind $key_
+	vncipher	$out0,$out0,v28
+	vncipher	$out1,$out1,v28
+	vncipher	$out2,$out2,v28
+	vncipher	$out3,$out3,v28
+	vncipher	$out4,$out4,v28
+	lvx		v24,$x00,$key_		# re-pre-load round[1]
+	 vxor		$in3,$twk3,v31
+
+	vncipher	$out0,$out0,v29
+	le?vperm	$in0,$in0,$in0,$leperm
+	vncipher	$out1,$out1,v29
+	vncipher	$out2,$out2,v29
+	vncipher	$out3,$out3,v29
+	vncipher	$out4,$out4,v29
+	lvx		v25,$x10,$key_		# re-pre-load round[2]
+	 vxor		$in4,$twk4,v31
+
+	vncipher	$out0,$out0,v30
+	vncipher	$out1,$out1,v30
+	vncipher	$out2,$out2,v30
+	vncipher	$out3,$out3,v30
+	vncipher	$out4,$out4,v30
+
+	vncipherlast	$out0,$out0,$twk0
+	vncipherlast	$out1,$out1,$in1
+	vncipherlast	$out2,$out2,$in2
+	vncipherlast	$out3,$out3,$in3
+	vncipherlast	$out4,$out4,$in4
+	mtctr		$rounds
+	blr
+	.long		0
+	.byte		0,12,0x14,0,0,0,0,0
+___
+}}	}}}
+
+my $consts=1;
+foreach(split("\n",$code)) {
+        s/\`([^\`]*)\`/eval($1)/geo;
+
+	# constants table endian-specific conversion
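+	# ("?rev" rows are emitted with their bytes reversed on LE, while
+	# "?inv" xors each byte with 0xf, i.e. lane index i becomes 15-i)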
+	if ($consts && m/\.(long|byte)\s+(.+)\s+(\?[a-z]*)$/o) {
+	    my $conv=$3;
+	    my @bytes=();
+
+	    # convert to endian-agnostic format
+	    if ($1 eq "long") {
+	      foreach (split(/,\s*/,$2)) {
+		my $l = /^0/?oct:int;
+		push @bytes,($l>>24)&0xff,($l>>16)&0xff,($l>>8)&0xff,$l&0xff;
+	      }
+	    } else {
+		@bytes = map(/^0/?oct:int,split(/,\s*/,$2));
+	    }
+
+	    # little-endian conversion
+	    if ($flavour =~ /le$/o) {
+		SWITCH: for($conv)  {
+		    /\?inv/ && do   { @bytes=map($_^0xf,@bytes); last; };
+		    /\?rev/ && do   { @bytes=reverse(@bytes);    last; };
+		}
+	    }
+
+	    # emit
+	    print ".byte\t",join(',',map (sprintf("0x%02x",$_),@bytes)),"\n";
+	    next;
+	}
+	$consts=0 if (m/Lconsts:/o);	# end of table
+
+	# instructions prefixed with '?' are endian-specific and need
+	# to be adjusted accordingly...
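+	# (e.g. "?vperm" has its two data-source operands swapped on LE,
+	# and "?vsldoi" likewise swaps its sources and replaces the shift
+	# count with 16 minus the original)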
+	if ($flavour =~ /le$/o) {	# little-endian
+	    s/le\?//o		or
+	    s/be\?/#be#/o	or
+	    s/\?lvsr/lvsl/o	or
+	    s/\?lvsl/lvsr/o	or
+	    s/\?(vperm\s+v[0-9]+,\s*)(v[0-9]+,\s*)(v[0-9]+,\s*)(v[0-9]+)/$1$3$2$4/o or
+	    s/\?(vsldoi\s+v[0-9]+,\s*)(v[0-9]+,)\s*(v[0-9]+,\s*)([0-9]+)/$1$3$2 16-$4/o or
+	    s/\?(vspltw\s+v[0-9]+,\s*)(v[0-9]+,)\s*([0-9])/$1$2 3-$3/o;
+	} else {			# big-endian
+	    s/le\?/#le#/o	or
+	    s/be\?//o		or
+	    s/\?([a-z]+)/$1/o;
+	}
+
+        print $_,"\n";
+}
+
+close STDOUT or die "error closing STDOUT: $!";
diff --git a/ap/lib/libssl/openssl-1.1.1o/crypto/aes/asm/aest4-sparcv9.pl b/ap/lib/libssl/openssl-1.1.1o/crypto/aes/asm/aest4-sparcv9.pl
new file mode 100644
index 0000000..478c97e
--- /dev/null
+++ b/ap/lib/libssl/openssl-1.1.1o/crypto/aes/asm/aest4-sparcv9.pl
@@ -0,0 +1,929 @@
+#! /usr/bin/env perl
+# Copyright 2012-2020 The OpenSSL Project Authors. All Rights Reserved.
+#
+# Licensed under the OpenSSL license (the "License").  You may not use
+# this file except in compliance with the License.  You can obtain a copy
+# in the file LICENSE in the source distribution or at
+# https://www.openssl.org/source/license.html
+
+
+# ====================================================================
+# Written by David S. Miller and Andy Polyakov.
+# The module is licensed under 2-clause BSD license. October 2012.
+# All rights reserved.
+# ====================================================================
+
+######################################################################
+# AES for SPARC T4.
+#
+# AES round instructions complete in 3 cycles and can be issued every
+# cycle. This means that round calculations should take 4*rounds cycles,
+# because any given round instruction depends on the result of *both*
+# previous instructions:
+#
+#	|0 |1 |2 |3 |4
+#	|01|01|01|
+#	   |23|23|23|
+#	            |01|01|...
+#	               |23|...
+#
+# Provided that fxor [with IV] takes 3 cycles to complete, the critical
+# path length for CBC encrypt would be 3+4*rounds cycles; in other
+# words, one byte should be processed in at least (3+4*rounds)/16
+# cycles. This estimate doesn't account for "collateral" instructions,
+# such as fetching input from memory, xor-ing it with the zero-round
+# key and storing the result. Yet, *measured* performance [for data
+# aligned at a 64-bit boundary!] deviates from this equation by less
+# than 0.5%:
+#
+#		128-bit key	192-		256-
+# CBC encrypt	2.70/2.90(*)	3.20/3.40	3.70/3.90
+#			 (*) numbers after slash are for
+#			     misaligned data.
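+#
+# As a quick sanity check of the formula above: with a 128-bit key
+# (10 rounds), (3+4*10)/16 = 43/16 ~= 2.69 cycles per byte, which
+# agrees with the measured 2.70 to within the quoted 0.5%.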
+#
+# Out-of-order execution logic managed to fully overlap "collateral"
+# instructions with those on critical path. Amazing!
+#
+# As with Intel AES-NI, the question is whether it's possible to
+# improve performance of parallelizable modes by interleaving round
+# instructions. Given the round-instruction latency and throughput,
+# the optimal interleave factor is 2. But can we expect a 2x
+# performance improvement? Well, as round instructions can be issued
+# one per cycle, they don't saturate the 2-way issue pipeline and
+# therefore there is room for "collateral" calculations... Yet, a 2x
+# speed-up over CBC encrypt remains unattainable:
+#
+#		128-bit key	192-		256-
+# CBC decrypt	1.64/2.11	1.89/2.37	2.23/2.61
+# CTR		1.64/2.08(*)	1.89/2.33	2.23/2.61
+#			 (*) numbers after slash are for
+#			     misaligned data.
+#
+# Estimates based on the instruction count, under the assumption that
+# round instructions are not pairable with any other instruction,
+# suggest that the latter is indeed the case and the pipeline runs
+# underutilized. It should be noted that the T4 out-of-order execution
+# logic is so capable that the performance gain from the 2x interleave
+# is not even impressive, ~7-13% over non-interleaved code, largest
+# for 256-bit keys.
+
+# To anchor this to something else: a software implementation processes
+# one byte in 29 cycles with a 128-bit key on the same processor, while
+# Intel Sandy Bridge encrypts a byte in 5.07 cycles in CBC mode and
+# decrypts it in 0.93, naturally with AES-NI.
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+push(@INC,"${dir}","${dir}../../perlasm");
+require "sparcv9_modes.pl";
+
+$output = pop;
+open STDOUT,">$output" or die "can't open $output: $!";
+
+$::evp=1;	# if $evp is set to 0, the script generates a module with
+# AES_[en|de]crypt, AES_set_[en|de]crypt_key and AES_cbc_encrypt entry
+# points. These, however, are not fully compatible with openssl/aes.h,
+# because they expect AES_KEY to be aligned at a 64-bit boundary. When
+# used through EVP, alignment is arranged at the EVP layer; EVP also
+# arranges at least 32-bit alignment of the IV.
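+# Generating that non-EVP flavour is just a matter of flipping this
+# assignment to 0; see the "if (!$::evp)" block near the end of this
+# file.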
+
+######################################################################
+# single-round subroutines
+#
+{
+my ($inp,$out,$key,$rounds,$tmp,$mask)=map("%o$_",(0..5));
+
+$code.=<<___;
+#include "sparc_arch.h"
+
+#ifdef	__arch64__
+.register	%g2,#scratch
+.register	%g3,#scratch
+#endif
+
+.text
+
+.globl	aes_t4_encrypt
+.align	32
+aes_t4_encrypt:
+	andcc		$inp, 7, %g1		! is input aligned?
+	andn		$inp, 7, $inp
+
+	ldx		[$key + 0], %g4
+	ldx		[$key + 8], %g5
+
+	ldx		[$inp + 0], %o4
+	bz,pt		%icc, 1f
+	ldx		[$inp + 8], %o5
+	ldx		[$inp + 16], $inp
+	sll		%g1, 3, %g1
+	sub		%g0, %g1, %o3
+	sllx		%o4, %g1, %o4
+	sllx		%o5, %g1, %g1
+	srlx		%o5, %o3, %o5
+	srlx		$inp, %o3, %o3
+	or		%o5, %o4, %o4
+	or		%o3, %g1, %o5
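+	! the shifts and ors above funnel three aligned doublewords into
+	! %o4/%o5, reconstructing the misaligned 16-byte input block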
+1:
+	ld		[$key + 240], $rounds
+	ldd		[$key + 16], %f12
+	ldd		[$key + 24], %f14
+	xor		%g4, %o4, %o4
+	xor		%g5, %o5, %o5
+	movxtod		%o4, %f0
+	movxtod		%o5, %f2
+	srl		$rounds, 1, $rounds
+	ldd		[$key + 32], %f16
+	sub		$rounds, 1, $rounds
+	ldd		[$key + 40], %f18
+	add		$key, 48, $key
+
+.Lenc:
+	aes_eround01	%f12, %f0, %f2, %f4
+	aes_eround23	%f14, %f0, %f2, %f2
+	ldd		[$key + 0], %f12
+	ldd		[$key + 8], %f14
+	sub		$rounds,1,$rounds
+	aes_eround01	%f16, %f4, %f2, %f0
+	aes_eround23	%f18, %f4, %f2, %f2
+	ldd		[$key + 16], %f16
+	ldd		[$key + 24], %f18
+	brnz,pt		$rounds, .Lenc
+	add		$key, 32, $key
+
+	andcc		$out, 7, $tmp		! is output aligned?
+	aes_eround01	%f12, %f0, %f2, %f4
+	aes_eround23	%f14, %f0, %f2, %f2
+	aes_eround01_l	%f16, %f4, %f2, %f0
+	aes_eround23_l	%f18, %f4, %f2, %f2
+
+	bnz,pn		%icc, 2f
+	nop
+
+	std		%f0, [$out + 0]
+	retl
+	std		%f2, [$out + 8]
+
+2:	alignaddrl	$out, %g0, $out
+	mov		0xff, $mask
+	srl		$mask, $tmp, $mask
+
+	faligndata	%f0, %f0, %f4
+	faligndata	%f0, %f2, %f6
+	faligndata	%f2, %f2, %f8
+
+	stda		%f4, [$out + $mask]0xc0	! partial store
+	std		%f6, [$out + 8]
+	add		$out, 16, $out
+	orn		%g0, $mask, $mask
+	retl
+	stda		%f8, [$out + $mask]0xc0	! partial store
+.type	aes_t4_encrypt,#function
+.size	aes_t4_encrypt,.-aes_t4_encrypt
+
+.globl	aes_t4_decrypt
+.align	32
+aes_t4_decrypt:
+	andcc		$inp, 7, %g1		! is input aligned?
+	andn		$inp, 7, $inp
+
+	ldx		[$key + 0], %g4
+	ldx		[$key + 8], %g5
+
+	ldx		[$inp + 0], %o4
+	bz,pt		%icc, 1f
+	ldx		[$inp + 8], %o5
+	ldx		[$inp + 16], $inp
+	sll		%g1, 3, %g1
+	sub		%g0, %g1, %o3
+	sllx		%o4, %g1, %o4
+	sllx		%o5, %g1, %g1
+	srlx		%o5, %o3, %o5
+	srlx		$inp, %o3, %o3
+	or		%o5, %o4, %o4
+	or		%o3, %g1, %o5
+1:
+	ld		[$key + 240], $rounds
+	ldd		[$key + 16], %f12
+	ldd		[$key + 24], %f14
+	xor		%g4, %o4, %o4
+	xor		%g5, %o5, %o5
+	movxtod		%o4, %f0
+	movxtod		%o5, %f2
+	srl		$rounds, 1, $rounds
+	ldd		[$key + 32], %f16
+	sub		$rounds, 1, $rounds
+	ldd		[$key + 40], %f18
+	add		$key, 48, $key
+
+.Ldec:
+	aes_dround01	%f12, %f0, %f2, %f4
+	aes_dround23	%f14, %f0, %f2, %f2
+	ldd		[$key + 0], %f12
+	ldd		[$key + 8], %f14
+	sub		$rounds,1,$rounds
+	aes_dround01	%f16, %f4, %f2, %f0
+	aes_dround23	%f18, %f4, %f2, %f2
+	ldd		[$key + 16], %f16
+	ldd		[$key + 24], %f18
+	brnz,pt		$rounds, .Ldec
+	add		$key, 32, $key
+
+	andcc		$out, 7, $tmp		! is output aligned?
+	aes_dround01	%f12, %f0, %f2, %f4
+	aes_dround23	%f14, %f0, %f2, %f2
+	aes_dround01_l	%f16, %f4, %f2, %f0
+	aes_dround23_l	%f18, %f4, %f2, %f2
+
+	bnz,pn		%icc, 2f
+	nop
+
+	std		%f0, [$out + 0]
+	retl
+	std		%f2, [$out + 8]
+
+2:	alignaddrl	$out, %g0, $out
+	mov		0xff, $mask
+	srl		$mask, $tmp, $mask
+
+	faligndata	%f0, %f0, %f4
+	faligndata	%f0, %f2, %f6
+	faligndata	%f2, %f2, %f8
+
+	stda		%f4, [$out + $mask]0xc0	! partial store
+	std		%f6, [$out + 8]
+	add		$out, 16, $out
+	orn		%g0, $mask, $mask
+	retl
+	stda		%f8, [$out + $mask]0xc0	! partial store
+.type	aes_t4_decrypt,#function
+.size	aes_t4_decrypt,.-aes_t4_decrypt
+___
+}
+
+######################################################################
+# key setup subroutines
+#
+{
+my ($inp,$bits,$out,$tmp)=map("%o$_",(0..5));
+$code.=<<___;
+.globl	aes_t4_set_encrypt_key
+.align	32
+aes_t4_set_encrypt_key:
+.Lset_encrypt_key:
+	and		$inp, 7, $tmp
+	alignaddr	$inp, %g0, $inp
+	cmp		$bits, 192
+	ldd		[$inp + 0], %f0
+	bl,pt		%icc,.L128
+	ldd		[$inp + 8], %f2
+
+	be,pt		%icc,.L192
+	ldd		[$inp + 16], %f4
+	brz,pt		$tmp, .L256aligned
+	ldd		[$inp + 24], %f6
+
+	ldd		[$inp + 32], %f8
+	faligndata	%f0, %f2, %f0
+	faligndata	%f2, %f4, %f2
+	faligndata	%f4, %f6, %f4
+	faligndata	%f6, %f8, %f6
+.L256aligned:
+___
+for ($i=0; $i<6; $i++) {
+    $code.=<<___;
+	std		%f0, [$out + `32*$i+0`]
+	aes_kexpand1	%f0, %f6, $i, %f0
+	std		%f2, [$out + `32*$i+8`]
+	aes_kexpand2	%f2, %f0, %f2
+	std		%f4, [$out + `32*$i+16`]
+	aes_kexpand0	%f4, %f2, %f4
+	std		%f6, [$out + `32*$i+24`]
+	aes_kexpand2	%f6, %f4, %f6
+___
+}
+$code.=<<___;
+	std		%f0, [$out + `32*$i+0`]
+	aes_kexpand1	%f0, %f6, $i, %f0
+	std		%f2, [$out + `32*$i+8`]
+	aes_kexpand2	%f2, %f0, %f2
+	std		%f4, [$out + `32*$i+16`]
+	std		%f6, [$out + `32*$i+24`]
+	std		%f0, [$out + `32*$i+32`]
+	std		%f2, [$out + `32*$i+40`]
+
+	mov		14, $tmp
+	st		$tmp, [$out + 240]
+	retl
+	xor		%o0, %o0, %o0
+
+.align	16
+.L192:
+	brz,pt		$tmp, .L192aligned
+	nop
+
+	ldd		[$inp + 24], %f6
+	faligndata	%f0, %f2, %f0
+	faligndata	%f2, %f4, %f2
+	faligndata	%f4, %f6, %f4
+.L192aligned:
+___
+for ($i=0; $i<7; $i++) {
+    $code.=<<___;
+	std		%f0, [$out + `24*$i+0`]
+	aes_kexpand1	%f0, %f4, $i, %f0
+	std		%f2, [$out + `24*$i+8`]
+	aes_kexpand2	%f2, %f0, %f2
+	std		%f4, [$out + `24*$i+16`]
+	aes_kexpand2	%f4, %f2, %f4
+___
+}
+$code.=<<___;
+	std		%f0, [$out + `24*$i+0`]
+	aes_kexpand1	%f0, %f4, $i, %f0
+	std		%f2, [$out + `24*$i+8`]
+	aes_kexpand2	%f2, %f0, %f2
+	std		%f4, [$out + `24*$i+16`]
+	std		%f0, [$out + `24*$i+24`]
+	std		%f2, [$out + `24*$i+32`]
+
+	mov		12, $tmp
+	st		$tmp, [$out + 240]
+	retl
+	xor		%o0, %o0, %o0
+
+.align	16
+.L128:
+	brz,pt		$tmp, .L128aligned
+	nop
+
+	ldd		[$inp + 16], %f4
+	faligndata	%f0, %f2, %f0
+	faligndata	%f2, %f4, %f2
+.L128aligned:
+___
+for ($i=0; $i<10; $i++) {
+    $code.=<<___;
+	std		%f0, [$out + `16*$i+0`]
+	aes_kexpand1	%f0, %f2, $i, %f0
+	std		%f2, [$out + `16*$i+8`]
+	aes_kexpand2	%f2, %f0, %f2
+___
+}
+$code.=<<___;
+	std		%f0, [$out + `16*$i+0`]
+	std		%f2, [$out + `16*$i+8`]
+
+	mov		10, $tmp
+	st		$tmp, [$out + 240]
+	retl
+	xor		%o0, %o0, %o0
+.type	aes_t4_set_encrypt_key,#function
+.size	aes_t4_set_encrypt_key,.-aes_t4_set_encrypt_key
+
+.globl	aes_t4_set_decrypt_key
+.align	32
+aes_t4_set_decrypt_key:
+	mov		%o7, %o5
+	call		.Lset_encrypt_key
+	nop
+
+	mov		%o5, %o7
+	sll		$tmp, 4, $inp		! $tmp is number of rounds
+	add		$tmp, 2, $tmp
+	add		$out, $inp, $inp	! $inp=$out+16*rounds
+	srl		$tmp, 2, $tmp		! $tmp=(rounds+2)/4
+
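+	! reverse the order of the 16-byte round keys in place, two pairs
+	! per iteration, so the decrypt rounds can read the schedule in
+	! the forward direction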
+.Lkey_flip:
+	ldd		[$out + 0],  %f0
+	ldd		[$out + 8],  %f2
+	ldd		[$out + 16], %f4
+	ldd		[$out + 24], %f6
+	ldd		[$inp + 0],  %f8
+	ldd		[$inp + 8],  %f10
+	ldd		[$inp - 16], %f12
+	ldd		[$inp - 8],  %f14
+	sub		$tmp, 1, $tmp
+	std		%f0, [$inp + 0]
+	std		%f2, [$inp + 8]
+	std		%f4, [$inp - 16]
+	std		%f6, [$inp - 8]
+	std		%f8, [$out + 0]
+	std		%f10, [$out + 8]
+	std		%f12, [$out + 16]
+	std		%f14, [$out + 24]
+	add		$out, 32, $out
+	brnz		$tmp, .Lkey_flip
+	sub		$inp, 32, $inp
+
+	retl
+	xor		%o0, %o0, %o0
+.type	aes_t4_set_decrypt_key,#function
+.size	aes_t4_set_decrypt_key,.-aes_t4_set_decrypt_key
+___
+}
+
+{{{
+my ($inp,$out,$len,$key,$ivec,$enc)=map("%i$_",(0..5));
+my ($ileft,$iright,$ooff,$omask,$ivoff)=map("%l$_",(1..7));
+
+$code.=<<___;
+.align	32
+_aes128_encrypt_1x:
+___
+for ($i=0; $i<4; $i++) {
+    $code.=<<___;
+	aes_eround01	%f`16+8*$i+0`, %f0, %f2, %f4
+	aes_eround23	%f`16+8*$i+2`, %f0, %f2, %f2
+	aes_eround01	%f`16+8*$i+4`, %f4, %f2, %f0
+	aes_eround23	%f`16+8*$i+6`, %f4, %f2, %f2
+___
+}
+$code.=<<___;
+	aes_eround01	%f48, %f0, %f2, %f4
+	aes_eround23	%f50, %f0, %f2, %f2
+	aes_eround01_l	%f52, %f4, %f2, %f0
+	retl
+	aes_eround23_l	%f54, %f4, %f2, %f2
+.type	_aes128_encrypt_1x,#function
+.size	_aes128_encrypt_1x,.-_aes128_encrypt_1x
+
+.align	32
+_aes128_encrypt_2x:
+___
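+# This is the 2x interleave discussed in the header: two independent
+# blocks share each pair of round-key registers, hiding the 3-cycle
+# round-instruction latency.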
+for ($i=0; $i<4; $i++) {
+    $code.=<<___;
+	aes_eround01	%f`16+8*$i+0`, %f0, %f2, %f8
+	aes_eround23	%f`16+8*$i+2`, %f0, %f2, %f2
+	aes_eround01	%f`16+8*$i+0`, %f4, %f6, %f10
+	aes_eround23	%f`16+8*$i+2`, %f4, %f6, %f6
+	aes_eround01	%f`16+8*$i+4`, %f8, %f2, %f0
+	aes_eround23	%f`16+8*$i+6`, %f8, %f2, %f2
+	aes_eround01	%f`16+8*$i+4`, %f10, %f6, %f4
+	aes_eround23	%f`16+8*$i+6`, %f10, %f6, %f6
+___
+}
+$code.=<<___;
+	aes_eround01	%f48, %f0, %f2, %f8
+	aes_eround23	%f50, %f0, %f2, %f2
+	aes_eround01	%f48, %f4, %f6, %f10
+	aes_eround23	%f50, %f4, %f6, %f6
+	aes_eround01_l	%f52, %f8, %f2, %f0
+	aes_eround23_l	%f54, %f8, %f2, %f2
+	aes_eround01_l	%f52, %f10, %f6, %f4
+	retl
+	aes_eround23_l	%f54, %f10, %f6, %f6
+.type	_aes128_encrypt_2x,#function
+.size	_aes128_encrypt_2x,.-_aes128_encrypt_2x
+
+.align	32
+_aes128_loadkey:
+	ldx		[$key + 0], %g4
+	ldx		[$key + 8], %g5
+___
+for ($i=2; $i<22;$i++) {			# load key schedule
+    $code.=<<___;
+	ldd		[$key + `8*$i`], %f`12+2*$i`
+___
+}
+$code.=<<___;
+	retl
+	nop
+.type	_aes128_loadkey,#function
+.size	_aes128_loadkey,.-_aes128_loadkey
+_aes128_load_enckey=_aes128_loadkey
+_aes128_load_deckey=_aes128_loadkey
+
+___
+
+&alg_cbc_encrypt_implement("aes",128);
+if ($::evp) {
+    &alg_ctr32_implement("aes",128);
+    &alg_xts_implement("aes",128,"en");
+    &alg_xts_implement("aes",128,"de");
+}
+&alg_cbc_decrypt_implement("aes",128);
+
+$code.=<<___;
+.align	32
+_aes128_decrypt_1x:
+___
+for ($i=0; $i<4; $i++) {
+    $code.=<<___;
+	aes_dround01	%f`16+8*$i+0`, %f0, %f2, %f4
+	aes_dround23	%f`16+8*$i+2`, %f0, %f2, %f2
+	aes_dround01	%f`16+8*$i+4`, %f4, %f2, %f0
+	aes_dround23	%f`16+8*$i+6`, %f4, %f2, %f2
+___
+}
+$code.=<<___;
+	aes_dround01	%f48, %f0, %f2, %f4
+	aes_dround23	%f50, %f0, %f2, %f2
+	aes_dround01_l	%f52, %f4, %f2, %f0
+	retl
+	aes_dround23_l	%f54, %f4, %f2, %f2
+.type	_aes128_decrypt_1x,#function
+.size	_aes128_decrypt_1x,.-_aes128_decrypt_1x
+
+.align	32
+_aes128_decrypt_2x:
+___
+for ($i=0; $i<4; $i++) {
+    $code.=<<___;
+	aes_dround01	%f`16+8*$i+0`, %f0, %f2, %f8
+	aes_dround23	%f`16+8*$i+2`, %f0, %f2, %f2
+	aes_dround01	%f`16+8*$i+0`, %f4, %f6, %f10
+	aes_dround23	%f`16+8*$i+2`, %f4, %f6, %f6
+	aes_dround01	%f`16+8*$i+4`, %f8, %f2, %f0
+	aes_dround23	%f`16+8*$i+6`, %f8, %f2, %f2
+	aes_dround01	%f`16+8*$i+4`, %f10, %f6, %f4
+	aes_dround23	%f`16+8*$i+6`, %f10, %f6, %f6
+___
+}
+$code.=<<___;
+	aes_dround01	%f48, %f0, %f2, %f8
+	aes_dround23	%f50, %f0, %f2, %f2
+	aes_dround01	%f48, %f4, %f6, %f10
+	aes_dround23	%f50, %f4, %f6, %f6
+	aes_dround01_l	%f52, %f8, %f2, %f0
+	aes_dround23_l	%f54, %f8, %f2, %f2
+	aes_dround01_l	%f52, %f10, %f6, %f4
+	retl
+	aes_dround23_l	%f54, %f10, %f6, %f6
+.type	_aes128_decrypt_2x,#function
+.size	_aes128_decrypt_2x,.-_aes128_decrypt_2x
+___
+
+$code.=<<___;
+.align	32
+_aes192_encrypt_1x:
+___
+for ($i=0; $i<5; $i++) {
+    $code.=<<___;
+	aes_eround01	%f`16+8*$i+0`, %f0, %f2, %f4
+	aes_eround23	%f`16+8*$i+2`, %f0, %f2, %f2
+	aes_eround01	%f`16+8*$i+4`, %f4, %f2, %f0
+	aes_eround23	%f`16+8*$i+6`, %f4, %f2, %f2
+___
+}
+$code.=<<___;
+	aes_eround01	%f56, %f0, %f2, %f4
+	aes_eround23	%f58, %f0, %f2, %f2
+	aes_eround01_l	%f60, %f4, %f2, %f0
+	retl
+	aes_eround23_l	%f62, %f4, %f2, %f2
+.type	_aes192_encrypt_1x,#function
+.size	_aes192_encrypt_1x,.-_aes192_encrypt_1x
+
+.align	32
+_aes192_encrypt_2x:
+___
+for ($i=0; $i<5; $i++) {
+    $code.=<<___;
+	aes_eround01	%f`16+8*$i+0`, %f0, %f2, %f8
+	aes_eround23	%f`16+8*$i+2`, %f0, %f2, %f2
+	aes_eround01	%f`16+8*$i+0`, %f4, %f6, %f10
+	aes_eround23	%f`16+8*$i+2`, %f4, %f6, %f6
+	aes_eround01	%f`16+8*$i+4`, %f8, %f2, %f0
+	aes_eround23	%f`16+8*$i+6`, %f8, %f2, %f2
+	aes_eround01	%f`16+8*$i+4`, %f10, %f6, %f4
+	aes_eround23	%f`16+8*$i+6`, %f10, %f6, %f6
+___
+}
+$code.=<<___;
+	aes_eround01	%f56, %f0, %f2, %f8
+	aes_eround23	%f58, %f0, %f2, %f2
+	aes_eround01	%f56, %f4, %f6, %f10
+	aes_eround23	%f58, %f4, %f6, %f6
+	aes_eround01_l	%f60, %f8, %f2, %f0
+	aes_eround23_l	%f62, %f8, %f2, %f2
+	aes_eround01_l	%f60, %f10, %f6, %f4
+	retl
+	aes_eround23_l	%f62, %f10, %f6, %f6
+.type	_aes192_encrypt_2x,#function
+.size	_aes192_encrypt_2x,.-_aes192_encrypt_2x
+
+.align	32
+_aes256_encrypt_1x:
+	aes_eround01	%f16, %f0, %f2, %f4
+	aes_eround23	%f18, %f0, %f2, %f2
+	ldd		[$key + 208], %f16
+	ldd		[$key + 216], %f18
+	aes_eround01	%f20, %f4, %f2, %f0
+	aes_eround23	%f22, %f4, %f2, %f2
+	ldd		[$key + 224], %f20
+	ldd		[$key + 232], %f22
+___
+for ($i=1; $i<6; $i++) {
+    $code.=<<___;
+	aes_eround01	%f`16+8*$i+0`, %f0, %f2, %f4
+	aes_eround23	%f`16+8*$i+2`, %f0, %f2, %f2
+	aes_eround01	%f`16+8*$i+4`, %f4, %f2, %f0
+	aes_eround23	%f`16+8*$i+6`, %f4, %f2, %f2
+___
+}
+$code.=<<___;
+	aes_eround01	%f16, %f0, %f2, %f4
+	aes_eround23	%f18, %f0, %f2, %f2
+	ldd		[$key + 16], %f16
+	ldd		[$key + 24], %f18
+	aes_eround01_l	%f20, %f4, %f2, %f0
+	aes_eround23_l	%f22, %f4, %f2, %f2
+	ldd		[$key + 32], %f20
+	retl
+	ldd		[$key + 40], %f22
+.type	_aes256_encrypt_1x,#function
+.size	_aes256_encrypt_1x,.-_aes256_encrypt_1x
+
+.align	32
+_aes256_encrypt_2x:
+	aes_eround01	%f16, %f0, %f2, %f8
+	aes_eround23	%f18, %f0, %f2, %f2
+	aes_eround01	%f16, %f4, %f6, %f10
+	aes_eround23	%f18, %f4, %f6, %f6
+	ldd		[$key + 208], %f16
+	ldd		[$key + 216], %f18
+	aes_eround01	%f20, %f8, %f2, %f0
+	aes_eround23	%f22, %f8, %f2, %f2
+	aes_eround01	%f20, %f10, %f6, %f4
+	aes_eround23	%f22, %f10, %f6, %f6
+	ldd		[$key + 224], %f20
+	ldd		[$key + 232], %f22
+___
+for ($i=1; $i<6; $i++) {
+    $code.=<<___;
+	aes_eround01	%f`16+8*$i+0`, %f0, %f2, %f8
+	aes_eround23	%f`16+8*$i+2`, %f0, %f2, %f2
+	aes_eround01	%f`16+8*$i+0`, %f4, %f6, %f10
+	aes_eround23	%f`16+8*$i+2`, %f4, %f6, %f6
+	aes_eround01	%f`16+8*$i+4`, %f8, %f2, %f0
+	aes_eround23	%f`16+8*$i+6`, %f8, %f2, %f2
+	aes_eround01	%f`16+8*$i+4`, %f10, %f6, %f4
+	aes_eround23	%f`16+8*$i+6`, %f10, %f6, %f6
+___
+}
+$code.=<<___;
+	aes_eround01	%f16, %f0, %f2, %f8
+	aes_eround23	%f18, %f0, %f2, %f2
+	aes_eround01	%f16, %f4, %f6, %f10
+	aes_eround23	%f18, %f4, %f6, %f6
+	ldd		[$key + 16], %f16
+	ldd		[$key + 24], %f18
+	aes_eround01_l	%f20, %f8, %f2, %f0
+	aes_eround23_l	%f22, %f8, %f2, %f2
+	aes_eround01_l	%f20, %f10, %f6, %f4
+	aes_eround23_l	%f22, %f10, %f6, %f6
+	ldd		[$key + 32], %f20
+	retl
+	ldd		[$key + 40], %f22
+.type	_aes256_encrypt_2x,#function
+.size	_aes256_encrypt_2x,.-_aes256_encrypt_2x
+
+.align	32
+_aes192_loadkey:
+	ldx		[$key + 0], %g4
+	ldx		[$key + 8], %g5
+___
+for ($i=2; $i<26;$i++) {			# load key schedule
+    $code.=<<___;
+	ldd		[$key + `8*$i`], %f`12+2*$i`
+___
+}
+$code.=<<___;
+	retl
+	nop
+.type	_aes192_loadkey,#function
+.size	_aes192_loadkey,.-_aes192_loadkey
+_aes256_loadkey=_aes192_loadkey
+_aes192_load_enckey=_aes192_loadkey
+_aes192_load_deckey=_aes192_loadkey
+_aes256_load_enckey=_aes192_loadkey
+_aes256_load_deckey=_aes192_loadkey
+___
+
+&alg_cbc_encrypt_implement("aes",256);
+&alg_cbc_encrypt_implement("aes",192);
+if ($::evp) {
+    &alg_ctr32_implement("aes",256);
+    &alg_xts_implement("aes",256,"en");
+    &alg_xts_implement("aes",256,"de");
+    &alg_ctr32_implement("aes",192);
+}
+&alg_cbc_decrypt_implement("aes",192);
+&alg_cbc_decrypt_implement("aes",256);
+
+$code.=<<___;
+.align	32
+_aes256_decrypt_1x:
+	aes_dround01	%f16, %f0, %f2, %f4
+	aes_dround23	%f18, %f0, %f2, %f2
+	ldd		[$key + 208], %f16
+	ldd		[$key + 216], %f18
+	aes_dround01	%f20, %f4, %f2, %f0
+	aes_dround23	%f22, %f4, %f2, %f2
+	ldd		[$key + 224], %f20
+	ldd		[$key + 232], %f22
+___
+for ($i=1; $i<6; $i++) {
+    $code.=<<___;
+	aes_dround01	%f`16+8*$i+0`, %f0, %f2, %f4
+	aes_dround23	%f`16+8*$i+2`, %f0, %f2, %f2
+	aes_dround01	%f`16+8*$i+4`, %f4, %f2, %f0
+	aes_dround23	%f`16+8*$i+6`, %f4, %f2, %f2
+___
+}
+$code.=<<___;
+	aes_dround01	%f16, %f0, %f2, %f4
+	aes_dround23	%f18, %f0, %f2, %f2
+	ldd		[$key + 16], %f16
+	ldd		[$key + 24], %f18
+	aes_dround01_l	%f20, %f4, %f2, %f0
+	aes_dround23_l	%f22, %f4, %f2, %f2
+	ldd		[$key + 32], %f20
+	retl
+	ldd		[$key + 40], %f22
+.type	_aes256_decrypt_1x,#function
+.size	_aes256_decrypt_1x,.-_aes256_decrypt_1x
+
+.align	32
+_aes256_decrypt_2x:
+	aes_dround01	%f16, %f0, %f2, %f8
+	aes_dround23	%f18, %f0, %f2, %f2
+	aes_dround01	%f16, %f4, %f6, %f10
+	aes_dround23	%f18, %f4, %f6, %f6
+	ldd		[$key + 208], %f16
+	ldd		[$key + 216], %f18
+	aes_dround01	%f20, %f8, %f2, %f0
+	aes_dround23	%f22, %f8, %f2, %f2
+	aes_dround01	%f20, %f10, %f6, %f4
+	aes_dround23	%f22, %f10, %f6, %f6
+	ldd		[$key + 224], %f20
+	ldd		[$key + 232], %f22
+___
+for ($i=1; $i<6; $i++) {
+    $code.=<<___;
+	aes_dround01	%f`16+8*$i+0`, %f0, %f2, %f8
+	aes_dround23	%f`16+8*$i+2`, %f0, %f2, %f2
+	aes_dround01	%f`16+8*$i+0`, %f4, %f6, %f10
+	aes_dround23	%f`16+8*$i+2`, %f4, %f6, %f6
+	aes_dround01	%f`16+8*$i+4`, %f8, %f2, %f0
+	aes_dround23	%f`16+8*$i+6`, %f8, %f2, %f2
+	aes_dround01	%f`16+8*$i+4`, %f10, %f6, %f4
+	aes_dround23	%f`16+8*$i+6`, %f10, %f6, %f6
+___
+}
+$code.=<<___;
+	aes_dround01	%f16, %f0, %f2, %f8
+	aes_dround23	%f18, %f0, %f2, %f2
+	aes_dround01	%f16, %f4, %f6, %f10
+	aes_dround23	%f18, %f4, %f6, %f6
+	ldd		[$key + 16], %f16
+	ldd		[$key + 24], %f18
+	aes_dround01_l	%f20, %f8, %f2, %f0
+	aes_dround23_l	%f22, %f8, %f2, %f2
+	aes_dround01_l	%f20, %f10, %f6, %f4
+	aes_dround23_l	%f22, %f10, %f6, %f6
+	ldd		[$key + 32], %f20
+	retl
+	ldd		[$key + 40], %f22
+.type	_aes256_decrypt_2x,#function
+.size	_aes256_decrypt_2x,.-_aes256_decrypt_2x
+
+.align	32
+_aes192_decrypt_1x:
+___
+for ($i=0; $i<5; $i++) {
+    $code.=<<___;
+	aes_dround01	%f`16+8*$i+0`, %f0, %f2, %f4
+	aes_dround23	%f`16+8*$i+2`, %f0, %f2, %f2
+	aes_dround01	%f`16+8*$i+4`, %f4, %f2, %f0
+	aes_dround23	%f`16+8*$i+6`, %f4, %f2, %f2
+___
+}
+$code.=<<___;
+	aes_dround01	%f56, %f0, %f2, %f4
+	aes_dround23	%f58, %f0, %f2, %f2
+	aes_dround01_l	%f60, %f4, %f2, %f0
+	retl
+	aes_dround23_l	%f62, %f4, %f2, %f2
+.type	_aes192_decrypt_1x,#function
+.size	_aes192_decrypt_1x,.-_aes192_decrypt_1x
+
+.align	32
+_aes192_decrypt_2x:
+___
+for ($i=0; $i<5; $i++) {
+    $code.=<<___;
+	aes_dround01	%f`16+8*$i+0`, %f0, %f2, %f8
+	aes_dround23	%f`16+8*$i+2`, %f0, %f2, %f2
+	aes_dround01	%f`16+8*$i+0`, %f4, %f6, %f10
+	aes_dround23	%f`16+8*$i+2`, %f4, %f6, %f6
+	aes_dround01	%f`16+8*$i+4`, %f8, %f2, %f0
+	aes_dround23	%f`16+8*$i+6`, %f8, %f2, %f2
+	aes_dround01	%f`16+8*$i+4`, %f10, %f6, %f4
+	aes_dround23	%f`16+8*$i+6`, %f10, %f6, %f6
+___
+}
+$code.=<<___;
+	aes_dround01	%f56, %f0, %f2, %f8
+	aes_dround23	%f58, %f0, %f2, %f2
+	aes_dround01	%f56, %f4, %f6, %f10
+	aes_dround23	%f58, %f4, %f6, %f6
+	aes_dround01_l	%f60, %f8, %f2, %f0
+	aes_dround23_l	%f62, %f8, %f2, %f2
+	aes_dround01_l	%f60, %f10, %f6, %f4
+	retl
+	aes_dround23_l	%f62, %f10, %f6, %f6
+.type	_aes192_decrypt_2x,#function
+.size	_aes192_decrypt_2x,.-_aes192_decrypt_2x
+___
+}}}
+
+if (!$::evp) {
+$code.=<<___;
+.global	AES_encrypt
+AES_encrypt=aes_t4_encrypt
+.global	AES_decrypt
+AES_decrypt=aes_t4_decrypt
+.global	AES_set_encrypt_key
+.align	32
+AES_set_encrypt_key:
+	andcc		%o2, 7, %g0		! check alignment
+	bnz,a,pn	%icc, 1f
+	mov		-1, %o0
+	brz,a,pn	%o0, 1f
+	mov		-1, %o0
+	brz,a,pn	%o2, 1f
+	mov		-1, %o0
+	andncc		%o1, 0x1c0, %g0
+	bnz,a,pn	%icc, 1f
+	mov		-2, %o0
+	cmp		%o1, 128
+	bl,a,pn		%icc, 1f
+	mov		-2, %o0
+	b		aes_t4_set_encrypt_key
+	nop
+1:	retl
+	nop
+.type	AES_set_encrypt_key,#function
+.size	AES_set_encrypt_key,.-AES_set_encrypt_key
+
+.global	AES_set_decrypt_key
+.align	32
+AES_set_decrypt_key:
+	andcc		%o2, 7, %g0		! check alignment
+	bnz,a,pn	%icc, 1f
+	mov		-1, %o0
+	brz,a,pn	%o0, 1f
+	mov		-1, %o0
+	brz,a,pn	%o2, 1f
+	mov		-1, %o0
+	andncc		%o1, 0x1c0, %g0
+	bnz,a,pn	%icc, 1f
+	mov		-2, %o0
+	cmp		%o1, 128
+	bl,a,pn		%icc, 1f
+	mov		-2, %o0
+	b		aes_t4_set_decrypt_key
+	nop
+1:	retl
+	nop
+.type	AES_set_decrypt_key,#function
+.size	AES_set_decrypt_key,.-AES_set_decrypt_key
+___
+
+my ($inp,$out,$len,$key,$ivec,$enc)=map("%o$_",(0..5));
+
+$code.=<<___;
+.globl	AES_cbc_encrypt
+.align	32
+AES_cbc_encrypt:
+	ld		[$key + 240], %g1
+	nop
+	brz		$enc, .Lcbc_decrypt
+	cmp		%g1, 12
+
+	bl,pt		%icc, aes128_t4_cbc_encrypt
+	nop
+	be,pn		%icc, aes192_t4_cbc_encrypt
+	nop
+	ba		aes256_t4_cbc_encrypt
+	nop
+
+.Lcbc_decrypt:
+	bl,pt		%icc, aes128_t4_cbc_decrypt
+	nop
+	be,pn		%icc, aes192_t4_cbc_decrypt
+	nop
+	ba		aes256_t4_cbc_decrypt
+	nop
+.type	AES_cbc_encrypt,#function
+.size	AES_cbc_encrypt,.-AES_cbc_encrypt
+___
+}
+$code.=<<___;
+.asciz	"AES for SPARC T4, David S. Miller, Andy Polyakov"
+.align	4
+___
+
+&emit_assembler();
+
+close STDOUT or die "error closing STDOUT: $!";
diff --git a/ap/lib/libssl/openssl-1.1.1o/crypto/aes/asm/aesv8-armx.pl b/ap/lib/libssl/openssl-1.1.1o/crypto/aes/asm/aesv8-armx.pl
new file mode 100755
index 0000000..2b0e982
--- /dev/null
+++ b/ap/lib/libssl/openssl-1.1.1o/crypto/aes/asm/aesv8-armx.pl
@@ -0,0 +1,1019 @@
+#! /usr/bin/env perl
+# Copyright 2014-2020 The OpenSSL Project Authors. All Rights Reserved.
+#
+# Licensed under the OpenSSL license (the "License").  You may not use
+# this file except in compliance with the License.  You can obtain a copy
+# in the file LICENSE in the source distribution or at
+# https://www.openssl.org/source/license.html
+
+#
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+#
+# This module implements support for ARMv8 AES instructions. The
+# module is endian-agnostic in the sense that it supports both big-
+# and little-endian cases, and it likewise supports both 32- and
+# 64-bit modes of operation. The latter is achieved by limiting the
+# number of utilized registers to 16, which implies additional NEON
+# load and integer instructions. This has no effect on the mighty
+# Apple A7, where results are literally equal to the theoretical
+# estimates based on AES instruction latencies and issue rates. On
+# Cortex-A53, an in-order execution core, this costs up to 10-15%,
+# which is partially compensated by a dedicated code path for the
+# 128-bit CBC encrypt case. On Cortex-A57, parallelizable-mode
+# performance seems to be limited by the sheer amount of NEON
+# instructions...
+#
+# Performance in cycles per byte processed with 128-bit key:
+#
+#		CBC enc		CBC dec		CTR
+# Apple A7	2.39		1.20		1.20
+# Cortex-A53	1.32		1.29		1.46
+# Cortex-A57(*)	1.95		0.85		0.93
+# Denver	1.96		0.86		0.80
+# Mongoose	1.33		1.20		1.20
+# Kryo		1.26		0.94		1.00
+#
+# (*)	original 3.64/1.34/1.32 results were for the r0p0 revision
+#	and are still the same even for the updated module;
+
+$flavour = shift;
+$output  = shift;
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+( $xlate="${dir}arm-xlate.pl" and -f $xlate ) or
+( $xlate="${dir}../../perlasm/arm-xlate.pl" and -f $xlate) or
+die "can't locate arm-xlate.pl";
+
+open OUT,"| \"$^X\" $xlate $flavour $output";
+*STDOUT=*OUT;
+
+$prefix="aes_v8";
+
+$code=<<___;
+#include "arm_arch.h"
+
+#if __ARM_MAX_ARCH__>=7
+.text
+___
+$code.=".arch	armv8-a+crypto\n"			if ($flavour =~ /64/);
+$code.=<<___						if ($flavour !~ /64/);
+.arch	armv7-a	// don't confuse not-so-latest binutils with armv8 :-)
+.fpu	neon
+.code	32
+#undef	__thumb2__
+___
+
+# Assembler mnemonics are an eclectic mix of 32- and 64-bit syntax:
+# NEON uses mostly 32-bit mnemonics, integer mostly 64-bit. The goal is
+# to maintain both 32- and 64-bit code within a single module and
+# transliterate common code to either flavour with regex voodoo.
+#
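+#
+# For instance (a hedged illustration of the transliteration pass at the
+# bottom of this file), the 64-bit flavour renames q registers with a
+# substitution equivalent to:
+#
+if (0) {	# illustration only, never executed
+	my $line = "veor	q8,q8,q9";
+	$line =~ s/\bq([0-9]+)\b/"v".($1<8?$1:$1+8).".16b"/ge;
+	# $line is now "veor	v16.16b,v16.16b,v17.16b"
+}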
+{{{
+my ($inp,$bits,$out,$ptr,$rounds)=("x0","w1","x2","x3","w12");
+my ($zero,$rcon,$mask,$in0,$in1,$tmp,$key)=
+	$flavour=~/64/? map("q$_",(0..6)) : map("q$_",(0..3,8..10));
+
+
+$code.=<<___;
+.align	5
+.Lrcon:
+.long	0x01,0x01,0x01,0x01
+.long	0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d	// rotate-n-splat
+.long	0x1b,0x1b,0x1b,0x1b
+
+.globl	${prefix}_set_encrypt_key
+.type	${prefix}_set_encrypt_key,%function
+.align	5
+${prefix}_set_encrypt_key:
+.Lenc_key:
+___
+$code.=<<___	if ($flavour =~ /64/);
+	stp	x29,x30,[sp,#-16]!
+	add	x29,sp,#0
+___
+$code.=<<___;
+	mov	$ptr,#-1
+	cmp	$inp,#0
+	b.eq	.Lenc_key_abort
+	cmp	$out,#0
+	b.eq	.Lenc_key_abort
+	mov	$ptr,#-2
+	cmp	$bits,#128
+	b.lt	.Lenc_key_abort
+	cmp	$bits,#256
+	b.gt	.Lenc_key_abort
+	tst	$bits,#0x3f
+	b.ne	.Lenc_key_abort
+
+	adr	$ptr,.Lrcon
+	cmp	$bits,#192
+
+	veor	$zero,$zero,$zero
+	vld1.8	{$in0},[$inp],#16
+	mov	$bits,#8		// reuse $bits
+	vld1.32	{$rcon,$mask},[$ptr],#32
+
+	b.lt	.Loop128
+	b.eq	.L192
+	b	.L256
+
+.align	4
+.Loop128:
+	vtbl.8	$key,{$in0},$mask
+	vext.8	$tmp,$zero,$in0,#12
+	vst1.32	{$in0},[$out],#16
+	aese	$key,$zero
+	subs	$bits,$bits,#1
+
+	veor	$in0,$in0,$tmp
+	vext.8	$tmp,$zero,$tmp,#12
+	veor	$in0,$in0,$tmp
+	vext.8	$tmp,$zero,$tmp,#12
+	 veor	$key,$key,$rcon
+	veor	$in0,$in0,$tmp
+	vshl.u8	$rcon,$rcon,#1
+	veor	$in0,$in0,$key
+	b.ne	.Loop128
+
+	vld1.32	{$rcon},[$ptr]
+
+	vtbl.8	$key,{$in0},$mask
+	vext.8	$tmp,$zero,$in0,#12
+	vst1.32	{$in0},[$out],#16
+	aese	$key,$zero
+
+	veor	$in0,$in0,$tmp
+	vext.8	$tmp,$zero,$tmp,#12
+	veor	$in0,$in0,$tmp
+	vext.8	$tmp,$zero,$tmp,#12
+	 veor	$key,$key,$rcon
+	veor	$in0,$in0,$tmp
+	vshl.u8	$rcon,$rcon,#1
+	veor	$in0,$in0,$key
+
+	vtbl.8	$key,{$in0},$mask
+	vext.8	$tmp,$zero,$in0,#12
+	vst1.32	{$in0},[$out],#16
+	aese	$key,$zero
+
+	veor	$in0,$in0,$tmp
+	vext.8	$tmp,$zero,$tmp,#12
+	veor	$in0,$in0,$tmp
+	vext.8	$tmp,$zero,$tmp,#12
+	 veor	$key,$key,$rcon
+	veor	$in0,$in0,$tmp
+	veor	$in0,$in0,$key
+	vst1.32	{$in0},[$out]
+	add	$out,$out,#0x50
+
+	mov	$rounds,#10
+	b	.Ldone
+
+.align	4
+.L192:
+	vld1.8	{$in1},[$inp],#8
+	vmov.i8	$key,#8			// borrow $key
+	vst1.32	{$in0},[$out],#16
+	vsub.i8	$mask,$mask,$key	// adjust the mask
+
+.Loop192:
+	vtbl.8	$key,{$in1},$mask
+	vext.8	$tmp,$zero,$in0,#12
+#ifdef __ARMEB__
+	vst1.32	{$in1},[$out],#16
+	sub	$out,$out,#8
+#else
+	vst1.32	{$in1},[$out],#8
+#endif
+	aese	$key,$zero
+	subs	$bits,$bits,#1
+
+	veor	$in0,$in0,$tmp
+	vext.8	$tmp,$zero,$tmp,#12
+	veor	$in0,$in0,$tmp
+	vext.8	$tmp,$zero,$tmp,#12
+	veor	$in0,$in0,$tmp
+
+	vdup.32	$tmp,${in0}[3]
+	veor	$tmp,$tmp,$in1
+	 veor	$key,$key,$rcon
+	vext.8	$in1,$zero,$in1,#12
+	vshl.u8	$rcon,$rcon,#1
+	veor	$in1,$in1,$tmp
+	veor	$in0,$in0,$key
+	veor	$in1,$in1,$key
+	vst1.32	{$in0},[$out],#16
+	b.ne	.Loop192
+
+	mov	$rounds,#12
+	add	$out,$out,#0x20
+	b	.Ldone
+
+.align	4
+.L256:
+	vld1.8	{$in1},[$inp]
+	mov	$bits,#7
+	mov	$rounds,#14
+	vst1.32	{$in0},[$out],#16
+
+.Loop256:
+	vtbl.8	$key,{$in1},$mask
+	vext.8	$tmp,$zero,$in0,#12
+	vst1.32	{$in1},[$out],#16
+	aese	$key,$zero
+	subs	$bits,$bits,#1
+
+	veor	$in0,$in0,$tmp
+	vext.8	$tmp,$zero,$tmp,#12
+	veor	$in0,$in0,$tmp
+	vext.8	$tmp,$zero,$tmp,#12
+	 veor	$key,$key,$rcon
+	veor	$in0,$in0,$tmp
+	vshl.u8	$rcon,$rcon,#1
+	veor	$in0,$in0,$key
+	vst1.32	{$in0},[$out],#16
+	b.eq	.Ldone
+
+	vdup.32	$key,${in0}[3]		// just splat
+	vext.8	$tmp,$zero,$in1,#12
+	aese	$key,$zero
+
+	veor	$in1,$in1,$tmp
+	vext.8	$tmp,$zero,$tmp,#12
+	veor	$in1,$in1,$tmp
+	vext.8	$tmp,$zero,$tmp,#12
+	veor	$in1,$in1,$tmp
+
+	veor	$in1,$in1,$key
+	b	.Loop256
+
+.Ldone:
+	str	$rounds,[$out]
+	mov	$ptr,#0
+
+.Lenc_key_abort:
+	mov	x0,$ptr			// return value
+	`"ldr	x29,[sp],#16"		if ($flavour =~ /64/)`
+	ret
+.size	${prefix}_set_encrypt_key,.-${prefix}_set_encrypt_key
+
+.globl	${prefix}_set_decrypt_key
+.type	${prefix}_set_decrypt_key,%function
+.align	5
+${prefix}_set_decrypt_key:
+___
+$code.=<<___	if ($flavour =~ /64/);
+	.inst	0xd503233f		// paciasp
+	stp	x29,x30,[sp,#-16]!
+	add	x29,sp,#0
+___
+$code.=<<___	if ($flavour !~ /64/);
+	stmdb	sp!,{r4,lr}
+___
+$code.=<<___;
+	bl	.Lenc_key
+
+	cmp	x0,#0
+	b.ne	.Ldec_key_abort
+
+	sub	$out,$out,#240		// restore original $out
+	mov	x4,#-16
+	add	$inp,$out,x12,lsl#4	// end of key schedule
+
+	vld1.32	{v0.16b},[$out]
+	vld1.32	{v1.16b},[$inp]
+	vst1.32	{v0.16b},[$inp],x4
+	vst1.32	{v1.16b},[$out],#16
+
+.Loop_imc:
+	vld1.32	{v0.16b},[$out]
+	vld1.32	{v1.16b},[$inp]
+	aesimc	v0.16b,v0.16b
+	aesimc	v1.16b,v1.16b
+	vst1.32	{v0.16b},[$inp],x4
+	vst1.32	{v1.16b},[$out],#16
+	cmp	$inp,$out
+	b.hi	.Loop_imc
+
+	vld1.32	{v0.16b},[$out]
+	aesimc	v0.16b,v0.16b
+	vst1.32	{v0.16b},[$inp]
+
+	eor	x0,x0,x0		// return value
+.Ldec_key_abort:
+___
+$code.=<<___	if ($flavour !~ /64/);
+	ldmia	sp!,{r4,pc}
+___
+$code.=<<___	if ($flavour =~ /64/);
+	ldp	x29,x30,[sp],#16
+	.inst	0xd50323bf		// autiasp
+	ret
+___
+$code.=<<___;
+.size	${prefix}_set_decrypt_key,.-${prefix}_set_decrypt_key
+___
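+
+# (Note: set_decrypt_key above uses the "equivalent inverse cipher"
+# construction: it runs the encrypt key schedule, then walks it from both
+# ends, swapping round keys and applying aesimc (InvMixColumns) to every
+# key except the outermost two, so aesd/aesimc can consume it directly.)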
+}}}
+{{{
+sub gen_block () {
+my $dir = shift;
+my ($e,$mc) = $dir eq "en" ? ("e","mc") : ("d","imc");
+my ($inp,$out,$key)=map("x$_",(0..2));
+my $rounds="w3";
+my ($rndkey0,$rndkey1,$inout)=map("q$_",(0..3));
+
+$code.=<<___;
+.globl	${prefix}_${dir}crypt
+.type	${prefix}_${dir}crypt,%function
+.align	5
+${prefix}_${dir}crypt:
+	ldr	$rounds,[$key,#240]
+	vld1.32	{$rndkey0},[$key],#16
+	vld1.8	{$inout},[$inp]
+	sub	$rounds,$rounds,#2
+	vld1.32	{$rndkey1},[$key],#16
+
+.Loop_${dir}c:
+	aes$e	$inout,$rndkey0
+	aes$mc	$inout,$inout
+	vld1.32	{$rndkey0},[$key],#16
+	subs	$rounds,$rounds,#2
+	aes$e	$inout,$rndkey1
+	aes$mc	$inout,$inout
+	vld1.32	{$rndkey1},[$key],#16
+	b.gt	.Loop_${dir}c
+
+	aes$e	$inout,$rndkey0
+	aes$mc	$inout,$inout
+	vld1.32	{$rndkey0},[$key]
+	aes$e	$inout,$rndkey1
+	veor	$inout,$inout,$rndkey0
+
+	vst1.8	{$inout},[$out]
+	ret
+.size	${prefix}_${dir}crypt,.-${prefix}_${dir}crypt
+___
+}
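+# Each generated single-block routine keeps two round keys in flight:
+# the .Loop_*c body consumes two rounds per iteration until the
+# $rounds-2 budget is exhausted, and the final round pair, whose last
+# aese/aesd omits MixColumns, sits outside the loop together with the
+# veor of the last round key.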
+&gen_block("en");
+&gen_block("de");
+}}}
+{{{
+my ($inp,$out,$len,$key,$ivp)=map("x$_",(0..4)); my $enc="w5";
+my ($rounds,$cnt,$key_,$step,$step1)=($enc,"w6","x7","x8","x12");
+my ($dat0,$dat1,$in0,$in1,$tmp0,$tmp1,$ivec,$rndlast)=map("q$_",(0..7));
+
+my ($dat,$tmp,$rndzero_n_last)=($dat0,$tmp0,$tmp1);
+my ($key4,$key5,$key6,$key7)=("x6","x12","x14",$key);
+
+### q8-q15	preloaded key schedule
+
+$code.=<<___;
+.globl	${prefix}_cbc_encrypt
+.type	${prefix}_cbc_encrypt,%function
+.align	5
+${prefix}_cbc_encrypt:
+___
+$code.=<<___	if ($flavour =~ /64/);
+	stp	x29,x30,[sp,#-16]!
+	add	x29,sp,#0
+___
+$code.=<<___	if ($flavour !~ /64/);
+	mov	ip,sp
+	stmdb	sp!,{r4-r8,lr}
+	vstmdb	sp!,{d8-d15}            @ ABI specification says so
+	ldmia	ip,{r4-r5}		@ load remaining args
+___
+$code.=<<___;
+	subs	$len,$len,#16
+	mov	$step,#16
+	b.lo	.Lcbc_abort
+	cclr	$step,eq
+
+	cmp	$enc,#0			// en- or decrypting?
+	ldr	$rounds,[$key,#240]
+	and	$len,$len,#-16
+	vld1.8	{$ivec},[$ivp]
+	vld1.8	{$dat},[$inp],$step
+
+	vld1.32	{q8-q9},[$key]		// load key schedule...
+	sub	$rounds,$rounds,#6
+	add	$key_,$key,x5,lsl#4	// pointer to last 7 round keys
+	sub	$rounds,$rounds,#2
+	vld1.32	{q10-q11},[$key_],#32
+	vld1.32	{q12-q13},[$key_],#32
+	vld1.32	{q14-q15},[$key_],#32
+	vld1.32	{$rndlast},[$key_]
+
+	add	$key_,$key,#32
+	mov	$cnt,$rounds
+	b.eq	.Lcbc_dec
+
+	cmp	$rounds,#2
+	veor	$dat,$dat,$ivec
+	veor	$rndzero_n_last,q8,$rndlast
+	b.eq	.Lcbc_enc128
+
+	vld1.32	{$in0-$in1},[$key_]
+	add	$key_,$key,#16
+	add	$key4,$key,#16*4
+	add	$key5,$key,#16*5
+	aese	$dat,q8
+	aesmc	$dat,$dat
+	add	$key6,$key,#16*6
+	add	$key7,$key,#16*7
+	b	.Lenter_cbc_enc
+
+.align	4
+.Loop_cbc_enc:
+	aese	$dat,q8
+	aesmc	$dat,$dat
+	 vst1.8	{$ivec},[$out],#16
+.Lenter_cbc_enc:
+	aese	$dat,q9
+	aesmc	$dat,$dat
+	aese	$dat,$in0
+	aesmc	$dat,$dat
+	vld1.32	{q8},[$key4]
+	cmp	$rounds,#4
+	aese	$dat,$in1
+	aesmc	$dat,$dat
+	vld1.32	{q9},[$key5]
+	b.eq	.Lcbc_enc192
+
+	aese	$dat,q8
+	aesmc	$dat,$dat
+	vld1.32	{q8},[$key6]
+	aese	$dat,q9
+	aesmc	$dat,$dat
+	vld1.32	{q9},[$key7]
+	nop
+
+.Lcbc_enc192:
+	aese	$dat,q8
+	aesmc	$dat,$dat
+	 subs	$len,$len,#16
+	aese	$dat,q9
+	aesmc	$dat,$dat
+	 cclr	$step,eq
+	aese	$dat,q10
+	aesmc	$dat,$dat
+	aese	$dat,q11
+	aesmc	$dat,$dat
+	 vld1.8	{q8},[$inp],$step
+	aese	$dat,q12
+	aesmc	$dat,$dat
+	 veor	q8,q8,$rndzero_n_last
+	aese	$dat,q13
+	aesmc	$dat,$dat
+	 vld1.32 {q9},[$key_]		// re-pre-load rndkey[1]
+	aese	$dat,q14
+	aesmc	$dat,$dat
+	aese	$dat,q15
+	veor	$ivec,$dat,$rndlast
+	b.hs	.Loop_cbc_enc
+
+	vst1.8	{$ivec},[$out],#16
+	b	.Lcbc_done
+
+.align	5
+.Lcbc_enc128:
+	vld1.32	{$in0-$in1},[$key_]
+	aese	$dat,q8
+	aesmc	$dat,$dat
+	b	.Lenter_cbc_enc128
+.Loop_cbc_enc128:
+	aese	$dat,q8
+	aesmc	$dat,$dat
+	 vst1.8	{$ivec},[$out],#16
+.Lenter_cbc_enc128:
+	aese	$dat,q9
+	aesmc	$dat,$dat
+	 subs	$len,$len,#16
+	aese	$dat,$in0
+	aesmc	$dat,$dat
+	 cclr	$step,eq
+	aese	$dat,$in1
+	aesmc	$dat,$dat
+	aese	$dat,q10
+	aesmc	$dat,$dat
+	aese	$dat,q11
+	aesmc	$dat,$dat
+	 vld1.8	{q8},[$inp],$step
+	aese	$dat,q12
+	aesmc	$dat,$dat
+	aese	$dat,q13
+	aesmc	$dat,$dat
+	aese	$dat,q14
+	aesmc	$dat,$dat
+	 veor	q8,q8,$rndzero_n_last
+	aese	$dat,q15
+	veor	$ivec,$dat,$rndlast
+	b.hs	.Loop_cbc_enc128
+
+	vst1.8	{$ivec},[$out],#16
+	b	.Lcbc_done
+___
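+# CBC encryption is inherently serial (each plaintext block is XORed with
+# the previous ciphertext before encryption), so the paths above pipeline
+# a single block; CBC decryption below processes three blocks in parallel
+# because the whole ciphertext chain is known up front.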
+{
+my ($dat2,$in2,$tmp2)=map("q$_",(10,11,9));
+$code.=<<___;
+.align	5
+.Lcbc_dec:
+	vld1.8	{$dat2},[$inp],#16
+	subs	$len,$len,#32		// bias
+	add	$cnt,$rounds,#2
+	vorr	$in1,$dat,$dat
+	vorr	$dat1,$dat,$dat
+	vorr	$in2,$dat2,$dat2
+	b.lo	.Lcbc_dec_tail
+
+	vorr	$dat1,$dat2,$dat2
+	vld1.8	{$dat2},[$inp],#16
+	vorr	$in0,$dat,$dat
+	vorr	$in1,$dat1,$dat1
+	vorr	$in2,$dat2,$dat2
+
+.Loop3x_cbc_dec:
+	aesd	$dat0,q8
+	aesimc	$dat0,$dat0
+	aesd	$dat1,q8
+	aesimc	$dat1,$dat1
+	aesd	$dat2,q8
+	aesimc	$dat2,$dat2
+	vld1.32	{q8},[$key_],#16
+	subs	$cnt,$cnt,#2
+	aesd	$dat0,q9
+	aesimc	$dat0,$dat0
+	aesd	$dat1,q9
+	aesimc	$dat1,$dat1
+	aesd	$dat2,q9
+	aesimc	$dat2,$dat2
+	vld1.32	{q9},[$key_],#16
+	b.gt	.Loop3x_cbc_dec
+
+	aesd	$dat0,q8
+	aesimc	$dat0,$dat0
+	aesd	$dat1,q8
+	aesimc	$dat1,$dat1
+	aesd	$dat2,q8
+	aesimc	$dat2,$dat2
+	 veor	$tmp0,$ivec,$rndlast
+	 subs	$len,$len,#0x30
+	 veor	$tmp1,$in0,$rndlast
+	 mov.lo	x6,$len			// x6, $cnt, is zero at this point
+	aesd	$dat0,q9
+	aesimc	$dat0,$dat0
+	aesd	$dat1,q9
+	aesimc	$dat1,$dat1
+	aesd	$dat2,q9
+	aesimc	$dat2,$dat2
+	 veor	$tmp2,$in1,$rndlast
+	 add	$inp,$inp,x6		// $inp is adjusted so that at exit
+					// from the loop $dat1-$dat2 are
+					// loaded with the last "words"
+	 vorr	$ivec,$in2,$in2
+	 mov	$key_,$key
+	aesd	$dat0,q12
+	aesimc	$dat0,$dat0
+	aesd	$dat1,q12
+	aesimc	$dat1,$dat1
+	aesd	$dat2,q12
+	aesimc	$dat2,$dat2
+	 vld1.8	{$in0},[$inp],#16
+	aesd	$dat0,q13
+	aesimc	$dat0,$dat0
+	aesd	$dat1,q13
+	aesimc	$dat1,$dat1
+	aesd	$dat2,q13
+	aesimc	$dat2,$dat2
+	 vld1.8	{$in1},[$inp],#16
+	aesd	$dat0,q14
+	aesimc	$dat0,$dat0
+	aesd	$dat1,q14
+	aesimc	$dat1,$dat1
+	aesd	$dat2,q14
+	aesimc	$dat2,$dat2
+	 vld1.8	{$in2},[$inp],#16
+	aesd	$dat0,q15
+	aesd	$dat1,q15
+	aesd	$dat2,q15
+	 vld1.32 {q8},[$key_],#16	// re-pre-load rndkey[0]
+	 add	$cnt,$rounds,#2
+	veor	$tmp0,$tmp0,$dat0
+	veor	$tmp1,$tmp1,$dat1
+	veor	$dat2,$dat2,$tmp2
+	 vld1.32 {q9},[$key_],#16	// re-pre-load rndkey[1]
+	vst1.8	{$tmp0},[$out],#16
+	 vorr	$dat0,$in0,$in0
+	vst1.8	{$tmp1},[$out],#16
+	 vorr	$dat1,$in1,$in1
+	vst1.8	{$dat2},[$out],#16
+	 vorr	$dat2,$in2,$in2
+	b.hs	.Loop3x_cbc_dec
+
+	cmn	$len,#0x30
+	b.eq	.Lcbc_done
+	nop
+
+.Lcbc_dec_tail:
+	aesd	$dat1,q8
+	aesimc	$dat1,$dat1
+	aesd	$dat2,q8
+	aesimc	$dat2,$dat2
+	vld1.32	{q8},[$key_],#16
+	subs	$cnt,$cnt,#2
+	aesd	$dat1,q9
+	aesimc	$dat1,$dat1
+	aesd	$dat2,q9
+	aesimc	$dat2,$dat2
+	vld1.32	{q9},[$key_],#16
+	b.gt	.Lcbc_dec_tail
+
+	aesd	$dat1,q8
+	aesimc	$dat1,$dat1
+	aesd	$dat2,q8
+	aesimc	$dat2,$dat2
+	aesd	$dat1,q9
+	aesimc	$dat1,$dat1
+	aesd	$dat2,q9
+	aesimc	$dat2,$dat2
+	aesd	$dat1,q12
+	aesimc	$dat1,$dat1
+	aesd	$dat2,q12
+	aesimc	$dat2,$dat2
+	 cmn	$len,#0x20
+	aesd	$dat1,q13
+	aesimc	$dat1,$dat1
+	aesd	$dat2,q13
+	aesimc	$dat2,$dat2
+	 veor	$tmp1,$ivec,$rndlast
+	aesd	$dat1,q14
+	aesimc	$dat1,$dat1
+	aesd	$dat2,q14
+	aesimc	$dat2,$dat2
+	 veor	$tmp2,$in1,$rndlast
+	aesd	$dat1,q15
+	aesd	$dat2,q15
+	b.eq	.Lcbc_dec_one
+	veor	$tmp1,$tmp1,$dat1
+	veor	$tmp2,$tmp2,$dat2
+	 vorr	$ivec,$in2,$in2
+	vst1.8	{$tmp1},[$out],#16
+	vst1.8	{$tmp2},[$out],#16
+	b	.Lcbc_done
+
+.Lcbc_dec_one:
+	veor	$tmp1,$tmp1,$dat2
+	 vorr	$ivec,$in2,$in2
+	vst1.8	{$tmp1},[$out],#16
+
+.Lcbc_done:
+	vst1.8	{$ivec},[$ivp]
+.Lcbc_abort:
+___
+}
+$code.=<<___	if ($flavour !~ /64/);
+	vldmia	sp!,{d8-d15}
+	ldmia	sp!,{r4-r8,pc}
+___
+$code.=<<___	if ($flavour =~ /64/);
+	ldr	x29,[sp],#16
+	ret
+___
+$code.=<<___;
+.size	${prefix}_cbc_encrypt,.-${prefix}_cbc_encrypt
+___
+}}}
+{{{
+my ($inp,$out,$len,$key,$ivp)=map("x$_",(0..4));
+my ($rounds,$cnt,$key_)=("w5","w6","x7");
+my ($ctr,$tctr0,$tctr1,$tctr2)=map("w$_",(8..10,12));
+my $step="x12";		# aliases with $tctr2
+
+my ($dat0,$dat1,$in0,$in1,$tmp0,$tmp1,$ivec,$rndlast)=map("q$_",(0..7));
+my ($dat2,$in2,$tmp2)=map("q$_",(10,11,9));
+
+my ($dat,$tmp)=($dat0,$tmp0);
+
+### q8-q15	preloaded key schedule
+
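+# ctr32_encrypt_blocks treats only the low 32 bits of the IV as the
+# counter, kept big-endian (hence the rev fixups below on little-endian
+# hosts); .Loop3x_ctr32 produces three keystream blocks per iteration and
+# .Lctr32_tail mops up the remaining one or two.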
+$code.=<<___;
+.globl	${prefix}_ctr32_encrypt_blocks
+.type	${prefix}_ctr32_encrypt_blocks,%function
+.align	5
+${prefix}_ctr32_encrypt_blocks:
+___
+$code.=<<___	if ($flavour =~ /64/);
+	stp		x29,x30,[sp,#-16]!
+	add		x29,sp,#0
+___
+$code.=<<___	if ($flavour !~ /64/);
+	mov		ip,sp
+	stmdb		sp!,{r4-r10,lr}
+	vstmdb		sp!,{d8-d15}            @ ABI specification says so
+	ldr		r4, [ip]		@ load remaining arg
+___
+$code.=<<___;
+	ldr		$rounds,[$key,#240]
+
+	ldr		$ctr, [$ivp, #12]
+#ifdef __ARMEB__
+	vld1.8		{$dat0},[$ivp]
+#else
+	vld1.32		{$dat0},[$ivp]
+#endif
+	vld1.32		{q8-q9},[$key]		// load key schedule...
+	sub		$rounds,$rounds,#4
+	mov		$step,#16
+	cmp		$len,#2
+	add		$key_,$key,x5,lsl#4	// pointer to last 5 round keys
+	sub		$rounds,$rounds,#2
+	vld1.32		{q12-q13},[$key_],#32
+	vld1.32		{q14-q15},[$key_],#32
+	vld1.32		{$rndlast},[$key_]
+	add		$key_,$key,#32
+	mov		$cnt,$rounds
+	cclr		$step,lo
+#ifndef __ARMEB__
+	rev		$ctr, $ctr
+#endif
+	add		$tctr1, $ctr, #1
+	vorr		$ivec,$dat0,$dat0
+	rev		$tctr1, $tctr1
+	vmov.32		${ivec}[3],$tctr1
+	add		$ctr, $ctr, #2
+	vorr		$dat1,$ivec,$ivec
+	b.ls		.Lctr32_tail
+	rev		$tctr2, $ctr
+	vmov.32		${ivec}[3],$tctr2
+	sub		$len,$len,#3		// bias
+	vorr		$dat2,$ivec,$ivec
+	b		.Loop3x_ctr32
+
+.align	4
+.Loop3x_ctr32:
+	aese		$dat0,q8
+	aesmc		$dat0,$dat0
+	aese		$dat1,q8
+	aesmc		$dat1,$dat1
+	aese		$dat2,q8
+	aesmc		$dat2,$dat2
+	vld1.32		{q8},[$key_],#16
+	subs		$cnt,$cnt,#2
+	aese		$dat0,q9
+	aesmc		$dat0,$dat0
+	aese		$dat1,q9
+	aesmc		$dat1,$dat1
+	aese		$dat2,q9
+	aesmc		$dat2,$dat2
+	vld1.32		{q9},[$key_],#16
+	b.gt		.Loop3x_ctr32
+
+	aese		$dat0,q8
+	aesmc		$tmp0,$dat0
+	aese		$dat1,q8
+	aesmc		$tmp1,$dat1
+	 vld1.8		{$in0},[$inp],#16
+	 add		$tctr0,$ctr,#1
+	aese		$dat2,q8
+	aesmc		$dat2,$dat2
+	 vld1.8		{$in1},[$inp],#16
+	 rev		$tctr0,$tctr0
+	aese		$tmp0,q9
+	aesmc		$tmp0,$tmp0
+	aese		$tmp1,q9
+	aesmc		$tmp1,$tmp1
+	 vld1.8		{$in2},[$inp],#16
+	 mov		$key_,$key
+	aese		$dat2,q9
+	aesmc		$tmp2,$dat2
+	aese		$tmp0,q12
+	aesmc		$tmp0,$tmp0
+	aese		$tmp1,q12
+	aesmc		$tmp1,$tmp1
+	 veor		$in0,$in0,$rndlast
+	 add		$tctr1,$ctr,#2
+	aese		$tmp2,q12
+	aesmc		$tmp2,$tmp2
+	 veor		$in1,$in1,$rndlast
+	 add		$ctr,$ctr,#3
+	aese		$tmp0,q13
+	aesmc		$tmp0,$tmp0
+	aese		$tmp1,q13
+	aesmc		$tmp1,$tmp1
+	 veor		$in2,$in2,$rndlast
+	 vmov.32	${ivec}[3], $tctr0
+	aese		$tmp2,q13
+	aesmc		$tmp2,$tmp2
+	 vorr		$dat0,$ivec,$ivec
+	 rev		$tctr1,$tctr1
+	aese		$tmp0,q14
+	aesmc		$tmp0,$tmp0
+	 vmov.32	${ivec}[3], $tctr1
+	 rev		$tctr2,$ctr
+	aese		$tmp1,q14
+	aesmc		$tmp1,$tmp1
+	 vorr		$dat1,$ivec,$ivec
+	 vmov.32	${ivec}[3], $tctr2
+	aese		$tmp2,q14
+	aesmc		$tmp2,$tmp2
+	 vorr		$dat2,$ivec,$ivec
+	 subs		$len,$len,#3
+	aese		$tmp0,q15
+	aese		$tmp1,q15
+	aese		$tmp2,q15
+
+	veor		$in0,$in0,$tmp0
+	 vld1.32	 {q8},[$key_],#16	// re-pre-load rndkey[0]
+	vst1.8		{$in0},[$out],#16
+	veor		$in1,$in1,$tmp1
+	 mov		$cnt,$rounds
+	vst1.8		{$in1},[$out],#16
+	veor		$in2,$in2,$tmp2
+	 vld1.32	 {q9},[$key_],#16	// re-pre-load rndkey[1]
+	vst1.8		{$in2},[$out],#16
+	b.hs		.Loop3x_ctr32
+
+	adds		$len,$len,#3
+	b.eq		.Lctr32_done
+	cmp		$len,#1
+	mov		$step,#16
+	cclr		$step,eq
+
+.Lctr32_tail:
+	aese		$dat0,q8
+	aesmc		$dat0,$dat0
+	aese		$dat1,q8
+	aesmc		$dat1,$dat1
+	vld1.32		{q8},[$key_],#16
+	subs		$cnt,$cnt,#2
+	aese		$dat0,q9
+	aesmc		$dat0,$dat0
+	aese		$dat1,q9
+	aesmc		$dat1,$dat1
+	vld1.32		{q9},[$key_],#16
+	b.gt		.Lctr32_tail
+
+	aese		$dat0,q8
+	aesmc		$dat0,$dat0
+	aese		$dat1,q8
+	aesmc		$dat1,$dat1
+	aese		$dat0,q9
+	aesmc		$dat0,$dat0
+	aese		$dat1,q9
+	aesmc		$dat1,$dat1
+	 vld1.8		{$in0},[$inp],$step
+	aese		$dat0,q12
+	aesmc		$dat0,$dat0
+	aese		$dat1,q12
+	aesmc		$dat1,$dat1
+	 vld1.8		{$in1},[$inp]
+	aese		$dat0,q13
+	aesmc		$dat0,$dat0
+	aese		$dat1,q13
+	aesmc		$dat1,$dat1
+	 veor		$in0,$in0,$rndlast
+	aese		$dat0,q14
+	aesmc		$dat0,$dat0
+	aese		$dat1,q14
+	aesmc		$dat1,$dat1
+	 veor		$in1,$in1,$rndlast
+	aese		$dat0,q15
+	aese		$dat1,q15
+
+	cmp		$len,#1
+	veor		$in0,$in0,$dat0
+	veor		$in1,$in1,$dat1
+	vst1.8		{$in0},[$out],#16
+	b.eq		.Lctr32_done
+	vst1.8		{$in1},[$out]
+
+.Lctr32_done:
+___
+$code.=<<___	if ($flavour !~ /64/);
+	vldmia		sp!,{d8-d15}
+	ldmia		sp!,{r4-r10,pc}
+___
+$code.=<<___	if ($flavour =~ /64/);
+	ldr		x29,[sp],#16
+	ret
+___
+$code.=<<___;
+.size	${prefix}_ctr32_encrypt_blocks,.-${prefix}_ctr32_encrypt_blocks
+___
+}}}
+$code.=<<___;
+#endif
+___
+########################################
+if ($flavour =~ /64/) {			######## 64-bit code
+    my %opcode = (
+	"aesd"	=>	0x4e285800,	"aese"	=>	0x4e284800,
+	"aesimc"=>	0x4e287800,	"aesmc"	=>	0x4e286800	);
+
+    local *unaes = sub {
+	my ($mnemonic,$arg)=@_;
+
+	$arg =~ m/[qv]([0-9]+)[^,]*,\s*[qv]([0-9]+)/o	&&
+	sprintf ".inst\t0x%08x\t//%s %s",
+			$opcode{$mnemonic}|$1|($2<<5),
+			$mnemonic,$arg;
+    };
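+
+    # Worked example (illustration): for "aese v0.16b,v1.16b" the helper
+    # above would emit 0x4e284800|0|(1<<5) = 0x4e284820 as
+    # ".inst 0x4e284820". The aes substitution below is left commented
+    # out because ".arch armv8-a+crypto" lets the assembler encode these
+    # instructions natively.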
+
+    foreach(split("\n",$code)) {
+	s/\`([^\`]*)\`/eval($1)/geo;
+
+	s/\bq([0-9]+)\b/"v".($1<8?$1:$1+8).".16b"/geo;	# old->new registers
+	s/@\s/\/\//o;			# old->new style commentary
+
+	#s/[v]?(aes\w+)\s+([qv].*)/unaes($1,$2)/geo	or
+	s/cclr\s+([wx])([^,]+),\s*([a-z]+)/csel	$1$2,$1zr,$1$2,$3/o	or
+	s/mov\.([a-z]+)\s+([wx][0-9]+),\s*([wx][0-9]+)/csel	$2,$3,$2,$1/o	or
+	s/vmov\.i8/movi/o	or	# fix up legacy mnemonics
+	s/vext\.8/ext/o		or
+	s/vrev32\.8/rev32/o	or
+	s/vtst\.8/cmtst/o	or
+	s/vshr/ushr/o		or
+	s/^(\s+)v/$1/o		or	# strip off v prefix
+	s/\bbx\s+lr\b/ret/o;
+
+	# fix up remaining legacy suffixes
+	s/\.[ui]?8//o;
+	m/\],#8/o and s/\.16b/\.8b/go;
+	s/\.[ui]?32//o and s/\.16b/\.4s/go;
+	s/\.[ui]?64//o and s/\.16b/\.2d/go;
+	s/\.[42]([sd])\[([0-3])\]/\.$1\[$2\]/o;
+
+	print $_,"\n";
+    }
+} else {				######## 32-bit code
+    my %opcode = (
+	"aesd"	=>	0xf3b00340,	"aese"	=>	0xf3b00300,
+	"aesimc"=>	0xf3b003c0,	"aesmc"	=>	0xf3b00380	);
+
+    local *unaes = sub {
+	my ($mnemonic,$arg)=@_;
+
+	if ($arg =~ m/[qv]([0-9]+)[^,]*,\s*[qv]([0-9]+)/o) {
+	    my $word = $opcode{$mnemonic}|(($1&7)<<13)|(($1&8)<<19)
+					 |(($2&7)<<1) |(($2&8)<<2);
+	    # ARMv7 instructions are always encoded little-endian, so emit
+	    # raw bytes; the correct solution is the .inst directive, but
+	    # older assemblers don't implement it :-(
+	    sprintf ".byte\t0x%02x,0x%02x,0x%02x,0x%02x\t@ %s %s",
+			$word&0xff,($word>>8)&0xff,
+			($word>>16)&0xff,($word>>24)&0xff,
+			$mnemonic,$arg;
+	}
+    };
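+
+    # Worked example (illustration): "aese q0,q1" encodes to
+    # 0xf3b00300|((0&7)<<13)|((0&8)<<19)|((1&7)<<1)|((1&8)<<2) =
+    # 0xf3b00302, emitted LSB-first as ".byte 0x02,0x03,0xb0,0xf3".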
+
+    sub unvtbl {
+	my $arg=shift;
+
+	$arg =~ m/q([0-9]+),\s*\{q([0-9]+)\},\s*q([0-9]+)/o &&
+	sprintf	"vtbl.8	d%d,{q%d},d%d\n\t".
+		"vtbl.8	d%d,{q%d},d%d", 2*$1,$2,2*$3, 2*$1+1,$2,2*$3+1;
+    }
+
+    sub unvdup32 {
+	my $arg=shift;
+
+	$arg =~ m/q([0-9]+),\s*q([0-9]+)\[([0-3])\]/o &&
+	sprintf	"vdup.32	q%d,d%d[%d]",$1,2*$2+($3>>1),$3&1;
+    }
+
+    sub unvmov32 {
+	my $arg=shift;
+
+	$arg =~ m/q([0-9]+)\[([0-3])\],(.*)/o &&
+	sprintf	"vmov.32	d%d[%d],%s",2*$1+($2>>1),$2&1,$3;
+    }
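+
+    # Worked examples (illustration): the three helpers above rewrite
+    # q-register operands into the d-register forms these instructions
+    # require, e.g. "vtbl.8 q0,{q1},q2" becomes the pair
+    # "vtbl.8 d0,{q1},d4" / "vtbl.8 d1,{q1},d5", and
+    # "vdup.32 q0,q1[2]" becomes "vdup.32 q0,d3[0]".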
+
+    foreach(split("\n",$code)) {
+	s/\`([^\`]*)\`/eval($1)/geo;
+
+	s/\b[wx]([0-9]+)\b/r$1/go;		# new->old registers
+	s/\bv([0-9])\.[12468]+[bsd]\b/q$1/go;	# new->old registers
+	s/\/\/\s?/@ /o;				# new->old style commentary
+
+	# fix up remaining new-style suffixes
+	s/\{q([0-9]+)\},\s*\[(.+)\],#8/sprintf "{d%d},[$2]!",2*$1/eo	or
+	s/\],#[0-9]+/]!/o;
+
+	s/[v]?(aes\w+)\s+([qv].*)/unaes($1,$2)/geo	or
+	s/cclr\s+([^,]+),\s*([a-z]+)/mov$2	$1,#0/o	or
+	s/vtbl\.8\s+(.*)/unvtbl($1)/geo			or
+	s/vdup\.32\s+(.*)/unvdup32($1)/geo		or
+	s/vmov\.32\s+(.*)/unvmov32($1)/geo		or
+	s/^(\s+)b\./$1b/o				or
+	s/^(\s+)mov\./$1mov/o				or
+	s/^(\s+)ret/$1bx\tlr/o;
+
+	print $_,"\n";
+    }
+}
+
+close STDOUT or die "error closing STDOUT: $!";
diff --git a/ap/lib/libssl/openssl-1.1.1o/crypto/aes/asm/bsaes-armv7.pl b/ap/lib/libssl/openssl-1.1.1o/crypto/aes/asm/bsaes-armv7.pl
new file mode 100644
index 0000000..2b9f241
--- /dev/null
+++ b/ap/lib/libssl/openssl-1.1.1o/crypto/aes/asm/bsaes-armv7.pl
@@ -0,0 +1,2491 @@
+#! /usr/bin/env perl
+# Copyright 2012-2020 The OpenSSL Project Authors. All Rights Reserved.
+#
+# Licensed under the OpenSSL license (the "License").  You may not use
+# this file except in compliance with the License.  You can obtain a copy
+# in the file LICENSE in the source distribution or at
+# https://www.openssl.org/source/license.html
+
+
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+#
+# Specific modes and adaptation for Linux kernel by Ard Biesheuvel
+# of Linaro. Permission to use under GPL terms is granted.
+# ====================================================================
+
+# Bit-sliced AES for ARM NEON
+#
+# February 2012.
+#
+# This implementation is direct adaptation of bsaes-x86_64 module for
+# ARM NEON. Except that this module is endian-neutral [in sense that
+# it can be compiled for either endianness] by courtesy of vld1.8's
+# neutrality. Initial version doesn't implement interface to OpenSSL,
+# only low-level primitives and unsupported entry points, just enough
+# to collect performance results, which for Cortex-A8 core are:
+#
+# encrypt	19.5 cycles per byte processed with 128-bit key
+# decrypt	22.1 cycles per byte processed with 128-bit key
+# key conv.	440  cycles per 128-bit key/0.18 of 8x block
+#
+# Snapdragon S4 encrypts a byte in 17.6 cycles and decrypts one in 19.7,
+# which is [much] worse than anticipated (for further details see
+# http://www.openssl.org/~appro/Snapdragon-S4.html).
+#
+# Cortex-A15 manages in 14.2/16.1 cycles [when integer-only code
+# manages in 20.0 cycles].
+#
+# When comparing to x86_64 results, keep in mind that the NEON unit is
+# [mostly] single-issue and thus can't [fully] benefit from
+# instruction-level parallelism. And when comparing to aes-armv4
+# results, keep in mind the key schedule conversion overhead (see
+# bsaes-x86_64.pl for further details)...
+#
+#						<appro@openssl.org>
+
+# April-August 2013
+# Add CBC, CTR and XTS subroutines and adapt for kernel use; courtesy of Ard.
+
+$flavour = shift;
+if ($flavour=~/\w[\w\-]*\.\w+$/) { $output=$flavour; undef $flavour; }
+else { while (($output=shift) && ($output!~/\w[\w\-]*\.\w+$/)) {} }
+
+if ($flavour && $flavour ne "void") {
+    $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+    ( $xlate="${dir}arm-xlate.pl" and -f $xlate ) or
+    ( $xlate="${dir}../../perlasm/arm-xlate.pl" and -f $xlate) or
+    die "can't locate arm-xlate.pl";
+
+    open STDOUT,"| \"$^X\" $xlate $flavour $output";
+} else {
+    open STDOUT,">$output";
+}
+
+my ($inp,$out,$len,$key)=("r0","r1","r2","r3");
+my @XMM=map("q$_",(0..15));
+
+{
+my ($key,$rounds,$const)=("r4","r5","r6");
+
+sub Dlo()   { shift=~m|q([1]?[0-9])|?"d".($1*2):"";     }
+sub Dhi()   { shift=~m|q([1]?[0-9])|?"d".($1*2+1):"";   }
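+# (Each NEON quad register qN aliases the doubleword pair d(2N)/d(2N+1),
+# so Dlo(q5) is "d10" and Dhi(q5) is "d11"; vtbl.8 operates on d
+# registers only.)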
+
+sub Sbox {
+# input in  lsb > [b0, b1, b2, b3, b4, b5, b6, b7] < msb
+# output in lsb > [b0, b1, b4, b6, b3, b7, b2, b5] < msb
+my @b=@_[0..7];
+my @t=@_[8..11];
+my @s=@_[12..15];
+	&InBasisChange	(@b);
+	&Inv_GF256	(@b[6,5,0,3,7,1,4,2],@t,@s);
+	&OutBasisChange	(@b[7,1,4,2,6,5,0,3]);
+}
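+# (The S-box is thus computed as inversion in GF(2^8), carried out by
+# Inv_GF256 in a tower-field representation, sandwiched between two
+# linear basis changes; the affine constant 0x63 is instead folded into
+# the last round key by _bsaes_key_convert.)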
+
+sub InBasisChange {
+# input in  lsb > [b0, b1, b2, b3, b4, b5, b6, b7] < msb
+# output in lsb > [b6, b5, b0, b3, b7, b1, b4, b2] < msb
+my @b=@_[0..7];
+$code.=<<___;
+	veor	@b[2], @b[2], @b[1]
+	veor	@b[5], @b[5], @b[6]
+	veor	@b[3], @b[3], @b[0]
+	veor	@b[6], @b[6], @b[2]
+	veor	@b[5], @b[5], @b[0]
+
+	veor	@b[6], @b[6], @b[3]
+	veor	@b[3], @b[3], @b[7]
+	veor	@b[7], @b[7], @b[5]
+	veor	@b[3], @b[3], @b[4]
+	veor	@b[4], @b[4], @b[5]
+
+	veor	@b[2], @b[2], @b[7]
+	veor	@b[3], @b[3], @b[1]
+	veor	@b[1], @b[1], @b[5]
+___
+}
+
+sub OutBasisChange {
+# input in  lsb > [b0, b1, b2, b3, b4, b5, b6, b7] < msb
+# output in lsb > [b6, b1, b2, b4, b7, b0, b3, b5] < msb
+my @b=@_[0..7];
+$code.=<<___;
+	veor	@b[0], @b[0], @b[6]
+	veor	@b[1], @b[1], @b[4]
+	veor	@b[4], @b[4], @b[6]
+	veor	@b[2], @b[2], @b[0]
+	veor	@b[6], @b[6], @b[1]
+
+	veor	@b[1], @b[1], @b[5]
+	veor	@b[5], @b[5], @b[3]
+	veor	@b[3], @b[3], @b[7]
+	veor	@b[7], @b[7], @b[5]
+	veor	@b[2], @b[2], @b[5]
+
+	veor	@b[4], @b[4], @b[7]
+___
+}
+
+sub InvSbox {
+# input in lsb 	> [b0, b1, b2, b3, b4, b5, b6, b7] < msb
+# output in lsb	> [b0, b1, b6, b4, b2, b7, b3, b5] < msb
+my @b=@_[0..7];
+my @t=@_[8..11];
+my @s=@_[12..15];
+	&InvInBasisChange	(@b);
+	&Inv_GF256		(@b[5,1,2,6,3,7,0,4],@t,@s);
+	&InvOutBasisChange	(@b[3,7,0,4,5,1,2,6]);
+}
+
+sub InvInBasisChange {		# OutBasisChange in reverse (with twist)
+my @b=@_[5,1,2,6,3,7,0,4];
+$code.=<<___
+	 veor	@b[1], @b[1], @b[7]
+	veor	@b[4], @b[4], @b[7]
+
+	veor	@b[7], @b[7], @b[5]
+	 veor	@b[1], @b[1], @b[3]
+	veor	@b[2], @b[2], @b[5]
+	veor	@b[3], @b[3], @b[7]
+
+	veor	@b[6], @b[6], @b[1]
+	veor	@b[2], @b[2], @b[0]
+	 veor	@b[5], @b[5], @b[3]
+	veor	@b[4], @b[4], @b[6]
+	veor	@b[0], @b[0], @b[6]
+	veor	@b[1], @b[1], @b[4]
+___
+}
+
+sub InvOutBasisChange {		# InBasisChange in reverse
+my @b=@_[2,5,7,3,6,1,0,4];
+$code.=<<___;
+	veor	@b[1], @b[1], @b[5]
+	veor	@b[2], @b[2], @b[7]
+
+	veor	@b[3], @b[3], @b[1]
+	veor	@b[4], @b[4], @b[5]
+	veor	@b[7], @b[7], @b[5]
+	veor	@b[3], @b[3], @b[4]
+	 veor 	@b[5], @b[5], @b[0]
+	veor	@b[3], @b[3], @b[7]
+	 veor	@b[6], @b[6], @b[2]
+	 veor	@b[2], @b[2], @b[1]
+	veor	@b[6], @b[6], @b[3]
+
+	veor	@b[3], @b[3], @b[0]
+	veor	@b[5], @b[5], @b[6]
+___
+}
+
+sub Mul_GF4 {
+#;*************************************************************
+#;* Mul_GF4: Input x0-x1,y0-y1 Output x0-x1 Temp t0 (8) *
+#;*************************************************************
+my ($x0,$x1,$y0,$y1,$t0,$t1)=@_;
+$code.=<<___;
+	veor 	$t0, $y0, $y1
+	vand	$t0, $t0, $x0
+	veor	$x0, $x0, $x1
+	vand	$t1, $x1, $y0
+	vand	$x0, $x0, $y1
+	veor	$x1, $t1, $t0
+	veor	$x0, $x0, $t1
+___
+}
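+# In bit-sliced terms, with x0/x1 denoting the inputs' original values,
+# the sequence above computes (illustration):
+#   x1' = (x1 & y0) ^ (x0 & (y0 ^ y1))
+#   x0' = ((x0 ^ x1) & y1) ^ (x1 & y0)
+# i.e. one GF(2^2) multiplication in three ANDs and four XORs.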
+
+sub Mul_GF4_N {				# not used, see next subroutine
+# multiply and scale by N
+my ($x0,$x1,$y0,$y1,$t0)=@_;
+$code.=<<___;
+	veor	$t0, $y0, $y1
+	vand	$t0, $t0, $x0
+	veor	$x0, $x0, $x1
+	vand	$x1, $x1, $y0
+	vand	$x0, $x0, $y1
+	veor	$x1, $x1, $x0
+	veor	$x0, $x0, $t0
+___
+}
+
+sub Mul_GF4_N_GF4 {
+# interleaved Mul_GF4_N and Mul_GF4
+my ($x0,$x1,$y0,$y1,$t0,
+    $x2,$x3,$y2,$y3,$t1)=@_;
+$code.=<<___;
+	veor	$t0, $y0, $y1
+	 veor 	$t1, $y2, $y3
+	vand	$t0, $t0, $x0
+	 vand	$t1, $t1, $x2
+	veor	$x0, $x0, $x1
+	 veor	$x2, $x2, $x3
+	vand	$x1, $x1, $y0
+	 vand	$x3, $x3, $y2
+	vand	$x0, $x0, $y1
+	 vand	$x2, $x2, $y3
+	veor	$x1, $x1, $x0
+	 veor	$x2, $x2, $x3
+	veor	$x0, $x0, $t0
+	 veor	$x3, $x3, $t1
+___
+}
+sub Mul_GF16_2 {
+my @x=@_[0..7];
+my @y=@_[8..11];
+my @t=@_[12..15];
+$code.=<<___;
+	veor	@t[0], @x[0], @x[2]
+	veor	@t[1], @x[1], @x[3]
+___
+	&Mul_GF4  	(@x[0], @x[1], @y[0], @y[1], @t[2..3]);
+$code.=<<___;
+	veor	@y[0], @y[0], @y[2]
+	veor	@y[1], @y[1], @y[3]
+___
+	Mul_GF4_N_GF4	(@t[0], @t[1], @y[0], @y[1], @t[3],
+			 @x[2], @x[3], @y[2], @y[3], @t[2]);
+$code.=<<___;
+	veor	@x[0], @x[0], @t[0]
+	veor	@x[2], @x[2], @t[0]
+	veor	@x[1], @x[1], @t[1]
+	veor	@x[3], @x[3], @t[1]
+
+	veor	@t[0], @x[4], @x[6]
+	veor	@t[1], @x[5], @x[7]
+___
+	&Mul_GF4_N_GF4	(@t[0], @t[1], @y[0], @y[1], @t[3],
+			 @x[6], @x[7], @y[2], @y[3], @t[2]);
+$code.=<<___;
+	veor	@y[0], @y[0], @y[2]
+	veor	@y[1], @y[1], @y[3]
+___
+	&Mul_GF4  	(@x[4], @x[5], @y[0], @y[1], @t[2..3]);
+$code.=<<___;
+	veor	@x[4], @x[4], @t[0]
+	veor	@x[6], @x[6], @t[0]
+	veor	@x[5], @x[5], @t[1]
+	veor	@x[7], @x[7], @t[1]
+___
+}
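+# (Mul_GF16_2 composes two GF(2^4) multiplications that share the y
+# operand; each is built Karatsuba-style from GF(2^2) multiplies on the
+# low halves, the high halves and the XOR-sums @t[0]/@t[1] formed above.)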
+sub Inv_GF256 {
+#;********************************************************************
+#;* Inv_GF256: Input x0-x7 Output x0-x7 Temp t0-t3,s0-s3 (144)       *
+#;********************************************************************
+my @x=@_[0..7];
+my @t=@_[8..11];
+my @s=@_[12..15];
+# direct optimizations from hardware
+$code.=<<___;
+	veor	@t[3], @x[4], @x[6]
+	veor	@t[2], @x[5], @x[7]
+	veor	@t[1], @x[1], @x[3]
+	veor	@s[1], @x[7], @x[6]
+	 vmov	@t[0], @t[2]
+	veor	@s[0], @x[0], @x[2]
+
+	vorr	@t[2], @t[2], @t[1]
+	veor	@s[3], @t[3], @t[0]
+	vand	@s[2], @t[3], @s[0]
+	vorr	@t[3], @t[3], @s[0]
+	veor	@s[0], @s[0], @t[1]
+	vand	@t[0], @t[0], @t[1]
+	veor	@t[1], @x[3], @x[2]
+	vand	@s[3], @s[3], @s[0]
+	vand	@s[1], @s[1], @t[1]
+	veor	@t[1], @x[4], @x[5]
+	veor	@s[0], @x[1], @x[0]
+	veor	@t[3], @t[3], @s[1]
+	veor	@t[2], @t[2], @s[1]
+	vand	@s[1], @t[1], @s[0]
+	vorr	@t[1], @t[1], @s[0]
+	veor	@t[3], @t[3], @s[3]
+	veor	@t[0], @t[0], @s[1]
+	veor	@t[2], @t[2], @s[2]
+	veor	@t[1], @t[1], @s[3]
+	veor	@t[0], @t[0], @s[2]
+	vand	@s[0], @x[7], @x[3]
+	veor	@t[1], @t[1], @s[2]
+	vand	@s[1], @x[6], @x[2]
+	vand	@s[2], @x[5], @x[1]
+	vorr	@s[3], @x[4], @x[0]
+	veor	@t[3], @t[3], @s[0]
+	veor	@t[1], @t[1], @s[2]
+	veor	@t[0], @t[0], @s[3]
+	veor	@t[2], @t[2], @s[1]
+
+	@ Inv_GF16 \t0, \t1, \t2, \t3, \s0, \s1, \s2, \s3
+
+	@ new smaller inversion
+
+	vand	@s[2], @t[3], @t[1]
+	vmov	@s[0], @t[0]
+
+	veor	@s[1], @t[2], @s[2]
+	veor	@s[3], @t[0], @s[2]
+	veor	@s[2], @t[0], @s[2]	@ @s[2]=@s[3]
+
+	vbsl	@s[1], @t[1], @t[0]
+	vbsl	@s[3], @t[3], @t[2]
+	veor	@t[3], @t[3], @t[2]
+
+	vbsl	@s[0], @s[1], @s[2]
+	vbsl	@t[0], @s[2], @s[1]
+
+	vand	@s[2], @s[0], @s[3]
+	veor	@t[1], @t[1], @t[0]
+
+	veor	@s[2], @s[2], @t[3]
+___
+# output in s3, s2, s1, t1
+
+# Mul_GF16_2 \x0, \x1, \x2, \x3, \x4, \x5, \x6, \x7, \t2, \t3, \t0, \t1, \s0, \s1, \s2, \s3
+
+# Mul_GF16_2 \x0, \x1, \x2, \x3, \x4, \x5, \x6, \x7, \s3, \s2, \s1, \t1, \s0, \t0, \t2, \t3
+	&Mul_GF16_2(@x,@s[3,2,1],@t[1],@s[0],@t[0,2,3]);
+
+### output msb > [x3,x2,x1,x0,x7,x6,x5,x4] < lsb
+}
+
+# AES linear components
+
+sub ShiftRows {
+my @x=@_[0..7];
+my @t=@_[8..11];
+my $mask=pop;
+$code.=<<___;
+	vldmia	$key!, {@t[0]-@t[3]}
+	veor	@t[0], @t[0], @x[0]
+	veor	@t[1], @t[1], @x[1]
+	vtbl.8	`&Dlo(@x[0])`, {@t[0]}, `&Dlo($mask)`
+	vtbl.8	`&Dhi(@x[0])`, {@t[0]}, `&Dhi($mask)`
+	vldmia	$key!, {@t[0]}
+	veor	@t[2], @t[2], @x[2]
+	vtbl.8	`&Dlo(@x[1])`, {@t[1]}, `&Dlo($mask)`
+	vtbl.8	`&Dhi(@x[1])`, {@t[1]}, `&Dhi($mask)`
+	vldmia	$key!, {@t[1]}
+	veor	@t[3], @t[3], @x[3]
+	vtbl.8	`&Dlo(@x[2])`, {@t[2]}, `&Dlo($mask)`
+	vtbl.8	`&Dhi(@x[2])`, {@t[2]}, `&Dhi($mask)`
+	vldmia	$key!, {@t[2]}
+	vtbl.8	`&Dlo(@x[3])`, {@t[3]}, `&Dlo($mask)`
+	vtbl.8	`&Dhi(@x[3])`, {@t[3]}, `&Dhi($mask)`
+	vldmia	$key!, {@t[3]}
+	veor	@t[0], @t[0], @x[4]
+	veor	@t[1], @t[1], @x[5]
+	vtbl.8	`&Dlo(@x[4])`, {@t[0]}, `&Dlo($mask)`
+	vtbl.8	`&Dhi(@x[4])`, {@t[0]}, `&Dhi($mask)`
+	veor	@t[2], @t[2], @x[6]
+	vtbl.8	`&Dlo(@x[5])`, {@t[1]}, `&Dlo($mask)`
+	vtbl.8	`&Dhi(@x[5])`, {@t[1]}, `&Dhi($mask)`
+	veor	@t[3], @t[3], @x[7]
+	vtbl.8	`&Dlo(@x[6])`, {@t[2]}, `&Dlo($mask)`
+	vtbl.8	`&Dhi(@x[6])`, {@t[2]}, `&Dhi($mask)`
+	vtbl.8	`&Dlo(@x[7])`, {@t[3]}, `&Dlo($mask)`
+	vtbl.8	`&Dhi(@x[7])`, {@t[3]}, `&Dhi($mask)`
+___
+}
+
+sub MixColumns {
+# modified to emit output in an order suitable for feeding back to aesenc[last]
+my @x=@_[0..7];
+my @t=@_[8..15];
+my $inv=@_[16];	# optional
+$code.=<<___;
+	vext.8	@t[0], @x[0], @x[0], #12	@ x0 <<< 32
+	vext.8	@t[1], @x[1], @x[1], #12
+	 veor	@x[0], @x[0], @t[0]		@ x0 ^ (x0 <<< 32)
+	vext.8	@t[2], @x[2], @x[2], #12
+	 veor	@x[1], @x[1], @t[1]
+	vext.8	@t[3], @x[3], @x[3], #12
+	 veor	@x[2], @x[2], @t[2]
+	vext.8	@t[4], @x[4], @x[4], #12
+	 veor	@x[3], @x[3], @t[3]
+	vext.8	@t[5], @x[5], @x[5], #12
+	 veor	@x[4], @x[4], @t[4]
+	vext.8	@t[6], @x[6], @x[6], #12
+	 veor	@x[5], @x[5], @t[5]
+	vext.8	@t[7], @x[7], @x[7], #12
+	 veor	@x[6], @x[6], @t[6]
+
+	veor	@t[1], @t[1], @x[0]
+	 veor	@x[7], @x[7], @t[7]
+	 vext.8	@x[0], @x[0], @x[0], #8		@ (x0 ^ (x0 <<< 32)) <<< 64)
+	veor	@t[2], @t[2], @x[1]
+	veor	@t[0], @t[0], @x[7]
+	veor	@t[1], @t[1], @x[7]
+	 vext.8	@x[1], @x[1], @x[1], #8
+	veor	@t[5], @t[5], @x[4]
+	 veor	@x[0], @x[0], @t[0]
+	veor	@t[6], @t[6], @x[5]
+	 veor	@x[1], @x[1], @t[1]
+	 vext.8	@t[0], @x[4], @x[4], #8
+	veor	@t[4], @t[4], @x[3]
+	 vext.8	@t[1], @x[5], @x[5], #8
+	veor	@t[7], @t[7], @x[6]
+	 vext.8	@x[4], @x[3], @x[3], #8
+	veor	@t[3], @t[3], @x[2]
+	 vext.8	@x[5], @x[7], @x[7], #8
+	veor	@t[4], @t[4], @x[7]
+	 vext.8	@x[3], @x[6], @x[6], #8
+	veor	@t[3], @t[3], @x[7]
+	 vext.8	@x[6], @x[2], @x[2], #8
+	veor	@x[7], @t[1], @t[5]
+___
+$code.=<<___ if (!$inv);
+	veor	@x[2], @t[0], @t[4]
+	veor	@x[4], @x[4], @t[3]
+	veor	@x[5], @x[5], @t[7]
+	veor	@x[3], @x[3], @t[6]
+	 @ vmov	@x[2], @t[0]
+	veor	@x[6], @x[6], @t[2]
+	 @ vmov	@x[7], @t[1]
+___
+$code.=<<___ if ($inv);
+	veor	@t[3], @t[3], @x[4]
+	veor	@x[5], @x[5], @t[7]
+	veor	@x[2], @x[3], @t[6]
+	veor	@x[3], @t[0], @t[4]
+	veor	@x[4], @x[6], @t[2]
+	vmov	@x[6], @t[3]
+	 @ vmov	@x[7], @t[1]
+___
+}
+
+sub InvMixColumns_orig {
+my @x=@_[0..7];
+my @t=@_[8..15];
+
+$code.=<<___;
+	@ multiplication by 0x0e
+	vext.8	@t[7], @x[7], @x[7], #12
+	vmov	@t[2], @x[2]
+	veor	@x[2], @x[2], @x[5]		@ 2 5
+	veor	@x[7], @x[7], @x[5]		@ 7 5
+	vext.8	@t[0], @x[0], @x[0], #12
+	vmov	@t[5], @x[5]
+	veor	@x[5], @x[5], @x[0]		@ 5 0		[1]
+	veor	@x[0], @x[0], @x[1]		@ 0 1
+	vext.8	@t[1], @x[1], @x[1], #12
+	veor	@x[1], @x[1], @x[2]		@ 1 25
+	veor	@x[0], @x[0], @x[6]		@ 01 6		[2]
+	vext.8	@t[3], @x[3], @x[3], #12
+	veor	@x[1], @x[1], @x[3]		@ 125 3		[4]
+	veor	@x[2], @x[2], @x[0]		@ 25 016	[3]
+	veor	@x[3], @x[3], @x[7]		@ 3 75
+	veor	@x[7], @x[7], @x[6]		@ 75 6		[0]
+	vext.8	@t[6], @x[6], @x[6], #12
+	vmov	@t[4], @x[4]
+	veor	@x[6], @x[6], @x[4]		@ 6 4
+	veor	@x[4], @x[4], @x[3]		@ 4 375		[6]
+	veor	@x[3], @x[3], @x[7]		@ 375 756=36
+	veor	@x[6], @x[6], @t[5]		@ 64 5		[7]
+	veor	@x[3], @x[3], @t[2]		@ 36 2
+	vext.8	@t[5], @t[5], @t[5], #12
+	veor	@x[3], @x[3], @t[4]		@ 362 4		[5]
+___
+					my @y = @x[7,5,0,2,1,3,4,6];
+$code.=<<___;
+	@ multiplication by 0x0b
+	veor	@y[1], @y[1], @y[0]
+	veor	@y[0], @y[0], @t[0]
+	vext.8	@t[2], @t[2], @t[2], #12
+	veor	@y[1], @y[1], @t[1]
+	veor	@y[0], @y[0], @t[5]
+	vext.8	@t[4], @t[4], @t[4], #12
+	veor	@y[1], @y[1], @t[6]
+	veor	@y[0], @y[0], @t[7]
+	veor	@t[7], @t[7], @t[6]		@ clobber t[7]
+
+	veor	@y[3], @y[3], @t[0]
+	 veor	@y[1], @y[1], @y[0]
+	vext.8	@t[0], @t[0], @t[0], #12
+	veor	@y[2], @y[2], @t[1]
+	veor	@y[4], @y[4], @t[1]
+	vext.8	@t[1], @t[1], @t[1], #12
+	veor	@y[2], @y[2], @t[2]
+	veor	@y[3], @y[3], @t[2]
+	veor	@y[5], @y[5], @t[2]
+	veor	@y[2], @y[2], @t[7]
+	vext.8	@t[2], @t[2], @t[2], #12
+	veor	@y[3], @y[3], @t[3]
+	veor	@y[6], @y[6], @t[3]
+	veor	@y[4], @y[4], @t[3]
+	veor	@y[7], @y[7], @t[4]
+	vext.8	@t[3], @t[3], @t[3], #12
+	veor	@y[5], @y[5], @t[4]
+	veor	@y[7], @y[7], @t[7]
+	veor	@t[7], @t[7], @t[5]		@ clobber t[7] even more
+	veor	@y[3], @y[3], @t[5]
+	veor	@y[4], @y[4], @t[4]
+
+	veor	@y[5], @y[5], @t[7]
+	vext.8	@t[4], @t[4], @t[4], #12
+	veor	@y[6], @y[6], @t[7]
+	veor	@y[4], @y[4], @t[7]
+
+	veor	@t[7], @t[7], @t[5]
+	vext.8	@t[5], @t[5], @t[5], #12
+
+	@ multiplication by 0x0d
+	veor	@y[4], @y[4], @y[7]
+	 veor	@t[7], @t[7], @t[6]		@ restore t[7]
+	veor	@y[7], @y[7], @t[4]
+	vext.8	@t[6], @t[6], @t[6], #12
+	veor	@y[2], @y[2], @t[0]
+	veor	@y[7], @y[7], @t[5]
+	vext.8	@t[7], @t[7], @t[7], #12
+	veor	@y[2], @y[2], @t[2]
+
+	veor	@y[3], @y[3], @y[1]
+	veor	@y[1], @y[1], @t[1]
+	veor	@y[0], @y[0], @t[0]
+	veor	@y[3], @y[3], @t[0]
+	veor	@y[1], @y[1], @t[5]
+	veor	@y[0], @y[0], @t[5]
+	vext.8	@t[0], @t[0], @t[0], #12
+	veor	@y[1], @y[1], @t[7]
+	veor	@y[0], @y[0], @t[6]
+	veor	@y[3], @y[3], @y[1]
+	veor	@y[4], @y[4], @t[1]
+	vext.8	@t[1], @t[1], @t[1], #12
+
+	veor	@y[7], @y[7], @t[7]
+	veor	@y[4], @y[4], @t[2]
+	veor	@y[5], @y[5], @t[2]
+	veor	@y[2], @y[2], @t[6]
+	veor	@t[6], @t[6], @t[3]		@ clobber t[6]
+	vext.8	@t[2], @t[2], @t[2], #12
+	veor	@y[4], @y[4], @y[7]
+	veor	@y[3], @y[3], @t[6]
+
+	veor	@y[6], @y[6], @t[6]
+	veor	@y[5], @y[5], @t[5]
+	vext.8	@t[5], @t[5], @t[5], #12
+	veor	@y[6], @y[6], @t[4]
+	vext.8	@t[4], @t[4], @t[4], #12
+	veor	@y[5], @y[5], @t[6]
+	veor	@y[6], @y[6], @t[7]
+	vext.8	@t[7], @t[7], @t[7], #12
+	veor	@t[6], @t[6], @t[3]		@ restore t[6]
+	vext.8	@t[3], @t[3], @t[3], #12
+
+	@ multiplication by 0x09
+	veor	@y[4], @y[4], @y[1]
+	veor	@t[1], @t[1], @y[1]		@ t[1]=y[1]
+	veor	@t[0], @t[0], @t[5]		@ clobber t[0]
+	vext.8	@t[6], @t[6], @t[6], #12
+	veor	@t[1], @t[1], @t[5]
+	veor	@y[3], @y[3], @t[0]
+	veor	@t[0], @t[0], @y[0]		@ t[0]=y[0]
+	veor	@t[1], @t[1], @t[6]
+	veor	@t[6], @t[6], @t[7]		@ clobber t[6]
+	veor	@y[4], @y[4], @t[1]
+	veor	@y[7], @y[7], @t[4]
+	veor	@y[6], @y[6], @t[3]
+	veor	@y[5], @y[5], @t[2]
+	veor	@t[4], @t[4], @y[4]		@ t[4]=y[4]
+	veor	@t[3], @t[3], @y[3]		@ t[3]=y[3]
+	veor	@t[5], @t[5], @y[5]		@ t[5]=y[5]
+	veor	@t[2], @t[2], @y[2]		@ t[2]=y[2]
+	veor	@t[3], @t[3], @t[7]
+	veor	@XMM[5], @t[5], @t[6]
+	veor	@XMM[6], @t[6], @y[6]		@ t[6]=y[6]
+	veor	@XMM[2], @t[2], @t[6]
+	veor	@XMM[7], @t[7], @y[7]		@ t[7]=y[7]
+
+	vmov	@XMM[0], @t[0]
+	vmov	@XMM[1], @t[1]
+	@ vmov	@XMM[2], @t[2]
+	vmov	@XMM[3], @t[3]
+	vmov	@XMM[4], @t[4]
+	@ vmov	@XMM[5], @t[5]
+	@ vmov	@XMM[6], @t[6]
+	@ vmov	@XMM[7], @t[7]
+___
+}
+
+sub InvMixColumns {
+my @x=@_[0..7];
+my @t=@_[8..15];
+
+# Thanks to Jussi Kivilinna for providing pointer to
+#
+# | 0e 0b 0d 09 |   | 02 03 01 01 |   | 05 00 04 00 |
+# | 09 0e 0b 0d | = | 01 02 03 01 | x | 00 05 00 04 |
+# | 0d 09 0e 0b |   | 01 01 02 03 |   | 04 00 05 00 |
+# | 0b 0d 09 0e |   | 03 01 01 02 |   | 00 04 00 05 |
+
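+# Spot check of the factorization's top-left entry in GF(2^8):
+# 02*05 ^ 03*00 ^ 01*04 ^ 01*00 = 0a ^ 04 = 0e, matching InvMixColumns.
+#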
+$code.=<<___;
+	@ multiplication by 0x05-0x00-0x04-0x00
+	vext.8	@t[0], @x[0], @x[0], #8
+	vext.8	@t[6], @x[6], @x[6], #8
+	vext.8	@t[7], @x[7], @x[7], #8
+	veor	@t[0], @t[0], @x[0]
+	vext.8	@t[1], @x[1], @x[1], #8
+	veor	@t[6], @t[6], @x[6]
+	vext.8	@t[2], @x[2], @x[2], #8
+	veor	@t[7], @t[7], @x[7]
+	vext.8	@t[3], @x[3], @x[3], #8
+	veor	@t[1], @t[1], @x[1]
+	vext.8	@t[4], @x[4], @x[4], #8
+	veor	@t[2], @t[2], @x[2]
+	vext.8	@t[5], @x[5], @x[5], #8
+	veor	@t[3], @t[3], @x[3]
+	veor	@t[4], @t[4], @x[4]
+	veor	@t[5], @t[5], @x[5]
+
+	 veor	@x[0], @x[0], @t[6]
+	 veor	@x[1], @x[1], @t[6]
+	 veor	@x[2], @x[2], @t[0]
+	 veor	@x[4], @x[4], @t[2]
+	 veor	@x[3], @x[3], @t[1]
+	 veor	@x[1], @x[1], @t[7]
+	 veor	@x[2], @x[2], @t[7]
+	 veor	@x[4], @x[4], @t[6]
+	 veor	@x[5], @x[5], @t[3]
+	 veor	@x[3], @x[3], @t[6]
+	 veor	@x[6], @x[6], @t[4]
+	 veor	@x[4], @x[4], @t[7]
+	 veor	@x[5], @x[5], @t[7]
+	 veor	@x[7], @x[7], @t[5]
+___
+	&MixColumns	(@x,@t,1);	# flipped 2<->3 and 4<->6
+}
+
+sub swapmove {
+my ($a,$b,$n,$mask,$t)=@_;
+$code.=<<___;
+	vshr.u64	$t, $b, #$n
+	veor		$t, $t, $a
+	vand		$t, $t, $mask
+	veor		$a, $a, $t
+	vshl.u64	$t, $t, #$n
+	veor		$b, $b, $t
+___
+}
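+# swapmove is the classic delta swap: it exchanges the bits of $a and $b
+# selected by $mask at distance $n, i.e. effectively
+#   t = ((b >> n) ^ a) & mask;  a ^= t;  b ^= t << n;
+# Applied with n = 1, 2, 4 below it transposes an 8x8 bit matrix,
+# converting between byte-ordered and bit-sliced representations.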
+sub swapmove2x {
+my ($a0,$b0,$a1,$b1,$n,$mask,$t0,$t1)=@_;
+$code.=<<___;
+	vshr.u64	$t0, $b0, #$n
+	 vshr.u64	$t1, $b1, #$n
+	veor		$t0, $t0, $a0
+	 veor		$t1, $t1, $a1
+	vand		$t0, $t0, $mask
+	 vand		$t1, $t1, $mask
+	veor		$a0, $a0, $t0
+	vshl.u64	$t0, $t0, #$n
+	 veor		$a1, $a1, $t1
+	 vshl.u64	$t1, $t1, #$n
+	veor		$b0, $b0, $t0
+	 veor		$b1, $b1, $t1
+___
+}
+
+sub bitslice {
+my @x=reverse(@_[0..7]);
+my ($t0,$t1,$t2,$t3)=@_[8..11];
+$code.=<<___;
+	vmov.i8	$t0,#0x55			@ compose .LBS0
+	vmov.i8	$t1,#0x33			@ compose .LBS1
+___
+	&swapmove2x(@x[0,1,2,3],1,$t0,$t2,$t3);
+	&swapmove2x(@x[4,5,6,7],1,$t0,$t2,$t3);
+$code.=<<___;
+	vmov.i8	$t0,#0x0f			@ compose .LBS2
+___
+	&swapmove2x(@x[0,2,1,3],2,$t1,$t2,$t3);
+	&swapmove2x(@x[4,6,5,7],2,$t1,$t2,$t3);
+
+	&swapmove2x(@x[0,4,1,5],4,$t0,$t2,$t3);
+	&swapmove2x(@x[2,6,3,7],4,$t0,$t2,$t3);
+}
+
+$code.=<<___;
+#ifndef __KERNEL__
+# include "arm_arch.h"
+
+# define VFP_ABI_PUSH	vstmdb	sp!,{d8-d15}
+# define VFP_ABI_POP	vldmia	sp!,{d8-d15}
+# define VFP_ABI_FRAME	0x40
+#else
+# define VFP_ABI_PUSH
+# define VFP_ABI_POP
+# define VFP_ABI_FRAME	0
+# define BSAES_ASM_EXTENDED_KEY
+# define XTS_CHAIN_TWEAK
+# define __ARM_ARCH__ __LINUX_ARM_ARCH__
+# define __ARM_MAX_ARCH__ 7
+#endif
+
+#ifdef __thumb__
+# define adrl adr
+#endif
+
+#if __ARM_MAX_ARCH__>=7
+.arch	armv7-a
+.fpu	neon
+
+.text
+.syntax	unified 	@ ARMv7-capable assembler is expected to handle this
+#if defined(__thumb2__) && !defined(__APPLE__)
+.thumb
+#else
+.code   32
+# undef __thumb2__
+#endif
+
+.type	_bsaes_decrypt8,%function
+.align	4
+_bsaes_decrypt8:
+	adr	$const,.
+	vldmia	$key!, {@XMM[9]}		@ round 0 key
+#if defined(__thumb2__) || defined(__APPLE__)
+	adr	$const,.LM0ISR
+#else
+	add	$const,$const,#.LM0ISR-_bsaes_decrypt8
+#endif
+
+	vldmia	$const!, {@XMM[8]}		@ .LM0ISR
+	veor	@XMM[10], @XMM[0], @XMM[9]	@ xor with round0 key
+	veor	@XMM[11], @XMM[1], @XMM[9]
+	 vtbl.8	`&Dlo(@XMM[0])`, {@XMM[10]}, `&Dlo(@XMM[8])`
+	 vtbl.8	`&Dhi(@XMM[0])`, {@XMM[10]}, `&Dhi(@XMM[8])`
+	veor	@XMM[12], @XMM[2], @XMM[9]
+	 vtbl.8	`&Dlo(@XMM[1])`, {@XMM[11]}, `&Dlo(@XMM[8])`
+	 vtbl.8	`&Dhi(@XMM[1])`, {@XMM[11]}, `&Dhi(@XMM[8])`
+	veor	@XMM[13], @XMM[3], @XMM[9]
+	 vtbl.8	`&Dlo(@XMM[2])`, {@XMM[12]}, `&Dlo(@XMM[8])`
+	 vtbl.8	`&Dhi(@XMM[2])`, {@XMM[12]}, `&Dhi(@XMM[8])`
+	veor	@XMM[14], @XMM[4], @XMM[9]
+	 vtbl.8	`&Dlo(@XMM[3])`, {@XMM[13]}, `&Dlo(@XMM[8])`
+	 vtbl.8	`&Dhi(@XMM[3])`, {@XMM[13]}, `&Dhi(@XMM[8])`
+	veor	@XMM[15], @XMM[5], @XMM[9]
+	 vtbl.8	`&Dlo(@XMM[4])`, {@XMM[14]}, `&Dlo(@XMM[8])`
+	 vtbl.8	`&Dhi(@XMM[4])`, {@XMM[14]}, `&Dhi(@XMM[8])`
+	veor	@XMM[10], @XMM[6], @XMM[9]
+	 vtbl.8	`&Dlo(@XMM[5])`, {@XMM[15]}, `&Dlo(@XMM[8])`
+	 vtbl.8	`&Dhi(@XMM[5])`, {@XMM[15]}, `&Dhi(@XMM[8])`
+	veor	@XMM[11], @XMM[7], @XMM[9]
+	 vtbl.8	`&Dlo(@XMM[6])`, {@XMM[10]}, `&Dlo(@XMM[8])`
+	 vtbl.8	`&Dhi(@XMM[6])`, {@XMM[10]}, `&Dhi(@XMM[8])`
+	 vtbl.8	`&Dlo(@XMM[7])`, {@XMM[11]}, `&Dlo(@XMM[8])`
+	 vtbl.8	`&Dhi(@XMM[7])`, {@XMM[11]}, `&Dhi(@XMM[8])`
+___
+	&bitslice	(@XMM[0..7, 8..11]);
+$code.=<<___;
+	sub	$rounds,$rounds,#1
+	b	.Ldec_sbox
+.align	4
+.Ldec_loop:
+___
+	&ShiftRows	(@XMM[0..7, 8..12]);
+$code.=".Ldec_sbox:\n";
+	&InvSbox	(@XMM[0..7, 8..15]);
+$code.=<<___;
+	subs	$rounds,$rounds,#1
+	bcc	.Ldec_done
+___
+	&InvMixColumns	(@XMM[0,1,6,4,2,7,3,5, 8..15]);
+$code.=<<___;
+	vldmia	$const, {@XMM[12]}		@ .LISR
+	ite	eq				@ Thumb2 thing, sanity check in ARM
+	addeq	$const,$const,#0x10
+	bne	.Ldec_loop
+	vldmia	$const, {@XMM[12]}		@ .LISRM0
+	b	.Ldec_loop
+.align	4
+.Ldec_done:
+___
+	&bitslice	(@XMM[0,1,6,4,2,7,3,5, 8..11]);
+$code.=<<___;
+	vldmia	$key, {@XMM[8]}			@ last round key
+	veor	@XMM[6], @XMM[6], @XMM[8]
+	veor	@XMM[4], @XMM[4], @XMM[8]
+	veor	@XMM[2], @XMM[2], @XMM[8]
+	veor	@XMM[7], @XMM[7], @XMM[8]
+	veor	@XMM[3], @XMM[3], @XMM[8]
+	veor	@XMM[5], @XMM[5], @XMM[8]
+	veor	@XMM[0], @XMM[0], @XMM[8]
+	veor	@XMM[1], @XMM[1], @XMM[8]
+	bx	lr
+.size	_bsaes_decrypt8,.-_bsaes_decrypt8
+
+.type	_bsaes_const,%object
+.align	6
+_bsaes_const:
+.LM0ISR:	@ InvShiftRows constants
+	.quad	0x0a0e0206070b0f03, 0x0004080c0d010509
+.LISR:
+	.quad	0x0504070602010003, 0x0f0e0d0c080b0a09
+.LISRM0:
+	.quad	0x01040b0e0205080f, 0x0306090c00070a0d
+.LM0SR:		@ ShiftRows constants
+	.quad	0x0a0e02060f03070b, 0x0004080c05090d01
+.LSR:
+	.quad	0x0504070600030201, 0x0f0e0d0c0a09080b
+.LSRM0:
+	.quad	0x0304090e00050a0f, 0x01060b0c0207080d
+.LM0:
+	.quad	0x02060a0e03070b0f, 0x0004080c0105090d
+.LREVM0SR:
+	.quad	0x090d01050c000408, 0x03070b0f060a0e02
+.asciz	"Bit-sliced AES for NEON, CRYPTOGAMS by <appro\@openssl.org>"
+.align	6
+.size	_bsaes_const,.-_bsaes_const
+
+.type	_bsaes_encrypt8,%function
+.align	4
+_bsaes_encrypt8:
+	adr	$const,.
+	vldmia	$key!, {@XMM[9]}		@ round 0 key
+#if defined(__thumb2__) || defined(__APPLE__)
+	adr	$const,.LM0SR
+#else
+	sub	$const,$const,#_bsaes_encrypt8-.LM0SR
+#endif
+
+	vldmia	$const!, {@XMM[8]}		@ .LM0SR
+_bsaes_encrypt8_alt:
+	veor	@XMM[10], @XMM[0], @XMM[9]	@ xor with round0 key
+	veor	@XMM[11], @XMM[1], @XMM[9]
+	 vtbl.8	`&Dlo(@XMM[0])`, {@XMM[10]}, `&Dlo(@XMM[8])`
+	 vtbl.8	`&Dhi(@XMM[0])`, {@XMM[10]}, `&Dhi(@XMM[8])`
+	veor	@XMM[12], @XMM[2], @XMM[9]
+	 vtbl.8	`&Dlo(@XMM[1])`, {@XMM[11]}, `&Dlo(@XMM[8])`
+	 vtbl.8	`&Dhi(@XMM[1])`, {@XMM[11]}, `&Dhi(@XMM[8])`
+	veor	@XMM[13], @XMM[3], @XMM[9]
+	 vtbl.8	`&Dlo(@XMM[2])`, {@XMM[12]}, `&Dlo(@XMM[8])`
+	 vtbl.8	`&Dhi(@XMM[2])`, {@XMM[12]}, `&Dhi(@XMM[8])`
+	veor	@XMM[14], @XMM[4], @XMM[9]
+	 vtbl.8	`&Dlo(@XMM[3])`, {@XMM[13]}, `&Dlo(@XMM[8])`
+	 vtbl.8	`&Dhi(@XMM[3])`, {@XMM[13]}, `&Dhi(@XMM[8])`
+	veor	@XMM[15], @XMM[5], @XMM[9]
+	 vtbl.8	`&Dlo(@XMM[4])`, {@XMM[14]}, `&Dlo(@XMM[8])`
+	 vtbl.8	`&Dhi(@XMM[4])`, {@XMM[14]}, `&Dhi(@XMM[8])`
+	veor	@XMM[10], @XMM[6], @XMM[9]
+	 vtbl.8	`&Dlo(@XMM[5])`, {@XMM[15]}, `&Dlo(@XMM[8])`
+	 vtbl.8	`&Dhi(@XMM[5])`, {@XMM[15]}, `&Dhi(@XMM[8])`
+	veor	@XMM[11], @XMM[7], @XMM[9]
+	 vtbl.8	`&Dlo(@XMM[6])`, {@XMM[10]}, `&Dlo(@XMM[8])`
+	 vtbl.8	`&Dhi(@XMM[6])`, {@XMM[10]}, `&Dhi(@XMM[8])`
+	 vtbl.8	`&Dlo(@XMM[7])`, {@XMM[11]}, `&Dlo(@XMM[8])`
+	 vtbl.8	`&Dhi(@XMM[7])`, {@XMM[11]}, `&Dhi(@XMM[8])`
+_bsaes_encrypt8_bitslice:
+___
+	&bitslice	(@XMM[0..7, 8..11]);
+$code.=<<___;
+	sub	$rounds,$rounds,#1
+	b	.Lenc_sbox
+.align	4
+.Lenc_loop:
+___
+	&ShiftRows	(@XMM[0..7, 8..12]);
+$code.=".Lenc_sbox:\n";
+	&Sbox		(@XMM[0..7, 8..15]);
+$code.=<<___;
+	subs	$rounds,$rounds,#1
+	bcc	.Lenc_done
+___
+	&MixColumns	(@XMM[0,1,4,6,3,7,2,5, 8..15]);
+$code.=<<___;
+	vldmia	$const, {@XMM[12]}		@ .LSR
+	ite	eq				@ Thumb2 thing, sanity check in ARM
+	addeq	$const,$const,#0x10
+	bne	.Lenc_loop
+	vldmia	$const, {@XMM[12]}		@ .LSRM0
+	b	.Lenc_loop
+.align	4
+.Lenc_done:
+___
+	# output in lsb > [t0, t1, t4, t6, t3, t7, t2, t5] < msb
+	&bitslice	(@XMM[0,1,4,6,3,7,2,5, 8..11]);
+$code.=<<___;
+	vldmia	$key, {@XMM[8]}			@ last round key
+	veor	@XMM[4], @XMM[4], @XMM[8]
+	veor	@XMM[6], @XMM[6], @XMM[8]
+	veor	@XMM[3], @XMM[3], @XMM[8]
+	veor	@XMM[7], @XMM[7], @XMM[8]
+	veor	@XMM[2], @XMM[2], @XMM[8]
+	veor	@XMM[5], @XMM[5], @XMM[8]
+	veor	@XMM[0], @XMM[0], @XMM[8]
+	veor	@XMM[1], @XMM[1], @XMM[8]
+	bx	lr
+.size	_bsaes_encrypt8,.-_bsaes_encrypt8
+___
+}
+{
+my ($out,$inp,$rounds,$const)=("r12","r4","r5","r6");
+
+sub bitslice_key {
+my @x=reverse(@_[0..7]);
+my ($bs0,$bs1,$bs2,$t2,$t3)=@_[8..12];
+
+	&swapmove	(@x[0,1],1,$bs0,$t2,$t3);
+$code.=<<___;
+	@ &swapmove(@x[2,3],1,$t0,$t2,$t3);
+	vmov	@x[2], @x[0]
+	vmov	@x[3], @x[1]
+___
+	#&swapmove2x(@x[4,5,6,7],1,$t0,$t2,$t3);
+
+	&swapmove2x	(@x[0,2,1,3],2,$bs1,$t2,$t3);
+$code.=<<___;
+	@ &swapmove2x(@x[4,6,5,7],2,$t1,$t2,$t3);
+	vmov	@x[4], @x[0]
+	vmov	@x[6], @x[2]
+	vmov	@x[5], @x[1]
+	vmov	@x[7], @x[3]
+___
+	&swapmove2x	(@x[0,4,1,5],4,$bs2,$t2,$t3);
+	&swapmove2x	(@x[2,6,3,7],4,$bs2,$t2,$t3);
+}
+
+$code.=<<___;
+.type	_bsaes_key_convert,%function
+.align	4
+_bsaes_key_convert:
+	adr	$const,.
+	vld1.8	{@XMM[7]},  [$inp]!		@ load round 0 key
+#if defined(__thumb2__) || defined(__APPLE__)
+	adr	$const,.LM0
+#else
+	sub	$const,$const,#_bsaes_key_convert-.LM0
+#endif
+	vld1.8	{@XMM[15]}, [$inp]!		@ load round 1 key
+
+	vmov.i8	@XMM[8],  #0x01			@ bit masks
+	vmov.i8	@XMM[9],  #0x02
+	vmov.i8	@XMM[10], #0x04
+	vmov.i8	@XMM[11], #0x08
+	vmov.i8	@XMM[12], #0x10
+	vmov.i8	@XMM[13], #0x20
+	vldmia	$const, {@XMM[14]}		@ .LM0
+
+#ifdef __ARMEL__
+	vrev32.8	@XMM[7],  @XMM[7]
+	vrev32.8	@XMM[15], @XMM[15]
+#endif
+	sub	$rounds,$rounds,#1
+	vstmia	$out!, {@XMM[7]}		@ save round 0 key
+	b	.Lkey_loop
+
+.align	4
+.Lkey_loop:
+	vtbl.8	`&Dlo(@XMM[7])`,{@XMM[15]},`&Dlo(@XMM[14])`
+	vtbl.8	`&Dhi(@XMM[7])`,{@XMM[15]},`&Dhi(@XMM[14])`
+	vmov.i8	@XMM[6],  #0x40
+	vmov.i8	@XMM[15], #0x80
+
+	vtst.8	@XMM[0], @XMM[7], @XMM[8]
+	vtst.8	@XMM[1], @XMM[7], @XMM[9]
+	vtst.8	@XMM[2], @XMM[7], @XMM[10]
+	vtst.8	@XMM[3], @XMM[7], @XMM[11]
+	vtst.8	@XMM[4], @XMM[7], @XMM[12]
+	vtst.8	@XMM[5], @XMM[7], @XMM[13]
+	vtst.8	@XMM[6], @XMM[7], @XMM[6]
+	vtst.8	@XMM[7], @XMM[7], @XMM[15]
+	vld1.8	{@XMM[15]}, [$inp]!		@ load next round key
+	vmvn	@XMM[0], @XMM[0]		@ "pnot"
+	vmvn	@XMM[1], @XMM[1]
+	vmvn	@XMM[5], @XMM[5]
+	vmvn	@XMM[6], @XMM[6]
+#ifdef __ARMEL__
+	vrev32.8	@XMM[15], @XMM[15]
+#endif
+	subs	$rounds,$rounds,#1
+	vstmia	$out!,{@XMM[0]-@XMM[7]}		@ write bit-sliced round key
+	bne	.Lkey_loop
+
+	vmov.i8	@XMM[7],#0x63			@ compose .L63
+	@ don't save last round key
+	bx	lr
+.size	_bsaes_key_convert,.-_bsaes_key_convert
+___
+}
+
+if (0) {		# the following four functions are an unsupported
+			# interface, used only for benchmarking...
+$code.=<<___;
+.globl	bsaes_enc_key_convert
+.type	bsaes_enc_key_convert,%function
+.align	4
+bsaes_enc_key_convert:
+	stmdb	sp!,{r4-r6,lr}
+	vstmdb	sp!,{d8-d15}		@ ABI specification says so
+
+	ldr	r5,[$inp,#240]			@ pass rounds
+	mov	r4,$inp				@ pass key
+	mov	r12,$out			@ pass key schedule
+	bl	_bsaes_key_convert
+	veor	@XMM[7],@XMM[7],@XMM[15]	@ fix up last round key
+	vstmia	r12, {@XMM[7]}			@ save last round key
+
+	vldmia	sp!,{d8-d15}
+	ldmia	sp!,{r4-r6,pc}
+.size	bsaes_enc_key_convert,.-bsaes_enc_key_convert
+
+.globl	bsaes_encrypt_128
+.type	bsaes_encrypt_128,%function
+.align	4
+bsaes_encrypt_128:
+	stmdb	sp!,{r4-r6,lr}
+	vstmdb	sp!,{d8-d15}		@ ABI specification says so
+.Lenc128_loop:
+	vld1.8	{@XMM[0]-@XMM[1]}, [$inp]!	@ load input
+	vld1.8	{@XMM[2]-@XMM[3]}, [$inp]!
+	mov	r4,$key				@ pass the key
+	vld1.8	{@XMM[4]-@XMM[5]}, [$inp]!
+	mov	r5,#10				@ pass rounds
+	vld1.8	{@XMM[6]-@XMM[7]}, [$inp]!
+
+	bl	_bsaes_encrypt8
+
+	vst1.8	{@XMM[0]-@XMM[1]}, [$out]!	@ write output
+	vst1.8	{@XMM[4]}, [$out]!
+	vst1.8	{@XMM[6]}, [$out]!
+	vst1.8	{@XMM[3]}, [$out]!
+	vst1.8	{@XMM[7]}, [$out]!
+	vst1.8	{@XMM[2]}, [$out]!
+	subs	$len,$len,#0x80
+	vst1.8	{@XMM[5]}, [$out]!
+	bhi	.Lenc128_loop
+
+	vldmia	sp!,{d8-d15}
+	ldmia	sp!,{r4-r6,pc}
+.size	bsaes_encrypt_128,.-bsaes_encrypt_128
+
+.globl	bsaes_dec_key_convert
+.type	bsaes_dec_key_convert,%function
+.align	4
+bsaes_dec_key_convert:
+	stmdb	sp!,{r4-r6,lr}
+	vstmdb	sp!,{d8-d15}		@ ABI specification says so
+
+	ldr	r5,[$inp,#240]			@ pass rounds
+	mov	r4,$inp				@ pass key
+	mov	r12,$out			@ pass key schedule
+	bl	_bsaes_key_convert
+	vldmia	$out, {@XMM[6]}
+	vstmia	r12,  {@XMM[15]}		@ save last round key
+	veor	@XMM[7], @XMM[7], @XMM[6]	@ fix up round 0 key
+	vstmia	$out, {@XMM[7]}
+
+	vldmia	sp!,{d8-d15}
+	ldmia	sp!,{r4-r6,pc}
+.size	bsaes_dec_key_convert,.-bsaes_dec_key_convert
+
+.globl	bsaes_decrypt_128
+.type	bsaes_decrypt_128,%function
+.align	4
+bsaes_decrypt_128:
+	stmdb	sp!,{r4-r6,lr}
+	vstmdb	sp!,{d8-d15}		@ ABI specification says so
+.Ldec128_loop:
+	vld1.8	{@XMM[0]-@XMM[1]}, [$inp]!	@ load input
+	vld1.8	{@XMM[2]-@XMM[3]}, [$inp]!
+	mov	r4,$key				@ pass the key
+	vld1.8	{@XMM[4]-@XMM[5]}, [$inp]!
+	mov	r5,#10				@ pass rounds
+	vld1.8	{@XMM[6]-@XMM[7]}, [$inp]!
+
+	bl	_bsaes_decrypt8
+
+	vst1.8	{@XMM[0]-@XMM[1]}, [$out]!	@ write output
+	vst1.8	{@XMM[6]}, [$out]!
+	vst1.8	{@XMM[4]}, [$out]!
+	vst1.8	{@XMM[2]}, [$out]!
+	vst1.8	{@XMM[7]}, [$out]!
+	vst1.8	{@XMM[3]}, [$out]!
+	subs	$len,$len,#0x80
+	vst1.8	{@XMM[5]}, [$out]!
+	bhi	.Ldec128_loop
+
+	vldmia	sp!,{d8-d15}
+	ldmia	sp!,{r4-r6,pc}
+.size	bsaes_decrypt_128,.-bsaes_decrypt_128
+___
+}
+{
+my ($inp,$out,$len,$key, $ivp,$fp,$rounds)=map("r$_",(0..3,8..10));
+my ($keysched)=("sp");
+
+$code.=<<___;
+.extern AES_cbc_encrypt
+.extern AES_decrypt
+
+.global	bsaes_cbc_encrypt
+.type	bsaes_cbc_encrypt,%function
+.align	5
+bsaes_cbc_encrypt:
+#ifndef	__KERNEL__
+	cmp	$len, #128
+#ifndef	__thumb__
+	blo	AES_cbc_encrypt
+#else
+	bhs	1f
+	b	AES_cbc_encrypt
+1:
+#endif
+#endif
+
+	@ it is up to the caller to make sure we are called with enc == 0
+
+	mov	ip, sp
+	stmdb	sp!, {r4-r10, lr}
+	VFP_ABI_PUSH
+	ldr	$ivp, [ip]			@ IV is 1st arg on the stack
+	mov	$len, $len, lsr#4		@ len in 16 byte blocks
+	sub	sp, #0x10			@ scratch space to carry over the IV
+	mov	$fp, sp				@ save sp
+
+	ldr	$rounds, [$key, #240]		@ get # of rounds
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	@ allocate the key schedule on the stack
+	sub	r12, sp, $rounds, lsl#7		@ 128 bytes per inner round key
+	add	r12, #`128-32`			@ size of bit-sliced key schedule
+
+	@ populate the key schedule
+	mov	r4, $key			@ pass key
+	mov	r5, $rounds			@ pass # of rounds
+	mov	sp, r12				@ sp is $keysched
+	bl	_bsaes_key_convert
+	vldmia	$keysched, {@XMM[6]}
+	vstmia	r12,  {@XMM[15]}		@ save last round key
+	veor	@XMM[7], @XMM[7], @XMM[6]	@ fix up round 0 key
+	vstmia	$keysched, {@XMM[7]}
+#else
+	ldr	r12, [$key, #244]
+	eors	r12, #1
+	beq	0f
+
+	@ populate the key schedule
+	str	r12, [$key, #244]
+	mov	r4, $key			@ pass key
+	mov	r5, $rounds			@ pass # of rounds
+	add	r12, $key, #248			@ pass key schedule
+	bl	_bsaes_key_convert
+	add	r4, $key, #248
+	vldmia	r4, {@XMM[6]}
+	vstmia	r12, {@XMM[15]}			@ save last round key
+	veor	@XMM[7], @XMM[7], @XMM[6]	@ fix up round 0 key
+	vstmia	r4, {@XMM[7]}
+
+.align	2
+0:
+#endif
+
+	vld1.8	{@XMM[15]}, [$ivp]		@ load IV
+	b	.Lcbc_dec_loop
+
+.align	4
+.Lcbc_dec_loop:
+	subs	$len, $len, #0x8
+	bmi	.Lcbc_dec_loop_finish
+
+	vld1.8	{@XMM[0]-@XMM[1]}, [$inp]!	@ load input
+	vld1.8	{@XMM[2]-@XMM[3]}, [$inp]!
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	mov	r4, $keysched			@ pass the key
+#else
+	add	r4, $key, #248
+#endif
+	vld1.8	{@XMM[4]-@XMM[5]}, [$inp]!
+	mov	r5, $rounds
+	vld1.8	{@XMM[6]-@XMM[7]}, [$inp]
+	sub	$inp, $inp, #0x60
+	vstmia	$fp, {@XMM[15]}			@ put aside IV
+
+	bl	_bsaes_decrypt8
+
+	vldmia	$fp, {@XMM[14]}			@ reload IV
+	vld1.8	{@XMM[8]-@XMM[9]}, [$inp]!	@ reload input
+	veor	@XMM[0], @XMM[0], @XMM[14]	@ ^= IV
+	vld1.8	{@XMM[10]-@XMM[11]}, [$inp]!
+	veor	@XMM[1], @XMM[1], @XMM[8]
+	veor	@XMM[6], @XMM[6], @XMM[9]
+	vld1.8	{@XMM[12]-@XMM[13]}, [$inp]!
+	veor	@XMM[4], @XMM[4], @XMM[10]
+	veor	@XMM[2], @XMM[2], @XMM[11]
+	vld1.8	{@XMM[14]-@XMM[15]}, [$inp]!
+	veor	@XMM[7], @XMM[7], @XMM[12]
+	vst1.8	{@XMM[0]-@XMM[1]}, [$out]!	@ write output
+	veor	@XMM[3], @XMM[3], @XMM[13]
+	vst1.8	{@XMM[6]}, [$out]!
+	veor	@XMM[5], @XMM[5], @XMM[14]
+	vst1.8	{@XMM[4]}, [$out]!
+	vst1.8	{@XMM[2]}, [$out]!
+	vst1.8	{@XMM[7]}, [$out]!
+	vst1.8	{@XMM[3]}, [$out]!
+	vst1.8	{@XMM[5]}, [$out]!
+
+	b	.Lcbc_dec_loop
+
+.Lcbc_dec_loop_finish:
+	adds	$len, $len, #8
+	beq	.Lcbc_dec_done
+
+	vld1.8	{@XMM[0]}, [$inp]!		@ load input
+	cmp	$len, #2
+	blo	.Lcbc_dec_one
+	vld1.8	{@XMM[1]}, [$inp]!
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	mov	r4, $keysched			@ pass the key
+#else
+	add	r4, $key, #248
+#endif
+	mov	r5, $rounds
+	vstmia	$fp, {@XMM[15]}			@ put aside IV
+	beq	.Lcbc_dec_two
+	vld1.8	{@XMM[2]}, [$inp]!
+	cmp	$len, #4
+	blo	.Lcbc_dec_three
+	vld1.8	{@XMM[3]}, [$inp]!
+	beq	.Lcbc_dec_four
+	vld1.8	{@XMM[4]}, [$inp]!
+	cmp	$len, #6
+	blo	.Lcbc_dec_five
+	vld1.8	{@XMM[5]}, [$inp]!
+	beq	.Lcbc_dec_six
+	vld1.8	{@XMM[6]}, [$inp]!
+	sub	$inp, $inp, #0x70
+
+	bl	_bsaes_decrypt8
+
+	vldmia	$fp, {@XMM[14]}			@ reload IV
+	vld1.8	{@XMM[8]-@XMM[9]}, [$inp]!	@ reload input
+	veor	@XMM[0], @XMM[0], @XMM[14]	@ ^= IV
+	vld1.8	{@XMM[10]-@XMM[11]}, [$inp]!
+	veor	@XMM[1], @XMM[1], @XMM[8]
+	veor	@XMM[6], @XMM[6], @XMM[9]
+	vld1.8	{@XMM[12]-@XMM[13]}, [$inp]!
+	veor	@XMM[4], @XMM[4], @XMM[10]
+	veor	@XMM[2], @XMM[2], @XMM[11]
+	vld1.8	{@XMM[15]}, [$inp]!
+	veor	@XMM[7], @XMM[7], @XMM[12]
+	vst1.8	{@XMM[0]-@XMM[1]}, [$out]!	@ write output
+	veor	@XMM[3], @XMM[3], @XMM[13]
+	vst1.8	{@XMM[6]}, [$out]!
+	vst1.8	{@XMM[4]}, [$out]!
+	vst1.8	{@XMM[2]}, [$out]!
+	vst1.8	{@XMM[7]}, [$out]!
+	vst1.8	{@XMM[3]}, [$out]!
+	b	.Lcbc_dec_done
+.align	4
+.Lcbc_dec_six:
+	sub	$inp, $inp, #0x60
+	bl	_bsaes_decrypt8
+	vldmia	$fp,{@XMM[14]}			@ reload IV
+	vld1.8	{@XMM[8]-@XMM[9]}, [$inp]!	@ reload input
+	veor	@XMM[0], @XMM[0], @XMM[14]	@ ^= IV
+	vld1.8	{@XMM[10]-@XMM[11]}, [$inp]!
+	veor	@XMM[1], @XMM[1], @XMM[8]
+	veor	@XMM[6], @XMM[6], @XMM[9]
+	vld1.8	{@XMM[12]}, [$inp]!
+	veor	@XMM[4], @XMM[4], @XMM[10]
+	veor	@XMM[2], @XMM[2], @XMM[11]
+	vld1.8	{@XMM[15]}, [$inp]!
+	veor	@XMM[7], @XMM[7], @XMM[12]
+	vst1.8	{@XMM[0]-@XMM[1]}, [$out]!	@ write output
+	vst1.8	{@XMM[6]}, [$out]!
+	vst1.8	{@XMM[4]}, [$out]!
+	vst1.8	{@XMM[2]}, [$out]!
+	vst1.8	{@XMM[7]}, [$out]!
+	b	.Lcbc_dec_done
+.align	4
+.Lcbc_dec_five:
+	sub	$inp, $inp, #0x50
+	bl	_bsaes_decrypt8
+	vldmia	$fp, {@XMM[14]}			@ reload IV
+	vld1.8	{@XMM[8]-@XMM[9]}, [$inp]!	@ reload input
+	veor	@XMM[0], @XMM[0], @XMM[14]	@ ^= IV
+	vld1.8	{@XMM[10]-@XMM[11]}, [$inp]!
+	veor	@XMM[1], @XMM[1], @XMM[8]
+	veor	@XMM[6], @XMM[6], @XMM[9]
+	vld1.8	{@XMM[15]}, [$inp]!
+	veor	@XMM[4], @XMM[4], @XMM[10]
+	vst1.8	{@XMM[0]-@XMM[1]}, [$out]!	@ write output
+	veor	@XMM[2], @XMM[2], @XMM[11]
+	vst1.8	{@XMM[6]}, [$out]!
+	vst1.8	{@XMM[4]}, [$out]!
+	vst1.8	{@XMM[2]}, [$out]!
+	b	.Lcbc_dec_done
+.align	4
+.Lcbc_dec_four:
+	sub	$inp, $inp, #0x40
+	bl	_bsaes_decrypt8
+	vldmia	$fp, {@XMM[14]}			@ reload IV
+	vld1.8	{@XMM[8]-@XMM[9]}, [$inp]!	@ reload input
+	veor	@XMM[0], @XMM[0], @XMM[14]	@ ^= IV
+	vld1.8	{@XMM[10]}, [$inp]!
+	veor	@XMM[1], @XMM[1], @XMM[8]
+	veor	@XMM[6], @XMM[6], @XMM[9]
+	vld1.8	{@XMM[15]}, [$inp]!
+	veor	@XMM[4], @XMM[4], @XMM[10]
+	vst1.8	{@XMM[0]-@XMM[1]}, [$out]!	@ write output
+	vst1.8	{@XMM[6]}, [$out]!
+	vst1.8	{@XMM[4]}, [$out]!
+	b	.Lcbc_dec_done
+.align	4
+.Lcbc_dec_three:
+	sub	$inp, $inp, #0x30
+	bl	_bsaes_decrypt8
+	vldmia	$fp, {@XMM[14]}			@ reload IV
+	vld1.8	{@XMM[8]-@XMM[9]}, [$inp]!	@ reload input
+	veor	@XMM[0], @XMM[0], @XMM[14]	@ ^= IV
+	vld1.8	{@XMM[15]}, [$inp]!
+	veor	@XMM[1], @XMM[1], @XMM[8]
+	veor	@XMM[6], @XMM[6], @XMM[9]
+	vst1.8	{@XMM[0]-@XMM[1]}, [$out]!	@ write output
+	vst1.8	{@XMM[6]}, [$out]!
+	b	.Lcbc_dec_done
+.align	4
+.Lcbc_dec_two:
+	sub	$inp, $inp, #0x20
+	bl	_bsaes_decrypt8
+	vldmia	$fp, {@XMM[14]}			@ reload IV
+	vld1.8	{@XMM[8]}, [$inp]!		@ reload input
+	veor	@XMM[0], @XMM[0], @XMM[14]	@ ^= IV
+	vld1.8	{@XMM[15]}, [$inp]!		@ reload input
+	veor	@XMM[1], @XMM[1], @XMM[8]
+	vst1.8	{@XMM[0]-@XMM[1]}, [$out]!	@ write output
+	b	.Lcbc_dec_done
+.align	4
+.Lcbc_dec_one:
+	sub	$inp, $inp, #0x10
+	mov	$rounds, $out			@ save original out pointer
+	mov	$out, $fp			@ use the iv scratch space as out buffer
+	mov	r2, $key
+	vmov	@XMM[4],@XMM[15]		@ just in case ensure that IV
+	vmov	@XMM[5],@XMM[0]			@ and input are preserved
+	bl	AES_decrypt
+	vld1.8	{@XMM[0]}, [$fp]		@ load result
+	veor	@XMM[0], @XMM[0], @XMM[4]	@ ^= IV
+	vmov	@XMM[15], @XMM[5]		@ @XMM[5] holds input
+	vst1.8	{@XMM[0]}, [$rounds]		@ write output
+
+.Lcbc_dec_done:
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	vmov.i32	q0, #0
+	vmov.i32	q1, #0
+.Lcbc_dec_bzero:				@ wipe key schedule [if any]
+	vstmia		$keysched!, {q0-q1}
+	cmp		$keysched, $fp
+	bne		.Lcbc_dec_bzero
+#endif
+
+	mov	sp, $fp
+	add	sp, #0x10			@ add sp,$fp,#0x10 is no good for thumb
+	vst1.8	{@XMM[15]}, [$ivp]		@ return IV
+	VFP_ABI_POP
+	ldmia	sp!, {r4-r10, pc}
+.size	bsaes_cbc_encrypt,.-bsaes_cbc_encrypt
+___
+}
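+# Editor's note: the eight-block loop above is plain CBC decryption; a
+# minimal C sketch of the relation it implements (illustration only,
+# using the AES_decrypt primitive this file already falls back to):
+#
+#     /* P[i] = AES_decrypt(C[i]) ^ C[i-1], with C[-1] = IV; the last */
+#     /* ciphertext block is written back through $ivp so chained     */
+#     /* calls continue the stream.  The assembly reloads the input   */
+#     /* after _bsaes_decrypt8 precisely so this works in place.      */
+#     for (size_t i = 0; i < nblocks; i++) {
+#         unsigned char tmp[16];
+#         AES_decrypt(c + 16 * i, tmp, key);
+#         for (int j = 0; j < 16; j++)
+#             p[16 * i + j] = tmp[j] ^ (i ? c[16 * (i - 1) + j] : iv[j]);
+#     }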
+{
+my ($inp,$out,$len,$key, $ctr,$fp,$rounds)=(map("r$_",(0..3,8..10)));
+my $const = "r6";	# shared with _bsaes_encrypt8_alt
+my $keysched = "sp";
+
+$code.=<<___;
+.extern	AES_encrypt
+.global	bsaes_ctr32_encrypt_blocks
+.type	bsaes_ctr32_encrypt_blocks,%function
+.align	5
+bsaes_ctr32_encrypt_blocks:
+	cmp	$len, #8			@ use plain AES for
+	blo	.Lctr_enc_short			@ small sizes
+
+	mov	ip, sp
+	stmdb	sp!, {r4-r10, lr}
+	VFP_ABI_PUSH
+	ldr	$ctr, [ip]			@ ctr is 1st arg on the stack
+	sub	sp, sp, #0x10			@ scratch space to carry over the ctr
+	mov	$fp, sp				@ save sp
+
+	ldr	$rounds, [$key, #240]		@ get # of rounds
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	@ allocate the key schedule on the stack
+	sub	r12, sp, $rounds, lsl#7		@ 128 bytes per inner round key
+	add	r12, #`128-32`			@ size of bit-sliced key schedule
+
+	@ populate the key schedule
+	mov	r4, $key			@ pass key
+	mov	r5, $rounds			@ pass # of rounds
+	mov	sp, r12				@ sp is $keysched
+	bl	_bsaes_key_convert
+	veor	@XMM[7],@XMM[7],@XMM[15]	@ fix up last round key
+	vstmia	r12, {@XMM[7]}			@ save last round key
+
+	vld1.8	{@XMM[0]}, [$ctr]		@ load counter
+#ifdef	__APPLE__
+	mov	$ctr, #:lower16:(.LREVM0SR-.LM0)
+	add	$ctr, $const, $ctr
+#else
+	add	$ctr, $const, #.LREVM0SR-.LM0	@ borrow $ctr
+#endif
+	vldmia	$keysched, {@XMM[4]}		@ load round0 key
+#else
+	ldr	r12, [$key, #244]
+	eors	r12, #1
+	beq	0f
+
+	@ populate the key schedule
+	str	r12, [$key, #244]
+	mov	r4, $key			@ pass key
+	mov	r5, $rounds			@ pass # of rounds
+	add	r12, $key, #248			@ pass key schedule
+	bl	_bsaes_key_convert
+	veor	@XMM[7],@XMM[7],@XMM[15]	@ fix up last round key
+	vstmia	r12, {@XMM[7]}			@ save last round key
+
+.align	2
+0:	add	r12, $key, #248
+	vld1.8	{@XMM[0]}, [$ctr]		@ load counter
+	adrl	$ctr, .LREVM0SR			@ borrow $ctr
+	vldmia	r12, {@XMM[4]}			@ load round0 key
+	sub	sp, #0x10			@ place for adjusted round0 key
+#endif
+
+	vmov.i32	@XMM[8],#1		@ compose 1<<96
+	veor		@XMM[9],@XMM[9],@XMM[9]
+	vrev32.8	@XMM[0],@XMM[0]
+	vext.8		@XMM[8],@XMM[9],@XMM[8],#4
+	vrev32.8	@XMM[4],@XMM[4]
+	vadd.u32	@XMM[9],@XMM[8],@XMM[8]	@ compose 2<<96
+	vstmia	$keysched, {@XMM[4]}		@ save adjusted round0 key
+	b	.Lctr_enc_loop
+
+.align	4
+.Lctr_enc_loop:
+	vadd.u32	@XMM[10], @XMM[8], @XMM[9]	@ compose 3<<96
+	vadd.u32	@XMM[1], @XMM[0], @XMM[8]	@ +1
+	vadd.u32	@XMM[2], @XMM[0], @XMM[9]	@ +2
+	vadd.u32	@XMM[3], @XMM[0], @XMM[10]	@ +3
+	vadd.u32	@XMM[4], @XMM[1], @XMM[10]
+	vadd.u32	@XMM[5], @XMM[2], @XMM[10]
+	vadd.u32	@XMM[6], @XMM[3], @XMM[10]
+	vadd.u32	@XMM[7], @XMM[4], @XMM[10]
+	vadd.u32	@XMM[10], @XMM[5], @XMM[10]	@ next counter
+
+	@ Borrow prologue from _bsaes_encrypt8 to use the opportunity
+	@ to flip byte order in 32-bit counter
+
+	vldmia		$keysched, {@XMM[9]}		@ load round0 key
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	add		r4, $keysched, #0x10		@ pass next round key
+#else
+	add		r4, $key, #`248+16`
+#endif
+	vldmia		$ctr, {@XMM[8]}			@ .LREVM0SR
+	mov		r5, $rounds			@ pass rounds
+	vstmia		$fp, {@XMM[10]}			@ save next counter
+#ifdef	__APPLE__
+	mov		$const, #:lower16:(.LREVM0SR-.LSR)
+	sub		$const, $ctr, $const
+#else
+	sub		$const, $ctr, #.LREVM0SR-.LSR	@ pass constants
+#endif
+
+	bl		_bsaes_encrypt8_alt
+
+	subs		$len, $len, #8
+	blo		.Lctr_enc_loop_done
+
+	vld1.8		{@XMM[8]-@XMM[9]}, [$inp]!	@ load input
+	vld1.8		{@XMM[10]-@XMM[11]}, [$inp]!
+	veor		@XMM[0], @XMM[8]
+	veor		@XMM[1], @XMM[9]
+	vld1.8		{@XMM[12]-@XMM[13]}, [$inp]!
+	veor		@XMM[4], @XMM[10]
+	veor		@XMM[6], @XMM[11]
+	vld1.8		{@XMM[14]-@XMM[15]}, [$inp]!
+	veor		@XMM[3], @XMM[12]
+	vst1.8		{@XMM[0]-@XMM[1]}, [$out]!	@ write output
+	veor		@XMM[7], @XMM[13]
+	veor		@XMM[2], @XMM[14]
+	vst1.8		{@XMM[4]}, [$out]!
+	veor		@XMM[5], @XMM[15]
+	vst1.8		{@XMM[6]}, [$out]!
+	vmov.i32	@XMM[8], #1			@ compose 1<<96
+	vst1.8		{@XMM[3]}, [$out]!
+	veor		@XMM[9], @XMM[9], @XMM[9]
+	vst1.8		{@XMM[7]}, [$out]!
+	vext.8		@XMM[8], @XMM[9], @XMM[8], #4
+	vst1.8		{@XMM[2]}, [$out]!
+	vadd.u32	@XMM[9],@XMM[8],@XMM[8]		@ compose 2<<96
+	vst1.8		{@XMM[5]}, [$out]!
+	vldmia		$fp, {@XMM[0]}			@ load counter
+
+	bne		.Lctr_enc_loop
+	b		.Lctr_enc_done
+
+.align	4
+.Lctr_enc_loop_done:
+	add		$len, $len, #8
+	vld1.8		{@XMM[8]}, [$inp]!	@ load input
+	veor		@XMM[0], @XMM[8]
+	vst1.8		{@XMM[0]}, [$out]!	@ write output
+	cmp		$len, #2
+	blo		.Lctr_enc_done
+	vld1.8		{@XMM[9]}, [$inp]!
+	veor		@XMM[1], @XMM[9]
+	vst1.8		{@XMM[1]}, [$out]!
+	beq		.Lctr_enc_done
+	vld1.8		{@XMM[10]}, [$inp]!
+	veor		@XMM[4], @XMM[10]
+	vst1.8		{@XMM[4]}, [$out]!
+	cmp		$len, #4
+	blo		.Lctr_enc_done
+	vld1.8		{@XMM[11]}, [$inp]!
+	veor		@XMM[6], @XMM[11]
+	vst1.8		{@XMM[6]}, [$out]!
+	beq		.Lctr_enc_done
+	vld1.8		{@XMM[12]}, [$inp]!
+	veor		@XMM[3], @XMM[12]
+	vst1.8		{@XMM[3]}, [$out]!
+	cmp		$len, #6
+	blo		.Lctr_enc_done
+	vld1.8		{@XMM[13]}, [$inp]!
+	veor		@XMM[7], @XMM[13]
+	vst1.8		{@XMM[7]}, [$out]!
+	beq		.Lctr_enc_done
+	vld1.8		{@XMM[14]}, [$inp]
+	veor		@XMM[2], @XMM[14]
+	vst1.8		{@XMM[2]}, [$out]!
+
+.Lctr_enc_done:
+	vmov.i32	q0, #0
+	vmov.i32	q1, #0
+#ifndef	BSAES_ASM_EXTENDED_KEY
+.Lctr_enc_bzero:			@ wipe key schedule [if any]
+	vstmia		$keysched!, {q0-q1}
+	cmp		$keysched, $fp
+	bne		.Lctr_enc_bzero
+#else
+	vstmia		$keysched, {q0-q1}
+#endif
+
+	mov	sp, $fp
+	add	sp, #0x10		@ add sp,$fp,#0x10 is no good for thumb
+	VFP_ABI_POP
+	ldmia	sp!, {r4-r10, pc}	@ return
+
+.align	4
+.Lctr_enc_short:
+	ldr	ip, [sp]		@ ctr pointer is passed on stack
+	stmdb	sp!, {r4-r8, lr}
+
+	mov	r4, $inp		@ copy arguments
+	mov	r5, $out
+	mov	r6, $len
+	mov	r7, $key
+	ldr	r8, [ip, #12]		@ load counter LSW
+	vld1.8	{@XMM[1]}, [ip]		@ load whole counter value
+#ifdef __ARMEL__
+	rev	r8, r8
+#endif
+	sub	sp, sp, #0x10
+	vst1.8	{@XMM[1]}, [sp]		@ copy counter value
+	sub	sp, sp, #0x10
+
+.Lctr_enc_short_loop:
+	add	r0, sp, #0x10		@ input counter value
+	mov	r1, sp			@ output on the stack
+	mov	r2, r7			@ key
+
+	bl	AES_encrypt
+
+	vld1.8	{@XMM[0]}, [r4]!	@ load input
+	vld1.8	{@XMM[1]}, [sp]		@ load encrypted counter
+	add	r8, r8, #1
+#ifdef __ARMEL__
+	rev	r0, r8
+	str	r0, [sp, #0x1c]		@ next counter value
+#else
+	str	r8, [sp, #0x1c]		@ next counter value
+#endif
+	veor	@XMM[0],@XMM[0],@XMM[1]
+	vst1.8	{@XMM[0]}, [r5]!	@ store output
+	subs	r6, r6, #1
+	bne	.Lctr_enc_short_loop
+
+	vmov.i32	q0, #0
+	vmov.i32	q1, #0
+	vstmia		sp!, {q0-q1}
+
+	ldmia	sp!, {r4-r8, pc}
+.size	bsaes_ctr32_encrypt_blocks,.-bsaes_ctr32_encrypt_blocks
+___
+}
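+# Editor's note: "ctr32" means only the low 32 bits of the 16-byte
+# counter block are incremented, as a big-endian word (note the
+# "load counter LSW" from [ip,#12] and the rev on little-endian in the
+# short path above).  A minimal C sketch of that increment
+# (illustration only):
+#
+#     static void ctr32_inc(unsigned char ctr[16])
+#     {
+#         /* ++ of the big-endian 32-bit word in bytes 12..15; the */
+#         /* carry never propagates into bytes 0..11.              */
+#         for (int n = 15; n >= 12; --n)
+#             if (++ctr[n] != 0)
+#                 break;
+#     }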
+{
+######################################################################
+# void bsaes_xts_[en|de]crypt(const char *inp,char *out,size_t len,
+#	const AES_KEY *key1, const AES_KEY *key2,
+#	const unsigned char iv[16]);
+#
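+# Editor's note: in the default build (XTS_CHAIN_TWEAK undefined) the
+# functions below derive the initial tweak by encrypting iv[] with
+# key2, then process the data with key1; key2 is therefore always an
+# *encryption* key, even for bsaes_xts_decrypt.  A minimal C caller
+# sketch (hypothetical key names; keys assumed set up beforehand):
+#
+#     bsaes_xts_encrypt(in, out, len, &data_enc_key, &tweak_key, iv);
+#     bsaes_xts_decrypt(in, out, len, &data_dec_key, &tweak_key, iv);
+#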
+my ($inp,$out,$len,$key,$rounds,$magic,$fp)=(map("r$_",(7..10,1..3)));
+my $const="r6";		# returned by _bsaes_key_convert
+my $twmask=@XMM[5];
+my @T=@XMM[6..7];
+
+$code.=<<___;
+.globl	bsaes_xts_encrypt
+.type	bsaes_xts_encrypt,%function
+.align	4
+bsaes_xts_encrypt:
+	mov	ip, sp
+	stmdb	sp!, {r4-r10, lr}		@ 0x20
+	VFP_ABI_PUSH
+	mov	r6, sp				@ future $fp
+
+	mov	$inp, r0
+	mov	$out, r1
+	mov	$len, r2
+	mov	$key, r3
+
+	sub	r0, sp, #0x10			@ 0x10
+	bic	r0, #0xf			@ align at 16 bytes
+	mov	sp, r0
+
+#ifdef	XTS_CHAIN_TWEAK
+	ldr	r0, [ip]			@ pointer to input tweak
+#else
+	@ generate initial tweak
+	ldr	r0, [ip, #4]			@ iv[]
+	mov	r1, sp
+	ldr	r2, [ip, #0]			@ key2
+	bl	AES_encrypt
+	mov	r0,sp				@ pointer to initial tweak
+#endif
+
+	ldr	$rounds, [$key, #240]		@ get # of rounds
+	mov	$fp, r6
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	@ allocate the key schedule on the stack
+	sub	r12, sp, $rounds, lsl#7		@ 128 bytes per inner round key
+	@ add	r12, #`128-32`			@ size of bit-sliced key schedule
+	sub	r12, #`32+16`			@ place for tweak[9]
+
+	@ populate the key schedule
+	mov	r4, $key			@ pass key
+	mov	r5, $rounds			@ pass # of rounds
+	mov	sp, r12
+	add	r12, #0x90			@ pass key schedule
+	bl	_bsaes_key_convert
+	veor	@XMM[7], @XMM[7], @XMM[15]	@ fix up last round key
+	vstmia	r12, {@XMM[7]}			@ save last round key
+#else
+	ldr	r12, [$key, #244]
+	eors	r12, #1
+	beq	0f
+
+	str	r12, [$key, #244]
+	mov	r4, $key			@ pass key
+	mov	r5, $rounds			@ pass # of rounds
+	add	r12, $key, #248			@ pass key schedule
+	bl	_bsaes_key_convert
+	veor	@XMM[7], @XMM[7], @XMM[15]	@ fix up last round key
+	vstmia	r12, {@XMM[7]}
+
+.align	2
+0:	sub	sp, #0x90			@ place for tweak[9]
+#endif
+
+	vld1.8	{@XMM[8]}, [r0]			@ initial tweak
+	adr	$magic, .Lxts_magic
+
+	subs	$len, #0x80
+	blo	.Lxts_enc_short
+	b	.Lxts_enc_loop
+
+.align	4
+.Lxts_enc_loop:
+	vldmia		$magic, {$twmask}	@ load XTS magic
+	vshr.s64	@T[0], @XMM[8], #63
+	mov		r0, sp
+	vand		@T[0], @T[0], $twmask
+___
+for($i=9;$i<16;$i++) {
+$code.=<<___;
+	vadd.u64	@XMM[$i], @XMM[$i-1], @XMM[$i-1]
+	vst1.64		{@XMM[$i-1]}, [r0,:128]!
+	vswp		`&Dhi("@T[0]")`,`&Dlo("@T[0]")`
+	vshr.s64	@T[1], @XMM[$i], #63
+	veor		@XMM[$i], @XMM[$i], @T[0]
+	vand		@T[1], @T[1], $twmask
+___
+	@T=reverse(@T);
+
+$code.=<<___ if ($i>=10);
+	vld1.8		{@XMM[$i-10]}, [$inp]!
+___
+$code.=<<___ if ($i>=11);
+	veor		@XMM[$i-11], @XMM[$i-11], @XMM[$i-3]
+___
+}
+$code.=<<___;
+	vadd.u64	@XMM[8], @XMM[15], @XMM[15]
+	vst1.64		{@XMM[15]}, [r0,:128]!
+	vswp		`&Dhi("@T[0]")`,`&Dlo("@T[0]")`
+	veor		@XMM[8], @XMM[8], @T[0]
+	vst1.64		{@XMM[8]}, [r0,:128]		@ next round tweak
+
+	vld1.8		{@XMM[6]-@XMM[7]}, [$inp]!
+	veor		@XMM[5], @XMM[5], @XMM[13]
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	add		r4, sp, #0x90			@ pass key schedule
+#else
+	add		r4, $key, #248			@ pass key schedule
+#endif
+	veor		@XMM[6], @XMM[6], @XMM[14]
+	mov		r5, $rounds			@ pass rounds
+	veor		@XMM[7], @XMM[7], @XMM[15]
+	mov		r0, sp
+
+	bl		_bsaes_encrypt8
+
+	vld1.64		{@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
+	vld1.64		{@XMM[10]-@XMM[11]}, [r0,:128]!
+	veor		@XMM[0], @XMM[0], @XMM[ 8]
+	vld1.64		{@XMM[12]-@XMM[13]}, [r0,:128]!
+	veor		@XMM[1], @XMM[1], @XMM[ 9]
+	veor		@XMM[8], @XMM[4], @XMM[10]
+	vst1.8		{@XMM[0]-@XMM[1]}, [$out]!
+	veor		@XMM[9], @XMM[6], @XMM[11]
+	vld1.64		{@XMM[14]-@XMM[15]}, [r0,:128]!
+	veor		@XMM[10], @XMM[3], @XMM[12]
+	vst1.8		{@XMM[8]-@XMM[9]}, [$out]!
+	veor		@XMM[11], @XMM[7], @XMM[13]
+	veor		@XMM[12], @XMM[2], @XMM[14]
+	vst1.8		{@XMM[10]-@XMM[11]}, [$out]!
+	veor		@XMM[13], @XMM[5], @XMM[15]
+	vst1.8		{@XMM[12]-@XMM[13]}, [$out]!
+
+	vld1.64		{@XMM[8]}, [r0,:128]		@ next round tweak
+
+	subs		$len, #0x80
+	bpl		.Lxts_enc_loop
+
+.Lxts_enc_short:
+	adds		$len, #0x70
+	bmi		.Lxts_enc_done
+
+	vldmia		$magic, {$twmask}	@ load XTS magic
+	vshr.s64	@T[0], @XMM[8], #63
+	mov		r0, sp
+	vand		@T[0], @T[0], $twmask
+___
+for($i=9;$i<16;$i++) {
+$code.=<<___;
+	vadd.u64	@XMM[$i], @XMM[$i-1], @XMM[$i-1]
+	vst1.64		{@XMM[$i-1]}, [r0,:128]!
+	vswp		`&Dhi("@T[0]")`,`&Dlo("@T[0]")`
+	vshr.s64	@T[1], @XMM[$i], #63
+	veor		@XMM[$i], @XMM[$i], @T[0]
+	vand		@T[1], @T[1], $twmask
+___
+	@T=reverse(@T);
+
+$code.=<<___ if ($i>=10);
+	vld1.8		{@XMM[$i-10]}, [$inp]!
+	subs		$len, #0x10
+	bmi		.Lxts_enc_`$i-9`
+___
+$code.=<<___ if ($i>=11);
+	veor		@XMM[$i-11], @XMM[$i-11], @XMM[$i-3]
+___
+}
+$code.=<<___;
+	sub		$len, #0x10
+	vst1.64		{@XMM[15]}, [r0,:128]		@ next round tweak
+
+	vld1.8		{@XMM[6]}, [$inp]!
+	veor		@XMM[5], @XMM[5], @XMM[13]
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	add		r4, sp, #0x90			@ pass key schedule
+#else
+	add		r4, $key, #248			@ pass key schedule
+#endif
+	veor		@XMM[6], @XMM[6], @XMM[14]
+	mov		r5, $rounds			@ pass rounds
+	mov		r0, sp
+
+	bl		_bsaes_encrypt8
+
+	vld1.64		{@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
+	vld1.64		{@XMM[10]-@XMM[11]}, [r0,:128]!
+	veor		@XMM[0], @XMM[0], @XMM[ 8]
+	vld1.64		{@XMM[12]-@XMM[13]}, [r0,:128]!
+	veor		@XMM[1], @XMM[1], @XMM[ 9]
+	veor		@XMM[8], @XMM[4], @XMM[10]
+	vst1.8		{@XMM[0]-@XMM[1]}, [$out]!
+	veor		@XMM[9], @XMM[6], @XMM[11]
+	vld1.64		{@XMM[14]}, [r0,:128]!
+	veor		@XMM[10], @XMM[3], @XMM[12]
+	vst1.8		{@XMM[8]-@XMM[9]}, [$out]!
+	veor		@XMM[11], @XMM[7], @XMM[13]
+	veor		@XMM[12], @XMM[2], @XMM[14]
+	vst1.8		{@XMM[10]-@XMM[11]}, [$out]!
+	vst1.8		{@XMM[12]}, [$out]!
+
+	vld1.64		{@XMM[8]}, [r0,:128]		@ next round tweak
+	b		.Lxts_enc_done
+.align	4
+.Lxts_enc_6:
+	veor		@XMM[4], @XMM[4], @XMM[12]
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	add		r4, sp, #0x90			@ pass key schedule
+#else
+	add		r4, $key, #248			@ pass key schedule
+#endif
+	veor		@XMM[5], @XMM[5], @XMM[13]
+	mov		r5, $rounds			@ pass rounds
+	mov		r0, sp
+
+	bl		_bsaes_encrypt8
+
+	vld1.64		{@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
+	vld1.64		{@XMM[10]-@XMM[11]}, [r0,:128]!
+	veor		@XMM[0], @XMM[0], @XMM[ 8]
+	vld1.64		{@XMM[12]-@XMM[13]}, [r0,:128]!
+	veor		@XMM[1], @XMM[1], @XMM[ 9]
+	veor		@XMM[8], @XMM[4], @XMM[10]
+	vst1.8		{@XMM[0]-@XMM[1]}, [$out]!
+	veor		@XMM[9], @XMM[6], @XMM[11]
+	veor		@XMM[10], @XMM[3], @XMM[12]
+	vst1.8		{@XMM[8]-@XMM[9]}, [$out]!
+	veor		@XMM[11], @XMM[7], @XMM[13]
+	vst1.8		{@XMM[10]-@XMM[11]}, [$out]!
+
+	vld1.64		{@XMM[8]}, [r0,:128]		@ next round tweak
+	b		.Lxts_enc_done
+
+@ put this in range for both ARM and Thumb mode adr instructions
+.align	5
+.Lxts_magic:
+	.quad	1, 0x87
+
+.align	5
+.Lxts_enc_5:
+	veor		@XMM[3], @XMM[3], @XMM[11]
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	add		r4, sp, #0x90			@ pass key schedule
+#else
+	add		r4, $key, #248			@ pass key schedule
+#endif
+	veor		@XMM[4], @XMM[4], @XMM[12]
+	mov		r5, $rounds			@ pass rounds
+	mov		r0, sp
+
+	bl		_bsaes_encrypt8
+
+	vld1.64		{@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
+	vld1.64		{@XMM[10]-@XMM[11]}, [r0,:128]!
+	veor		@XMM[0], @XMM[0], @XMM[ 8]
+	vld1.64		{@XMM[12]}, [r0,:128]!
+	veor		@XMM[1], @XMM[1], @XMM[ 9]
+	veor		@XMM[8], @XMM[4], @XMM[10]
+	vst1.8		{@XMM[0]-@XMM[1]}, [$out]!
+	veor		@XMM[9], @XMM[6], @XMM[11]
+	veor		@XMM[10], @XMM[3], @XMM[12]
+	vst1.8		{@XMM[8]-@XMM[9]}, [$out]!
+	vst1.8		{@XMM[10]}, [$out]!
+
+	vld1.64		{@XMM[8]}, [r0,:128]		@ next round tweak
+	b		.Lxts_enc_done
+.align	4
+.Lxts_enc_4:
+	veor		@XMM[2], @XMM[2], @XMM[10]
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	add		r4, sp, #0x90			@ pass key schedule
+#else
+	add		r4, $key, #248			@ pass key schedule
+#endif
+	veor		@XMM[3], @XMM[3], @XMM[11]
+	mov		r5, $rounds			@ pass rounds
+	mov		r0, sp
+
+	bl		_bsaes_encrypt8
+
+	vld1.64		{@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
+	vld1.64		{@XMM[10]-@XMM[11]}, [r0,:128]!
+	veor		@XMM[0], @XMM[0], @XMM[ 8]
+	veor		@XMM[1], @XMM[1], @XMM[ 9]
+	veor		@XMM[8], @XMM[4], @XMM[10]
+	vst1.8		{@XMM[0]-@XMM[1]}, [$out]!
+	veor		@XMM[9], @XMM[6], @XMM[11]
+	vst1.8		{@XMM[8]-@XMM[9]}, [$out]!
+
+	vld1.64		{@XMM[8]}, [r0,:128]		@ next round tweak
+	b		.Lxts_enc_done
+.align	4
+.Lxts_enc_3:
+	veor		@XMM[1], @XMM[1], @XMM[9]
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	add		r4, sp, #0x90			@ pass key schedule
+#else
+	add		r4, $key, #248			@ pass key schedule
+#endif
+	veor		@XMM[2], @XMM[2], @XMM[10]
+	mov		r5, $rounds			@ pass rounds
+	mov		r0, sp
+
+	bl		_bsaes_encrypt8
+
+	vld1.64		{@XMM[8]-@XMM[9]}, [r0,:128]!
+	vld1.64		{@XMM[10]}, [r0,:128]!
+	veor		@XMM[0], @XMM[0], @XMM[ 8]
+	veor		@XMM[1], @XMM[1], @XMM[ 9]
+	veor		@XMM[8], @XMM[4], @XMM[10]
+	vst1.8		{@XMM[0]-@XMM[1]}, [$out]!
+	vst1.8		{@XMM[8]}, [$out]!
+
+	vld1.64		{@XMM[8]}, [r0,:128]		@ next round tweak
+	b		.Lxts_enc_done
+.align	4
+.Lxts_enc_2:
+	veor		@XMM[0], @XMM[0], @XMM[8]
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	add		r4, sp, #0x90			@ pass key schedule
+#else
+	add		r4, $key, #248			@ pass key schedule
+#endif
+	veor		@XMM[1], @XMM[1], @XMM[9]
+	mov		r5, $rounds			@ pass rounds
+	mov		r0, sp
+
+	bl		_bsaes_encrypt8
+
+	vld1.64		{@XMM[8]-@XMM[9]}, [r0,:128]!
+	veor		@XMM[0], @XMM[0], @XMM[ 8]
+	veor		@XMM[1], @XMM[1], @XMM[ 9]
+	vst1.8		{@XMM[0]-@XMM[1]}, [$out]!
+
+	vld1.64		{@XMM[8]}, [r0,:128]		@ next round tweak
+	b		.Lxts_enc_done
+.align	4
+.Lxts_enc_1:
+	mov		r0, sp
+	veor		@XMM[0], @XMM[0], @XMM[8]
+	mov		r1, sp
+	vst1.8		{@XMM[0]}, [sp,:128]
+	mov		r2, $key
+	mov		r4, $fp				@ preserve fp
+
+	bl		AES_encrypt
+
+	vld1.8		{@XMM[0]}, [sp,:128]
+	veor		@XMM[0], @XMM[0], @XMM[8]
+	vst1.8		{@XMM[0]}, [$out]!
+	mov		$fp, r4
+
+	vmov		@XMM[8], @XMM[9]		@ next round tweak
+
+.Lxts_enc_done:
+#ifndef	XTS_CHAIN_TWEAK
+	adds		$len, #0x10
+	beq		.Lxts_enc_ret
+	sub		r6, $out, #0x10
+
+.Lxts_enc_steal:
+	ldrb		r0, [$inp], #1
+	ldrb		r1, [$out, #-0x10]
+	strb		r0, [$out, #-0x10]
+	strb		r1, [$out], #1
+
+	subs		$len, #1
+	bhi		.Lxts_enc_steal
+
+	vld1.8		{@XMM[0]}, [r6]
+	mov		r0, sp
+	veor		@XMM[0], @XMM[0], @XMM[8]
+	mov		r1, sp
+	vst1.8		{@XMM[0]}, [sp,:128]
+	mov		r2, $key
+	mov		r4, $fp			@ preserve fp
+
+	bl		AES_encrypt
+
+	vld1.8		{@XMM[0]}, [sp,:128]
+	veor		@XMM[0], @XMM[0], @XMM[8]
+	vst1.8		{@XMM[0]}, [r6]
+	mov		$fp, r4
+#endif
+
+.Lxts_enc_ret:
+	bic		r0, $fp, #0xf
+	vmov.i32	q0, #0
+	vmov.i32	q1, #0
+#ifdef	XTS_CHAIN_TWEAK
+	ldr		r1, [$fp, #0x20+VFP_ABI_FRAME]	@ chain tweak
+#endif
+.Lxts_enc_bzero:				@ wipe key schedule [if any]
+	vstmia		sp!, {q0-q1}
+	cmp		sp, r0
+	bne		.Lxts_enc_bzero
+
+	mov		sp, $fp
+#ifdef	XTS_CHAIN_TWEAK
+	vst1.8		{@XMM[8]}, [r1]
+#endif
+	VFP_ABI_POP
+	ldmia		sp!, {r4-r10, pc}	@ return
+
+.size	bsaes_xts_encrypt,.-bsaes_xts_encrypt
+
+.globl	bsaes_xts_decrypt
+.type	bsaes_xts_decrypt,%function
+.align	4
+bsaes_xts_decrypt:
+	mov	ip, sp
+	stmdb	sp!, {r4-r10, lr}		@ 0x20
+	VFP_ABI_PUSH
+	mov	r6, sp				@ future $fp
+
+	mov	$inp, r0
+	mov	$out, r1
+	mov	$len, r2
+	mov	$key, r3
+
+	sub	r0, sp, #0x10			@ 0x10
+	bic	r0, #0xf			@ align at 16 bytes
+	mov	sp, r0
+
+#ifdef	XTS_CHAIN_TWEAK
+	ldr	r0, [ip]			@ pointer to input tweak
+#else
+	@ generate initial tweak
+	ldr	r0, [ip, #4]			@ iv[]
+	mov	r1, sp
+	ldr	r2, [ip, #0]			@ key2
+	bl	AES_encrypt
+	mov	r0, sp				@ pointer to initial tweak
+#endif
+
+	ldr	$rounds, [$key, #240]		@ get # of rounds
+	mov	$fp, r6
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	@ allocate the key schedule on the stack
+	sub	r12, sp, $rounds, lsl#7		@ 128 bytes per inner round key
+	@ add	r12, #`128-32`			@ size of bit-sliced key schedule
+	sub	r12, #`32+16`			@ place for tweak[9]
+
+	@ populate the key schedule
+	mov	r4, $key			@ pass key
+	mov	r5, $rounds			@ pass # of rounds
+	mov	sp, r12
+	add	r12, #0x90			@ pass key schedule
+	bl	_bsaes_key_convert
+	add	r4, sp, #0x90
+	vldmia	r4, {@XMM[6]}
+	vstmia	r12,  {@XMM[15]}		@ save last round key
+	veor	@XMM[7], @XMM[7], @XMM[6]	@ fix up round 0 key
+	vstmia	r4, {@XMM[7]}
+#else
+	ldr	r12, [$key, #244]
+	eors	r12, #1
+	beq	0f
+
+	str	r12, [$key, #244]
+	mov	r4, $key			@ pass key
+	mov	r5, $rounds			@ pass # of rounds
+	add	r12, $key, #248			@ pass key schedule
+	bl	_bsaes_key_convert
+	add	r4, $key, #248
+	vldmia	r4, {@XMM[6]}
+	vstmia	r12,  {@XMM[15]}		@ save last round key
+	veor	@XMM[7], @XMM[7], @XMM[6]	@ fix up round 0 key
+	vstmia	r4, {@XMM[7]}
+
+.align	2
+0:	sub	sp, #0x90			@ place for tweak[9]
+#endif
+	vld1.8	{@XMM[8]}, [r0]			@ initial tweak
+	adr	$magic, .Lxts_magic
+
+#ifndef	XTS_CHAIN_TWEAK
+	tst	$len, #0xf			@ if not multiple of 16
+	it	ne				@ Thumb2 thing, sanity check in ARM
+	subne	$len, #0x10			@ subtract another 16 bytes
+#endif
+	subs	$len, #0x80
+
+	blo	.Lxts_dec_short
+	b	.Lxts_dec_loop
+
+.align	4
+.Lxts_dec_loop:
+	vldmia		$magic, {$twmask}	@ load XTS magic
+	vshr.s64	@T[0], @XMM[8], #63
+	mov		r0, sp
+	vand		@T[0], @T[0], $twmask
+___
+for($i=9;$i<16;$i++) {
+$code.=<<___;
+	vadd.u64	@XMM[$i], @XMM[$i-1], @XMM[$i-1]
+	vst1.64		{@XMM[$i-1]}, [r0,:128]!
+	vswp		`&Dhi("@T[0]")`,`&Dlo("@T[0]")`
+	vshr.s64	@T[1], @XMM[$i], #63
+	veor		@XMM[$i], @XMM[$i], @T[0]
+	vand		@T[1], @T[1], $twmask
+___
+	@T=reverse(@T);
+
+$code.=<<___ if ($i>=10);
+	vld1.8		{@XMM[$i-10]}, [$inp]!
+___
+$code.=<<___ if ($i>=11);
+	veor		@XMM[$i-11], @XMM[$i-11], @XMM[$i-3]
+___
+}
+$code.=<<___;
+	vadd.u64	@XMM[8], @XMM[15], @XMM[15]
+	vst1.64		{@XMM[15]}, [r0,:128]!
+	vswp		`&Dhi("@T[0]")`,`&Dlo("@T[0]")`
+	veor		@XMM[8], @XMM[8], @T[0]
+	vst1.64		{@XMM[8]}, [r0,:128]		@ next round tweak
+
+	vld1.8		{@XMM[6]-@XMM[7]}, [$inp]!
+	veor		@XMM[5], @XMM[5], @XMM[13]
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	add		r4, sp, #0x90			@ pass key schedule
+#else
+	add		r4, $key, #248			@ pass key schedule
+#endif
+	veor		@XMM[6], @XMM[6], @XMM[14]
+	mov		r5, $rounds			@ pass rounds
+	veor		@XMM[7], @XMM[7], @XMM[15]
+	mov		r0, sp
+
+	bl		_bsaes_decrypt8
+
+	vld1.64		{@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
+	vld1.64		{@XMM[10]-@XMM[11]}, [r0,:128]!
+	veor		@XMM[0], @XMM[0], @XMM[ 8]
+	vld1.64		{@XMM[12]-@XMM[13]}, [r0,:128]!
+	veor		@XMM[1], @XMM[1], @XMM[ 9]
+	veor		@XMM[8], @XMM[6], @XMM[10]
+	vst1.8		{@XMM[0]-@XMM[1]}, [$out]!
+	veor		@XMM[9], @XMM[4], @XMM[11]
+	vld1.64		{@XMM[14]-@XMM[15]}, [r0,:128]!
+	veor		@XMM[10], @XMM[2], @XMM[12]
+	vst1.8		{@XMM[8]-@XMM[9]}, [$out]!
+	veor		@XMM[11], @XMM[7], @XMM[13]
+	veor		@XMM[12], @XMM[3], @XMM[14]
+	vst1.8		{@XMM[10]-@XMM[11]}, [$out]!
+	veor		@XMM[13], @XMM[5], @XMM[15]
+	vst1.8		{@XMM[12]-@XMM[13]}, [$out]!
+
+	vld1.64		{@XMM[8]}, [r0,:128]		@ next round tweak
+
+	subs		$len, #0x80
+	bpl		.Lxts_dec_loop
+
+.Lxts_dec_short:
+	adds		$len, #0x70
+	bmi		.Lxts_dec_done
+
+	vldmia		$magic, {$twmask}	@ load XTS magic
+	vshr.s64	@T[0], @XMM[8], #63
+	mov		r0, sp
+	vand		@T[0], @T[0], $twmask
+___
+for($i=9;$i<16;$i++) {
+$code.=<<___;
+	vadd.u64	@XMM[$i], @XMM[$i-1], @XMM[$i-1]
+	vst1.64		{@XMM[$i-1]}, [r0,:128]!
+	vswp		`&Dhi("@T[0]")`,`&Dlo("@T[0]")`
+	vshr.s64	@T[1], @XMM[$i], #63
+	veor		@XMM[$i], @XMM[$i], @T[0]
+	vand		@T[1], @T[1], $twmask
+___
+	@T=reverse(@T);
+
+$code.=<<___ if ($i>=10);
+	vld1.8		{@XMM[$i-10]}, [$inp]!
+	subs		$len, #0x10
+	bmi		.Lxts_dec_`$i-9`
+___
+$code.=<<___ if ($i>=11);
+	veor		@XMM[$i-11], @XMM[$i-11], @XMM[$i-3]
+___
+}
+$code.=<<___;
+	sub		$len, #0x10
+	vst1.64		{@XMM[15]}, [r0,:128]		@ next round tweak
+
+	vld1.8		{@XMM[6]}, [$inp]!
+	veor		@XMM[5], @XMM[5], @XMM[13]
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	add		r4, sp, #0x90			@ pass key schedule
+#else
+	add		r4, $key, #248			@ pass key schedule
+#endif
+	veor		@XMM[6], @XMM[6], @XMM[14]
+	mov		r5, $rounds			@ pass rounds
+	mov		r0, sp
+
+	bl		_bsaes_decrypt8
+
+	vld1.64		{@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
+	vld1.64		{@XMM[10]-@XMM[11]}, [r0,:128]!
+	veor		@XMM[0], @XMM[0], @XMM[ 8]
+	vld1.64		{@XMM[12]-@XMM[13]}, [r0,:128]!
+	veor		@XMM[1], @XMM[1], @XMM[ 9]
+	veor		@XMM[8], @XMM[6], @XMM[10]
+	vst1.8		{@XMM[0]-@XMM[1]}, [$out]!
+	veor		@XMM[9], @XMM[4], @XMM[11]
+	vld1.64		{@XMM[14]}, [r0,:128]!
+	veor		@XMM[10], @XMM[2], @XMM[12]
+	vst1.8		{@XMM[8]-@XMM[9]}, [$out]!
+	veor		@XMM[11], @XMM[7], @XMM[13]
+	veor		@XMM[12], @XMM[3], @XMM[14]
+	vst1.8		{@XMM[10]-@XMM[11]}, [$out]!
+	vst1.8		{@XMM[12]}, [$out]!
+
+	vld1.64		{@XMM[8]}, [r0,:128]		@ next round tweak
+	b		.Lxts_dec_done
+.align	4
+.Lxts_dec_6:
+	vst1.64		{@XMM[14]}, [r0,:128]		@ next round tweak
+
+	veor		@XMM[4], @XMM[4], @XMM[12]
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	add		r4, sp, #0x90			@ pass key schedule
+#else
+	add		r4, $key, #248			@ pass key schedule
+#endif
+	veor		@XMM[5], @XMM[5], @XMM[13]
+	mov		r5, $rounds			@ pass rounds
+	mov		r0, sp
+
+	bl		_bsaes_decrypt8
+
+	vld1.64		{@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
+	vld1.64		{@XMM[10]-@XMM[11]}, [r0,:128]!
+	veor		@XMM[0], @XMM[0], @XMM[ 8]
+	vld1.64		{@XMM[12]-@XMM[13]}, [r0,:128]!
+	veor		@XMM[1], @XMM[1], @XMM[ 9]
+	veor		@XMM[8], @XMM[6], @XMM[10]
+	vst1.8		{@XMM[0]-@XMM[1]}, [$out]!
+	veor		@XMM[9], @XMM[4], @XMM[11]
+	veor		@XMM[10], @XMM[2], @XMM[12]
+	vst1.8		{@XMM[8]-@XMM[9]}, [$out]!
+	veor		@XMM[11], @XMM[7], @XMM[13]
+	vst1.8		{@XMM[10]-@XMM[11]}, [$out]!
+
+	vld1.64		{@XMM[8]}, [r0,:128]		@ next round tweak
+	b		.Lxts_dec_done
+.align	4
+.Lxts_dec_5:
+	veor		@XMM[3], @XMM[3], @XMM[11]
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	add		r4, sp, #0x90			@ pass key schedule
+#else
+	add		r4, $key, #248			@ pass key schedule
+#endif
+	veor		@XMM[4], @XMM[4], @XMM[12]
+	mov		r5, $rounds			@ pass rounds
+	mov		r0, sp
+
+	bl		_bsaes_decrypt8
+
+	vld1.64		{@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
+	vld1.64		{@XMM[10]-@XMM[11]}, [r0,:128]!
+	veor		@XMM[0], @XMM[0], @XMM[ 8]
+	vld1.64		{@XMM[12]}, [r0,:128]!
+	veor		@XMM[1], @XMM[1], @XMM[ 9]
+	veor		@XMM[8], @XMM[6], @XMM[10]
+	vst1.8		{@XMM[0]-@XMM[1]}, [$out]!
+	veor		@XMM[9], @XMM[4], @XMM[11]
+	veor		@XMM[10], @XMM[2], @XMM[12]
+	vst1.8		{@XMM[8]-@XMM[9]}, [$out]!
+	vst1.8		{@XMM[10]}, [$out]!
+
+	vld1.64		{@XMM[8]}, [r0,:128]		@ next round tweak
+	b		.Lxts_dec_done
+.align	4
+.Lxts_dec_4:
+	veor		@XMM[2], @XMM[2], @XMM[10]
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	add		r4, sp, #0x90			@ pass key schedule
+#else
+	add		r4, $key, #248			@ pass key schedule
+#endif
+	veor		@XMM[3], @XMM[3], @XMM[11]
+	mov		r5, $rounds			@ pass rounds
+	mov		r0, sp
+
+	bl		_bsaes_decrypt8
+
+	vld1.64		{@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
+	vld1.64		{@XMM[10]-@XMM[11]}, [r0,:128]!
+	veor		@XMM[0], @XMM[0], @XMM[ 8]
+	veor		@XMM[1], @XMM[1], @XMM[ 9]
+	veor		@XMM[8], @XMM[6], @XMM[10]
+	vst1.8		{@XMM[0]-@XMM[1]}, [$out]!
+	veor		@XMM[9], @XMM[4], @XMM[11]
+	vst1.8		{@XMM[8]-@XMM[9]}, [$out]!
+
+	vld1.64		{@XMM[8]}, [r0,:128]		@ next round tweak
+	b		.Lxts_dec_done
+.align	4
+.Lxts_dec_3:
+	veor		@XMM[1], @XMM[1], @XMM[9]
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	add		r4, sp, #0x90			@ pass key schedule
+#else
+	add		r4, $key, #248			@ pass key schedule
+#endif
+	veor		@XMM[2], @XMM[2], @XMM[10]
+	mov		r5, $rounds			@ pass rounds
+	mov		r0, sp
+
+	bl		_bsaes_decrypt8
+
+	vld1.64		{@XMM[8]-@XMM[9]}, [r0,:128]!
+	vld1.64		{@XMM[10]}, [r0,:128]!
+	veor		@XMM[0], @XMM[0], @XMM[ 8]
+	veor		@XMM[1], @XMM[1], @XMM[ 9]
+	veor		@XMM[8], @XMM[6], @XMM[10]
+	vst1.8		{@XMM[0]-@XMM[1]}, [$out]!
+	vst1.8		{@XMM[8]}, [$out]!
+
+	vld1.64		{@XMM[8]}, [r0,:128]		@ next round tweak
+	b		.Lxts_dec_done
+.align	4
+.Lxts_dec_2:
+	veor		@XMM[0], @XMM[0], @XMM[8]
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	add		r4, sp, #0x90			@ pass key schedule
+#else
+	add		r4, $key, #248			@ pass key schedule
+#endif
+	veor		@XMM[1], @XMM[1], @XMM[9]
+	mov		r5, $rounds			@ pass rounds
+	mov		r0, sp
+
+	bl		_bsaes_decrypt8
+
+	vld1.64		{@XMM[8]-@XMM[9]}, [r0,:128]!
+	veor		@XMM[0], @XMM[0], @XMM[ 8]
+	veor		@XMM[1], @XMM[1], @XMM[ 9]
+	vst1.8		{@XMM[0]-@XMM[1]}, [$out]!
+
+	vld1.64		{@XMM[8]}, [r0,:128]		@ next round tweak
+	b		.Lxts_dec_done
+.align	4
+.Lxts_dec_1:
+	mov		r0, sp
+	veor		@XMM[0], @XMM[0], @XMM[8]
+	mov		r1, sp
+	vst1.8		{@XMM[0]}, [sp,:128]
+	mov		r5, $magic			@ preserve magic
+	mov		r2, $key
+	mov		r4, $fp				@ preserve fp
+
+	bl		AES_decrypt
+
+	vld1.8		{@XMM[0]}, [sp,:128]
+	veor		@XMM[0], @XMM[0], @XMM[8]
+	vst1.8		{@XMM[0]}, [$out]!
+	mov		$fp, r4
+	mov		$magic, r5
+
+	vmov		@XMM[8], @XMM[9]		@ next round tweak
+
+.Lxts_dec_done:
+#ifndef	XTS_CHAIN_TWEAK
+	adds		$len, #0x10
+	beq		.Lxts_dec_ret
+
+	@ calculate one round of extra tweak for the stolen ciphertext
+	vldmia		$magic, {$twmask}
+	vshr.s64	@XMM[6], @XMM[8], #63
+	vand		@XMM[6], @XMM[6], $twmask
+	vadd.u64	@XMM[9], @XMM[8], @XMM[8]
+	vswp		`&Dhi("@XMM[6]")`,`&Dlo("@XMM[6]")`
+	veor		@XMM[9], @XMM[9], @XMM[6]
+
+	@ perform the final decryption with the last tweak value
+	vld1.8		{@XMM[0]}, [$inp]!
+	mov		r0, sp
+	veor		@XMM[0], @XMM[0], @XMM[9]
+	mov		r1, sp
+	vst1.8		{@XMM[0]}, [sp,:128]
+	mov		r2, $key
+	mov		r4, $fp			@ preserve fp
+
+	bl		AES_decrypt
+
+	vld1.8		{@XMM[0]}, [sp,:128]
+	veor		@XMM[0], @XMM[0], @XMM[9]
+	vst1.8		{@XMM[0]}, [$out]
+
+	mov		r6, $out
+.Lxts_dec_steal:
+	ldrb		r1, [$out]
+	ldrb		r0, [$inp], #1
+	strb		r1, [$out, #0x10]
+	strb		r0, [$out], #1
+
+	subs		$len, #1
+	bhi		.Lxts_dec_steal
+
+	vld1.8		{@XMM[0]}, [r6]
+	mov		r0, sp
+	veor		@XMM[0], @XMM[8]
+	mov		r1, sp
+	vst1.8		{@XMM[0]}, [sp,:128]
+	mov		r2, $key
+
+	bl		AES_decrypt
+
+	vld1.8		{@XMM[0]}, [sp,:128]
+	veor		@XMM[0], @XMM[0], @XMM[8]
+	vst1.8		{@XMM[0]}, [r6]
+	mov		$fp, r4
+#endif
+
+.Lxts_dec_ret:
+	bic		r0, $fp, #0xf
+	vmov.i32	q0, #0
+	vmov.i32	q1, #0
+#ifdef	XTS_CHAIN_TWEAK
+	ldr		r1, [$fp, #0x20+VFP_ABI_FRAME]	@ chain tweak
+#endif
+.Lxts_dec_bzero:				@ wipe key schedule [if any]
+	vstmia		sp!, {q0-q1}
+	cmp		sp, r0
+	bne		.Lxts_dec_bzero
+
+	mov		sp, $fp
+#ifdef	XTS_CHAIN_TWEAK
+	vst1.8		{@XMM[8]}, [r1]
+#endif
+	VFP_ABI_POP
+	ldmia		sp!, {r4-r10, pc}	@ return
+
+.size	bsaes_xts_decrypt,.-bsaes_xts_decrypt
+___
+}
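+# Editor's note: the per-block tweak update in the loops above (the
+# vadd.u64/vshr.s64/vand/vswp/veor sequence against .Lxts_magic =
+# {1, 0x87}) is multiplication by x in GF(2^128) modulo
+# x^128 + x^7 + x^2 + x + 1.  A minimal C sketch of one doubling, with
+# the tweak held as two little-endian 64-bit halves (illustration
+# only):
+#
+#     static void xts_double(unsigned long long t[2])
+#     {
+#         unsigned long long carry = t[1] >> 63;    /* bit shifted out */
+#         t[1] = (t[1] << 1) | (t[0] >> 63);
+#         t[0] = (t[0] << 1) ^ (carry ? 0x87 : 0);  /* reduce          */
+#     }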
+$code.=<<___;
+#endif
+___
+
+$code =~ s/\`([^\`]*)\`/eval($1)/gem;
+
+open SELF,$0;
+while(<SELF>) {
+	next if (/^#!/);
+	last if (!s/^#/@/ and !/^$/);
+	print;
+}
+close SELF;
+
+print $code;
+
+close STDOUT or die "error closing STDOUT: $!";
diff --git a/ap/lib/libssl/openssl-1.1.1o/crypto/aes/asm/vpaes-armv8.pl b/ap/lib/libssl/openssl-1.1.1o/crypto/aes/asm/vpaes-armv8.pl
new file mode 100755
index 0000000..7a9ffbd
--- /dev/null
+++ b/ap/lib/libssl/openssl-1.1.1o/crypto/aes/asm/vpaes-armv8.pl
@@ -0,0 +1,1277 @@
+#! /usr/bin/env perl
+# Copyright 2015-2020 The OpenSSL Project Authors. All Rights Reserved.
+#
+# Licensed under the OpenSSL license (the "License").  You may not use
+# this file except in compliance with the License.  You can obtain a copy
+# in the file LICENSE in the source distribution or at
+# https://www.openssl.org/source/license.html
+
+
+######################################################################
+## Constant-time SSSE3 AES core implementation.
+## version 0.1
+##
+## By Mike Hamburg (Stanford University), 2009
+## Public domain.
+##
+## For details see http://shiftleft.org/papers/vector_aes/ and
+## http://crypto.stanford.edu/vpaes/.
+##
+######################################################################
+# ARMv8 NEON adaptation by <appro@openssl.org>
+#
+# The reason for undertaking this effort is that at least one popular
+# SoC based on Cortex-A53 lacks the crypto extensions.
+#
+#                   CBC enc     ECB enc/dec(*)   [bit-sliced enc/dec]
+# Cortex-A53        21.5        18.1/20.6        [17.5/19.8         ]
+# Cortex-A57        36.0(**)    20.4/24.9(**)    [14.4/16.6         ]
+# X-Gene            45.9(**)    45.8/57.7(**)    [33.1/37.6(**)     ]
+# Denver(***)       16.6(**)    15.1/17.8(**)    [8.80/9.93         ]
+# Apple A7(***)     22.7(**)    10.9/14.3        [8.45/10.0         ]
+# Mongoose(***)     26.3(**)    21.0/25.0(**)    [13.3/16.8         ]
+#
+# (*)	ECB denotes approximate result for parallelizable modes
+#	such as CBC decrypt, CTR, etc.;
+# (**)	these results are worse than scalar compiler-generated
+#	code, but it's constant-time and therefore preferred;
+# (***)	presented for reference/comparison purposes;
+
+$flavour = shift;
+while (($output=shift) && ($output!~/\w[\w\-]*\.\w+$/)) {}
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+( $xlate="${dir}arm-xlate.pl" and -f $xlate ) or
+( $xlate="${dir}../../perlasm/arm-xlate.pl" and -f $xlate) or
+die "can't locate arm-xlate.pl";
+
+open OUT,"| \"$^X\" $xlate $flavour $output";
+*STDOUT=*OUT;
+
+$code.=<<___;
+.text
+
+.type	_vpaes_consts,%object
+.align	7	// totally strategic alignment
+_vpaes_consts:
+.Lk_mc_forward:	// mc_forward
+	.quad	0x0407060500030201, 0x0C0F0E0D080B0A09
+	.quad	0x080B0A0904070605, 0x000302010C0F0E0D
+	.quad	0x0C0F0E0D080B0A09, 0x0407060500030201
+	.quad	0x000302010C0F0E0D, 0x080B0A0904070605
+.Lk_mc_backward:// mc_backward
+	.quad	0x0605040702010003, 0x0E0D0C0F0A09080B
+	.quad	0x020100030E0D0C0F, 0x0A09080B06050407
+	.quad	0x0E0D0C0F0A09080B, 0x0605040702010003
+	.quad	0x0A09080B06050407, 0x020100030E0D0C0F
+.Lk_sr:		// sr
+	.quad	0x0706050403020100, 0x0F0E0D0C0B0A0908
+	.quad	0x030E09040F0A0500, 0x0B06010C07020D08
+	.quad	0x0F060D040B020900, 0x070E050C030A0108
+	.quad	0x0B0E0104070A0D00, 0x0306090C0F020508
+
+//
+// "Hot" constants
+//
+.Lk_inv:	// inv, inva
+	.quad	0x0E05060F0D080180, 0x040703090A0B0C02
+	.quad	0x01040A060F0B0780, 0x030D0E0C02050809
+.Lk_ipt:	// input transform (lo, hi)
+	.quad	0xC2B2E8985A2A7000, 0xCABAE09052227808
+	.quad	0x4C01307D317C4D00, 0xCD80B1FCB0FDCC81
+.Lk_sbo:	// sbou, sbot
+	.quad	0xD0D26D176FBDC700, 0x15AABF7AC502A878
+	.quad	0xCFE474A55FBB6A00, 0x8E1E90D1412B35FA
+.Lk_sb1:	// sb1u, sb1t
+	.quad	0x3618D415FAE22300, 0x3BF7CCC10D2ED9EF
+	.quad	0xB19BE18FCB503E00, 0xA5DF7A6E142AF544
+.Lk_sb2:	// sb2u, sb2t
+	.quad	0x69EB88400AE12900, 0xC2A163C8AB82234A
+	.quad	0xE27A93C60B712400, 0x5EB7E955BC982FCD
+
+//
+//  Decryption stuff
+//
+.Lk_dipt:	// decryption input transform
+	.quad	0x0F505B040B545F00, 0x154A411E114E451A
+	.quad	0x86E383E660056500, 0x12771772F491F194
+.Lk_dsbo:	// decryption sbox final output
+	.quad	0x1387EA537EF94000, 0xC7AA6DB9D4943E2D
+	.quad	0x12D7560F93441D00, 0xCA4B8159D8C58E9C
+.Lk_dsb9:	// decryption sbox output *9*u, *9*t
+	.quad	0x851C03539A86D600, 0xCAD51F504F994CC9
+	.quad	0xC03B1789ECD74900, 0x725E2C9EB2FBA565
+.Lk_dsbd:	// decryption sbox output *D*u, *D*t
+	.quad	0x7D57CCDFE6B1A200, 0xF56E9B13882A4439
+	.quad	0x3CE2FAF724C6CB00, 0x2931180D15DEEFD3
+.Lk_dsbb:	// decryption sbox output *B*u, *B*t
+	.quad	0xD022649296B44200, 0x602646F6B0F2D404
+	.quad	0xC19498A6CD596700, 0xF3FF0C3E3255AA6B
+.Lk_dsbe:	// decryption sbox output *E*u, *E*t
+	.quad	0x46F2929626D4D000, 0x2242600464B4F6B0
+	.quad	0x0C55A6CDFFAAC100, 0x9467F36B98593E32
+
+//
+//  Key schedule constants
+//
+.Lk_dksd:	// decryption key schedule: invskew x*D
+	.quad	0xFEB91A5DA3E44700, 0x0740E3A45A1DBEF9
+	.quad	0x41C277F4B5368300, 0x5FDC69EAAB289D1E
+.Lk_dksb:	// decryption key schedule: invskew x*B
+	.quad	0x9A4FCA1F8550D500, 0x03D653861CC94C99
+	.quad	0x115BEDA7B6FC4A00, 0xD993256F7E3482C8
+.Lk_dkse:	// decryption key schedule: invskew x*E + 0x63
+	.quad	0xD5031CCA1FC9D600, 0x53859A4C994F5086
+	.quad	0xA23196054FDC7BE8, 0xCD5EF96A20B31487
+.Lk_dks9:	// decryption key schedule: invskew x*9
+	.quad	0xB6116FC87ED9A700, 0x4AED933482255BFC
+	.quad	0x4576516227143300, 0x8BB89FACE9DAFDCE
+
+.Lk_rcon:	// rcon
+	.quad	0x1F8391B9AF9DEEB6, 0x702A98084D7C7D81
+
+.Lk_opt:	// output transform
+	.quad	0xFF9F4929D6B66000, 0xF7974121DEBE6808
+	.quad	0x01EDBD5150BCEC00, 0xE10D5DB1B05C0CE0
+.Lk_deskew:	// deskew tables: inverts the sbox's "skew"
+	.quad	0x07E4A34047A4E300, 0x1DFEB95A5DBEF91A
+	.quad	0x5F36B5DC83EA6900, 0x2841C2ABF49D1E77
+
+.asciz  "Vector Permutation AES for ARMv8, Mike Hamburg (Stanford University)"
+.size	_vpaes_consts,.-_vpaes_consts
+.align	6
+___
+
+{
+my ($inp,$out,$key) = map("x$_",(0..2));
+
+my ($invlo,$invhi,$iptlo,$ipthi,$sbou,$sbot) = map("v$_.16b",(18..23));
+my ($sb1u,$sb1t,$sb2u,$sb2t) = map("v$_.16b",(24..27));
+my ($sb9u,$sb9t,$sbdu,$sbdt,$sbbu,$sbbt,$sbeu,$sbet)=map("v$_.16b",(24..31));
+
+$code.=<<___;
+##
+##  _aes_preheat
+##
+##  Fills register %r10 -> .aes_consts (so you can -fPIC)
+##  and %xmm9-%xmm15 as specified below.
+##
+.type	_vpaes_encrypt_preheat,%function
+.align	4
+_vpaes_encrypt_preheat:
+	adr	x10, .Lk_inv
+	movi	v17.16b, #0x0f
+	ld1	{v18.2d-v19.2d}, [x10],#32	// .Lk_inv
+	ld1	{v20.2d-v23.2d}, [x10],#64	// .Lk_ipt, .Lk_sbo
+	ld1	{v24.2d-v27.2d}, [x10]		// .Lk_sb1, .Lk_sb2
+	ret
+.size	_vpaes_encrypt_preheat,.-_vpaes_encrypt_preheat
+
+##
+##  _aes_encrypt_core
+##
+##  AES-encrypt %xmm0.
+##
+##  Inputs:
+##     %xmm0 = input
+##     %xmm9-%xmm15 as in _vpaes_preheat
+##    (%rdx) = scheduled keys
+##
+##  Output in %xmm0
+##  Clobbers  %xmm1-%xmm5, %r9, %r10, %r11, %rax
+##  Preserves %xmm6 - %xmm8 so you get some local vectors
+##
+##
+.type	_vpaes_encrypt_core,%function
+.align 4
+_vpaes_encrypt_core:
+	mov	x9, $key
+	ldr	w8, [$key,#240]			// pull rounds
+	adr	x11, .Lk_mc_forward+16
+						// vmovdqa	.Lk_ipt(%rip),	%xmm2	# iptlo
+	ld1	{v16.2d}, [x9], #16		// vmovdqu	(%r9),	%xmm5		# round0 key
+	and	v1.16b, v7.16b, v17.16b		// vpand	%xmm9,	%xmm0,	%xmm1
+	ushr	v0.16b, v7.16b, #4		// vpsrlb	\$4,	%xmm0,	%xmm0
+	tbl	v1.16b, {$iptlo}, v1.16b	// vpshufb	%xmm1,	%xmm2,	%xmm1
+						// vmovdqa	.Lk_ipt+16(%rip), %xmm3	# ipthi
+	tbl	v2.16b, {$ipthi}, v0.16b	// vpshufb	%xmm0,	%xmm3,	%xmm2
+	eor	v0.16b, v1.16b, v16.16b		// vpxor	%xmm5,	%xmm1,	%xmm0
+	eor	v0.16b, v0.16b, v2.16b		// vpxor	%xmm2,	%xmm0,	%xmm0
+	b	.Lenc_entry
+
+.align 4
+.Lenc_loop:
+	// middle of middle round
+	add	x10, x11, #0x40
+	tbl	v4.16b, {$sb1t}, v2.16b		// vpshufb	%xmm2,	%xmm13,	%xmm4	# 4 = sb1u
+	ld1	{v1.2d}, [x11], #16		// vmovdqa	-0x40(%r11,%r10), %xmm1	# .Lk_mc_forward[]
+	tbl	v0.16b, {$sb1u}, v3.16b		// vpshufb	%xmm3,	%xmm12,	%xmm0	# 0 = sb1t
+	eor	v4.16b, v4.16b, v16.16b		// vpxor	%xmm5,	%xmm4,	%xmm4	# 4 = sb1u + k
+	tbl	v5.16b,	{$sb2t}, v2.16b		// vpshufb	%xmm2,	%xmm15,	%xmm5	# 4 = sb2u
+	eor	v0.16b, v0.16b, v4.16b		// vpxor	%xmm4,	%xmm0,	%xmm0	# 0 = A
+	tbl	v2.16b, {$sb2u}, v3.16b		// vpshufb	%xmm3,	%xmm14,	%xmm2	# 2 = sb2t
+	ld1	{v4.2d}, [x10]			// vmovdqa	(%r11,%r10), %xmm4	# .Lk_mc_backward[]
+	tbl	v3.16b, {v0.16b}, v1.16b	// vpshufb	%xmm1,	%xmm0,	%xmm3	# 0 = B
+	eor	v2.16b, v2.16b, v5.16b		// vpxor	%xmm5,	%xmm2,	%xmm2	# 2 = 2A
+	tbl	v0.16b, {v0.16b}, v4.16b	// vpshufb	%xmm4,	%xmm0,	%xmm0	# 3 = D
+	eor	v3.16b, v3.16b, v2.16b		// vpxor	%xmm2,	%xmm3,	%xmm3	# 0 = 2A+B
+	tbl	v4.16b, {v3.16b}, v1.16b	// vpshufb	%xmm1,	%xmm3,	%xmm4	# 0 = 2B+C
+	eor	v0.16b, v0.16b, v3.16b		// vpxor	%xmm3,	%xmm0,	%xmm0	# 3 = 2A+B+D
+	and	x11, x11, #~(1<<6)		// and		\$0x30,	%r11		# ... mod 4
+	eor	v0.16b, v0.16b, v4.16b		// vpxor	%xmm4,	%xmm0, %xmm0	# 0 = 2A+3B+C+D
+	sub	w8, w8, #1			// nr--
+
+.Lenc_entry:
+	// top of round
+	and	v1.16b, v0.16b, v17.16b		// vpand	%xmm0,	%xmm9,	%xmm1   # 0 = k
+	ushr	v0.16b, v0.16b, #4		// vpsrlb	\$4,	%xmm0,	%xmm0	# 1 = i
+	tbl	v5.16b, {$invhi}, v1.16b	// vpshufb	%xmm1,	%xmm11,	%xmm5	# 2 = a/k
+	eor	v1.16b, v1.16b, v0.16b		// vpxor	%xmm0,	%xmm1,	%xmm1	# 0 = j
+	tbl	v3.16b, {$invlo}, v0.16b	// vpshufb	%xmm0, 	%xmm10,	%xmm3  	# 3 = 1/i
+	tbl	v4.16b, {$invlo}, v1.16b	// vpshufb	%xmm1, 	%xmm10,	%xmm4  	# 4 = 1/j
+	eor	v3.16b, v3.16b, v5.16b		// vpxor	%xmm5,	%xmm3,	%xmm3	# 3 = iak = 1/i + a/k
+	eor	v4.16b, v4.16b, v5.16b		// vpxor	%xmm5,	%xmm4,	%xmm4  	# 4 = jak = 1/j + a/k
+	tbl	v2.16b, {$invlo}, v3.16b	// vpshufb	%xmm3,	%xmm10,	%xmm2  	# 2 = 1/iak
+	tbl	v3.16b, {$invlo}, v4.16b	// vpshufb	%xmm4,	%xmm10,	%xmm3	# 3 = 1/jak
+	eor	v2.16b, v2.16b, v1.16b		// vpxor	%xmm1,	%xmm2,	%xmm2  	# 2 = io
+	eor	v3.16b, v3.16b, v0.16b		// vpxor	%xmm0,	%xmm3,	%xmm3	# 3 = jo
+	ld1	{v16.2d}, [x9],#16		// vmovdqu	(%r9),	%xmm5
+	cbnz	w8, .Lenc_loop
+
+	// middle of last round
+	add	x10, x11, #0x80
+						// vmovdqa	-0x60(%r10), %xmm4	# 3 : sbou	.Lk_sbo
+						// vmovdqa	-0x50(%r10), %xmm0	# 0 : sbot	.Lk_sbo+16
+	tbl	v4.16b, {$sbou}, v2.16b		// vpshufb	%xmm2,	%xmm4,	%xmm4	# 4 = sbou
+	ld1	{v1.2d}, [x10]			// vmovdqa	0x40(%r11,%r10), %xmm1	# .Lk_sr[]
+	tbl	v0.16b, {$sbot}, v3.16b		// vpshufb	%xmm3,	%xmm0,	%xmm0	# 0 = sb1t
+	eor	v4.16b, v4.16b, v16.16b		// vpxor	%xmm5,	%xmm4,	%xmm4	# 4 = sb1u + k
+	eor	v0.16b, v0.16b, v4.16b		// vpxor	%xmm4,	%xmm0,	%xmm0	# 0 = A
+	tbl	v0.16b, {v0.16b}, v1.16b	// vpshufb	%xmm1,	%xmm0,	%xmm0
+	ret
+.size	_vpaes_encrypt_core,.-_vpaes_encrypt_core
+
+.globl	vpaes_encrypt
+.type	vpaes_encrypt,%function
+.align	4
+vpaes_encrypt:
+	.inst	0xd503233f			// paciasp
+	stp	x29,x30,[sp,#-16]!
+	add	x29,sp,#0
+
+	ld1	{v7.16b}, [$inp]
+	bl	_vpaes_encrypt_preheat
+	bl	_vpaes_encrypt_core
+	st1	{v0.16b}, [$out]
+
+	ldp	x29,x30,[sp],#16
+	.inst	0xd50323bf			// autiasp
+	ret
+.size	vpaes_encrypt,.-vpaes_encrypt
+
+.type	_vpaes_encrypt_2x,%function
+.align 4
+_vpaes_encrypt_2x:
+	mov	x9, $key
+	ldr	w8, [$key,#240]			// pull rounds
+	adr	x11, .Lk_mc_forward+16
+						// vmovdqa	.Lk_ipt(%rip),	%xmm2	# iptlo
+	ld1	{v16.2d}, [x9], #16		// vmovdqu	(%r9),	%xmm5		# round0 key
+	and	v1.16b,  v14.16b,  v17.16b	// vpand	%xmm9,	%xmm0,	%xmm1
+	ushr	v0.16b,  v14.16b,  #4		// vpsrlb	\$4,	%xmm0,	%xmm0
+	 and	v9.16b,  v15.16b,  v17.16b
+	 ushr	v8.16b,  v15.16b,  #4
+	tbl	v1.16b,  {$iptlo}, v1.16b	// vpshufb	%xmm1,	%xmm2,	%xmm1
+	 tbl	v9.16b,  {$iptlo}, v9.16b
+						// vmovdqa	.Lk_ipt+16(%rip), %xmm3	# ipthi
+	tbl	v2.16b,  {$ipthi}, v0.16b	// vpshufb	%xmm0,	%xmm3,	%xmm2
+	 tbl	v10.16b, {$ipthi}, v8.16b
+	eor	v0.16b,  v1.16b,   v16.16b	// vpxor	%xmm5,	%xmm1,	%xmm0
+	 eor	v8.16b,  v9.16b,   v16.16b
+	eor	v0.16b,  v0.16b,   v2.16b	// vpxor	%xmm2,	%xmm0,	%xmm0
+	 eor	v8.16b,  v8.16b,   v10.16b
+	b	.Lenc_2x_entry
+
+.align 4
+.Lenc_2x_loop:
+	// middle of middle round
+	add	x10, x11, #0x40
+	tbl	v4.16b,  {$sb1t}, v2.16b	// vpshufb	%xmm2,	%xmm13,	%xmm4	# 4 = sb1u
+	 tbl	v12.16b, {$sb1t}, v10.16b
+	ld1	{v1.2d}, [x11], #16		// vmovdqa	-0x40(%r11,%r10), %xmm1	# .Lk_mc_forward[]
+	tbl	v0.16b,  {$sb1u}, v3.16b	// vpshufb	%xmm3,	%xmm12,	%xmm0	# 0 = sb1t
+	 tbl	v8.16b,  {$sb1u}, v11.16b
+	eor	v4.16b,  v4.16b,  v16.16b	// vpxor	%xmm5,	%xmm4,	%xmm4	# 4 = sb1u + k
+	 eor	v12.16b, v12.16b, v16.16b
+	tbl	v5.16b,	 {$sb2t}, v2.16b	// vpshufb	%xmm2,	%xmm15,	%xmm5	# 4 = sb2u
+	 tbl	v13.16b, {$sb2t}, v10.16b
+	eor	v0.16b,  v0.16b,  v4.16b	// vpxor	%xmm4,	%xmm0,	%xmm0	# 0 = A
+	 eor	v8.16b,  v8.16b,  v12.16b
+	tbl	v2.16b,  {$sb2u}, v3.16b	// vpshufb	%xmm3,	%xmm14,	%xmm2	# 2 = sb2t
+	 tbl	v10.16b, {$sb2u}, v11.16b
+	ld1	{v4.2d}, [x10]			// vmovdqa	(%r11,%r10), %xmm4	# .Lk_mc_backward[]
+	tbl	v3.16b,  {v0.16b}, v1.16b	// vpshufb	%xmm1,	%xmm0,	%xmm3	# 0 = B
+	 tbl	v11.16b, {v8.16b}, v1.16b
+	eor	v2.16b,  v2.16b,  v5.16b	// vpxor	%xmm5,	%xmm2,	%xmm2	# 2 = 2A
+	 eor	v10.16b, v10.16b, v13.16b
+	tbl	v0.16b,  {v0.16b}, v4.16b	// vpshufb	%xmm4,	%xmm0,	%xmm0	# 3 = D
+	 tbl	v8.16b,  {v8.16b}, v4.16b
+	eor	v3.16b,  v3.16b,  v2.16b	// vpxor	%xmm2,	%xmm3,	%xmm3	# 0 = 2A+B
+	 eor	v11.16b, v11.16b, v10.16b
+	tbl	v4.16b,  {v3.16b}, v1.16b	// vpshufb	%xmm1,	%xmm3,	%xmm4	# 0 = 2B+C
+	 tbl	v12.16b, {v11.16b},v1.16b
+	eor	v0.16b,  v0.16b,  v3.16b	// vpxor	%xmm3,	%xmm0,	%xmm0	# 3 = 2A+B+D
+	 eor	v8.16b,  v8.16b,  v11.16b
+	and	x11, x11, #~(1<<6)		// and		\$0x30,	%r11		# ... mod 4
+	eor	v0.16b,  v0.16b,  v4.16b	// vpxor	%xmm4,	%xmm0, %xmm0	# 0 = 2A+3B+C+D
+	 eor	v8.16b,  v8.16b,  v12.16b
+	sub	w8, w8, #1			// nr--
+
+.Lenc_2x_entry:
+	// top of round
+	and	v1.16b,  v0.16b, v17.16b	// vpand	%xmm0,	%xmm9,	%xmm1   # 0 = k
+	ushr	v0.16b,  v0.16b, #4		// vpsrlb	\$4,	%xmm0,	%xmm0	# 1 = i
+	 and	v9.16b,  v8.16b, v17.16b
+	 ushr	v8.16b,  v8.16b, #4
+	tbl	v5.16b,  {$invhi},v1.16b	// vpshufb	%xmm1,	%xmm11,	%xmm5	# 2 = a/k
+	 tbl	v13.16b, {$invhi},v9.16b
+	eor	v1.16b,  v1.16b,  v0.16b	// vpxor	%xmm0,	%xmm1,	%xmm1	# 0 = j
+	 eor	v9.16b,  v9.16b,  v8.16b
+	tbl	v3.16b,  {$invlo},v0.16b	// vpshufb	%xmm0, 	%xmm10,	%xmm3  	# 3 = 1/i
+	 tbl	v11.16b, {$invlo},v8.16b
+	tbl	v4.16b,  {$invlo},v1.16b	// vpshufb	%xmm1, 	%xmm10,	%xmm4  	# 4 = 1/j
+	 tbl	v12.16b, {$invlo},v9.16b
+	eor	v3.16b,  v3.16b,  v5.16b	// vpxor	%xmm5,	%xmm3,	%xmm3	# 3 = iak = 1/i + a/k
+	 eor	v11.16b, v11.16b, v13.16b
+	eor	v4.16b,  v4.16b,  v5.16b	// vpxor	%xmm5,	%xmm4,	%xmm4  	# 4 = jak = 1/j + a/k
+	 eor	v12.16b, v12.16b, v13.16b
+	tbl	v2.16b,  {$invlo},v3.16b	// vpshufb	%xmm3,	%xmm10,	%xmm2  	# 2 = 1/iak
+	 tbl	v10.16b, {$invlo},v11.16b
+	tbl	v3.16b,  {$invlo},v4.16b	// vpshufb	%xmm4,	%xmm10,	%xmm3	# 3 = 1/jak
+	 tbl	v11.16b, {$invlo},v12.16b
+	eor	v2.16b,  v2.16b,  v1.16b	// vpxor	%xmm1,	%xmm2,	%xmm2  	# 2 = io
+	 eor	v10.16b, v10.16b, v9.16b
+	eor	v3.16b,  v3.16b,  v0.16b	// vpxor	%xmm0,	%xmm3,	%xmm3	# 3 = jo
+	 eor	v11.16b, v11.16b, v8.16b
+	ld1	{v16.2d}, [x9],#16		// vmovdqu	(%r9),	%xmm5
+	cbnz	w8, .Lenc_2x_loop
+
+	// middle of last round
+	add	x10, x11, #0x80
+						// vmovdqa	-0x60(%r10), %xmm4	# 3 : sbou	.Lk_sbo
+						// vmovdqa	-0x50(%r10), %xmm0	# 0 : sbot	.Lk_sbo+16
+	tbl	v4.16b,  {$sbou}, v2.16b	// vpshufb	%xmm2,	%xmm4,	%xmm4	# 4 = sbou
+	 tbl	v12.16b, {$sbou}, v10.16b
+	ld1	{v1.2d}, [x10]			// vmovdqa	0x40(%r11,%r10), %xmm1	# .Lk_sr[]
+	tbl	v0.16b,  {$sbot}, v3.16b	// vpshufb	%xmm3,	%xmm0,	%xmm0	# 0 = sb1t
+	 tbl	v8.16b,  {$sbot}, v11.16b
+	eor	v4.16b,  v4.16b,  v16.16b	// vpxor	%xmm5,	%xmm4,	%xmm4	# 4 = sb1u + k
+	 eor	v12.16b, v12.16b, v16.16b
+	eor	v0.16b,  v0.16b,  v4.16b	// vpxor	%xmm4,	%xmm0,	%xmm0	# 0 = A
+	 eor	v8.16b,  v8.16b,  v12.16b
+	tbl	v0.16b,  {v0.16b},v1.16b	// vpshufb	%xmm1,	%xmm0,	%xmm0
+	 tbl	v1.16b,  {v8.16b},v1.16b
+	ret
+.size	_vpaes_encrypt_2x,.-_vpaes_encrypt_2x
+
+.type	_vpaes_decrypt_preheat,%function
+.align	4
+_vpaes_decrypt_preheat:
+	adr	x10, .Lk_inv
+	movi	v17.16b, #0x0f
+	adr	x11, .Lk_dipt
+	ld1	{v18.2d-v19.2d}, [x10],#32	// .Lk_inv
+	ld1	{v20.2d-v23.2d}, [x11],#64	// .Lk_dipt, .Lk_dsbo
+	ld1	{v24.2d-v27.2d}, [x11],#64	// .Lk_dsb9, .Lk_dsbd
+	ld1	{v28.2d-v31.2d}, [x11]		// .Lk_dsbb, .Lk_dsbe
+	ret
+.size	_vpaes_decrypt_preheat,.-_vpaes_decrypt_preheat
+
+##
+##  Decryption core
+##
+##  Same API as encryption core.
+##
+.type	_vpaes_decrypt_core,%function
+.align	4
+_vpaes_decrypt_core:
+	mov	x9, $key
+	ldr	w8, [$key,#240]			// pull rounds
+
+						// vmovdqa	.Lk_dipt(%rip), %xmm2	# iptlo
+	lsl	x11, x8, #4			// mov	%rax,	%r11;	shl	\$4, %r11
+	eor	x11, x11, #0x30			// xor		\$0x30,	%r11
+	adr	x10, .Lk_sr
+	and	x11, x11, #0x30			// and		\$0x30,	%r11
+	add	x11, x11, x10
+	adr	x10, .Lk_mc_forward+48
+
+	ld1	{v16.2d}, [x9],#16		// vmovdqu	(%r9),	%xmm4		# round0 key
+	and	v1.16b, v7.16b, v17.16b		// vpand	%xmm9,	%xmm0,	%xmm1
+	ushr	v0.16b, v7.16b, #4		// vpsrlb	\$4,	%xmm0,	%xmm0
+	tbl	v2.16b, {$iptlo}, v1.16b	// vpshufb	%xmm1,	%xmm2,	%xmm2
+	ld1	{v5.2d}, [x10]			// vmovdqa	.Lk_mc_forward+48(%rip), %xmm5
+						// vmovdqa	.Lk_dipt+16(%rip), %xmm1 # ipthi
+	tbl	v0.16b, {$ipthi}, v0.16b	// vpshufb	%xmm0,	%xmm1,	%xmm0
+	eor	v2.16b, v2.16b, v16.16b		// vpxor	%xmm4,	%xmm2,	%xmm2
+	eor	v0.16b, v0.16b, v2.16b		// vpxor	%xmm2,	%xmm0,	%xmm0
+	b	.Ldec_entry
+
+.align 4
+.Ldec_loop:
+//
+//  Inverse mix columns
+//
+						// vmovdqa	-0x20(%r10),%xmm4		# 4 : sb9u
+						// vmovdqa	-0x10(%r10),%xmm1		# 0 : sb9t
+	tbl	v4.16b, {$sb9u}, v2.16b		// vpshufb	%xmm2,	%xmm4,	%xmm4		# 4 = sb9u
+	tbl	v1.16b, {$sb9t}, v3.16b		// vpshufb	%xmm3,	%xmm1,	%xmm1		# 0 = sb9t
+	eor	v0.16b, v4.16b, v16.16b		// vpxor	%xmm4,	%xmm0,	%xmm0
+						// vmovdqa	0x00(%r10),%xmm4		# 4 : sbdu
+	eor	v0.16b, v0.16b, v1.16b		// vpxor	%xmm1,	%xmm0,	%xmm0		# 0 = ch
+						// vmovdqa	0x10(%r10),%xmm1		# 0 : sbdt
+
+	tbl	v4.16b, {$sbdu}, v2.16b		// vpshufb	%xmm2,	%xmm4,	%xmm4		# 4 = sbdu
+	tbl 	v0.16b, {v0.16b}, v5.16b	// vpshufb	%xmm5,	%xmm0,	%xmm0		# MC ch
+	tbl	v1.16b, {$sbdt}, v3.16b		// vpshufb	%xmm3,	%xmm1,	%xmm1		# 0 = sbdt
+	eor	v0.16b, v0.16b, v4.16b		// vpxor	%xmm4,	%xmm0,	%xmm0		# 4 = ch
+						// vmovdqa	0x20(%r10),	%xmm4		# 4 : sbbu
+	eor	v0.16b, v0.16b, v1.16b		// vpxor	%xmm1,	%xmm0,	%xmm0		# 0 = ch
+						// vmovdqa	0x30(%r10),	%xmm1		# 0 : sbbt
+
+	tbl	v4.16b, {$sbbu}, v2.16b		// vpshufb	%xmm2,	%xmm4,	%xmm4		# 4 = sbbu
+	tbl	v0.16b, {v0.16b}, v5.16b	// vpshufb	%xmm5,	%xmm0,	%xmm0		# MC ch
+	tbl	v1.16b, {$sbbt}, v3.16b		// vpshufb	%xmm3,	%xmm1,	%xmm1		# 0 = sbbt
+	eor	v0.16b, v0.16b, v4.16b		// vpxor	%xmm4,	%xmm0,	%xmm0		# 4 = ch
+						// vmovdqa	0x40(%r10),	%xmm4		# 4 : sbeu
+	eor	v0.16b, v0.16b, v1.16b		// vpxor	%xmm1,	%xmm0,	%xmm0		# 0 = ch
+						// vmovdqa	0x50(%r10),	%xmm1		# 0 : sbet
+
+	tbl	v4.16b, {$sbeu}, v2.16b		// vpshufb	%xmm2,	%xmm4,	%xmm4		# 4 = sbeu
+	tbl	v0.16b, {v0.16b}, v5.16b	// vpshufb	%xmm5,	%xmm0,	%xmm0		# MC ch
+	tbl	v1.16b, {$sbet}, v3.16b		// vpshufb	%xmm3,	%xmm1,	%xmm1		# 0 = sbet
+	eor	v0.16b, v0.16b, v4.16b		// vpxor	%xmm4,	%xmm0,	%xmm0		# 4 = ch
+	ext	v5.16b, v5.16b, v5.16b, #12	// vpalignr \$12,	%xmm5,	%xmm5,	%xmm5
+	eor	v0.16b, v0.16b, v1.16b		// vpxor	%xmm1,	%xmm0,	%xmm0		# 0 = ch
+	sub	w8, w8, #1			// sub		\$1,%rax			# nr--
+
+.Ldec_entry:
+	// top of round
+	and	v1.16b, v0.16b, v17.16b		// vpand	%xmm9,	%xmm0,	%xmm1	# 0 = k
+	ushr	v0.16b, v0.16b, #4		// vpsrlb	\$4,	%xmm0,	%xmm0	# 1 = i
+	tbl	v2.16b, {$invhi}, v1.16b	// vpshufb	%xmm1,	%xmm11,	%xmm2	# 2 = a/k
+	eor	v1.16b,	v1.16b, v0.16b		// vpxor	%xmm0,	%xmm1,	%xmm1	# 0 = j
+	tbl	v3.16b, {$invlo}, v0.16b	// vpshufb	%xmm0, 	%xmm10,	%xmm3	# 3 = 1/i
+	tbl	v4.16b, {$invlo}, v1.16b	// vpshufb	%xmm1,	%xmm10,	%xmm4	# 4 = 1/j
+	eor	v3.16b, v3.16b, v2.16b		// vpxor	%xmm2,	%xmm3,	%xmm3	# 3 = iak = 1/i + a/k
+	eor	v4.16b, v4.16b, v2.16b		// vpxor	%xmm2, 	%xmm4,	%xmm4	# 4 = jak = 1/j + a/k
+	tbl	v2.16b, {$invlo}, v3.16b	// vpshufb	%xmm3,	%xmm10,	%xmm2	# 2 = 1/iak
+	tbl	v3.16b, {$invlo}, v4.16b	// vpshufb	%xmm4,  %xmm10,	%xmm3	# 3 = 1/jak
+	eor	v2.16b, v2.16b, v1.16b		// vpxor	%xmm1,	%xmm2,	%xmm2	# 2 = io
+	eor	v3.16b, v3.16b, v0.16b		// vpxor	%xmm0,  %xmm3,	%xmm3	# 3 = jo
+	ld1	{v16.2d}, [x9],#16		// vmovdqu	(%r9),	%xmm0
+	cbnz	w8, .Ldec_loop
+
+	// middle of last round
+						// vmovdqa	0x60(%r10),	%xmm4	# 3 : sbou
+	tbl	v4.16b, {$sbou}, v2.16b		// vpshufb	%xmm2,	%xmm4,	%xmm4	# 4 = sbou
+						// vmovdqa	0x70(%r10),	%xmm1	# 0 : sbot
+	ld1	{v2.2d}, [x11]			// vmovdqa	-0x160(%r11),	%xmm2	# .Lk_sr-.Lk_dsbd=-0x160
+	tbl	v1.16b, {$sbot}, v3.16b		// vpshufb	%xmm3,	%xmm1,	%xmm1	# 0 = sb1t
+	eor	v4.16b, v4.16b, v16.16b		// vpxor	%xmm0,	%xmm4,	%xmm4	# 4 = sb1u + k
+	eor	v0.16b, v1.16b, v4.16b		// vpxor	%xmm4,	%xmm1,	%xmm0	# 0 = A
+	tbl	v0.16b, {v0.16b}, v2.16b	// vpshufb	%xmm2,	%xmm0,	%xmm0
+	ret
+.size	_vpaes_decrypt_core,.-_vpaes_decrypt_core
+
+.globl	vpaes_decrypt
+.type	vpaes_decrypt,%function
+.align	4
+vpaes_decrypt:
+	.inst	0xd503233f			// paciasp
+	stp	x29,x30,[sp,#-16]!
+	add	x29,sp,#0
+
+	ld1	{v7.16b}, [$inp]
+	bl	_vpaes_decrypt_preheat
+	bl	_vpaes_decrypt_core
+	st1	{v0.16b}, [$out]
+
+	ldp	x29,x30,[sp],#16
+	.inst	0xd50323bf			// autiasp
+	ret
+.size	vpaes_decrypt,.-vpaes_decrypt
+
+// v14-v15 input, v0-v1 output
+.type	_vpaes_decrypt_2x,%function
+.align	4
+_vpaes_decrypt_2x:
+	mov	x9, $key
+	ldr	w8, [$key,#240]			// pull rounds
+
+						// vmovdqa	.Lk_dipt(%rip), %xmm2	# iptlo
+	lsl	x11, x8, #4			// mov	%rax,	%r11;	shl	\$4, %r11
+	eor	x11, x11, #0x30			// xor		\$0x30,	%r11
+	adr	x10, .Lk_sr
+	and	x11, x11, #0x30			// and		\$0x30,	%r11
+	add	x11, x11, x10
+	adr	x10, .Lk_mc_forward+48
+
+	ld1	{v16.2d}, [x9],#16		// vmovdqu	(%r9),	%xmm4		# round0 key
+	and	v1.16b,  v14.16b, v17.16b	// vpand	%xmm9,	%xmm0,	%xmm1
+	ushr	v0.16b,  v14.16b, #4		// vpsrlb	\$4,	%xmm0,	%xmm0
+	 and	v9.16b,  v15.16b, v17.16b
+	 ushr	v8.16b,  v15.16b, #4
+	tbl	v2.16b,  {$iptlo},v1.16b	// vpshufb	%xmm1,	%xmm2,	%xmm2
+	 tbl	v10.16b, {$iptlo},v9.16b
+	ld1	{v5.2d}, [x10]			// vmovdqa	.Lk_mc_forward+48(%rip), %xmm5
+						// vmovdqa	.Lk_dipt+16(%rip), %xmm1 # ipthi
+	tbl	v0.16b,  {$ipthi},v0.16b	// vpshufb	%xmm0,	%xmm1,	%xmm0
+	 tbl	v8.16b,  {$ipthi},v8.16b
+	eor	v2.16b,  v2.16b,  v16.16b	// vpxor	%xmm4,	%xmm2,	%xmm2
+	 eor	v10.16b, v10.16b, v16.16b
+	eor	v0.16b,  v0.16b,  v2.16b	// vpxor	%xmm2,	%xmm0,	%xmm0
+	 eor	v8.16b,  v8.16b,  v10.16b
+	b	.Ldec_2x_entry
+
+.align 4
+.Ldec_2x_loop:
+//
+//  Inverse mix columns
+//
+						// vmovdqa	-0x20(%r10),%xmm4		# 4 : sb9u
+						// vmovdqa	-0x10(%r10),%xmm1		# 0 : sb9t
+	tbl	v4.16b,  {$sb9u}, v2.16b	// vpshufb	%xmm2,	%xmm4,	%xmm4		# 4 = sb9u
+	 tbl	v12.16b, {$sb9u}, v10.16b
+	tbl	v1.16b,  {$sb9t}, v3.16b	// vpshufb	%xmm3,	%xmm1,	%xmm1		# 0 = sb9t
+	 tbl	v9.16b,  {$sb9t}, v11.16b
+	eor	v0.16b,  v4.16b,  v16.16b	// vpxor	%xmm4,	%xmm0,	%xmm0
+	 eor	v8.16b,  v12.16b, v16.16b
+						// vmovdqa	0x00(%r10),%xmm4		# 4 : sbdu
+	eor	v0.16b,  v0.16b,  v1.16b	// vpxor	%xmm1,	%xmm0,	%xmm0		# 0 = ch
+	 eor	v8.16b,  v8.16b,  v9.16b	// vpxor	%xmm1,	%xmm0,	%xmm0		# 0 = ch
+						// vmovdqa	0x10(%r10),%xmm1		# 0 : sbdt
+
+	tbl	v4.16b,  {$sbdu}, v2.16b	// vpshufb	%xmm2,	%xmm4,	%xmm4		# 4 = sbdu
+	 tbl	v12.16b, {$sbdu}, v10.16b
+	tbl 	v0.16b,  {v0.16b},v5.16b	// vpshufb	%xmm5,	%xmm0,	%xmm0		# MC ch
+	 tbl 	v8.16b,  {v8.16b},v5.16b
+	tbl	v1.16b,  {$sbdt}, v3.16b	// vpshufb	%xmm3,	%xmm1,	%xmm1		# 0 = sbdt
+	 tbl	v9.16b,  {$sbdt}, v11.16b
+	eor	v0.16b,  v0.16b,  v4.16b	// vpxor	%xmm4,	%xmm0,	%xmm0		# 4 = ch
+	 eor	v8.16b,  v8.16b,  v12.16b
+						// vmovdqa	0x20(%r10),	%xmm4		# 4 : sbbu
+	eor	v0.16b,  v0.16b,  v1.16b	// vpxor	%xmm1,	%xmm0,	%xmm0		# 0 = ch
+	 eor	v8.16b,  v8.16b,  v9.16b
+						// vmovdqa	0x30(%r10),	%xmm1		# 0 : sbbt
+
+	tbl	v4.16b,  {$sbbu}, v2.16b	// vpshufb	%xmm2,	%xmm4,	%xmm4		# 4 = sbbu
+	 tbl	v12.16b, {$sbbu}, v10.16b
+	tbl	v0.16b,  {v0.16b},v5.16b	// vpshufb	%xmm5,	%xmm0,	%xmm0		# MC ch
+	 tbl	v8.16b,  {v8.16b},v5.16b
+	tbl	v1.16b,  {$sbbt}, v3.16b	// vpshufb	%xmm3,	%xmm1,	%xmm1		# 0 = sbbt
+	 tbl	v9.16b,  {$sbbt}, v11.16b
+	eor	v0.16b,  v0.16b,  v4.16b	// vpxor	%xmm4,	%xmm0,	%xmm0		# 4 = ch
+	 eor	v8.16b,  v8.16b,  v12.16b
+						// vmovdqa	0x40(%r10),	%xmm4		# 4 : sbeu
+	eor	v0.16b,  v0.16b,  v1.16b	// vpxor	%xmm1,	%xmm0,	%xmm0		# 0 = ch
+	 eor	v8.16b,  v8.16b,  v9.16b
+						// vmovdqa	0x50(%r10),	%xmm1		# 0 : sbet
+
+	tbl	v4.16b,  {$sbeu}, v2.16b	// vpshufb	%xmm2,	%xmm4,	%xmm4		# 4 = sbeu
+	 tbl	v12.16b, {$sbeu}, v10.16b
+	tbl	v0.16b,  {v0.16b},v5.16b	// vpshufb	%xmm5,	%xmm0,	%xmm0		# MC ch
+	 tbl	v8.16b,  {v8.16b},v5.16b
+	tbl	v1.16b,  {$sbet}, v3.16b	// vpshufb	%xmm3,	%xmm1,	%xmm1		# 0 = sbet
+	 tbl	v9.16b,  {$sbet}, v11.16b
+	eor	v0.16b,  v0.16b,  v4.16b	// vpxor	%xmm4,	%xmm0,	%xmm0		# 4 = ch
+	 eor	v8.16b,  v8.16b,  v12.16b
+	ext	v5.16b,  v5.16b,  v5.16b, #12	// vpalignr \$12,	%xmm5,	%xmm5,	%xmm5
+	eor	v0.16b,  v0.16b,  v1.16b	// vpxor	%xmm1,	%xmm0,	%xmm0		# 0 = ch
+	 eor	v8.16b,  v8.16b,  v9.16b
+	sub	w8, w8, #1			// sub		\$1,%rax			# nr--
+
+.Ldec_2x_entry:
+	// top of round
+	and	v1.16b,  v0.16b,  v17.16b	// vpand	%xmm9,	%xmm0,	%xmm1	# 0 = k
+	ushr	v0.16b,  v0.16b,  #4		// vpsrlb	\$4,	%xmm0,	%xmm0	# 1 = i
+	 and	v9.16b,  v8.16b,  v17.16b
+	 ushr	v8.16b,  v8.16b,  #4
+	tbl	v2.16b,  {$invhi},v1.16b	// vpshufb	%xmm1,	%xmm11,	%xmm2	# 2 = a/k
+	 tbl	v10.16b, {$invhi},v9.16b
+	eor	v1.16b,	 v1.16b,  v0.16b	// vpxor	%xmm0,	%xmm1,	%xmm1	# 0 = j
+	 eor	v9.16b,	 v9.16b,  v8.16b
+	tbl	v3.16b,  {$invlo},v0.16b	// vpshufb	%xmm0, 	%xmm10,	%xmm3	# 3 = 1/i
+	 tbl	v11.16b, {$invlo},v8.16b
+	tbl	v4.16b,  {$invlo},v1.16b	// vpshufb	%xmm1,	%xmm10,	%xmm4	# 4 = 1/j
+	 tbl	v12.16b, {$invlo},v9.16b
+	eor	v3.16b,  v3.16b,  v2.16b	// vpxor	%xmm2,	%xmm3,	%xmm3	# 3 = iak = 1/i + a/k
+	 eor	v11.16b, v11.16b, v10.16b
+	eor	v4.16b,  v4.16b,  v2.16b	// vpxor	%xmm2, 	%xmm4,	%xmm4	# 4 = jak = 1/j + a/k
+	 eor	v12.16b, v12.16b, v10.16b
+	tbl	v2.16b,  {$invlo},v3.16b	// vpshufb	%xmm3,	%xmm10,	%xmm2	# 2 = 1/iak
+	 tbl	v10.16b, {$invlo},v11.16b
+	tbl	v3.16b,  {$invlo},v4.16b	// vpshufb	%xmm4,  %xmm10,	%xmm3	# 3 = 1/jak
+	 tbl	v11.16b, {$invlo},v12.16b
+	eor	v2.16b,  v2.16b,  v1.16b	// vpxor	%xmm1,	%xmm2,	%xmm2	# 2 = io
+	 eor	v10.16b, v10.16b, v9.16b
+	eor	v3.16b,  v3.16b,  v0.16b	// vpxor	%xmm0,  %xmm3,	%xmm3	# 3 = jo
+	 eor	v11.16b, v11.16b, v8.16b
+	ld1	{v16.2d}, [x9],#16		// vmovdqu	(%r9),	%xmm0
+	cbnz	w8, .Ldec_2x_loop
+
+	// middle of last round
+						// vmovdqa	0x60(%r10),	%xmm4	# 3 : sbou
+	tbl	v4.16b,  {$sbou}, v2.16b	// vpshufb	%xmm2,	%xmm4,	%xmm4	# 4 = sbou
+	 tbl	v12.16b, {$sbou}, v10.16b
+						// vmovdqa	0x70(%r10),	%xmm1	# 0 : sbot
+	tbl	v1.16b,  {$sbot}, v3.16b	// vpshufb	%xmm3,	%xmm1,	%xmm1	# 0 = sb1t
+	 tbl	v9.16b,  {$sbot}, v11.16b
+	ld1	{v2.2d}, [x11]			// vmovdqa	-0x160(%r11),	%xmm2	# .Lk_sr-.Lk_dsbd=-0x160
+	eor	v4.16b,  v4.16b,  v16.16b	// vpxor	%xmm0,	%xmm4,	%xmm4	# 4 = sb1u + k
+	 eor	v12.16b, v12.16b, v16.16b
+	eor	v0.16b,  v1.16b,  v4.16b	// vpxor	%xmm4,	%xmm1,	%xmm0	# 0 = A
+	 eor	v8.16b,  v9.16b,  v12.16b
+	tbl	v0.16b,  {v0.16b},v2.16b	// vpshufb	%xmm2,	%xmm0,	%xmm0
+	 tbl	v1.16b,  {v8.16b},v2.16b
+	ret
+.size	_vpaes_decrypt_2x,.-_vpaes_decrypt_2x
+___
+}
+{
+my ($inp,$bits,$out,$dir)=("x0","w1","x2","w3");
+my ($invlo,$invhi,$iptlo,$ipthi,$rcon) = map("v$_.16b",(18..21,8));
+
+$code.=<<___;
+########################################################
+##                                                    ##
+##                  AES key schedule                  ##
+##                                                    ##
+########################################################
+.type	_vpaes_key_preheat,%function
+.align	4
+_vpaes_key_preheat:
+	adr	x10, .Lk_inv
+	movi	v16.16b, #0x5b			// .Lk_s63
+	adr	x11, .Lk_sb1
+	movi	v17.16b, #0x0f			// .Lk_s0F
+	ld1	{v18.2d-v21.2d}, [x10]		// .Lk_inv, .Lk_ipt
+	adr	x10, .Lk_dksd
+	ld1	{v22.2d-v23.2d}, [x11]		// .Lk_sb1
+	adr	x11, .Lk_mc_forward
+	ld1	{v24.2d-v27.2d}, [x10],#64	// .Lk_dksd, .Lk_dksb
+	ld1	{v28.2d-v31.2d}, [x10],#64	// .Lk_dkse, .Lk_dks9
+	ld1	{v8.2d}, [x10]			// .Lk_rcon
+	ld1	{v9.2d}, [x11]			// .Lk_mc_forward[0]
+	ret
+.size	_vpaes_key_preheat,.-_vpaes_key_preheat
+
+.type	_vpaes_schedule_core,%function
+.align	4
+_vpaes_schedule_core:
+	.inst	0xd503233f			// paciasp
+	stp	x29, x30, [sp,#-16]!
+	add	x29,sp,#0
+
+	bl	_vpaes_key_preheat		// load the tables
+
+	ld1	{v0.16b}, [$inp],#16		// vmovdqu	(%rdi),	%xmm0		# load key (unaligned)
+
+	// input transform
+	mov	v3.16b, v0.16b			// vmovdqa	%xmm0,	%xmm3
+	bl	_vpaes_schedule_transform
+	mov	v7.16b, v0.16b			// vmovdqa	%xmm0,	%xmm7
+
+	adr	x10, .Lk_sr			// lea	.Lk_sr(%rip),%r10
+	add	x8, x8, x10
+	cbnz	$dir, .Lschedule_am_decrypting
+
+	// encrypting, output zeroth round key after transform
+	st1	{v0.2d}, [$out]			// vmovdqu	%xmm0,	(%rdx)
+	b	.Lschedule_go
+
+.Lschedule_am_decrypting:
+	// decrypting, output zeroth round key after shiftrows
+	ld1	{v1.2d}, [x8]			// vmovdqa	(%r8,%r10),	%xmm1
+	tbl	v3.16b, {v3.16b}, v1.16b	// vpshufb  %xmm1,	%xmm3,	%xmm3
+	st1	{v3.2d}, [$out]			// vmovdqu	%xmm3,	(%rdx)
+	eor	x8, x8, #0x30			// xor	\$0x30, %r8
+
+.Lschedule_go:
+	cmp	$bits, #192			// cmp	\$192,	%esi
+	b.hi	.Lschedule_256
+	b.eq	.Lschedule_192
+	// 128: fall through
+
+##
+##  .schedule_128
+##
+##  128-bit specific part of key schedule.
+##
+##  This schedule is really simple, because all its parts
+##  are accomplished by the subroutines.
+##
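+##  Note: the counter is preset to 10; nine passes write a round key
+##  through _vpaes_schedule_mangle and the tenth exits through
+##  .Lschedule_mangle_last, for 11 round keys including round 0.
+##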
+.Lschedule_128:
+	mov	$inp, #10			// mov	\$10, %esi
+
+.Loop_schedule_128:
+	sub	$inp, $inp, #1			// dec	%esi
+	bl 	_vpaes_schedule_round
+	cbz 	$inp, .Lschedule_mangle_last
+	bl	_vpaes_schedule_mangle		// write output
+	b 	.Loop_schedule_128
+
+##
+##  .aes_schedule_192
+##
+##  192-bit specific part of key schedule.
+##
+##  The main body of this schedule is the same as the 128-bit
+##  schedule, but with more smearing.  The long, high side is
+##  stored in %xmm7 as before, and the short, low side is in
+##  the high bits of %xmm6.
+##
+##  This schedule is somewhat nastier, however, because each
+##  round produces 192 bits of key material, or 1.5 round keys.
+##  Therefore, on each cycle we do 2 rounds and produce 3 round
+##  keys.
+##
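+##  Note: the counter is preset to 4; the first three passes emit three
+##  round keys each, the fourth emits two and exits through
+##  .Lschedule_mangle_last, for 13 round keys (nr = 12) in total.
+##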
+.align	4
+.Lschedule_192:
+	sub	$inp, $inp, #8
+	ld1	{v0.16b}, [$inp]		// vmovdqu	8(%rdi),%xmm0		# load key part 2 (very unaligned)
+	bl	_vpaes_schedule_transform	// input transform
+	mov	v6.16b, v0.16b			// vmovdqa	%xmm0,	%xmm6		# save short part
+	eor	v4.16b, v4.16b, v4.16b		// vpxor	%xmm4,	%xmm4, %xmm4	# clear 4
+	ins	v6.d[0], v4.d[0]		// vmovhlps	%xmm4,	%xmm6,	%xmm6		# clobber low side with zeros
+	mov	$inp, #4			// mov	\$4,	%esi
+
+.Loop_schedule_192:
+	sub	$inp, $inp, #1			// dec	%esi
+	bl	_vpaes_schedule_round
+	ext	v0.16b, v6.16b, v0.16b, #8	// vpalignr	\$8,%xmm6,%xmm0,%xmm0
+	bl	_vpaes_schedule_mangle		// save key n
+	bl	_vpaes_schedule_192_smear
+	bl	_vpaes_schedule_mangle		// save key n+1
+	bl	_vpaes_schedule_round
+	cbz 	$inp, .Lschedule_mangle_last
+	bl	_vpaes_schedule_mangle		// save key n+2
+	bl	_vpaes_schedule_192_smear
+	b	.Loop_schedule_192
+
+##
+##  .aes_schedule_256
+##
+##  256-bit specific part of key schedule.
+##
+##  The structure here is very similar to the 128-bit
+##  schedule, but with an additional "low side" in
+##  %xmm6.  The low side's rounds are the same as the
+##  high side's, except no rcon and no rotation.
+##
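+##  Note: the counter is preset to 7; each full pass emits a low and a
+##  high round key, the seventh exits after its first, and
+##  .Lschedule_mangle_last writes the final one: 15 round keys (nr = 14).
+##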
+.align	4
+.Lschedule_256:
+	ld1	{v0.16b}, [$inp]		// vmovdqu	16(%rdi),%xmm0		# load key part 2 (unaligned)
+	bl	_vpaes_schedule_transform	// input transform
+	mov	$inp, #7			// mov	\$7, %esi
+
+.Loop_schedule_256:
+	sub	$inp, $inp, #1			// dec	%esi
+	bl	_vpaes_schedule_mangle		// output low result
+	mov	v6.16b, v0.16b			// vmovdqa	%xmm0,	%xmm6		# save cur_lo in xmm6
+
+	// high round
+	bl	_vpaes_schedule_round
+	cbz 	$inp, .Lschedule_mangle_last
+	bl	_vpaes_schedule_mangle
+
+	// low round. swap xmm7 and xmm6
+	dup	v0.4s, v0.s[3]			// vpshufd	\$0xFF,	%xmm0,	%xmm0
+	movi	v4.16b, #0
+	mov	v5.16b, v7.16b			// vmovdqa	%xmm7,	%xmm5
+	mov	v7.16b, v6.16b			// vmovdqa	%xmm6,	%xmm7
+	bl	_vpaes_schedule_low_round
+	mov	v7.16b, v5.16b			// vmovdqa	%xmm5,	%xmm7
+
+	b	.Loop_schedule_256
+
+##
+##  .aes_schedule_mangle_last
+##
+##  Mangler for last round of key schedule
+##  Mangles %xmm0
+##    when encrypting, outputs out(%xmm0) ^ 63
+##    when decrypting, outputs unskew(%xmm0)
+##
+##  Always called right before return... jumps to cleanup and exits
+##
+.align	4
+.Lschedule_mangle_last:
+	// schedule last round key from xmm0
+	adr	x11, .Lk_deskew			// lea	.Lk_deskew(%rip),%r11	# prepare to deskew
+	cbnz	$dir, .Lschedule_mangle_last_dec
+
+	// encrypting
+	ld1	{v1.2d}, [x8]			// vmovdqa	(%r8,%r10),%xmm1
+	adr	x11, .Lk_opt			// lea	.Lk_opt(%rip),	%r11		# prepare to output transform
+	add	$out, $out, #32			// add	\$32,	%rdx
+	tbl	v0.16b, {v0.16b}, v1.16b	// vpshufb	%xmm1,	%xmm0,	%xmm0		# output permute
+
+.Lschedule_mangle_last_dec:
+	ld1	{v20.2d-v21.2d}, [x11]		// reload constants
+	sub	$out, $out, #16			// add	\$-16,	%rdx
+	eor	v0.16b, v0.16b, v16.16b		// vpxor	.Lk_s63(%rip),	%xmm0,	%xmm0
+	bl	_vpaes_schedule_transform	// output transform
+	st1	{v0.2d}, [$out]			// vmovdqu	%xmm0,	(%rdx)		# save last key
+
+	// cleanup
+	eor	v0.16b, v0.16b, v0.16b		// vpxor	%xmm0,	%xmm0,	%xmm0
+	eor	v1.16b, v1.16b, v1.16b		// vpxor	%xmm1,	%xmm1,	%xmm1
+	eor	v2.16b, v2.16b, v2.16b		// vpxor	%xmm2,	%xmm2,	%xmm2
+	eor	v3.16b, v3.16b, v3.16b		// vpxor	%xmm3,	%xmm3,	%xmm3
+	eor	v4.16b, v4.16b, v4.16b		// vpxor	%xmm4,	%xmm4,	%xmm4
+	eor	v5.16b, v5.16b, v5.16b		// vpxor	%xmm5,	%xmm5,	%xmm5
+	eor	v6.16b, v6.16b, v6.16b		// vpxor	%xmm6,	%xmm6,	%xmm6
+	eor	v7.16b, v7.16b, v7.16b		// vpxor	%xmm7,	%xmm7,	%xmm7
+	ldp	x29, x30, [sp],#16
+	.inst	0xd50323bf			// autiasp
+	ret
+.size	_vpaes_schedule_core,.-_vpaes_schedule_core
+
+##
+##  .aes_schedule_192_smear
+##
+##  Smear the short, low side in the 192-bit key schedule.
+##
+##  Inputs:
+##    %xmm7: high side, b  a  x  y
+##    %xmm6:  low side, d  c  0  0
+##    %xmm13: 0
+##
+##  Outputs:
+##    %xmm6: b+c+d  b+c  0  0
+##    %xmm0: b+c+d  b+c  b  a
+##
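+##  Note: on AArch64 the x86 vpshufd/vmovhlps shuffles are synthesized
+##  with dup/ins, and the zero source (%xmm13 above) is materialized
+##  locally with "movi v1.16b, #0".
+##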
+.type	_vpaes_schedule_192_smear,%function
+.align	4
+_vpaes_schedule_192_smear:
+	movi	v1.16b, #0
+	dup	v0.4s, v7.s[3]
+	ins	v1.s[3], v6.s[2]	// vpshufd	\$0x80,	%xmm6,	%xmm1	# d c 0 0 -> c 0 0 0
+	ins	v0.s[0], v7.s[2]	// vpshufd	\$0xFE,	%xmm7,	%xmm0	# b a _ _ -> b b b a
+	eor	v6.16b, v6.16b, v1.16b	// vpxor	%xmm1,	%xmm6,	%xmm6	# -> c+d c 0 0
+	eor	v1.16b, v1.16b, v1.16b	// vpxor	%xmm1,	%xmm1,	%xmm1
+	eor	v6.16b, v6.16b, v0.16b	// vpxor	%xmm0,	%xmm6,	%xmm6	# -> b+c+d b+c b a
+	mov	v0.16b, v6.16b		// vmovdqa	%xmm6,	%xmm0
+	ins	v6.d[0], v1.d[0]	// vmovhlps	%xmm1,	%xmm6,	%xmm6	# clobber low side with zeros
+	ret
+.size	_vpaes_schedule_192_smear,.-_vpaes_schedule_192_smear
+
+##
+##  .aes_schedule_round
+##
+##  Runs one main round of the key schedule on %xmm0, %xmm7
+##
+##  Specifically, runs subbytes on the high dword of %xmm0
+##  then rotates it by one byte and xors into the low dword of
+##  %xmm7.
+##
+##  Adds rcon from low byte of %xmm8, then rotates %xmm8 for
+##  next rcon.
+##
+##  Smears the dwords of %xmm7 by xoring the low into the
+##  second low, result into third, result into highest.
+##
+##  Returns results in %xmm7 = %xmm0.
+##  Clobbers %xmm1-%xmm4, %r11.
+##
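+##  Note: this is the standard FIPS-197 expansion step in disguise: the
+##  rotate is RotWord, the vector subbytes is SubWord, the byte folded
+##  in from %xmm8 is the round constant, and the smear implements the
+##  w[i] = w[i-1] ^ w[i-4] xor chain.
+##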
+.type	_vpaes_schedule_round,%function
+.align	4
+_vpaes_schedule_round:
+	// extract rcon from xmm8
+	movi	v4.16b, #0			// vpxor	%xmm4,	%xmm4,	%xmm4
+	ext	v1.16b, $rcon, v4.16b, #15	// vpalignr	\$15,	%xmm8,	%xmm4,	%xmm1
+	ext	$rcon, $rcon, $rcon, #15	// vpalignr	\$15,	%xmm8,	%xmm8,	%xmm8
+	eor	v7.16b, v7.16b, v1.16b		// vpxor	%xmm1,	%xmm7,	%xmm7
+
+	// rotate
+	dup	v0.4s, v0.s[3]			// vpshufd	\$0xFF,	%xmm0,	%xmm0
+	ext	v0.16b, v0.16b, v0.16b, #1	// vpalignr	\$1,	%xmm0,	%xmm0,	%xmm0
+
+	// fall through...
+
+	// low round: same as high round, but no rotation and no rcon.
+_vpaes_schedule_low_round:
+	// smear xmm7
+	ext	v1.16b, v4.16b, v7.16b, #12	// vpslldq	\$4,	%xmm7,	%xmm1
+	eor	v7.16b, v7.16b, v1.16b		// vpxor	%xmm1,	%xmm7,	%xmm7
+	ext	v4.16b, v4.16b, v7.16b, #8	// vpslldq	\$8,	%xmm7,	%xmm4
+
+	// subbytes
+	and	v1.16b, v0.16b, v17.16b		// vpand	%xmm9,	%xmm0,	%xmm1		# 0 = k
+	ushr	v0.16b, v0.16b, #4		// vpsrlb	\$4,	%xmm0,	%xmm0		# 1 = i
+	 eor	v7.16b, v7.16b, v4.16b		// vpxor	%xmm4,	%xmm7,	%xmm7
+	tbl	v2.16b, {$invhi}, v1.16b	// vpshufb	%xmm1,	%xmm11,	%xmm2		# 2 = a/k
+	eor	v1.16b, v1.16b, v0.16b		// vpxor	%xmm0,	%xmm1,	%xmm1		# 0 = j
+	tbl	v3.16b, {$invlo}, v0.16b	// vpshufb	%xmm0, 	%xmm10,	%xmm3		# 3 = 1/i
+	eor	v3.16b, v3.16b, v2.16b		// vpxor	%xmm2,	%xmm3,	%xmm3		# 3 = iak = 1/i + a/k
+	tbl	v4.16b, {$invlo}, v1.16b	// vpshufb	%xmm1,	%xmm10,	%xmm4		# 4 = 1/j
+	 eor	v7.16b, v7.16b, v16.16b		// vpxor	.Lk_s63(%rip),	%xmm7,	%xmm7
+	tbl	v3.16b, {$invlo}, v3.16b	// vpshufb	%xmm3,	%xmm10,	%xmm3		# 2 = 1/iak
+	eor	v4.16b, v4.16b, v2.16b		// vpxor	%xmm2,	%xmm4,	%xmm4		# 4 = jak = 1/j + a/k
+	tbl	v2.16b, {$invlo}, v4.16b	// vpshufb	%xmm4,	%xmm10,	%xmm2		# 3 = 1/jak
+	eor	v3.16b, v3.16b, v1.16b		// vpxor	%xmm1,	%xmm3,	%xmm3		# 2 = io
+	eor	v2.16b, v2.16b, v0.16b		// vpxor	%xmm0,	%xmm2,	%xmm2		# 3 = jo
+	tbl	v4.16b, {v23.16b}, v3.16b	// vpshufb	%xmm3,	%xmm13,	%xmm4		# 4 = sbou
+	tbl	v1.16b, {v22.16b}, v2.16b	// vpshufb	%xmm2,	%xmm12,	%xmm1		# 0 = sb1t
+	eor	v1.16b, v1.16b, v4.16b		// vpxor	%xmm4,	%xmm1,	%xmm1		# 0 = sbox output
+
+	// add in smeared stuff
+	eor	v0.16b, v1.16b, v7.16b		// vpxor	%xmm7,	%xmm1,	%xmm0
+	eor	v7.16b, v1.16b, v7.16b		// vmovdqa	%xmm0,	%xmm7
+	ret
+.size	_vpaes_schedule_round,.-_vpaes_schedule_round
+
+##
+##  .aes_schedule_transform
+##
+##  Linear-transform %xmm0 according to tables at (%r11)
+##
+##  Requires that %xmm9 = 0x0F0F... as in preheat
+##  Output in %xmm0
+##  Clobbers %xmm1, %xmm2
+##
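+##  Note: byte by byte this computes
+##  out[i] = lo_table[in[i] & 0x0F] ^ hi_table[in[i] >> 4],
+##  i.e. a GF(2)-linear map split across two 16-entry nibble tables.
+##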
+.type	_vpaes_schedule_transform,%function
+.align	4
+_vpaes_schedule_transform:
+	and	v1.16b, v0.16b, v17.16b		// vpand	%xmm9,	%xmm0,	%xmm1
+	ushr	v0.16b, v0.16b, #4		// vpsrlb	\$4,	%xmm0,	%xmm0
+						// vmovdqa	(%r11),	%xmm2 	# lo
+	tbl	v2.16b, {$iptlo}, v1.16b	// vpshufb	%xmm1,	%xmm2,	%xmm2
+						// vmovdqa	16(%r11),	%xmm1 # hi
+	tbl	v0.16b, {$ipthi}, v0.16b	// vpshufb	%xmm0,	%xmm1,	%xmm0
+	eor	v0.16b, v0.16b, v2.16b		// vpxor	%xmm2,	%xmm0,	%xmm0
+	ret
+.size	_vpaes_schedule_transform,.-_vpaes_schedule_transform
+
+##
+##  .aes_schedule_mangle
+##
+##  Mangle xmm0 from (basis-transformed) standard version
+##  to our version.
+##
+##  On encrypt,
+##    xor with 0x63
+##    multiply by circulant 0,1,1,1
+##    apply shiftrows transform
+##
+##  On decrypt,
+##    xor with 0x63
+##    multiply by "inverse mixcolumns" circulant E,B,D,9
+##    deskew
+##    apply shiftrows transform
+##
+##
+##  Writes out to (%rdx), and increments or decrements it
+##  Keeps track of round number mod 4 in %r8
+##  Preserves xmm0
+##  Clobbers xmm1-xmm5
+##
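+##  Note: the round counter steps backwards through the four .Lk_sr
+##  rows: adding 48 and clearing bit 6 is a subtract-16 modulo 64, so
+##  successive keys pick up successive shiftrows permutations.
+##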
+.type	_vpaes_schedule_mangle,%function
+.align	4
+_vpaes_schedule_mangle:
+	mov	v4.16b, v0.16b			// vmovdqa	%xmm0,	%xmm4	# save xmm0 for later
+						// vmovdqa	.Lk_mc_forward(%rip),%xmm5
+	cbnz	$dir, .Lschedule_mangle_dec
+
+	// encrypting
+	eor	v4.16b, v0.16b, v16.16b		// vpxor	.Lk_s63(%rip),	%xmm0,	%xmm4
+	add	$out, $out, #16			// add	\$16,	%rdx
+	tbl	v4.16b, {v4.16b}, v9.16b	// vpshufb	%xmm5,	%xmm4,	%xmm4
+	tbl	v1.16b, {v4.16b}, v9.16b	// vpshufb	%xmm5,	%xmm4,	%xmm1
+	tbl	v3.16b, {v1.16b}, v9.16b	// vpshufb	%xmm5,	%xmm1,	%xmm3
+	eor	v4.16b, v4.16b, v1.16b		// vpxor	%xmm1,	%xmm4,	%xmm4
+	ld1	{v1.2d}, [x8]			// vmovdqa	(%r8,%r10),	%xmm1
+	eor	v3.16b, v3.16b, v4.16b		// vpxor	%xmm4,	%xmm3,	%xmm3
+
+	b	.Lschedule_mangle_both
+.align	4
+.Lschedule_mangle_dec:
+	// inverse mix columns
+						// lea	.Lk_dksd(%rip),%r11
+	ushr	v1.16b, v4.16b, #4		// vpsrlb	\$4,	%xmm4,	%xmm1	# 1 = hi
+	and	v4.16b, v4.16b, v17.16b		// vpand	%xmm9,	%xmm4,	%xmm4	# 4 = lo
+
+						// vmovdqa	0x00(%r11),	%xmm2
+	tbl	v2.16b, {v24.16b}, v4.16b	// vpshufb	%xmm4,	%xmm2,	%xmm2
+						// vmovdqa	0x10(%r11),	%xmm3
+	tbl	v3.16b,	{v25.16b}, v1.16b	// vpshufb	%xmm1,	%xmm3,	%xmm3
+	eor	v3.16b, v3.16b, v2.16b		// vpxor	%xmm2,	%xmm3,	%xmm3
+	tbl	v3.16b, {v3.16b}, v9.16b	// vpshufb	%xmm5,	%xmm3,	%xmm3
+
+						// vmovdqa	0x20(%r11),	%xmm2
+	tbl	v2.16b, {v26.16b}, v4.16b	// vpshufb	%xmm4,	%xmm2,	%xmm2
+	eor	v2.16b, v2.16b, v3.16b		// vpxor	%xmm3,	%xmm2,	%xmm2
+						// vmovdqa	0x30(%r11),	%xmm3
+	tbl	v3.16b, {v27.16b}, v1.16b	// vpshufb	%xmm1,	%xmm3,	%xmm3
+	eor	v3.16b, v3.16b, v2.16b		// vpxor	%xmm2,	%xmm3,	%xmm3
+	tbl	v3.16b, {v3.16b}, v9.16b	// vpshufb	%xmm5,	%xmm3,	%xmm3
+
+						// vmovdqa	0x40(%r11),	%xmm2
+	tbl	v2.16b, {v28.16b}, v4.16b	// vpshufb	%xmm4,	%xmm2,	%xmm2
+	eor	v2.16b, v2.16b, v3.16b		// vpxor	%xmm3,	%xmm2,	%xmm2
+						// vmovdqa	0x50(%r11),	%xmm3
+	tbl	v3.16b, {v29.16b}, v1.16b	// vpshufb	%xmm1,	%xmm3,	%xmm3
+	eor	v3.16b, v3.16b, v2.16b		// vpxor	%xmm2,	%xmm3,	%xmm3
+
+						// vmovdqa	0x60(%r11),	%xmm2
+	tbl	v2.16b, {v30.16b}, v4.16b	// vpshufb	%xmm4,	%xmm2,	%xmm2
+	tbl	v3.16b, {v3.16b}, v9.16b	// vpshufb	%xmm5,	%xmm3,	%xmm3
+						// vmovdqa	0x70(%r11),	%xmm4
+	tbl	v4.16b, {v31.16b}, v1.16b	// vpshufb	%xmm1,	%xmm4,	%xmm4
+	ld1	{v1.2d}, [x8]			// vmovdqa	(%r8,%r10),	%xmm1
+	eor	v2.16b, v2.16b, v3.16b		// vpxor	%xmm3,	%xmm2,	%xmm2
+	eor	v3.16b, v4.16b, v2.16b		// vpxor	%xmm2,	%xmm4,	%xmm3
+
+	sub	$out, $out, #16			// add	\$-16,	%rdx
+
+.Lschedule_mangle_both:
+	tbl	v3.16b, {v3.16b}, v1.16b	// vpshufb	%xmm1,	%xmm3,	%xmm3
+	add	x8, x8, #64-16			// add	\$-16,	%r8
+	and	x8, x8, #~(1<<6)		// and	\$0x30,	%r8
+	st1	{v3.2d}, [$out]			// vmovdqu	%xmm3,	(%rdx)
+	ret
+.size	_vpaes_schedule_mangle,.-_vpaes_schedule_mangle
+
+.globl	vpaes_set_encrypt_key
+.type	vpaes_set_encrypt_key,%function
+.align	4
+vpaes_set_encrypt_key:
+	.inst	0xd503233f		// paciasp
+	stp	x29,x30,[sp,#-16]!
+	add	x29,sp,#0
+	stp	d8,d9,[sp,#-16]!	// ABI spec says so
+
+	lsr	w9, $bits, #5		// shr	\$5,%eax
+	add	w9, w9, #5		// add	\$5,%eax
+	str	w9, [$out,#240]		// mov	%eax,240(%rdx)	# AES_KEY->rounds = nbits/32+5;
+
+	mov	$dir, #0		// mov	\$0,%ecx
+	mov	x8, #0x30		// mov	\$0x30,%r8d
+	bl	_vpaes_schedule_core
+	eor	x0, x0, x0
+
+	ldp	d8,d9,[sp],#16
+	ldp	x29,x30,[sp],#16
+	.inst	0xd50323bf		// autiasp
+	ret
+.size	vpaes_set_encrypt_key,.-vpaes_set_encrypt_key
+
+.globl	vpaes_set_decrypt_key
+.type	vpaes_set_decrypt_key,%function
+.align	4
+vpaes_set_decrypt_key:
+	.inst	0xd503233f		// paciasp
+	stp	x29,x30,[sp,#-16]!
+	add	x29,sp,#0
+	stp	d8,d9,[sp,#-16]!	// ABI spec says so
+
+	lsr	w9, $bits, #5		// shr	\$5,%eax
+	add	w9, w9, #5		// add	\$5,%eax
+	str	w9, [$out,#240]		// mov	%eax,240(%rdx)	# AES_KEY->rounds = nbits/32+5;
+	lsl	w9, w9, #4		// shl	\$4,%eax
+	add	$out, $out, #16		// lea	16(%rdx,%rax),%rdx
+	add	$out, $out, x9
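+					// decrypt schedule is written back to front, so start at the last round key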
+
+	mov	$dir, #1		// mov	\$1,%ecx
+	lsr	w8, $bits, #1		// shr	\$1,%r8d
+	and	x8, x8, #32		// and	\$32,%r8d
+	eor	x8, x8, #32		// xor	\$32,%r8d	# nbits==192?0:32
+	bl	_vpaes_schedule_core
+
+	ldp	d8,d9,[sp],#16
+	ldp	x29,x30,[sp],#16
+	.inst	0xd50323bf		// autiasp
+	ret
+.size	vpaes_set_decrypt_key,.-vpaes_set_decrypt_key
+___
+}
+{
+my ($inp,$out,$len,$key,$ivec,$dir) = map("x$_",(0..5));
+
+$code.=<<___;
+.globl	vpaes_cbc_encrypt
+.type	vpaes_cbc_encrypt,%function
+.align	4
+vpaes_cbc_encrypt:
+	cbz	$len, .Lcbc_abort
+	cmp	w5, #0			// check direction
+	b.eq	vpaes_cbc_decrypt
+
+	.inst	0xd503233f		// paciasp
+	stp	x29,x30,[sp,#-16]!
+	add	x29,sp,#0
+
+	mov	x17, $len		// reassign
+	mov	x2,  $key		// reassign
+
+	ld1	{v0.16b}, [$ivec]	// load ivec
+	bl	_vpaes_encrypt_preheat
+	b	.Lcbc_enc_loop
+
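+	// Standard CBC chaining: each iteration computes
+	// C[i] = Encrypt(P[i] ^ C[i-1]), with v0 carrying the running
+	// IV/ciphertext between iterations.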
+.align	4
+.Lcbc_enc_loop:
+	ld1	{v7.16b}, [$inp],#16	// load input
+	eor	v7.16b, v7.16b, v0.16b	// xor with ivec
+	bl	_vpaes_encrypt_core
+	st1	{v0.16b}, [$out],#16	// save output
+	subs	x17, x17, #16
+	b.hi	.Lcbc_enc_loop
+
+	st1	{v0.16b}, [$ivec]	// write ivec
+
+	ldp	x29,x30,[sp],#16
+	.inst	0xd50323bf		// autiasp
+.Lcbc_abort:
+	ret
+.size	vpaes_cbc_encrypt,.-vpaes_cbc_encrypt
+
+.type	vpaes_cbc_decrypt,%function
+.align	4
+vpaes_cbc_decrypt:
+	.inst	0xd503233f		// paciasp
+	stp	x29,x30,[sp,#-16]!
+	add	x29,sp,#0
+	stp	d8,d9,[sp,#-16]!	// ABI spec says so
+	stp	d10,d11,[sp,#-16]!
+	stp	d12,d13,[sp,#-16]!
+	stp	d14,d15,[sp,#-16]!
+
+	mov	x17, $len		// reassign
+	mov	x2,  $key		// reassign
+	ld1	{v6.16b}, [$ivec]	// load ivec
+	bl	_vpaes_decrypt_preheat
+	tst	x17, #16
+	b.eq	.Lcbc_dec_loop2x
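+	// The length is an odd multiple of 16: peel one block with the
+	// 1x core so the loop below can run two blocks per iteration.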
+
+	ld1	{v7.16b}, [$inp], #16	// load input
+	bl	_vpaes_decrypt_core
+	eor	v0.16b, v0.16b, v6.16b	// xor with ivec
+	orr	v6.16b, v7.16b, v7.16b	// next ivec value
+	st1	{v0.16b}, [$out], #16
+	subs	x17, x17, #16
+	b.ls	.Lcbc_dec_done
+
+.align	4
+.Lcbc_dec_loop2x:
+	ld1	{v14.16b,v15.16b}, [$inp], #32
+	bl	_vpaes_decrypt_2x
+	eor	v0.16b, v0.16b, v6.16b	// xor with ivec
+	eor	v1.16b, v1.16b, v14.16b
+	orr	v6.16b, v15.16b, v15.16b
+	st1	{v0.16b,v1.16b}, [$out], #32
+	subs	x17, x17, #32
+	b.hi	.Lcbc_dec_loop2x
+
+.Lcbc_dec_done:
+	st1	{v6.16b}, [$ivec]
+
+	ldp	d14,d15,[sp],#16
+	ldp	d12,d13,[sp],#16
+	ldp	d10,d11,[sp],#16
+	ldp	d8,d9,[sp],#16
+	ldp	x29,x30,[sp],#16
+	.inst	0xd50323bf		// autiasp
+	ret
+.size	vpaes_cbc_decrypt,.-vpaes_cbc_decrypt
+___
+if (1) {
+$code.=<<___;
+.globl	vpaes_ecb_encrypt
+.type	vpaes_ecb_encrypt,%function
+.align	4
+vpaes_ecb_encrypt:
+	.inst	0xd503233f		// paciasp
+	stp	x29,x30,[sp,#-16]!
+	add	x29,sp,#0
+	stp	d8,d9,[sp,#-16]!	// ABI spec says so
+	stp	d10,d11,[sp,#-16]!
+	stp	d12,d13,[sp,#-16]!
+	stp	d14,d15,[sp,#-16]!
+
+	mov	x17, $len
+	mov	x2,  $key
+	bl	_vpaes_encrypt_preheat
+	tst	x17, #16
+	b.eq	.Lecb_enc_loop
+
+	ld1	{v7.16b}, [$inp],#16
+	bl	_vpaes_encrypt_core
+	st1	{v0.16b}, [$out],#16
+	subs	x17, x17, #16
+	b.ls	.Lecb_enc_done
+
+.align	4
+.Lecb_enc_loop:
+	ld1	{v14.16b,v15.16b}, [$inp], #32
+	bl	_vpaes_encrypt_2x
+	st1	{v0.16b,v1.16b}, [$out], #32
+	subs	x17, x17, #32
+	b.hi	.Lecb_enc_loop
+
+.Lecb_enc_done:
+	ldp	d14,d15,[sp],#16
+	ldp	d12,d13,[sp],#16
+	ldp	d10,d11,[sp],#16
+	ldp	d8,d9,[sp],#16
+	ldp	x29,x30,[sp],#16
+	.inst	0xd50323bf		// autiasp
+	ret
+.size	vpaes_ecb_encrypt,.-vpaes_ecb_encrypt
+
+.globl	vpaes_ecb_decrypt
+.type	vpaes_ecb_decrypt,%function
+.align	4
+vpaes_ecb_decrypt:
+	.inst	0xd503233f		// paciasp
+	stp	x29,x30,[sp,#-16]!
+	add	x29,sp,#0
+	stp	d8,d9,[sp,#-16]!	// ABI spec says so
+	stp	d10,d11,[sp,#-16]!
+	stp	d12,d13,[sp,#-16]!
+	stp	d14,d15,[sp,#-16]!
+
+	mov	x17, $len
+	mov	x2,  $key
+	bl	_vpaes_decrypt_preheat
+	tst	x17, #16
+	b.eq	.Lecb_dec_loop
+
+	ld1	{v7.16b}, [$inp],#16
+	bl	_vpaes_decrypt_core
+	st1	{v0.16b}, [$out],#16
+	subs	x17, x17, #16
+	b.ls	.Lecb_dec_done
+
+.align	4
+.Lecb_dec_loop:
+	ld1	{v14.16b,v15.16b}, [$inp], #32
+	bl	_vpaes_decrypt_2x
+	st1	{v0.16b,v1.16b}, [$out], #32
+	subs	x17, x17, #32
+	b.hi	.Lecb_dec_loop
+
+.Lecb_dec_done:
+	ldp	d14,d15,[sp],#16
+	ldp	d12,d13,[sp],#16
+	ldp	d10,d11,[sp],#16
+	ldp	d8,d9,[sp],#16
+	ldp	x29,x30,[sp],#16
+	.inst	0xd50323bf		// autiasp
+	ret
+.size	vpaes_ecb_decrypt,.-vpaes_ecb_decrypt
+___
+}	}
+print $code;
+
+close STDOUT or die "error closing STDOUT: $!";
diff --git a/ap/lib/libssl/openssl-1.1.1o/crypto/aes/asm/vpaes-ppc.pl b/ap/lib/libssl/openssl-1.1.1o/crypto/aes/asm/vpaes-ppc.pl
new file mode 100644
index 0000000..0260a5c
--- /dev/null
+++ b/ap/lib/libssl/openssl-1.1.1o/crypto/aes/asm/vpaes-ppc.pl
@@ -0,0 +1,1594 @@
+#! /usr/bin/env perl
+# Copyright 2013-2020 The OpenSSL Project Authors. All Rights Reserved.
+#
+# Licensed under the OpenSSL license (the "License").  You may not use
+# this file except in compliance with the License.  You can obtain a copy
+# in the file LICENSE in the source distribution or at
+# https://www.openssl.org/source/license.html
+
+
+######################################################################
+## Constant-time SSSE3 AES core implementation.
+## version 0.1
+##
+## By Mike Hamburg (Stanford University), 2009
+## Public domain.
+##
+## For details see http://shiftleft.org/papers/vector_aes/ and
+## http://crypto.stanford.edu/vpaes/.
+
+# CBC encrypt/decrypt performance in cycles per byte processed with
+# 128-bit key.
+#
+#		aes-ppc.pl		this
+# PPC74x0/G4e	35.5/52.1/(23.8)	11.9(*)/15.4
+# PPC970/G5	37.9/55.0/(28.5)	22.2/28.5
+# POWER6	42.7/54.3/(28.2)	63.0/92.8(**)
+# POWER7	32.3/42.9/(18.4)	18.5/23.3
+#
+# (*)	This is ~10% worse than reported in the paper. The reason is
+#	twofold. First, this module doesn't make any assumptions about
+#	key schedule (or data, for that matter) alignment and handles
+#	it in-line. Second, being transliterated from vpaes-x86_64.pl,
+#	it relies on "nested inversion", which is better suited for
+#	Intel CPUs.
+# (**)	Inadequate POWER6 performance is due to astronomic AltiVec
+#	latency, 9 cycles per simple logical operation.
+
+$flavour = shift;
+
+if ($flavour =~ /64/) {
+	$SIZE_T	=8;
+	$LRSAVE	=2*$SIZE_T;
+	$STU	="stdu";
+	$POP	="ld";
+	$PUSH	="std";
+	$UCMP	="cmpld";
+} elsif ($flavour =~ /32/) {
+	$SIZE_T	=4;
+	$LRSAVE	=$SIZE_T;
+	$STU	="stwu";
+	$POP	="lwz";
+	$PUSH	="stw";
+	$UCMP	="cmplw";
+} else { die "nonsense $flavour"; }
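+# $flavour selects the target ABI; only the 32-/64-bit distinction
+# matters here, fixing the pointer size and the load/store/compare
+# mnemonics used for stack and link-register handling.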
+
+$sp="r1";
+$FRAME=6*$SIZE_T+13*16;	# 13*16 is for v20-v31 offload
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+( $xlate="${dir}ppc-xlate.pl" and -f $xlate ) or
+( $xlate="${dir}../../perlasm/ppc-xlate.pl" and -f $xlate) or
+die "can't locate ppc-xlate.pl";
+
+open STDOUT,"| $^X $xlate $flavour ".shift || die "can't call $xlate: $!";
+
+$code.=<<___;
+.machine	"any"
+
+.text
+
+.align	7	# totally strategic alignment
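+# Constants are written in big-endian order; the ?inv/?rev/?asis tags
+# tell the generator how each .long row must be adjusted (inverted,
+# byte-reversed, or left as-is) when emitting little-endian code.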
+_vpaes_consts:
+Lk_mc_forward:	# mc_forward
+	.long	0x01020300, 0x05060704, 0x090a0b08, 0x0d0e0f0c	?inv
+	.long	0x05060704, 0x090a0b08, 0x0d0e0f0c, 0x01020300	?inv
+	.long	0x090a0b08, 0x0d0e0f0c, 0x01020300, 0x05060704	?inv
+	.long	0x0d0e0f0c, 0x01020300, 0x05060704, 0x090a0b08	?inv
+Lk_mc_backward:	# mc_backward
+	.long	0x03000102, 0x07040506, 0x0b08090a, 0x0f0c0d0e	?inv
+	.long	0x0f0c0d0e, 0x03000102, 0x07040506, 0x0b08090a	?inv
+	.long	0x0b08090a, 0x0f0c0d0e, 0x03000102, 0x07040506	?inv
+	.long	0x07040506, 0x0b08090a, 0x0f0c0d0e, 0x03000102	?inv
+Lk_sr:		# sr
+	.long	0x00010203, 0x04050607, 0x08090a0b, 0x0c0d0e0f	?inv
+	.long	0x00050a0f, 0x04090e03, 0x080d0207, 0x0c01060b	?inv
+	.long	0x0009020b, 0x040d060f, 0x08010a03, 0x0c050e07	?inv
+	.long	0x000d0a07, 0x04010e0b, 0x0805020f, 0x0c090603	?inv
+
+##
+## "Hot" constants
+##
+Lk_inv:		# inv, inva
+	.long	0xf001080d, 0x0f06050e, 0x020c0b0a, 0x09030704	?rev
+	.long	0xf0070b0f, 0x060a0401, 0x09080502, 0x0c0e0d03	?rev
+Lk_ipt:		# input transform (lo, hi)
+	.long	0x00702a5a, 0x98e8b2c2, 0x08782252, 0x90e0baca	?rev
+	.long	0x004d7c31, 0x7d30014c, 0x81ccfdb0, 0xfcb180cd	?rev
+Lk_sbo:		# sbou, sbot
+	.long	0x00c7bd6f, 0x176dd2d0, 0x78a802c5, 0x7abfaa15	?rev
+	.long	0x006abb5f, 0xa574e4cf, 0xfa352b41, 0xd1901e8e	?rev
+Lk_sb1:		# sb1u, sb1t
+	.long	0x0023e2fa, 0x15d41836, 0xefd92e0d, 0xc1ccf73b	?rev
+	.long	0x003e50cb, 0x8fe19bb1, 0x44f52a14, 0x6e7adfa5	?rev
+Lk_sb2:		# sb2u, sb2t
+	.long	0x0029e10a, 0x4088eb69, 0x4a2382ab, 0xc863a1c2	?rev
+	.long	0x0024710b, 0xc6937ae2, 0xcd2f98bc, 0x55e9b75e	?rev
+
+##
+##  Decryption stuff
+##
+Lk_dipt:	# decryption input transform
+	.long	0x005f540b, 0x045b500f, 0x1a454e11, 0x1e414a15	?rev
+	.long	0x00650560, 0xe683e386, 0x94f191f4, 0x72177712	?rev
+Lk_dsbo:	# decryption sbox final output
+	.long	0x0040f97e, 0x53ea8713, 0x2d3e94d4, 0xb96daac7	?rev
+	.long	0x001d4493, 0x0f56d712, 0x9c8ec5d8, 0x59814bca	?rev
+Lk_dsb9:	# decryption sbox output *9*u, *9*t
+	.long	0x00d6869a, 0x53031c85, 0xc94c994f, 0x501fd5ca	?rev
+	.long	0x0049d7ec, 0x89173bc0, 0x65a5fbb2, 0x9e2c5e72	?rev
+Lk_dsbd:	# decryption sbox output *D*u, *D*t
+	.long	0x00a2b1e6, 0xdfcc577d, 0x39442a88, 0x139b6ef5	?rev
+	.long	0x00cbc624, 0xf7fae23c, 0xd3efde15, 0x0d183129	?rev
+Lk_dsbb:	# decryption sbox output *B*u, *B*t
+	.long	0x0042b496, 0x926422d0, 0x04d4f2b0, 0xf6462660	?rev
+	.long	0x006759cd, 0xa69894c1, 0x6baa5532, 0x3e0cfff3	?rev
+Lk_dsbe:	# decryption sbox output *E*u, *E*t
+	.long	0x00d0d426, 0x9692f246, 0xb0f6b464, 0x04604222	?rev
+	.long	0x00c1aaff, 0xcda6550c, 0x323e5998, 0x6bf36794	?rev
+
+##
+##  Key schedule constants
+##
+Lk_dksd:	# decryption key schedule: invskew x*D
+	.long	0x0047e4a3, 0x5d1ab9fe, 0xf9be1d5a, 0xa4e34007	?rev
+	.long	0x008336b5, 0xf477c241, 0x1e9d28ab, 0xea69dc5f	?rev
+Lk_dksb:	# decryption key schedule: invskew x*B
+	.long	0x00d55085, 0x1fca4f9a, 0x994cc91c, 0x8653d603	?rev
+	.long	0x004afcb6, 0xa7ed5b11, 0xc882347e, 0x6f2593d9	?rev
+Lk_dkse:	# decryption key schedule: invskew x*E + 0x63
+	.long	0x00d6c91f, 0xca1c03d5, 0x86504f99, 0x4c9a8553	?rev
+	.long	0xe87bdc4f, 0x059631a2, 0x8714b320, 0x6af95ecd	?rev
+Lk_dks9:	# decryption key schedule: invskew x*9
+	.long	0x00a7d97e, 0xc86f11b6, 0xfc5b2582, 0x3493ed4a	?rev
+	.long	0x00331427, 0x62517645, 0xcefddae9, 0xac9fb88b	?rev
+
+Lk_rcon:	# rcon
+	.long	0xb6ee9daf, 0xb991831f, 0x817d7c4d, 0x08982a70	?asis
+Lk_s63:
+	.long	0x5b5b5b5b, 0x5b5b5b5b, 0x5b5b5b5b, 0x5b5b5b5b	?asis
+
+Lk_opt:		# output transform
+	.long	0x0060b6d6, 0x29499fff, 0x0868bede, 0x214197f7	?rev
+	.long	0x00ecbc50, 0x51bded01, 0xe00c5cb0, 0xb15d0de1	?rev
+Lk_deskew:	# deskew tables: inverts the sbox's "skew"
+	.long	0x00e3a447, 0x40a3e407, 0x1af9be5d, 0x5ab9fe1d	?rev
+	.long	0x0069ea83, 0xdcb5365f, 0x771e9df4, 0xabc24128	?rev
+.align	5
+Lconsts:
+	mflr	r0
+	bcl	20,31,\$+4
+	mflr	r12	# -0x308 below is the distance between . and _vpaes_consts
+	addi	r12,r12,-0x308
+	mtlr	r0
+	blr
+	.long	0
+	.byte	0,12,0x14,0,0,0,0,0
+.asciz  "Vector Permutation AES for AltiVec, Mike Hamburg (Stanford University)"
+.align	6
+___
+
+my ($inptail,$inpperm,$outhead,$outperm,$outmask,$keyperm) = map("v$_",(26..31));
+{
+my ($inp,$out,$key) = map("r$_",(3..5));
+
+my ($invlo,$invhi,$iptlo,$ipthi,$sbou,$sbot) = map("v$_",(10..15));
+my ($sb1u,$sb1t,$sb2u,$sb2t) = map("v$_",(16..19));
+my ($sb9u,$sb9t,$sbdu,$sbdt,$sbbu,$sbbt,$sbeu,$sbet)=map("v$_",(16..23));
+
+$code.=<<___;
+##
+##  _aes_preheat
+##
+##  Fills r12 -> _vpaes_consts (so you can -fPIC)
+##  and the lookup-table vector registers as specified below.
+##
+.align	4
+_vpaes_encrypt_preheat:
+	mflr	r8
+	bl	Lconsts
+	mtlr	r8
+	li	r11, 0xc0		# Lk_inv
+	li	r10, 0xd0
+	li	r9,  0xe0		# Lk_ipt
+	li	r8,  0xf0
+	vxor	v7, v7, v7		# 0x00..00
+	vspltisb	v8,4		# 0x04..04
+	vspltisb	v9,0x0f		# 0x0f..0f
+	lvx	$invlo, r12, r11
+	li	r11, 0x100
+	lvx	$invhi, r12, r10
+	li	r10, 0x110
+	lvx	$iptlo, r12, r9
+	li	r9,  0x120
+	lvx	$ipthi, r12, r8
+	li	r8,  0x130
+	lvx	$sbou, r12, r11
+	li	r11, 0x140
+	lvx	$sbot, r12, r10
+	li	r10, 0x150
+	lvx	$sb1u, r12, r9
+	lvx	$sb1t, r12, r8
+	lvx	$sb2u, r12, r11
+	lvx	$sb2t, r12, r10
+	blr
+	.long	0
+	.byte	0,12,0x14,0,0,0,0,0
+
+##
+##  _aes_encrypt_core
+##
+##  AES-encrypt %xmm0.
+##
+##  Inputs:
+##     %xmm0 = input
+##     %xmm9-%xmm15 as in _vpaes_preheat
+##    (%rdx) = scheduled keys
+##
+##  Output in %xmm0
+##  Clobbers  %xmm1-%xmm6, %r9, %r10, %r11, %rax
+##
+##
+.align 5
+_vpaes_encrypt_core:
+	lwz	r8, 240($key)		# pull rounds
+	li	r9, 16
+	lvx	v5, 0, $key		# vmovdqu	(%r9),	%xmm5		# round0 key
+	li	r11, 0x10
+	lvx	v6, r9, $key
+	addi	r9, r9, 16
+	?vperm	v5, v5, v6, $keyperm	# align round key
+	addi	r10, r11, 0x40
+	vsrb	v1, v0, v8		# vpsrlb	\$4,	%xmm0,	%xmm0
+	vperm	v0, $iptlo, $iptlo, v0	# vpshufb	%xmm1,	%xmm2,	%xmm1
+	vperm	v1, $ipthi, $ipthi, v1	# vpshufb	%xmm0,	%xmm3,	%xmm2
+	vxor	v0, v0, v5		# vpxor	%xmm5,	%xmm1,	%xmm0
+	vxor	v0, v0, v1		# vpxor	%xmm2,	%xmm0,	%xmm0
+	mtctr	r8
+	b	Lenc_entry
+
+.align 4
+Lenc_loop:
+	# middle of middle round
+	vperm	v4, $sb1t, v7, v2	# vpshufb	%xmm2,	%xmm13,	%xmm4	# 4 = sb1u
+	lvx	v1, r12, r11		# vmovdqa	-0x40(%r11,%r10), %xmm1	# .Lk_mc_forward[]
+	addi	r11, r11, 16
+	vperm	v0, $sb1u, v7, v3	# vpshufb	%xmm3,	%xmm12,	%xmm0	# 0 = sb1t
+	vxor	v4, v4, v5		# vpxor		%xmm5,	%xmm4,	%xmm4	# 4 = sb1u + k
+	andi.	r11, r11, 0x30		# and		\$0x30, %r11	# ... mod 4
+	vperm	v5, $sb2t, v7, v2	# vpshufb	%xmm2,	%xmm15,	%xmm5	# 4 = sb2u
+	vxor	v0, v0, v4		# vpxor		%xmm4,	%xmm0,	%xmm0	# 0 = A
+	vperm	v2, $sb2u, v7, v3	# vpshufb	%xmm3,	%xmm14,	%xmm2	# 2 = sb2t
+	lvx	v4, r12, r10		# vmovdqa	(%r11,%r10), %xmm4	# .Lk_mc_backward[]
+	addi	r10, r11, 0x40
+	vperm	v3, v0, v7, v1		# vpshufb	%xmm1,	%xmm0,	%xmm3	# 0 = B
+	vxor	v2, v2, v5		# vpxor		%xmm5,	%xmm2,	%xmm2	# 2 = 2A
+	vperm	v0, v0, v7, v4		# vpshufb	%xmm4,	%xmm0,	%xmm0	# 3 = D
+	vxor	v3, v3, v2		# vpxor		%xmm2,	%xmm3,	%xmm3	# 0 = 2A+B
+	vperm	v4, v3, v7, v1		# vpshufb	%xmm1,	%xmm3,	%xmm4	# 0 = 2B+C
+	vxor	v0, v0, v3		# vpxor		%xmm3,	%xmm0,	%xmm0	# 3 = 2A+B+D
+	vxor	v0, v0, v4		# vpxor		%xmm4,	%xmm0, %xmm0	# 0 = 2A+3B+C+D
+
+Lenc_entry:
+	# top of round
+	vsrb	v1, v0, v8		# vpsrlb	\$4,	%xmm0,	%xmm0	# 1 = i
+	vperm	v5, $invhi, $invhi, v0	# vpshufb	%xmm1,	%xmm11,	%xmm5	# 2 = a/k
+	vxor	v0, v0, v1		# vpxor		%xmm0,	%xmm1,	%xmm1	# 0 = j
+	vperm	v3, $invlo, $invlo, v1	# vpshufb	%xmm0, 	%xmm10,	%xmm3  	# 3 = 1/i
+	vperm	v4, $invlo, $invlo, v0	# vpshufb	%xmm1, 	%xmm10,	%xmm4  	# 4 = 1/j
+	vand	v0, v0, v9
+	vxor	v3, v3, v5		# vpxor		%xmm5,	%xmm3,	%xmm3	# 3 = iak = 1/i + a/k
+	vxor	v4, v4, v5		# vpxor		%xmm5,	%xmm4,	%xmm4  	# 4 = jak = 1/j + a/k
+	vperm	v2, $invlo, v7, v3	# vpshufb	%xmm3,	%xmm10,	%xmm2  	# 2 = 1/iak
+	vmr	v5, v6
+	lvx	v6, r9, $key		# vmovdqu	(%r9), %xmm5
+	vperm	v3, $invlo, v7, v4	# vpshufb	%xmm4,	%xmm10,	%xmm3	# 3 = 1/jak
+	addi	r9, r9, 16
+	vxor	v2, v2, v0		# vpxor		%xmm1,	%xmm2,	%xmm2  	# 2 = io
+	?vperm	v5, v5, v6, $keyperm	# align round key
+	vxor	v3, v3, v1		# vpxor		%xmm0,	%xmm3,	%xmm3	# 3 = jo
+	bdnz	Lenc_loop
+
+	# middle of last round
+	addi	r10, r11, 0x80
+					# vmovdqa	-0x60(%r10), %xmm4	# 3 : sbou	.Lk_sbo
+					# vmovdqa	-0x50(%r10), %xmm0	# 0 : sbot	.Lk_sbo+16
+	vperm	v4, $sbou, v7, v2	# vpshufb	%xmm2,	%xmm4,	%xmm4	# 4 = sbou
+	lvx	v1, r12, r10		# vmovdqa	0x40(%r11,%r10), %xmm1	# .Lk_sr[]
+	vperm	v0, $sbot, v7, v3	# vpshufb	%xmm3,	%xmm0,	%xmm0	# 0 = sb1t
+	vxor	v4, v4, v5		# vpxor		%xmm5,	%xmm4,	%xmm4	# 4 = sb1u + k
+	vxor	v0, v0, v4		# vpxor		%xmm4,	%xmm0,	%xmm0	# 0 = A
+	vperm	v0, v0, v7, v1		# vpshufb	%xmm1,	%xmm0,	%xmm0
+	blr
+	.long	0
+	.byte	0,12,0x14,0,0,0,0,0
+
+.globl	.vpaes_encrypt
+.align	5
+.vpaes_encrypt:
+	$STU	$sp,-$FRAME($sp)
+	li	r10,`15+6*$SIZE_T`
+	li	r11,`31+6*$SIZE_T`
+	mflr	r6
+	mfspr	r7, 256			# save vrsave
+	stvx	v20,r10,$sp
+	addi	r10,r10,32
+	stvx	v21,r11,$sp
+	addi	r11,r11,32
+	stvx	v22,r10,$sp
+	addi	r10,r10,32
+	stvx	v23,r11,$sp
+	addi	r11,r11,32
+	stvx	v24,r10,$sp
+	addi	r10,r10,32
+	stvx	v25,r11,$sp
+	addi	r11,r11,32
+	stvx	v26,r10,$sp
+	addi	r10,r10,32
+	stvx	v27,r11,$sp
+	addi	r11,r11,32
+	stvx	v28,r10,$sp
+	addi	r10,r10,32
+	stvx	v29,r11,$sp
+	addi	r11,r11,32
+	stvx	v30,r10,$sp
+	stvx	v31,r11,$sp
+	stw	r7,`$FRAME-4`($sp)	# save vrsave
+	li	r0, -1
+	$PUSH	r6,`$FRAME+$LRSAVE`($sp)
+	mtspr	256, r0			# preserve all AltiVec registers
+
+	bl	_vpaes_encrypt_preheat
+
+	?lvsl	$inpperm, 0, $inp	# prepare for unaligned access
+	lvx	v0, 0, $inp
+	addi	$inp, $inp, 15		# 15 is not a typo
+	 ?lvsr	$outperm, 0, $out
+	?lvsl	$keyperm, 0, $key	# prepare for unaligned access
+	lvx	$inptail, 0, $inp	# redundant in aligned case
+	?vperm	v0, v0, $inptail, $inpperm
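+	# The sequence above is the classic AltiVec unaligned-load idiom:
+	# lvx ignores the low four address bits, so two aligned loads
+	# straddling the data are spliced with a vperm whose control
+	# vector comes from lvsl. '?'-prefixed mnemonics are the ones the
+	# generator rewrites for little-endian targets.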
+
+	bl	_vpaes_encrypt_core
+
+	andi.	r8, $out, 15
+	li	r9, 16
+	beq	Lenc_out_aligned
+
+	vperm	v0, v0, v0, $outperm	# rotate right/left
+	mtctr	r9
+Lenc_out_unaligned:
+	stvebx	v0, 0, $out
+	addi	$out, $out, 1
+	bdnz	Lenc_out_unaligned
+	b	Lenc_done
+
+.align	4
+Lenc_out_aligned:
+	stvx	v0, 0, $out
+Lenc_done:
+
+	li	r10,`15+6*$SIZE_T`
+	li	r11,`31+6*$SIZE_T`
+	mtlr	r6
+	mtspr	256, r7			# restore vrsave
+	lvx	v20,r10,$sp
+	addi	r10,r10,32
+	lvx	v21,r11,$sp
+	addi	r11,r11,32
+	lvx	v22,r10,$sp
+	addi	r10,r10,32
+	lvx	v23,r11,$sp
+	addi	r11,r11,32
+	lvx	v24,r10,$sp
+	addi	r10,r10,32
+	lvx	v25,r11,$sp
+	addi	r11,r11,32
+	lvx	v26,r10,$sp
+	addi	r10,r10,32
+	lvx	v27,r11,$sp
+	addi	r11,r11,32
+	lvx	v28,r10,$sp
+	addi	r10,r10,32
+	lvx	v29,r11,$sp
+	addi	r11,r11,32
+	lvx	v30,r10,$sp
+	lvx	v31,r11,$sp
+	addi	$sp,$sp,$FRAME
+	blr
+	.long	0
+	.byte	0,12,0x04,1,0x80,0,3,0
+	.long	0
+.size	.vpaes_encrypt,.-.vpaes_encrypt
+
+.align	4
+_vpaes_decrypt_preheat:
+	mflr	r8
+	bl	Lconsts
+	mtlr	r8
+	li	r11, 0xc0		# Lk_inv
+	li	r10, 0xd0
+	li	r9,  0x160		# Lk_dipt
+	li	r8,  0x170
+	vxor	v7, v7, v7		# 0x00..00
+	vspltisb	v8,4		# 0x04..04
+	vspltisb	v9,0x0f		# 0x0f..0f
+	lvx	$invlo, r12, r11
+	li	r11, 0x180
+	lvx	$invhi, r12, r10
+	li	r10, 0x190
+	lvx	$iptlo, r12, r9
+	li	r9,  0x1a0
+	lvx	$ipthi, r12, r8
+	li	r8,  0x1b0
+	lvx	$sbou, r12, r11
+	li	r11, 0x1c0
+	lvx	$sbot, r12, r10
+	li	r10, 0x1d0
+	lvx	$sb9u, r12, r9
+	li	r9,  0x1e0
+	lvx	$sb9t, r12, r8
+	li	r8,  0x1f0
+	lvx	$sbdu, r12, r11
+	li	r11, 0x200
+	lvx	$sbdt, r12, r10
+	li	r10, 0x210
+	lvx	$sbbu, r12, r9
+	lvx	$sbbt, r12, r8
+	lvx	$sbeu, r12, r11
+	lvx	$sbet, r12, r10
+	blr
+	.long	0
+	.byte	0,12,0x14,0,0,0,0,0
+
+##
+##  Decryption core
+##
+##  Same API as encryption core.
+##
+.align	4
+_vpaes_decrypt_core:
+	lwz	r8, 240($key)		# pull rounds
+	li	r9, 16
+	lvx	v5, 0, $key		# vmovdqu	(%r9),	%xmm4		# round0 key
+	li	r11, 0x30
+	lvx	v6, r9, $key
+	addi	r9, r9, 16
+	?vperm	v5, v5, v6, $keyperm	# align round key
+	vsrb	v1, v0, v8		# vpsrlb	\$4,	%xmm0,	%xmm0
+	vperm	v0, $iptlo, $iptlo, v0	# vpshufb	%xmm1,	%xmm2,	%xmm2
+	vperm	v1, $ipthi, $ipthi, v1	# vpshufb	%xmm0,	%xmm1,	%xmm0
+	vxor	v0, v0, v5		# vpxor	%xmm4,	%xmm2,	%xmm2
+	vxor	v0, v0, v1		# vpxor	%xmm2,	%xmm0,	%xmm0
+	mtctr	r8
+	b	Ldec_entry
+
+.align 4
+Ldec_loop:
+#
+#  Inverse mix columns
+#
+	lvx	v0, r12, r11		# v5 and v0 are flipped
+					# vmovdqa	-0x20(%r10),%xmm4		# 4 : sb9u
+					# vmovdqa	-0x10(%r10),%xmm1		# 0 : sb9t
+	vperm	v4, $sb9u, v7, v2	# vpshufb	%xmm2,	%xmm4,	%xmm4		# 4 = sb9u
+	subi	r11, r11, 16
+	vperm	v1, $sb9t, v7, v3	# vpshufb	%xmm3,	%xmm1,	%xmm1		# 0 = sb9t
+	andi.	r11, r11, 0x30
+	vxor	v5, v5, v4		# vpxor		%xmm4,	%xmm0,	%xmm0
+					# vmovdqa	0x00(%r10),%xmm4		# 4 : sbdu
+	vxor	v5, v5, v1		# vpxor		%xmm1,	%xmm0,	%xmm0		# 0 = ch
+					# vmovdqa	0x10(%r10),%xmm1		# 0 : sbdt
+
+	vperm	v4, $sbdu, v7, v2	# vpshufb	%xmm2,	%xmm4,	%xmm4		# 4 = sbdu
+	vperm 	v5, v5, v7, v0		# vpshufb	%xmm5,	%xmm0,	%xmm0		# MC ch
+	vperm	v1, $sbdt, v7, v3	# vpshufb	%xmm3,	%xmm1,	%xmm1		# 0 = sbdt
+	vxor	v5, v5, v4		# vpxor		%xmm4,	%xmm0,	%xmm0		# 4 = ch
+					# vmovdqa	0x20(%r10),	%xmm4		# 4 : sbbu
+	vxor	v5, v5, v1		# vpxor		%xmm1,	%xmm0,	%xmm0		# 0 = ch
+					# vmovdqa	0x30(%r10),	%xmm1		# 0 : sbbt
+
+	vperm	v4, $sbbu, v7, v2	# vpshufb	%xmm2,	%xmm4,	%xmm4		# 4 = sbbu
+	vperm	v5, v5, v7, v0		# vpshufb	%xmm5,	%xmm0,	%xmm0		# MC ch
+	vperm	v1, $sbbt, v7, v3	# vpshufb	%xmm3,	%xmm1,	%xmm1		# 0 = sbbt
+	vxor	v5, v5, v4		# vpxor		%xmm4,	%xmm0,	%xmm0		# 4 = ch
+					# vmovdqa	0x40(%r10),	%xmm4		# 4 : sbeu
+	vxor	v5, v5, v1		# vpxor		%xmm1,	%xmm0,	%xmm0		# 0 = ch
+					# vmovdqa	0x50(%r10),	%xmm1		# 0 : sbet
+
+	vperm	v4, $sbeu, v7, v2	# vpshufb	%xmm2,	%xmm4,	%xmm4		# 4 = sbeu
+	vperm	v5, v5, v7, v0		# vpshufb	%xmm5,	%xmm0,	%xmm0		# MC ch
+	vperm	v1, $sbet, v7, v3	# vpshufb	%xmm3,	%xmm1,	%xmm1		# 0 = sbet
+	vxor	v0, v5, v4		# vpxor		%xmm4,	%xmm0,	%xmm0		# 4 = ch
+	vxor	v0, v0, v1		# vpxor		%xmm1,	%xmm0,	%xmm0		# 0 = ch
+
+Ldec_entry:
+	# top of round
+	vsrb	v1, v0, v8		# vpsrlb	\$4,	%xmm0,	%xmm0	# 1 = i
+	vperm	v2, $invhi, $invhi, v0	# vpshufb	%xmm1,	%xmm11,	%xmm2	# 2 = a/k
+	vxor	v0, v0, v1		# vpxor		%xmm0,	%xmm1,	%xmm1	# 0 = j
+	vperm	v3, $invlo, $invlo, v1	# vpshufb	%xmm0, 	%xmm10,	%xmm3	# 3 = 1/i
+	vperm	v4, $invlo, $invlo, v0	# vpshufb	%xmm1,	%xmm10,	%xmm4	# 4 = 1/j
+	vand	v0, v0, v9
+	vxor	v3, v3, v2		# vpxor		%xmm2,	%xmm3,	%xmm3	# 3 = iak = 1/i + a/k
+	vxor	v4, v4, v2		# vpxor		%xmm2, 	%xmm4,	%xmm4	# 4 = jak = 1/j + a/k
+	vperm	v2, $invlo, v7, v3	# vpshufb	%xmm3,	%xmm10,	%xmm2	# 2 = 1/iak
+	vmr	v5, v6
+	lvx	v6, r9, $key		# vmovdqu	(%r9),	%xmm0
+	vperm	v3, $invlo, v7, v4	# vpshufb	%xmm4,  %xmm10,	%xmm3	# 3 = 1/jak
+	addi	r9, r9, 16
+	vxor	v2, v2, v0		# vpxor		%xmm1,	%xmm2,	%xmm2	# 2 = io
+	?vperm	v5, v5, v6, $keyperm	# align round key
+	vxor	v3, v3, v1		# vpxor		%xmm0,  %xmm3,	%xmm3	# 3 = jo
+	bdnz	Ldec_loop
+
+	# middle of last round
+	addi	r10, r11, 0x80
+					# vmovdqa	0x60(%r10),	%xmm4	# 3 : sbou
+	vperm	v4, $sbou, v7, v2	# vpshufb	%xmm2,	%xmm4,	%xmm4	# 4 = sbou
+					# vmovdqa	0x70(%r10),	%xmm1	# 0 : sbot
+	lvx	v2, r12, r10		# vmovdqa	-0x160(%r11),	%xmm2	# .Lk_sr-.Lk_dsbd=-0x160
+	vperm	v1, $sbot, v7, v3	# vpshufb	%xmm3,	%xmm1,	%xmm1	# 0 = sb1t
+	vxor	v4, v4, v5		# vpxor		%xmm0,	%xmm4,	%xmm4	# 4 = sb1u + k
+	vxor	v0, v1, v4		# vpxor		%xmm4,	%xmm1,	%xmm0	# 0 = A
+	vperm	v0, v0, v7, v2		# vpshufb	%xmm2,	%xmm0,	%xmm0
+	blr
+	.long	0
+	.byte	0,12,0x14,0,0,0,0,0
+
+.globl	.vpaes_decrypt
+.align	5
+.vpaes_decrypt:
+	$STU	$sp,-$FRAME($sp)
+	li	r10,`15+6*$SIZE_T`
+	li	r11,`31+6*$SIZE_T`
+	mflr	r6
+	mfspr	r7, 256			# save vrsave
+	stvx	v20,r10,$sp
+	addi	r10,r10,32
+	stvx	v21,r11,$sp
+	addi	r11,r11,32
+	stvx	v22,r10,$sp
+	addi	r10,r10,32
+	stvx	v23,r11,$sp
+	addi	r11,r11,32
+	stvx	v24,r10,$sp
+	addi	r10,r10,32
+	stvx	v25,r11,$sp
+	addi	r11,r11,32
+	stvx	v26,r10,$sp
+	addi	r10,r10,32
+	stvx	v27,r11,$sp
+	addi	r11,r11,32
+	stvx	v28,r10,$sp
+	addi	r10,r10,32
+	stvx	v29,r11,$sp
+	addi	r11,r11,32
+	stvx	v30,r10,$sp
+	stvx	v31,r11,$sp
+	stw	r7,`$FRAME-4`($sp)	# save vrsave
+	li	r0, -1
+	$PUSH	r6,`$FRAME+$LRSAVE`($sp)
+	mtspr	256, r0			# preserve all AltiVec registers
+
+	bl	_vpaes_decrypt_preheat
+
+	?lvsl	$inpperm, 0, $inp	# prepare for unaligned access
+	lvx	v0, 0, $inp
+	addi	$inp, $inp, 15		# 15 is not a typo
+	 ?lvsr	$outperm, 0, $out
+	?lvsl	$keyperm, 0, $key
+	lvx	$inptail, 0, $inp	# redundant in aligned case
+	?vperm	v0, v0, $inptail, $inpperm
+
+	bl	_vpaes_decrypt_core
+
+	andi.	r8, $out, 15
+	li	r9, 16
+	beq	Ldec_out_aligned
+
+	vperm	v0, v0, v0, $outperm	# rotate right/left
+	mtctr	r9
+Ldec_out_unaligned:
+	stvebx	v0, 0, $out
+	addi	$out, $out, 1
+	bdnz	Ldec_out_unaligned
+	b	Ldec_done
+
+.align	4
+Ldec_out_aligned:
+	stvx	v0, 0, $out
+Ldec_done:
+
+	li	r10,`15+6*$SIZE_T`
+	li	r11,`31+6*$SIZE_T`
+	mtlr	r6
+	mtspr	256, r7			# restore vrsave
+	lvx	v20,r10,$sp
+	addi	r10,r10,32
+	lvx	v21,r11,$sp
+	addi	r11,r11,32
+	lvx	v22,r10,$sp
+	addi	r10,r10,32
+	lvx	v23,r11,$sp
+	addi	r11,r11,32
+	lvx	v24,r10,$sp
+	addi	r10,r10,32
+	lvx	v25,r11,$sp
+	addi	r11,r11,32
+	lvx	v26,r10,$sp
+	addi	r10,r10,32
+	lvx	v27,r11,$sp
+	addi	r11,r11,32
+	lvx	v28,r10,$sp
+	addi	r10,r10,32
+	lvx	v29,r11,$sp
+	addi	r11,r11,32
+	lvx	v30,r10,$sp
+	lvx	v31,r11,$sp
+	addi	$sp,$sp,$FRAME
+	blr
+	.long	0
+	.byte	0,12,0x04,1,0x80,0,3,0
+	.long	0
+.size	.vpaes_decrypt,.-.vpaes_decrypt
+
+.globl	.vpaes_cbc_encrypt
+.align	5
+.vpaes_cbc_encrypt:
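+	# lengths shorter than one block return immediately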
+	${UCMP}i r5,16
+	bltlr-
+
+	$STU	$sp,-`($FRAME+2*$SIZE_T)`($sp)
+	mflr	r0
+	li	r10,`15+6*$SIZE_T`
+	li	r11,`31+6*$SIZE_T`
+	mfspr	r12, 256
+	stvx	v20,r10,$sp
+	addi	r10,r10,32
+	stvx	v21,r11,$sp
+	addi	r11,r11,32
+	stvx	v22,r10,$sp
+	addi	r10,r10,32
+	stvx	v23,r11,$sp
+	addi	r11,r11,32
+	stvx	v24,r10,$sp
+	addi	r10,r10,32
+	stvx	v25,r11,$sp
+	addi	r11,r11,32
+	stvx	v26,r10,$sp
+	addi	r10,r10,32
+	stvx	v27,r11,$sp
+	addi	r11,r11,32
+	stvx	v28,r10,$sp
+	addi	r10,r10,32
+	stvx	v29,r11,$sp
+	addi	r11,r11,32
+	stvx	v30,r10,$sp
+	stvx	v31,r11,$sp
+	stw	r12,`$FRAME-4`($sp)	# save vrsave
+	$PUSH	r30,`$FRAME+$SIZE_T*0`($sp)
+	$PUSH	r31,`$FRAME+$SIZE_T*1`($sp)
+	li	r9, -16
+	$PUSH	r0, `$FRAME+$SIZE_T*2+$LRSAVE`($sp)
+
+	and	r30, r5, r9		# copy length&-16
+	andi.	r9, $out, 15		# is $out aligned?
+	mr	r5, r6			# copy pointer to key
+	mr	r31, r7			# copy pointer to iv
+	li	r6, -1
+	mcrf	cr1, cr0		# put aside $out alignment flag
+	mr	r7, r12			# copy vrsave
+	mtspr	256, r6			# preserve all AltiVec registers
+
+	lvx	v24, 0, r31		# load [potentially unaligned] iv
+	li	r9, 15
+	?lvsl	$inpperm, 0, r31
+	lvx	v25, r9, r31
+	?vperm	v24, v24, v25, $inpperm
+
+	cmpwi	r8, 0			# test direction
+	neg	r8, $inp		# prepare for unaligned access
+	 vxor	v7, v7, v7
+	?lvsl	$keyperm, 0, $key
+	 ?lvsr	$outperm, 0, $out
+	?lvsr	$inpperm, 0, r8		# -$inp
+	 vnor	$outmask, v7, v7	# 0xff..ff
+	lvx	$inptail, 0, $inp
+	 ?vperm	$outmask, v7, $outmask, $outperm
+	addi	$inp, $inp, 15		# 15 is not a typo
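+	# Unaligned stores are pipelined below: each result is rotated
+	# into store alignment ($outperm), merged with the previous
+	# vector's tail via vsel/$outmask, and written with an aligned
+	# stvx; leading and trailing bytes go out one at a time through
+	# stvebx loops.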
+
+	beq	Lcbc_decrypt
+
+	bl	_vpaes_encrypt_preheat
+	li	r0, 16
+
+	beq	cr1, Lcbc_enc_loop	# $out is aligned
+
+	vmr	v0, $inptail
+	lvx	$inptail, 0, $inp
+	addi	$inp, $inp, 16
+	?vperm	v0, v0, $inptail, $inpperm
+	vxor	v0, v0, v24		# ^= iv
+
+	bl	_vpaes_encrypt_core
+
+	andi.	r8, $out, 15
+	vmr	v24, v0			# put aside iv
+	sub	r9, $out, r8
+	vperm	$outhead, v0, v0, $outperm	# rotate right/left
+
+Lcbc_enc_head:
+	stvebx	$outhead, r8, r9
+	cmpwi	r8, 15
+	addi	r8, r8, 1
+	bne	Lcbc_enc_head
+
+	sub.	r30, r30, r0		# len -= 16
+	addi	$out, $out, 16
+	beq	Lcbc_unaligned_done
+
+Lcbc_enc_loop:
+	vmr	v0, $inptail
+	lvx	$inptail, 0, $inp
+	addi	$inp, $inp, 16
+	?vperm	v0, v0, $inptail, $inpperm
+	vxor	v0, v0, v24		# ^= iv
+
+	bl	_vpaes_encrypt_core
+
+	vmr	v24, v0			# put aside iv
+	sub.	r30, r30, r0		# len -= 16
+	vperm	v0, v0, v0, $outperm	# rotate right/left
+	vsel	v1, $outhead, v0, $outmask
+	vmr	$outhead, v0
+	stvx	v1, 0, $out
+	addi	$out, $out, 16
+	bne	Lcbc_enc_loop
+
+	b	Lcbc_done
+
+.align	5
+Lcbc_decrypt:
+	bl	_vpaes_decrypt_preheat
+	li	r0, 16
+
+	beq	cr1, Lcbc_dec_loop	# $out is aligned
+
+	vmr	v0, $inptail
+	lvx	$inptail, 0, $inp
+	addi	$inp, $inp, 16
+	?vperm	v0, v0, $inptail, $inpperm
+	vmr	v25, v0			# put aside input
+
+	bl	_vpaes_decrypt_core
+
+	andi.	r8, $out, 15
+	vxor	v0, v0, v24		# ^= iv
+	vmr	v24, v25
+	sub	r9, $out, r8
+	vperm	$outhead, v0, v0, $outperm	# rotate right/left
+
+Lcbc_dec_head:
+	stvebx	$outhead, r8, r9
+	cmpwi	r8, 15
+	addi	r8, r8, 1
+	bne	Lcbc_dec_head
+
+	sub.	r30, r30, r0		# len -= 16
+	addi	$out, $out, 16
+	beq	Lcbc_unaligned_done
+
+Lcbc_dec_loop:
+	vmr	v0, $inptail
+	lvx	$inptail, 0, $inp
+	addi	$inp, $inp, 16
+	?vperm	v0, v0, $inptail, $inpperm
+	vmr	v25, v0			# put aside input
+
+	bl	_vpaes_decrypt_core
+
+	vxor	v0, v0, v24		# ^= iv
+	vmr	v24, v25
+	sub.	r30, r30, r0		# len -= 16
+	vperm	v0, v0, v0, $outperm	# rotate right/left
+	vsel	v1, $outhead, v0, $outmask
+	vmr	$outhead, v0
+	stvx	v1, 0, $out
+	addi	$out, $out, 16
+	bne	Lcbc_dec_loop
+
+Lcbc_done:
+	beq	cr1, Lcbc_write_iv	# $out is aligned
+
+Lcbc_unaligned_done:
+	andi.	r8, $out, 15
+	sub	$out, $out, r8
+	li	r9, 0
+Lcbc_tail:
+	stvebx	$outhead, r9, $out
+	addi	r9, r9, 1
+	cmpw	r9, r8
+	bne	Lcbc_tail
+
+Lcbc_write_iv:
+	neg	r8, r31			# write [potentially unaligned] iv
+	li	r10, 4
+	?lvsl	$outperm, 0, r8
+	li	r11, 8
+	li	r12, 12
+	vperm	v24, v24, v24, $outperm	# rotate right/left
+	stvewx	v24, 0, r31		# ivp is at least 32-bit aligned
+	stvewx	v24, r10, r31
+	stvewx	v24, r11, r31
+	stvewx	v24, r12, r31
+
+	mtspr	256, r7			# restore vrsave
+	li	r10,`15+6*$SIZE_T`
+	li	r11,`31+6*$SIZE_T`
+	lvx	v20,r10,$sp
+	addi	r10,r10,32
+	lvx	v21,r11,$sp
+	addi	r11,r11,32
+	lvx	v22,r10,$sp
+	addi	r10,r10,32
+	lvx	v23,r11,$sp
+	addi	r11,r11,32
+	lvx	v24,r10,$sp
+	addi	r10,r10,32
+	lvx	v25,r11,$sp
+	addi	r11,r11,32
+	lvx	v26,r10,$sp
+	addi	r10,r10,32
+	lvx	v27,r11,$sp
+	addi	r11,r11,32
+	lvx	v28,r10,$sp
+	addi	r10,r10,32
+	lvx	v29,r11,$sp
+	addi	r11,r11,32
+	lvx	v30,r10,$sp
+	lvx	v31,r11,$sp
+Lcbc_abort:
+	$POP	r0, `$FRAME+$SIZE_T*2+$LRSAVE`($sp)
+	$POP	r30,`$FRAME+$SIZE_T*0`($sp)
+	$POP	r31,`$FRAME+$SIZE_T*1`($sp)
+	mtlr	r0
+	addi	$sp,$sp,`$FRAME+$SIZE_T*2`
+	blr
+	.long	0
+	.byte	0,12,0x04,1,0x80,2,6,0
+	.long	0
+.size	.vpaes_cbc_encrypt,.-.vpaes_cbc_encrypt
+___
+}
+{
+my ($inp,$bits,$out)=map("r$_",(3..5));
+my $dir="cr1";
+my ($invlo,$invhi,$iptlo,$ipthi,$rcon) = map("v$_",(10..13,24));
+
+$code.=<<___;
+########################################################
+##                                                    ##
+##                  AES key schedule                  ##
+##                                                    ##
+########################################################
+.align	4
+_vpaes_key_preheat:
+	mflr	r8
+	bl	Lconsts
+	mtlr	r8
+	li	r11, 0xc0		# Lk_inv
+	li	r10, 0xd0
+	li	r9,  0xe0		# Lk_ipt
+	li	r8,  0xf0
+
+	vspltisb	v8,4		# 0x04..04
+	vxor	v9,v9,v9		# 0x00..00
+	lvx	$invlo, r12, r11	# Lk_inv
+	li	r11, 0x120
+	lvx	$invhi, r12, r10
+	li	r10, 0x130
+	lvx	$iptlo, r12, r9		# Lk_ipt
+	li	r9, 0x220
+	lvx	$ipthi, r12, r8
+	li	r8, 0x230
+
+	lvx	v14, r12, r11		# Lk_sb1
+	li	r11, 0x240
+	lvx	v15, r12, r10
+	li	r10, 0x250
+
+	lvx	v16, r12, r9		# Lk_dksd
+	li	r9, 0x260
+	lvx	v17, r12, r8
+	li	r8, 0x270
+	lvx	v18, r12, r11		# Lk_dksb
+	li	r11, 0x280
+	lvx	v19, r12, r10
+	li	r10, 0x290
+	lvx	v20, r12, r9		# Lk_dkse
+	li	r9, 0x2a0
+	lvx	v21, r12, r8
+	li	r8, 0x2b0
+	lvx	v22, r12, r11		# Lk_dks9
+	lvx	v23, r12, r10
+
+	lvx	v24, r12, r9		# Lk_rcon
+	lvx	v25, 0, r12		# Lk_mc_forward[0]
+	lvx	v26, r12, r8		# Lk_s63
+	blr
+	.long	0
+	.byte	0,12,0x14,0,0,0,0,0
+
+.align	4
+_vpaes_schedule_core:
+	mflr	r7
+
+	bl	_vpaes_key_preheat	# load the tables
+
+	#lvx	v0, 0, $inp		# vmovdqu	(%rdi),	%xmm0		# load key (unaligned)
+	neg	r8, $inp		# prepare for unaligned access
+	lvx	v0, 0, $inp
+	addi	$inp, $inp, 15		# 15 is not a typo
+	?lvsr	$inpperm, 0, r8		# -$inp
+	lvx	v6, 0, $inp		# v6 serves as inptail
+	addi	$inp, $inp, 8
+	?vperm	v0, v0, v6, $inpperm
+
+	# input transform
+	vmr	v3, v0			# vmovdqa	%xmm0,	%xmm3
+	bl	_vpaes_schedule_transform
+	vmr	v7, v0			# vmovdqa	%xmm0,	%xmm7
+
+	bne	$dir, Lschedule_am_decrypting
+
+	# encrypting, output zeroth round key after transform
+	li	r8, 0x30		# mov	\$0x30,%r8d
+	li	r9, 4
+	li	r10, 8
+	li	r11, 12
+
+	?lvsr	$outperm, 0, $out	# prepare for unaligned access
+	vnor	$outmask, v9, v9	# 0xff..ff
+	?vperm	$outmask, v9, $outmask, $outperm
+
+	#stvx	v0, 0, $out		# vmovdqu	%xmm0,	(%rdx)
+	vperm	$outhead, v0, v0, $outperm	# rotate right/left
+	stvewx	$outhead, 0, $out	# some are superfluous
+	stvewx	$outhead, r9, $out
+	stvewx	$outhead, r10, $out
+	addi	r10, r12, 0x80		# lea	.Lk_sr(%rip),%r10
+	stvewx	$outhead, r11, $out
+	b	Lschedule_go
+
+Lschedule_am_decrypting:
+	srwi	r8, $bits, 1		# shr	\$1,%r8d
+	andi.	r8, r8, 32		# and	\$32,%r8d
+	xori	r8, r8, 32		# xor	\$32,%r8d	# nbits==192?0:32
+	addi	r10, r12, 0x80		# lea	.Lk_sr(%rip),%r10
+	# decrypting, output zeroth round key after shiftrows
+	lvx	v1, r8, r10		# vmovdqa	(%r8,%r10),	%xmm1
+	li	r9, 4
+	li	r10, 8
+	li	r11, 12
+	vperm	v4, v3, v3, v1		# vpshufb	%xmm1,	%xmm3,	%xmm3
+
+	neg	r0, $out		# prepare for unaligned access
+	?lvsl	$outperm, 0, r0
+	vnor	$outmask, v9, v9	# 0xff..ff
+	?vperm	$outmask, $outmask, v9, $outperm
+
+	#stvx	v4, 0, $out		# vmovdqu	%xmm3,	(%rdx)
+	vperm	$outhead, v4, v4, $outperm	# rotate right/left
+	stvewx	$outhead, 0, $out	# some are superfluous
+	stvewx	$outhead, r9, $out
+	stvewx	$outhead, r10, $out
+	addi	r10, r12, 0x80		# lea	.Lk_sr(%rip),%r10
+	stvewx	$outhead, r11, $out
+	addi	$out, $out, 15		# 15 is not a typo
+	xori	r8, r8, 0x30		# xor	\$0x30, %r8
+
+Lschedule_go:
+	cmplwi	$bits, 192		# cmp	\$192,	%esi
+	bgt	Lschedule_256
+	beq	Lschedule_192
+	# 128: fall through
+
+##
+##  .schedule_128
+##
+##  128-bit specific part of key schedule.
+##
+##  This schedule is really simple, because all its parts
+##  are accomplished by the subroutines.
+##
+Lschedule_128:
+	li	r0, 10			# mov	\$10, %esi
+	mtctr	r0
+
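+# Note: the CTR register replaces the x86 %esi counter; bdz decrements
+# it and takes the Lschedule_mangle_last exit when it reaches zero.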
+Loop_schedule_128:
+	bl 	_vpaes_schedule_round
+	bdz 	Lschedule_mangle_last	# dec	%esi
+	bl	_vpaes_schedule_mangle	# write output
+	b 	Loop_schedule_128
+
+##
+##  .aes_schedule_192
+##
+##  192-bit specific part of key schedule.
+##
+##  The main body of this schedule is the same as the 128-bit
+##  schedule, but with more smearing.  The long, high side is
+##  stored in %xmm7 as before, and the short, low side is in
+##  the high bits of %xmm6.
+##
+##  This schedule is somewhat nastier, however, because each
+##  round produces 192 bits of key material, or 1.5 round keys.
+##  Therefore, on each cycle we do 2 rounds and produce 3 round
+##  keys.
+##
+.align	4
+Lschedule_192:
+	li	r0, 4			# mov	\$4,	%esi
+	lvx	v0, 0, $inp
+	?vperm	v0, v6, v0, $inpperm
+	?vsldoi	v0, v3, v0, 8		# vmovdqu	8(%rdi),%xmm0		# load key part 2 (very unaligned)
+	bl	_vpaes_schedule_transform	# input transform
+	?vsldoi	v6, v0, v9, 8
+	?vsldoi	v6, v9, v6, 8		# clobber "low" side with zeros
+	mtctr	r0
+
+Loop_schedule_192:
+	bl	_vpaes_schedule_round
+	?vsldoi	v0, v6, v0, 8		# vpalignr	\$8,%xmm6,%xmm0,%xmm0
+	bl	_vpaes_schedule_mangle	# save key n
+	bl	_vpaes_schedule_192_smear
+	bl	_vpaes_schedule_mangle	# save key n+1
+	bl	_vpaes_schedule_round
+	bdz 	Lschedule_mangle_last	# dec	%esi
+	bl	_vpaes_schedule_mangle	# save key n+2
+	bl	_vpaes_schedule_192_smear
+	b	Loop_schedule_192
+
+##
+##  .aes_schedule_256
+##
+##  256-bit specific part of key schedule.
+##
+##  The structure here is very similar to the 128-bit
+##  schedule, but with an additional "low side" in
+##  %xmm6.  The low side's rounds are the same as the
+##  high side's, except no rcon and no rotation.
+##
+.align	4
+Lschedule_256:
+	li	r0, 7			# mov	\$7, %esi
+	addi	$inp, $inp, 8
+	lvx	v0, 0, $inp		# vmovdqu	16(%rdi),%xmm0		# load key part 2 (unaligned)
+	?vperm	v0, v6, v0, $inpperm
+	bl	_vpaes_schedule_transform	# input transform
+	mtctr	r0
+
+Loop_schedule_256:
+	bl	_vpaes_schedule_mangle	# output low result
+	vmr	v6, v0			# vmovdqa	%xmm0,	%xmm6		# save cur_lo in xmm6
+
+	# high round
+	bl	_vpaes_schedule_round
+	bdz 	Lschedule_mangle_last	# dec	%esi
+	bl	_vpaes_schedule_mangle
+
+	# low round. swap xmm7 and xmm6
+	?vspltw	v0, v0, 3		# vpshufd	\$0xFF,	%xmm0,	%xmm0
+	vmr	v5, v7			# vmovdqa	%xmm7,	%xmm5
+	vmr	v7, v6			# vmovdqa	%xmm6,	%xmm7
+	bl	_vpaes_schedule_low_round
+	vmr	v7, v5			# vmovdqa	%xmm5,	%xmm7
+
+	b	Loop_schedule_256
+##
+##  .aes_schedule_mangle_last
+##
+##  Mangler for last round of key schedule
+##  Mangles %xmm0
+##    when encrypting, outputs out(%xmm0) ^ 63
+##    when decrypting, outputs unskew(%xmm0)
+##
+##  Always called right before return... jumps to cleanup and exits
+##
+.align	4
+Lschedule_mangle_last:
+	# schedule last round key from xmm0
+	li	r11, 0x2e0		# lea	.Lk_deskew(%rip),%r11
+	li	r9,  0x2f0
+	bne	$dir, Lschedule_mangle_last_dec
+
+	# encrypting
+	lvx	v1, r8, r10		# vmovdqa	(%r8,%r10),%xmm1
+	li	r11, 0x2c0		# lea		.Lk_opt(%rip),	%r11	# prepare to output transform
+	li	r9,  0x2d0		# prepare to output transform
+	vperm	v0, v0, v0, v1		# vpshufb	%xmm1,	%xmm0,	%xmm0	# output permute
+
+	lvx	$iptlo, r11, r12	# reload $ipt
+	lvx	$ipthi, r9, r12
+	addi	$out, $out, 16		# add	\$16,	%rdx
+	vxor	v0, v0, v26		# vpxor		.Lk_s63(%rip),	%xmm0,	%xmm0
+	bl	_vpaes_schedule_transform	# output transform
+
+	#stvx	v0, r0, $out		# vmovdqu	%xmm0,	(%rdx)		# save last key
+	vperm	v0, v0, v0, $outperm	# rotate right/left
+	li	r10, 4
+	vsel	v2, $outhead, v0, $outmask
+	li	r11, 8
+	stvx	v2, 0, $out
+	li	r12, 12
+	stvewx	v0, 0, $out		# some (or all) are redundant
+	stvewx	v0, r10, $out
+	stvewx	v0, r11, $out
+	stvewx	v0, r12, $out
+	b	Lschedule_mangle_done
+
+.align	4
+Lschedule_mangle_last_dec:
+	lvx	$iptlo, r11, r12	# reload $ipt
+	lvx	$ipthi, r9,  r12
+	addi	$out, $out, -16		# add	\$-16,	%rdx
+	vxor	v0, v0, v26		# vpxor	.Lk_s63(%rip),	%xmm0,	%xmm0
+	bl	_vpaes_schedule_transform	# output transform
+
+	#stvx	v0, r0, $out		# vmovdqu	%xmm0,	(%rdx)		# save last key
+	addi	r9, $out, -15		# -15 is not a typo
+	vperm	v0, v0, v0, $outperm	# rotate right/left
+	li	r10, 4
+	vsel	v2, $outhead, v0, $outmask
+	li	r11, 8
+	stvx	v2, 0, $out
+	li	r12, 12
+	stvewx	v0, 0, r9		# some (or all) are redundant
+	stvewx	v0, r10, r9
+	stvewx	v0, r11, r9
+	stvewx	v0, r12, r9
+
+Lschedule_mangle_done:
+	mtlr	r7
+	# cleanup
+	vxor	v0, v0, v0		# vpxor		%xmm0,	%xmm0,	%xmm0
+	vxor	v1, v1, v1		# vpxor		%xmm1,	%xmm1,	%xmm1
+	vxor	v2, v2, v2		# vpxor		%xmm2,	%xmm2,	%xmm2
+	vxor	v3, v3, v3		# vpxor		%xmm3,	%xmm3,	%xmm3
+	vxor	v4, v4, v4		# vpxor		%xmm4,	%xmm4,	%xmm4
+	vxor	v5, v5, v5		# vpxor		%xmm5,	%xmm5,	%xmm5
+	vxor	v6, v6, v6		# vpxor		%xmm6,	%xmm6,	%xmm6
+	vxor	v7, v7, v7		# vpxor		%xmm7,	%xmm7,	%xmm7
+
+	blr
+	.long	0
+	.byte	0,12,0x14,0,0,0,0,0
+
+##
+##  .aes_schedule_192_smear
+##
+##  Smear the short, low side in the 192-bit key schedule.
+##
+##  Inputs:
+##    %xmm7: high side, b  a  x  y
+##    %xmm6:  low side, d  c  0  0
+##    %xmm13: 0
+##
+##  Outputs:
+##    %xmm6: b+c+d  b+c  0  0
+##    %xmm0: b+c+d  b+c  b  a
+##
+.align	4
+_vpaes_schedule_192_smear:
+	?vspltw	v0, v7, 3
+	?vsldoi	v1, v9, v6, 12		# vpshufd	\$0x80,	%xmm6,	%xmm1	# d c 0 0 -> c 0 0 0
+	?vsldoi	v0, v7, v0, 8		# vpshufd	\$0xFE,	%xmm7,	%xmm0	# b a _ _ -> b b b a
+	vxor	v6, v6, v1		# vpxor		%xmm1,	%xmm6,	%xmm6	# -> c+d c 0 0
+	vxor	v6, v6, v0		# vpxor		%xmm0,	%xmm6,	%xmm6	# -> b+c+d b+c b a
+	vmr	v0, v6
+	?vsldoi	v6, v6, v9, 8
+	?vsldoi	v6, v9, v6, 8		# clobber low side with zeros
+	blr
+	.long	0
+	.byte	0,12,0x14,0,0,0,0,0
+
+##
+##  .aes_schedule_round
+##
+##  Runs one main round of the key schedule on %xmm0, %xmm7
+##
+##  Specifically, runs subbytes on the high dword of %xmm0
+##  then rotates it by one byte and xors into the low dword of
+##  %xmm7.
+##
+##  Adds rcon from low byte of %xmm8, then rotates %xmm8 for
+##  next rcon.
+##
+##  Smears the dwords of %xmm7 by xoring the low into the
+##  second low, result into third, result into highest.
+##
+##  Returns results in %xmm7 = %xmm0.
+##  Clobbers %xmm1-%xmm4, %r11.
+##
+.align	4
+_vpaes_schedule_round:
+	# extract rcon from xmm8
+	#vxor	v4, v4, v4		# vpxor		%xmm4,	%xmm4,	%xmm4
+	?vsldoi	v1, $rcon, v9, 15	# vpalignr	\$15,	%xmm8,	%xmm4,	%xmm1
+	?vsldoi	$rcon, $rcon, $rcon, 15	# vpalignr	\$15,	%xmm8,	%xmm8,	%xmm8
+	vxor	v7, v7, v1		# vpxor		%xmm1,	%xmm7,	%xmm7
+
+	# rotate
+	?vspltw	v0, v0, 3		# vpshufd	\$0xFF,	%xmm0,	%xmm0
+	?vsldoi	v0, v0, v0, 1		# vpalignr	\$1,	%xmm0,	%xmm0,	%xmm0
+
+	# fall through...
+
+	# low round: same as high round, but no rotation and no rcon.
+_vpaes_schedule_low_round:
+	# smear xmm7
+	?vsldoi	v1, v9, v7, 12		# vpslldq	\$4,	%xmm7,	%xmm1
+	vxor	v7, v7, v1		# vpxor		%xmm1,	%xmm7,	%xmm7
+	vspltisb	v1, 0x0f	# 0x0f..0f
+	?vsldoi	v4, v9, v7, 8		# vpslldq	\$8,	%xmm7,	%xmm4
+
+	# subbytes
+	vand	v1, v1, v0		# vpand		%xmm9,	%xmm0,	%xmm1		# 0 = k
+	vsrb	v0, v0, v8		# vpsrlb	\$4,	%xmm0,	%xmm0		# 1 = i
+	 vxor	v7, v7, v4		# vpxor		%xmm4,	%xmm7,	%xmm7
+	vperm	v2, $invhi, v9, v1	# vpshufb	%xmm1,	%xmm11,	%xmm2		# 2 = a/k
+	vxor	v1, v1, v0		# vpxor		%xmm0,	%xmm1,	%xmm1		# 0 = j
+	vperm	v3, $invlo, v9, v0	# vpshufb	%xmm0, 	%xmm10,	%xmm3		# 3 = 1/i
+	vxor	v3, v3, v2		# vpxor		%xmm2,	%xmm3,	%xmm3		# 3 = iak = 1/i + a/k
+	vperm	v4, $invlo, v9, v1	# vpshufb	%xmm1,	%xmm10,	%xmm4		# 4 = 1/j
+	 vxor	v7, v7, v26		# vpxor		.Lk_s63(%rip),	%xmm7,	%xmm7
+	vperm	v3, $invlo, v9, v3	# vpshufb	%xmm3,	%xmm10,	%xmm3		# 2 = 1/iak
+	vxor	v4, v4, v2		# vpxor		%xmm2,	%xmm4,	%xmm4		# 4 = jak = 1/j + a/k
+	vperm	v2, $invlo, v9, v4	# vpshufb	%xmm4,	%xmm10,	%xmm2		# 3 = 1/jak
+	vxor	v3, v3, v1		# vpxor		%xmm1,	%xmm3,	%xmm3		# 2 = io
+	vxor	v2, v2, v0		# vpxor		%xmm0,	%xmm2,	%xmm2		# 3 = jo
+	vperm	v4, v15, v9, v3		# vpshufb	%xmm3,	%xmm13,	%xmm4		# 4 = sbou
+	vperm	v1, v14, v9, v2		# vpshufb	%xmm2,	%xmm12,	%xmm1		# 0 = sb1t
+	vxor	v1, v1, v4		# vpxor		%xmm4,	%xmm1,	%xmm1		# 0 = sbox output
+
+	# add in smeared stuff
+	vxor	v0, v1, v7		# vpxor		%xmm7,	%xmm1,	%xmm0
+	vxor	v7, v1, v7		# vmovdqa	%xmm0,	%xmm7
+	blr
+	.long	0
+	.byte	0,12,0x14,0,0,0,0,0
+
+##
+##  .aes_schedule_transform
+##
+##  Linear-transform %xmm0 according to tables at (%r11)
+##
+##  Requires that %xmm9 = 0x0F0F... as in preheat
+##  Output in %xmm0
+##  Clobbers %xmm2
+##
+.align	4
+_vpaes_schedule_transform:
+	#vand	v1, v0, v9		# vpand		%xmm9,	%xmm0,	%xmm1
+	vsrb	v2, v0, v8		# vpsrlb	\$4,	%xmm0,	%xmm0
+					# vmovdqa	(%r11),	%xmm2 	# lo
+	vperm	v0, $iptlo, $iptlo, v0	# vpshufb	%xmm1,	%xmm2,	%xmm2
+					# vmovdqa	16(%r11),	%xmm1 # hi
+	vperm	v2, $ipthi, $ipthi, v2	# vpshufb	%xmm0,	%xmm1,	%xmm0
+	vxor	v0, v0, v2		# vpxor		%xmm2,	%xmm0,	%xmm0
+	blr
+	.long	0
+	.byte	0,12,0x14,0,0,0,0,0
+
+##
+##  .aes_schedule_mangle
+##
+##  Mangle xmm0 from (basis-transformed) standard version
+##  to our version.
+##
+##  On encrypt,
+##    xor with 0x63
+##    multiply by circulant 0,1,1,1
+##    apply shiftrows transform
+##
+##  On decrypt,
+##    xor with 0x63
+##    multiply by "inverse mixcolumns" circulant E,B,D,9
+##    deskew
+##    apply shiftrows transform
+##
+##
+##  Writes out to (%rdx), and increments or decrements it
+##  Keeps track of round number mod 4 in %r8
+##  Preserves xmm0
+##  Clobbers xmm1-xmm5
+##
+.align	4
+_vpaes_schedule_mangle:
+	#vmr	v4, v0			# vmovdqa	%xmm0,	%xmm4	# save xmm0 for later
+					# vmovdqa	.Lk_mc_forward(%rip),%xmm5
+	bne	$dir, Lschedule_mangle_dec
+
+	# encrypting
+	vxor	v4, v0, v26		# vpxor	.Lk_s63(%rip),	%xmm0,	%xmm4
+	addi	$out, $out, 16		# add	\$16,	%rdx
+	vperm	v4, v4, v4, v25		# vpshufb	%xmm5,	%xmm4,	%xmm4
+	vperm	v1, v4, v4, v25		# vpshufb	%xmm5,	%xmm4,	%xmm1
+	vperm	v3, v1, v1, v25		# vpshufb	%xmm5,	%xmm1,	%xmm3
+	vxor	v4, v4, v1		# vpxor		%xmm1,	%xmm4,	%xmm4
+	lvx	v1, r8, r10		# vmovdqa	(%r8,%r10),	%xmm1
+	vxor	v3, v3, v4		# vpxor		%xmm4,	%xmm3,	%xmm3
+
+	vperm	v3, v3, v3, v1		# vpshufb	%xmm1,	%xmm3,	%xmm3
+	addi	r8, r8, -16		# add	\$-16,	%r8
+	andi.	r8, r8, 0x30		# and	\$0x30,	%r8
+
+	#stvx	v3, 0, $out		# vmovdqu	%xmm3,	(%rdx)
+	vperm	v1, v3, v3, $outperm	# rotate right/left
+	vsel	v2, $outhead, v1, $outmask
+	vmr	$outhead, v1
+	stvx	v2, 0, $out
+	blr
+
+.align	4
+Lschedule_mangle_dec:
+	# inverse mix columns
+					# lea	.Lk_dksd(%rip),%r11
+	vsrb	v1, v0, v8		# vpsrlb	\$4,	%xmm4,	%xmm1	# 1 = hi
+	#and	v4, v0, v9		# vpand		%xmm9,	%xmm4,	%xmm4	# 4 = lo
+
+					# vmovdqa	0x00(%r11),	%xmm2
+	vperm	v2, v16, v16, v0	# vpshufb	%xmm4,	%xmm2,	%xmm2
+					# vmovdqa	0x10(%r11),	%xmm3
+	vperm	v3, v17, v17, v1	# vpshufb	%xmm1,	%xmm3,	%xmm3
+	vxor	v3, v3, v2		# vpxor		%xmm2,	%xmm3,	%xmm3
+	vperm	v3, v3, v9, v25		# vpshufb	%xmm5,	%xmm3,	%xmm3
+
+					# vmovdqa	0x20(%r11),	%xmm2
+	vperm	v2, v18, v18, v0	# vpshufb	%xmm4,	%xmm2,	%xmm2
+	vxor	v2, v2, v3		# vpxor		%xmm3,	%xmm2,	%xmm2
+					# vmovdqa	0x30(%r11),	%xmm3
+	vperm	v3, v19, v19, v1	# vpshufb	%xmm1,	%xmm3,	%xmm3
+	vxor	v3, v3, v2		# vpxor		%xmm2,	%xmm3,	%xmm3
+	vperm	v3, v3, v9, v25		# vpshufb	%xmm5,	%xmm3,	%xmm3
+
+					# vmovdqa	0x40(%r11),	%xmm2
+	vperm	v2, v20, v20, v0	# vpshufb	%xmm4,	%xmm2,	%xmm2
+	vxor	v2, v2, v3		# vpxor		%xmm3,	%xmm2,	%xmm2
+					# vmovdqa	0x50(%r11),	%xmm3
+	vperm	v3, v21, v21, v1	# vpshufb	%xmm1,	%xmm3,	%xmm3
+	vxor	v3, v3, v2		# vpxor		%xmm2,	%xmm3,	%xmm3
+
+					# vmovdqa	0x60(%r11),	%xmm2
+	vperm	v2, v22, v22, v0	# vpshufb	%xmm4,	%xmm2,	%xmm2
+	vperm	v3, v3, v9, v25		# vpshufb	%xmm5,	%xmm3,	%xmm3
+					# vmovdqa	0x70(%r11),	%xmm4
+	vperm	v4, v23, v23, v1	# vpshufb	%xmm1,	%xmm4,	%xmm4
+	lvx	v1, r8, r10		# vmovdqa	(%r8,%r10),	%xmm1
+	vxor	v2, v2, v3		# vpxor		%xmm3,	%xmm2,	%xmm2
+	vxor	v3, v4, v2		# vpxor		%xmm2,	%xmm4,	%xmm3
+
+	addi	$out, $out, -16		# add	\$-16,	%rdx
+
+	vperm	v3, v3, v3, v1		# vpshufb	%xmm1,	%xmm3,	%xmm3
+	addi	r8, r8, -16		# add	\$-16,	%r8
+	andi.	r8, r8, 0x30		# and	\$0x30,	%r8
+
+	#stvx	v3, 0, $out		# vmovdqu	%xmm3,	(%rdx)
+	vperm	v1, v3, v3, $outperm	# rotate right/left
+	vsel	v2, $outhead, v1, $outmask
+	vmr	$outhead, v1
+	stvx	v2, 0, $out
+	blr
+	.long	0
+	.byte	0,12,0x14,0,0,0,0,0
+
+.globl	.vpaes_set_encrypt_key
+.align	5
+.vpaes_set_encrypt_key:
+	$STU	$sp,-$FRAME($sp)
+	li	r10,`15+6*$SIZE_T`
+	li	r11,`31+6*$SIZE_T`
+	mflr	r0
+	mfspr	r6, 256			# save vrsave
+	stvx	v20,r10,$sp
+	addi	r10,r10,32
+	stvx	v21,r11,$sp
+	addi	r11,r11,32
+	stvx	v22,r10,$sp
+	addi	r10,r10,32
+	stvx	v23,r11,$sp
+	addi	r11,r11,32
+	stvx	v24,r10,$sp
+	addi	r10,r10,32
+	stvx	v25,r11,$sp
+	addi	r11,r11,32
+	stvx	v26,r10,$sp
+	addi	r10,r10,32
+	stvx	v27,r11,$sp
+	addi	r11,r11,32
+	stvx	v28,r10,$sp
+	addi	r10,r10,32
+	stvx	v29,r11,$sp
+	addi	r11,r11,32
+	stvx	v30,r10,$sp
+	stvx	v31,r11,$sp
+	stw	r6,`$FRAME-4`($sp)	# save vrsave
+	li	r7, -1
+	$PUSH	r0, `$FRAME+$LRSAVE`($sp)
+	mtspr	256, r7			# preserve all AltiVec registers
+
+	srwi	r9, $bits, 5		# shr	\$5,%eax
+	addi	r9, r9, 6		# add	\$5,%eax
+	stw	r9, 240($out)		# mov	%eax,240(%rdx)	# AES_KEY->rounds = nbits/32+5;
+
+	cmplw	$dir, $bits, $bits	# set encrypt direction
+	li	r8, 0x30		# mov	\$0x30,%r8d
+	bl	_vpaes_schedule_core
+
+	$POP	r0, `$FRAME+$LRSAVE`($sp)
+	li	r10,`15+6*$SIZE_T`
+	li	r11,`31+6*$SIZE_T`
+	mtspr	256, r6			# restore vrsave
+	mtlr	r0
+	xor	r3, r3, r3
+	lvx	v20,r10,$sp
+	addi	r10,r10,32
+	lvx	v21,r11,$sp
+	addi	r11,r11,32
+	lvx	v22,r10,$sp
+	addi	r10,r10,32
+	lvx	v23,r11,$sp
+	addi	r11,r11,32
+	lvx	v24,r10,$sp
+	addi	r10,r10,32
+	lvx	v25,r11,$sp
+	addi	r11,r11,32
+	lvx	v26,r10,$sp
+	addi	r10,r10,32
+	lvx	v27,r11,$sp
+	addi	r11,r11,32
+	lvx	v28,r10,$sp
+	addi	r10,r10,32
+	lvx	v29,r11,$sp
+	addi	r11,r11,32
+	lvx	v30,r10,$sp
+	lvx	v31,r11,$sp
+	addi	$sp,$sp,$FRAME
+	blr
+	.long	0
+	.byte	0,12,0x04,1,0x80,0,3,0
+	.long	0
+.size	.vpaes_set_encrypt_key,.-.vpaes_set_encrypt_key
+
+.globl	.vpaes_set_decrypt_key
+.align	4
+.vpaes_set_decrypt_key:
+	$STU	$sp,-$FRAME($sp)
+	li	r10,`15+6*$SIZE_T`
+	li	r11,`31+6*$SIZE_T`
+	mflr	r0
+	mfspr	r6, 256			# save vrsave
+	stvx	v20,r10,$sp
+	addi	r10,r10,32
+	stvx	v21,r11,$sp
+	addi	r11,r11,32
+	stvx	v22,r10,$sp
+	addi	r10,r10,32
+	stvx	v23,r11,$sp
+	addi	r11,r11,32
+	stvx	v24,r10,$sp
+	addi	r10,r10,32
+	stvx	v25,r11,$sp
+	addi	r11,r11,32
+	stvx	v26,r10,$sp
+	addi	r10,r10,32
+	stvx	v27,r11,$sp
+	addi	r11,r11,32
+	stvx	v28,r10,$sp
+	addi	r10,r10,32
+	stvx	v29,r11,$sp
+	addi	r11,r11,32
+	stvx	v30,r10,$sp
+	stvx	v31,r11,$sp
+	stw	r6,`$FRAME-4`($sp)	# save vrsave
+	li	r7, -1
+	$PUSH	r0, `$FRAME+$LRSAVE`($sp)
+	mtspr	256, r7			# preserve all AltiVec registers
+
+	srwi	r9, $bits, 5		# shr	\$5,%eax
+	addi	r9, r9, 6		# add	\$5,%eax
+	stw	r9, 240($out)		# mov	%eax,240(%rdx)	# AES_KEY->rounds = nbits/32+5;
+
+	slwi	r9, r9, 4		# shl	\$4,%eax
+	add	$out, $out, r9		# lea	(%rdx,%rax),%rdx
+
+	cmplwi	$dir, $bits, 0		# set decrypt direction
+	srwi	r8, $bits, 1		# shr	\$1,%r8d
+	andi.	r8, r8, 32		# and	\$32,%r8d
+	xori	r8, r8, 32		# xor	\$32,%r8d	# nbits==192?0:32
+	bl	_vpaes_schedule_core
+
+	$POP	r0,  `$FRAME+$LRSAVE`($sp)
+	li	r10,`15+6*$SIZE_T`
+	li	r11,`31+6*$SIZE_T`
+	mtspr	256, r6			# restore vrsave
+	mtlr	r0
+	xor	r3, r3, r3
+	lvx	v20,r10,$sp
+	addi	r10,r10,32
+	lvx	v21,r11,$sp
+	addi	r11,r11,32
+	lvx	v22,r10,$sp
+	addi	r10,r10,32
+	lvx	v23,r11,$sp
+	addi	r11,r11,32
+	lvx	v24,r10,$sp
+	addi	r10,r10,32
+	lvx	v25,r11,$sp
+	addi	r11,r11,32
+	lvx	v26,r10,$sp
+	addi	r10,r10,32
+	lvx	v27,r11,$sp
+	addi	r11,r11,32
+	lvx	v28,r10,$sp
+	addi	r10,r10,32
+	lvx	v29,r11,$sp
+	addi	r11,r11,32
+	lvx	v30,r10,$sp
+	lvx	v31,r11,$sp
+	addi	$sp,$sp,$FRAME
+	blr
+	.long	0
+	.byte	0,12,0x04,1,0x80,0,3,0
+	.long	0
+.size	.vpaes_set_decrypt_key,.-.vpaes_set_decrypt_key
+___
+}
+
+my $consts=1;
+foreach  (split("\n",$code)) {
+	s/\`([^\`]*)\`/eval $1/geo;
+
+	# constants table endian-specific conversion
+	if ($consts && m/\.long\s+(.+)\s+(\?[a-z]*)$/o) {
+	    my $conv=$2;
+	    my @bytes=();
+
+	    # convert to endian-agnostic format
+	    foreach (split(/,\s+/,$1)) {
+		my $l = /^0/?oct:int;
+		push @bytes,($l>>24)&0xff,($l>>16)&0xff,($l>>8)&0xff,$l&0xff;
+	    }
+
+	    # little-endian conversion
+	    if ($flavour =~ /le$/o) {
+		SWITCH: for($conv)  {
+		    /\?inv/ && do   { @bytes=map($_^0xf,@bytes); last; };
+		    /\?rev/ && do   { @bytes=reverse(@bytes);    last; };
+		}
+	    }
+
+	    #emit
+	    print ".byte\t",join(',',map (sprintf("0x%02x",$_),@bytes)),"\n";
+	    next;
+	}
+	$consts=0 if (m/Lconsts:/o);	# end of table
+
+	# instructions prefixed with '?' are endian-specific and need
+	# to be adjusted accordingly...
+	if ($flavour =~ /le$/o) {	# little-endian
+	    s/\?lvsr/lvsl/o or
+	    s/\?lvsl/lvsr/o or
+	    s/\?(vperm\s+v[0-9]+,\s*)(v[0-9]+,\s*)(v[0-9]+,\s*)(v[0-9]+)/$1$3$2$4/o or
+	    s/\?(vsldoi\s+v[0-9]+,\s*)(v[0-9]+,)\s*(v[0-9]+,\s*)([0-9]+)/$1$3$2 16-$4/o or
+	    s/\?(vspltw\s+v[0-9]+,\s*)(v[0-9]+,)\s*([0-9])/$1$2 3-$3/o;
+	} else {			# big-endian
+	    s/\?([a-z]+)/$1/o;
+	}
+
+	print $_,"\n";
+}
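+
+# For example, under a little-endian flavour a (hypothetical) constants
+# line such as
+#
+#	.long	0x03020100, 0x07060504, 0x0b0a0908, 0x0f0e0d0c	?rev
+#
+# is first flattened MSB-first into the endian-agnostic bytes
+# 0x03,0x02,0x01,0x00,0x07,... and then, because of the "?rev" tag,
+# emitted reversed: ".byte 0x0c,0x0d,0x0e,0x0f,0x08,...".  A "?inv" tag
+# instead xors every byte with 0x0f, so that permutation-index tables
+# still select the same bytes on little-endian.  Likewise
+# "?vsldoi v1, v9, v6, 12" becomes "vsldoi v1, v6, v9, 16-12" on
+# little-endian (the assembler evaluates the expression): the operands
+# are swapped and the shift complemented to preserve the permute.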
+
+close STDOUT or die "error closing STDOUT: $!";
diff --git a/ap/lib/libssl/openssl-1.1.1o/crypto/aes/asm/vpaes-x86.pl b/ap/lib/libssl/openssl-1.1.1o/crypto/aes/asm/vpaes-x86.pl
new file mode 100644
index 0000000..fb02a41
--- /dev/null
+++ b/ap/lib/libssl/openssl-1.1.1o/crypto/aes/asm/vpaes-x86.pl
@@ -0,0 +1,916 @@
+#! /usr/bin/env perl
+# Copyright 2011-2020 The OpenSSL Project Authors. All Rights Reserved.
+#
+# Licensed under the OpenSSL license (the "License").  You may not use
+# this file except in compliance with the License.  You can obtain a copy
+# in the file LICENSE in the source distribution or at
+# https://www.openssl.org/source/license.html
+
+
+######################################################################
+## Constant-time SSSE3 AES core implementation.
+## version 0.1
+##
+## By Mike Hamburg (Stanford University), 2009
+## Public domain.
+##
+## For details see http://shiftleft.org/papers/vector_aes/ and
+## http://crypto.stanford.edu/vpaes/.
+
+######################################################################
+# September 2011.
+#
+# Port vpaes-x86_64.pl as 32-bit "almost" drop-in replacement for
+# aes-586.pl. "Almost" refers to the fact that AES_cbc_encrypt
+# doesn't handle partial vectors (doesn't have to if called from
+# EVP only). "Drop-in" implies that this module doesn't share key
+# schedule structure with the original, nor does it make assumptions
+# about its alignment...
+#
+# Performance summary. The aes-586.pl column lists large-block CBC
+# encrypt/decrypt/with-hyper-threading-off(*) results in cycles per
+# byte processed with a 128-bit key; the vpaes-x86.pl column lists
+# [also large-block CBC] encrypt/decrypt results.
+#
+#		aes-586.pl		vpaes-x86.pl
+#
+# Core 2(**)	28.1/41.4/18.3		21.9/25.2(***)
+# Nehalem	27.9/40.4/18.1		10.2/11.9
+# Atom		70.7/92.1/60.1		61.1/75.4(***)
+# Silvermont	45.4/62.9/24.1		49.2/61.1(***)
+#
+# (*)	"Hyper-threading" in this context refers to cache shared among
+#	multiple cores rather than to Intel HTT specifically. As the vast
+#	majority of contemporary cores share cache, the slower code path
+#	is commonplace. In other words, "with-hyper-threading-off" results
+#	are presented mostly for reference purposes.
+#
+# (**)	"Core 2" refers to initial 65nm design, a.k.a. Conroe.
+#
+# (***)	The less impressive improvement on Core 2 and Atom is due to
+#	slow pshufb; it is nevertheless a respectable +28%/64% improvement
+#	on Core 2 and +15% on Atom (as implied, over the
+#	"hyper-threading-safe" code path).
+#
+#						<appro@openssl.org>
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+push(@INC,"${dir}","${dir}../../perlasm");
+require "x86asm.pl";
+
+$output = pop;
+open OUT,">$output";
+*STDOUT=*OUT;
+
+&asm_init($ARGV[0],$x86only = $ARGV[$#ARGV] eq "386");
+
+$PREFIX="vpaes";
+
+my  ($round, $base, $magic, $key, $const, $inp, $out)=
+    ("eax",  "ebx", "ecx",  "edx","ebp",  "esi","edi");
+
+&static_label("_vpaes_consts");
+&static_label("_vpaes_schedule_low_round");
+
+&set_label("_vpaes_consts",64);
+$k_inv=-0x30;		# inv, inva
+	&data_word(0x0D080180,0x0E05060F,0x0A0B0C02,0x04070309);
+	&data_word(0x0F0B0780,0x01040A06,0x02050809,0x030D0E0C);
+
+$k_s0F=-0x10;		# s0F
+	&data_word(0x0F0F0F0F,0x0F0F0F0F,0x0F0F0F0F,0x0F0F0F0F);
+
+$k_ipt=0x00;		# input transform (lo, hi)
+	&data_word(0x5A2A7000,0xC2B2E898,0x52227808,0xCABAE090);
+	&data_word(0x317C4D00,0x4C01307D,0xB0FDCC81,0xCD80B1FC);
+
+$k_sb1=0x20;		# sb1u, sb1t
+	&data_word(0xCB503E00,0xB19BE18F,0x142AF544,0xA5DF7A6E);
+	&data_word(0xFAE22300,0x3618D415,0x0D2ED9EF,0x3BF7CCC1);
+$k_sb2=0x40;		# sb2u, sb2t
+	&data_word(0x0B712400,0xE27A93C6,0xBC982FCD,0x5EB7E955);
+	&data_word(0x0AE12900,0x69EB8840,0xAB82234A,0xC2A163C8);
+$k_sbo=0x60;		# sbou, sbot
+	&data_word(0x6FBDC700,0xD0D26D17,0xC502A878,0x15AABF7A);
+	&data_word(0x5FBB6A00,0xCFE474A5,0x412B35FA,0x8E1E90D1);
+
+$k_mc_forward=0x80;	# mc_forward
+	&data_word(0x00030201,0x04070605,0x080B0A09,0x0C0F0E0D);
+	&data_word(0x04070605,0x080B0A09,0x0C0F0E0D,0x00030201);
+	&data_word(0x080B0A09,0x0C0F0E0D,0x00030201,0x04070605);
+	&data_word(0x0C0F0E0D,0x00030201,0x04070605,0x080B0A09);
+
+$k_mc_backward=0xc0;	# mc_backward
+	&data_word(0x02010003,0x06050407,0x0A09080B,0x0E0D0C0F);
+	&data_word(0x0E0D0C0F,0x02010003,0x06050407,0x0A09080B);
+	&data_word(0x0A09080B,0x0E0D0C0F,0x02010003,0x06050407);
+	&data_word(0x06050407,0x0A09080B,0x0E0D0C0F,0x02010003);
+
+$k_sr=0x100;		# sr
+	&data_word(0x03020100,0x07060504,0x0B0A0908,0x0F0E0D0C);
+	&data_word(0x0F0A0500,0x030E0904,0x07020D08,0x0B06010C);
+	&data_word(0x0B020900,0x0F060D04,0x030A0108,0x070E050C);
+	&data_word(0x070A0D00,0x0B0E0104,0x0F020508,0x0306090C);
+
+$k_rcon=0x140;		# rcon
+	&data_word(0xAF9DEEB6,0x1F8391B9,0x4D7C7D81,0x702A9808);
+
+$k_s63=0x150;		# s63: all equal to 0x63 transformed
+	&data_word(0x5B5B5B5B,0x5B5B5B5B,0x5B5B5B5B,0x5B5B5B5B);
+
+$k_opt=0x160;		# output transform
+	&data_word(0xD6B66000,0xFF9F4929,0xDEBE6808,0xF7974121);
+	&data_word(0x50BCEC00,0x01EDBD51,0xB05C0CE0,0xE10D5DB1);
+
+$k_deskew=0x180;	# deskew tables: inverts the sbox's "skew"
+	&data_word(0x47A4E300,0x07E4A340,0x5DBEF91A,0x1DFEB95A);
+	&data_word(0x83EA6900,0x5F36B5DC,0xF49D1E77,0x2841C2AB);
+##
+##  Decryption stuff
+##  Key schedule constants
+##
+$k_dksd=0x1a0;		# decryption key schedule: invskew x*D
+	&data_word(0xA3E44700,0xFEB91A5D,0x5A1DBEF9,0x0740E3A4);
+	&data_word(0xB5368300,0x41C277F4,0xAB289D1E,0x5FDC69EA);
+$k_dksb=0x1c0;		# decryption key schedule: invskew x*B
+	&data_word(0x8550D500,0x9A4FCA1F,0x1CC94C99,0x03D65386);
+	&data_word(0xB6FC4A00,0x115BEDA7,0x7E3482C8,0xD993256F);
+$k_dkse=0x1e0;		# decryption key schedule: invskew x*E + 0x63
+	&data_word(0x1FC9D600,0xD5031CCA,0x994F5086,0x53859A4C);
+	&data_word(0x4FDC7BE8,0xA2319605,0x20B31487,0xCD5EF96A);
+$k_dks9=0x200;		# decryption key schedule: invskew x*9
+	&data_word(0x7ED9A700,0xB6116FC8,0x82255BFC,0x4AED9334);
+	&data_word(0x27143300,0x45765162,0xE9DAFDCE,0x8BB89FAC);
+
+##
+##  Decryption stuff
+##  Round function constants
+##
+$k_dipt=0x220;		# decryption input transform
+	&data_word(0x0B545F00,0x0F505B04,0x114E451A,0x154A411E);
+	&data_word(0x60056500,0x86E383E6,0xF491F194,0x12771772);
+
+$k_dsb9=0x240;		# decryption sbox output *9*u, *9*t
+	&data_word(0x9A86D600,0x851C0353,0x4F994CC9,0xCAD51F50);
+	&data_word(0xECD74900,0xC03B1789,0xB2FBA565,0x725E2C9E);
+$k_dsbd=0x260;		# decryption sbox output *D*u, *D*t
+	&data_word(0xE6B1A200,0x7D57CCDF,0x882A4439,0xF56E9B13);
+	&data_word(0x24C6CB00,0x3CE2FAF7,0x15DEEFD3,0x2931180D);
+$k_dsbb=0x280;		# decryption sbox output *B*u, *B*t
+	&data_word(0x96B44200,0xD0226492,0xB0F2D404,0x602646F6);
+	&data_word(0xCD596700,0xC19498A6,0x3255AA6B,0xF3FF0C3E);
+$k_dsbe=0x2a0;		# decryption sbox output *E*u, *E*t
+	&data_word(0x26D4D000,0x46F29296,0x64B4F6B0,0x22426004);
+	&data_word(0xFFAAC100,0x0C55A6CD,0x98593E32,0x9467F36B);
+$k_dsbo=0x2c0;		# decryption sbox final output
+	&data_word(0x7EF94000,0x1387EA53,0xD4943E2D,0xC7AA6DB9);
+	&data_word(0x93441D00,0x12D7560F,0xD8C58E9C,0xCA4B8159);
+&asciz	("Vector Permutation AES for x86/SSSE3, Mike Hamburg (Stanford University)");
+&align	(64);
+
+&function_begin_B("_vpaes_preheat");
+	&add	($const,&DWP(0,"esp"));
+	&movdqa	("xmm7",&QWP($k_inv,$const));
+	&movdqa	("xmm6",&QWP($k_s0F,$const));
+	&ret	();
+&function_end_B("_vpaes_preheat");
+
+##
+##  _aes_encrypt_core
+##
+##  AES-encrypt %xmm0.
+##
+##  Inputs:
+##     %xmm0 = input
+##     %xmm6-%xmm7 as in _vpaes_preheat
+##    (%edx) = scheduled keys
+##
+##  Output in %xmm0
+##  Clobbers  %xmm1-%xmm5, %eax, %ebx, %ecx, %edx
+##
+##
+&function_begin_B("_vpaes_encrypt_core");
+	&mov	($magic,16);
+	&mov	($round,&DWP(240,$key));
+	&movdqa	("xmm1","xmm6");
+	&movdqa	("xmm2",&QWP($k_ipt,$const));
+	&pandn	("xmm1","xmm0");
+	&pand	("xmm0","xmm6");
+	&movdqu	("xmm5",&QWP(0,$key));
+	&pshufb	("xmm2","xmm0");
+	&movdqa	("xmm0",&QWP($k_ipt+16,$const));
+	&pxor	("xmm2","xmm5");
+	&psrld	("xmm1",4);
+	&add	($key,16);
+	&pshufb	("xmm0","xmm1");
+	&lea	($base,&DWP($k_mc_backward,$const));
+	&pxor	("xmm0","xmm2");
+	&jmp	(&label("enc_entry"));
+
+
+&set_label("enc_loop",16);
+	# middle of middle round
+	&movdqa	("xmm4",&QWP($k_sb1,$const));	# 4 : sb1u
+	&movdqa	("xmm0",&QWP($k_sb1+16,$const));# 0 : sb1t
+	&pshufb	("xmm4","xmm2");		# 4 = sb1u
+	&pshufb	("xmm0","xmm3");		# 0 = sb1t
+	&pxor	("xmm4","xmm5");		# 4 = sb1u + k
+	&movdqa	("xmm5",&QWP($k_sb2,$const));	# 4 : sb2u
+	&pxor	("xmm0","xmm4");		# 0 = A
+	&movdqa	("xmm1",&QWP(-0x40,$base,$magic));# .Lk_mc_forward[]
+	&pshufb	("xmm5","xmm2");		# 4 = sb2u
+	&movdqa	("xmm2",&QWP($k_sb2+16,$const));# 2 : sb2t
+	&movdqa	("xmm4",&QWP(0,$base,$magic));	# .Lk_mc_backward[]
+	&pshufb	("xmm2","xmm3");		# 2 = sb2t
+	&movdqa	("xmm3","xmm0");		# 3 = A
+	&pxor	("xmm2","xmm5");		# 2 = 2A
+	&pshufb	("xmm0","xmm1");		# 0 = B
+	&add	($key,16);			# next key
+	&pxor	("xmm0","xmm2");		# 0 = 2A+B
+	&pshufb	("xmm3","xmm4");		# 3 = D
+	&add	($magic,16);			# next mc
+	&pxor	("xmm3","xmm0");		# 3 = 2A+B+D
+	&pshufb	("xmm0","xmm1");		# 0 = 2B+C
+	&and	($magic,0x30);			# ... mod 4
+	&sub	($round,1);			# nr--
+	&pxor	("xmm0","xmm3");		# 0 = 2A+3B+C+D
+
+&set_label("enc_entry");
+	# top of round
+	&movdqa	("xmm1","xmm6");		# 1 : i
+	&movdqa	("xmm5",&QWP($k_inv+16,$const));# 2 : a/k
+	&pandn	("xmm1","xmm0");		# 1 = i<<4
+	&psrld	("xmm1",4);			# 1 = i
+	&pand	("xmm0","xmm6");		# 0 = k
+	&pshufb	("xmm5","xmm0");		# 2 = a/k
+	&movdqa	("xmm3","xmm7");		# 3 : 1/i
+	&pxor	("xmm0","xmm1");		# 0 = j
+	&pshufb	("xmm3","xmm1");		# 3 = 1/i
+	&movdqa	("xmm4","xmm7");		# 4 : 1/j
+	&pxor	("xmm3","xmm5");		# 3 = iak = 1/i + a/k
+	&pshufb	("xmm4","xmm0");		# 4 = 1/j
+	&movdqa	("xmm2","xmm7");		# 2 : 1/iak
+	&pxor	("xmm4","xmm5");		# 4 = jak = 1/j + a/k
+	&pshufb	("xmm2","xmm3");		# 2 = 1/iak
+	&movdqa	("xmm3","xmm7");		# 3 : 1/jak
+	&pxor	("xmm2","xmm0");		# 2 = io
+	&pshufb	("xmm3","xmm4");		# 3 = 1/jak
+	&movdqu	("xmm5",&QWP(0,$key));
+	&pxor	("xmm3","xmm1");		# 3 = jo
+	&jnz	(&label("enc_loop"));
+
+	# middle of last round
+	&movdqa	("xmm4",&QWP($k_sbo,$const));	# 3 : sbou      .Lk_sbo
+	&movdqa	("xmm0",&QWP($k_sbo+16,$const));# 3 : sbot      .Lk_sbo+16
+	&pshufb	("xmm4","xmm2");		# 4 = sbou
+	&pxor	("xmm4","xmm5");		# 4 = sb1u + k
+	&pshufb	("xmm0","xmm3");		# 0 = sb1t
+	&movdqa	("xmm1",&QWP(0x40,$base,$magic));# .Lk_sr[]
+	&pxor	("xmm0","xmm4");		# 0 = A
+	&pshufb	("xmm0","xmm1");
+	&ret	();
+&function_end_B("_vpaes_encrypt_core");
+
+##
+##  Decryption core
+##
+##  Same API as encryption core.
+##
+&function_begin_B("_vpaes_decrypt_core");
+	&lea	($base,&DWP($k_dsbd,$const));
+	&mov	($round,&DWP(240,$key));
+	&movdqa	("xmm1","xmm6");
+	&movdqa	("xmm2",&QWP($k_dipt-$k_dsbd,$base));
+	&pandn	("xmm1","xmm0");
+	&mov	($magic,$round);
+	&psrld	("xmm1",4);
+	&movdqu	("xmm5",&QWP(0,$key));
+	&shl	($magic,4);
+	&pand	("xmm0","xmm6");
+	&pshufb	("xmm2","xmm0");
+	&movdqa	("xmm0",&QWP($k_dipt-$k_dsbd+16,$base));
+	&xor	($magic,0x30);
+	&pshufb	("xmm0","xmm1");
+	&and	($magic,0x30);
+	&pxor	("xmm2","xmm5");
+	&movdqa	("xmm5",&QWP($k_mc_forward+48,$const));
+	&pxor	("xmm0","xmm2");
+	&add	($key,16);
+	&lea	($magic,&DWP($k_sr-$k_dsbd,$base,$magic));
+	&jmp	(&label("dec_entry"));
+
+&set_label("dec_loop",16);
+##
+##  Inverse mix columns
+##
+	&movdqa	("xmm4",&QWP(-0x20,$base));	# 4 : sb9u
+	&movdqa	("xmm1",&QWP(-0x10,$base));	# 0 : sb9t
+	&pshufb	("xmm4","xmm2");		# 4 = sb9u
+	&pshufb	("xmm1","xmm3");		# 0 = sb9t
+	&pxor	("xmm0","xmm4");
+	&movdqa	("xmm4",&QWP(0,$base));		# 4 : sbdu
+	&pxor	("xmm0","xmm1");		# 0 = ch
+	&movdqa	("xmm1",&QWP(0x10,$base));	# 0 : sbdt
+
+	&pshufb	("xmm4","xmm2");		# 4 = sbdu
+	&pshufb	("xmm0","xmm5");		# MC ch
+	&pshufb	("xmm1","xmm3");		# 0 = sbdt
+	&pxor	("xmm0","xmm4");		# 4 = ch
+	&movdqa	("xmm4",&QWP(0x20,$base));	# 4 : sbbu
+	&pxor	("xmm0","xmm1");		# 0 = ch
+	&movdqa	("xmm1",&QWP(0x30,$base));	# 0 : sbbt
+
+	&pshufb	("xmm4","xmm2");		# 4 = sbbu
+	&pshufb	("xmm0","xmm5");		# MC ch
+	&pshufb	("xmm1","xmm3");		# 0 = sbbt
+	&pxor	("xmm0","xmm4");		# 4 = ch
+	&movdqa	("xmm4",&QWP(0x40,$base));	# 4 : sbeu
+	&pxor	("xmm0","xmm1");		# 0 = ch
+	&movdqa	("xmm1",&QWP(0x50,$base));	# 0 : sbet
+
+	&pshufb	("xmm4","xmm2");		# 4 = sbeu
+	&pshufb	("xmm0","xmm5");		# MC ch
+	&pshufb	("xmm1","xmm3");		# 0 = sbet
+	&pxor	("xmm0","xmm4");		# 4 = ch
+	&add	($key,16);			# next round key
+	&palignr("xmm5","xmm5",12);
+	&pxor	("xmm0","xmm1");		# 0 = ch
+	&sub	($round,1);			# nr--
+
+&set_label("dec_entry");
+	# top of round
+	&movdqa	("xmm1","xmm6");		# 1 : i
+	&movdqa	("xmm2",&QWP($k_inv+16,$const));# 2 : a/k
+	&pandn	("xmm1","xmm0");		# 1 = i<<4
+	&pand	("xmm0","xmm6");		# 0 = k
+	&psrld	("xmm1",4);			# 1 = i
+	&pshufb	("xmm2","xmm0");		# 2 = a/k
+	&movdqa	("xmm3","xmm7");		# 3 : 1/i
+	&pxor	("xmm0","xmm1");		# 0 = j
+	&pshufb	("xmm3","xmm1");		# 3 = 1/i
+	&movdqa	("xmm4","xmm7");		# 4 : 1/j
+	&pxor	("xmm3","xmm2");		# 3 = iak = 1/i + a/k
+	&pshufb	("xmm4","xmm0");		# 4 = 1/j
+	&pxor	("xmm4","xmm2");		# 4 = jak = 1/j + a/k
+	&movdqa	("xmm2","xmm7");		# 2 : 1/iak
+	&pshufb	("xmm2","xmm3");		# 2 = 1/iak
+	&movdqa	("xmm3","xmm7");		# 3 : 1/jak
+	&pxor	("xmm2","xmm0");		# 2 = io
+	&pshufb	("xmm3","xmm4");		# 3 = 1/jak
+	&movdqu	("xmm0",&QWP(0,$key));
+	&pxor	("xmm3","xmm1");		# 3 = jo
+	&jnz	(&label("dec_loop"));
+
+	# middle of last round
+	&movdqa	("xmm4",&QWP(0x60,$base));	# 3 : sbou
+	&pshufb	("xmm4","xmm2");		# 4 = sbou
+	&pxor	("xmm4","xmm0");		# 4 = sb1u + k
+	&movdqa	("xmm0",&QWP(0x70,$base));	# 0 : sbot
+	&movdqa	("xmm2",&QWP(0,$magic));
+	&pshufb	("xmm0","xmm3");		# 0 = sb1t
+	&pxor	("xmm0","xmm4");		# 0 = A
+	&pshufb	("xmm0","xmm2");
+	&ret	();
+&function_end_B("_vpaes_decrypt_core");
+
+########################################################
+##                                                    ##
+##                  AES key schedule                  ##
+##                                                    ##
+########################################################
+&function_begin_B("_vpaes_schedule_core");
+	&add	($const,&DWP(0,"esp"));
+	&movdqu	("xmm0",&QWP(0,$inp));		# load key (unaligned)
+	&movdqa	("xmm2",&QWP($k_rcon,$const));	# load rcon
+
+	# input transform
+	&movdqa	("xmm3","xmm0");
+	&lea	($base,&DWP($k_ipt,$const));
+	&movdqa	(&QWP(4,"esp"),"xmm2");		# xmm8
+	&call	("_vpaes_schedule_transform");
+	&movdqa	("xmm7","xmm0");
+
+	&test	($out,$out);
+	&jnz	(&label("schedule_am_decrypting"));
+
+	# encrypting, output zeroth round key after transform
+	&movdqu	(&QWP(0,$key),"xmm0");
+	&jmp	(&label("schedule_go"));
+
+&set_label("schedule_am_decrypting");
+	# decrypting, output zeroth round key after shiftrows
+	&movdqa	("xmm1",&QWP($k_sr,$const,$magic));
+	&pshufb	("xmm3","xmm1");
+	&movdqu	(&QWP(0,$key),"xmm3");
+	&xor	($magic,0x30);
+
+&set_label("schedule_go");
+	&cmp	($round,192);
+	&ja	(&label("schedule_256"));
+	&je	(&label("schedule_192"));
+	# 128: fall through
+
+##
+##  .schedule_128
+##
+##  128-bit specific part of key schedule.
+##
+##  This schedule is really simple, because all its parts
+##  are accomplished by the subroutines.
+##
+&set_label("schedule_128");
+	&mov	($round,10);
+
+&set_label("loop_schedule_128");
+	&call	("_vpaes_schedule_round");
+	&dec	($round);
+	&jz	(&label("schedule_mangle_last"));
+	&call	("_vpaes_schedule_mangle");	# write output
+	&jmp	(&label("loop_schedule_128"));
+
+##
+##  .aes_schedule_192
+##
+##  192-bit specific part of key schedule.
+##
+##  The main body of this schedule is the same as the 128-bit
+##  schedule, but with more smearing.  The long, high side is
+##  stored in %xmm7 as before, and the short, low side is in
+##  the high bits of %xmm6.
+##
+##  This schedule is somewhat nastier, however, because each
+##  round produces 192 bits of key material, or 1.5 round keys.
+##  Therefore, on each cycle we do 2 rounds and produce 3 round
+##  keys.
+##
+&set_label("schedule_192",16);
+	&movdqu	("xmm0",&QWP(8,$inp));		# load key part 2 (very unaligned)
+	&call	("_vpaes_schedule_transform");	# input transform
+	&movdqa	("xmm6","xmm0");		# save short part
+	&pxor	("xmm4","xmm4");		# clear 4
+	&movhlps("xmm6","xmm4");		# clobber low side with zeros
+	&mov	($round,4);
+
+&set_label("loop_schedule_192");
+	&call	("_vpaes_schedule_round");
+	&palignr("xmm0","xmm6",8);
+	&call	("_vpaes_schedule_mangle");	# save key n
+	&call	("_vpaes_schedule_192_smear");
+	&call	("_vpaes_schedule_mangle");	# save key n+1
+	&call	("_vpaes_schedule_round");
+	&dec	($round);
+	&jz	(&label("schedule_mangle_last"));
+	&call	("_vpaes_schedule_mangle");	# save key n+2
+	&call	("_vpaes_schedule_192_smear");
+	&jmp	(&label("loop_schedule_192"));
+
+##
+##  .aes_schedule_256
+##
+##  256-bit specific part of key schedule.
+##
+##  The structure here is very similar to the 128-bit
+##  schedule, but with an additional "low side" in
+##  %xmm6.  The low side's rounds are the same as the
+##  high side's, except no rcon and no rotation.
+##
+&set_label("schedule_256",16);
+	&movdqu	("xmm0",&QWP(16,$inp));		# load key part 2 (unaligned)
+	&call	("_vpaes_schedule_transform");	# input transform
+	&mov	($round,7);
+
+&set_label("loop_schedule_256");
+	&call	("_vpaes_schedule_mangle");	# output low result
+	&movdqa	("xmm6","xmm0");		# save cur_lo in xmm6
+
+	# high round
+	&call	("_vpaes_schedule_round");
+	&dec	($round);
+	&jz	(&label("schedule_mangle_last"));
+	&call	("_vpaes_schedule_mangle");
+
+	# low round. swap xmm7 and xmm6
+	&pshufd	("xmm0","xmm0",0xFF);
+	&movdqa	(&QWP(20,"esp"),"xmm7");
+	&movdqa	("xmm7","xmm6");
+	&call	("_vpaes_schedule_low_round");
+	&movdqa	("xmm7",&QWP(20,"esp"));
+
+	&jmp	(&label("loop_schedule_256"));
+
+##
+##  .aes_schedule_mangle_last
+##
+##  Mangler for last round of key schedule
+##  Mangles %xmm0
+##    when encrypting, outputs out(%xmm0) ^ 63
+##    when decrypting, outputs unskew(%xmm0)
+##
+##  Always called right before return... jumps to cleanup and exits
+##
+&set_label("schedule_mangle_last",16);
+	# schedule last round key from xmm0
+	&lea	($base,&DWP($k_deskew,$const));
+	&test	($out,$out);
+	&jnz	(&label("schedule_mangle_last_dec"));
+
+	# encrypting
+	&movdqa	("xmm1",&QWP($k_sr,$const,$magic));
+	&pshufb	("xmm0","xmm1");		# output permute
+	&lea	($base,&DWP($k_opt,$const));	# prepare to output transform
+	&add	($key,32);
+
+&set_label("schedule_mangle_last_dec");
+	&add	($key,-16);
+	&pxor	("xmm0",&QWP($k_s63,$const));
+	&call	("_vpaes_schedule_transform");	# output transform
+	&movdqu	(&QWP(0,$key),"xmm0");		# save last key
+
+	# cleanup
+	&pxor	("xmm0","xmm0");
+	&pxor	("xmm1","xmm1");
+	&pxor	("xmm2","xmm2");
+	&pxor	("xmm3","xmm3");
+	&pxor	("xmm4","xmm4");
+	&pxor	("xmm5","xmm5");
+	&pxor	("xmm6","xmm6");
+	&pxor	("xmm7","xmm7");
+	&ret	();
+&function_end_B("_vpaes_schedule_core");
+
+##
+##  .aes_schedule_192_smear
+##
+##  Smear the short, low side in the 192-bit key schedule.
+##
+##  Inputs:
+##    %xmm7: high side, b  a  x  y
+##    %xmm6:  low side, d  c  0  0
+##    %xmm13: 0
+##
+##  Outputs:
+##    %xmm6: b+c+d  b+c  0  0
+##    %xmm0: b+c+d  b+c  b  a
+##
+&function_begin_B("_vpaes_schedule_192_smear");
+	&pshufd	("xmm1","xmm6",0x80);		# d c 0 0 -> c 0 0 0
+	&pshufd	("xmm0","xmm7",0xFE);		# b a _ _ -> b b b a
+	&pxor	("xmm6","xmm1");		# -> c+d c 0 0
+	&pxor	("xmm1","xmm1");
+	&pxor	("xmm6","xmm0");		# -> b+c+d b+c b a
+	&movdqa	("xmm0","xmm6");
+	&movhlps("xmm6","xmm1");		# clobber low side with zeros
+	&ret	();
+&function_end_B("_vpaes_schedule_192_smear");
+
+##
+##  .aes_schedule_round
+##
+##  Runs one main round of the key schedule on %xmm0, %xmm7
+##
+##  Specifically, runs subbytes on the high dword of %xmm0
+##  then rotates it by one byte and xors into the low dword of
+##  %xmm7.
+##
+##  Adds rcon from low byte of %xmm8, then rotates %xmm8 for
+##  next rcon.
+##
+##  Smears the dwords of %xmm7 by xoring the low into the
+##  second low, result into third, result into highest.
+##
+##  Returns results in %xmm7 = %xmm0.
+##  Clobbers %xmm1-%xmm5.
+##
+&function_begin_B("_vpaes_schedule_round");
+	# extract rcon from xmm8
+	&movdqa	("xmm2",&QWP(8,"esp"));		# xmm8
+	&pxor	("xmm1","xmm1");
+	&palignr("xmm1","xmm2",15);
+	&palignr("xmm2","xmm2",15);
+	&pxor	("xmm7","xmm1");
+
+	# rotate
+	&pshufd	("xmm0","xmm0",0xFF);
+	&palignr("xmm0","xmm0",1);
+
+	# fall through...
+	&movdqa	(&QWP(8,"esp"),"xmm2");		# xmm8
+
+	# low round: same as high round, but no rotation and no rcon.
+&set_label("_vpaes_schedule_low_round");
+	# smear xmm7
+	&movdqa	("xmm1","xmm7");
+	&pslldq	("xmm7",4);
+	&pxor	("xmm7","xmm1");
+	&movdqa	("xmm1","xmm7");
+	&pslldq	("xmm7",8);
+	&pxor	("xmm7","xmm1");
+	&pxor	("xmm7",&QWP($k_s63,$const));
+
+	# subbytes
+	&movdqa	("xmm4",&QWP($k_s0F,$const));
+	&movdqa	("xmm5",&QWP($k_inv,$const));	# 4 : 1/j
+	&movdqa	("xmm1","xmm4");
+	&pandn	("xmm1","xmm0");
+	&psrld	("xmm1",4);			# 1 = i
+	&pand	("xmm0","xmm4");		# 0 = k
+	&movdqa	("xmm2",&QWP($k_inv+16,$const));# 2 : a/k
+	&pshufb	("xmm2","xmm0");		# 2 = a/k
+	&pxor	("xmm0","xmm1");		# 0 = j
+	&movdqa	("xmm3","xmm5");		# 3 : 1/i
+	&pshufb	("xmm3","xmm1");		# 3 = 1/i
+	&pxor	("xmm3","xmm2");		# 3 = iak = 1/i + a/k
+	&movdqa	("xmm4","xmm5");		# 4 : 1/j
+	&pshufb	("xmm4","xmm0");		# 4 = 1/j
+	&pxor	("xmm4","xmm2");		# 4 = jak = 1/j + a/k
+	&movdqa	("xmm2","xmm5");		# 2 : 1/iak
+	&pshufb	("xmm2","xmm3");		# 2 = 1/iak
+	&pxor	("xmm2","xmm0");		# 2 = io
+	&movdqa	("xmm3","xmm5");		# 3 : 1/jak
+	&pshufb	("xmm3","xmm4");		# 3 = 1/jak
+	&pxor	("xmm3","xmm1");		# 3 = jo
+	&movdqa	("xmm4",&QWP($k_sb1,$const));	# 4 : sbou
+	&pshufb	("xmm4","xmm2");		# 4 = sbou
+	&movdqa	("xmm0",&QWP($k_sb1+16,$const));# 0 : sbot
+	&pshufb	("xmm0","xmm3");		# 0 = sb1t
+	&pxor	("xmm0","xmm4");		# 0 = sbox output
+
+	# add in smeared stuff
+	&pxor	("xmm0","xmm7");
+	&movdqa	("xmm7","xmm0");
+	&ret	();
+&function_end_B("_vpaes_schedule_round");
+
+##
+##  .aes_schedule_transform
+##
+##  Linear-transform %xmm0 according to tables at (%ebx)
+##
+##  Output in %xmm0
+##  Clobbers %xmm1, %xmm2
+##
+&function_begin_B("_vpaes_schedule_transform");
+	&movdqa	("xmm2",&QWP($k_s0F,$const));
+	&movdqa	("xmm1","xmm2");
+	&pandn	("xmm1","xmm0");
+	&psrld	("xmm1",4);
+	&pand	("xmm0","xmm2");
+	&movdqa	("xmm2",&QWP(0,$base));
+	&pshufb	("xmm2","xmm0");
+	&movdqa	("xmm0",&QWP(16,$base));
+	&pshufb	("xmm0","xmm1");
+	&pxor	("xmm0","xmm2");
+	&ret	();
+&function_end_B("_vpaes_schedule_transform");
+
+##
+##  .aes_schedule_mangle
+##
+##  Mangle xmm0 from (basis-transformed) standard version
+##  to our version.
+##
+##  On encrypt,
+##    xor with 0x63
+##    multiply by circulant 0,1,1,1
+##    apply shiftrows transform
+##
+##  On decrypt,
+##    xor with 0x63
+##    multiply by "inverse mixcolumns" circulant E,B,D,9
+##    deskew
+##    apply shiftrows transform
+##
+##
+##  Writes out to (%edx), and increments or decrements it
+##  Keeps track of round number mod 4 in %ecx
+##  Preserves xmm0
+##  Clobbers xmm1-xmm5
+##
+&function_begin_B("_vpaes_schedule_mangle");
+	&movdqa	("xmm4","xmm0");	# save xmm0 for later
+	&movdqa	("xmm5",&QWP($k_mc_forward,$const));
+	&test	($out,$out);
+	&jnz	(&label("schedule_mangle_dec"));
+
+	# encrypting
+	&add	($key,16);
+	&pxor	("xmm4",&QWP($k_s63,$const));
+	&pshufb	("xmm4","xmm5");
+	&movdqa	("xmm3","xmm4");
+	&pshufb	("xmm4","xmm5");
+	&pxor	("xmm3","xmm4");
+	&pshufb	("xmm4","xmm5");
+	&pxor	("xmm3","xmm4");
+
+	&jmp	(&label("schedule_mangle_both"));
+
+&set_label("schedule_mangle_dec",16);
+	# inverse mix columns
+	&movdqa	("xmm2",&QWP($k_s0F,$const));
+	&lea	($inp,&DWP($k_dksd,$const));
+	&movdqa	("xmm1","xmm2");
+	&pandn	("xmm1","xmm4");
+	&psrld	("xmm1",4);			# 1 = hi
+	&pand	("xmm4","xmm2");		# 4 = lo
+
+	&movdqa	("xmm2",&QWP(0,$inp));
+	&pshufb	("xmm2","xmm4");
+	&movdqa	("xmm3",&QWP(0x10,$inp));
+	&pshufb	("xmm3","xmm1");
+	&pxor	("xmm3","xmm2");
+	&pshufb	("xmm3","xmm5");
+
+	&movdqa	("xmm2",&QWP(0x20,$inp));
+	&pshufb	("xmm2","xmm4");
+	&pxor	("xmm2","xmm3");
+	&movdqa	("xmm3",&QWP(0x30,$inp));
+	&pshufb	("xmm3","xmm1");
+	&pxor	("xmm3","xmm2");
+	&pshufb	("xmm3","xmm5");
+
+	&movdqa	("xmm2",&QWP(0x40,$inp));
+	&pshufb	("xmm2","xmm4");
+	&pxor	("xmm2","xmm3");
+	&movdqa	("xmm3",&QWP(0x50,$inp));
+	&pshufb	("xmm3","xmm1");
+	&pxor	("xmm3","xmm2");
+	&pshufb	("xmm3","xmm5");
+
+	&movdqa	("xmm2",&QWP(0x60,$inp));
+	&pshufb	("xmm2","xmm4");
+	&pxor	("xmm2","xmm3");
+	&movdqa	("xmm3",&QWP(0x70,$inp));
+	&pshufb	("xmm3","xmm1");
+	&pxor	("xmm3","xmm2");
+
+	&add	($key,-16);
+
+&set_label("schedule_mangle_both");
+	&movdqa	("xmm1",&QWP($k_sr,$const,$magic));
+	&pshufb	("xmm3","xmm1");
+	&add	($magic,-16);
+	&and	($magic,0x30);
+	&movdqu	(&QWP(0,$key),"xmm3");
+	&ret	();
+&function_end_B("_vpaes_schedule_mangle");
+
+#
+# Interface to OpenSSL
+#
+&function_begin("${PREFIX}_set_encrypt_key");
+	&mov	($inp,&wparam(0));		# inp
+	&lea	($base,&DWP(-56,"esp"));
+	&mov	($round,&wparam(1));		# bits
+	&and	($base,-16);
+	&mov	($key,&wparam(2));		# key
+	&xchg	($base,"esp");			# alloca
+	&mov	(&DWP(48,"esp"),$base);
+
+	&mov	($base,$round);
+	&shr	($base,5);
+	&add	($base,5);
+	&mov	(&DWP(240,$key),$base);		# AES_KEY->rounds = nbits/32+5;
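+	# Sanity check on the arithmetic above: for 128/192/256-bit keys
+	# this stores nbits/32+5 = 9/11/13.  The encrypt core runs that
+	# many full rounds plus one final round (the standard 10/12/14),
+	# and the schedule emits rounds+2 = 11/13/15 round keys, counting
+	# the round-0 whitening key written before the loop.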
+	&mov	($magic,0x30);
+	&mov	($out,0);
+
+	&lea	($const,&DWP(&label("_vpaes_consts")."+0x30-".&label("pic_point")));
+	&call	("_vpaes_schedule_core");
+&set_label("pic_point");
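+	# PIC trick: the lea above loads the displacement
+	# _vpaes_consts+0x30-pic_point, the call pushes the address of
+	# pic_point as its return address, and the callee's
+	# add ($const,&DWP(0,"esp")) adds the two, leaving the absolute
+	# address of _vpaes_consts+0x30 in $const.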
+
+	&mov	("esp",&DWP(48,"esp"));
+	&xor	("eax","eax");
+&function_end("${PREFIX}_set_encrypt_key");
+
+&function_begin("${PREFIX}_set_decrypt_key");
+	&mov	($inp,&wparam(0));		# inp
+	&lea	($base,&DWP(-56,"esp"));
+	&mov	($round,&wparam(1));		# bits
+	&and	($base,-16);
+	&mov	($key,&wparam(2));		# key
+	&xchg	($base,"esp");			# alloca
+	&mov	(&DWP(48,"esp"),$base);
+
+	&mov	($base,$round);
+	&shr	($base,5);
+	&add	($base,5);
+	&mov	(&DWP(240,$key),$base);	# AES_KEY->rounds = nbits/32+5;
+	&shl	($base,4);
+	&lea	($key,&DWP(16,$key,$base));
+
+	&mov	($out,1);
+	&mov	($magic,$round);
+	&shr	($magic,1);
+	&and	($magic,32);
+	&xor	($magic,32);			# nbits==192?0:32;
+
+	&lea	($const,&DWP(&label("_vpaes_consts")."+0x30-".&label("pic_point")));
+	&call	("_vpaes_schedule_core");
+&set_label("pic_point");
+
+	&mov	("esp",&DWP(48,"esp"));
+	&xor	("eax","eax");
+&function_end("${PREFIX}_set_decrypt_key");
+
+&function_begin("${PREFIX}_encrypt");
+	&lea	($const,&DWP(&label("_vpaes_consts")."+0x30-".&label("pic_point")));
+	&call	("_vpaes_preheat");
+&set_label("pic_point");
+	&mov	($inp,&wparam(0));		# inp
+	&lea	($base,&DWP(-56,"esp"));
+	&mov	($out,&wparam(1));		# out
+	&and	($base,-16);
+	&mov	($key,&wparam(2));		# key
+	&xchg	($base,"esp");			# alloca
+	&mov	(&DWP(48,"esp"),$base);
+
+	&movdqu	("xmm0",&QWP(0,$inp));
+	&call	("_vpaes_encrypt_core");
+	&movdqu	(&QWP(0,$out),"xmm0");
+
+	&mov	("esp",&DWP(48,"esp"));
+&function_end("${PREFIX}_encrypt");
+
+&function_begin("${PREFIX}_decrypt");
+	&lea	($const,&DWP(&label("_vpaes_consts")."+0x30-".&label("pic_point")));
+	&call	("_vpaes_preheat");
+&set_label("pic_point");
+	&mov	($inp,&wparam(0));		# inp
+	&lea	($base,&DWP(-56,"esp"));
+	&mov	($out,&wparam(1));		# out
+	&and	($base,-16);
+	&mov	($key,&wparam(2));		# key
+	&xchg	($base,"esp");			# alloca
+	&mov	(&DWP(48,"esp"),$base);
+
+	&movdqu	("xmm0",&QWP(0,$inp));
+	&call	("_vpaes_decrypt_core");
+	&movdqu	(&QWP(0,$out),"xmm0");
+
+	&mov	("esp",&DWP(48,"esp"));
+&function_end("${PREFIX}_decrypt");
+
+&function_begin("${PREFIX}_cbc_encrypt");
+	&mov	($inp,&wparam(0));		# inp
+	&mov	($out,&wparam(1));		# out
+	&mov	($round,&wparam(2));		# len
+	&mov	($key,&wparam(3));		# key
+	&sub	($round,16);
+	&jc	(&label("cbc_abort"));
+	&lea	($base,&DWP(-56,"esp"));
+	&mov	($const,&wparam(4));		# ivp
+	&and	($base,-16);
+	&mov	($magic,&wparam(5));		# enc
+	&xchg	($base,"esp");			# alloca
+	&movdqu	("xmm1",&QWP(0,$const));	# load IV
+	&sub	($out,$inp);
+	&mov	(&DWP(48,"esp"),$base);
+
+	&mov	(&DWP(0,"esp"),$out);		# save out
+	&mov	(&DWP(4,"esp"),$key);		# save key
+	&mov	(&DWP(8,"esp"),$const);		# save ivp
+	&mov	($out,$round);			# $out works as $len
+
+	&lea	($const,&DWP(&label("_vpaes_consts")."+0x30-".&label("pic_point")));
+	&call	("_vpaes_preheat");
+&set_label("pic_point");
+	&cmp	($magic,0);
+	&je	(&label("cbc_dec_loop"));
+	&jmp	(&label("cbc_enc_loop"));
+
+&set_label("cbc_enc_loop",16);
+	&movdqu	("xmm0",&QWP(0,$inp));		# load input
+	&pxor	("xmm0","xmm1");		# inp^=iv
+	&call	("_vpaes_encrypt_core");
+	&mov	($base,&DWP(0,"esp"));		# restore out
+	&mov	($key,&DWP(4,"esp"));		# restore key
+	&movdqa	("xmm1","xmm0");
+	&movdqu	(&QWP(0,$base,$inp),"xmm0");	# write output
+	&lea	($inp,&DWP(16,$inp));
+	&sub	($out,16);
+	&jnc	(&label("cbc_enc_loop"));
+	&jmp	(&label("cbc_done"));
+
+&set_label("cbc_dec_loop",16);
+	&movdqu	("xmm0",&QWP(0,$inp));		# load input
+	&movdqa	(&QWP(16,"esp"),"xmm1");	# save IV
+	&movdqa	(&QWP(32,"esp"),"xmm0");	# save future IV
+	&call	("_vpaes_decrypt_core");
+	&mov	($base,&DWP(0,"esp"));		# restore out
+	&mov	($key,&DWP(4,"esp"));		# restore key
+	&pxor	("xmm0",&QWP(16,"esp"));	# out^=iv
+	&movdqa	("xmm1",&QWP(32,"esp"));	# load next IV
+	&movdqu	(&QWP(0,$base,$inp),"xmm0");	# write output
+	&lea	($inp,&DWP(16,$inp));
+	&sub	($out,16);
+	&jnc	(&label("cbc_dec_loop"));
+
+&set_label("cbc_done");
+	&mov	($base,&DWP(8,"esp"));		# restore ivp
+	&mov	("esp",&DWP(48,"esp"));
+	&movdqu	(&QWP(0,$base),"xmm1");		# write IV
+&set_label("cbc_abort");
+&function_end("${PREFIX}_cbc_encrypt");
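+
+# The two loops above implement standard CBC chaining.  As a minimal
+# Perl sketch (illustration only; $encrypt_block is a hypothetical
+# stand-in for _vpaes_encrypt_core, and Perl's string "^" is bytewise
+# xor):
+#
+#	my $iv = $ivec;
+#	for (my $i = 0; $i + 16 <= length $in; $i += 16) {
+#	    $iv = $encrypt_block->(substr($in, $i, 16) ^ $iv); # inp ^= iv
+#	    $out .= $iv;		# ciphertext block becomes next IV
+#	}
+#
+# Decryption runs the recurrence backwards: each plaintext block is
+# D(ct[i]) ^ ct[i-1], which is why the loop saves the "future IV" before
+# overwriting xmm0.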
+
+&asm_finish();
+
+close STDOUT or die "error closing STDOUT: $!";
diff --git a/ap/lib/libssl/openssl-1.1.1o/crypto/aes/asm/vpaes-x86_64.pl b/ap/lib/libssl/openssl-1.1.1o/crypto/aes/asm/vpaes-x86_64.pl
new file mode 100644
index 0000000..099a686
--- /dev/null
+++ b/ap/lib/libssl/openssl-1.1.1o/crypto/aes/asm/vpaes-x86_64.pl
@@ -0,0 +1,1241 @@
+#! /usr/bin/env perl
+# Copyright 2011-2020 The OpenSSL Project Authors. All Rights Reserved.
+#
+# Licensed under the OpenSSL license (the "License").  You may not use
+# this file except in compliance with the License.  You can obtain a copy
+# in the file LICENSE in the source distribution or at
+# https://www.openssl.org/source/license.html
+
+
+######################################################################
+## Constant-time SSSE3 AES core implementation.
+## version 0.1
+##
+## By Mike Hamburg (Stanford University), 2009
+## Public domain.
+##
+## For details see http://shiftleft.org/papers/vector_aes/ and
+## http://crypto.stanford.edu/vpaes/.
+
+######################################################################
+# September 2011.
+#
+# Interface to OpenSSL as "almost" drop-in replacement for
+# aes-x86_64.pl. "Almost" refers to the fact that AES_cbc_encrypt
+# doesn't handle partial vectors (doesn't have to if called from
+# EVP only). "Drop-in" implies that this module doesn't share key
+# schedule structure with the original, nor does it make assumptions
+# about its alignment...
+#
+# Performance summary. The aes-x86_64.pl column lists large-block CBC
+# encrypt/decrypt/with-hyper-threading-off(*) results in cycles per
+# byte processed with a 128-bit key; the vpaes-x86_64.pl column lists
+# [also large-block CBC] encrypt/decrypt results.
+#
+#		aes-x86_64.pl		vpaes-x86_64.pl
+#
+# Core 2(**)	29.6/41.1/14.3		21.9/25.2(***)
+# Nehalem	29.6/40.3/14.6		10.0/11.8
+# Atom		57.3/74.2/32.1		60.9/77.2(***)
+# Silvermont	52.7/64.0/19.5		48.8/60.8(***)
+# Goldmont	38.9/49.0/17.8		10.6/12.6
+#
+# (*)	"Hyper-threading" in this context refers to cache shared among
+#	multiple cores rather than to Intel HTT specifically. As the vast
+#	majority of contemporary cores share cache, the slower code path
+#	is commonplace. In other words, "with-hyper-threading-off" results
+#	are presented mostly for reference purposes.
+#
+# (**)	"Core 2" refers to initial 65nm design, a.k.a. Conroe.
+#
+# (***)	The less impressive improvement on Core 2 and Atom is due to
+#	slow pshufb; it is nevertheless a respectable +36%/62% improvement
+#	on Core 2 (as implied, over the "hyper-threading-safe" code path).
+#
+#						<appro@openssl.org>
+
+$flavour = shift;
+$output  = shift;
+if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
+
+$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
+( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
+die "can't locate x86_64-xlate.pl";
+
+open OUT,"| \"$^X\" \"$xlate\" $flavour \"$output\"";
+*STDOUT=*OUT;
+
+$PREFIX="vpaes";
+
+$code.=<<___;
+.text
+
+##
+##  _aes_encrypt_core
+##
+##  AES-encrypt %xmm0.
+##
+##  Inputs:
+##     %xmm0 = input
+##     %xmm9-%xmm15 as in _vpaes_preheat
+##    (%rdx) = scheduled keys
+##
+##  Output in %xmm0
+##  Clobbers  %xmm1-%xmm5, %r9, %r10, %r11, %rax
+##  Preserves %xmm6 - %xmm8 so you get some local vectors
+##
+##
+.type	_vpaes_encrypt_core,\@abi-omnipotent
+.align 16
+_vpaes_encrypt_core:
+.cfi_startproc
+	mov	%rdx,	%r9
+	mov	\$16,	%r11
+	mov	240(%rdx),%eax
+	movdqa	%xmm9,	%xmm1
+	movdqa	.Lk_ipt(%rip), %xmm2	# iptlo
+	pandn	%xmm0,	%xmm1
+	movdqu	(%r9),	%xmm5		# round0 key
+	psrld	\$4,	%xmm1
+	pand	%xmm9,	%xmm0
+	pshufb	%xmm0,	%xmm2
+	movdqa	.Lk_ipt+16(%rip), %xmm0	# ipthi
+	pshufb	%xmm1,	%xmm0
+	pxor	%xmm5,	%xmm2
+	add	\$16,	%r9
+	pxor	%xmm2,	%xmm0
+	lea	.Lk_mc_backward(%rip),%r10
+	jmp	.Lenc_entry
+
+.align 16
+.Lenc_loop:
+	# middle of middle round
+	movdqa  %xmm13,	%xmm4	# 4 : sb1u
+	movdqa  %xmm12,	%xmm0	# 0 : sb1t
+	pshufb  %xmm2,	%xmm4	# 4 = sb1u
+	pshufb  %xmm3,	%xmm0	# 0 = sb1t
+	pxor	%xmm5,	%xmm4	# 4 = sb1u + k
+	movdqa  %xmm15,	%xmm5	# 4 : sb2u
+	pxor	%xmm4,	%xmm0	# 0 = A
+	movdqa	-0x40(%r11,%r10), %xmm1		# .Lk_mc_forward[]
+	pshufb	%xmm2,	%xmm5	# 4 = sb2u
+	movdqa	(%r11,%r10), %xmm4		# .Lk_mc_backward[]
+	movdqa	%xmm14, %xmm2	# 2 : sb2t
+	pshufb	%xmm3,  %xmm2	# 2 = sb2t
+	movdqa	%xmm0,  %xmm3	# 3 = A
+	pxor	%xmm5,	%xmm2	# 2 = 2A
+	pshufb  %xmm1,  %xmm0	# 0 = B
+	add	\$16,	%r9	# next key
+	pxor	%xmm2,  %xmm0	# 0 = 2A+B
+	pshufb	%xmm4,	%xmm3	# 3 = D
+	add	\$16,	%r11	# next mc
+	pxor	%xmm0,	%xmm3	# 3 = 2A+B+D
+	pshufb  %xmm1,	%xmm0	# 0 = 2B+C
+	and	\$0x30,	%r11	# ... mod 4
+	sub	\$1,%rax	# nr--
+	pxor	%xmm3,	%xmm0	# 0 = 2A+3B+C+D
+
+.Lenc_entry:
+	# top of round
+	movdqa  %xmm9, 	%xmm1	# 1 : i
+	movdqa	%xmm11, %xmm5	# 2 : a/k
+	pandn	%xmm0, 	%xmm1	# 1 = i<<4
+	psrld	\$4,   	%xmm1   # 1 = i
+	pand	%xmm9, 	%xmm0   # 0 = k
+	pshufb  %xmm0,  %xmm5	# 2 = a/k
+	movdqa	%xmm10,	%xmm3  	# 3 : 1/i
+	pxor	%xmm1,	%xmm0	# 0 = j
+	pshufb  %xmm1, 	%xmm3  	# 3 = 1/i
+	movdqa	%xmm10,	%xmm4  	# 4 : 1/j
+	pxor	%xmm5, 	%xmm3  	# 3 = iak = 1/i + a/k
+	pshufb	%xmm0, 	%xmm4  	# 4 = 1/j
+	movdqa	%xmm10,	%xmm2  	# 2 : 1/iak
+	pxor	%xmm5, 	%xmm4  	# 4 = jak = 1/j + a/k
+	pshufb  %xmm3,	%xmm2  	# 2 = 1/iak
+	movdqa	%xmm10, %xmm3   # 3 : 1/jak
+	pxor	%xmm0, 	%xmm2  	# 2 = io
+	pshufb  %xmm4,  %xmm3   # 3 = 1/jak
+	movdqu	(%r9),	%xmm5
+	pxor	%xmm1,  %xmm3   # 3 = jo
+	jnz	.Lenc_loop
+
+	# middle of last round
+	movdqa	-0x60(%r10), %xmm4	# 3 : sbou	.Lk_sbo
+	movdqa	-0x50(%r10), %xmm0	# 0 : sbot	.Lk_sbo+16
+	pshufb  %xmm2,  %xmm4	# 4 = sbou
+	pxor	%xmm5,  %xmm4	# 4 = sb1u + k
+	pshufb  %xmm3,	%xmm0	# 0 = sb1t
+	movdqa	0x40(%r11,%r10), %xmm1		# .Lk_sr[]
+	pxor	%xmm4,	%xmm0	# 0 = A
+	pshufb	%xmm1,	%xmm0
+	ret
+.cfi_endproc
+.size	_vpaes_encrypt_core,.-_vpaes_encrypt_core
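+
+##  Per the register comments in .Lenc_loop above, each middle round
+##  accumulates 2A+3B+C+D, where A is the sbox output (xor round key)
+##  and B, C, D are its successive one-byte column rotations
+##  (.Lk_mc_forward/.Lk_mc_backward); "+" is xor, so this is the
+##  (2,3,1,1) MixColumns circulant evaluated with pshufb rotations
+##  instead of a matrix multiply.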
+
+##
+##  Decryption core
+##
+##  Same API as encryption core.
+##
+.type	_vpaes_decrypt_core,\@abi-omnipotent
+.align	16
+_vpaes_decrypt_core:
+.cfi_startproc
+	mov	%rdx,	%r9		# load key
+	mov	240(%rdx),%eax
+	movdqa	%xmm9,	%xmm1
+	movdqa	.Lk_dipt(%rip), %xmm2	# iptlo
+	pandn	%xmm0,	%xmm1
+	mov	%rax,	%r11
+	psrld	\$4,	%xmm1
+	movdqu	(%r9),	%xmm5		# round0 key
+	shl	\$4,	%r11
+	pand	%xmm9,	%xmm0
+	pshufb	%xmm0,	%xmm2
+	movdqa	.Lk_dipt+16(%rip), %xmm0 # ipthi
+	xor	\$0x30,	%r11
+	lea	.Lk_dsbd(%rip),%r10
+	pshufb	%xmm1,	%xmm0
+	and	\$0x30,	%r11
+	pxor	%xmm5,	%xmm2
+	movdqa	.Lk_mc_forward+48(%rip), %xmm5
+	pxor	%xmm2,	%xmm0
+	add	\$16,	%r9
+	add	%r10,	%r11
+	jmp	.Ldec_entry
+
+.align 16
+.Ldec_loop:
+##
+##  Inverse mix columns
+##
+	movdqa  -0x20(%r10),%xmm4	# 4 : sb9u
+	movdqa  -0x10(%r10),%xmm1	# 0 : sb9t
+	pshufb	%xmm2,	%xmm4		# 4 = sb9u
+	pshufb	%xmm3,	%xmm1		# 0 = sb9t
+	pxor	%xmm4,	%xmm0
+	movdqa  0x00(%r10),%xmm4	# 4 : sbdu
+	pxor	%xmm1,	%xmm0		# 0 = ch
+	movdqa  0x10(%r10),%xmm1	# 0 : sbdt
+
+	pshufb	%xmm2,	%xmm4		# 4 = sbdu
+	pshufb	%xmm5,	%xmm0		# MC ch
+	pshufb	%xmm3,	%xmm1		# 0 = sbdt
+	pxor	%xmm4,	%xmm0		# 4 = ch
+	movdqa  0x20(%r10),%xmm4	# 4 : sbbu
+	pxor	%xmm1,	%xmm0		# 0 = ch
+	movdqa  0x30(%r10),%xmm1	# 0 : sbbt
+
+	pshufb	%xmm2,	%xmm4		# 4 = sbbu
+	pshufb	%xmm5,	%xmm0		# MC ch
+	pshufb	%xmm3,	%xmm1		# 0 = sbbt
+	pxor	%xmm4,	%xmm0		# 4 = ch
+	movdqa  0x40(%r10),%xmm4	# 4 : sbeu
+	pxor	%xmm1,	%xmm0		# 0 = ch
+	movdqa  0x50(%r10),%xmm1	# 0 : sbet
+
+	pshufb	%xmm2,	%xmm4		# 4 = sbeu
+	pshufb	%xmm5,	%xmm0		# MC ch
+	pshufb	%xmm3,	%xmm1		# 0 = sbet
+	pxor	%xmm4,	%xmm0		# 4 = ch
+	add	\$16, %r9		# next round key
+	palignr	\$12,	%xmm5,	%xmm5
+	pxor	%xmm1,	%xmm0		# 0 = ch
+	sub	\$1,%rax		# nr--
+
+.Ldec_entry:
+	# top of round
+	movdqa  %xmm9, 	%xmm1	# 1 : i
+	pandn	%xmm0, 	%xmm1	# 1 = i<<4
+	movdqa	%xmm11, %xmm2	# 2 : a/k
+	psrld	\$4,    %xmm1	# 1 = i
+	pand	%xmm9, 	%xmm0	# 0 = k
+	pshufb  %xmm0,  %xmm2	# 2 = a/k
+	movdqa	%xmm10,	%xmm3	# 3 : 1/i
+	pxor	%xmm1,	%xmm0	# 0 = j
+	pshufb  %xmm1, 	%xmm3	# 3 = 1/i
+	movdqa	%xmm10,	%xmm4	# 4 : 1/j
+	pxor	%xmm2, 	%xmm3	# 3 = iak = 1/i + a/k
+	pshufb	%xmm0, 	%xmm4	# 4 = 1/j
+	pxor	%xmm2, 	%xmm4	# 4 = jak = 1/j + a/k
+	movdqa	%xmm10,	%xmm2	# 2 : 1/iak
+	pshufb  %xmm3,	%xmm2	# 2 = 1/iak
+	movdqa	%xmm10, %xmm3	# 3 : 1/jak
+	pxor	%xmm0, 	%xmm2	# 2 = io
+	pshufb  %xmm4,  %xmm3	# 3 = 1/jak
+	movdqu	(%r9),	%xmm0
+	pxor	%xmm1,  %xmm3	# 3 = jo
+	jnz	.Ldec_loop
+
+	# middle of last round
+	movdqa	0x60(%r10), %xmm4	# 3 : sbou
+	pshufb  %xmm2,  %xmm4	# 4 = sbou
+	pxor	%xmm0,  %xmm4	# 4 = sb1u + k
+	movdqa	0x70(%r10), %xmm0	# 0 : sbot
+	movdqa	-0x160(%r11), %xmm2	# .Lk_sr-.Lk_dsbd=-0x160
+	pshufb  %xmm3,	%xmm0	# 0 = sb1t
+	pxor	%xmm4,	%xmm0	# 0 = A
+	pshufb	%xmm2,	%xmm0
+	ret
+.cfi_endproc
+.size	_vpaes_decrypt_core,.-_vpaes_decrypt_core
+
+########################################################
+##                                                    ##
+##                  AES key schedule                  ##
+##                                                    ##
+########################################################
+.type	_vpaes_schedule_core,\@abi-omnipotent
+.align	16
+_vpaes_schedule_core:
+.cfi_startproc
+	# rdi = key
+	# rsi = size in bits
+	# rdx = buffer
+	# rcx = direction.  0=encrypt, 1=decrypt
+
+	call	_vpaes_preheat		# load the tables
+	movdqa	.Lk_rcon(%rip), %xmm8	# load rcon
+	movdqu	(%rdi),	%xmm0		# load key (unaligned)
+
+	# input transform
+	movdqa	%xmm0,	%xmm3
+	lea	.Lk_ipt(%rip), %r11
+	call	_vpaes_schedule_transform
+	movdqa	%xmm0,	%xmm7
+
+	lea	.Lk_sr(%rip),%r10
+	test	%rcx,	%rcx
+	jnz	.Lschedule_am_decrypting
+
+	# encrypting, output zeroth round key after transform
+	movdqu	%xmm0,	(%rdx)
+	jmp	.Lschedule_go
+
+.Lschedule_am_decrypting:
+	# decrypting, output zeroth round key after shiftrows
+	movdqa	(%r8,%r10),%xmm1
+	pshufb  %xmm1,	%xmm3
+	movdqu	%xmm3,	(%rdx)
+	xor	\$0x30, %r8
+
+.Lschedule_go:
+	cmp	\$192,	%esi
+	ja	.Lschedule_256
+	je	.Lschedule_192
+	# 128: fall through
+
+##
+##  .schedule_128
+##
+##  128-bit specific part of key schedule.
+##
+##  This schedule is really simple, because all its parts
+##  are accomplished by the subroutines.
+##
+.Lschedule_128:
+	mov	\$10, %esi
+
+.Loop_schedule_128:
+	call 	_vpaes_schedule_round
+	dec	%rsi
+	jz 	.Lschedule_mangle_last
+	call	_vpaes_schedule_mangle	# write output
+	jmp 	.Loop_schedule_128
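+
+##  (Key-count check: the round is run 10 times; the first 9 results are
+##  written by _vpaes_schedule_mangle and the 10th by
+##  .Lschedule_mangle_last, which together with the round-0 key stored at
+##  .Lschedule_go gives the 11 round keys AES-128 consumes.)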
+
+##
+##  .aes_schedule_192
+##
+##  192-bit specific part of key schedule.
+##
+##  The main body of this schedule is the same as the 128-bit
+##  schedule, but with more smearing.  The long, high side is
+##  stored in %xmm7 as before, and the short, low side is in
+##  the high bits of %xmm6.
+##
+##  This schedule is somewhat nastier, however, because each
+##  round produces 192 bits of key material, or 1.5 round keys.
+##  Therefore, on each cycle we do 2 rounds and produce 3 round
+##  keys.
+##
+.align	16
+.Lschedule_192:
+	movdqu	8(%rdi),%xmm0		# load key part 2 (very unaligned)
+	call	_vpaes_schedule_transform	# input transform
+	movdqa	%xmm0,	%xmm6		# save short part
+	pxor	%xmm4,	%xmm4		# clear 4
+	movhlps	%xmm4,	%xmm6		# clobber low side with zeros
+	mov	\$4,	%esi
+
+.Loop_schedule_192:
+	call	_vpaes_schedule_round
+	palignr	\$8,%xmm6,%xmm0
+	call	_vpaes_schedule_mangle	# save key n
+	call	_vpaes_schedule_192_smear
+	call	_vpaes_schedule_mangle	# save key n+1
+	call	_vpaes_schedule_round
+	dec	%rsi
+	jz 	.Lschedule_mangle_last
+	call	_vpaes_schedule_mangle	# save key n+2
+	call	_vpaes_schedule_192_smear
+	jmp	.Loop_schedule_192
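+
+##  (Key-count check: of the 4 cycles, the first three write 3 round keys
+##  each and the fourth writes 2 before branching to
+##  .Lschedule_mangle_last, which writes 1 more; with the round-0 key
+##  stored at .Lschedule_go that is 3*3+2+1+1 = 13 round keys, exactly
+##  what AES-192's 12 rounds consume.)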
+
+##
+##  .aes_schedule_256
+##
+##  256-bit specific part of key schedule.
+##
+##  The structure here is very similar to the 128-bit
+##  schedule, but with an additional "low side" in
+##  %xmm6.  The low side's rounds are the same as the
+##  high side's, except no rcon and no rotation.
+##
+.align	16
+.Lschedule_256:
+	movdqu	16(%rdi),%xmm0		# load key part 2 (unaligned)
+	call	_vpaes_schedule_transform	# input transform
+	mov	\$7, %esi
+
+.Loop_schedule_256:
+	call	_vpaes_schedule_mangle	# output low result
+	movdqa	%xmm0,	%xmm6		# save cur_lo in xmm6
+
+	# high round
+	call	_vpaes_schedule_round
+	dec	%rsi
+	jz 	.Lschedule_mangle_last
+	call	_vpaes_schedule_mangle
+
+	# low round. swap xmm7 and xmm6
+	pshufd	\$0xFF,	%xmm0,	%xmm0
+	movdqa	%xmm7,	%xmm5
+	movdqa	%xmm6,	%xmm7
+	call	_vpaes_schedule_low_round
+	movdqa	%xmm5,	%xmm7
+
+	jmp	.Loop_schedule_256
+
+
+##
+##  .aes_schedule_mangle_last
+##
+##  Mangler for last round of key schedule
+##  Mangles %xmm0
+##    when encrypting, outputs out(%xmm0) ^ 63
+##    when decrypting, outputs unskew(%xmm0)
+##
+##  Always called right before return... jumps to cleanup and exits
+##
+.align	16
+.Lschedule_mangle_last:
+	# schedule last round key from xmm0
+	lea	.Lk_deskew(%rip),%r11	# prepare to deskew
+	test	%rcx, 	%rcx
+	jnz	.Lschedule_mangle_last_dec
+
+	# encrypting
+	movdqa	(%r8,%r10),%xmm1
+	pshufb	%xmm1,	%xmm0		# output permute
+	lea	.Lk_opt(%rip),	%r11	# prepare to output transform
+	add	\$32,	%rdx
+
+.Lschedule_mangle_last_dec:
+	add	\$-16,	%rdx
+	pxor	.Lk_s63(%rip),	%xmm0
+	call	_vpaes_schedule_transform # output transform
+	movdqu	%xmm0,	(%rdx)		# save last key
+
+	# cleanup
+	pxor	%xmm0,  %xmm0
+	pxor	%xmm1,  %xmm1
+	pxor	%xmm2,  %xmm2
+	pxor	%xmm3,  %xmm3
+	pxor	%xmm4,  %xmm4
+	pxor	%xmm5,  %xmm5
+	pxor	%xmm6,  %xmm6
+	pxor	%xmm7,  %xmm7
+	ret
+.cfi_endproc
+.size	_vpaes_schedule_core,.-_vpaes_schedule_core
+
+##
+##  .aes_schedule_192_smear
+##
+##  Smear the short, low side in the 192-bit key schedule.
+##
+##  Inputs:
+##    %xmm7: high side, b  a  x  y
+##    %xmm6:  low side, d  c  0  0
+##    %xmm13: 0
+##
+##  Outputs:
+##    %xmm6: b+c+d  b+c  0  0
+##    %xmm0: b+c+d  b+c  b  a
+##
+.type	_vpaes_schedule_192_smear,\@abi-omnipotent
+.align	16
+_vpaes_schedule_192_smear:
+.cfi_startproc
+	pshufd	\$0x80,	%xmm6,	%xmm1	# d c 0 0 -> c 0 0 0
+	pshufd	\$0xFE,	%xmm7,	%xmm0	# b a _ _ -> b b b a
+	pxor	%xmm1,	%xmm6		# -> c+d c 0 0
+	pxor	%xmm1,	%xmm1
+	pxor	%xmm0,	%xmm6		# -> b+c+d b+c b a
+	movdqa	%xmm6,	%xmm0
+	movhlps	%xmm1,	%xmm6		# clobber low side with zeros
+	ret
+.cfi_endproc
+.size	_vpaes_schedule_192_smear,.-_vpaes_schedule_192_smear
+
+##
+##  .aes_schedule_round
+##
+##  Runs one main round of the key schedule on %xmm0, %xmm7
+##
+##  Specifically, runs subbytes on the high dword of %xmm0
+##  then rotates it by one byte and xors into the low dword of
+##  %xmm7.
+##
+##  Adds rcon from low byte of %xmm8, then rotates %xmm8 for
+##  next rcon.
+##
+##  Smears the dwords of %xmm7 by xoring the low into the
+##  second low, result into third, result into highest.
+##
+##  Returns results in %xmm7 = %xmm0.
+##  Clobbers %xmm1-%xmm4, %r11.
+##
+.type	_vpaes_schedule_round,\@abi-omnipotent
+.align	16
+_vpaes_schedule_round:
+.cfi_startproc
+	# extract rcon from xmm8
+	pxor	%xmm1,	%xmm1
+	palignr	\$15,	%xmm8,	%xmm1
+	palignr	\$15,	%xmm8,	%xmm8
+	pxor	%xmm1,	%xmm7
+
+	# rotate
+	pshufd	\$0xFF,	%xmm0,	%xmm0
+	palignr	\$1,	%xmm0,	%xmm0
+
+	# fall through...
+
+	# low round: same as high round, but no rotation and no rcon.
+_vpaes_schedule_low_round:
+	# smear xmm7
+	movdqa	%xmm7,	%xmm1
+	pslldq	\$4,	%xmm7
+	pxor	%xmm1,	%xmm7
+	movdqa	%xmm7,	%xmm1
+	pslldq	\$8,	%xmm7
+	pxor	%xmm1,	%xmm7
+	pxor	.Lk_s63(%rip), %xmm7
+
+	# subbytes
+	movdqa  %xmm9, 	%xmm1
+	pandn	%xmm0, 	%xmm1
+	psrld	\$4,    %xmm1		# 1 = i
+	pand	%xmm9, 	%xmm0		# 0 = k
+	movdqa	%xmm11, %xmm2		# 2 : a/k
+	pshufb  %xmm0,  %xmm2		# 2 = a/k
+	pxor	%xmm1,	%xmm0		# 0 = j
+	movdqa	%xmm10,	%xmm3		# 3 : 1/i
+	pshufb  %xmm1, 	%xmm3		# 3 = 1/i
+	pxor	%xmm2, 	%xmm3		# 3 = iak = 1/i + a/k
+	movdqa	%xmm10,	%xmm4		# 4 : 1/j
+	pshufb	%xmm0, 	%xmm4		# 4 = 1/j
+	pxor	%xmm2, 	%xmm4		# 4 = jak = 1/j + a/k
+	movdqa	%xmm10,	%xmm2		# 2 : 1/iak
+	pshufb  %xmm3,	%xmm2		# 2 = 1/iak
+	pxor	%xmm0, 	%xmm2		# 2 = io
+	movdqa	%xmm10, %xmm3		# 3 : 1/jak
+	pshufb  %xmm4,  %xmm3		# 3 = 1/jak
+	pxor	%xmm1,  %xmm3		# 3 = jo
+	movdqa	%xmm13, %xmm4		# 4 : sbou
+	pshufb  %xmm2,  %xmm4		# 4 = sbou
+	movdqa	%xmm12, %xmm0		# 0 : sbot
+	pshufb  %xmm3,	%xmm0		# 0 = sb1t
+	pxor	%xmm4, 	%xmm0		# 0 = sbox output
+
+	# add in smeared stuff
+	pxor	%xmm7,	%xmm0
+	movdqa	%xmm0,	%xmm7
+	ret
+.cfi_endproc
+.size	_vpaes_schedule_round,.-_vpaes_schedule_round
+
+##
+##  .aes_schedule_transform
+##
+##  Linear-transform %xmm0 according to tables at (%r11)
+##
+##  Requires that %xmm9 = 0x0F0F... as in preheat
+##  Output in %xmm0
+##  Clobbers %xmm1, %xmm2
+##
+.type	_vpaes_schedule_transform,\@abi-omnipotent
+.align	16
+_vpaes_schedule_transform:
+.cfi_startproc
+	movdqa	%xmm9,	%xmm1
+	pandn	%xmm0,	%xmm1
+	psrld	\$4,	%xmm1
+	pand	%xmm9,	%xmm0
+	movdqa	(%r11), %xmm2 	# lo
+	pshufb	%xmm0,	%xmm2
+	movdqa	16(%r11), %xmm0 # hi
+	pshufb	%xmm1,	%xmm0
+	pxor	%xmm2,	%xmm0
+	ret
+.cfi_endproc
+.size	_vpaes_schedule_transform,.-_vpaes_schedule_transform
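+
+##  A minimal Perl sketch of the same transform on a single byte,
+##  assuming \@lo and \@hi hold the 16-entry tables loaded from (%r11)
+##  and 16(%r11) (illustration only, not generated code):
+##
+##	sub transform_byte {
+##	    my (\$x, \$lo, \$hi) = \@_;
+##	    return \$lo->[\$x & 0x0f] ^ \$hi->[(\$x >> 4) & 0x0f];
+##	}
+##
+##  pshufb performs exactly this 16-entry lookup on all 16 lanes at once,
+##  which is why the input is split into low and high nibbles first.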
+
+##
+##  .aes_schedule_mangle
+##
+##  Mangle xmm0 from (basis-transformed) standard version
+##  to our version.
+##
+##  On encrypt,
+##    xor with 0x63
+##    multiply by circulant 0,1,1,1
+##    apply shiftrows transform
+##
+##  On decrypt,
+##    xor with 0x63
+##    multiply by "inverse mixcolumns" circulant E,B,D,9
+##    deskew
+##    apply shiftrows transform
+##
+##
+##  Writes out to (%rdx), and increments or decrements it
+##  Keeps track of round number mod 4 in %r8
+##  Preserves xmm0
+##  Clobbers xmm1-xmm5
+##
+.type	_vpaes_schedule_mangle,\@abi-omnipotent
+.align	16
+_vpaes_schedule_mangle:
+.cfi_startproc
+	movdqa	%xmm0,	%xmm4	# save xmm0 for later
+	movdqa	.Lk_mc_forward(%rip),%xmm5
+	test	%rcx, 	%rcx
+	jnz	.Lschedule_mangle_dec
+
+	# encrypting
+	add	\$16,	%rdx
+	pxor	.Lk_s63(%rip),%xmm4
+	pshufb	%xmm5,	%xmm4
+	movdqa	%xmm4,	%xmm3
+	pshufb	%xmm5,	%xmm4
+	pxor	%xmm4,	%xmm3
+	pshufb	%xmm5,	%xmm4
+	pxor	%xmm4,	%xmm3
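+
+	# Following the register comments, the three pshufb/pxor steps
+	# above leave xmm3 = R(x) ^ R^2(x) ^ R^3(x), where x = xmm0 xor
+	# .Lk_s63 and R rotates each 4-byte column by one byte
+	# (.Lk_mc_forward); that is the multiply by the circulant
+	# (0,1,1,1) described in the header comment.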
+
+	jmp	.Lschedule_mangle_both
+.align	16
+.Lschedule_mangle_dec:
+	# inverse mix columns
+	lea	.Lk_dksd(%rip),%r11
+	movdqa	%xmm9,	%xmm1
+	pandn	%xmm4,	%xmm1
+	psrld	\$4,	%xmm1	# 1 = hi
+	pand	%xmm9,	%xmm4	# 4 = lo
+
+	movdqa	0x00(%r11), %xmm2
+	pshufb	%xmm4,	%xmm2
+	movdqa	0x10(%r11), %xmm3
+	pshufb	%xmm1,	%xmm3
+	pxor	%xmm2,	%xmm3
+	pshufb	%xmm5,	%xmm3
+
+	movdqa	0x20(%r11), %xmm2
+	pshufb	%xmm4,	%xmm2
+	pxor	%xmm3,	%xmm2
+	movdqa	0x30(%r11), %xmm3
+	pshufb	%xmm1,	%xmm3
+	pxor	%xmm2,	%xmm3
+	pshufb	%xmm5,	%xmm3
+
+	movdqa	0x40(%r11), %xmm2
+	pshufb	%xmm4,	%xmm2
+	pxor	%xmm3,	%xmm2
+	movdqa	0x50(%r11), %xmm3
+	pshufb	%xmm1,	%xmm3
+	pxor	%xmm2,	%xmm3
+	pshufb	%xmm5,	%xmm3
+
+	movdqa	0x60(%r11), %xmm2
+	pshufb	%xmm4,	%xmm2
+	pxor	%xmm3,	%xmm2
+	movdqa	0x70(%r11), %xmm3
+	pshufb	%xmm1,	%xmm3
+	pxor	%xmm2,	%xmm3
+
+	add	\$-16,	%rdx
+
+.Lschedule_mangle_both:
+	movdqa	(%r8,%r10),%xmm1
+	pshufb	%xmm1,%xmm3
+	add	\$-16,	%r8
+	and	\$0x30,	%r8
+	movdqu	%xmm3,	(%rdx)
+	ret
+.cfi_endproc
+.size	_vpaes_schedule_mangle,.-_vpaes_schedule_mangle
+
+#
+# Interface to OpenSSL
+#
+.globl	${PREFIX}_set_encrypt_key
+.type	${PREFIX}_set_encrypt_key,\@function,3
+.align	16
+${PREFIX}_set_encrypt_key:
+.cfi_startproc
+___
+$code.=<<___ if ($win64);
+	lea	-0xb8(%rsp),%rsp
+	movaps	%xmm6,0x10(%rsp)
+	movaps	%xmm7,0x20(%rsp)
+	movaps	%xmm8,0x30(%rsp)
+	movaps	%xmm9,0x40(%rsp)
+	movaps	%xmm10,0x50(%rsp)
+	movaps	%xmm11,0x60(%rsp)
+	movaps	%xmm12,0x70(%rsp)
+	movaps	%xmm13,0x80(%rsp)
+	movaps	%xmm14,0x90(%rsp)
+	movaps	%xmm15,0xa0(%rsp)
+.Lenc_key_body:
+___
+$code.=<<___;
+	mov	%esi,%eax
+	shr	\$5,%eax
+	add	\$5,%eax
+	mov	%eax,240(%rdx)	# AES_KEY->rounds = nbits/32+5;
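+				#   (9/11/13 for 128-/192-/256-bit keys)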
+
+	mov	\$0,%ecx
+	mov	\$0x30,%r8d
+	call	_vpaes_schedule_core
+___
+$code.=<<___ if ($win64);
+	movaps	0x10(%rsp),%xmm6
+	movaps	0x20(%rsp),%xmm7
+	movaps	0x30(%rsp),%xmm8
+	movaps	0x40(%rsp),%xmm9
+	movaps	0x50(%rsp),%xmm10
+	movaps	0x60(%rsp),%xmm11
+	movaps	0x70(%rsp),%xmm12
+	movaps	0x80(%rsp),%xmm13
+	movaps	0x90(%rsp),%xmm14
+	movaps	0xa0(%rsp),%xmm15
+	lea	0xb8(%rsp),%rsp
+.Lenc_key_epilogue:
+___
+$code.=<<___;
+	xor	%eax,%eax
+	ret
+.cfi_endproc
+.size	${PREFIX}_set_encrypt_key,.-${PREFIX}_set_encrypt_key
+
+.globl	${PREFIX}_set_decrypt_key
+.type	${PREFIX}_set_decrypt_key,\@function,3
+.align	16
+${PREFIX}_set_decrypt_key:
+.cfi_startproc
+___
+$code.=<<___ if ($win64);
+	lea	-0xb8(%rsp),%rsp
+	movaps	%xmm6,0x10(%rsp)
+	movaps	%xmm7,0x20(%rsp)
+	movaps	%xmm8,0x30(%rsp)
+	movaps	%xmm9,0x40(%rsp)
+	movaps	%xmm10,0x50(%rsp)
+	movaps	%xmm11,0x60(%rsp)
+	movaps	%xmm12,0x70(%rsp)
+	movaps	%xmm13,0x80(%rsp)
+	movaps	%xmm14,0x90(%rsp)
+	movaps	%xmm15,0xa0(%rsp)
+.Ldec_key_body:
+___
+$code.=<<___;
+	mov	%esi,%eax
+	shr	\$5,%eax
+	add	\$5,%eax
+	mov	%eax,240(%rdx)	# AES_KEY->rounds = nbits/32+5;
+	shl	\$4,%eax
+	lea	16(%rdx,%rax),%rdx
+
+	mov	\$1,%ecx
+	mov	%esi,%r8d
+	shr	\$1,%r8d
+	and	\$32,%r8d
+	xor	\$32,%r8d	# nbits==192?0:32
+	call	_vpaes_schedule_core
+___
+$code.=<<___ if ($win64);
+	movaps	0x10(%rsp),%xmm6
+	movaps	0x20(%rsp),%xmm7
+	movaps	0x30(%rsp),%xmm8
+	movaps	0x40(%rsp),%xmm9
+	movaps	0x50(%rsp),%xmm10
+	movaps	0x60(%rsp),%xmm11
+	movaps	0x70(%rsp),%xmm12
+	movaps	0x80(%rsp),%xmm13
+	movaps	0x90(%rsp),%xmm14
+	movaps	0xa0(%rsp),%xmm15
+	lea	0xb8(%rsp),%rsp
+.Ldec_key_epilogue:
+___
+$code.=<<___;
+	xor	%eax,%eax
+	ret
+.cfi_endproc
+.size	${PREFIX}_set_decrypt_key,.-${PREFIX}_set_decrypt_key
+
+.globl	${PREFIX}_encrypt
+.type	${PREFIX}_encrypt,\@function,3
+.align	16
+${PREFIX}_encrypt:
+.cfi_startproc
+___
+$code.=<<___ if ($win64);
+	lea	-0xb8(%rsp),%rsp
+	movaps	%xmm6,0x10(%rsp)
+	movaps	%xmm7,0x20(%rsp)
+	movaps	%xmm8,0x30(%rsp)
+	movaps	%xmm9,0x40(%rsp)
+	movaps	%xmm10,0x50(%rsp)
+	movaps	%xmm11,0x60(%rsp)
+	movaps	%xmm12,0x70(%rsp)
+	movaps	%xmm13,0x80(%rsp)
+	movaps	%xmm14,0x90(%rsp)
+	movaps	%xmm15,0xa0(%rsp)
+.Lenc_body:
+___
+$code.=<<___;
+	movdqu	(%rdi),%xmm0
+	call	_vpaes_preheat
+	call	_vpaes_encrypt_core
+	movdqu	%xmm0,(%rsi)
+___
+$code.=<<___ if ($win64);
+	movaps	0x10(%rsp),%xmm6
+	movaps	0x20(%rsp),%xmm7
+	movaps	0x30(%rsp),%xmm8
+	movaps	0x40(%rsp),%xmm9
+	movaps	0x50(%rsp),%xmm10
+	movaps	0x60(%rsp),%xmm11
+	movaps	0x70(%rsp),%xmm12
+	movaps	0x80(%rsp),%xmm13
+	movaps	0x90(%rsp),%xmm14
+	movaps	0xa0(%rsp),%xmm15
+	lea	0xb8(%rsp),%rsp
+.Lenc_epilogue:
+___
+$code.=<<___;
+	ret
+.cfi_endproc
+.size	${PREFIX}_encrypt,.-${PREFIX}_encrypt
+
+.globl	${PREFIX}_decrypt
+.type	${PREFIX}_decrypt,\@function,3
+.align	16
+${PREFIX}_decrypt:
+.cfi_startproc
+___
+$code.=<<___ if ($win64);
+	lea	-0xb8(%rsp),%rsp
+	movaps	%xmm6,0x10(%rsp)
+	movaps	%xmm7,0x20(%rsp)
+	movaps	%xmm8,0x30(%rsp)
+	movaps	%xmm9,0x40(%rsp)
+	movaps	%xmm10,0x50(%rsp)
+	movaps	%xmm11,0x60(%rsp)
+	movaps	%xmm12,0x70(%rsp)
+	movaps	%xmm13,0x80(%rsp)
+	movaps	%xmm14,0x90(%rsp)
+	movaps	%xmm15,0xa0(%rsp)
+.Ldec_body:
+___
+$code.=<<___;
+	movdqu	(%rdi),%xmm0
+	call	_vpaes_preheat
+	call	_vpaes_decrypt_core
+	movdqu	%xmm0,(%rsi)
+___
+$code.=<<___ if ($win64);
+	movaps	0x10(%rsp),%xmm6
+	movaps	0x20(%rsp),%xmm7
+	movaps	0x30(%rsp),%xmm8
+	movaps	0x40(%rsp),%xmm9
+	movaps	0x50(%rsp),%xmm10
+	movaps	0x60(%rsp),%xmm11
+	movaps	0x70(%rsp),%xmm12
+	movaps	0x80(%rsp),%xmm13
+	movaps	0x90(%rsp),%xmm14
+	movaps	0xa0(%rsp),%xmm15
+	lea	0xb8(%rsp),%rsp
+.Ldec_epilogue:
+___
+$code.=<<___;
+	ret
+.cfi_endproc
+.size	${PREFIX}_decrypt,.-${PREFIX}_decrypt
+___
+{
+my ($inp,$out,$len,$key,$ivp,$enc)=("%rdi","%rsi","%rdx","%rcx","%r8","%r9");
+# void ${PREFIX}_cbc_encrypt (const unsigned char *inp, unsigned char *out,
+#                             size_t length, const AES_KEY *key,
+#                             unsigned char *ivp, const int enc);
+$code.=<<___;
+.globl	${PREFIX}_cbc_encrypt
+.type	${PREFIX}_cbc_encrypt,\@function,6
+.align	16
+${PREFIX}_cbc_encrypt:
+.cfi_startproc
+	xchg	$key,$len
+___
+($len,$key)=($key,$len);
+$code.=<<___;
+	sub	\$16,$len
+	jc	.Lcbc_abort
+___
+$code.=<<___ if ($win64);
+	lea	-0xb8(%rsp),%rsp
+	movaps	%xmm6,0x10(%rsp)
+	movaps	%xmm7,0x20(%rsp)
+	movaps	%xmm8,0x30(%rsp)
+	movaps	%xmm9,0x40(%rsp)
+	movaps	%xmm10,0x50(%rsp)
+	movaps	%xmm11,0x60(%rsp)
+	movaps	%xmm12,0x70(%rsp)
+	movaps	%xmm13,0x80(%rsp)
+	movaps	%xmm14,0x90(%rsp)
+	movaps	%xmm15,0xa0(%rsp)
+.Lcbc_body:
+___
+$code.=<<___;
+	movdqu	($ivp),%xmm6		# load IV
+	sub	$inp,$out
+	call	_vpaes_preheat
+	cmp	\$0,${enc}d
+	je	.Lcbc_dec_loop
+	jmp	.Lcbc_enc_loop
+.align	16
+.Lcbc_enc_loop:
+	movdqu	($inp),%xmm0
+	pxor	%xmm6,%xmm0
+	call	_vpaes_encrypt_core
+	movdqa	%xmm0,%xmm6
+	movdqu	%xmm0,($out,$inp)
+	lea	16($inp),$inp
+	sub	\$16,$len
+	jnc	.Lcbc_enc_loop
+	jmp	.Lcbc_done
+.align	16
+.Lcbc_dec_loop:
+	movdqu	($inp),%xmm0
+	movdqa	%xmm0,%xmm7
+	call	_vpaes_decrypt_core
+	pxor	%xmm6,%xmm0
+	movdqa	%xmm7,%xmm6
+	movdqu	%xmm0,($out,$inp)
+	lea	16($inp),$inp
+	sub	\$16,$len
+	jnc	.Lcbc_dec_loop
+.Lcbc_done:
+	movdqu	%xmm6,($ivp)		# save IV
+___
+$code.=<<___ if ($win64);
+	movaps	0x10(%rsp),%xmm6
+	movaps	0x20(%rsp),%xmm7
+	movaps	0x30(%rsp),%xmm8
+	movaps	0x40(%rsp),%xmm9
+	movaps	0x50(%rsp),%xmm10
+	movaps	0x60(%rsp),%xmm11
+	movaps	0x70(%rsp),%xmm12
+	movaps	0x80(%rsp),%xmm13
+	movaps	0x90(%rsp),%xmm14
+	movaps	0xa0(%rsp),%xmm15
+	lea	0xb8(%rsp),%rsp
+.Lcbc_epilogue:
+___
+$code.=<<___;
+.Lcbc_abort:
+	ret
+.cfi_endproc
+.size	${PREFIX}_cbc_encrypt,.-${PREFIX}_cbc_encrypt
+___
+}
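+# For reference, the chaining implemented by the two loops above as a
+# minimal C sketch (vpaes_encrypt_block/vpaes_decrypt_block are
+# hypothetical names for the one-block cores; the final IV write-back
+# corresponds to the movdqu at .Lcbc_done):
+#
+#     for (; len >= 16; in += 16, out += 16, len -= 16) {
+#         unsigned char tmp[16];
+#         int i;
+#         if (enc) {
+#             for (i = 0; i < 16; i++) tmp[i] = in[i] ^ iv[i];
+#             vpaes_encrypt_block(tmp, out, key);   /* C = E(P ^ IV) */
+#             memcpy(iv, out, 16);                  /* IV <- C   */
+#         } else {
+#             memcpy(tmp, in, 16);                  /* save C first:     */
+#             vpaes_decrypt_block(in, out, key);    /* in may alias out  */
+#             for (i = 0; i < 16; i++)
+#                 out[i] ^= iv[i];                  /* P = D(C) ^ IV */
+#             memcpy(iv, tmp, 16);                  /* IV <- C   */
+#         }
+#     }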
+$code.=<<___;
+##
+##  _vpaes_preheat
+##
+##  Fills register %r10 -> .Lk_s0F in _vpaes_consts (loaded RIP-relative,
+##  so the code remains position-independent under -fPIC) and loads
+##  %xmm9-%xmm15 as specified below.
+##
+.type	_vpaes_preheat,\@abi-omnipotent
+.align	16
+_vpaes_preheat:
+.cfi_startproc
+	lea	.Lk_s0F(%rip), %r10
+	movdqa	-0x20(%r10), %xmm10	# .Lk_inv
+	movdqa	-0x10(%r10), %xmm11	# .Lk_inv+16
+	movdqa	0x00(%r10), %xmm9	# .Lk_s0F
+	movdqa	0x30(%r10), %xmm13	# .Lk_sb1
+	movdqa	0x40(%r10), %xmm12	# .Lk_sb1+16
+	movdqa	0x50(%r10), %xmm15	# .Lk_sb2
+	movdqa	0x60(%r10), %xmm14	# .Lk_sb2+16
+	ret
+.cfi_endproc
+.size	_vpaes_preheat,.-_vpaes_preheat
+########################################################
+##                                                    ##
+##                     Constants                      ##
+##                                                    ##
+########################################################
+.type	_vpaes_consts,\@object
+.align	64
+_vpaes_consts:
+.Lk_inv:	# inv, inva
+	.quad	0x0E05060F0D080180, 0x040703090A0B0C02
+	.quad	0x01040A060F0B0780, 0x030D0E0C02050809
+
+.Lk_s0F:	# s0F
+	.quad	0x0F0F0F0F0F0F0F0F, 0x0F0F0F0F0F0F0F0F
+
+.Lk_ipt:	# input transform (lo, hi)
+	.quad	0xC2B2E8985A2A7000, 0xCABAE09052227808
+	.quad	0x4C01307D317C4D00, 0xCD80B1FCB0FDCC81
+
+.Lk_sb1:	# sb1u, sb1t
+	.quad	0xB19BE18FCB503E00, 0xA5DF7A6E142AF544
+	.quad	0x3618D415FAE22300, 0x3BF7CCC10D2ED9EF
+.Lk_sb2:	# sb2u, sb2t
+	.quad	0xE27A93C60B712400, 0x5EB7E955BC982FCD
+	.quad	0x69EB88400AE12900, 0xC2A163C8AB82234A
+.Lk_sbo:	# sbou, sbot
+	.quad	0xD0D26D176FBDC700, 0x15AABF7AC502A878
+	.quad	0xCFE474A55FBB6A00, 0x8E1E90D1412B35FA
+
+.Lk_mc_forward:	# mc_forward
+	.quad	0x0407060500030201, 0x0C0F0E0D080B0A09
+	.quad	0x080B0A0904070605, 0x000302010C0F0E0D
+	.quad	0x0C0F0E0D080B0A09, 0x0407060500030201
+	.quad	0x000302010C0F0E0D, 0x080B0A0904070605
+
+.Lk_mc_backward:# mc_backward
+	.quad	0x0605040702010003, 0x0E0D0C0F0A09080B
+	.quad	0x020100030E0D0C0F, 0x0A09080B06050407
+	.quad	0x0E0D0C0F0A09080B, 0x0605040702010003
+	.quad	0x0A09080B06050407, 0x020100030E0D0C0F
+
+.Lk_sr:		# sr
+	.quad	0x0706050403020100, 0x0F0E0D0C0B0A0908
+	.quad	0x030E09040F0A0500, 0x0B06010C07020D08
+	.quad	0x0F060D040B020900, 0x070E050C030A0108
+	.quad	0x0B0E0104070A0D00, 0x0306090C0F020508
+
+.Lk_rcon:	# rcon
+	.quad	0x1F8391B9AF9DEEB6, 0x702A98084D7C7D81
+
+.Lk_s63:	# s63: all equal to 0x63 transformed
+	.quad	0x5B5B5B5B5B5B5B5B, 0x5B5B5B5B5B5B5B5B
+
+.Lk_opt:	# output transform
+	.quad	0xFF9F4929D6B66000, 0xF7974121DEBE6808
+	.quad	0x01EDBD5150BCEC00, 0xE10D5DB1B05C0CE0
+
+.Lk_deskew:	# deskew tables: inverts the sbox's "skew"
+	.quad	0x07E4A34047A4E300, 0x1DFEB95A5DBEF91A
+	.quad	0x5F36B5DC83EA6900, 0x2841C2ABF49D1E77
+
+##
+##  Decryption stuff
+##  Key schedule constants
+##
+.Lk_dksd:	# decryption key schedule: invskew x*D
+	.quad	0xFEB91A5DA3E44700, 0x0740E3A45A1DBEF9
+	.quad	0x41C277F4B5368300, 0x5FDC69EAAB289D1E
+.Lk_dksb:	# decryption key schedule: invskew x*B
+	.quad	0x9A4FCA1F8550D500, 0x03D653861CC94C99
+	.quad	0x115BEDA7B6FC4A00, 0xD993256F7E3482C8
+.Lk_dkse:	# decryption key schedule: invskew x*E + 0x63
+	.quad	0xD5031CCA1FC9D600, 0x53859A4C994F5086
+	.quad	0xA23196054FDC7BE8, 0xCD5EF96A20B31487
+.Lk_dks9:	# decryption key schedule: invskew x*9
+	.quad	0xB6116FC87ED9A700, 0x4AED933482255BFC
+	.quad	0x4576516227143300, 0x8BB89FACE9DAFDCE
+
+##
+##  Decryption stuff
+##  Round function constants
+##
+.Lk_dipt:	# decryption input transform
+	.quad	0x0F505B040B545F00, 0x154A411E114E451A
+	.quad	0x86E383E660056500, 0x12771772F491F194
+
+.Lk_dsb9:	# decryption sbox output *9*u, *9*t
+	.quad	0x851C03539A86D600, 0xCAD51F504F994CC9
+	.quad	0xC03B1789ECD74900, 0x725E2C9EB2FBA565
+.Lk_dsbd:	# decryption sbox output *D*u, *D*t
+	.quad	0x7D57CCDFE6B1A200, 0xF56E9B13882A4439
+	.quad	0x3CE2FAF724C6CB00, 0x2931180D15DEEFD3
+.Lk_dsbb:	# decryption sbox output *B*u, *B*t
+	.quad	0xD022649296B44200, 0x602646F6B0F2D404
+	.quad	0xC19498A6CD596700, 0xF3FF0C3E3255AA6B
+.Lk_dsbe:	# decryption sbox output *E*u, *E*t
+	.quad	0x46F2929626D4D000, 0x2242600464B4F6B0
+	.quad	0x0C55A6CDFFAAC100, 0x9467F36B98593E32
+.Lk_dsbo:	# decryption sbox final output
+	.quad	0x1387EA537EF94000, 0xC7AA6DB9D4943E2D
+	.quad	0x12D7560F93441D00, 0xCA4B8159D8C58E9C
+.asciz	"Vector Permutation AES for x86_64/SSSE3, Mike Hamburg (Stanford University)"
+.align	64
+.size	_vpaes_consts,.-_vpaes_consts
+___
+
+if ($win64) {
+# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
+#		CONTEXT *context,DISPATCHER_CONTEXT *disp)
+$rec="%rcx";
+$frame="%rdx";
+$context="%r8";
+$disp="%r9";
+
+$code.=<<___;
+.extern	__imp_RtlVirtualUnwind
+.type	se_handler,\@abi-omnipotent
+.align	16
+se_handler:
+	push	%rsi
+	push	%rdi
+	push	%rbx
+	push	%rbp
+	push	%r12
+	push	%r13
+	push	%r14
+	push	%r15
+	pushfq
+	sub	\$64,%rsp
+
+	mov	120($context),%rax	# pull context->Rax
+	mov	248($context),%rbx	# pull context->Rip
+
+	mov	8($disp),%rsi		# disp->ImageBase
+	mov	56($disp),%r11		# disp->HandlerData
+
+	mov	0(%r11),%r10d		# HandlerData[0]
+	lea	(%rsi,%r10),%r10	# prologue label
+	cmp	%r10,%rbx		# context->Rip<prologue label
+	jb	.Lin_prologue
+
+	mov	152($context),%rax	# pull context->Rsp
+
+	mov	4(%r11),%r10d		# HandlerData[1]
+	lea	(%rsi,%r10),%r10	# epilogue label
+	cmp	%r10,%rbx		# context->Rip>=epilogue label
+	jae	.Lin_prologue
+
+	lea	16(%rax),%rsi		# %xmm save area
+	lea	512($context),%rdi	# &context.Xmm6
+	mov	\$20,%ecx		# 10*sizeof(%xmm0)/sizeof(%rax)
+	.long	0xa548f3fc		# cld; rep movsq
+	lea	0xb8(%rax),%rax		# adjust stack pointer
+
+.Lin_prologue:
+	mov	8(%rax),%rdi
+	mov	16(%rax),%rsi
+	mov	%rax,152($context)	# restore context->Rsp
+	mov	%rsi,168($context)	# restore context->Rsi
+	mov	%rdi,176($context)	# restore context->Rdi
+
+	mov	40($disp),%rdi		# disp->ContextRecord
+	mov	$context,%rsi		# context
+	mov	\$`1232/8`,%ecx		# sizeof(CONTEXT)
+	.long	0xa548f3fc		# cld; rep movsq
+
+	mov	$disp,%rsi
+	xor	%rcx,%rcx		# arg1, UNW_FLAG_NHANDLER
+	mov	8(%rsi),%rdx		# arg2, disp->ImageBase
+	mov	0(%rsi),%r8		# arg3, disp->ControlPc
+	mov	16(%rsi),%r9		# arg4, disp->FunctionEntry
+	mov	40(%rsi),%r10		# disp->ContextRecord
+	lea	56(%rsi),%r11		# &disp->HandlerData
+	lea	24(%rsi),%r12		# &disp->EstablisherFrame
+	mov	%r10,32(%rsp)		# arg5
+	mov	%r11,40(%rsp)		# arg6
+	mov	%r12,48(%rsp)		# arg7
+	mov	%rcx,56(%rsp)		# arg8, (NULL)
+	call	*__imp_RtlVirtualUnwind(%rip)
+
+	mov	\$1,%eax		# ExceptionContinueSearch
+	add	\$64,%rsp
+	popfq
+	pop	%r15
+	pop	%r14
+	pop	%r13
+	pop	%r12
+	pop	%rbp
+	pop	%rbx
+	pop	%rdi
+	pop	%rsi
+	ret
+.size	se_handler,.-se_handler
+
+.section	.pdata
+.align	4
+	.rva	.LSEH_begin_${PREFIX}_set_encrypt_key
+	.rva	.LSEH_end_${PREFIX}_set_encrypt_key
+	.rva	.LSEH_info_${PREFIX}_set_encrypt_key
+
+	.rva	.LSEH_begin_${PREFIX}_set_decrypt_key
+	.rva	.LSEH_end_${PREFIX}_set_decrypt_key
+	.rva	.LSEH_info_${PREFIX}_set_decrypt_key
+
+	.rva	.LSEH_begin_${PREFIX}_encrypt
+	.rva	.LSEH_end_${PREFIX}_encrypt
+	.rva	.LSEH_info_${PREFIX}_encrypt
+
+	.rva	.LSEH_begin_${PREFIX}_decrypt
+	.rva	.LSEH_end_${PREFIX}_decrypt
+	.rva	.LSEH_info_${PREFIX}_decrypt
+
+	.rva	.LSEH_begin_${PREFIX}_cbc_encrypt
+	.rva	.LSEH_end_${PREFIX}_cbc_encrypt
+	.rva	.LSEH_info_${PREFIX}_cbc_encrypt
+
+.section	.xdata
+.align	8
+.LSEH_info_${PREFIX}_set_encrypt_key:
+	.byte	9,0,0,0
+	.rva	se_handler
+	.rva	.Lenc_key_body,.Lenc_key_epilogue	# HandlerData[]
+.LSEH_info_${PREFIX}_set_decrypt_key:
+	.byte	9,0,0,0
+	.rva	se_handler
+	.rva	.Ldec_key_body,.Ldec_key_epilogue	# HandlerData[]
+.LSEH_info_${PREFIX}_encrypt:
+	.byte	9,0,0,0
+	.rva	se_handler
+	.rva	.Lenc_body,.Lenc_epilogue		# HandlerData[]
+.LSEH_info_${PREFIX}_decrypt:
+	.byte	9,0,0,0
+	.rva	se_handler
+	.rva	.Ldec_body,.Ldec_epilogue		# HandlerData[]
+.LSEH_info_${PREFIX}_cbc_encrypt:
+	.byte	9,0,0,0
+	.rva	se_handler
+	.rva	.Lcbc_body,.Lcbc_epilogue		# HandlerData[]
+___
+}
+
+$code =~ s/\`([^\`]*)\`/eval($1)/gem;
+
+print $code;
+
+close STDOUT or die "error closing STDOUT: $!";
diff --git a/ap/lib/libssl/openssl-1.1.1o/crypto/aes/build.info b/ap/lib/libssl/openssl-1.1.1o/crypto/aes/build.info
new file mode 100644
index 0000000..0f04863
--- /dev/null
+++ b/ap/lib/libssl/openssl-1.1.1o/crypto/aes/build.info
@@ -0,0 +1,64 @@
+LIBS=../../libcrypto
+SOURCE[../../libcrypto]=\
+        aes_misc.c aes_ecb.c aes_cfb.c aes_ofb.c \
+        aes_ige.c aes_wrap.c {- $target{aes_asm_src} -}
+
+GENERATE[aes-ia64.s]=asm/aes-ia64.S
+
+GENERATE[aes-586.s]=asm/aes-586.pl \
+        $(PERLASM_SCHEME) $(LIB_CFLAGS) $(LIB_CPPFLAGS) $(PROCESSOR)
+DEPEND[aes-586.s]=../perlasm/x86asm.pl
+GENERATE[vpaes-x86.s]=asm/vpaes-x86.pl \
+        $(PERLASM_SCHEME) $(LIB_CFLAGS) $(LIB_CPPFLAGS) $(PROCESSOR)
+DEPEND[vpaes-x86.s]=../perlasm/x86asm.pl
+GENERATE[aesni-x86.s]=asm/aesni-x86.pl \
+        $(PERLASM_SCHEME) $(LIB_CFLAGS) $(LIB_CPPFLAGS) $(PROCESSOR)
+DEPEND[aesni-x86.s]=../perlasm/x86asm.pl
+
+GENERATE[aes-x86_64.s]=asm/aes-x86_64.pl $(PERLASM_SCHEME)
+GENERATE[vpaes-x86_64.s]=asm/vpaes-x86_64.pl $(PERLASM_SCHEME)
+GENERATE[bsaes-x86_64.s]=asm/bsaes-x86_64.pl $(PERLASM_SCHEME)
+GENERATE[aesni-x86_64.s]=asm/aesni-x86_64.pl $(PERLASM_SCHEME)
+GENERATE[aesni-sha1-x86_64.s]=asm/aesni-sha1-x86_64.pl $(PERLASM_SCHEME)
+GENERATE[aesni-sha256-x86_64.s]=asm/aesni-sha256-x86_64.pl $(PERLASM_SCHEME)
+GENERATE[aesni-mb-x86_64.s]=asm/aesni-mb-x86_64.pl $(PERLASM_SCHEME)
+
+GENERATE[aes-sparcv9.S]=asm/aes-sparcv9.pl $(PERLASM_SCHEME)
+INCLUDE[aes-sparcv9.o]=..
+GENERATE[aest4-sparcv9.S]=asm/aest4-sparcv9.pl $(PERLASM_SCHEME)
+INCLUDE[aest4-sparcv9.o]=..
+DEPEND[aest4-sparcv9.S]=../perlasm/sparcv9_modes.pl
+GENERATE[aesfx-sparcv9.S]=asm/aesfx-sparcv9.pl $(PERLASM_SCHEME)
+INCLUDE[aesfx-sparcv9.o]=..
+
+GENERATE[aes-ppc.s]=asm/aes-ppc.pl $(PERLASM_SCHEME)
+GENERATE[vpaes-ppc.s]=asm/vpaes-ppc.pl $(PERLASM_SCHEME)
+GENERATE[aesp8-ppc.s]=asm/aesp8-ppc.pl $(PERLASM_SCHEME)
+
+GENERATE[aes-parisc.s]=asm/aes-parisc.pl $(PERLASM_SCHEME)
+
+GENERATE[aes-mips.S]=asm/aes-mips.pl $(PERLASM_SCHEME)
+INCLUDE[aes-mips.o]=..
+
+GENERATE[aesv8-armx.S]=asm/aesv8-armx.pl $(PERLASM_SCHEME)
+INCLUDE[aesv8-armx.o]=..
+GENERATE[vpaes-armv8.S]=asm/vpaes-armv8.pl $(PERLASM_SCHEME)
+
+GENERATE[aes-armv4.S]=asm/aes-armv4.pl $(PERLASM_SCHEME)
+INCLUDE[aes-armv4.o]=..
+GENERATE[bsaes-armv7.S]=asm/bsaes-armv7.pl $(PERLASM_SCHEME)
+INCLUDE[bsaes-armv7.o]=..
+
+GENERATE[aes-s390x.S]=asm/aes-s390x.pl $(PERLASM_SCHEME)
+INCLUDE[aes-s390x.o]=..
+
+BEGINRAW[Makefile]
+##### AES assembler implementations
+
+# GNU make "catch all"
+{- $builddir -}/aes-%.S:	{- $sourcedir -}/asm/aes-%.pl
+	CC="$(CC)" $(PERL) $< $(PERLASM_SCHEME) $@
+{- $builddir -}/bsaes-%.S:	{- $sourcedir -}/asm/bsaes-%.pl
+	CC="$(CC)" $(PERL) $< $(PERLASM_SCHEME) $@
+
+ENDRAW[Makefile]