[T106][ZXW-22]7520V3SCV2.01.01.02P42U09_VEC_V0.8_AP_VEC origin source commit

Change-Id: Ic6e05d89ecd62fc34f82b23dcf306c93764aec4b
diff --git a/ap/lib/libssl/openssl-1.1.1o/crypto/chacha/asm/chacha-armv4.pl b/ap/lib/libssl/openssl-1.1.1o/crypto/chacha/asm/chacha-armv4.pl
new file mode 100755
index 0000000..81c616a
--- /dev/null
+++ b/ap/lib/libssl/openssl-1.1.1o/crypto/chacha/asm/chacha-armv4.pl
@@ -0,0 +1,1160 @@
+#! /usr/bin/env perl
+# Copyright 2016-2020 The OpenSSL Project Authors. All Rights Reserved.
+#
+# Licensed under the OpenSSL license (the "License").  You may not use
+# this file except in compliance with the License.  You can obtain a copy
+# in the file LICENSE in the source distribution or at
+# https://www.openssl.org/source/license.html
+
+#
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+#
+# December 2014
+#
+# ChaCha20 for ARMv4.
+#
+# Performance in cycles per byte out of large buffer.
+#
+#			IALU/gcc-4.4    1xNEON      3xNEON+1xIALU
+#
+# Cortex-A5		19.3(*)/+95%    21.8        14.1
+# Cortex-A8		10.5(*)/+160%   13.9        6.35
+# Cortex-A9		12.9(**)/+110%  14.3        6.50
+# Cortex-A15		11.0/+40%       16.0        5.00
+# Snapdragon S4		11.5/+125%      13.6        4.90
+#
+# (*)	most "favourable" result for aligned data on little-endian
+#	processor, result for misaligned data is 10-15% lower;
+# (**)	this result is a trade-off: it can be improved by 20%,
+#	but then Snapdragon S4 and Cortex-A8 results get
+#	20-25% worse;
+
+$flavour = shift;
+if ($flavour=~/\w[\w\-]*\.\w+$/) { $output=$flavour; undef $flavour; }
+else { while (($output=shift) && ($output!~/\w[\w\-]*\.\w+$/)) {} }
+
+if ($flavour && $flavour ne "void") {
+    $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+    ( $xlate="${dir}arm-xlate.pl" and -f $xlate ) or
+    ( $xlate="${dir}../../perlasm/arm-xlate.pl" and -f $xlate) or
+    die "can't locate arm-xlate.pl";
+
+    open STDOUT,"| \"$^X\" $xlate $flavour $output";
+} else {
+    open STDOUT,">$output";
+}
+
+sub AUTOLOAD()		# thunk [simplified] x86-style perlasm
+{ my $opcode = $AUTOLOAD; $opcode =~ s/.*:://; $opcode =~ s/_/\./;
+  my $arg = pop;
+    $arg = "#$arg" if ($arg*1 eq $arg);
+    $code .= "\t$opcode\t".join(',',@_,$arg)."\n";
+}
+
+my @x=map("r$_",(0..7,"x","x","x","x",12,"x",14,"x"));
+my @t=map("r$_",(8..11));
+
+sub ROUND {
+my ($a0,$b0,$c0,$d0)=@_;
+my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));
+my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
+my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));
+my $odd = $d0&1;
+my ($xc,$xc_) = (@t[0..1]);
+my ($xd,$xd_) = $odd ? (@t[2],@x[$d1]) : (@x[$d0],@t[2]);
+my @ret;
+
+	# Consider order in which variables are addressed by their
+	# index:
+	#
+	#       a   b   c   d
+	#
+	#       0   4   8  12 < even round
+	#       1   5   9  13
+	#       2   6  10  14
+	#       3   7  11  15
+	#       0   5  10  15 < odd round
+	#       1   6  11  12
+	#       2   7   8  13
+	#       3   4   9  14
+	#
+	# 'a', 'b' are permanently allocated in registers, @x[0..7],
+	# while 'c's and pair of 'd's are maintained in memory. If
+	# you observe 'c' column, you'll notice that pair of 'c's is
+	# invariant between rounds. This means that we have to reload
+	# them once per round, in the middle. This is why you'll see
+	# bunch of 'c' stores and loads in the middle, but none in
+	# the beginning or end. If you observe 'd' column, you'll
+	# notice that 15 and 13 are reused in next pair of rounds.
+	# This is why these two are chosen for offloading to memory,
+	# to make loads count more.
+							push @ret,(
+	"&add	(@x[$a0],@x[$a0],@x[$b0])",
+	"&mov	($xd,$xd,'ror#16')",
+	 "&add	(@x[$a1],@x[$a1],@x[$b1])",
+	 "&mov	($xd_,$xd_,'ror#16')",
+	"&eor	($xd,$xd,@x[$a0],'ror#16')",
+	 "&eor	($xd_,$xd_,@x[$a1],'ror#16')",
+
+	"&add	($xc,$xc,$xd)",
+	"&mov	(@x[$b0],@x[$b0],'ror#20')",
+	 "&add	($xc_,$xc_,$xd_)",
+	 "&mov	(@x[$b1],@x[$b1],'ror#20')",
+	"&eor	(@x[$b0],@x[$b0],$xc,'ror#20')",
+	 "&eor	(@x[$b1],@x[$b1],$xc_,'ror#20')",
+
+	"&add	(@x[$a0],@x[$a0],@x[$b0])",
+	"&mov	($xd,$xd,'ror#24')",
+	 "&add	(@x[$a1],@x[$a1],@x[$b1])",
+	 "&mov	($xd_,$xd_,'ror#24')",
+	"&eor	($xd,$xd,@x[$a0],'ror#24')",
+	 "&eor	($xd_,$xd_,@x[$a1],'ror#24')",
+
+	"&add	($xc,$xc,$xd)",
+	"&mov	(@x[$b0],@x[$b0],'ror#25')"		);
+							push @ret,(
+	"&str	($xd,'[sp,#4*(16+$d0)]')",
+	"&ldr	($xd,'[sp,#4*(16+$d2)]')"		) if ($odd);
+							push @ret,(
+	 "&add	($xc_,$xc_,$xd_)",
+	 "&mov	(@x[$b1],@x[$b1],'ror#25')"		);
+							push @ret,(
+	 "&str	($xd_,'[sp,#4*(16+$d1)]')",
+	 "&ldr	($xd_,'[sp,#4*(16+$d3)]')"		) if (!$odd);
+							push @ret,(
+	"&eor	(@x[$b0],@x[$b0],$xc,'ror#25')",
+	 "&eor	(@x[$b1],@x[$b1],$xc_,'ror#25')"	);
+
+	$xd=@x[$d2]					if (!$odd);
+	$xd_=@x[$d3]					if ($odd);
+							push @ret,(
+	"&str	($xc,'[sp,#4*(16+$c0)]')",
+	"&ldr	($xc,'[sp,#4*(16+$c2)]')",
+	"&add	(@x[$a2],@x[$a2],@x[$b2])",
+	"&mov	($xd,$xd,'ror#16')",
+	 "&str	($xc_,'[sp,#4*(16+$c1)]')",
+	 "&ldr	($xc_,'[sp,#4*(16+$c3)]')",
+	 "&add	(@x[$a3],@x[$a3],@x[$b3])",
+	 "&mov	($xd_,$xd_,'ror#16')",
+	"&eor	($xd,$xd,@x[$a2],'ror#16')",
+	 "&eor	($xd_,$xd_,@x[$a3],'ror#16')",
+
+	"&add	($xc,$xc,$xd)",
+	"&mov	(@x[$b2],@x[$b2],'ror#20')",
+	 "&add	($xc_,$xc_,$xd_)",
+	 "&mov	(@x[$b3],@x[$b3],'ror#20')",
+	"&eor	(@x[$b2],@x[$b2],$xc,'ror#20')",
+	 "&eor	(@x[$b3],@x[$b3],$xc_,'ror#20')",
+
+	"&add	(@x[$a2],@x[$a2],@x[$b2])",
+	"&mov	($xd,$xd,'ror#24')",
+	 "&add	(@x[$a3],@x[$a3],@x[$b3])",
+	 "&mov	($xd_,$xd_,'ror#24')",
+	"&eor	($xd,$xd,@x[$a2],'ror#24')",
+	 "&eor	($xd_,$xd_,@x[$a3],'ror#24')",
+
+	"&add	($xc,$xc,$xd)",
+	"&mov	(@x[$b2],@x[$b2],'ror#25')",
+	 "&add	($xc_,$xc_,$xd_)",
+	 "&mov	(@x[$b3],@x[$b3],'ror#25')",
+	"&eor	(@x[$b2],@x[$b2],$xc,'ror#25')",
+	 "&eor	(@x[$b3],@x[$b3],$xc_,'ror#25')"	);
+
+	@ret;
+}
+
+$code.=<<___;
+#include "arm_arch.h"
+
+.text
+#if defined(__thumb2__) || defined(__clang__)
+.syntax	unified
+#endif
+#if defined(__thumb2__)
+.thumb
+#else
+.code	32
+#endif
+
+#if defined(__thumb2__) || defined(__clang__)
+#define ldrhsb	ldrbhs
+#endif
+
+.align	5
+.Lsigma:
+.long	0x61707865,0x3320646e,0x79622d32,0x6b206574	@ endian-neutral
+.Lone:
+.long	1,0,0,0
+#if __ARM_MAX_ARCH__>=7
+.LOPENSSL_armcap:
+.word   OPENSSL_armcap_P-.LChaCha20_ctr32
+#else
+.word	-1
+#endif
+
+.globl	ChaCha20_ctr32
+.type	ChaCha20_ctr32,%function
+.align	5
+ChaCha20_ctr32:
+.LChaCha20_ctr32:
+	ldr	r12,[sp,#0]		@ pull pointer to counter and nonce
+	stmdb	sp!,{r0-r2,r4-r11,lr}
+#if __ARM_ARCH__<7 && !defined(__thumb2__)
+	sub	r14,pc,#16		@ ChaCha20_ctr32
+#else
+	adr	r14,.LChaCha20_ctr32
+#endif
+	cmp	r2,#0			@ len==0?
+#ifdef	__thumb2__
+	itt	eq
+#endif
+	addeq	sp,sp,#4*3
+	beq	.Lno_data
+#if __ARM_MAX_ARCH__>=7
+	cmp	r2,#192			@ test len
+	bls	.Lshort
+	ldr	r4,[r14,#-32]
+	ldr	r4,[r14,r4]
+# ifdef	__APPLE__
+	ldr	r4,[r4]
+# endif
+	tst	r4,#ARMV7_NEON
+	bne	.LChaCha20_neon
+.Lshort:
+#endif
+	ldmia	r12,{r4-r7}		@ load counter and nonce
+	sub	sp,sp,#4*(16)		@ off-load area
+	sub	r14,r14,#64		@ .Lsigma
+	stmdb	sp!,{r4-r7}		@ copy counter and nonce
+	ldmia	r3,{r4-r11}		@ load key
+	ldmia	r14,{r0-r3}		@ load sigma
+	stmdb	sp!,{r4-r11}		@ copy key
+	stmdb	sp!,{r0-r3}		@ copy sigma
+	str	r10,[sp,#4*(16+10)]	@ off-load "@x[10]"
+	str	r11,[sp,#4*(16+11)]	@ off-load "@x[11]"
+	b	.Loop_outer_enter
+
+.align	4
+.Loop_outer:
+	ldmia	sp,{r0-r9}		@ load key material
+	str	@t[3],[sp,#4*(32+2)]	@ save len
+	str	r12,  [sp,#4*(32+1)]	@ save inp
+	str	r14,  [sp,#4*(32+0)]	@ save out
+.Loop_outer_enter:
+	ldr	@t[3], [sp,#4*(15)]
+	ldr	@x[12],[sp,#4*(12)]	@ modulo-scheduled load
+	ldr	@t[2], [sp,#4*(13)]
+	ldr	@x[14],[sp,#4*(14)]
+	str	@t[3], [sp,#4*(16+15)]
+	mov	@t[3],#10
+	b	.Loop
+
+.align	4
+.Loop:
+	subs	@t[3],@t[3],#1
+___
+	foreach (&ROUND(0, 4, 8,12)) { eval; }
+	foreach (&ROUND(0, 5,10,15)) { eval; }
+$code.=<<___;
+	bne	.Loop
+
+	ldr	@t[3],[sp,#4*(32+2)]	@ load len
+
+	str	@t[0], [sp,#4*(16+8)]	@ modulo-scheduled store
+	str	@t[1], [sp,#4*(16+9)]
+	str	@x[12],[sp,#4*(16+12)]
+	str	@t[2], [sp,#4*(16+13)]
+	str	@x[14],[sp,#4*(16+14)]
+
+	@ at this point we have first half of 512-bit result in
+	@ @x[0-7] and second half at sp+4*(16+8)
+
+	cmp	@t[3],#64		@ done yet?
+#ifdef	__thumb2__
+	itete	lo
+#endif
+	addlo	r12,sp,#4*(0)		@ shortcut or ...
+	ldrhs	r12,[sp,#4*(32+1)]	@ ... load inp
+	addlo	r14,sp,#4*(0)		@ shortcut or ...
+	ldrhs	r14,[sp,#4*(32+0)]	@ ... load out
+
+	ldr	@t[0],[sp,#4*(0)]	@ load key material
+	ldr	@t[1],[sp,#4*(1)]
+
+#if __ARM_ARCH__>=6 || !defined(__ARMEB__)
+# if __ARM_ARCH__<7
+	orr	@t[2],r12,r14
+	tst	@t[2],#3		@ are input and output aligned?
+	ldr	@t[2],[sp,#4*(2)]
+	bne	.Lunaligned
+	cmp	@t[3],#64		@ restore flags
+# else
+	ldr	@t[2],[sp,#4*(2)]
+# endif
+	ldr	@t[3],[sp,#4*(3)]
+
+	add	@x[0],@x[0],@t[0]	@ accumulate key material
+	add	@x[1],@x[1],@t[1]
+# ifdef	__thumb2__
+	itt	hs
+# endif
+	ldrhs	@t[0],[r12],#16		@ load input
+	ldrhs	@t[1],[r12,#-12]
+
+	add	@x[2],@x[2],@t[2]
+	add	@x[3],@x[3],@t[3]
+# ifdef	__thumb2__
+	itt	hs
+# endif
+	ldrhs	@t[2],[r12,#-8]
+	ldrhs	@t[3],[r12,#-4]
+# if __ARM_ARCH__>=6 && defined(__ARMEB__)
+	rev	@x[0],@x[0]
+	rev	@x[1],@x[1]
+	rev	@x[2],@x[2]
+	rev	@x[3],@x[3]
+# endif
+# ifdef	__thumb2__
+	itt	hs
+# endif
+	eorhs	@x[0],@x[0],@t[0]	@ xor with input
+	eorhs	@x[1],@x[1],@t[1]
+	 add	@t[0],sp,#4*(4)
+	str	@x[0],[r14],#16		@ store output
+# ifdef	__thumb2__
+	itt	hs
+# endif
+	eorhs	@x[2],@x[2],@t[2]
+	eorhs	@x[3],@x[3],@t[3]
+	 ldmia	@t[0],{@t[0]-@t[3]}	@ load key material
+	str	@x[1],[r14,#-12]
+	str	@x[2],[r14,#-8]
+	str	@x[3],[r14,#-4]
+
+	add	@x[4],@x[4],@t[0]	@ accumulate key material
+	add	@x[5],@x[5],@t[1]
+# ifdef	__thumb2__
+	itt	hs
+# endif
+	ldrhs	@t[0],[r12],#16		@ load input
+	ldrhs	@t[1],[r12,#-12]
+	add	@x[6],@x[6],@t[2]
+	add	@x[7],@x[7],@t[3]
+# ifdef	__thumb2__
+	itt	hs
+# endif
+	ldrhs	@t[2],[r12,#-8]
+	ldrhs	@t[3],[r12,#-4]
+# if __ARM_ARCH__>=6 && defined(__ARMEB__)
+	rev	@x[4],@x[4]
+	rev	@x[5],@x[5]
+	rev	@x[6],@x[6]
+	rev	@x[7],@x[7]
+# endif
+# ifdef	__thumb2__
+	itt	hs
+# endif
+	eorhs	@x[4],@x[4],@t[0]
+	eorhs	@x[5],@x[5],@t[1]
+	 add	@t[0],sp,#4*(8)
+	str	@x[4],[r14],#16		@ store output
+# ifdef	__thumb2__
+	itt	hs
+# endif
+	eorhs	@x[6],@x[6],@t[2]
+	eorhs	@x[7],@x[7],@t[3]
+	str	@x[5],[r14,#-12]
+	 ldmia	@t[0],{@t[0]-@t[3]}	@ load key material
+	str	@x[6],[r14,#-8]
+	 add	@x[0],sp,#4*(16+8)
+	str	@x[7],[r14,#-4]
+
+	ldmia	@x[0],{@x[0]-@x[7]}	@ load second half
+
+	add	@x[0],@x[0],@t[0]	@ accumulate key material
+	add	@x[1],@x[1],@t[1]
+# ifdef	__thumb2__
+	itt	hs
+# endif
+	ldrhs	@t[0],[r12],#16		@ load input
+	ldrhs	@t[1],[r12,#-12]
+# ifdef	__thumb2__
+	itt	hi
+# endif
+	 strhi	@t[2],[sp,#4*(16+10)]	@ copy "@x[10]" while at it
+	 strhi	@t[3],[sp,#4*(16+11)]	@ copy "@x[11]" while at it
+	add	@x[2],@x[2],@t[2]
+	add	@x[3],@x[3],@t[3]
+# ifdef	__thumb2__
+	itt	hs
+# endif
+	ldrhs	@t[2],[r12,#-8]
+	ldrhs	@t[3],[r12,#-4]
+# if __ARM_ARCH__>=6 && defined(__ARMEB__)
+	rev	@x[0],@x[0]
+	rev	@x[1],@x[1]
+	rev	@x[2],@x[2]
+	rev	@x[3],@x[3]
+# endif
+# ifdef	__thumb2__
+	itt	hs
+# endif
+	eorhs	@x[0],@x[0],@t[0]
+	eorhs	@x[1],@x[1],@t[1]
+	 add	@t[0],sp,#4*(12)
+	str	@x[0],[r14],#16		@ store output
+# ifdef	__thumb2__
+	itt	hs
+# endif
+	eorhs	@x[2],@x[2],@t[2]
+	eorhs	@x[3],@x[3],@t[3]
+	str	@x[1],[r14,#-12]
+	 ldmia	@t[0],{@t[0]-@t[3]}	@ load key material
+	str	@x[2],[r14,#-8]
+	str	@x[3],[r14,#-4]
+
+	add	@x[4],@x[4],@t[0]	@ accumulate key material
+	add	@x[5],@x[5],@t[1]
+# ifdef	__thumb2__
+	itt	hi
+# endif
+	 addhi	@t[0],@t[0],#1		@ next counter value
+	 strhi	@t[0],[sp,#4*(12)]	@ save next counter value
+# ifdef	__thumb2__
+	itt	hs
+# endif
+	ldrhs	@t[0],[r12],#16		@ load input
+	ldrhs	@t[1],[r12,#-12]
+	add	@x[6],@x[6],@t[2]
+	add	@x[7],@x[7],@t[3]
+# ifdef	__thumb2__
+	itt	hs
+# endif
+	ldrhs	@t[2],[r12,#-8]
+	ldrhs	@t[3],[r12,#-4]
+# if __ARM_ARCH__>=6 && defined(__ARMEB__)
+	rev	@x[4],@x[4]
+	rev	@x[5],@x[5]
+	rev	@x[6],@x[6]
+	rev	@x[7],@x[7]
+# endif
+# ifdef	__thumb2__
+	itt	hs
+# endif
+	eorhs	@x[4],@x[4],@t[0]
+	eorhs	@x[5],@x[5],@t[1]
+# ifdef	__thumb2__
+	 it	ne
+# endif
+	 ldrne	@t[0],[sp,#4*(32+2)]	@ re-load len
+# ifdef	__thumb2__
+	itt	hs
+# endif
+	eorhs	@x[6],@x[6],@t[2]
+	eorhs	@x[7],@x[7],@t[3]
+	str	@x[4],[r14],#16		@ store output
+	str	@x[5],[r14,#-12]
+# ifdef	__thumb2__
+	it	hs
+# endif
+	 subhs	@t[3],@t[0],#64		@ len-=64
+	str	@x[6],[r14,#-8]
+	str	@x[7],[r14,#-4]
+	bhi	.Loop_outer
+
+	beq	.Ldone
+# if __ARM_ARCH__<7
+	b	.Ltail
+
+.align	4
+.Lunaligned:				@ unaligned endian-neutral path
+	cmp	@t[3],#64		@ restore flags
+# endif
+#endif
+#if __ARM_ARCH__<7
+	ldr	@t[3],[sp,#4*(3)]
+___
+for ($i=0;$i<16;$i+=4) {
+my $j=$i&0x7;
+
+$code.=<<___	if ($i==4);
+	add	@x[0],sp,#4*(16+8)
+___
+$code.=<<___	if ($i==8);
+	ldmia	@x[0],{@x[0]-@x[7]}		@ load second half
+# ifdef	__thumb2__
+	itt	hi
+# endif
+	strhi	@t[2],[sp,#4*(16+10)]		@ copy "@x[10]"
+	strhi	@t[3],[sp,#4*(16+11)]		@ copy "@x[11]"
+___
+$code.=<<___;
+	add	@x[$j+0],@x[$j+0],@t[0]		@ accumulate key material
+___
+$code.=<<___	if ($i==12);
+# ifdef	__thumb2__
+	itt	hi
+# endif
+	addhi	@t[0],@t[0],#1			@ next counter value
+	strhi	@t[0],[sp,#4*(12)]		@ save next counter value
+___
+$code.=<<___;
+	add	@x[$j+1],@x[$j+1],@t[1]
+	add	@x[$j+2],@x[$j+2],@t[2]
+# ifdef	__thumb2__
+	itete	lo
+# endif
+	eorlo	@t[0],@t[0],@t[0]		@ zero or ...
+	ldrhsb	@t[0],[r12],#16			@ ... load input
+	eorlo	@t[1],@t[1],@t[1]
+	ldrhsb	@t[1],[r12,#-12]
+
+	add	@x[$j+3],@x[$j+3],@t[3]
+# ifdef	__thumb2__
+	itete	lo
+# endif
+	eorlo	@t[2],@t[2],@t[2]
+	ldrhsb	@t[2],[r12,#-8]
+	eorlo	@t[3],@t[3],@t[3]
+	ldrhsb	@t[3],[r12,#-4]
+
+	eor	@x[$j+0],@t[0],@x[$j+0]		@ xor with input (or zero)
+	eor	@x[$j+1],@t[1],@x[$j+1]
+# ifdef	__thumb2__
+	itt	hs
+# endif
+	ldrhsb	@t[0],[r12,#-15]		@ load more input
+	ldrhsb	@t[1],[r12,#-11]
+	eor	@x[$j+2],@t[2],@x[$j+2]
+	 strb	@x[$j+0],[r14],#16		@ store output
+	eor	@x[$j+3],@t[3],@x[$j+3]
+# ifdef	__thumb2__
+	itt	hs
+# endif
+	ldrhsb	@t[2],[r12,#-7]
+	ldrhsb	@t[3],[r12,#-3]
+	 strb	@x[$j+1],[r14,#-12]
+	eor	@x[$j+0],@t[0],@x[$j+0],lsr#8
+	 strb	@x[$j+2],[r14,#-8]
+	eor	@x[$j+1],@t[1],@x[$j+1],lsr#8
+# ifdef	__thumb2__
+	itt	hs
+# endif
+	ldrhsb	@t[0],[r12,#-14]		@ load more input
+	ldrhsb	@t[1],[r12,#-10]
+	 strb	@x[$j+3],[r14,#-4]
+	eor	@x[$j+2],@t[2],@x[$j+2],lsr#8
+	 strb	@x[$j+0],[r14,#-15]
+	eor	@x[$j+3],@t[3],@x[$j+3],lsr#8
+# ifdef	__thumb2__
+	itt	hs
+# endif
+	ldrhsb	@t[2],[r12,#-6]
+	ldrhsb	@t[3],[r12,#-2]
+	 strb	@x[$j+1],[r14,#-11]
+	eor	@x[$j+0],@t[0],@x[$j+0],lsr#8
+	 strb	@x[$j+2],[r14,#-7]
+	eor	@x[$j+1],@t[1],@x[$j+1],lsr#8
+# ifdef	__thumb2__
+	itt	hs
+# endif
+	ldrhsb	@t[0],[r12,#-13]		@ load more input
+	ldrhsb	@t[1],[r12,#-9]
+	 strb	@x[$j+3],[r14,#-3]
+	eor	@x[$j+2],@t[2],@x[$j+2],lsr#8
+	 strb	@x[$j+0],[r14,#-14]
+	eor	@x[$j+3],@t[3],@x[$j+3],lsr#8
+# ifdef	__thumb2__
+	itt	hs
+# endif
+	ldrhsb	@t[2],[r12,#-5]
+	ldrhsb	@t[3],[r12,#-1]
+	 strb	@x[$j+1],[r14,#-10]
+	 strb	@x[$j+2],[r14,#-6]
+	eor	@x[$j+0],@t[0],@x[$j+0],lsr#8
+	 strb	@x[$j+3],[r14,#-2]
+	eor	@x[$j+1],@t[1],@x[$j+1],lsr#8
+	 strb	@x[$j+0],[r14,#-13]
+	eor	@x[$j+2],@t[2],@x[$j+2],lsr#8
+	 strb	@x[$j+1],[r14,#-9]
+	eor	@x[$j+3],@t[3],@x[$j+3],lsr#8
+	 strb	@x[$j+2],[r14,#-5]
+	 strb	@x[$j+3],[r14,#-1]
+___
+$code.=<<___	if ($i<12);
+	add	@t[0],sp,#4*(4+$i)
+	ldmia	@t[0],{@t[0]-@t[3]}		@ load key material
+___
+}
+$code.=<<___;
+# ifdef	__thumb2__
+	it	ne
+# endif
+	ldrne	@t[0],[sp,#4*(32+2)]		@ re-load len
+# ifdef	__thumb2__
+	it	hs
+# endif
+	subhs	@t[3],@t[0],#64			@ len-=64
+	bhi	.Loop_outer
+
+	beq	.Ldone
+#endif
+
+.Ltail:
+	ldr	r12,[sp,#4*(32+1)]	@ load inp
+	add	@t[1],sp,#4*(0)
+	ldr	r14,[sp,#4*(32+0)]	@ load out
+
+.Loop_tail:
+	ldrb	@t[2],[@t[1]],#1	@ read buffer on stack
+	ldrb	@t[3],[r12],#1		@ read input
+	subs	@t[0],@t[0],#1
+	eor	@t[3],@t[3],@t[2]
+	strb	@t[3],[r14],#1		@ store output
+	bne	.Loop_tail
+
+.Ldone:
+	add	sp,sp,#4*(32+3)
+.Lno_data:
+	ldmia	sp!,{r4-r11,pc}
+.size	ChaCha20_ctr32,.-ChaCha20_ctr32
+___
+
+{{{
+my ($a0,$b0,$c0,$d0,$a1,$b1,$c1,$d1,$a2,$b2,$c2,$d2,$t0,$t1,$t2,$t3) =
+    map("q$_",(0..15));
+
+sub NEONROUND {
+my $odd = pop;
+my ($a,$b,$c,$d,$t)=@_;
+
+	(
+	"&vadd_i32	($a,$a,$b)",
+	"&veor		($d,$d,$a)",
+	"&vrev32_16	($d,$d)",	# vrot ($d,16)
+
+	"&vadd_i32	($c,$c,$d)",
+	"&veor		($t,$b,$c)",
+	"&vshr_u32	($b,$t,20)",
+	"&vsli_32	($b,$t,12)",
+
+	"&vadd_i32	($a,$a,$b)",
+	"&veor		($t,$d,$a)",
+	"&vshr_u32	($d,$t,24)",
+	"&vsli_32	($d,$t,8)",
+
+	"&vadd_i32	($c,$c,$d)",
+	"&veor		($t,$b,$c)",
+	"&vshr_u32	($b,$t,25)",
+	"&vsli_32	($b,$t,7)",
+
+	"&vext_8	($c,$c,$c,8)",
+	"&vext_8	($b,$b,$b,$odd?12:4)",
+	"&vext_8	($d,$d,$d,$odd?4:12)"
+	);
+}
+
+$code.=<<___;
+#if __ARM_MAX_ARCH__>=7
+.arch	armv7-a
+.fpu	neon
+
+.type	ChaCha20_neon,%function
+.align	5
+ChaCha20_neon:
+	ldr		r12,[sp,#0]		@ pull pointer to counter and nonce
+	stmdb		sp!,{r0-r2,r4-r11,lr}
+.LChaCha20_neon:
+	adr		r14,.Lsigma
+	vstmdb		sp!,{d8-d15}		@ ABI spec says so
+	stmdb		sp!,{r0-r3}
+
+	vld1.32		{$b0-$c0},[r3]		@ load key
+	ldmia		r3,{r4-r11}		@ load key
+
+	sub		sp,sp,#4*(16+16)
+	vld1.32		{$d0},[r12]		@ load counter and nonce
+	add		r12,sp,#4*8
+	ldmia		r14,{r0-r3}		@ load sigma
+	vld1.32		{$a0},[r14]!		@ load sigma
+	vld1.32		{$t0},[r14]		@ one
+	vst1.32		{$c0-$d0},[r12]		@ copy 1/2key|counter|nonce
+	vst1.32		{$a0-$b0},[sp]		@ copy sigma|1/2key
+
+	str		r10,[sp,#4*(16+10)]	@ off-load "@x[10]"
+	str		r11,[sp,#4*(16+11)]	@ off-load "@x[11]"
+	vshl.i32	$t1#lo,$t0#lo,#1	@ two
+	vstr		$t0#lo,[sp,#4*(16+0)]
+	vshl.i32	$t2#lo,$t0#lo,#2	@ four
+	vstr		$t1#lo,[sp,#4*(16+2)]
+	vmov		$a1,$a0
+	vstr		$t2#lo,[sp,#4*(16+4)]
+	vmov		$a2,$a0
+	vmov		$b1,$b0
+	vmov		$b2,$b0
+	b		.Loop_neon_enter
+
+.align	4
+.Loop_neon_outer:
+	ldmia		sp,{r0-r9}		@ load key material
+	cmp		@t[3],#64*2		@ if len<=64*2
+	bls		.Lbreak_neon		@ switch to integer-only
+	vmov		$a1,$a0
+	str		@t[3],[sp,#4*(32+2)]	@ save len
+	vmov		$a2,$a0
+	str		r12,  [sp,#4*(32+1)]	@ save inp
+	vmov		$b1,$b0
+	str		r14,  [sp,#4*(32+0)]	@ save out
+	vmov		$b2,$b0
+.Loop_neon_enter:
+	ldr		@t[3], [sp,#4*(15)]
+	vadd.i32	$d1,$d0,$t0		@ counter+1
+	ldr		@x[12],[sp,#4*(12)]	@ modulo-scheduled load
+	vmov		$c1,$c0
+	ldr		@t[2], [sp,#4*(13)]
+	vmov		$c2,$c0
+	ldr		@x[14],[sp,#4*(14)]
+	vadd.i32	$d2,$d1,$t0		@ counter+2
+	str		@t[3], [sp,#4*(16+15)]
+	mov		@t[3],#10
+	add		@x[12],@x[12],#3	@ counter+3
+	b		.Loop_neon
+
+.align	4
+.Loop_neon:
+	subs		@t[3],@t[3],#1
+___
+	my @thread0=&NEONROUND($a0,$b0,$c0,$d0,$t0,0);
+	my @thread1=&NEONROUND($a1,$b1,$c1,$d1,$t1,0);
+	my @thread2=&NEONROUND($a2,$b2,$c2,$d2,$t2,0);
+	my @thread3=&ROUND(0,4,8,12);
+
+	foreach (@thread0) {
+		eval;			eval(shift(@thread3));
+		eval(shift(@thread1));	eval(shift(@thread3));
+		eval(shift(@thread2));	eval(shift(@thread3));
+	}
+
+	@thread0=&NEONROUND($a0,$b0,$c0,$d0,$t0,1);
+	@thread1=&NEONROUND($a1,$b1,$c1,$d1,$t1,1);
+	@thread2=&NEONROUND($a2,$b2,$c2,$d2,$t2,1);
+	@thread3=&ROUND(0,5,10,15);
+
+	foreach (@thread0) {
+		eval;			eval(shift(@thread3));
+		eval(shift(@thread1));	eval(shift(@thread3));
+		eval(shift(@thread2));	eval(shift(@thread3));
+	}
+$code.=<<___;
+	bne		.Loop_neon
+
+	add		@t[3],sp,#32
+	vld1.32		{$t0-$t1},[sp]		@ load key material
+	vld1.32		{$t2-$t3},[@t[3]]
+
+	ldr		@t[3],[sp,#4*(32+2)]	@ load len
+
+	str		@t[0], [sp,#4*(16+8)]	@ modulo-scheduled store
+	str		@t[1], [sp,#4*(16+9)]
+	str		@x[12],[sp,#4*(16+12)]
+	str		@t[2], [sp,#4*(16+13)]
+	str		@x[14],[sp,#4*(16+14)]
+
+	@ at this point we have first half of 512-bit result in
+	@ @x[0-7] and second half at sp+4*(16+8)
+
+	ldr		r12,[sp,#4*(32+1)]	@ load inp
+	ldr		r14,[sp,#4*(32+0)]	@ load out
+
+	vadd.i32	$a0,$a0,$t0		@ accumulate key material
+	vadd.i32	$a1,$a1,$t0
+	vadd.i32	$a2,$a2,$t0
+	vldr		$t0#lo,[sp,#4*(16+0)]	@ one
+
+	vadd.i32	$b0,$b0,$t1
+	vadd.i32	$b1,$b1,$t1
+	vadd.i32	$b2,$b2,$t1
+	vldr		$t1#lo,[sp,#4*(16+2)]	@ two
+
+	vadd.i32	$c0,$c0,$t2
+	vadd.i32	$c1,$c1,$t2
+	vadd.i32	$c2,$c2,$t2
+	vadd.i32	$d1#lo,$d1#lo,$t0#lo	@ counter+1
+	vadd.i32	$d2#lo,$d2#lo,$t1#lo	@ counter+2
+
+	vadd.i32	$d0,$d0,$t3
+	vadd.i32	$d1,$d1,$t3
+	vadd.i32	$d2,$d2,$t3
+
+	cmp		@t[3],#64*4
+	blo		.Ltail_neon
+
+	vld1.8		{$t0-$t1},[r12]!	@ load input
+	 mov		@t[3],sp
+	vld1.8		{$t2-$t3},[r12]!
+	veor		$a0,$a0,$t0		@ xor with input
+	veor		$b0,$b0,$t1
+	vld1.8		{$t0-$t1},[r12]!
+	veor		$c0,$c0,$t2
+	veor		$d0,$d0,$t3
+	vld1.8		{$t2-$t3},[r12]!
+
+	veor		$a1,$a1,$t0
+	 vst1.8		{$a0-$b0},[r14]!	@ store output
+	veor		$b1,$b1,$t1
+	vld1.8		{$t0-$t1},[r12]!
+	veor		$c1,$c1,$t2
+	 vst1.8		{$c0-$d0},[r14]!
+	veor		$d1,$d1,$t3
+	vld1.8		{$t2-$t3},[r12]!
+
+	veor		$a2,$a2,$t0
+	 vld1.32	{$a0-$b0},[@t[3]]!	@ load for next iteration
+	 veor		$t0#hi,$t0#hi,$t0#hi
+	 vldr		$t0#lo,[sp,#4*(16+4)]	@ four
+	veor		$b2,$b2,$t1
+	 vld1.32	{$c0-$d0},[@t[3]]
+	veor		$c2,$c2,$t2
+	 vst1.8		{$a1-$b1},[r14]!
+	veor		$d2,$d2,$t3
+	 vst1.8		{$c1-$d1},[r14]!
+
+	vadd.i32	$d0#lo,$d0#lo,$t0#lo	@ next counter value
+	vldr		$t0#lo,[sp,#4*(16+0)]	@ one
+
+	ldmia		sp,{@t[0]-@t[3]}	@ load key material
+	add		@x[0],@x[0],@t[0]	@ accumulate key material
+	ldr		@t[0],[r12],#16		@ load input
+	 vst1.8		{$a2-$b2},[r14]!
+	add		@x[1],@x[1],@t[1]
+	ldr		@t[1],[r12,#-12]
+	 vst1.8		{$c2-$d2},[r14]!
+	add		@x[2],@x[2],@t[2]
+	ldr		@t[2],[r12,#-8]
+	add		@x[3],@x[3],@t[3]
+	ldr		@t[3],[r12,#-4]
+# ifdef	__ARMEB__
+	rev		@x[0],@x[0]
+	rev		@x[1],@x[1]
+	rev		@x[2],@x[2]
+	rev		@x[3],@x[3]
+# endif
+	eor		@x[0],@x[0],@t[0]	@ xor with input
+	 add		@t[0],sp,#4*(4)
+	eor		@x[1],@x[1],@t[1]
+	str		@x[0],[r14],#16		@ store output
+	eor		@x[2],@x[2],@t[2]
+	str		@x[1],[r14,#-12]
+	eor		@x[3],@x[3],@t[3]
+	 ldmia		@t[0],{@t[0]-@t[3]}	@ load key material
+	str		@x[2],[r14,#-8]
+	str		@x[3],[r14,#-4]
+
+	add		@x[4],@x[4],@t[0]	@ accumulate key material
+	ldr		@t[0],[r12],#16		@ load input
+	add		@x[5],@x[5],@t[1]
+	ldr		@t[1],[r12,#-12]
+	add		@x[6],@x[6],@t[2]
+	ldr		@t[2],[r12,#-8]
+	add		@x[7],@x[7],@t[3]
+	ldr		@t[3],[r12,#-4]
+# ifdef	__ARMEB__
+	rev		@x[4],@x[4]
+	rev		@x[5],@x[5]
+	rev		@x[6],@x[6]
+	rev		@x[7],@x[7]
+# endif
+	eor		@x[4],@x[4],@t[0]
+	 add		@t[0],sp,#4*(8)
+	eor		@x[5],@x[5],@t[1]
+	str		@x[4],[r14],#16		@ store output
+	eor		@x[6],@x[6],@t[2]
+	str		@x[5],[r14,#-12]
+	eor		@x[7],@x[7],@t[3]
+	 ldmia		@t[0],{@t[0]-@t[3]}	@ load key material
+	str		@x[6],[r14,#-8]
+	 add		@x[0],sp,#4*(16+8)
+	str		@x[7],[r14,#-4]
+
+	ldmia		@x[0],{@x[0]-@x[7]}	@ load second half
+
+	add		@x[0],@x[0],@t[0]	@ accumulate key material
+	ldr		@t[0],[r12],#16		@ load input
+	add		@x[1],@x[1],@t[1]
+	ldr		@t[1],[r12,#-12]
+# ifdef	__thumb2__
+	it	hi
+# endif
+	 strhi		@t[2],[sp,#4*(16+10)]	@ copy "@x[10]" while at it
+	add		@x[2],@x[2],@t[2]
+	ldr		@t[2],[r12,#-8]
+# ifdef	__thumb2__
+	it	hi
+# endif
+	 strhi		@t[3],[sp,#4*(16+11)]	@ copy "@x[11]" while at it
+	add		@x[3],@x[3],@t[3]
+	ldr		@t[3],[r12,#-4]
+# ifdef	__ARMEB__
+	rev		@x[0],@x[0]
+	rev		@x[1],@x[1]
+	rev		@x[2],@x[2]
+	rev		@x[3],@x[3]
+# endif
+	eor		@x[0],@x[0],@t[0]
+	 add		@t[0],sp,#4*(12)
+	eor		@x[1],@x[1],@t[1]
+	str		@x[0],[r14],#16		@ store output
+	eor		@x[2],@x[2],@t[2]
+	str		@x[1],[r14,#-12]
+	eor		@x[3],@x[3],@t[3]
+	 ldmia		@t[0],{@t[0]-@t[3]}	@ load key material
+	str		@x[2],[r14,#-8]
+	str		@x[3],[r14,#-4]
+
+	add		@x[4],@x[4],@t[0]	@ accumulate key material
+	 add		@t[0],@t[0],#4		@ next counter value
+	add		@x[5],@x[5],@t[1]
+	 str		@t[0],[sp,#4*(12)]	@ save next counter value
+	ldr		@t[0],[r12],#16		@ load input
+	add		@x[6],@x[6],@t[2]
+	 add		@x[4],@x[4],#3		@ counter+3
+	ldr		@t[1],[r12,#-12]
+	add		@x[7],@x[7],@t[3]
+	ldr		@t[2],[r12,#-8]
+	ldr		@t[3],[r12,#-4]
+# ifdef	__ARMEB__
+	rev		@x[4],@x[4]
+	rev		@x[5],@x[5]
+	rev		@x[6],@x[6]
+	rev		@x[7],@x[7]
+# endif
+	eor		@x[4],@x[4],@t[0]
+# ifdef	__thumb2__
+	it	hi
+# endif
+	 ldrhi		@t[0],[sp,#4*(32+2)]	@ re-load len
+	eor		@x[5],@x[5],@t[1]
+	eor		@x[6],@x[6],@t[2]
+	str		@x[4],[r14],#16		@ store output
+	eor		@x[7],@x[7],@t[3]
+	str		@x[5],[r14,#-12]
+	 sub		@t[3],@t[0],#64*4	@ len-=64*4
+	str		@x[6],[r14,#-8]
+	str		@x[7],[r14,#-4]
+	bhi		.Loop_neon_outer
+
+	b		.Ldone_neon
+
+.align	4
+.Lbreak_neon:
+	@ harmonize NEON and integer-only stack frames: load data
+	@ from NEON frame, but save to integer-only one; distance
+	@ between the two is 4*(32+4+16-32)=4*(20).
+
+	str		@t[3], [sp,#4*(20+32+2)]	@ save len
+	 add		@t[3],sp,#4*(32+4)
+	str		r12,   [sp,#4*(20+32+1)]	@ save inp
+	str		r14,   [sp,#4*(20+32+0)]	@ save out
+
+	ldr		@x[12],[sp,#4*(16+10)]
+	ldr		@x[14],[sp,#4*(16+11)]
+	 vldmia		@t[3],{d8-d15}			@ fulfill ABI requirement
+	str		@x[12],[sp,#4*(20+16+10)]	@ copy "@x[10]"
+	str		@x[14],[sp,#4*(20+16+11)]	@ copy "@x[11]"
+
+	ldr		@t[3], [sp,#4*(15)]
+	ldr		@x[12],[sp,#4*(12)]		@ modulo-scheduled load
+	ldr		@t[2], [sp,#4*(13)]
+	ldr		@x[14],[sp,#4*(14)]
+	str		@t[3], [sp,#4*(20+16+15)]
+	add		@t[3],sp,#4*(20)
+	vst1.32		{$a0-$b0},[@t[3]]!		@ copy key
+	add		sp,sp,#4*(20)			@ switch frame
+	vst1.32		{$c0-$d0},[@t[3]]
+	mov		@t[3],#10
+	b		.Loop				@ go integer-only
+
+.align	4
+.Ltail_neon:
+	cmp		@t[3],#64*3
+	bhs		.L192_or_more_neon
+	cmp		@t[3],#64*2
+	bhs		.L128_or_more_neon
+	cmp		@t[3],#64*1
+	bhs		.L64_or_more_neon
+
+	add		@t[0],sp,#4*(8)
+	vst1.8		{$a0-$b0},[sp]
+	add		@t[2],sp,#4*(0)
+	vst1.8		{$c0-$d0},[@t[0]]
+	b		.Loop_tail_neon
+
+.align	4
+.L64_or_more_neon:
+	vld1.8		{$t0-$t1},[r12]!
+	vld1.8		{$t2-$t3},[r12]!
+	veor		$a0,$a0,$t0
+	veor		$b0,$b0,$t1
+	veor		$c0,$c0,$t2
+	veor		$d0,$d0,$t3
+	vst1.8		{$a0-$b0},[r14]!
+	vst1.8		{$c0-$d0},[r14]!
+
+	beq		.Ldone_neon
+
+	add		@t[0],sp,#4*(8)
+	vst1.8		{$a1-$b1},[sp]
+	add		@t[2],sp,#4*(0)
+	vst1.8		{$c1-$d1},[@t[0]]
+	sub		@t[3],@t[3],#64*1	@ len-=64*1
+	b		.Loop_tail_neon
+
+.align	4
+.L128_or_more_neon:
+	vld1.8		{$t0-$t1},[r12]!
+	vld1.8		{$t2-$t3},[r12]!
+	veor		$a0,$a0,$t0
+	veor		$b0,$b0,$t1
+	vld1.8		{$t0-$t1},[r12]!
+	veor		$c0,$c0,$t2
+	veor		$d0,$d0,$t3
+	vld1.8		{$t2-$t3},[r12]!
+
+	veor		$a1,$a1,$t0
+	veor		$b1,$b1,$t1
+	 vst1.8		{$a0-$b0},[r14]!
+	veor		$c1,$c1,$t2
+	 vst1.8		{$c0-$d0},[r14]!
+	veor		$d1,$d1,$t3
+	vst1.8		{$a1-$b1},[r14]!
+	vst1.8		{$c1-$d1},[r14]!
+
+	beq		.Ldone_neon
+
+	add		@t[0],sp,#4*(8)
+	vst1.8		{$a2-$b2},[sp]
+	add		@t[2],sp,#4*(0)
+	vst1.8		{$c2-$d2},[@t[0]]
+	sub		@t[3],@t[3],#64*2	@ len-=64*2
+	b		.Loop_tail_neon
+
+.align	4
+.L192_or_more_neon:
+	vld1.8		{$t0-$t1},[r12]!
+	vld1.8		{$t2-$t3},[r12]!
+	veor		$a0,$a0,$t0
+	veor		$b0,$b0,$t1
+	vld1.8		{$t0-$t1},[r12]!
+	veor		$c0,$c0,$t2
+	veor		$d0,$d0,$t3
+	vld1.8		{$t2-$t3},[r12]!
+
+	veor		$a1,$a1,$t0
+	veor		$b1,$b1,$t1
+	vld1.8		{$t0-$t1},[r12]!
+	veor		$c1,$c1,$t2
+	 vst1.8		{$a0-$b0},[r14]!
+	veor		$d1,$d1,$t3
+	vld1.8		{$t2-$t3},[r12]!
+
+	veor		$a2,$a2,$t0
+	 vst1.8		{$c0-$d0},[r14]!
+	veor		$b2,$b2,$t1
+	 vst1.8		{$a1-$b1},[r14]!
+	veor		$c2,$c2,$t2
+	 vst1.8		{$c1-$d1},[r14]!
+	veor		$d2,$d2,$t3
+	vst1.8		{$a2-$b2},[r14]!
+	vst1.8		{$c2-$d2},[r14]!
+
+	beq		.Ldone_neon
+
+	ldmia		sp,{@t[0]-@t[3]}	@ load key material
+	add		@x[0],@x[0],@t[0]	@ accumulate key material
+	 add		@t[0],sp,#4*(4)
+	add		@x[1],@x[1],@t[1]
+	add		@x[2],@x[2],@t[2]
+	add		@x[3],@x[3],@t[3]
+	 ldmia		@t[0],{@t[0]-@t[3]}	@ load key material
+
+	add		@x[4],@x[4],@t[0]	@ accumulate key material
+	 add		@t[0],sp,#4*(8)
+	add		@x[5],@x[5],@t[1]
+	add		@x[6],@x[6],@t[2]
+	add		@x[7],@x[7],@t[3]
+	 ldmia		@t[0],{@t[0]-@t[3]}	@ load key material
+# ifdef	__ARMEB__
+	rev		@x[0],@x[0]
+	rev		@x[1],@x[1]
+	rev		@x[2],@x[2]
+	rev		@x[3],@x[3]
+	rev		@x[4],@x[4]
+	rev		@x[5],@x[5]
+	rev		@x[6],@x[6]
+	rev		@x[7],@x[7]
+# endif
+	stmia		sp,{@x[0]-@x[7]}
+	 add		@x[0],sp,#4*(16+8)
+
+	ldmia		@x[0],{@x[0]-@x[7]}	@ load second half
+
+	add		@x[0],@x[0],@t[0]	@ accumulate key material
+	 add		@t[0],sp,#4*(12)
+	add		@x[1],@x[1],@t[1]
+	add		@x[2],@x[2],@t[2]
+	add		@x[3],@x[3],@t[3]
+	 ldmia		@t[0],{@t[0]-@t[3]}	@ load key material
+
+	add		@x[4],@x[4],@t[0]	@ accumulate key material
+	 add		@t[0],sp,#4*(8)
+	add		@x[5],@x[5],@t[1]
+	 add		@x[4],@x[4],#3		@ counter+3
+	add		@x[6],@x[6],@t[2]
+	add		@x[7],@x[7],@t[3]
+	 ldr		@t[3],[sp,#4*(32+2)]	@ re-load len
+# ifdef	__ARMEB__
+	rev		@x[0],@x[0]
+	rev		@x[1],@x[1]
+	rev		@x[2],@x[2]
+	rev		@x[3],@x[3]
+	rev		@x[4],@x[4]
+	rev		@x[5],@x[5]
+	rev		@x[6],@x[6]
+	rev		@x[7],@x[7]
+# endif
+	stmia		@t[0],{@x[0]-@x[7]}
+	 add		@t[2],sp,#4*(0)
+	 sub		@t[3],@t[3],#64*3	@ len-=64*3
+
+.Loop_tail_neon:
+	ldrb		@t[0],[@t[2]],#1	@ read buffer on stack
+	ldrb		@t[1],[r12],#1		@ read input
+	subs		@t[3],@t[3],#1
+	eor		@t[0],@t[0],@t[1]
+	strb		@t[0],[r14],#1		@ store output
+	bne		.Loop_tail_neon
+
+.Ldone_neon:
+	add		sp,sp,#4*(32+4)
+	vldmia		sp,{d8-d15}
+	add		sp,sp,#4*(16+3)
+	ldmia		sp!,{r4-r11,pc}
+.size	ChaCha20_neon,.-ChaCha20_neon
+.comm	OPENSSL_armcap_P,4,4
+#endif
+___
+}}}
+
+foreach (split("\n",$code)) {
+	s/\`([^\`]*)\`/eval $1/geo;
+
+	s/\bq([0-9]+)#(lo|hi)/sprintf "d%d",2*$1+($2 eq "hi")/geo;
+
+	print $_,"\n";
+}
+close STDOUT or die "error closing STDOUT: $!";
diff --git a/ap/lib/libssl/openssl-1.1.1o/crypto/chacha/asm/chacha-armv8.pl b/ap/lib/libssl/openssl-1.1.1o/crypto/chacha/asm/chacha-armv8.pl
new file mode 100755
index 0000000..84c9801
--- /dev/null
+++ b/ap/lib/libssl/openssl-1.1.1o/crypto/chacha/asm/chacha-armv8.pl
@@ -0,0 +1,1145 @@
+#! /usr/bin/env perl
+# Copyright 2016-2020 The OpenSSL Project Authors. All Rights Reserved.
+#
+# Licensed under the OpenSSL license (the "License").  You may not use
+# this file except in compliance with the License.  You can obtain a copy
+# in the file LICENSE in the source distribution or at
+# https://www.openssl.org/source/license.html
+
+#
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+#
+# June 2015
+#
+# ChaCha20 for ARMv8.
+#
+# Performance in cycles per byte out of large buffer.
+#
+#			IALU/gcc-4.9    3xNEON+1xIALU	6xNEON+2xIALU
+#
+# Apple A7		5.50/+49%       3.33            1.70
+# Cortex-A53		8.40/+80%       4.72		4.72(*)
+# Cortex-A57		8.06/+43%       4.90            4.43(**)
+# Denver		4.50/+82%       2.63		2.67(*)
+# X-Gene		9.50/+46%       8.82		8.89(*)
+# Mongoose		8.00/+44%	3.64		3.25
+# Kryo			8.17/+50%	4.83		4.65
+#
+# (*)	it's expected that doubling interleave factor doesn't help
+#	all processors, only those with higher NEON latency and
+#	higher instruction issue rate;
+# (**)	expected improvement was actually higher;
+
+$flavour=shift;
+$output=shift;
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+( $xlate="${dir}arm-xlate.pl" and -f $xlate ) or
+( $xlate="${dir}../../perlasm/arm-xlate.pl" and -f $xlate) or
+die "can't locate arm-xlate.pl";
+
+open OUT,"| \"$^X\" $xlate $flavour $output";
+*STDOUT=*OUT;
+
+sub AUTOLOAD()		# thunk [simplified] x86-style perlasm
+{ my $opcode = $AUTOLOAD; $opcode =~ s/.*:://; $opcode =~ s/_/\./;	# called sub name -> mnemonic; '_' stands in for '.'
+  my $arg = pop;	# last argument may be an immediate operand
+    $arg = "#$arg" if ($arg*1 eq $arg);	# numeric literal -> '#imm' assembler syntax
+    $code .= "\t$opcode\t".join(',',@_,$arg)."\n";	# append the formatted instruction to $code
+}
+
+my ($out,$inp,$len,$key,$ctr) = map("x$_",(0..4));
+
+my @x=map("x$_",(5..17,19..21));
+my @d=map("x$_",(22..28,30));
+
+sub ROUND {	# emit four interleaved scalar (IALU) ChaCha quarter-rounds as eval-able strings
+my ($a0,$b0,$c0,$d0)=@_;	# @x indices of the first quarter-round's a/b/c/d words
+my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));	# advance each index to the next lane within its aligned group of 4
+my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
+my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));
+
+    (
+	"&add_32	(@x[$a0],@x[$a0],@x[$b0])",	# a += b (indentation stagger marks lanes 0..3)
+	 "&add_32	(@x[$a1],@x[$a1],@x[$b1])",
+	  "&add_32	(@x[$a2],@x[$a2],@x[$b2])",
+	   "&add_32	(@x[$a3],@x[$a3],@x[$b3])",
+	"&eor_32	(@x[$d0],@x[$d0],@x[$a0])",	# d ^= a
+	 "&eor_32	(@x[$d1],@x[$d1],@x[$a1])",
+	  "&eor_32	(@x[$d2],@x[$d2],@x[$a2])",
+	   "&eor_32	(@x[$d3],@x[$d3],@x[$a3])",
+	"&ror_32	(@x[$d0],@x[$d0],16)",	# d >>>= 16 (rotate right; == rotl 16)
+	 "&ror_32	(@x[$d1],@x[$d1],16)",
+	  "&ror_32	(@x[$d2],@x[$d2],16)",
+	   "&ror_32	(@x[$d3],@x[$d3],16)",
+
+	"&add_32	(@x[$c0],@x[$c0],@x[$d0])",	# c += d
+	 "&add_32	(@x[$c1],@x[$c1],@x[$d1])",
+	  "&add_32	(@x[$c2],@x[$c2],@x[$d2])",
+	   "&add_32	(@x[$c3],@x[$c3],@x[$d3])",
+	"&eor_32	(@x[$b0],@x[$b0],@x[$c0])",	# b ^= c
+	 "&eor_32	(@x[$b1],@x[$b1],@x[$c1])",
+	  "&eor_32	(@x[$b2],@x[$b2],@x[$c2])",
+	   "&eor_32	(@x[$b3],@x[$b3],@x[$c3])",
+	"&ror_32	(@x[$b0],@x[$b0],20)",	# b >>>= 20 (== rotl 12)
+	 "&ror_32	(@x[$b1],@x[$b1],20)",
+	  "&ror_32	(@x[$b2],@x[$b2],20)",
+	   "&ror_32	(@x[$b3],@x[$b3],20)",
+
+	"&add_32	(@x[$a0],@x[$a0],@x[$b0])",	# a += b
+	 "&add_32	(@x[$a1],@x[$a1],@x[$b1])",
+	  "&add_32	(@x[$a2],@x[$a2],@x[$b2])",
+	   "&add_32	(@x[$a3],@x[$a3],@x[$b3])",
+	"&eor_32	(@x[$d0],@x[$d0],@x[$a0])",	# d ^= a
+	 "&eor_32	(@x[$d1],@x[$d1],@x[$a1])",
+	  "&eor_32	(@x[$d2],@x[$d2],@x[$a2])",
+	   "&eor_32	(@x[$d3],@x[$d3],@x[$a3])",
+	"&ror_32	(@x[$d0],@x[$d0],24)",	# d >>>= 24 (== rotl 8)
+	 "&ror_32	(@x[$d1],@x[$d1],24)",
+	  "&ror_32	(@x[$d2],@x[$d2],24)",
+	   "&ror_32	(@x[$d3],@x[$d3],24)",
+
+	"&add_32	(@x[$c0],@x[$c0],@x[$d0])",	# c += d
+	 "&add_32	(@x[$c1],@x[$c1],@x[$d1])",
+	  "&add_32	(@x[$c2],@x[$c2],@x[$d2])",
+	   "&add_32	(@x[$c3],@x[$c3],@x[$d3])",
+	"&eor_32	(@x[$b0],@x[$b0],@x[$c0])",	# b ^= c
+	 "&eor_32	(@x[$b1],@x[$b1],@x[$c1])",
+	  "&eor_32	(@x[$b2],@x[$b2],@x[$c2])",
+	   "&eor_32	(@x[$b3],@x[$b3],@x[$c3])",
+	"&ror_32	(@x[$b0],@x[$b0],25)",	# b >>>= 25 (== rotl 7)
+	 "&ror_32	(@x[$b1],@x[$b1],25)",
+	  "&ror_32	(@x[$b2],@x[$b2],25)",
+	   "&ror_32	(@x[$b3],@x[$b3],25)"
+    );
+}
+
+$code.=<<___;
+#include "arm_arch.h"
+
+.text
+
+.extern	OPENSSL_armcap_P
+.hidden	OPENSSL_armcap_P
+
+.align	5
+.Lsigma:
+.quad	0x3320646e61707865,0x6b20657479622d32		// endian-neutral
+.Lone:
+.long	1,0,0,0
+.LOPENSSL_armcap_P:
+#ifdef	__ILP32__
+.long	OPENSSL_armcap_P-.
+#else
+.quad	OPENSSL_armcap_P-.
+#endif
+.asciz	"ChaCha20 for ARMv8, CRYPTOGAMS by <appro\@openssl.org>"
+
+.globl	ChaCha20_ctr32
+.type	ChaCha20_ctr32,%function
+.align	5
+ChaCha20_ctr32:
+	cbz	$len,.Labort
+	adr	@x[0],.LOPENSSL_armcap_P
+	cmp	$len,#192
+	b.lo	.Lshort
+#ifdef	__ILP32__
+	ldrsw	@x[1],[@x[0]]
+#else
+	ldr	@x[1],[@x[0]]
+#endif
+	ldr	w17,[@x[1],@x[0]]
+	tst	w17,#ARMV7_NEON
+	b.ne	ChaCha20_neon
+
+.Lshort:
+	.inst	0xd503233f			// paciasp
+	stp	x29,x30,[sp,#-96]!
+	add	x29,sp,#0
+
+	adr	@x[0],.Lsigma
+	stp	x19,x20,[sp,#16]
+	stp	x21,x22,[sp,#32]
+	stp	x23,x24,[sp,#48]
+	stp	x25,x26,[sp,#64]
+	stp	x27,x28,[sp,#80]
+	sub	sp,sp,#64
+
+	ldp	@d[0],@d[1],[@x[0]]		// load sigma
+	ldp	@d[2],@d[3],[$key]		// load key
+	ldp	@d[4],@d[5],[$key,#16]
+	ldp	@d[6],@d[7],[$ctr]		// load counter
+#ifdef	__ARMEB__
+	ror	@d[2],@d[2],#32
+	ror	@d[3],@d[3],#32
+	ror	@d[4],@d[4],#32
+	ror	@d[5],@d[5],#32
+	ror	@d[6],@d[6],#32
+	ror	@d[7],@d[7],#32
+#endif
+
+.Loop_outer:
+	mov.32	@x[0],@d[0]			// unpack key block
+	lsr	@x[1],@d[0],#32
+	mov.32	@x[2],@d[1]
+	lsr	@x[3],@d[1],#32
+	mov.32	@x[4],@d[2]
+	lsr	@x[5],@d[2],#32
+	mov.32	@x[6],@d[3]
+	lsr	@x[7],@d[3],#32
+	mov.32	@x[8],@d[4]
+	lsr	@x[9],@d[4],#32
+	mov.32	@x[10],@d[5]
+	lsr	@x[11],@d[5],#32
+	mov.32	@x[12],@d[6]
+	lsr	@x[13],@d[6],#32
+	mov.32	@x[14],@d[7]
+	lsr	@x[15],@d[7],#32
+
+	mov	$ctr,#10
+	subs	$len,$len,#64
+.Loop:
+	sub	$ctr,$ctr,#1
+___
+	foreach (&ROUND(0, 4, 8,12)) { eval; }
+	foreach (&ROUND(0, 5,10,15)) { eval; }
+$code.=<<___;
+	cbnz	$ctr,.Loop
+
+	add.32	@x[0],@x[0],@d[0]		// accumulate key block
+	add	@x[1],@x[1],@d[0],lsr#32
+	add.32	@x[2],@x[2],@d[1]
+	add	@x[3],@x[3],@d[1],lsr#32
+	add.32	@x[4],@x[4],@d[2]
+	add	@x[5],@x[5],@d[2],lsr#32
+	add.32	@x[6],@x[6],@d[3]
+	add	@x[7],@x[7],@d[3],lsr#32
+	add.32	@x[8],@x[8],@d[4]
+	add	@x[9],@x[9],@d[4],lsr#32
+	add.32	@x[10],@x[10],@d[5]
+	add	@x[11],@x[11],@d[5],lsr#32
+	add.32	@x[12],@x[12],@d[6]
+	add	@x[13],@x[13],@d[6],lsr#32
+	add.32	@x[14],@x[14],@d[7]
+	add	@x[15],@x[15],@d[7],lsr#32
+
+	b.lo	.Ltail
+
+	add	@x[0],@x[0],@x[1],lsl#32	// pack
+	add	@x[2],@x[2],@x[3],lsl#32
+	ldp	@x[1],@x[3],[$inp,#0]		// load input
+	add	@x[4],@x[4],@x[5],lsl#32
+	add	@x[6],@x[6],@x[7],lsl#32
+	ldp	@x[5],@x[7],[$inp,#16]
+	add	@x[8],@x[8],@x[9],lsl#32
+	add	@x[10],@x[10],@x[11],lsl#32
+	ldp	@x[9],@x[11],[$inp,#32]
+	add	@x[12],@x[12],@x[13],lsl#32
+	add	@x[14],@x[14],@x[15],lsl#32
+	ldp	@x[13],@x[15],[$inp,#48]
+	add	$inp,$inp,#64
+#ifdef	__ARMEB__
+	rev	@x[0],@x[0]
+	rev	@x[2],@x[2]
+	rev	@x[4],@x[4]
+	rev	@x[6],@x[6]
+	rev	@x[8],@x[8]
+	rev	@x[10],@x[10]
+	rev	@x[12],@x[12]
+	rev	@x[14],@x[14]
+#endif
+	eor	@x[0],@x[0],@x[1]
+	eor	@x[2],@x[2],@x[3]
+	eor	@x[4],@x[4],@x[5]
+	eor	@x[6],@x[6],@x[7]
+	eor	@x[8],@x[8],@x[9]
+	eor	@x[10],@x[10],@x[11]
+	eor	@x[12],@x[12],@x[13]
+	eor	@x[14],@x[14],@x[15]
+
+	stp	@x[0],@x[2],[$out,#0]		// store output
+	 add	@d[6],@d[6],#1			// increment counter
+	stp	@x[4],@x[6],[$out,#16]
+	stp	@x[8],@x[10],[$out,#32]
+	stp	@x[12],@x[14],[$out,#48]
+	add	$out,$out,#64
+
+	b.hi	.Loop_outer
+
+	ldp	x19,x20,[x29,#16]
+	add	sp,sp,#64
+	ldp	x21,x22,[x29,#32]
+	ldp	x23,x24,[x29,#48]
+	ldp	x25,x26,[x29,#64]
+	ldp	x27,x28,[x29,#80]
+	ldp	x29,x30,[sp],#96
+	.inst	0xd50323bf			// autiasp
+.Labort:
+	ret
+
+.align	4
+.Ltail:
+	add	$len,$len,#64
+.Less_than_64:
+	sub	$out,$out,#1
+	add	$inp,$inp,$len
+	add	$out,$out,$len
+	add	$ctr,sp,$len
+	neg	$len,$len
+
+	add	@x[0],@x[0],@x[1],lsl#32	// pack
+	add	@x[2],@x[2],@x[3],lsl#32
+	add	@x[4],@x[4],@x[5],lsl#32
+	add	@x[6],@x[6],@x[7],lsl#32
+	add	@x[8],@x[8],@x[9],lsl#32
+	add	@x[10],@x[10],@x[11],lsl#32
+	add	@x[12],@x[12],@x[13],lsl#32
+	add	@x[14],@x[14],@x[15],lsl#32
+#ifdef	__ARMEB__
+	rev	@x[0],@x[0]
+	rev	@x[2],@x[2]
+	rev	@x[4],@x[4]
+	rev	@x[6],@x[6]
+	rev	@x[8],@x[8]
+	rev	@x[10],@x[10]
+	rev	@x[12],@x[12]
+	rev	@x[14],@x[14]
+#endif
+	stp	@x[0],@x[2],[sp,#0]
+	stp	@x[4],@x[6],[sp,#16]
+	stp	@x[8],@x[10],[sp,#32]
+	stp	@x[12],@x[14],[sp,#48]
+
+.Loop_tail:
+	ldrb	w10,[$inp,$len]
+	ldrb	w11,[$ctr,$len]
+	add	$len,$len,#1
+	eor	w10,w10,w11
+	strb	w10,[$out,$len]
+	cbnz	$len,.Loop_tail
+
+	stp	xzr,xzr,[sp,#0]
+	stp	xzr,xzr,[sp,#16]
+	stp	xzr,xzr,[sp,#32]
+	stp	xzr,xzr,[sp,#48]
+
+	ldp	x19,x20,[x29,#16]
+	add	sp,sp,#64
+	ldp	x21,x22,[x29,#32]
+	ldp	x23,x24,[x29,#48]
+	ldp	x25,x26,[x29,#64]
+	ldp	x27,x28,[x29,#80]
+	ldp	x29,x30,[sp],#96
+	.inst	0xd50323bf			// autiasp
+	ret
+.size	ChaCha20_ctr32,.-ChaCha20_ctr32
+___
+
+{{{
+my ($A0,$B0,$C0,$D0,$A1,$B1,$C1,$D1,$A2,$B2,$C2,$D2,$T0,$T1,$T2,$T3) =
+    map("v$_.4s",(0..7,16..23));
+my (@K)=map("v$_.4s",(24..30));
+my $ONE="v31.4s";
+
<br>
+sub NEONROUND {	# emit one 4-lane NEON ChaCha quarter-round as eval-able strings
+my $odd = pop;	# 0: even (column) round, 1: odd (diagonal) round — selects ext rotation amounts
+my ($a,$b,$c,$d,$t)=@_;	# vector register names; $t is a scratch register
+
+	(
+	"&add		('$a','$a','$b')",	# a += b
+	"&eor		('$d','$d','$a')",	# d ^= a
+	"&rev32_16	('$d','$d')",		# vrot ($d,16)
+
+	"&add		('$c','$c','$d')",	# c += d
+	"&eor		('$t','$b','$c')",	# b = (b ^ c) rotl 12 ...
+	"&ushr		('$b','$t',20)",
+	"&sli		('$b','$t',12)",	# ... via shift-right + shift-left-insert pair
+
+	"&add		('$a','$a','$b')",	# a += b
+	"&eor		('$t','$d','$a')",	# d = (d ^ a) rotl 8
+	"&ushr		('$d','$t',24)",
+	"&sli		('$d','$t',8)",
+
+	"&add		('$c','$c','$d')",	# c += d
+	"&eor		('$t','$b','$c')",	# b = (b ^ c) rotl 7
+	"&ushr		('$b','$t',25)",
+	"&sli		('$b','$t',7)",
+
+	"&ext		('$c','$c','$c',8)",	# rotate lanes: (de-)diagonalize state for the next round
+	"&ext		('$d','$d','$d',$odd?4:12)",
+	"&ext		('$b','$b','$b',$odd?12:4)"
+	);
+}
+
+$code.=<<___;
+
+.type	ChaCha20_neon,%function
+.align	5
+ChaCha20_neon:
+	.inst	0xd503233f			// paciasp
+	stp	x29,x30,[sp,#-96]!
+	add	x29,sp,#0
+
+	adr	@x[0],.Lsigma
+	stp	x19,x20,[sp,#16]
+	stp	x21,x22,[sp,#32]
+	stp	x23,x24,[sp,#48]
+	stp	x25,x26,[sp,#64]
+	stp	x27,x28,[sp,#80]
+	cmp	$len,#512
+	b.hs	.L512_or_more_neon
+
+	sub	sp,sp,#64
+
+	ldp	@d[0],@d[1],[@x[0]]		// load sigma
+	ld1	{@K[0]},[@x[0]],#16
+	ldp	@d[2],@d[3],[$key]		// load key
+	ldp	@d[4],@d[5],[$key,#16]
+	ld1	{@K[1],@K[2]},[$key]
+	ldp	@d[6],@d[7],[$ctr]		// load counter
+	ld1	{@K[3]},[$ctr]
+	ld1	{$ONE},[@x[0]]
+#ifdef	__ARMEB__
+	rev64	@K[0],@K[0]
+	ror	@d[2],@d[2],#32
+	ror	@d[3],@d[3],#32
+	ror	@d[4],@d[4],#32
+	ror	@d[5],@d[5],#32
+	ror	@d[6],@d[6],#32
+	ror	@d[7],@d[7],#32
+#endif
+	add	@K[3],@K[3],$ONE		// += 1
+	add	@K[4],@K[3],$ONE
+	add	@K[5],@K[4],$ONE
+	shl	$ONE,$ONE,#2			// 1 -> 4
+
+.Loop_outer_neon:
+	mov.32	@x[0],@d[0]			// unpack key block
+	lsr	@x[1],@d[0],#32
+	 mov	$A0,@K[0]
+	mov.32	@x[2],@d[1]
+	lsr	@x[3],@d[1],#32
+	 mov	$A1,@K[0]
+	mov.32	@x[4],@d[2]
+	lsr	@x[5],@d[2],#32
+	 mov	$A2,@K[0]
+	mov.32	@x[6],@d[3]
+	 mov	$B0,@K[1]
+	lsr	@x[7],@d[3],#32
+	 mov	$B1,@K[1]
+	mov.32	@x[8],@d[4]
+	 mov	$B2,@K[1]
+	lsr	@x[9],@d[4],#32
+	 mov	$D0,@K[3]
+	mov.32	@x[10],@d[5]
+	 mov	$D1,@K[4]
+	lsr	@x[11],@d[5],#32
+	 mov	$D2,@K[5]
+	mov.32	@x[12],@d[6]
+	 mov	$C0,@K[2]
+	lsr	@x[13],@d[6],#32
+	 mov	$C1,@K[2]
+	mov.32	@x[14],@d[7]
+	 mov	$C2,@K[2]
+	lsr	@x[15],@d[7],#32
+
+	mov	$ctr,#10
+	subs	$len,$len,#256
+.Loop_neon:
+	sub	$ctr,$ctr,#1
+___
+	my @thread0=&NEONROUND($A0,$B0,$C0,$D0,$T0,0);	# even (column) round, NEON block 0
+	my @thread1=&NEONROUND($A1,$B1,$C1,$D1,$T1,0);	# ... NEON block 1
+	my @thread2=&NEONROUND($A2,$B2,$C2,$D2,$T2,0);	# ... NEON block 2
+	my @thread3=&ROUND(0,4,8,12);	# scalar IALU column round, interleaved below
+
+	foreach (@thread0) {	# round-robin emission: 3x NEON streams + 1x IALU stream
+		eval;			eval(shift(@thread3));
+		eval(shift(@thread1));	eval(shift(@thread3));
+		eval(shift(@thread2));	eval(shift(@thread3));
+	}
+
+	@thread0=&NEONROUND($A0,$B0,$C0,$D0,$T0,1);	# odd (diagonal) round, NEON block 0
+	@thread1=&NEONROUND($A1,$B1,$C1,$D1,$T1,1);
+	@thread2=&NEONROUND($A2,$B2,$C2,$D2,$T2,1);
+	@thread3=&ROUND(0,5,10,15);	# scalar IALU diagonal round
+
+	foreach (@thread0) {	# same interleave for the diagonal half of the double round
+		eval;			eval(shift(@thread3));
+		eval(shift(@thread1));	eval(shift(@thread3));
+		eval(shift(@thread2));	eval(shift(@thread3));
+	}
+$code.=<<___;
+	cbnz	$ctr,.Loop_neon
+
+	add.32	@x[0],@x[0],@d[0]		// accumulate key block
+	 add	$A0,$A0,@K[0]
+	add	@x[1],@x[1],@d[0],lsr#32
+	 add	$A1,$A1,@K[0]
+	add.32	@x[2],@x[2],@d[1]
+	 add	$A2,$A2,@K[0]
+	add	@x[3],@x[3],@d[1],lsr#32
+	 add	$C0,$C0,@K[2]
+	add.32	@x[4],@x[4],@d[2]
+	 add	$C1,$C1,@K[2]
+	add	@x[5],@x[5],@d[2],lsr#32
+	 add	$C2,$C2,@K[2]
+	add.32	@x[6],@x[6],@d[3]
+	 add	$D0,$D0,@K[3]
+	add	@x[7],@x[7],@d[3],lsr#32
+	add.32	@x[8],@x[8],@d[4]
+	 add	$D1,$D1,@K[4]
+	add	@x[9],@x[9],@d[4],lsr#32
+	add.32	@x[10],@x[10],@d[5]
+	 add	$D2,$D2,@K[5]
+	add	@x[11],@x[11],@d[5],lsr#32
+	add.32	@x[12],@x[12],@d[6]
+	 add	$B0,$B0,@K[1]
+	add	@x[13],@x[13],@d[6],lsr#32
+	add.32	@x[14],@x[14],@d[7]
+	 add	$B1,$B1,@K[1]
+	add	@x[15],@x[15],@d[7],lsr#32
+	 add	$B2,$B2,@K[1]
+
+	b.lo	.Ltail_neon
+
+	add	@x[0],@x[0],@x[1],lsl#32	// pack
+	add	@x[2],@x[2],@x[3],lsl#32
+	ldp	@x[1],@x[3],[$inp,#0]		// load input
+	add	@x[4],@x[4],@x[5],lsl#32
+	add	@x[6],@x[6],@x[7],lsl#32
+	ldp	@x[5],@x[7],[$inp,#16]
+	add	@x[8],@x[8],@x[9],lsl#32
+	add	@x[10],@x[10],@x[11],lsl#32
+	ldp	@x[9],@x[11],[$inp,#32]
+	add	@x[12],@x[12],@x[13],lsl#32
+	add	@x[14],@x[14],@x[15],lsl#32
+	ldp	@x[13],@x[15],[$inp,#48]
+	add	$inp,$inp,#64
+#ifdef	__ARMEB__
+	rev	@x[0],@x[0]
+	rev	@x[2],@x[2]
+	rev	@x[4],@x[4]
+	rev	@x[6],@x[6]
+	rev	@x[8],@x[8]
+	rev	@x[10],@x[10]
+	rev	@x[12],@x[12]
+	rev	@x[14],@x[14]
+#endif
+	ld1.8	{$T0-$T3},[$inp],#64
+	eor	@x[0],@x[0],@x[1]
+	eor	@x[2],@x[2],@x[3]
+	eor	@x[4],@x[4],@x[5]
+	eor	@x[6],@x[6],@x[7]
+	eor	@x[8],@x[8],@x[9]
+	 eor	$A0,$A0,$T0
+	eor	@x[10],@x[10],@x[11]
+	 eor	$B0,$B0,$T1
+	eor	@x[12],@x[12],@x[13]
+	 eor	$C0,$C0,$T2
+	eor	@x[14],@x[14],@x[15]
+	 eor	$D0,$D0,$T3
+	 ld1.8	{$T0-$T3},[$inp],#64
+
+	stp	@x[0],@x[2],[$out,#0]		// store output
+	 add	@d[6],@d[6],#4			// increment counter
+	stp	@x[4],@x[6],[$out,#16]
+	 add	@K[3],@K[3],$ONE		// += 4
+	stp	@x[8],@x[10],[$out,#32]
+	 add	@K[4],@K[4],$ONE
+	stp	@x[12],@x[14],[$out,#48]
+	 add	@K[5],@K[5],$ONE
+	add	$out,$out,#64
+
+	st1.8	{$A0-$D0},[$out],#64
+	ld1.8	{$A0-$D0},[$inp],#64
+
+	eor	$A1,$A1,$T0
+	eor	$B1,$B1,$T1
+	eor	$C1,$C1,$T2
+	eor	$D1,$D1,$T3
+	st1.8	{$A1-$D1},[$out],#64
+
+	eor	$A2,$A2,$A0
+	eor	$B2,$B2,$B0
+	eor	$C2,$C2,$C0
+	eor	$D2,$D2,$D0
+	st1.8	{$A2-$D2},[$out],#64
+
+	b.hi	.Loop_outer_neon
+
+	ldp	x19,x20,[x29,#16]
+	add	sp,sp,#64
+	ldp	x21,x22,[x29,#32]
+	ldp	x23,x24,[x29,#48]
+	ldp	x25,x26,[x29,#64]
+	ldp	x27,x28,[x29,#80]
+	ldp	x29,x30,[sp],#96
+	.inst	0xd50323bf			// autiasp
+	ret
+
+.Ltail_neon:
+	add	$len,$len,#256
+	cmp	$len,#64
+	b.lo	.Less_than_64
+
+	add	@x[0],@x[0],@x[1],lsl#32	// pack
+	add	@x[2],@x[2],@x[3],lsl#32
+	ldp	@x[1],@x[3],[$inp,#0]		// load input
+	add	@x[4],@x[4],@x[5],lsl#32
+	add	@x[6],@x[6],@x[7],lsl#32
+	ldp	@x[5],@x[7],[$inp,#16]
+	add	@x[8],@x[8],@x[9],lsl#32
+	add	@x[10],@x[10],@x[11],lsl#32
+	ldp	@x[9],@x[11],[$inp,#32]
+	add	@x[12],@x[12],@x[13],lsl#32
+	add	@x[14],@x[14],@x[15],lsl#32
+	ldp	@x[13],@x[15],[$inp,#48]
+	add	$inp,$inp,#64
+#ifdef	__ARMEB__
+	rev	@x[0],@x[0]
+	rev	@x[2],@x[2]
+	rev	@x[4],@x[4]
+	rev	@x[6],@x[6]
+	rev	@x[8],@x[8]
+	rev	@x[10],@x[10]
+	rev	@x[12],@x[12]
+	rev	@x[14],@x[14]
+#endif
+	eor	@x[0],@x[0],@x[1]
+	eor	@x[2],@x[2],@x[3]
+	eor	@x[4],@x[4],@x[5]
+	eor	@x[6],@x[6],@x[7]
+	eor	@x[8],@x[8],@x[9]
+	eor	@x[10],@x[10],@x[11]
+	eor	@x[12],@x[12],@x[13]
+	eor	@x[14],@x[14],@x[15]
+
+	stp	@x[0],@x[2],[$out,#0]		// store output
+	 add	@d[6],@d[6],#4			// increment counter
+	stp	@x[4],@x[6],[$out,#16]
+	stp	@x[8],@x[10],[$out,#32]
+	stp	@x[12],@x[14],[$out,#48]
+	add	$out,$out,#64
+	b.eq	.Ldone_neon
+	sub	$len,$len,#64
+	cmp	$len,#64
+	b.lo	.Less_than_128
+
+	ld1.8	{$T0-$T3},[$inp],#64
+	eor	$A0,$A0,$T0
+	eor	$B0,$B0,$T1
+	eor	$C0,$C0,$T2
+	eor	$D0,$D0,$T3
+	st1.8	{$A0-$D0},[$out],#64
+	b.eq	.Ldone_neon
+	sub	$len,$len,#64
+	cmp	$len,#64
+	b.lo	.Less_than_192
+
+	ld1.8	{$T0-$T3},[$inp],#64
+	eor	$A1,$A1,$T0
+	eor	$B1,$B1,$T1
+	eor	$C1,$C1,$T2
+	eor	$D1,$D1,$T3
+	st1.8	{$A1-$D1},[$out],#64
+	b.eq	.Ldone_neon
+	sub	$len,$len,#64
+
+	st1.8	{$A2-$D2},[sp]
+	b	.Last_neon
+
+.Less_than_128:
+	st1.8	{$A0-$D0},[sp]
+	b	.Last_neon
+.Less_than_192:
+	st1.8	{$A1-$D1},[sp]
+	b	.Last_neon
+
+.align	4
+.Last_neon:
+	sub	$out,$out,#1
+	add	$inp,$inp,$len
+	add	$out,$out,$len
+	add	$ctr,sp,$len
+	neg	$len,$len
+
+.Loop_tail_neon:
+	ldrb	w10,[$inp,$len]
+	ldrb	w11,[$ctr,$len]
+	add	$len,$len,#1
+	eor	w10,w10,w11
+	strb	w10,[$out,$len]
+	cbnz	$len,.Loop_tail_neon
+
+	stp	xzr,xzr,[sp,#0]
+	stp	xzr,xzr,[sp,#16]
+	stp	xzr,xzr,[sp,#32]
+	stp	xzr,xzr,[sp,#48]
+
+.Ldone_neon:
+	ldp	x19,x20,[x29,#16]
+	add	sp,sp,#64
+	ldp	x21,x22,[x29,#32]
+	ldp	x23,x24,[x29,#48]
+	ldp	x25,x26,[x29,#64]
+	ldp	x27,x28,[x29,#80]
+	ldp	x29,x30,[sp],#96
+	.inst	0xd50323bf			// autiasp
+	ret
+.size	ChaCha20_neon,.-ChaCha20_neon
+___
+{
+my ($T0,$T1,$T2,$T3,$T4,$T5)=@K;
+my ($A0,$B0,$C0,$D0,$A1,$B1,$C1,$D1,$A2,$B2,$C2,$D2,
+    $A3,$B3,$C3,$D3,$A4,$B4,$C4,$D4,$A5,$B5,$C5,$D5) = map("v$_.4s",(0..23));
+
+$code.=<<___;
+.type	ChaCha20_512_neon,%function
+.align	5
+ChaCha20_512_neon:
+	.inst	0xd503233f			// paciasp
+	stp	x29,x30,[sp,#-96]!
+	add	x29,sp,#0
+
+	adr	@x[0],.Lsigma
+	stp	x19,x20,[sp,#16]
+	stp	x21,x22,[sp,#32]
+	stp	x23,x24,[sp,#48]
+	stp	x25,x26,[sp,#64]
+	stp	x27,x28,[sp,#80]
+
+.L512_or_more_neon:
+	sub	sp,sp,#128+64
+
+	ldp	@d[0],@d[1],[@x[0]]		// load sigma
+	ld1	{@K[0]},[@x[0]],#16
+	ldp	@d[2],@d[3],[$key]		// load key
+	ldp	@d[4],@d[5],[$key,#16]
+	ld1	{@K[1],@K[2]},[$key]
+	ldp	@d[6],@d[7],[$ctr]		// load counter
+	ld1	{@K[3]},[$ctr]
+	ld1	{$ONE},[@x[0]]
+#ifdef	__ARMEB__
+	rev64	@K[0],@K[0]
+	ror	@d[2],@d[2],#32
+	ror	@d[3],@d[3],#32
+	ror	@d[4],@d[4],#32
+	ror	@d[5],@d[5],#32
+	ror	@d[6],@d[6],#32
+	ror	@d[7],@d[7],#32
+#endif
+	add	@K[3],@K[3],$ONE		// += 1
+	stp	@K[0],@K[1],[sp,#0]		// off-load key block, invariant part
+	add	@K[3],@K[3],$ONE		// not typo
+	str	@K[2],[sp,#32]
+	add	@K[4],@K[3],$ONE
+	add	@K[5],@K[4],$ONE
+	add	@K[6],@K[5],$ONE
+	shl	$ONE,$ONE,#2			// 1 -> 4
+
+	stp	d8,d9,[sp,#128+0]		// meet ABI requirements
+	stp	d10,d11,[sp,#128+16]
+	stp	d12,d13,[sp,#128+32]
+	stp	d14,d15,[sp,#128+48]
+
+	sub	$len,$len,#512			// not typo
+
+.Loop_outer_512_neon:
+	 mov	$A0,@K[0]
+	 mov	$A1,@K[0]
+	 mov	$A2,@K[0]
+	 mov	$A3,@K[0]
+	 mov	$A4,@K[0]
+	 mov	$A5,@K[0]
+	 mov	$B0,@K[1]
+	mov.32	@x[0],@d[0]			// unpack key block
+	 mov	$B1,@K[1]
+	lsr	@x[1],@d[0],#32
+	 mov	$B2,@K[1]
+	mov.32	@x[2],@d[1]
+	 mov	$B3,@K[1]
+	lsr	@x[3],@d[1],#32
+	 mov	$B4,@K[1]
+	mov.32	@x[4],@d[2]
+	 mov	$B5,@K[1]
+	lsr	@x[5],@d[2],#32
+	 mov	$D0,@K[3]
+	mov.32	@x[6],@d[3]
+	 mov	$D1,@K[4]
+	lsr	@x[7],@d[3],#32
+	 mov	$D2,@K[5]
+	mov.32	@x[8],@d[4]
+	 mov	$D3,@K[6]
+	lsr	@x[9],@d[4],#32
+	 mov	$C0,@K[2]
+	mov.32	@x[10],@d[5]
+	 mov	$C1,@K[2]
+	lsr	@x[11],@d[5],#32
+	 add	$D4,$D0,$ONE			// +4
+	mov.32	@x[12],@d[6]
+	 add	$D5,$D1,$ONE			// +4
+	lsr	@x[13],@d[6],#32
+	 mov	$C2,@K[2]
+	mov.32	@x[14],@d[7]
+	 mov	$C3,@K[2]
+	lsr	@x[15],@d[7],#32
+	 mov	$C4,@K[2]
+	 stp	@K[3],@K[4],[sp,#48]		// off-load key block, variable part
+	 mov	$C5,@K[2]
+	 str	@K[5],[sp,#80]
+
+	mov	$ctr,#5
+	subs	$len,$len,#512
+.Loop_upper_neon:
+	sub	$ctr,$ctr,#1
+___
+	my @thread0=&NEONROUND($A0,$B0,$C0,$D0,$T0,0);
+	my @thread1=&NEONROUND($A1,$B1,$C1,$D1,$T1,0);
+	my @thread2=&NEONROUND($A2,$B2,$C2,$D2,$T2,0);
+	my @thread3=&NEONROUND($A3,$B3,$C3,$D3,$T3,0);
+	my @thread4=&NEONROUND($A4,$B4,$C4,$D4,$T4,0);
+	my @thread5=&NEONROUND($A5,$B5,$C5,$D5,$T5,0);
+	my @thread67=(&ROUND(0,4,8,12),&ROUND(0,5,10,15));
+	my $diff = ($#thread0+1)*6 - $#thread67 - 1;
+	my $i = 0;
+
+	foreach (@thread0) {
+		eval;			eval(shift(@thread67));
+		eval(shift(@thread1));	eval(shift(@thread67));
+		eval(shift(@thread2));	eval(shift(@thread67));
+		eval(shift(@thread3));	eval(shift(@thread67));
+		eval(shift(@thread4));	eval(shift(@thread67));
+		eval(shift(@thread5));	eval(shift(@thread67));
+	}
+
+	@thread0=&NEONROUND($A0,$B0,$C0,$D0,$T0,1);
+	@thread1=&NEONROUND($A1,$B1,$C1,$D1,$T1,1);
+	@thread2=&NEONROUND($A2,$B2,$C2,$D2,$T2,1);
+	@thread3=&NEONROUND($A3,$B3,$C3,$D3,$T3,1);
+	@thread4=&NEONROUND($A4,$B4,$C4,$D4,$T4,1);
+	@thread5=&NEONROUND($A5,$B5,$C5,$D5,$T5,1);
+	@thread67=(&ROUND(0,4,8,12),&ROUND(0,5,10,15));
+
+	foreach (@thread0) {
+		eval;			eval(shift(@thread67));
+		eval(shift(@thread1));	eval(shift(@thread67));
+		eval(shift(@thread2));	eval(shift(@thread67));
+		eval(shift(@thread3));	eval(shift(@thread67));
+		eval(shift(@thread4));	eval(shift(@thread67));
+		eval(shift(@thread5));	eval(shift(@thread67));
+	}
+$code.=<<___;
+	cbnz	$ctr,.Loop_upper_neon
+
+	add.32	@x[0],@x[0],@d[0]		// accumulate key block
+	add	@x[1],@x[1],@d[0],lsr#32
+	add.32	@x[2],@x[2],@d[1]
+	add	@x[3],@x[3],@d[1],lsr#32
+	add.32	@x[4],@x[4],@d[2]
+	add	@x[5],@x[5],@d[2],lsr#32
+	add.32	@x[6],@x[6],@d[3]
+	add	@x[7],@x[7],@d[3],lsr#32
+	add.32	@x[8],@x[8],@d[4]
+	add	@x[9],@x[9],@d[4],lsr#32
+	add.32	@x[10],@x[10],@d[5]
+	add	@x[11],@x[11],@d[5],lsr#32
+	add.32	@x[12],@x[12],@d[6]
+	add	@x[13],@x[13],@d[6],lsr#32
+	add.32	@x[14],@x[14],@d[7]
+	add	@x[15],@x[15],@d[7],lsr#32
+
+	add	@x[0],@x[0],@x[1],lsl#32	// pack
+	add	@x[2],@x[2],@x[3],lsl#32
+	ldp	@x[1],@x[3],[$inp,#0]		// load input
+	add	@x[4],@x[4],@x[5],lsl#32
+	add	@x[6],@x[6],@x[7],lsl#32
+	ldp	@x[5],@x[7],[$inp,#16]
+	add	@x[8],@x[8],@x[9],lsl#32
+	add	@x[10],@x[10],@x[11],lsl#32
+	ldp	@x[9],@x[11],[$inp,#32]
+	add	@x[12],@x[12],@x[13],lsl#32
+	add	@x[14],@x[14],@x[15],lsl#32
+	ldp	@x[13],@x[15],[$inp,#48]
+	add	$inp,$inp,#64
+#ifdef	__ARMEB__
+	rev	@x[0],@x[0]
+	rev	@x[2],@x[2]
+	rev	@x[4],@x[4]
+	rev	@x[6],@x[6]
+	rev	@x[8],@x[8]
+	rev	@x[10],@x[10]
+	rev	@x[12],@x[12]
+	rev	@x[14],@x[14]
+#endif
+	eor	@x[0],@x[0],@x[1]
+	eor	@x[2],@x[2],@x[3]
+	eor	@x[4],@x[4],@x[5]
+	eor	@x[6],@x[6],@x[7]
+	eor	@x[8],@x[8],@x[9]
+	eor	@x[10],@x[10],@x[11]
+	eor	@x[12],@x[12],@x[13]
+	eor	@x[14],@x[14],@x[15]
+
+	 stp	@x[0],@x[2],[$out,#0]		// store output
+	 add	@d[6],@d[6],#1			// increment counter
+	mov.32	@x[0],@d[0]			// unpack key block
+	lsr	@x[1],@d[0],#32
+	 stp	@x[4],@x[6],[$out,#16]
+	mov.32	@x[2],@d[1]
+	lsr	@x[3],@d[1],#32
+	 stp	@x[8],@x[10],[$out,#32]
+	mov.32	@x[4],@d[2]
+	lsr	@x[5],@d[2],#32
+	 stp	@x[12],@x[14],[$out,#48]
+	 add	$out,$out,#64
+	mov.32	@x[6],@d[3]
+	lsr	@x[7],@d[3],#32
+	mov.32	@x[8],@d[4]
+	lsr	@x[9],@d[4],#32
+	mov.32	@x[10],@d[5]
+	lsr	@x[11],@d[5],#32
+	mov.32	@x[12],@d[6]
+	lsr	@x[13],@d[6],#32
+	mov.32	@x[14],@d[7]
+	lsr	@x[15],@d[7],#32
+
+	mov	$ctr,#5
+.Loop_lower_neon:
+	sub	$ctr,$ctr,#1
+___
+	@thread0=&NEONROUND($A0,$B0,$C0,$D0,$T0,0);
+	@thread1=&NEONROUND($A1,$B1,$C1,$D1,$T1,0);
+	@thread2=&NEONROUND($A2,$B2,$C2,$D2,$T2,0);
+	@thread3=&NEONROUND($A3,$B3,$C3,$D3,$T3,0);
+	@thread4=&NEONROUND($A4,$B4,$C4,$D4,$T4,0);
+	@thread5=&NEONROUND($A5,$B5,$C5,$D5,$T5,0);
+	@thread67=(&ROUND(0,4,8,12),&ROUND(0,5,10,15));
+
+	foreach (@thread0) {
+		eval;			eval(shift(@thread67));
+		eval(shift(@thread1));	eval(shift(@thread67));
+		eval(shift(@thread2));	eval(shift(@thread67));
+		eval(shift(@thread3));	eval(shift(@thread67));
+		eval(shift(@thread4));	eval(shift(@thread67));
+		eval(shift(@thread5));	eval(shift(@thread67));
+	}
+
+	@thread0=&NEONROUND($A0,$B0,$C0,$D0,$T0,1);
+	@thread1=&NEONROUND($A1,$B1,$C1,$D1,$T1,1);
+	@thread2=&NEONROUND($A2,$B2,$C2,$D2,$T2,1);
+	@thread3=&NEONROUND($A3,$B3,$C3,$D3,$T3,1);
+	@thread4=&NEONROUND($A4,$B4,$C4,$D4,$T4,1);
+	@thread5=&NEONROUND($A5,$B5,$C5,$D5,$T5,1);
+	@thread67=(&ROUND(0,4,8,12),&ROUND(0,5,10,15));
+
+	foreach (@thread0) {
+		eval;			eval(shift(@thread67));
+		eval(shift(@thread1));	eval(shift(@thread67));
+		eval(shift(@thread2));	eval(shift(@thread67));
+		eval(shift(@thread3));	eval(shift(@thread67));
+		eval(shift(@thread4));	eval(shift(@thread67));
+		eval(shift(@thread5));	eval(shift(@thread67));
+	}
+$code.=<<___;
+	cbnz	$ctr,.Loop_lower_neon
+
+	add.32	@x[0],@x[0],@d[0]		// accumulate key block
+	 ldp	@K[0],@K[1],[sp,#0]
+	add	@x[1],@x[1],@d[0],lsr#32
+	 ldp	@K[2],@K[3],[sp,#32]
+	add.32	@x[2],@x[2],@d[1]
+	 ldp	@K[4],@K[5],[sp,#64]
+	add	@x[3],@x[3],@d[1],lsr#32
+	 add	$A0,$A0,@K[0]
+	add.32	@x[4],@x[4],@d[2]
+	 add	$A1,$A1,@K[0]
+	add	@x[5],@x[5],@d[2],lsr#32
+	 add	$A2,$A2,@K[0]
+	add.32	@x[6],@x[6],@d[3]
+	 add	$A3,$A3,@K[0]
+	add	@x[7],@x[7],@d[3],lsr#32
+	 add	$A4,$A4,@K[0]
+	add.32	@x[8],@x[8],@d[4]
+	 add	$A5,$A5,@K[0]
+	add	@x[9],@x[9],@d[4],lsr#32
+	 add	$C0,$C0,@K[2]
+	add.32	@x[10],@x[10],@d[5]
+	 add	$C1,$C1,@K[2]
+	add	@x[11],@x[11],@d[5],lsr#32
+	 add	$C2,$C2,@K[2]
+	add.32	@x[12],@x[12],@d[6]
+	 add	$C3,$C3,@K[2]
+	add	@x[13],@x[13],@d[6],lsr#32
+	 add	$C4,$C4,@K[2]
+	add.32	@x[14],@x[14],@d[7]
+	 add	$C5,$C5,@K[2]
+	add	@x[15],@x[15],@d[7],lsr#32
+	 add	$D4,$D4,$ONE			// +4
+	add	@x[0],@x[0],@x[1],lsl#32	// pack
+	 add	$D5,$D5,$ONE			// +4
+	add	@x[2],@x[2],@x[3],lsl#32
+	 add	$D0,$D0,@K[3]
+	ldp	@x[1],@x[3],[$inp,#0]		// load input
+	 add	$D1,$D1,@K[4]
+	add	@x[4],@x[4],@x[5],lsl#32
+	 add	$D2,$D2,@K[5]
+	add	@x[6],@x[6],@x[7],lsl#32
+	 add	$D3,$D3,@K[6]
+	ldp	@x[5],@x[7],[$inp,#16]
+	 add	$D4,$D4,@K[3]
+	add	@x[8],@x[8],@x[9],lsl#32
+	 add	$D5,$D5,@K[4]
+	add	@x[10],@x[10],@x[11],lsl#32
+	 add	$B0,$B0,@K[1]
+	ldp	@x[9],@x[11],[$inp,#32]
+	 add	$B1,$B1,@K[1]
+	add	@x[12],@x[12],@x[13],lsl#32
+	 add	$B2,$B2,@K[1]
+	add	@x[14],@x[14],@x[15],lsl#32
+	 add	$B3,$B3,@K[1]
+	ldp	@x[13],@x[15],[$inp,#48]
+	 add	$B4,$B4,@K[1]
+	add	$inp,$inp,#64
+	 add	$B5,$B5,@K[1]
+
+#ifdef	__ARMEB__
+	rev	@x[0],@x[0]
+	rev	@x[2],@x[2]
+	rev	@x[4],@x[4]
+	rev	@x[6],@x[6]
+	rev	@x[8],@x[8]
+	rev	@x[10],@x[10]
+	rev	@x[12],@x[12]
+	rev	@x[14],@x[14]
+#endif
+	ld1.8	{$T0-$T3},[$inp],#64
+	eor	@x[0],@x[0],@x[1]
+	eor	@x[2],@x[2],@x[3]
+	eor	@x[4],@x[4],@x[5]
+	eor	@x[6],@x[6],@x[7]
+	eor	@x[8],@x[8],@x[9]
+	 eor	$A0,$A0,$T0
+	eor	@x[10],@x[10],@x[11]
+	 eor	$B0,$B0,$T1
+	eor	@x[12],@x[12],@x[13]
+	 eor	$C0,$C0,$T2
+	eor	@x[14],@x[14],@x[15]
+	 eor	$D0,$D0,$T3
+	 ld1.8	{$T0-$T3},[$inp],#64
+
+	stp	@x[0],@x[2],[$out,#0]		// store output
+	 add	@d[6],@d[6],#7			// increment counter
+	stp	@x[4],@x[6],[$out,#16]
+	stp	@x[8],@x[10],[$out,#32]
+	stp	@x[12],@x[14],[$out,#48]
+	add	$out,$out,#64
+	st1.8	{$A0-$D0},[$out],#64
+
+	ld1.8	{$A0-$D0},[$inp],#64
+	eor	$A1,$A1,$T0
+	eor	$B1,$B1,$T1
+	eor	$C1,$C1,$T2
+	eor	$D1,$D1,$T3
+	st1.8	{$A1-$D1},[$out],#64
+
+	ld1.8	{$A1-$D1},[$inp],#64
+	eor	$A2,$A2,$A0
+	 ldp	@K[0],@K[1],[sp,#0]
+	eor	$B2,$B2,$B0
+	 ldp	@K[2],@K[3],[sp,#32]
+	eor	$C2,$C2,$C0
+	eor	$D2,$D2,$D0
+	st1.8	{$A2-$D2},[$out],#64
+
+	ld1.8	{$A2-$D2},[$inp],#64
+	eor	$A3,$A3,$A1
+	eor	$B3,$B3,$B1
+	eor	$C3,$C3,$C1
+	eor	$D3,$D3,$D1
+	st1.8	{$A3-$D3},[$out],#64
+
+	ld1.8	{$A3-$D3},[$inp],#64
+	eor	$A4,$A4,$A2
+	eor	$B4,$B4,$B2
+	eor	$C4,$C4,$C2
+	eor	$D4,$D4,$D2
+	st1.8	{$A4-$D4},[$out],#64
+
+	shl	$A0,$ONE,#1			// 4 -> 8
+	eor	$A5,$A5,$A3
+	eor	$B5,$B5,$B3
+	eor	$C5,$C5,$C3
+	eor	$D5,$D5,$D3
+	st1.8	{$A5-$D5},[$out],#64
+
+	add	@K[3],@K[3],$A0			// += 8
+	add	@K[4],@K[4],$A0
+	add	@K[5],@K[5],$A0
+	add	@K[6],@K[6],$A0
+
+	b.hs	.Loop_outer_512_neon
+
+	adds	$len,$len,#512
+	ushr	$A0,$ONE,#2			// 4 -> 1
+
+	ldp	d8,d9,[sp,#128+0]		// meet ABI requirements
+	ldp	d10,d11,[sp,#128+16]
+	ldp	d12,d13,[sp,#128+32]
+	ldp	d14,d15,[sp,#128+48]
+
+	stp	@K[0],$ONE,[sp,#0]		// wipe off-load area
+	stp	@K[0],$ONE,[sp,#32]
+	stp	@K[0],$ONE,[sp,#64]
+
+	b.eq	.Ldone_512_neon
+
+	cmp	$len,#192
+	sub	@K[3],@K[3],$A0			// -= 1
+	sub	@K[4],@K[4],$A0
+	sub	@K[5],@K[5],$A0
+	add	sp,sp,#128
+	b.hs	.Loop_outer_neon
+
+	eor	@K[1],@K[1],@K[1]
+	eor	@K[2],@K[2],@K[2]
+	eor	@K[3],@K[3],@K[3]
+	eor	@K[4],@K[4],@K[4]
+	eor	@K[5],@K[5],@K[5]
+	eor	@K[6],@K[6],@K[6]
+	b	.Loop_outer
+
+.Ldone_512_neon:
+	ldp	x19,x20,[x29,#16]
+	add	sp,sp,#128+64
+	ldp	x21,x22,[x29,#32]
+	ldp	x23,x24,[x29,#48]
+	ldp	x25,x26,[x29,#64]
+	ldp	x27,x28,[x29,#80]
+	ldp	x29,x30,[sp],#96
+	.inst	0xd50323bf			// autiasp
+	ret
+.size	ChaCha20_512_neon,.-ChaCha20_512_neon
+___
+}
+}}}
+
+foreach (split("\n",$code)) {	# post-process the accumulated assembly, one line at a time
+	s/\`([^\`]*)\`/eval $1/geo;	# expand `...` spans as Perl expressions (compile-time constant folding)
+
+	(s/\b([a-z]+)\.32\b/$1/ and (s/x([0-9]+)/w$1/g or 1))	or	# '.32'-suffixed insns: drop suffix, xN -> wN
+	(m/\b(eor|ext|mov)\b/ and (s/\.4s/\.16b/g or 1))	or	# bitwise/move vector ops: use .16b arrangement
+	(s/\b((?:ld|st)1)\.8\b/$1/ and (s/\.4s/\.16b/g or 1))	or	# ld1.8/st1.8: drop suffix, byte-wise arrangement
+	(m/\b(ld|st)[rp]\b/ and (s/v([0-9]+)\.4s/q$1/g or 1))	or	# ldr/str/ldp/stp of vectors: vN.4s -> qN
+	(s/\brev32\.16\b/rev32/ and (s/\.4s/\.8h/g or 1));	# rev32.16: plain rev32 on .8h halfwords
+
+	#s/\bq([0-9]+)#(lo|hi)/sprintf "d%d",2*$1+($2 eq "hi")/geo;	# (32-bit-NEON rewrite; not needed for AArch64)
+
+	print $_,"\n";
+}
+close STDOUT or die "error closing STDOUT: $!";	# flush
diff --git a/ap/lib/libssl/openssl-1.1.1o/crypto/chacha/asm/chacha-c64xplus.pl b/ap/lib/libssl/openssl-1.1.1o/crypto/chacha/asm/chacha-c64xplus.pl
new file mode 100755
index 0000000..4bd18a4
--- /dev/null
+++ b/ap/lib/libssl/openssl-1.1.1o/crypto/chacha/asm/chacha-c64xplus.pl
@@ -0,0 +1,926 @@
+#! /usr/bin/env perl
+# Copyright 2016-2020 The OpenSSL Project Authors. All Rights Reserved.
+#
+# Licensed under the OpenSSL license (the "License").  You may not use
+# this file except in compliance with the License.  You can obtain a copy
+# in the file LICENSE in the source distribution or at
+# https://www.openssl.org/source/license.html
+
+#
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+#
+# ChaCha20 for C64x+.
+#
+# October 2015
+#
+# Performance is 3.54 cycles per processed byte, which is ~4.3 times
+# faster than code generated by TI compiler. Compiler also disables
+# interrupts for some reason, thus making interrupt response time
+# dependent on input length. This module on the other hand is free
+# from such limitation.
+
+$output=pop;
+open STDOUT,">$output";
+
+($OUT,$INP,$LEN,$KEYB,$COUNTERA)=("A4","B4","A6","B6","A8");
+($KEYA,$COUNTERB,$STEP)=("A7","B7","A3");
+
+@X=  ("A16","B16","A17","B17","A18","B18","A19","B19",
+      "A20","B20","A21","B21","A22","B22","A23","B23");
+@Y=  ("A24","B24","A25","B25","A26","B26","A27","B27",
+      "A28","B28","A29","B29","A30","B30","A31","B31");
+@DAT=("A6", "A7", "B6", "B7", "A8", "A9", "B8", "B9",
+      "A10","A11","B10","B11","A12","A13","B12","B13");
+
+# yes, overlaps with @DAT, used only in 2x interleave code path...
+@K2x=("A6", "B6", "A7", "B7", "A8", "B8", "A9", "B9",
+      "A10","B10","A11","B11","A2", "B2", "A13","B13");
+
+$code.=<<___;
+	.text
+
+	.if	.ASSEMBLER_VERSION<7000000
+	.asg	0,__TI_EABI__
+	.endif
+	.if	__TI_EABI__
+	.asg	ChaCha20_ctr32,_ChaCha20_ctr32
+	.endif
+
+	.asg	B3,RA
+	.asg	A15,FP
+	.asg	B15,SP
+
+	.global	_ChaCha20_ctr32
+	.align	32
+_ChaCha20_ctr32:
+	.asmfunc	stack_usage(40+64)
+	MV	$LEN,A0			; reassign
+  [!A0]	BNOP	RA			; no data
+|| [A0]	STW	FP,*SP--(40+64)		; save frame pointer and alloca(40+64)
+|| [A0]	MV	SP,FP
+   [A0]	STDW	B13:B12,*SP[4+8]	; ABI says so
+|| [A0]	MV	$KEYB,$KEYA
+|| [A0]	MV	$COUNTERA,$COUNTERB
+   [A0]	STDW	B11:B10,*SP[3+8]
+|| [A0]	STDW	A13:A12,*FP[-3]
+   [A0]	STDW	A11:A10,*FP[-4]
+|| [A0]	MVK	128,$STEP		; 2 * input block size
+
+   [A0]	LDW	*${KEYA}[0],@Y[4]	; load key
+|| [A0]	LDW	*${KEYB}[1],@Y[5]
+|| [A0]	MVK	0x00007865,@Y[0]	; synthesize sigma
+|| [A0]	MVK	0x0000646e,@Y[1]
+   [A0]	LDW	*${KEYA}[2],@Y[6]
+|| [A0]	LDW	*${KEYB}[3],@Y[7]
+|| [A0]	MVKH	0x61700000,@Y[0]
+|| [A0]	MVKH	0x33200000,@Y[1]
+	LDW	*${KEYA}[4],@Y[8]
+||	LDW	*${KEYB}[5],@Y[9]
+||	MVK	0x00002d32,@Y[2]
+||	MVK	0x00006574,@Y[3]
+	LDW	*${KEYA}[6],@Y[10]
+||	LDW	*${KEYB}[7],@Y[11]
+||	MVKH	0x79620000,@Y[2]
+||	MVKH	0x6b200000,@Y[3]
+	LDW	*${COUNTERA}[0],@Y[12]	; load counter||nonce
+||	LDW	*${COUNTERB}[1],@Y[13]
+||	CMPLTU	A0,$STEP,A1		; is length < 2*blocks?
+	LDW	*${COUNTERA}[2],@Y[14]
+||	LDW	*${COUNTERB}[3],@Y[15]
+|| [A1]	BNOP	top1x?
+   [A1]	MVK	64,$STEP		; input block size
+||	MVK	10,B0			; inner loop counter
+
+	DMV	@Y[2],@Y[0],@X[2]:@X[0]	; copy block
+||	DMV	@Y[3],@Y[1],@X[3]:@X[1]
+||[!A1]	STDW	@Y[2]:@Y[0],*FP[-12]	; offload key material to stack
+||[!A1]	STDW	@Y[3]:@Y[1],*SP[2]
+	DMV	@Y[6],@Y[4],@X[6]:@X[4]
+||	DMV	@Y[7],@Y[5],@X[7]:@X[5]
+||[!A1]	STDW	@Y[6]:@Y[4],*FP[-10]
+||[!A1]	STDW	@Y[7]:@Y[5],*SP[4]
+	DMV	@Y[10],@Y[8],@X[10]:@X[8]
+||	DMV	@Y[11],@Y[9],@X[11]:@X[9]
+||[!A1]	STDW	@Y[10]:@Y[8],*FP[-8]
+||[!A1]	STDW	@Y[11]:@Y[9],*SP[6]
+	DMV	@Y[14],@Y[12],@X[14]:@X[12]
+||	DMV	@Y[15],@Y[13],@X[15]:@X[13]
+||[!A1]	MV	@Y[12],@K2x[12]		; counter
+||[!A1]	MV	@Y[13],@K2x[13]
+||[!A1]	STW	@Y[14],*FP[-6*2]
+||[!A1]	STW	@Y[15],*SP[8*2]
+___
+{	################################################################
+	# 2x interleave gives 50% performance improvement
+	#
+my ($a0,$a1,$a2,$a3) = (0..3);
+my ($b0,$b1,$b2,$b3) = (4..7);
+my ($c0,$c1,$c2,$c3) = (8..11);
+my ($d0,$d1,$d2,$d3) = (12..15);
+
+$code.=<<___;
+outer2x?:
+	ADD	@X[$b1],@X[$a1],@X[$a1]
+||	ADD	@X[$b2],@X[$a2],@X[$a2]
+||	ADD	@X[$b0],@X[$a0],@X[$a0]
+||	ADD	@X[$b3],@X[$a3],@X[$a3]
+||	 DMV	@Y[2],@Y[0],@K2x[2]:@K2x[0]
+||	 DMV	@Y[3],@Y[1],@K2x[3]:@K2x[1]
+	XOR	@X[$a1],@X[$d1],@X[$d1]
+||	XOR	@X[$a2],@X[$d2],@X[$d2]
+||	XOR	@X[$a0],@X[$d0],@X[$d0]
+||	XOR	@X[$a3],@X[$d3],@X[$d3]
+||	 DMV	@Y[6],@Y[4],@K2x[6]:@K2x[4]
+||	 DMV	@Y[7],@Y[5],@K2x[7]:@K2x[5]
+	SWAP2	@X[$d1],@X[$d1]		; rotate by 16
+||	SWAP2	@X[$d2],@X[$d2]
+||	SWAP2	@X[$d0],@X[$d0]
+||	SWAP2	@X[$d3],@X[$d3]
+
+	ADD	@X[$d1],@X[$c1],@X[$c1]
+||	ADD	@X[$d2],@X[$c2],@X[$c2]
+||	ADD	@X[$d0],@X[$c0],@X[$c0]
+||	ADD	@X[$d3],@X[$c3],@X[$c3]
+||	 DMV	@Y[10],@Y[8],@K2x[10]:@K2x[8]
+||	 DMV	@Y[11],@Y[9],@K2x[11]:@K2x[9]
+	XOR	@X[$c1],@X[$b1],@X[$b1]
+||	XOR	@X[$c2],@X[$b2],@X[$b2]
+||	XOR	@X[$c0],@X[$b0],@X[$b0]
+||	XOR	@X[$c3],@X[$b3],@X[$b3]
+||	 ADD	1,@Y[12],@Y[12]		; adjust counter for 2nd block
+	ROTL	@X[$b1],12,@X[$b1]
+||	ROTL	@X[$b2],12,@X[$b2]
+||	 MV	@Y[14],@K2x[14]
+||	 MV	@Y[15],@K2x[15]
+top2x?:
+	ROTL	@X[$b0],12,@X[$b0]
+||	ROTL	@X[$b3],12,@X[$b3]
+||	 ADD	@Y[$b1],@Y[$a1],@Y[$a1]
+||	 ADD	@Y[$b2],@Y[$a2],@Y[$a2]
+	 ADD	@Y[$b0],@Y[$a0],@Y[$a0]
+||	 ADD	@Y[$b3],@Y[$a3],@Y[$a3]
+
+||	ADD	@X[$b1],@X[$a1],@X[$a1]
+||	ADD	@X[$b2],@X[$a2],@X[$a2]
+||	 XOR	@Y[$a1],@Y[$d1],@Y[$d1]
+||	 XOR	@Y[$a2],@Y[$d2],@Y[$d2]
+	 XOR	@Y[$a0],@Y[$d0],@Y[$d0]
+||	 XOR	@Y[$a3],@Y[$d3],@Y[$d3]
+||	ADD	@X[$b0],@X[$a0],@X[$a0]
+||	ADD	@X[$b3],@X[$a3],@X[$a3]
+||	XOR	@X[$a1],@X[$d1],@X[$d1]
+||	XOR	@X[$a2],@X[$d2],@X[$d2]
+	XOR	@X[$a0],@X[$d0],@X[$d0]
+||	XOR	@X[$a3],@X[$d3],@X[$d3]
+||	ROTL	@X[$d1],8,@X[$d1]
+||	ROTL	@X[$d2],8,@X[$d2]
+||	 SWAP2	@Y[$d1],@Y[$d1]		; rotate by 16
+||	 SWAP2	@Y[$d2],@Y[$d2]
+||	 SWAP2	@Y[$d0],@Y[$d0]
+||	 SWAP2	@Y[$d3],@Y[$d3]
+	ROTL	@X[$d0],8,@X[$d0]
+||	ROTL	@X[$d3],8,@X[$d3]
+||	 ADD	@Y[$d1],@Y[$c1],@Y[$c1]
+||	 ADD	@Y[$d2],@Y[$c2],@Y[$c2]
+||	 ADD	@Y[$d0],@Y[$c0],@Y[$c0]
+||	 ADD	@Y[$d3],@Y[$c3],@Y[$c3]
+||	BNOP	middle2x1?		; protect from interrupt
+
+	ADD	@X[$d1],@X[$c1],@X[$c1]
+||	ADD	@X[$d2],@X[$c2],@X[$c2]
+||	 XOR	@Y[$c1],@Y[$b1],@Y[$b1]
+||	 XOR	@Y[$c2],@Y[$b2],@Y[$b2]
+||	 XOR	@Y[$c0],@Y[$b0],@Y[$b0]
+||	 XOR	@Y[$c3],@Y[$b3],@Y[$b3]
+	ADD	@X[$d0],@X[$c0],@X[$c0]
+||	ADD	@X[$d3],@X[$c3],@X[$c3]
+||	XOR	@X[$c1],@X[$b1],@X[$b1]
+||	XOR	@X[$c2],@X[$b2],@X[$b2]
+||	ROTL	@X[$d1],0,@X[$d2]	; moved to avoid cross-path stall
+||	ROTL	@X[$d2],0,@X[$d3]
+	XOR	@X[$c0],@X[$b0],@X[$b0]
+||	XOR	@X[$c3],@X[$b3],@X[$b3]
+||	MV	@X[$d0],@X[$d1]
+||	MV	@X[$d3],@X[$d0]
+||	 ROTL	@Y[$b1],12,@Y[$b1]
+||	 ROTL	@Y[$b2],12,@Y[$b2]
+	ROTL	@X[$b1],7,@X[$b0]	; avoided cross-path stall
+||	ROTL	@X[$b2],7,@X[$b1]
+	ROTL	@X[$b0],7,@X[$b3]
+||	ROTL	@X[$b3],7,@X[$b2]
+middle2x1?:
+
+	 ROTL	@Y[$b0],12,@Y[$b0]
+||	 ROTL	@Y[$b3],12,@Y[$b3]
+||	ADD	@X[$b0],@X[$a0],@X[$a0]
+||	ADD	@X[$b1],@X[$a1],@X[$a1]
+	ADD	@X[$b2],@X[$a2],@X[$a2]
+||	ADD	@X[$b3],@X[$a3],@X[$a3]
+
+||	 ADD	@Y[$b1],@Y[$a1],@Y[$a1]
+||	 ADD	@Y[$b2],@Y[$a2],@Y[$a2]
+||	XOR	@X[$a0],@X[$d0],@X[$d0]
+||	XOR	@X[$a1],@X[$d1],@X[$d1]
+	XOR	@X[$a2],@X[$d2],@X[$d2]
+||	XOR	@X[$a3],@X[$d3],@X[$d3]
+||	 ADD	@Y[$b0],@Y[$a0],@Y[$a0]
+||	 ADD	@Y[$b3],@Y[$a3],@Y[$a3]
+||	 XOR	@Y[$a1],@Y[$d1],@Y[$d1]
+||	 XOR	@Y[$a2],@Y[$d2],@Y[$d2]
+	 XOR	@Y[$a0],@Y[$d0],@Y[$d0]
+||	 XOR	@Y[$a3],@Y[$d3],@Y[$d3]
+||	 ROTL	@Y[$d1],8,@Y[$d1]
+||	 ROTL	@Y[$d2],8,@Y[$d2]
+||	SWAP2	@X[$d0],@X[$d0]		; rotate by 16
+||	SWAP2	@X[$d1],@X[$d1]
+||	SWAP2	@X[$d2],@X[$d2]
+||	SWAP2	@X[$d3],@X[$d3]
+	 ROTL	@Y[$d0],8,@Y[$d0]
+||	 ROTL	@Y[$d3],8,@Y[$d3]
+||	ADD	@X[$d0],@X[$c2],@X[$c2]
+||	ADD	@X[$d1],@X[$c3],@X[$c3]
+||	ADD	@X[$d2],@X[$c0],@X[$c0]
+||	ADD	@X[$d3],@X[$c1],@X[$c1]
+||	BNOP	middle2x2?		; protect from interrupt
+
+	 ADD	@Y[$d1],@Y[$c1],@Y[$c1]
+||	 ADD	@Y[$d2],@Y[$c2],@Y[$c2]
+||	XOR	@X[$c2],@X[$b0],@X[$b0]
+||	XOR	@X[$c3],@X[$b1],@X[$b1]
+||	XOR	@X[$c0],@X[$b2],@X[$b2]
+||	XOR	@X[$c1],@X[$b3],@X[$b3]
+	 ADD	@Y[$d0],@Y[$c0],@Y[$c0]
+||	 ADD	@Y[$d3],@Y[$c3],@Y[$c3]
+||	 XOR	@Y[$c1],@Y[$b1],@Y[$b1]
+||	 XOR	@Y[$c2],@Y[$b2],@Y[$b2]
+||	 ROTL	@Y[$d1],0,@Y[$d2]	; moved to avoid cross-path stall
+||	 ROTL	@Y[$d2],0,@Y[$d3]
+	 XOR	@Y[$c0],@Y[$b0],@Y[$b0]
+||	 XOR	@Y[$c3],@Y[$b3],@Y[$b3]
+||	 MV	@Y[$d0],@Y[$d1]
+||	 MV	@Y[$d3],@Y[$d0]
+||	ROTL	@X[$b0],12,@X[$b0]
+||	ROTL	@X[$b1],12,@X[$b1]
+	 ROTL	@Y[$b1],7,@Y[$b0]	; avoided cross-path stall
+||	 ROTL	@Y[$b2],7,@Y[$b1]
+	 ROTL	@Y[$b0],7,@Y[$b3]
+||	 ROTL	@Y[$b3],7,@Y[$b2]
+middle2x2?:
+
+	ROTL	@X[$b2],12,@X[$b2]
+||	ROTL	@X[$b3],12,@X[$b3]
+||	 ADD	@Y[$b0],@Y[$a0],@Y[$a0]
+||	 ADD	@Y[$b1],@Y[$a1],@Y[$a1]
+	 ADD	@Y[$b2],@Y[$a2],@Y[$a2]
+||	 ADD	@Y[$b3],@Y[$a3],@Y[$a3]
+
+||	ADD	@X[$b0],@X[$a0],@X[$a0]
+||	ADD	@X[$b1],@X[$a1],@X[$a1]
+||	 XOR	@Y[$a0],@Y[$d0],@Y[$d0]
+||	 XOR	@Y[$a1],@Y[$d1],@Y[$d1]
+	 XOR	@Y[$a2],@Y[$d2],@Y[$d2]
+||	 XOR	@Y[$a3],@Y[$d3],@Y[$d3]
+||	ADD	@X[$b2],@X[$a2],@X[$a2]
+||	ADD	@X[$b3],@X[$a3],@X[$a3]
+||	XOR	@X[$a0],@X[$d0],@X[$d0]
+||	XOR	@X[$a1],@X[$d1],@X[$d1]
+	XOR	@X[$a2],@X[$d2],@X[$d2]
+||	XOR	@X[$a3],@X[$d3],@X[$d3]
+||	ROTL	@X[$d0],8,@X[$d0]
+||	ROTL	@X[$d1],8,@X[$d1]
+||	 SWAP2	@Y[$d0],@Y[$d0]		; rotate by 16
+||	 SWAP2	@Y[$d1],@Y[$d1]
+||	 SWAP2	@Y[$d2],@Y[$d2]
+||	 SWAP2	@Y[$d3],@Y[$d3]
+	ROTL	@X[$d2],8,@X[$d2]
+||	ROTL	@X[$d3],8,@X[$d3]
+||	 ADD	@Y[$d0],@Y[$c2],@Y[$c2]
+||	 ADD	@Y[$d1],@Y[$c3],@Y[$c3]
+||	 ADD	@Y[$d2],@Y[$c0],@Y[$c0]
+||	 ADD	@Y[$d3],@Y[$c1],@Y[$c1]
+||	BNOP	bottom2x1?		; protect from interrupt
+
+	ADD	@X[$d0],@X[$c2],@X[$c2]
+||	ADD	@X[$d1],@X[$c3],@X[$c3]
+||	 XOR	@Y[$c2],@Y[$b0],@Y[$b0]
+||	 XOR	@Y[$c3],@Y[$b1],@Y[$b1]
+||	 XOR	@Y[$c0],@Y[$b2],@Y[$b2]
+||	 XOR	@Y[$c1],@Y[$b3],@Y[$b3]
+	ADD	@X[$d2],@X[$c0],@X[$c0]
+||	ADD	@X[$d3],@X[$c1],@X[$c1]
+||	XOR	@X[$c2],@X[$b0],@X[$b0]
+||	XOR	@X[$c3],@X[$b1],@X[$b1]
+||	ROTL	@X[$d0],0,@X[$d3]	; moved to avoid cross-path stall
+||	ROTL	@X[$d1],0,@X[$d0]
+	XOR	@X[$c0],@X[$b2],@X[$b2]
+||	XOR	@X[$c1],@X[$b3],@X[$b3]
+||	MV	@X[$d2],@X[$d1]
+||	MV	@X[$d3],@X[$d2]
+||	 ROTL	@Y[$b0],12,@Y[$b0]
+||	 ROTL	@Y[$b1],12,@Y[$b1]
+	ROTL	@X[$b0],7,@X[$b1]	; avoided cross-path stall
+||	ROTL	@X[$b1],7,@X[$b2]
+	ROTL	@X[$b2],7,@X[$b3]
+||	ROTL	@X[$b3],7,@X[$b0]
+|| [B0]	SUB	B0,1,B0			; decrement inner loop counter
+bottom2x1?:
+
+	 ROTL	@Y[$b2],12,@Y[$b2]
+||	 ROTL	@Y[$b3],12,@Y[$b3]
+|| [B0]	ADD	@X[$b1],@X[$a1],@X[$a1]	; modulo-scheduled
+|| [B0]	ADD	@X[$b2],@X[$a2],@X[$a2]
+   [B0]	ADD	@X[$b0],@X[$a0],@X[$a0]
+|| [B0]	ADD	@X[$b3],@X[$a3],@X[$a3]
+
+||	 ADD	@Y[$b0],@Y[$a0],@Y[$a0]
+||	 ADD	@Y[$b1],@Y[$a1],@Y[$a1]
+|| [B0]	XOR	@X[$a1],@X[$d1],@X[$d1]
+|| [B0]	XOR	@X[$a2],@X[$d2],@X[$d2]
+   [B0]	XOR	@X[$a0],@X[$d0],@X[$d0]
+|| [B0]	XOR	@X[$a3],@X[$d3],@X[$d3]
+||	 ADD	@Y[$b2],@Y[$a2],@Y[$a2]
+||	 ADD	@Y[$b3],@Y[$a3],@Y[$a3]
+||	 XOR	@Y[$a0],@Y[$d0],@Y[$d0]
+||	 XOR	@Y[$a1],@Y[$d1],@Y[$d1]
+	 XOR	@Y[$a2],@Y[$d2],@Y[$d2]
+||	 XOR	@Y[$a3],@Y[$d3],@Y[$d3]
+||	 ROTL	@Y[$d0],8,@Y[$d0]
+||	 ROTL	@Y[$d1],8,@Y[$d1]
+|| [B0]	SWAP2	@X[$d1],@X[$d1]		; rotate by 16
+|| [B0]	SWAP2	@X[$d2],@X[$d2]
+|| [B0]	SWAP2	@X[$d0],@X[$d0]
+|| [B0]	SWAP2	@X[$d3],@X[$d3]
+	 ROTL	@Y[$d2],8,@Y[$d2]
+||	 ROTL	@Y[$d3],8,@Y[$d3]
+|| [B0]	ADD	@X[$d1],@X[$c1],@X[$c1]
+|| [B0]	ADD	@X[$d2],@X[$c2],@X[$c2]
+|| [B0]	ADD	@X[$d0],@X[$c0],@X[$c0]
+|| [B0]	ADD	@X[$d3],@X[$c3],@X[$c3]
+|| [B0]	BNOP	top2x?			; even protects from interrupt
+
+	 ADD	@Y[$d0],@Y[$c2],@Y[$c2]
+||	 ADD	@Y[$d1],@Y[$c3],@Y[$c3]
+|| [B0]	XOR	@X[$c1],@X[$b1],@X[$b1]
+|| [B0]	XOR	@X[$c2],@X[$b2],@X[$b2]
+|| [B0]	XOR	@X[$c0],@X[$b0],@X[$b0]
+|| [B0]	XOR	@X[$c3],@X[$b3],@X[$b3]
+	 ADD	@Y[$d2],@Y[$c0],@Y[$c0]
+||	 ADD	@Y[$d3],@Y[$c1],@Y[$c1]
+||	 XOR	@Y[$c2],@Y[$b0],@Y[$b0]
+||	 XOR	@Y[$c3],@Y[$b1],@Y[$b1]
+||	 ROTL	@Y[$d0],0,@Y[$d3]	; moved to avoid cross-path stall
+||	 ROTL	@Y[$d1],0,@Y[$d0]
+	 XOR	@Y[$c0],@Y[$b2],@Y[$b2]
+||	 XOR	@Y[$c1],@Y[$b3],@Y[$b3]
+||	 MV	@Y[$d2],@Y[$d1]
+||	 MV	@Y[$d3],@Y[$d2]
+|| [B0]	ROTL	@X[$b1],12,@X[$b1]
+|| [B0]	ROTL	@X[$b2],12,@X[$b2]
+	 ROTL	@Y[$b0],7,@Y[$b1]	; avoided cross-path stall
+||	 ROTL	@Y[$b1],7,@Y[$b2]
+	 ROTL	@Y[$b2],7,@Y[$b3]
+||	 ROTL	@Y[$b3],7,@Y[$b0]
+bottom2x2?:
+___
+}
+
+$code.=<<___;
+	ADD	@K2x[0],@X[0],@X[0]	; accumulate key material
+||	ADD	@K2x[1],@X[1],@X[1]
+||	ADD	@K2x[2],@X[2],@X[2]
+||	ADD	@K2x[3],@X[3],@X[3]
+	 ADD	@K2x[0],@Y[0],@Y[0]
+||	 ADD	@K2x[1],@Y[1],@Y[1]
+||	 ADD	@K2x[2],@Y[2],@Y[2]
+||	 ADD	@K2x[3],@Y[3],@Y[3]
+||	LDNDW	*${INP}++[8],@DAT[1]:@DAT[0]
+	ADD	@K2x[4],@X[4],@X[4]
+||	ADD	@K2x[5],@X[5],@X[5]
+||	ADD	@K2x[6],@X[6],@X[6]
+||	ADD	@K2x[7],@X[7],@X[7]
+||	LDNDW	*${INP}[-7],@DAT[3]:@DAT[2]
+	 ADD	@K2x[4],@Y[4],@Y[4]
+||	 ADD	@K2x[5],@Y[5],@Y[5]
+||	 ADD	@K2x[6],@Y[6],@Y[6]
+||	 ADD	@K2x[7],@Y[7],@Y[7]
+||	LDNDW	*${INP}[-6],@DAT[5]:@DAT[4]
+	ADD	@K2x[8],@X[8],@X[8]
+||	ADD	@K2x[9],@X[9],@X[9]
+||	ADD	@K2x[10],@X[10],@X[10]
+||	ADD	@K2x[11],@X[11],@X[11]
+||	LDNDW	*${INP}[-5],@DAT[7]:@DAT[6]
+	 ADD	@K2x[8],@Y[8],@Y[8]
+||	 ADD	@K2x[9],@Y[9],@Y[9]
+||	 ADD	@K2x[10],@Y[10],@Y[10]
+||	 ADD	@K2x[11],@Y[11],@Y[11]
+||	LDNDW	*${INP}[-4],@DAT[9]:@DAT[8]
+	ADD	@K2x[12],@X[12],@X[12]
+||	ADD	@K2x[13],@X[13],@X[13]
+||	ADD	@K2x[14],@X[14],@X[14]
+||	ADD	@K2x[15],@X[15],@X[15]
+||	LDNDW	*${INP}[-3],@DAT[11]:@DAT[10]
+	 ADD	@K2x[12],@Y[12],@Y[12]
+||	 ADD	@K2x[13],@Y[13],@Y[13]
+||	 ADD	@K2x[14],@Y[14],@Y[14]
+||	 ADD	@K2x[15],@Y[15],@Y[15]
+||	LDNDW	*${INP}[-2],@DAT[13]:@DAT[12]
+	 ADD	1,@Y[12],@Y[12]		; adjust counter for 2nd block
+||	ADD	2,@K2x[12],@K2x[12]	; increment counter
+||	LDNDW	*${INP}[-1],@DAT[15]:@DAT[14]
+
+	.if	.BIG_ENDIAN
+	SWAP2	@X[0],@X[0]
+||	SWAP2	@X[1],@X[1]
+||	SWAP2	@X[2],@X[2]
+||	SWAP2	@X[3],@X[3]
+	SWAP2	@X[4],@X[4]
+||	SWAP2	@X[5],@X[5]
+||	SWAP2	@X[6],@X[6]
+||	SWAP2	@X[7],@X[7]
+	SWAP2	@X[8],@X[8]
+||	SWAP2	@X[9],@X[9]
+||	SWAP4	@X[0],@X[1]
+||	SWAP4	@X[1],@X[0]
+	SWAP2	@X[10],@X[10]
+||	SWAP2	@X[11],@X[11]
+||	SWAP4	@X[2],@X[3]
+||	SWAP4	@X[3],@X[2]
+	SWAP2	@X[12],@X[12]
+||	SWAP2	@X[13],@X[13]
+||	SWAP4	@X[4],@X[5]
+||	SWAP4	@X[5],@X[4]
+	SWAP2	@X[14],@X[14]
+||	SWAP2	@X[15],@X[15]
+||	SWAP4	@X[6],@X[7]
+||	SWAP4	@X[7],@X[6]
+	SWAP4	@X[8],@X[9]
+||	SWAP4	@X[9],@X[8]
+||	 SWAP2	@Y[0],@Y[0]
+||	 SWAP2	@Y[1],@Y[1]
+	SWAP4	@X[10],@X[11]
+||	SWAP4	@X[11],@X[10]
+||	 SWAP2	@Y[2],@Y[2]
+||	 SWAP2	@Y[3],@Y[3]
+	SWAP4	@X[12],@X[13]
+||	SWAP4	@X[13],@X[12]
+||	 SWAP2	@Y[4],@Y[4]
+||	 SWAP2	@Y[5],@Y[5]
+	SWAP4	@X[14],@X[15]
+||	SWAP4	@X[15],@X[14]
+||	 SWAP2	@Y[6],@Y[6]
+||	 SWAP2	@Y[7],@Y[7]
+	 SWAP2	@Y[8],@Y[8]
+||	 SWAP2	@Y[9],@Y[9]
+||	 SWAP4	@Y[0],@Y[1]
+||	 SWAP4	@Y[1],@Y[0]
+	 SWAP2	@Y[10],@Y[10]
+||	 SWAP2	@Y[11],@Y[11]
+||	 SWAP4	@Y[2],@Y[3]
+||	 SWAP4	@Y[3],@Y[2]
+	 SWAP2	@Y[12],@Y[12]
+||	 SWAP2	@Y[13],@Y[13]
+||	 SWAP4	@Y[4],@Y[5]
+||	 SWAP4	@Y[5],@Y[4]
+	 SWAP2	@Y[14],@Y[14]
+||	 SWAP2	@Y[15],@Y[15]
+||	 SWAP4	@Y[6],@Y[7]
+||	 SWAP4	@Y[7],@Y[6]
+	 SWAP4	@Y[8],@Y[9]
+||	 SWAP4	@Y[9],@Y[8]
+	 SWAP4	@Y[10],@Y[11]
+||	 SWAP4	@Y[11],@Y[10]
+	 SWAP4	@Y[12],@Y[13]
+||	 SWAP4	@Y[13],@Y[12]
+	 SWAP4	@Y[14],@Y[15]
+||	 SWAP4	@Y[15],@Y[14]
+	.endif
+
+	XOR	@DAT[0],@X[0],@X[0]	; xor 1st block
+||	XOR	@DAT[3],@X[3],@X[3]
+||	XOR	@DAT[2],@X[2],@X[1]
+||	XOR	@DAT[1],@X[1],@X[2]
+||	LDNDW	*${INP}++[8],@DAT[1]:@DAT[0]
+	XOR	@DAT[4],@X[4],@X[4]
+||	XOR	@DAT[7],@X[7],@X[7]
+||	LDNDW	*${INP}[-7],@DAT[3]:@DAT[2]
+	XOR	@DAT[6],@X[6],@X[5]
+||	XOR	@DAT[5],@X[5],@X[6]
+||	LDNDW	*${INP}[-6],@DAT[5]:@DAT[4]
+	XOR	@DAT[8],@X[8],@X[8]
+||	XOR	@DAT[11],@X[11],@X[11]
+||	LDNDW	*${INP}[-5],@DAT[7]:@DAT[6]
+	XOR	@DAT[10],@X[10],@X[9]
+||	XOR	@DAT[9],@X[9],@X[10]
+||	LDNDW	*${INP}[-4],@DAT[9]:@DAT[8]
+	XOR	@DAT[12],@X[12],@X[12]
+||	XOR	@DAT[15],@X[15],@X[15]
+||	LDNDW	*${INP}[-3],@DAT[11]:@DAT[10]
+	XOR	@DAT[14],@X[14],@X[13]
+||	XOR	@DAT[13],@X[13],@X[14]
+||	LDNDW	*${INP}[-2],@DAT[13]:@DAT[12]
+   [A0]	SUB	A0,$STEP,A0		; SUB	A0,128,A0
+||	LDNDW	*${INP}[-1],@DAT[15]:@DAT[14]
+
+	XOR	@Y[0],@DAT[0],@DAT[0]	; xor 2nd block
+||	XOR	@Y[1],@DAT[1],@DAT[1]
+||	STNDW	@X[2]:@X[0],*${OUT}++[8]
+	XOR	@Y[2],@DAT[2],@DAT[2]
+||	XOR	@Y[3],@DAT[3],@DAT[3]
+||	STNDW	@X[3]:@X[1],*${OUT}[-7]
+	XOR	@Y[4],@DAT[4],@DAT[4]
+|| [A0]	LDDW	*FP[-12],@X[2]:@X[0]	; re-load key material from stack
+|| [A0]	LDDW	*SP[2],  @X[3]:@X[1]
+	XOR	@Y[5],@DAT[5],@DAT[5]
+||	STNDW	@X[6]:@X[4],*${OUT}[-6]
+	XOR	@Y[6],@DAT[6],@DAT[6]
+||	XOR	@Y[7],@DAT[7],@DAT[7]
+||	STNDW	@X[7]:@X[5],*${OUT}[-5]
+	XOR	@Y[8],@DAT[8],@DAT[8]
+|| [A0]	LDDW	*FP[-10],@X[6]:@X[4]
+|| [A0]	LDDW	*SP[4],  @X[7]:@X[5]
+	XOR	@Y[9],@DAT[9],@DAT[9]
+||	STNDW	@X[10]:@X[8],*${OUT}[-4]
+	XOR	@Y[10],@DAT[10],@DAT[10]
+||	XOR	@Y[11],@DAT[11],@DAT[11]
+||	STNDW	@X[11]:@X[9],*${OUT}[-3]
+	XOR	@Y[12],@DAT[12],@DAT[12]
+|| [A0]	LDDW	*FP[-8], @X[10]:@X[8]
+|| [A0]	LDDW	*SP[6],  @X[11]:@X[9]
+	XOR	@Y[13],@DAT[13],@DAT[13]
+||	STNDW	@X[14]:@X[12],*${OUT}[-2]
+	XOR	@Y[14],@DAT[14],@DAT[14]
+||	XOR	@Y[15],@DAT[15],@DAT[15]
+||	STNDW	@X[15]:@X[13],*${OUT}[-1]
+
+   [A0]	MV	@K2x[12],@X[12]
+|| [A0]	MV	@K2x[13],@X[13]
+|| [A0]	LDW	*FP[-6*2], @X[14]
+|| [A0]	LDW	*SP[8*2],  @X[15]
+
+   [A0]	DMV	@X[2],@X[0],@Y[2]:@Y[0]	; duplicate key material
+||	STNDW	@DAT[1]:@DAT[0],*${OUT}++[8]
+   [A0]	DMV	@X[3],@X[1],@Y[3]:@Y[1]
+||	STNDW	@DAT[3]:@DAT[2],*${OUT}[-7]
+   [A0]	DMV	@X[6],@X[4],@Y[6]:@Y[4]
+||	STNDW	@DAT[5]:@DAT[4],*${OUT}[-6]
+||	CMPLTU	A0,$STEP,A1		; is remaining length < 2*blocks?
+||[!A0]	BNOP	epilogue?
+   [A0]	DMV	@X[7],@X[5],@Y[7]:@Y[5]
+||	STNDW	@DAT[7]:@DAT[6],*${OUT}[-5]
+||[!A1]	BNOP	outer2x?
+   [A0]	DMV	@X[10],@X[8],@Y[10]:@Y[8]
+||	STNDW	@DAT[9]:@DAT[8],*${OUT}[-4]
+   [A0]	DMV	@X[11],@X[9],@Y[11]:@Y[9]
+||	STNDW	@DAT[11]:@DAT[10],*${OUT}[-3]
+   [A0]	DMV	@X[14],@X[12],@Y[14]:@Y[12]
+||	STNDW	@DAT[13]:@DAT[12],*${OUT}[-2]
+   [A0]	DMV	@X[15],@X[13],@Y[15]:@Y[13]
+||	STNDW	@DAT[15]:@DAT[14],*${OUT}[-1]
+;;===== branch to epilogue? is taken here
+   [A1]	MVK	64,$STEP
+|| [A0]	MVK	10,B0			; inner loop counter
+;;===== branch to outer2x? is taken here
+___
+{
+my ($a0,$a1,$a2,$a3) = (0..3);
+my ($b0,$b1,$b2,$b3) = (4..7);
+my ($c0,$c1,$c2,$c3) = (8..11);
+my ($d0,$d1,$d2,$d3) = (12..15);
+
+$code.=<<___;
+top1x?:
+	ADD	@X[$b1],@X[$a1],@X[$a1]
+||	ADD	@X[$b2],@X[$a2],@X[$a2]
+	ADD	@X[$b0],@X[$a0],@X[$a0]
+||	ADD	@X[$b3],@X[$a3],@X[$a3]
+||	XOR	@X[$a1],@X[$d1],@X[$d1]
+||	XOR	@X[$a2],@X[$d2],@X[$d2]
+	XOR	@X[$a0],@X[$d0],@X[$d0]
+||	XOR	@X[$a3],@X[$d3],@X[$d3]
+||	SWAP2	@X[$d1],@X[$d1]		; rotate by 16
+||	SWAP2	@X[$d2],@X[$d2]
+	SWAP2	@X[$d0],@X[$d0]
+||	SWAP2	@X[$d3],@X[$d3]
+
+||	ADD	@X[$d1],@X[$c1],@X[$c1]
+||	ADD	@X[$d2],@X[$c2],@X[$c2]
+	ADD	@X[$d0],@X[$c0],@X[$c0]
+||	ADD	@X[$d3],@X[$c3],@X[$c3]
+||	XOR	@X[$c1],@X[$b1],@X[$b1]
+||	XOR	@X[$c2],@X[$b2],@X[$b2]
+	XOR	@X[$c0],@X[$b0],@X[$b0]
+||	XOR	@X[$c3],@X[$b3],@X[$b3]
+||	ROTL	@X[$b1],12,@X[$b1]
+||	ROTL	@X[$b2],12,@X[$b2]
+	ROTL	@X[$b0],12,@X[$b0]
+||	ROTL	@X[$b3],12,@X[$b3]
+
+	ADD	@X[$b1],@X[$a1],@X[$a1]
+||	ADD	@X[$b2],@X[$a2],@X[$a2]
+	ADD	@X[$b0],@X[$a0],@X[$a0]
+||	ADD	@X[$b3],@X[$a3],@X[$a3]
+||	XOR	@X[$a1],@X[$d1],@X[$d1]
+||	XOR	@X[$a2],@X[$d2],@X[$d2]
+	XOR	@X[$a0],@X[$d0],@X[$d0]
+||	XOR	@X[$a3],@X[$d3],@X[$d3]
+||	ROTL	@X[$d1],8,@X[$d1]
+||	ROTL	@X[$d2],8,@X[$d2]
+	ROTL	@X[$d0],8,@X[$d0]
+||	ROTL	@X[$d3],8,@X[$d3]
+||	BNOP	middle1x?		; protect from interrupt
+
+	ADD	@X[$d1],@X[$c1],@X[$c1]
+||	ADD	@X[$d2],@X[$c2],@X[$c2]
+	ADD	@X[$d0],@X[$c0],@X[$c0]
+||	ADD	@X[$d3],@X[$c3],@X[$c3]
+||	XOR	@X[$c1],@X[$b1],@X[$b1]
+||	XOR	@X[$c2],@X[$b2],@X[$b2]
+||	ROTL	@X[$d1],0,@X[$d2]	; moved to avoid cross-path stall
+||	ROTL	@X[$d2],0,@X[$d3]
+	XOR	@X[$c0],@X[$b0],@X[$b0]
+||	XOR	@X[$c3],@X[$b3],@X[$b3]
+||	ROTL	@X[$d0],0,@X[$d1]
+||	ROTL	@X[$d3],0,@X[$d0]
+	ROTL	@X[$b1],7,@X[$b0]	; avoided cross-path stall
+||	ROTL	@X[$b2],7,@X[$b1]
+	ROTL	@X[$b0],7,@X[$b3]
+||	ROTL	@X[$b3],7,@X[$b2]
+middle1x?:
+
+	ADD	@X[$b0],@X[$a0],@X[$a0]
+||	ADD	@X[$b1],@X[$a1],@X[$a1]
+	ADD	@X[$b2],@X[$a2],@X[$a2]
+||	ADD	@X[$b3],@X[$a3],@X[$a3]
+||	XOR	@X[$a0],@X[$d0],@X[$d0]
+||	XOR	@X[$a1],@X[$d1],@X[$d1]
+	XOR	@X[$a2],@X[$d2],@X[$d2]
+||	XOR	@X[$a3],@X[$d3],@X[$d3]
+||	SWAP2	@X[$d0],@X[$d0]		; rotate by 16
+||	SWAP2	@X[$d1],@X[$d1]
+	SWAP2	@X[$d2],@X[$d2]
+||	SWAP2	@X[$d3],@X[$d3]
+
+||	ADD	@X[$d0],@X[$c2],@X[$c2]
+||	ADD	@X[$d1],@X[$c3],@X[$c3]
+	ADD	@X[$d2],@X[$c0],@X[$c0]
+||	ADD	@X[$d3],@X[$c1],@X[$c1]
+||	XOR	@X[$c2],@X[$b0],@X[$b0]
+||	XOR	@X[$c3],@X[$b1],@X[$b1]
+	XOR	@X[$c0],@X[$b2],@X[$b2]
+||	XOR	@X[$c1],@X[$b3],@X[$b3]
+||	ROTL	@X[$b0],12,@X[$b0]
+||	ROTL	@X[$b1],12,@X[$b1]
+	ROTL	@X[$b2],12,@X[$b2]
+||	ROTL	@X[$b3],12,@X[$b3]
+
+	ADD	@X[$b0],@X[$a0],@X[$a0]
+||	ADD	@X[$b1],@X[$a1],@X[$a1]
+|| [B0]	SUB	B0,1,B0			; decrement inner loop counter
+	ADD	@X[$b2],@X[$a2],@X[$a2]
+||	ADD	@X[$b3],@X[$a3],@X[$a3]
+||	XOR	@X[$a0],@X[$d0],@X[$d0]
+||	XOR	@X[$a1],@X[$d1],@X[$d1]
+	XOR	@X[$a2],@X[$d2],@X[$d2]
+||	XOR	@X[$a3],@X[$d3],@X[$d3]
+||	ROTL	@X[$d0],8,@X[$d0]
+||	ROTL	@X[$d1],8,@X[$d1]
+	ROTL	@X[$d2],8,@X[$d2]
+||	ROTL	@X[$d3],8,@X[$d3]
+|| [B0]	BNOP	top1x?			; even protects from interrupt
+
+	ADD	@X[$d0],@X[$c2],@X[$c2]
+||	ADD	@X[$d1],@X[$c3],@X[$c3]
+	ADD	@X[$d2],@X[$c0],@X[$c0]
+||	ADD	@X[$d3],@X[$c1],@X[$c1]
+||	XOR	@X[$c2],@X[$b0],@X[$b0]
+||	XOR	@X[$c3],@X[$b1],@X[$b1]
+||	ROTL	@X[$d0],0,@X[$d3]	; moved to avoid cross-path stall
+||	ROTL	@X[$d1],0,@X[$d0]
+	XOR	@X[$c0],@X[$b2],@X[$b2]
+||	XOR	@X[$c1],@X[$b3],@X[$b3]
+||	ROTL	@X[$d2],0,@X[$d1]
+||	ROTL	@X[$d3],0,@X[$d2]
+	ROTL	@X[$b0],7,@X[$b1]	; avoided cross-path stall
+||	ROTL	@X[$b1],7,@X[$b2]
+	ROTL	@X[$b2],7,@X[$b3]
+||	ROTL	@X[$b3],7,@X[$b0]
+||[!B0]	CMPLTU	A0,$STEP,A1		; less than 64 bytes left?
+bottom1x?:
+___
+}
+
+$code.=<<___;
+	ADD	@Y[0],@X[0],@X[0]	; accumulate key material
+||	ADD	@Y[1],@X[1],@X[1]
+||	ADD	@Y[2],@X[2],@X[2]
+||	ADD	@Y[3],@X[3],@X[3]
+||[!A1]	LDNDW	*${INP}++[8],@DAT[1]:@DAT[0]
+|| [A1]	BNOP	tail?
+	ADD	@Y[4],@X[4],@X[4]
+||	ADD	@Y[5],@X[5],@X[5]
+||	ADD	@Y[6],@X[6],@X[6]
+||	ADD	@Y[7],@X[7],@X[7]
+||[!A1]	LDNDW	*${INP}[-7],@DAT[3]:@DAT[2]
+	ADD	@Y[8],@X[8],@X[8]
+||	ADD	@Y[9],@X[9],@X[9]
+||	ADD	@Y[10],@X[10],@X[10]
+||	ADD	@Y[11],@X[11],@X[11]
+||[!A1]	LDNDW	*${INP}[-6],@DAT[5]:@DAT[4]
+	ADD	@Y[12],@X[12],@X[12]
+||	ADD	@Y[13],@X[13],@X[13]
+||	ADD	@Y[14],@X[14],@X[14]
+||	ADD	@Y[15],@X[15],@X[15]
+||[!A1]	LDNDW	*${INP}[-5],@DAT[7]:@DAT[6]
+  [!A1]	LDNDW	*${INP}[-4],@DAT[9]:@DAT[8]
+  [!A1]	LDNDW	*${INP}[-3],@DAT[11]:@DAT[10]
+	LDNDW	*${INP}[-2],@DAT[13]:@DAT[12]
+	LDNDW	*${INP}[-1],@DAT[15]:@DAT[14]
+
+	.if	.BIG_ENDIAN
+	SWAP2	@X[0],@X[0]
+||	SWAP2	@X[1],@X[1]
+||	SWAP2	@X[2],@X[2]
+||	SWAP2	@X[3],@X[3]
+	SWAP2	@X[4],@X[4]
+||	SWAP2	@X[5],@X[5]
+||	SWAP2	@X[6],@X[6]
+||	SWAP2	@X[7],@X[7]
+	SWAP2	@X[8],@X[8]
+||	SWAP2	@X[9],@X[9]
+||	SWAP4	@X[0],@X[1]
+||	SWAP4	@X[1],@X[0]
+	SWAP2	@X[10],@X[10]
+||	SWAP2	@X[11],@X[11]
+||	SWAP4	@X[2],@X[3]
+||	SWAP4	@X[3],@X[2]
+	SWAP2	@X[12],@X[12]
+||	SWAP2	@X[13],@X[13]
+||	SWAP4	@X[4],@X[5]
+||	SWAP4	@X[5],@X[4]
+	SWAP2	@X[14],@X[14]
+||	SWAP2	@X[15],@X[15]
+||	SWAP4	@X[6],@X[7]
+||	SWAP4	@X[7],@X[6]
+	SWAP4	@X[8],@X[9]
+||	SWAP4	@X[9],@X[8]
+	SWAP4	@X[10],@X[11]
+||	SWAP4	@X[11],@X[10]
+	SWAP4	@X[12],@X[13]
+||	SWAP4	@X[13],@X[12]
+	SWAP4	@X[14],@X[15]
+||	SWAP4	@X[15],@X[14]
+	.else
+	NOP	1
+	.endif
+
+	XOR	@X[0],@DAT[0],@DAT[0]	; xor with input
+||	XOR	@X[1],@DAT[1],@DAT[1]
+||	XOR	@X[2],@DAT[2],@DAT[2]
+||	XOR	@X[3],@DAT[3],@DAT[3]
+|| [A0]	SUB	A0,$STEP,A0		; SUB	A0,64,A0
+	XOR	@X[4],@DAT[4],@DAT[4]
+||	XOR	@X[5],@DAT[5],@DAT[5]
+||	XOR	@X[6],@DAT[6],@DAT[6]
+||	XOR	@X[7],@DAT[7],@DAT[7]
+||	STNDW	@DAT[1]:@DAT[0],*${OUT}++[8]
+	XOR	@X[8],@DAT[8],@DAT[8]
+||	XOR	@X[9],@DAT[9],@DAT[9]
+||	XOR	@X[10],@DAT[10],@DAT[10]
+||	XOR	@X[11],@DAT[11],@DAT[11]
+||	STNDW	@DAT[3]:@DAT[2],*${OUT}[-7]
+	XOR	@X[12],@DAT[12],@DAT[12]
+||	XOR	@X[13],@DAT[13],@DAT[13]
+||	XOR	@X[14],@DAT[14],@DAT[14]
+||	XOR	@X[15],@DAT[15],@DAT[15]
+||	STNDW	@DAT[5]:@DAT[4],*${OUT}[-6]
+|| [A0]	BNOP	top1x?
+   [A0]	DMV	@Y[2],@Y[0],@X[2]:@X[0]	; duplicate key material
+|| [A0]	DMV	@Y[3],@Y[1],@X[3]:@X[1]
+||	STNDW	@DAT[7]:@DAT[6],*${OUT}[-5]
+   [A0]	DMV	@Y[6],@Y[4],@X[6]:@X[4]
+|| [A0]	DMV	@Y[7],@Y[5],@X[7]:@X[5]
+||	STNDW	@DAT[9]:@DAT[8],*${OUT}[-4]
+   [A0]	DMV	@Y[10],@Y[8],@X[10]:@X[8]
+|| [A0]	DMV	@Y[11],@Y[9],@X[11]:@X[9]
+|| [A0]	ADD	1,@Y[12],@Y[12]		; increment counter
+||	STNDW	@DAT[11]:@DAT[10],*${OUT}[-3]
+   [A0]	DMV	@Y[14],@Y[12],@X[14]:@X[12]
+|| [A0]	DMV	@Y[15],@Y[13],@X[15]:@X[13]
+||	STNDW	@DAT[13]:@DAT[12],*${OUT}[-2]
+   [A0]	MVK	10,B0			; inner loop counter
+||	STNDW	@DAT[15]:@DAT[14],*${OUT}[-1]
+;;===== branch to top1x? is taken here
+
+epilogue?:
+	LDDW	*FP[-4],A11:A10		; ABI says so
+	LDDW	*FP[-3],A13:A12
+||	LDDW	*SP[3+8],B11:B10
+	LDDW	*SP[4+8],B13:B12
+||	BNOP	RA
+	LDW	*++SP(40+64),FP		; restore frame pointer
+	NOP	4
+
+tail?:
+	LDBU	*${INP}++[1],B24	; load byte by byte
+||	SUB	A0,1,A0
+||	SUB	A0,1,B1
+  [!B1]	BNOP	epilogue?		; interrupts are disabled for whole time
+|| [A0] LDBU	*${INP}++[1],B24
+|| [A0]	SUB	A0,1,A0
+||	SUB	B1,1,B1
+  [!B1]	BNOP	epilogue?
+|| [A0] LDBU	*${INP}++[1],B24
+|| [A0]	SUB	A0,1,A0
+||	SUB	B1,1,B1
+  [!B1]	BNOP	epilogue?
+||	ROTL	@X[0],0,A24
+|| [A0] LDBU	*${INP}++[1],B24
+|| [A0]	SUB	A0,1,A0
+||	SUB	B1,1,B1
+  [!B1]	BNOP	epilogue?
+||	ROTL	@X[0],24,A24
+|| [A0] LDBU	*${INP}++[1],A24
+|| [A0]	SUB	A0,1,A0
+||	SUB	B1,1,B1
+  [!B1]	BNOP	epilogue?
+||	ROTL	@X[0],16,A24
+|| [A0] LDBU	*${INP}++[1],A24
+|| [A0]	SUB	A0,1,A0
+||	SUB	B1,1,B1
+||	XOR	A24,B24,B25
+	STB	B25,*${OUT}++[1]	; store byte by byte
+||[!B1]	BNOP	epilogue?
+||	ROTL	@X[0],8,A24
+|| [A0] LDBU	*${INP}++[1],A24
+|| [A0]	SUB	A0,1,A0
+||	SUB	B1,1,B1
+||	XOR	A24,B24,B25
+	STB	B25,*${OUT}++[1]
+___
+sub TAIL_STEP {
+my $Xi= shift;
+my $T = ($Xi=~/^B/?"B24":"A24");	# match @X[i] to avoid cross path
+my $D = $T; $D=~tr/AB/BA/;
+my $O = $D; $O=~s/24/25/;
+
+$code.=<<___;
+||[!B1]	BNOP	epilogue?
+||	ROTL	$Xi,0,$T
+|| [A0] LDBU	*${INP}++[1],$D
+|| [A0]	SUB	A0,1,A0
+||	SUB	B1,1,B1
+||	XOR	A24,B24,$O
+	STB	$O,*${OUT}++[1]
+||[!B1]	BNOP	epilogue?
+||	ROTL	$Xi,24,$T
+|| [A0] LDBU	*${INP}++[1],$T
+|| [A0]	SUB	A0,1,A0
+||	SUB	B1,1,B1
+||	XOR	A24,B24,$O
+	STB	$O,*${OUT}++[1]
+||[!B1]	BNOP	epilogue?
+||	ROTL	$Xi,16,$T
+|| [A0] LDBU	*${INP}++[1],$T
+|| [A0]	SUB	A0,1,A0
+||	SUB	B1,1,B1
+||	XOR	A24,B24,$O
+	STB	$O,*${OUT}++[1]
+||[!B1]	BNOP	epilogue?
+||	ROTL	$Xi,8,$T
+|| [A0] LDBU	*${INP}++[1],$T
+|| [A0]	SUB	A0,1,A0
+||	SUB	B1,1,B1
+||	XOR	A24,B24,$O
+	STB	$O,*${OUT}++[1]
+___
+}
+	foreach (1..14) { TAIL_STEP(@X[$_]); }
+$code.=<<___;
+||[!B1]	BNOP	epilogue?
+||	ROTL	@X[15],0,B24
+||	XOR	A24,B24,A25
+	STB	A25,*${OUT}++[1]
+||	ROTL	@X[15],24,B24
+||	XOR	A24,B24,A25
+	STB	A25,*${OUT}++[1]
+||	ROTL	@X[15],16,B24
+||	XOR	A24,B24,A25
+	STB	A25,*${OUT}++[1]
+||	XOR	A24,B24,A25
+	STB	A25,*${OUT}++[1]
+||	XOR	A24,B24,B25
+	STB	B25,*${OUT}++[1]
+	.endasmfunc
+
+	.sect	.const
+	.cstring "ChaCha20 for C64x+, CRYPTOGAMS by <appro\@openssl.org>"
+	.align	4
+___
+
+print $code;
+close STDOUT or die "error closing STDOUT: $!";
diff --git a/ap/lib/libssl/openssl-1.1.1o/crypto/chacha/asm/chacha-ppc.pl b/ap/lib/libssl/openssl-1.1.1o/crypto/chacha/asm/chacha-ppc.pl
new file mode 100755
index 0000000..3073dea
--- /dev/null
+++ b/ap/lib/libssl/openssl-1.1.1o/crypto/chacha/asm/chacha-ppc.pl
@@ -0,0 +1,1348 @@
+#! /usr/bin/env perl
+# Copyright 2016-2020 The OpenSSL Project Authors. All Rights Reserved.
+#
+# Licensed under the OpenSSL license (the "License").  You may not use
+# this file except in compliance with the License.  You can obtain a copy
+# in the file LICENSE in the source distribution or at
+# https://www.openssl.org/source/license.html
+
+#
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+#
+# October 2015
+#
+# ChaCha20 for PowerPC/AltiVec.
+#
+# June 2018
+#
+# Add VSX 2.07 code path. Original 3xAltiVec+1xIALU is well-suited for
+# processors that can't issue more than one vector instruction per
+# cycle. But POWER8 (and POWER9) can issue a pair, and vector-only 4x
+# interleave would perform better. Incidentally PowerISA 2.07 (first
+# implemented by POWER8) defined new usable instructions, hence 4xVSX
+# code path...
+#
+# Performance in cycles per byte out of large buffer.
+#
+#			IALU/gcc-4.x    3xAltiVec+1xIALU	4xVSX
+#
+# Freescale e300	13.6/+115%	-			-
+# PPC74x0/G4e		6.81/+310%	3.81			-
+# PPC970/G5		9.29/+160%	?			-
+# POWER7		8.62/+61%	3.35			-
+# POWER8		8.70/+51%	2.91			2.09
+# POWER9		8.80/+29%	4.44(*)			2.45(**)
+#
+# (*)	this is trade-off result, it's possible to improve it, but
+#	then it would negatively affect all others;
+# (**)	POWER9 seems to be "allergic" to mixing vector and integer
+#	instructions, which is why switch to vector-only code pays
+#	off that much;
+
+$flavour = shift;
+
+# Select ABI-dependent sizes and mnemonics: 64-bit flavours use
+# doubleword loads/stores and an 8-byte stack slot, 32-bit use words.
+if ($flavour =~ /64/) {
+	$SIZE_T	=8;
+	$LRSAVE	=2*$SIZE_T;
+	$STU	="stdu";
+	$POP	="ld";
+	$PUSH	="std";
+	$UCMP	="cmpld";
+} elsif ($flavour =~ /32/) {
+	$SIZE_T	=4;
+	$LRSAVE	=$SIZE_T;
+	$STU	="stwu";
+	$POP	="lwz";
+	$PUSH	="stw";
+	$UCMP	="cmplw";
+} else { die "nonsense $flavour"; }
+
+$LITTLE_ENDIAN = ($flavour=~/le$/) ? 1 : 0;
+
+# Locate the ppc-xlate.pl translator next to this script or in the
+# shared perlasm directory.
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+( $xlate="${dir}ppc-xlate.pl" and -f $xlate ) or
+( $xlate="${dir}../../perlasm/ppc-xlate.pl" and -f $xlate) or
+die "can't locate ppc-xlate.pl";
+
+# Pipe all output through the translator.  Use low-precedence 'or' so
+# the failure check actually applies to open(): the original '||' bound
+# to the non-empty command string, making the die unreachable and
+# leaving open() failures undetected.
+open STDOUT,"| $^X $xlate $flavour ".shift or die "can't call $xlate: $!";
+
+$LOCALS=6*$SIZE_T;
+$FRAME=$LOCALS+64+18*$SIZE_T;	# 64 is for local variables
+
+# Catch-all for unknown mnemonics: any sub call not defined in this
+# script is appended to $code as an assembler instruction, with the
+# package qualifier stripped and '_' mapped to '.' in the mnemonic
+# (simplified x86-style perlasm thunk).
+sub AUTOLOAD()
+{
+    my $mnemonic = $AUTOLOAD;
+    $mnemonic =~ s/.*:://;	# drop Package:: prefix
+    $mnemonic =~ s/_/\./;	# foo_bar -> foo.bar
+    $code .= join("\t", '', $mnemonic, join(',', @_)) . "\n";
+}
+
+my $sp = "r1";
+
+my ($out,$inp,$len,$key,$ctr) = map("r$_",(3..7));
+
+my @x=map("r$_",(16..31));
+my @d=map("r$_",(11,12,14,15));
+my @t=map("r$_",(7..10));
+
+# One ChaCha double-quarter-round over four independent columns (or
+# diagonals), returned as a list of perlasm strings so the caller can
+# interleave this integer stream with vector code.  Starting from the
+# given ($a0,$b0,$c0,$d0) column, the other three quarter-rounds'
+# indices are derived by rotating the low two bits of each index; the
+# rotation amounts 16/12/8/7 are the standard ChaCha constants.
+sub ROUND {
+my ($a0,$b0,$c0,$d0)=@_;
+my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));
+my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
+my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));
+
+    (
+	# a += b; d ^= a; d <<<= 16
+	"&add		(@x[$a0],@x[$a0],@x[$b0])",
+	 "&add		(@x[$a1],@x[$a1],@x[$b1])",
+	  "&add		(@x[$a2],@x[$a2],@x[$b2])",
+	   "&add	(@x[$a3],@x[$a3],@x[$b3])",
+	"&xor		(@x[$d0],@x[$d0],@x[$a0])",
+	 "&xor		(@x[$d1],@x[$d1],@x[$a1])",
+	  "&xor		(@x[$d2],@x[$d2],@x[$a2])",
+	   "&xor	(@x[$d3],@x[$d3],@x[$a3])",
+	"&rotlwi	(@x[$d0],@x[$d0],16)",
+	 "&rotlwi	(@x[$d1],@x[$d1],16)",
+	  "&rotlwi	(@x[$d2],@x[$d2],16)",
+	   "&rotlwi	(@x[$d3],@x[$d3],16)",
+
+	# c += d; b ^= c; b <<<= 12
+	"&add		(@x[$c0],@x[$c0],@x[$d0])",
+	 "&add		(@x[$c1],@x[$c1],@x[$d1])",
+	  "&add		(@x[$c2],@x[$c2],@x[$d2])",
+	   "&add	(@x[$c3],@x[$c3],@x[$d3])",
+	"&xor		(@x[$b0],@x[$b0],@x[$c0])",
+	 "&xor		(@x[$b1],@x[$b1],@x[$c1])",
+	  "&xor		(@x[$b2],@x[$b2],@x[$c2])",
+	   "&xor	(@x[$b3],@x[$b3],@x[$c3])",
+	"&rotlwi	(@x[$b0],@x[$b0],12)",
+	 "&rotlwi	(@x[$b1],@x[$b1],12)",
+	  "&rotlwi	(@x[$b2],@x[$b2],12)",
+	   "&rotlwi	(@x[$b3],@x[$b3],12)",
+
+	# a += b; d ^= a; d <<<= 8
+	"&add		(@x[$a0],@x[$a0],@x[$b0])",
+	 "&add		(@x[$a1],@x[$a1],@x[$b1])",
+	  "&add		(@x[$a2],@x[$a2],@x[$b2])",
+	   "&add	(@x[$a3],@x[$a3],@x[$b3])",
+	"&xor		(@x[$d0],@x[$d0],@x[$a0])",
+	 "&xor		(@x[$d1],@x[$d1],@x[$a1])",
+	  "&xor		(@x[$d2],@x[$d2],@x[$a2])",
+	   "&xor	(@x[$d3],@x[$d3],@x[$a3])",
+	"&rotlwi	(@x[$d0],@x[$d0],8)",
+	 "&rotlwi	(@x[$d1],@x[$d1],8)",
+	  "&rotlwi	(@x[$d2],@x[$d2],8)",
+	   "&rotlwi	(@x[$d3],@x[$d3],8)",
+
+	# c += d; b ^= c; b <<<= 7
+	"&add		(@x[$c0],@x[$c0],@x[$d0])",
+	 "&add		(@x[$c1],@x[$c1],@x[$d1])",
+	  "&add		(@x[$c2],@x[$c2],@x[$d2])",
+	   "&add	(@x[$c3],@x[$c3],@x[$d3])",
+	"&xor		(@x[$b0],@x[$b0],@x[$c0])",
+	 "&xor		(@x[$b1],@x[$b1],@x[$c1])",
+	  "&xor		(@x[$b2],@x[$b2],@x[$c2])",
+	   "&xor	(@x[$b3],@x[$b3],@x[$c3])",
+	"&rotlwi	(@x[$b0],@x[$b0],7)",
+	 "&rotlwi	(@x[$b1],@x[$b1],7)",
+	  "&rotlwi	(@x[$b2],@x[$b2],7)",
+	   "&rotlwi	(@x[$b3],@x[$b3],7)"
+    );
+}
+
+$code.=<<___;
+.machine	"any"
+.text
+
+.globl	.ChaCha20_ctr32_int
+.align	5
+.ChaCha20_ctr32_int:
+__ChaCha20_ctr32_int:
+	${UCMP}i $len,0
+	beqlr-
+
+	$STU	$sp,-$FRAME($sp)
+	mflr	r0
+
+	$PUSH	r14,`$FRAME-$SIZE_T*18`($sp)
+	$PUSH	r15,`$FRAME-$SIZE_T*17`($sp)
+	$PUSH	r16,`$FRAME-$SIZE_T*16`($sp)
+	$PUSH	r17,`$FRAME-$SIZE_T*15`($sp)
+	$PUSH	r18,`$FRAME-$SIZE_T*14`($sp)
+	$PUSH	r19,`$FRAME-$SIZE_T*13`($sp)
+	$PUSH	r20,`$FRAME-$SIZE_T*12`($sp)
+	$PUSH	r21,`$FRAME-$SIZE_T*11`($sp)
+	$PUSH	r22,`$FRAME-$SIZE_T*10`($sp)
+	$PUSH	r23,`$FRAME-$SIZE_T*9`($sp)
+	$PUSH	r24,`$FRAME-$SIZE_T*8`($sp)
+	$PUSH	r25,`$FRAME-$SIZE_T*7`($sp)
+	$PUSH	r26,`$FRAME-$SIZE_T*6`($sp)
+	$PUSH	r27,`$FRAME-$SIZE_T*5`($sp)
+	$PUSH	r28,`$FRAME-$SIZE_T*4`($sp)
+	$PUSH	r29,`$FRAME-$SIZE_T*3`($sp)
+	$PUSH	r30,`$FRAME-$SIZE_T*2`($sp)
+	$PUSH	r31,`$FRAME-$SIZE_T*1`($sp)
+	$PUSH	r0,`$FRAME+$LRSAVE`($sp)
+
+	lwz	@d[0],0($ctr)			# load counter
+	lwz	@d[1],4($ctr)
+	lwz	@d[2],8($ctr)
+	lwz	@d[3],12($ctr)
+
+	bl	__ChaCha20_1x
+
+	$POP	r0,`$FRAME+$LRSAVE`($sp)
+	$POP	r14,`$FRAME-$SIZE_T*18`($sp)
+	$POP	r15,`$FRAME-$SIZE_T*17`($sp)
+	$POP	r16,`$FRAME-$SIZE_T*16`($sp)
+	$POP	r17,`$FRAME-$SIZE_T*15`($sp)
+	$POP	r18,`$FRAME-$SIZE_T*14`($sp)
+	$POP	r19,`$FRAME-$SIZE_T*13`($sp)
+	$POP	r20,`$FRAME-$SIZE_T*12`($sp)
+	$POP	r21,`$FRAME-$SIZE_T*11`($sp)
+	$POP	r22,`$FRAME-$SIZE_T*10`($sp)
+	$POP	r23,`$FRAME-$SIZE_T*9`($sp)
+	$POP	r24,`$FRAME-$SIZE_T*8`($sp)
+	$POP	r25,`$FRAME-$SIZE_T*7`($sp)
+	$POP	r26,`$FRAME-$SIZE_T*6`($sp)
+	$POP	r27,`$FRAME-$SIZE_T*5`($sp)
+	$POP	r28,`$FRAME-$SIZE_T*4`($sp)
+	$POP	r29,`$FRAME-$SIZE_T*3`($sp)
+	$POP	r30,`$FRAME-$SIZE_T*2`($sp)
+	$POP	r31,`$FRAME-$SIZE_T*1`($sp)
+	mtlr	r0
+	addi	$sp,$sp,$FRAME
+	blr
+	.long	0
+	.byte	0,12,4,1,0x80,18,5,0
+	.long	0
+.size	.ChaCha20_ctr32_int,.-.ChaCha20_ctr32_int
+
+.align	5
+__ChaCha20_1x:
+Loop_outer:
+	lis	@x[0],0x6170			# synthesize sigma
+	lis	@x[1],0x3320
+	lis	@x[2],0x7962
+	lis	@x[3],0x6b20
+	ori	@x[0],@x[0],0x7865
+	ori	@x[1],@x[1],0x646e
+	ori	@x[2],@x[2],0x2d32
+	ori	@x[3],@x[3],0x6574
+
+	li	r0,10				# inner loop counter
+	lwz	@x[4],0($key)			# load key
+	lwz	@x[5],4($key)
+	lwz	@x[6],8($key)
+	lwz	@x[7],12($key)
+	lwz	@x[8],16($key)
+	mr	@x[12],@d[0]			# copy counter
+	lwz	@x[9],20($key)
+	mr	@x[13],@d[1]
+	lwz	@x[10],24($key)
+	mr	@x[14],@d[2]
+	lwz	@x[11],28($key)
+	mr	@x[15],@d[3]
+
+	mr	@t[0],@x[4]
+	mr	@t[1],@x[5]
+	mr	@t[2],@x[6]
+	mr	@t[3],@x[7]
+
+	mtctr	r0
+Loop:
+___
+	foreach (&ROUND(0, 4, 8,12)) { eval; }
+	foreach (&ROUND(0, 5,10,15)) { eval; }
+$code.=<<___;
+	bdnz	Loop
+
+	subic	$len,$len,64			# $len-=64
+	addi	@x[0],@x[0],0x7865		# accumulate key block
+	addi	@x[1],@x[1],0x646e
+	addi	@x[2],@x[2],0x2d32
+	addi	@x[3],@x[3],0x6574
+	addis	@x[0],@x[0],0x6170
+	addis	@x[1],@x[1],0x3320
+	addis	@x[2],@x[2],0x7962
+	addis	@x[3],@x[3],0x6b20
+
+	subfe.	r0,r0,r0			# borrow?-1:0
+	add	@x[4],@x[4],@t[0]
+	lwz	@t[0],16($key)
+	add	@x[5],@x[5],@t[1]
+	lwz	@t[1],20($key)
+	add	@x[6],@x[6],@t[2]
+	lwz	@t[2],24($key)
+	add	@x[7],@x[7],@t[3]
+	lwz	@t[3],28($key)
+	add	@x[8],@x[8],@t[0]
+	add	@x[9],@x[9],@t[1]
+	add	@x[10],@x[10],@t[2]
+	add	@x[11],@x[11],@t[3]
+
+	add	@x[12],@x[12],@d[0]
+	add	@x[13],@x[13],@d[1]
+	add	@x[14],@x[14],@d[2]
+	add	@x[15],@x[15],@d[3]
+	addi	@d[0],@d[0],1			# increment counter
+___
+if (!$LITTLE_ENDIAN) { for($i=0;$i<16;$i++) {	# flip byte order
+$code.=<<___;
+	mr	@t[$i&3],@x[$i]
+	rotlwi	@x[$i],@x[$i],8
+	rlwimi	@x[$i],@t[$i&3],24,0,7
+	rlwimi	@x[$i],@t[$i&3],24,16,23
+___
+} }
+$code.=<<___;
+	bne	Ltail				# $len-=64 borrowed
+
+	lwz	@t[0],0($inp)			# load input, aligned or not
+	lwz	@t[1],4($inp)
+	${UCMP}i $len,0				# done already?
+	lwz	@t[2],8($inp)
+	lwz	@t[3],12($inp)
+	xor	@x[0],@x[0],@t[0]		# xor with input
+	lwz	@t[0],16($inp)
+	xor	@x[1],@x[1],@t[1]
+	lwz	@t[1],20($inp)
+	xor	@x[2],@x[2],@t[2]
+	lwz	@t[2],24($inp)
+	xor	@x[3],@x[3],@t[3]
+	lwz	@t[3],28($inp)
+	xor	@x[4],@x[4],@t[0]
+	lwz	@t[0],32($inp)
+	xor	@x[5],@x[5],@t[1]
+	lwz	@t[1],36($inp)
+	xor	@x[6],@x[6],@t[2]
+	lwz	@t[2],40($inp)
+	xor	@x[7],@x[7],@t[3]
+	lwz	@t[3],44($inp)
+	xor	@x[8],@x[8],@t[0]
+	lwz	@t[0],48($inp)
+	xor	@x[9],@x[9],@t[1]
+	lwz	@t[1],52($inp)
+	xor	@x[10],@x[10],@t[2]
+	lwz	@t[2],56($inp)
+	xor	@x[11],@x[11],@t[3]
+	lwz	@t[3],60($inp)
+	xor	@x[12],@x[12],@t[0]
+	stw	@x[0],0($out)			# store output, aligned or not
+	xor	@x[13],@x[13],@t[1]
+	stw	@x[1],4($out)
+	xor	@x[14],@x[14],@t[2]
+	stw	@x[2],8($out)
+	xor	@x[15],@x[15],@t[3]
+	stw	@x[3],12($out)
+	stw	@x[4],16($out)
+	stw	@x[5],20($out)
+	stw	@x[6],24($out)
+	stw	@x[7],28($out)
+	stw	@x[8],32($out)
+	stw	@x[9],36($out)
+	stw	@x[10],40($out)
+	stw	@x[11],44($out)
+	stw	@x[12],48($out)
+	stw	@x[13],52($out)
+	stw	@x[14],56($out)
+	addi	$inp,$inp,64
+	stw	@x[15],60($out)
+	addi	$out,$out,64
+
+	bne	Loop_outer
+
+	blr
+
+.align	4
+Ltail:
+	addi	$len,$len,64			# restore tail length
+	subi	$inp,$inp,1			# prepare for *++ptr
+	subi	$out,$out,1
+	addi	@t[0],$sp,$LOCALS-1
+	mtctr	$len
+
+	stw	@x[0],`$LOCALS+0`($sp)		# save whole block to stack
+	stw	@x[1],`$LOCALS+4`($sp)
+	stw	@x[2],`$LOCALS+8`($sp)
+	stw	@x[3],`$LOCALS+12`($sp)
+	stw	@x[4],`$LOCALS+16`($sp)
+	stw	@x[5],`$LOCALS+20`($sp)
+	stw	@x[6],`$LOCALS+24`($sp)
+	stw	@x[7],`$LOCALS+28`($sp)
+	stw	@x[8],`$LOCALS+32`($sp)
+	stw	@x[9],`$LOCALS+36`($sp)
+	stw	@x[10],`$LOCALS+40`($sp)
+	stw	@x[11],`$LOCALS+44`($sp)
+	stw	@x[12],`$LOCALS+48`($sp)
+	stw	@x[13],`$LOCALS+52`($sp)
+	stw	@x[14],`$LOCALS+56`($sp)
+	stw	@x[15],`$LOCALS+60`($sp)
+
+Loop_tail:					# byte-by-byte loop
+	lbzu	@d[0],1($inp)
+	lbzu	@x[0],1(@t[0])
+	xor	@d[1],@d[0],@x[0]
+	stbu	@d[1],1($out)
+	bdnz	Loop_tail
+
+	stw	$sp,`$LOCALS+0`($sp)		# wipe block on stack
+	stw	$sp,`$LOCALS+4`($sp)
+	stw	$sp,`$LOCALS+8`($sp)
+	stw	$sp,`$LOCALS+12`($sp)
+	stw	$sp,`$LOCALS+16`($sp)
+	stw	$sp,`$LOCALS+20`($sp)
+	stw	$sp,`$LOCALS+24`($sp)
+	stw	$sp,`$LOCALS+28`($sp)
+	stw	$sp,`$LOCALS+32`($sp)
+	stw	$sp,`$LOCALS+36`($sp)
+	stw	$sp,`$LOCALS+40`($sp)
+	stw	$sp,`$LOCALS+44`($sp)
+	stw	$sp,`$LOCALS+48`($sp)
+	stw	$sp,`$LOCALS+52`($sp)
+	stw	$sp,`$LOCALS+56`($sp)
+	stw	$sp,`$LOCALS+60`($sp)
+
+	blr
+	.long	0
+	.byte	0,12,0x14,0,0,0,0,0
+___
+
+{{{
+my ($A0,$B0,$C0,$D0,$A1,$B1,$C1,$D1,$A2,$B2,$C2,$D2)
+				= map("v$_",(0..11));
+my @K				= map("v$_",(12..17));
+my ($FOUR,$sixteen,$twenty4)	= map("v$_",(18..19,23));
+my ($inpperm,$outperm,$outmask)	= map("v$_",(24..26));
+my @D				= map("v$_",(27..31));
+my ($twelve,$seven,$T0,$T1) = @D;
+
+my $FRAME=$LOCALS+64+10*16+18*$SIZE_T;	# 10*16 is for v23-v31 offload
+
+# One AltiVec/VMX quarter-round over a whole 4x32-bit vector lane,
+# returned as a list of perlasm strings.  Rotations by 16 and 24 bits
+# are performed with vperm through precomputed byte permutations
+# ($sixteen, $twenty4); 12- and 7-bit rotations use vrlw with splatted
+# counts ($twelve, $seven).  The trailing vrldoi triple shifts the
+# b/c/d rows so the next round's columns/diagonals line up in place;
+# the last argument ($odd) selects the even- vs odd-round shift
+# direction for b and d.
+sub VMXROUND {
+my $odd = pop;
+my ($a,$b,$c,$d)=@_;
+
+	(
+	"&vadduwm	('$a','$a','$b')",
+	"&vxor		('$d','$d','$a')",
+	"&vperm		('$d','$d','$d','$sixteen')",
+
+	"&vadduwm	('$c','$c','$d')",
+	"&vxor		('$b','$b','$c')",
+	"&vrlw		('$b','$b','$twelve')",
+
+	"&vadduwm	('$a','$a','$b')",
+	"&vxor		('$d','$d','$a')",
+	"&vperm		('$d','$d','$d','$twenty4')",
+
+	"&vadduwm	('$c','$c','$d')",
+	"&vxor		('$b','$b','$c')",
+	"&vrlw		('$b','$b','$seven')",
+
+	"&vrldoi	('$c','$c',8)",
+	"&vrldoi	('$b','$b',$odd?4:12)",
+	"&vrldoi	('$d','$d',$odd?12:4)"
+	);
+}
+
+$code.=<<___;
+
+.globl	.ChaCha20_ctr32_vmx
+.align	5
+.ChaCha20_ctr32_vmx:
+	${UCMP}i $len,256
+	blt	__ChaCha20_ctr32_int
+
+	$STU	$sp,-$FRAME($sp)
+	mflr	r0
+	li	r10,`15+$LOCALS+64`
+	li	r11,`31+$LOCALS+64`
+	mfspr	r12,256
+	stvx	v23,r10,$sp
+	addi	r10,r10,32
+	stvx	v24,r11,$sp
+	addi	r11,r11,32
+	stvx	v25,r10,$sp
+	addi	r10,r10,32
+	stvx	v26,r11,$sp
+	addi	r11,r11,32
+	stvx	v27,r10,$sp
+	addi	r10,r10,32
+	stvx	v28,r11,$sp
+	addi	r11,r11,32
+	stvx	v29,r10,$sp
+	addi	r10,r10,32
+	stvx	v30,r11,$sp
+	stvx	v31,r10,$sp
+	stw	r12,`$FRAME-$SIZE_T*18-4`($sp)	# save vrsave
+	$PUSH	r14,`$FRAME-$SIZE_T*18`($sp)
+	$PUSH	r15,`$FRAME-$SIZE_T*17`($sp)
+	$PUSH	r16,`$FRAME-$SIZE_T*16`($sp)
+	$PUSH	r17,`$FRAME-$SIZE_T*15`($sp)
+	$PUSH	r18,`$FRAME-$SIZE_T*14`($sp)
+	$PUSH	r19,`$FRAME-$SIZE_T*13`($sp)
+	$PUSH	r20,`$FRAME-$SIZE_T*12`($sp)
+	$PUSH	r21,`$FRAME-$SIZE_T*11`($sp)
+	$PUSH	r22,`$FRAME-$SIZE_T*10`($sp)
+	$PUSH	r23,`$FRAME-$SIZE_T*9`($sp)
+	$PUSH	r24,`$FRAME-$SIZE_T*8`($sp)
+	$PUSH	r25,`$FRAME-$SIZE_T*7`($sp)
+	$PUSH	r26,`$FRAME-$SIZE_T*6`($sp)
+	$PUSH	r27,`$FRAME-$SIZE_T*5`($sp)
+	$PUSH	r28,`$FRAME-$SIZE_T*4`($sp)
+	$PUSH	r29,`$FRAME-$SIZE_T*3`($sp)
+	$PUSH	r30,`$FRAME-$SIZE_T*2`($sp)
+	$PUSH	r31,`$FRAME-$SIZE_T*1`($sp)
+	li	r12,-4096+511
+	$PUSH	r0, `$FRAME+$LRSAVE`($sp)
+	mtspr	256,r12				# preserve 29 AltiVec registers
+
+	bl	Lconsts				# returns pointer Lsigma in r12
+	li	@x[0],16
+	li	@x[1],32
+	li	@x[2],48
+	li	@x[3],64
+	li	@x[4],31			# 31 is not a typo
+	li	@x[5],15			# nor is 15
+
+	lvx	@K[1],0,$key			# load key
+	?lvsr	$T0,0,$key			# prepare unaligned load
+	lvx	@K[2],@x[0],$key
+	lvx	@D[0],@x[4],$key
+
+	lvx	@K[3],0,$ctr			# load counter
+	?lvsr	$T1,0,$ctr			# prepare unaligned load
+	lvx	@D[1],@x[5],$ctr
+
+	lvx	@K[0],0,r12			# load constants
+	lvx	@K[5],@x[0],r12			# one
+	lvx	$FOUR,@x[1],r12
+	lvx	$sixteen,@x[2],r12
+	lvx	$twenty4,@x[3],r12
+
+	?vperm	@K[1],@K[2],@K[1],$T0		# align key
+	?vperm	@K[2],@D[0],@K[2],$T0
+	?vperm	@K[3],@D[1],@K[3],$T1		# align counter
+
+	lwz	@d[0],0($ctr)			# load counter to GPR
+	lwz	@d[1],4($ctr)
+	vadduwm	@K[3],@K[3],@K[5]		# adjust AltiVec counter
+	lwz	@d[2],8($ctr)
+	vadduwm	@K[4],@K[3],@K[5]
+	lwz	@d[3],12($ctr)
+	vadduwm	@K[5],@K[4],@K[5]
+
+	vxor	$T0,$T0,$T0			# 0x00..00
+	vspltisw $outmask,-1			# 0xff..ff
+	?lvsr	$inpperm,0,$inp			# prepare for unaligned load
+	?lvsl	$outperm,0,$out			# prepare for unaligned store
+	?vperm	$outmask,$outmask,$T0,$outperm
+
+	be?lvsl	$T0,0,@x[0]			# 0x00..0f
+	be?vspltisb $T1,3			# 0x03..03
+	be?vxor	$T0,$T0,$T1			# swap bytes within words
+	be?vxor	$outperm,$outperm,$T1
+	be?vperm $inpperm,$inpperm,$inpperm,$T0
+
+	li	r0,10				# inner loop counter
+	b	Loop_outer_vmx
+
+.align	4
+Loop_outer_vmx:
+	lis	@x[0],0x6170			# synthesize sigma
+	lis	@x[1],0x3320
+	 vmr	$A0,@K[0]
+	lis	@x[2],0x7962
+	lis	@x[3],0x6b20
+	 vmr	$A1,@K[0]
+	ori	@x[0],@x[0],0x7865
+	ori	@x[1],@x[1],0x646e
+	 vmr	$A2,@K[0]
+	ori	@x[2],@x[2],0x2d32
+	ori	@x[3],@x[3],0x6574
+	 vmr	$B0,@K[1]
+
+	lwz	@x[4],0($key)			# load key to GPR
+	 vmr	$B1,@K[1]
+	lwz	@x[5],4($key)
+	 vmr	$B2,@K[1]
+	lwz	@x[6],8($key)
+	 vmr	$C0,@K[2]
+	lwz	@x[7],12($key)
+	 vmr	$C1,@K[2]
+	lwz	@x[8],16($key)
+	 vmr	$C2,@K[2]
+	mr	@x[12],@d[0]			# copy GPR counter
+	lwz	@x[9],20($key)
+	 vmr	$D0,@K[3]
+	mr	@x[13],@d[1]
+	lwz	@x[10],24($key)
+	 vmr	$D1,@K[4]
+	mr	@x[14],@d[2]
+	lwz	@x[11],28($key)
+	 vmr	$D2,@K[5]
+	mr	@x[15],@d[3]
+
+	mr	@t[0],@x[4]
+	mr	@t[1],@x[5]
+	mr	@t[2],@x[6]
+	mr	@t[3],@x[7]
+
+	vspltisw $twelve,12			# synthesize constants
+	vspltisw $seven,7
+
+	mtctr	r0
+	nop
+Loop_vmx:
+___
+	my @thread0=&VMXROUND($A0,$B0,$C0,$D0,0);
+	my @thread1=&VMXROUND($A1,$B1,$C1,$D1,0);
+	my @thread2=&VMXROUND($A2,$B2,$C2,$D2,0);
+	my @thread3=&ROUND(0,4,8,12);
+
+	foreach (@thread0) {
+		eval;
+		eval(shift(@thread1));
+		eval(shift(@thread2));
+
+		eval(shift(@thread3));
+		eval(shift(@thread3));
+		eval(shift(@thread3));
+	}
+	foreach (@thread3) { eval; }
+
+	@thread0=&VMXROUND($A0,$B0,$C0,$D0,1);
+	@thread1=&VMXROUND($A1,$B1,$C1,$D1,1);
+	@thread2=&VMXROUND($A2,$B2,$C2,$D2,1);
+	@thread3=&ROUND(0,5,10,15);
+
+	foreach (@thread0) {
+		eval;
+		eval(shift(@thread1));
+		eval(shift(@thread2));
+
+		eval(shift(@thread3));
+		eval(shift(@thread3));
+		eval(shift(@thread3));
+	}
+	foreach (@thread3) { eval; }
+$code.=<<___;
+	bdnz	Loop_vmx
+
+	subi	$len,$len,256			# $len-=256
+	addi	@x[0],@x[0],0x7865		# accumulate key block
+	addi	@x[1],@x[1],0x646e
+	addi	@x[2],@x[2],0x2d32
+	addi	@x[3],@x[3],0x6574
+	addis	@x[0],@x[0],0x6170
+	addis	@x[1],@x[1],0x3320
+	addis	@x[2],@x[2],0x7962
+	addis	@x[3],@x[3],0x6b20
+	add	@x[4],@x[4],@t[0]
+	lwz	@t[0],16($key)
+	add	@x[5],@x[5],@t[1]
+	lwz	@t[1],20($key)
+	add	@x[6],@x[6],@t[2]
+	lwz	@t[2],24($key)
+	add	@x[7],@x[7],@t[3]
+	lwz	@t[3],28($key)
+	add	@x[8],@x[8],@t[0]
+	add	@x[9],@x[9],@t[1]
+	add	@x[10],@x[10],@t[2]
+	add	@x[11],@x[11],@t[3]
+	add	@x[12],@x[12],@d[0]
+	add	@x[13],@x[13],@d[1]
+	add	@x[14],@x[14],@d[2]
+	add	@x[15],@x[15],@d[3]
+
+	vadduwm	$A0,$A0,@K[0]			# accumulate key block
+	vadduwm	$A1,$A1,@K[0]
+	vadduwm	$A2,$A2,@K[0]
+	vadduwm	$B0,$B0,@K[1]
+	vadduwm	$B1,$B1,@K[1]
+	vadduwm	$B2,$B2,@K[1]
+	vadduwm	$C0,$C0,@K[2]
+	vadduwm	$C1,$C1,@K[2]
+	vadduwm	$C2,$C2,@K[2]
+	vadduwm	$D0,$D0,@K[3]
+	vadduwm	$D1,$D1,@K[4]
+	vadduwm	$D2,$D2,@K[5]
+
+	addi	@d[0],@d[0],4			# increment counter
+	vadduwm	@K[3],@K[3],$FOUR
+	vadduwm	@K[4],@K[4],$FOUR
+	vadduwm	@K[5],@K[5],$FOUR
+
+___
+if (!$LITTLE_ENDIAN) { for($i=0;$i<16;$i++) {	# flip byte order
+$code.=<<___;
+	mr	@t[$i&3],@x[$i]
+	rotlwi	@x[$i],@x[$i],8
+	rlwimi	@x[$i],@t[$i&3],24,0,7
+	rlwimi	@x[$i],@t[$i&3],24,16,23
+___
+} }
+$code.=<<___;
+	lwz	@t[0],0($inp)			# load input, aligned or not
+	lwz	@t[1],4($inp)
+	lwz	@t[2],8($inp)
+	lwz	@t[3],12($inp)
+	xor	@x[0],@x[0],@t[0]		# xor with input
+	lwz	@t[0],16($inp)
+	xor	@x[1],@x[1],@t[1]
+	lwz	@t[1],20($inp)
+	xor	@x[2],@x[2],@t[2]
+	lwz	@t[2],24($inp)
+	xor	@x[3],@x[3],@t[3]
+	lwz	@t[3],28($inp)
+	xor	@x[4],@x[4],@t[0]
+	lwz	@t[0],32($inp)
+	xor	@x[5],@x[5],@t[1]
+	lwz	@t[1],36($inp)
+	xor	@x[6],@x[6],@t[2]
+	lwz	@t[2],40($inp)
+	xor	@x[7],@x[7],@t[3]
+	lwz	@t[3],44($inp)
+	xor	@x[8],@x[8],@t[0]
+	lwz	@t[0],48($inp)
+	xor	@x[9],@x[9],@t[1]
+	lwz	@t[1],52($inp)
+	xor	@x[10],@x[10],@t[2]
+	lwz	@t[2],56($inp)
+	xor	@x[11],@x[11],@t[3]
+	lwz	@t[3],60($inp)
+	xor	@x[12],@x[12],@t[0]
+	stw	@x[0],0($out)			# store output, aligned or not
+	xor	@x[13],@x[13],@t[1]
+	stw	@x[1],4($out)
+	xor	@x[14],@x[14],@t[2]
+	stw	@x[2],8($out)
+	xor	@x[15],@x[15],@t[3]
+	stw	@x[3],12($out)
+	addi	$inp,$inp,64
+	stw	@x[4],16($out)
+	li	@t[0],16
+	stw	@x[5],20($out)
+	li	@t[1],32
+	stw	@x[6],24($out)
+	li	@t[2],48
+	stw	@x[7],28($out)
+	li	@t[3],64
+	stw	@x[8],32($out)
+	stw	@x[9],36($out)
+	stw	@x[10],40($out)
+	stw	@x[11],44($out)
+	stw	@x[12],48($out)
+	stw	@x[13],52($out)
+	stw	@x[14],56($out)
+	stw	@x[15],60($out)
+	addi	$out,$out,64
+
+	lvx	@D[0],0,$inp			# load input
+	lvx	@D[1],@t[0],$inp
+	lvx	@D[2],@t[1],$inp
+	lvx	@D[3],@t[2],$inp
+	lvx	@D[4],@t[3],$inp
+	addi	$inp,$inp,64
+
+	?vperm	@D[0],@D[1],@D[0],$inpperm	# align input
+	?vperm	@D[1],@D[2],@D[1],$inpperm
+	?vperm	@D[2],@D[3],@D[2],$inpperm
+	?vperm	@D[3],@D[4],@D[3],$inpperm
+	vxor	$A0,$A0,@D[0]			# xor with input
+	vxor	$B0,$B0,@D[1]
+	lvx	@D[1],@t[0],$inp		# keep loading input
+	vxor	$C0,$C0,@D[2]
+	lvx	@D[2],@t[1],$inp
+	vxor	$D0,$D0,@D[3]
+	lvx	@D[3],@t[2],$inp
+	lvx	@D[0],@t[3],$inp
+	addi	$inp,$inp,64
+	li	@t[3],63			# 63 is not a typo
+	vperm	$A0,$A0,$A0,$outperm		# pre-misalign output
+	vperm	$B0,$B0,$B0,$outperm
+	vperm	$C0,$C0,$C0,$outperm
+	vperm	$D0,$D0,$D0,$outperm
+
+	?vperm	@D[4],@D[1],@D[4],$inpperm	# align input
+	?vperm	@D[1],@D[2],@D[1],$inpperm
+	?vperm	@D[2],@D[3],@D[2],$inpperm
+	?vperm	@D[3],@D[0],@D[3],$inpperm
+	vxor	$A1,$A1,@D[4]
+	vxor	$B1,$B1,@D[1]
+	lvx	@D[1],@t[0],$inp		# keep loading input
+	vxor	$C1,$C1,@D[2]
+	lvx	@D[2],@t[1],$inp
+	vxor	$D1,$D1,@D[3]
+	lvx	@D[3],@t[2],$inp
+	lvx	@D[4],@t[3],$inp		# redundant in aligned case
+	addi	$inp,$inp,64
+	vperm	$A1,$A1,$A1,$outperm		# pre-misalign output
+	vperm	$B1,$B1,$B1,$outperm
+	vperm	$C1,$C1,$C1,$outperm
+	vperm	$D1,$D1,$D1,$outperm
+
+	?vperm	@D[0],@D[1],@D[0],$inpperm	# align input
+	?vperm	@D[1],@D[2],@D[1],$inpperm
+	?vperm	@D[2],@D[3],@D[2],$inpperm
+	?vperm	@D[3],@D[4],@D[3],$inpperm
+	vxor	$A2,$A2,@D[0]
+	vxor	$B2,$B2,@D[1]
+	vxor	$C2,$C2,@D[2]
+	vxor	$D2,$D2,@D[3]
+	vperm	$A2,$A2,$A2,$outperm		# pre-misalign output
+	vperm	$B2,$B2,$B2,$outperm
+	vperm	$C2,$C2,$C2,$outperm
+	vperm	$D2,$D2,$D2,$outperm
+
+	andi.	@x[1],$out,15			# is $out aligned?
+	mr	@x[0],$out
+
+	vsel	@D[0],$A0,$B0,$outmask		# collect pre-misaligned output
+	vsel	@D[1],$B0,$C0,$outmask
+	vsel	@D[2],$C0,$D0,$outmask
+	vsel	@D[3],$D0,$A1,$outmask
+	vsel	$B0,$A1,$B1,$outmask
+	vsel	$C0,$B1,$C1,$outmask
+	vsel	$D0,$C1,$D1,$outmask
+	vsel	$A1,$D1,$A2,$outmask
+	vsel	$B1,$A2,$B2,$outmask
+	vsel	$C1,$B2,$C2,$outmask
+	vsel	$D1,$C2,$D2,$outmask
+
+	#stvx	$A0,0,$out			# take it easy on the edges
+	stvx	@D[0],@t[0],$out		# store output
+	stvx	@D[1],@t[1],$out
+	stvx	@D[2],@t[2],$out
+	addi	$out,$out,64
+	stvx	@D[3],0,$out
+	stvx	$B0,@t[0],$out
+	stvx	$C0,@t[1],$out
+	stvx	$D0,@t[2],$out
+	addi	$out,$out,64
+	stvx	$A1,0,$out
+	stvx	$B1,@t[0],$out
+	stvx	$C1,@t[1],$out
+	stvx	$D1,@t[2],$out
+	addi	$out,$out,64
+
+	beq	Laligned_vmx
+
+	sub	@x[2],$out,@x[1]		# in misaligned case edges
+	li	@x[3],0				# are written byte-by-byte
+Lunaligned_tail_vmx:
+	stvebx	$D2,@x[3],@x[2]
+	addi	@x[3],@x[3],1
+	cmpw	@x[3],@x[1]
+	bne	Lunaligned_tail_vmx
+
+	sub	@x[2],@x[0],@x[1]
+Lunaligned_head_vmx:
+	stvebx	$A0,@x[1],@x[2]
+	cmpwi	@x[1],15
+	addi	@x[1],@x[1],1
+	bne	Lunaligned_head_vmx
+
+	${UCMP}i $len,255			# done with 256-byte blocks yet?
+	bgt	Loop_outer_vmx
+
+	b	Ldone_vmx
+
+.align	4
+Laligned_vmx:
+	stvx	$A0,0,@x[0]			# head hexaword was not stored
+
+	${UCMP}i $len,255			# done with 256-byte blocks yet?
+	bgt	Loop_outer_vmx
+	nop
+
+Ldone_vmx:
+	${UCMP}i $len,0				# done yet?
+	bnel	__ChaCha20_1x
+
+	lwz	r12,`$FRAME-$SIZE_T*18-4`($sp)	# pull vrsave
+	li	r10,`15+$LOCALS+64`
+	li	r11,`31+$LOCALS+64`
+	mtspr	256,r12				# restore vrsave
+	lvx	v23,r10,$sp
+	addi	r10,r10,32
+	lvx	v24,r11,$sp
+	addi	r11,r11,32
+	lvx	v25,r10,$sp
+	addi	r10,r10,32
+	lvx	v26,r11,$sp
+	addi	r11,r11,32
+	lvx	v27,r10,$sp
+	addi	r10,r10,32
+	lvx	v28,r11,$sp
+	addi	r11,r11,32
+	lvx	v29,r10,$sp
+	addi	r10,r10,32
+	lvx	v30,r11,$sp
+	lvx	v31,r10,$sp
+	$POP	r0, `$FRAME+$LRSAVE`($sp)
+	$POP	r14,`$FRAME-$SIZE_T*18`($sp)
+	$POP	r15,`$FRAME-$SIZE_T*17`($sp)
+	$POP	r16,`$FRAME-$SIZE_T*16`($sp)
+	$POP	r17,`$FRAME-$SIZE_T*15`($sp)
+	$POP	r18,`$FRAME-$SIZE_T*14`($sp)
+	$POP	r19,`$FRAME-$SIZE_T*13`($sp)
+	$POP	r20,`$FRAME-$SIZE_T*12`($sp)
+	$POP	r21,`$FRAME-$SIZE_T*11`($sp)
+	$POP	r22,`$FRAME-$SIZE_T*10`($sp)
+	$POP	r23,`$FRAME-$SIZE_T*9`($sp)
+	$POP	r24,`$FRAME-$SIZE_T*8`($sp)
+	$POP	r25,`$FRAME-$SIZE_T*7`($sp)
+	$POP	r26,`$FRAME-$SIZE_T*6`($sp)
+	$POP	r27,`$FRAME-$SIZE_T*5`($sp)
+	$POP	r28,`$FRAME-$SIZE_T*4`($sp)
+	$POP	r29,`$FRAME-$SIZE_T*3`($sp)
+	$POP	r30,`$FRAME-$SIZE_T*2`($sp)
+	$POP	r31,`$FRAME-$SIZE_T*1`($sp)
+	mtlr	r0
+	addi	$sp,$sp,$FRAME
+	blr
+	.long	0
+	.byte	0,12,0x04,1,0x80,18,5,0
+	.long	0
+.size	.ChaCha20_ctr32_vmx,.-.ChaCha20_ctr32_vmx
+___
+}}}
+{{{
+my ($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3,
+    $xc0,$xc1,$xc2,$xc3, $xd0,$xd1,$xd2,$xd3) = map("v$_",(0..15));
+my @K = map("v$_",(16..19));
+my $CTR = "v26";
+my ($xt0,$xt1,$xt2,$xt3) = map("v$_",(27..30));
+my ($sixteen,$twelve,$eight,$seven) = ($xt0,$xt1,$xt2,$xt3);
+my $beperm = "v31";
+
+my ($x00,$x10,$x20,$x30) = (0, map("r$_",(8..10)));
+
+my $FRAME=$LOCALS+64+7*16;	# 7*16 is for v26-v31 offload
+
+# One ChaCha double-quarter-round for the 4xVSX code path, returned as
+# a list of perlasm strings.  Here the 16 state words live one-per-
+# vector-register (v0..v15, four blocks wide), so all four quarter-
+# rounds proceed lane-parallel; indices for Q2..Q4 are derived from
+# ($a0,$b0,$c0,$d0) by rotating the low two bits.  Rotations use vrlw
+# with splatted counts ($sixteen/$twelve/$eight/$seven).
+sub VSX_lane_ROUND {
+my ($a0,$b0,$c0,$d0)=@_;
+my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));
+my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
+my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));
+my @x=map("\"v$_\"",(0..15));
+
+	(
+	"&vadduwm	(@x[$a0],@x[$a0],@x[$b0])",	# Q1
+	 "&vadduwm	(@x[$a1],@x[$a1],@x[$b1])",	# Q2
+	  "&vadduwm	(@x[$a2],@x[$a2],@x[$b2])",	# Q3
+	   "&vadduwm	(@x[$a3],@x[$a3],@x[$b3])",	# Q4
+	"&vxor		(@x[$d0],@x[$d0],@x[$a0])",
+	 "&vxor		(@x[$d1],@x[$d1],@x[$a1])",
+	  "&vxor	(@x[$d2],@x[$d2],@x[$a2])",
+	   "&vxor	(@x[$d3],@x[$d3],@x[$a3])",
+	"&vrlw		(@x[$d0],@x[$d0],'$sixteen')",
+	 "&vrlw		(@x[$d1],@x[$d1],'$sixteen')",
+	  "&vrlw	(@x[$d2],@x[$d2],'$sixteen')",
+	   "&vrlw	(@x[$d3],@x[$d3],'$sixteen')",
+
+	"&vadduwm	(@x[$c0],@x[$c0],@x[$d0])",
+	 "&vadduwm	(@x[$c1],@x[$c1],@x[$d1])",
+	  "&vadduwm	(@x[$c2],@x[$c2],@x[$d2])",
+	   "&vadduwm	(@x[$c3],@x[$c3],@x[$d3])",
+	"&vxor		(@x[$b0],@x[$b0],@x[$c0])",
+	 "&vxor		(@x[$b1],@x[$b1],@x[$c1])",
+	  "&vxor	(@x[$b2],@x[$b2],@x[$c2])",
+	   "&vxor	(@x[$b3],@x[$b3],@x[$c3])",
+	"&vrlw		(@x[$b0],@x[$b0],'$twelve')",
+	 "&vrlw		(@x[$b1],@x[$b1],'$twelve')",
+	  "&vrlw	(@x[$b2],@x[$b2],'$twelve')",
+	   "&vrlw	(@x[$b3],@x[$b3],'$twelve')",
+
+	"&vadduwm	(@x[$a0],@x[$a0],@x[$b0])",
+	 "&vadduwm	(@x[$a1],@x[$a1],@x[$b1])",
+	  "&vadduwm	(@x[$a2],@x[$a2],@x[$b2])",
+	   "&vadduwm	(@x[$a3],@x[$a3],@x[$b3])",
+	"&vxor		(@x[$d0],@x[$d0],@x[$a0])",
+	 "&vxor		(@x[$d1],@x[$d1],@x[$a1])",
+	  "&vxor	(@x[$d2],@x[$d2],@x[$a2])",
+	   "&vxor	(@x[$d3],@x[$d3],@x[$a3])",
+	"&vrlw		(@x[$d0],@x[$d0],'$eight')",
+	 "&vrlw		(@x[$d1],@x[$d1],'$eight')",
+	  "&vrlw	(@x[$d2],@x[$d2],'$eight')",
+	   "&vrlw	(@x[$d3],@x[$d3],'$eight')",
+
+	"&vadduwm	(@x[$c0],@x[$c0],@x[$d0])",
+	 "&vadduwm	(@x[$c1],@x[$c1],@x[$d1])",
+	  "&vadduwm	(@x[$c2],@x[$c2],@x[$d2])",
+	   "&vadduwm	(@x[$c3],@x[$c3],@x[$d3])",
+	"&vxor		(@x[$b0],@x[$b0],@x[$c0])",
+	 "&vxor		(@x[$b1],@x[$b1],@x[$c1])",
+	  "&vxor	(@x[$b2],@x[$b2],@x[$c2])",
+	   "&vxor	(@x[$b3],@x[$b3],@x[$c3])",
+	"&vrlw		(@x[$b0],@x[$b0],'$seven')",
+	 "&vrlw		(@x[$b1],@x[$b1],'$seven')",
+	  "&vrlw	(@x[$b2],@x[$b2],'$seven')",
+	   "&vrlw	(@x[$b3],@x[$b3],'$seven')"
+	);
+}
+
+$code.=<<___;
+
+.globl	.ChaCha20_ctr32_vsx
+.align	5
+.ChaCha20_ctr32_vsx:
+	$STU	$sp,-$FRAME($sp)
+	mflr	r0
+	li	r10,`15+$LOCALS+64`
+	li	r11,`31+$LOCALS+64`
+	mfspr	r12,256
+	stvx	v26,r10,$sp
+	addi	r10,r10,32
+	stvx	v27,r11,$sp
+	addi	r11,r11,32
+	stvx	v28,r10,$sp
+	addi	r10,r10,32
+	stvx	v29,r11,$sp
+	addi	r11,r11,32
+	stvx	v30,r10,$sp
+	stvx	v31,r11,$sp
+	stw	r12,`$FRAME-4`($sp)		# save vrsave
+	li	r12,-4096+63
+	$PUSH	r0, `$FRAME+$LRSAVE`($sp)
+	mtspr	256,r12				# preserve 29 AltiVec registers
+
+	bl	Lconsts				# returns pointer Lsigma in r12
+	lvx_4w	@K[0],0,r12			# load sigma
+	addi	r12,r12,0x50
+	li	$x10,16
+	li	$x20,32
+	li	$x30,48
+	li	r11,64
+
+	lvx_4w	@K[1],0,$key			# load key
+	lvx_4w	@K[2],$x10,$key
+	lvx_4w	@K[3],0,$ctr			# load counter
+
+	vxor	$xt0,$xt0,$xt0
+	lvx_4w	$xt1,r11,r12
+	vspltw	$CTR,@K[3],0
+	vsldoi	@K[3],@K[3],$xt0,4
+	vsldoi	@K[3],$xt0,@K[3],12		# clear @K[3].word[0]
+	vadduwm	$CTR,$CTR,$xt1
+
+	be?lvsl	$beperm,0,$x10			# 0x00..0f
+	be?vspltisb $xt0,3			# 0x03..03
+	be?vxor	$beperm,$beperm,$xt0		# swap bytes within words
+
+	li	r0,10				# inner loop counter
+	mtctr	r0
+	b	Loop_outer_vsx
+
+.align	5
+Loop_outer_vsx:
+	lvx	$xa0,$x00,r12			# load [smashed] sigma
+	lvx	$xa1,$x10,r12
+	lvx	$xa2,$x20,r12
+	lvx	$xa3,$x30,r12
+
+	vspltw	$xb0,@K[1],0			# smash the key
+	vspltw	$xb1,@K[1],1
+	vspltw	$xb2,@K[1],2
+	vspltw	$xb3,@K[1],3
+
+	vspltw	$xc0,@K[2],0
+	vspltw	$xc1,@K[2],1
+	vspltw	$xc2,@K[2],2
+	vspltw	$xc3,@K[2],3
+
+	vmr	$xd0,$CTR			# smash the counter
+	vspltw	$xd1,@K[3],1
+	vspltw	$xd2,@K[3],2
+	vspltw	$xd3,@K[3],3
+
+	vspltisw $sixteen,-16			# synthesize constants
+	vspltisw $twelve,12
+	vspltisw $eight,8
+	vspltisw $seven,7
+
+Loop_vsx:
+___
+	foreach (&VSX_lane_ROUND(0, 4, 8,12)) { eval; }
+	foreach (&VSX_lane_ROUND(0, 5,10,15)) { eval; }
+$code.=<<___;
+	bdnz	Loop_vsx
+
+	vadduwm	$xd0,$xd0,$CTR
+
+	vmrgew	$xt0,$xa0,$xa1			# transpose data
+	vmrgew	$xt1,$xa2,$xa3
+	vmrgow	$xa0,$xa0,$xa1
+	vmrgow	$xa2,$xa2,$xa3
+	 vmrgew	$xt2,$xb0,$xb1
+	 vmrgew	$xt3,$xb2,$xb3
+	vpermdi	$xa1,$xa0,$xa2,0b00
+	vpermdi	$xa3,$xa0,$xa2,0b11
+	vpermdi	$xa0,$xt0,$xt1,0b00
+	vpermdi	$xa2,$xt0,$xt1,0b11
+
+	vmrgow	$xb0,$xb0,$xb1
+	vmrgow	$xb2,$xb2,$xb3
+	 vmrgew	$xt0,$xc0,$xc1
+	 vmrgew	$xt1,$xc2,$xc3
+	vpermdi	$xb1,$xb0,$xb2,0b00
+	vpermdi	$xb3,$xb0,$xb2,0b11
+	vpermdi	$xb0,$xt2,$xt3,0b00
+	vpermdi	$xb2,$xt2,$xt3,0b11
+
+	vmrgow	$xc0,$xc0,$xc1
+	vmrgow	$xc2,$xc2,$xc3
+	 vmrgew	$xt2,$xd0,$xd1
+	 vmrgew	$xt3,$xd2,$xd3
+	vpermdi	$xc1,$xc0,$xc2,0b00
+	vpermdi	$xc3,$xc0,$xc2,0b11
+	vpermdi	$xc0,$xt0,$xt1,0b00
+	vpermdi	$xc2,$xt0,$xt1,0b11
+
+	vmrgow	$xd0,$xd0,$xd1
+	vmrgow	$xd2,$xd2,$xd3
+	 vspltisw $xt0,4
+	 vadduwm  $CTR,$CTR,$xt0		# next counter value
+	vpermdi	$xd1,$xd0,$xd2,0b00
+	vpermdi	$xd3,$xd0,$xd2,0b11
+	vpermdi	$xd0,$xt2,$xt3,0b00
+	vpermdi	$xd2,$xt2,$xt3,0b11
+
+	vadduwm	$xa0,$xa0,@K[0]
+	vadduwm	$xb0,$xb0,@K[1]
+	vadduwm	$xc0,$xc0,@K[2]
+	vadduwm	$xd0,$xd0,@K[3]
+
+	be?vperm $xa0,$xa0,$xa0,$beperm
+	be?vperm $xb0,$xb0,$xb0,$beperm
+	be?vperm $xc0,$xc0,$xc0,$beperm
+	be?vperm $xd0,$xd0,$xd0,$beperm
+
+	${UCMP}i $len,0x40
+	blt	Ltail_vsx
+
+	lvx_4w	$xt0,$x00,$inp
+	lvx_4w	$xt1,$x10,$inp
+	lvx_4w	$xt2,$x20,$inp
+	lvx_4w	$xt3,$x30,$inp
+
+	vxor	$xt0,$xt0,$xa0
+	vxor	$xt1,$xt1,$xb0
+	vxor	$xt2,$xt2,$xc0
+	vxor	$xt3,$xt3,$xd0
+
+	stvx_4w	$xt0,$x00,$out
+	stvx_4w	$xt1,$x10,$out
+	addi	$inp,$inp,0x40
+	stvx_4w	$xt2,$x20,$out
+	subi	$len,$len,0x40
+	stvx_4w	$xt3,$x30,$out
+	addi	$out,$out,0x40
+	beq	Ldone_vsx
+
+	vadduwm	$xa0,$xa1,@K[0]
+	vadduwm	$xb0,$xb1,@K[1]
+	vadduwm	$xc0,$xc1,@K[2]
+	vadduwm	$xd0,$xd1,@K[3]
+
+	be?vperm $xa0,$xa0,$xa0,$beperm
+	be?vperm $xb0,$xb0,$xb0,$beperm
+	be?vperm $xc0,$xc0,$xc0,$beperm
+	be?vperm $xd0,$xd0,$xd0,$beperm
+
+	${UCMP}i $len,0x40
+	blt	Ltail_vsx
+
+	lvx_4w	$xt0,$x00,$inp
+	lvx_4w	$xt1,$x10,$inp
+	lvx_4w	$xt2,$x20,$inp
+	lvx_4w	$xt3,$x30,$inp
+
+	vxor	$xt0,$xt0,$xa0
+	vxor	$xt1,$xt1,$xb0
+	vxor	$xt2,$xt2,$xc0
+	vxor	$xt3,$xt3,$xd0
+
+	stvx_4w	$xt0,$x00,$out
+	stvx_4w	$xt1,$x10,$out
+	addi	$inp,$inp,0x40
+	stvx_4w	$xt2,$x20,$out
+	subi	$len,$len,0x40
+	stvx_4w	$xt3,$x30,$out
+	addi	$out,$out,0x40
+	beq	Ldone_vsx
+
+	vadduwm	$xa0,$xa2,@K[0]
+	vadduwm	$xb0,$xb2,@K[1]
+	vadduwm	$xc0,$xc2,@K[2]
+	vadduwm	$xd0,$xd2,@K[3]
+
+	be?vperm $xa0,$xa0,$xa0,$beperm
+	be?vperm $xb0,$xb0,$xb0,$beperm
+	be?vperm $xc0,$xc0,$xc0,$beperm
+	be?vperm $xd0,$xd0,$xd0,$beperm
+
+	${UCMP}i $len,0x40
+	blt	Ltail_vsx
+
+	lvx_4w	$xt0,$x00,$inp
+	lvx_4w	$xt1,$x10,$inp
+	lvx_4w	$xt2,$x20,$inp
+	lvx_4w	$xt3,$x30,$inp
+
+	vxor	$xt0,$xt0,$xa0
+	vxor	$xt1,$xt1,$xb0
+	vxor	$xt2,$xt2,$xc0
+	vxor	$xt3,$xt3,$xd0
+
+	stvx_4w	$xt0,$x00,$out
+	stvx_4w	$xt1,$x10,$out
+	addi	$inp,$inp,0x40
+	stvx_4w	$xt2,$x20,$out
+	subi	$len,$len,0x40
+	stvx_4w	$xt3,$x30,$out
+	addi	$out,$out,0x40
+	beq	Ldone_vsx
+
+	vadduwm	$xa0,$xa3,@K[0]
+	vadduwm	$xb0,$xb3,@K[1]
+	vadduwm	$xc0,$xc3,@K[2]
+	vadduwm	$xd0,$xd3,@K[3]
+
+	be?vperm $xa0,$xa0,$xa0,$beperm
+	be?vperm $xb0,$xb0,$xb0,$beperm
+	be?vperm $xc0,$xc0,$xc0,$beperm
+	be?vperm $xd0,$xd0,$xd0,$beperm
+
+	${UCMP}i $len,0x40
+	blt	Ltail_vsx
+
+	lvx_4w	$xt0,$x00,$inp
+	lvx_4w	$xt1,$x10,$inp
+	lvx_4w	$xt2,$x20,$inp
+	lvx_4w	$xt3,$x30,$inp
+
+	vxor	$xt0,$xt0,$xa0
+	vxor	$xt1,$xt1,$xb0
+	vxor	$xt2,$xt2,$xc0
+	vxor	$xt3,$xt3,$xd0
+
+	stvx_4w	$xt0,$x00,$out
+	stvx_4w	$xt1,$x10,$out
+	addi	$inp,$inp,0x40
+	stvx_4w	$xt2,$x20,$out
+	subi	$len,$len,0x40
+	stvx_4w	$xt3,$x30,$out
+	addi	$out,$out,0x40
+	mtctr	r0
+	bne	Loop_outer_vsx
+
+Ldone_vsx:
+	lwz	r12,`$FRAME-4`($sp)		# pull vrsave
+	li	r10,`15+$LOCALS+64`
+	li	r11,`31+$LOCALS+64`
+	$POP	r0, `$FRAME+$LRSAVE`($sp)
+	mtspr	256,r12				# restore vrsave
+	lvx	v26,r10,$sp
+	addi	r10,r10,32
+	lvx	v27,r11,$sp
+	addi	r11,r11,32
+	lvx	v28,r10,$sp
+	addi	r10,r10,32
+	lvx	v29,r11,$sp
+	addi	r11,r11,32
+	lvx	v30,r10,$sp
+	lvx	v31,r11,$sp
+	mtlr	r0
+	addi	$sp,$sp,$FRAME
+	blr
+
+.align	4
+Ltail_vsx:
+	addi	r11,$sp,$LOCALS
+	mtctr	$len
+	stvx_4w	$xa0,$x00,r11			# offload block to stack
+	stvx_4w	$xb0,$x10,r11
+	stvx_4w	$xc0,$x20,r11
+	stvx_4w	$xd0,$x30,r11
+	subi	r12,r11,1			# prepare for *++ptr
+	subi	$inp,$inp,1
+	subi	$out,$out,1
+
+Loop_tail_vsx:
+	lbzu	r6,1(r12)
+	lbzu	r7,1($inp)
+	xor	r6,r6,r7
+	stbu	r6,1($out)
+	bdnz	Loop_tail_vsx
+
+	stvx_4w	$K[0],$x00,r11			# wipe copy of the block
+	stvx_4w	$K[0],$x10,r11
+	stvx_4w	$K[0],$x20,r11
+	stvx_4w	$K[0],$x30,r11
+
+	b	Ldone_vsx
+	.long	0
+	.byte	0,12,0x04,1,0x80,0,5,0
+	.long	0
+.size	.ChaCha20_ctr32_vsx,.-.ChaCha20_ctr32_vsx
+___
+}}}
+$code.=<<___;
+.align	5
+Lconsts:
+	mflr	r0
+	bcl	20,31,\$+4
+	mflr	r12	#vvvvv "distance between . and Lsigma
+	addi	r12,r12,`64-8`
+	mtlr	r0
+	blr
+	.long	0
+	.byte	0,12,0x14,0,0,0,0,0
+	.space	`64-9*4`
+Lsigma:
+	.long   0x61707865,0x3320646e,0x79622d32,0x6b206574
+	.long	1,0,0,0
+	.long	4,0,0,0
+___
+$code.=<<___ 	if ($LITTLE_ENDIAN);
+	.long	0x0e0f0c0d,0x0a0b0809,0x06070405,0x02030001
+	.long	0x0d0e0f0c,0x090a0b08,0x05060704,0x01020300
+___
+$code.=<<___ 	if (!$LITTLE_ENDIAN);	# flipped words
+	.long	0x02030001,0x06070405,0x0a0b0809,0x0e0f0c0d
+	.long	0x01020300,0x05060704,0x090a0b08,0x0d0e0f0c
+___
+$code.=<<___;
+	.long	0x61707865,0x61707865,0x61707865,0x61707865
+	.long	0x3320646e,0x3320646e,0x3320646e,0x3320646e
+	.long	0x79622d32,0x79622d32,0x79622d32,0x79622d32
+	.long	0x6b206574,0x6b206574,0x6b206574,0x6b206574
+	.long	0,1,2,3
+.asciz  "ChaCha20 for PowerPC/AltiVec, CRYPTOGAMS by <appro\@openssl.org>"
+.align	2
+___
+
+# Post-process the accumulated code line by line: constant-fold `...`
+# arithmetic, strip or comment out the be?/le? endian-conditional
+# prefixes, and lower '?'-prefixed endian-sensitive mnemonics to their
+# concrete forms before printing.
+foreach (split("\n",$code)) {
+	s/\`([^\`]*)\`/eval $1/ge;	# evaluate `expr` at build time
+
+	# instructions prefixed with '?' are endian-specific and need
+	# to be adjusted accordingly...
+	if ($flavour !~ /le$/) {	# big-endian
+	    s/be\?//		or
+	    s/le\?/#le#/	or
+	    s/\?lvsr/lvsl/	or
+	    s/\?lvsl/lvsr/	or
+	    s/\?(vperm\s+v[0-9]+,\s*)(v[0-9]+,\s*)(v[0-9]+,\s*)(v[0-9]+)/$1$3$2$4/ or
+	    s/vrldoi(\s+v[0-9]+,\s*)(v[0-9]+,)\s*([0-9]+)/vsldoi$1$2$2 16-$3/;
+	} else {			# little-endian
+	    s/le\?//		or
+	    s/be\?/#be#/	or
+	    s/\?([a-z]+)/$1/	or
+	    s/vrldoi(\s+v[0-9]+,\s*)(v[0-9]+,)\s*([0-9]+)/vsldoi$1$2$2 $3/;
+	}
+
+	print $_,"\n";
+}
+
+close STDOUT or die "error closing STDOUT: $!";
diff --git a/ap/lib/libssl/openssl-1.1.1o/crypto/chacha/asm/chacha-s390x.pl b/ap/lib/libssl/openssl-1.1.1o/crypto/chacha/asm/chacha-s390x.pl
new file mode 100755
index 0000000..dd66a9c
--- /dev/null
+++ b/ap/lib/libssl/openssl-1.1.1o/crypto/chacha/asm/chacha-s390x.pl
@@ -0,0 +1,326 @@
+#! /usr/bin/env perl
+# Copyright 2016-2020 The OpenSSL Project Authors. All Rights Reserved.
+#
+# Licensed under the OpenSSL license (the "License").  You may not use
+# this file except in compliance with the License.  You can obtain a copy
+# in the file LICENSE in the source distribution or at
+# https://www.openssl.org/source/license.html
+
+#
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+#
+# December 2015
+#
+# ChaCha20 for s390x.
+#
+# 3 times faster than compiler-generated code.
+
+$flavour = shift;
+
+if ($flavour =~ /3[12]/) {
+	$SIZE_T=4;
+	$g="";
+} else {
+	$SIZE_T=8;
+	$g="g";
+}
+
+while (($output=shift) && ($output!~/\w[\w\-]*\.\w+$/)) {}
+open STDOUT,">$output";
+
+sub AUTOLOAD()		# thunk [simplified] x86-style perlasm
+{ my $opcode = $AUTOLOAD; $opcode =~ s/.*:://;
+    $code .= "\t$opcode\t".join(',',@_)."\n";
+}
+
+my $sp="%r15";
+
+my $stdframe=16*$SIZE_T+4*8;
+my $frame=$stdframe+4*20;
+
+my ($out,$inp,$len,$key,$counter)=map("%r$_",(2..6));
+
+my @x=map("%r$_",(0..7,"x","x","x","x",(10..13)));
+my @t=map("%r$_",(8,9));
+
+sub ROUND {
+my ($a0,$b0,$c0,$d0)=@_;
+my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));
+my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
+my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));
+my ($xc,$xc_)=map("\"$_\"",@t);
+my @x=map("\"$_\"",@x);
+
+	# Consider order in which variables are addressed by their
+	# index:
+	#
+	#	a   b   c   d
+	#
+	#	0   4   8  12 < even round
+	#	1   5   9  13
+	#	2   6  10  14
+	#	3   7  11  15
+	#	0   5  10  15 < odd round
+	#	1   6  11  12
+	#	2   7   8  13
+	#	3   4   9  14
+	#
+	# 'a', 'b' and 'd's are permanently allocated in registers,
+	# @x[0..7,12..15], while 'c's are maintained in memory. If
+	# you observe 'c' column, you'll notice that pair of 'c's is
+	# invariant between rounds. This means that we have to reload
+	# them once per round, in the middle. This is why you'll see
+	# 'c' stores and loads in the middle, but none in the beginning
+	# or end.
+
+	(
+	"&alr	(@x[$a0],@x[$b0])",	# Q1
+	 "&alr	(@x[$a1],@x[$b1])",	# Q2
+	"&xr	(@x[$d0],@x[$a0])",
+	 "&xr	(@x[$d1],@x[$a1])",
+	"&rll	(@x[$d0],@x[$d0],16)",
+	 "&rll	(@x[$d1],@x[$d1],16)",
+
+	"&alr	($xc,@x[$d0])",
+	 "&alr	($xc_,@x[$d1])",
+	"&xr	(@x[$b0],$xc)",
+	 "&xr	(@x[$b1],$xc_)",
+	"&rll	(@x[$b0],@x[$b0],12)",
+	 "&rll	(@x[$b1],@x[$b1],12)",
+
+	"&alr	(@x[$a0],@x[$b0])",
+	 "&alr	(@x[$a1],@x[$b1])",
+	"&xr	(@x[$d0],@x[$a0])",
+	 "&xr	(@x[$d1],@x[$a1])",
+	"&rll	(@x[$d0],@x[$d0],8)",
+	 "&rll	(@x[$d1],@x[$d1],8)",
+
+	"&alr	($xc,@x[$d0])",
+	 "&alr	($xc_,@x[$d1])",
+	"&xr	(@x[$b0],$xc)",
+	 "&xr	(@x[$b1],$xc_)",
+	"&rll	(@x[$b0],@x[$b0],7)",
+	 "&rll	(@x[$b1],@x[$b1],7)",
+
+	"&stm	($xc,$xc_,'$stdframe+4*8+4*$c0($sp)')",	# reload pair of 'c's
+	"&lm	($xc,$xc_,'$stdframe+4*8+4*$c2($sp)')",
+
+	"&alr	(@x[$a2],@x[$b2])",	# Q3
+	 "&alr	(@x[$a3],@x[$b3])",	# Q4
+	"&xr	(@x[$d2],@x[$a2])",
+	 "&xr	(@x[$d3],@x[$a3])",
+	"&rll	(@x[$d2],@x[$d2],16)",
+	 "&rll	(@x[$d3],@x[$d3],16)",
+
+	"&alr	($xc,@x[$d2])",
+	 "&alr	($xc_,@x[$d3])",
+	"&xr	(@x[$b2],$xc)",
+	 "&xr	(@x[$b3],$xc_)",
+	"&rll	(@x[$b2],@x[$b2],12)",
+	 "&rll	(@x[$b3],@x[$b3],12)",
+
+	"&alr	(@x[$a2],@x[$b2])",
+	 "&alr	(@x[$a3],@x[$b3])",
+	"&xr	(@x[$d2],@x[$a2])",
+	 "&xr	(@x[$d3],@x[$a3])",
+	"&rll	(@x[$d2],@x[$d2],8)",
+	 "&rll	(@x[$d3],@x[$d3],8)",
+
+	"&alr	($xc,@x[$d2])",
+	 "&alr	($xc_,@x[$d3])",
+	"&xr	(@x[$b2],$xc)",
+	 "&xr	(@x[$b3],$xc_)",
+	"&rll	(@x[$b2],@x[$b2],7)",
+	 "&rll	(@x[$b3],@x[$b3],7)"
+	);
+}
+
+$code.=<<___;
+.text
+
+.globl	ChaCha20_ctr32
+.type	ChaCha20_ctr32,\@function
+.align	32
+ChaCha20_ctr32:
+	lt${g}r	$len,$len			# $len==0?
+	bzr	%r14
+	a${g}hi	$len,-64
+	l${g}hi	%r1,-$frame
+	stm${g}	%r6,%r15,`6*$SIZE_T`($sp)
+	sl${g}r	$out,$inp			# difference
+	la	$len,0($inp,$len)		# end of input minus 64
+	larl	%r7,.Lsigma
+	lgr	%r0,$sp
+	la	$sp,0(%r1,$sp)
+	st${g}	%r0,0($sp)
+
+	lmg	%r8,%r11,0($key)		# load key
+	lmg	%r12,%r13,0($counter)		# load counter
+	lmg	%r6,%r7,0(%r7)			# load sigma constant
+
+	la	%r14,0($inp)
+	st${g}	$out,$frame+3*$SIZE_T($sp)
+	st${g}	$len,$frame+4*$SIZE_T($sp)
+	stmg	%r6,%r13,$stdframe($sp)		# copy key schedule to stack
+	srlg	@x[12],%r12,32			# 32-bit counter value
+	j	.Loop_outer
+
+.align	16
+.Loop_outer:
+	lm	@x[0],@x[7],$stdframe+4*0($sp)		# load x[0]-x[7]
+	lm	@t[0],@t[1],$stdframe+4*10($sp)		# load x[10]-x[11]
+	lm	@x[13],@x[15],$stdframe+4*13($sp)	# load x[13]-x[15]
+	stm	@t[0],@t[1],$stdframe+4*8+4*10($sp)	# offload x[10]-x[11]
+	lm	@t[0],@t[1],$stdframe+4*8($sp)		# load x[8]-x[9]
+	st	@x[12],$stdframe+4*12($sp)		# save counter
+	st${g}	%r14,$frame+2*$SIZE_T($sp)		# save input pointer
+	lhi	%r14,10
+	j	.Loop
+
+.align	4
+.Loop:
+___
+	foreach (&ROUND(0, 4, 8,12)) { eval; }
+	foreach (&ROUND(0, 5,10,15)) { eval; }
+$code.=<<___;
+	brct	%r14,.Loop
+
+	l${g}	%r14,$frame+2*$SIZE_T($sp)		# pull input pointer
+	stm	@t[0],@t[1],$stdframe+4*8+4*8($sp)	# offload x[8]-x[9]
+	lm${g}	@t[0],@t[1],$frame+3*$SIZE_T($sp)
+
+	al	@x[0],$stdframe+4*0($sp)	# accumulate key schedule
+	al	@x[1],$stdframe+4*1($sp)
+	al	@x[2],$stdframe+4*2($sp)
+	al	@x[3],$stdframe+4*3($sp)
+	al	@x[4],$stdframe+4*4($sp)
+	al	@x[5],$stdframe+4*5($sp)
+	al	@x[6],$stdframe+4*6($sp)
+	al	@x[7],$stdframe+4*7($sp)
+	lrvr	@x[0],@x[0]
+	lrvr	@x[1],@x[1]
+	lrvr	@x[2],@x[2]
+	lrvr	@x[3],@x[3]
+	lrvr	@x[4],@x[4]
+	lrvr	@x[5],@x[5]
+	lrvr	@x[6],@x[6]
+	lrvr	@x[7],@x[7]
+	al	@x[12],$stdframe+4*12($sp)
+	al	@x[13],$stdframe+4*13($sp)
+	al	@x[14],$stdframe+4*14($sp)
+	al	@x[15],$stdframe+4*15($sp)
+	lrvr	@x[12],@x[12]
+	lrvr	@x[13],@x[13]
+	lrvr	@x[14],@x[14]
+	lrvr	@x[15],@x[15]
+
+	la	@t[0],0(@t[0],%r14)		# reconstruct output pointer
+	cl${g}r	%r14,@t[1]
+	jh	.Ltail
+
+	x	@x[0],4*0(%r14)			# xor with input
+	x	@x[1],4*1(%r14)
+	st	@x[0],4*0(@t[0])		# store output
+	x	@x[2],4*2(%r14)
+	st	@x[1],4*1(@t[0])
+	x	@x[3],4*3(%r14)
+	st	@x[2],4*2(@t[0])
+	x	@x[4],4*4(%r14)
+	st	@x[3],4*3(@t[0])
+	 lm	@x[0],@x[3],$stdframe+4*8+4*8($sp)	# load x[8]-x[11]
+	x	@x[5],4*5(%r14)
+	st	@x[4],4*4(@t[0])
+	x	@x[6],4*6(%r14)
+	 al	@x[0],$stdframe+4*8($sp)
+	st	@x[5],4*5(@t[0])
+	x	@x[7],4*7(%r14)
+	 al	@x[1],$stdframe+4*9($sp)
+	st	@x[6],4*6(@t[0])
+	x	@x[12],4*12(%r14)
+	 al	@x[2],$stdframe+4*10($sp)
+	st	@x[7],4*7(@t[0])
+	x	@x[13],4*13(%r14)
+	 al	@x[3],$stdframe+4*11($sp)
+	st	@x[12],4*12(@t[0])
+	x	@x[14],4*14(%r14)
+	st	@x[13],4*13(@t[0])
+	x	@x[15],4*15(%r14)
+	st	@x[14],4*14(@t[0])
+	 lrvr	@x[0],@x[0]
+	st	@x[15],4*15(@t[0])
+	 lrvr	@x[1],@x[1]
+	 lrvr	@x[2],@x[2]
+	 lrvr	@x[3],@x[3]
+	lhi	@x[12],1
+	 x	@x[0],4*8(%r14)
+	al	@x[12],$stdframe+4*12($sp)	# increment counter
+	 x	@x[1],4*9(%r14)
+	 st	@x[0],4*8(@t[0])
+	 x	@x[2],4*10(%r14)
+	 st	@x[1],4*9(@t[0])
+	 x	@x[3],4*11(%r14)
+	 st	@x[2],4*10(@t[0])
+	 st	@x[3],4*11(@t[0])
+
+	cl${g}r	%r14,@t[1]			# done yet?
+	la	%r14,64(%r14)
+	jl	.Loop_outer
+
+.Ldone:
+	xgr	%r0,%r0
+	xgr	%r1,%r1
+	xgr	%r2,%r2
+	xgr	%r3,%r3
+	stmg	%r0,%r3,$stdframe+4*4($sp)	# wipe key copy
+	stmg	%r0,%r3,$stdframe+4*12($sp)
+
+	lm${g}	%r6,%r15,`$frame+6*$SIZE_T`($sp)
+	br	%r14
+
+.align	16
+.Ltail:
+	la	@t[1],64($t[1])
+	stm	@x[0],@x[7],$stdframe+4*0($sp)
+	sl${g}r	@t[1],%r14
+	lm	@x[0],@x[3],$stdframe+4*8+4*8($sp)
+	l${g}hi	@x[6],0
+	stm	@x[12],@x[15],$stdframe+4*12($sp)
+	al	@x[0],$stdframe+4*8($sp)
+	al	@x[1],$stdframe+4*9($sp)
+	al	@x[2],$stdframe+4*10($sp)
+	al	@x[3],$stdframe+4*11($sp)
+	lrvr	@x[0],@x[0]
+	lrvr	@x[1],@x[1]
+	lrvr	@x[2],@x[2]
+	lrvr	@x[3],@x[3]
+	stm	@x[0],@x[3],$stdframe+4*8($sp)
+
+.Loop_tail:
+	llgc	@x[4],0(@x[6],%r14)
+	llgc	@x[5],$stdframe(@x[6],$sp)
+	xr	@x[5],@x[4]
+	stc	@x[5],0(@x[6],@t[0])
+	la	@x[6],1(@x[6])
+	brct	@t[1],.Loop_tail
+
+	j	.Ldone
+.size	ChaCha20_ctr32,.-ChaCha20_ctr32
+
+.align	32
+.Lsigma:
+.long	0x61707865,0x3320646e,0x79622d32,0x6b206574	# endian-neutral
+.asciz	"ChaCha20 for s390x, CRYPTOGAMS by <appro\@openssl.org>"
+.align	4
+___
+
+foreach (split("\n",$code)) {
+	s/\`([^\`]*)\`/eval $1/ge;
+
+	print $_,"\n";
+}
+close STDOUT or die "error closing STDOUT: $!";
diff --git a/ap/lib/libssl/openssl-1.1.1o/crypto/chacha/asm/chacha-x86.pl b/ap/lib/libssl/openssl-1.1.1o/crypto/chacha/asm/chacha-x86.pl
new file mode 100755
index 0000000..492fda5
--- /dev/null
+++ b/ap/lib/libssl/openssl-1.1.1o/crypto/chacha/asm/chacha-x86.pl
@@ -0,0 +1,1155 @@
+#! /usr/bin/env perl
+# Copyright 2016-2020 The OpenSSL Project Authors. All Rights Reserved.
+#
+# Licensed under the OpenSSL license (the "License").  You may not use
+# this file except in compliance with the License.  You can obtain a copy
+# in the file LICENSE in the source distribution or at
+# https://www.openssl.org/source/license.html
+
+#
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+#
+# January 2015
+#
+# ChaCha20 for x86.
+#
+# Performance in cycles per byte out of large buffer.
+#
+#		1xIALU/gcc	4xSSSE3
+# Pentium	17.5/+80%
+# PIII		14.2/+60%
+# P4		18.6/+84%
+# Core2		9.56/+89%	4.83
+# Westmere	9.50/+45%	3.35
+# Sandy Bridge	10.5/+47%	3.20
+# Haswell	8.15/+50%	2.83
+# Skylake	7.53/+22%	2.75
+# Silvermont	17.4/+36%	8.35
+# Goldmont	13.4/+40%	4.36
+# Sledgehammer	10.2/+54%
+# Bulldozer	13.4/+50%	4.38(*)
+#
+# (*)	Bulldozer actually executes 4xXOP code path that delivers 3.55;
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+push(@INC,"${dir}","${dir}../../perlasm");
+require "x86asm.pl";
+
+$output=pop;
+open STDOUT,">$output";
+
+&asm_init($ARGV[0],$ARGV[$#ARGV] eq "386");
+
+$xmm=$ymm=0;
+for (@ARGV) { $xmm=1 if (/-DOPENSSL_IA32_SSE2/); }
+
+$ymm=1 if ($xmm &&
+		`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
+			=~ /GNU assembler version ([2-9]\.[0-9]+)/ &&
+		($gasver=$1)>=2.19);	# first version supporting AVX
+
+$ymm=1 if ($xmm && !$ymm && $ARGV[0] eq "win32n" &&
+		`nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/ &&
+		$1>=2.03);	# first version supporting AVX
+
+$ymm=1 if ($xmm && !$ymm && $ARGV[0] eq "win32" &&
+		`ml 2>&1` =~ /Version ([0-9]+)\./ &&
+		$1>=10);	# first version supporting AVX
+
+$ymm=1 if ($xmm && !$ymm &&
+		`$ENV{CC} -v 2>&1` =~ /((?:clang|LLVM) version|based on LLVM) ([0-9]+\.[0-9]+)/ &&
+		$2>=3.0);	# first version supporting AVX
+
+$a="eax";
+($b,$b_)=("ebx","ebp");
+($c,$c_)=("ecx","esi");
+($d,$d_)=("edx","edi");
+
+sub QUARTERROUND {
+my ($ai,$bi,$ci,$di,$i)=@_;
+my ($an,$bn,$cn,$dn)=map(($_&~3)+(($_+1)&3),($ai,$bi,$ci,$di));	# next
+my ($ap,$bp,$cp,$dp)=map(($_&~3)+(($_-1)&3),($ai,$bi,$ci,$di));	# previous
+
+	#       a   b   c   d
+	#
+	#       0   4   8  12 < even round
+	#       1   5   9  13
+	#       2   6  10  14
+	#       3   7  11  15
+	#       0   5  10  15 < odd round
+	#       1   6  11  12
+	#       2   7   8  13
+	#       3   4   9  14
+
+	if ($i==0) {
+            my $j=4;
+	    ($ap,$bp,$cp,$dp)=map(($_&~3)+(($_-$j--)&3),($ap,$bp,$cp,$dp));
+	} elsif ($i==3) {
+            my $j=0;
+	    ($an,$bn,$cn,$dn)=map(($_&~3)+(($_+$j++)&3),($an,$bn,$cn,$dn));
+	} elsif ($i==4) {
+            my $j=4;
+	    ($ap,$bp,$cp,$dp)=map(($_&~3)+(($_+$j--)&3),($ap,$bp,$cp,$dp));
+	} elsif ($i==7) {
+            my $j=0;
+	    ($an,$bn,$cn,$dn)=map(($_&~3)+(($_-$j++)&3),($an,$bn,$cn,$dn));
+	}
+
+	#&add	($a,$b);			# see elsewhere
+	&xor	($d,$a);
+	 &mov	(&DWP(4*$cp,"esp"),$c_)		if ($ai>0 && $ai<3);
+	&rol	($d,16);
+	 &mov	(&DWP(4*$bp,"esp"),$b_)		if ($i!=0);
+	&add	($c,$d);
+	 &mov	($c_,&DWP(4*$cn,"esp"))		if ($ai>0 && $ai<3);
+	&xor	($b,$c);
+	 &mov	($d_,&DWP(4*$dn,"esp"))		if ($di!=$dn);
+	&rol	($b,12);
+	 &mov	($b_,&DWP(4*$bn,"esp"))		if ($i<7);
+	 &mov	($b_,&DWP(128,"esp"))		if ($i==7);	# loop counter
+	&add	($a,$b);
+	&xor	($d,$a);
+	&mov	(&DWP(4*$ai,"esp"),$a);
+	&rol	($d,8);
+	&mov	($a,&DWP(4*$an,"esp"));
+	&add	($c,$d);
+	&mov	(&DWP(4*$di,"esp"),$d)		if ($di!=$dn);
+	&mov	($d_,$d)			if ($di==$dn);
+	&xor	($b,$c);
+	 &add	($a,$b_)			if ($i<7);	# elsewhere
+	&rol	($b,7);
+
+	($b,$b_)=($b_,$b);
+	($c,$c_)=($c_,$c);
+	($d,$d_)=($d_,$d);
+}
+
+&static_label("ssse3_shortcut");
+&static_label("xop_shortcut");
+&static_label("ssse3_data");
+&static_label("pic_point");
+
+&function_begin("ChaCha20_ctr32");
+	&xor	("eax","eax");
+	&cmp	("eax",&wparam(2));		# len==0?
+	&je	(&label("no_data"));
+if ($xmm) {
+	&call	(&label("pic_point"));
+&set_label("pic_point");
+	&blindpop("eax");
+	&picmeup("ebp","OPENSSL_ia32cap_P","eax",&label("pic_point"));
+	&test	(&DWP(0,"ebp"),1<<24);		# test FXSR bit
+	&jz	(&label("x86"));
+	&test	(&DWP(4,"ebp"),1<<9);		# test SSSE3 bit
+	&jz	(&label("x86"));
+	&jmp	(&label("ssse3_shortcut"));
+&set_label("x86");
+}
+	&mov	("esi",&wparam(3));		# key
+	&mov	("edi",&wparam(4));		# counter and nonce
+
+	&stack_push(33);
+
+	&mov	("eax",&DWP(4*0,"esi"));	# copy key
+	&mov	("ebx",&DWP(4*1,"esi"));
+	&mov	("ecx",&DWP(4*2,"esi"));
+	&mov	("edx",&DWP(4*3,"esi"));
+	&mov	(&DWP(64+4*4,"esp"),"eax");
+	&mov	(&DWP(64+4*5,"esp"),"ebx");
+	&mov	(&DWP(64+4*6,"esp"),"ecx");
+	&mov	(&DWP(64+4*7,"esp"),"edx");
+	&mov	("eax",&DWP(4*4,"esi"));
+	&mov	("ebx",&DWP(4*5,"esi"));
+	&mov	("ecx",&DWP(4*6,"esi"));
+	&mov	("edx",&DWP(4*7,"esi"));
+	&mov	(&DWP(64+4*8,"esp"),"eax");
+	&mov	(&DWP(64+4*9,"esp"),"ebx");
+	&mov	(&DWP(64+4*10,"esp"),"ecx");
+	&mov	(&DWP(64+4*11,"esp"),"edx");
+	&mov	("eax",&DWP(4*0,"edi"));	# copy counter and nonce
+	&mov	("ebx",&DWP(4*1,"edi"));
+	&mov	("ecx",&DWP(4*2,"edi"));
+	&mov	("edx",&DWP(4*3,"edi"));
+	&sub	("eax",1);
+	&mov	(&DWP(64+4*12,"esp"),"eax");
+	&mov	(&DWP(64+4*13,"esp"),"ebx");
+	&mov	(&DWP(64+4*14,"esp"),"ecx");
+	&mov	(&DWP(64+4*15,"esp"),"edx");
+	&jmp	(&label("entry"));
+
+&set_label("outer_loop",16);
+	&mov	(&wparam(1),$b);		# save input
+	&mov	(&wparam(0),$a);		# save output
+	&mov	(&wparam(2),$c);		# save len
+&set_label("entry");
+	&mov	($a,0x61707865);
+	&mov	(&DWP(4*1,"esp"),0x3320646e);
+	&mov	(&DWP(4*2,"esp"),0x79622d32);
+	&mov	(&DWP(4*3,"esp"),0x6b206574);
+
+	&mov	($b, &DWP(64+4*5,"esp"));	# copy key material
+	&mov	($b_,&DWP(64+4*6,"esp"));
+	&mov	($c, &DWP(64+4*10,"esp"));
+	&mov	($c_,&DWP(64+4*11,"esp"));
+	&mov	($d, &DWP(64+4*13,"esp"));
+	&mov	($d_,&DWP(64+4*14,"esp"));
+	&mov	(&DWP(4*5,"esp"),$b);
+	&mov	(&DWP(4*6,"esp"),$b_);
+	&mov	(&DWP(4*10,"esp"),$c);
+	&mov	(&DWP(4*11,"esp"),$c_);
+	&mov	(&DWP(4*13,"esp"),$d);
+	&mov	(&DWP(4*14,"esp"),$d_);
+
+	&mov	($b, &DWP(64+4*7,"esp"));
+	&mov	($d_,&DWP(64+4*15,"esp"));
+	&mov	($d, &DWP(64+4*12,"esp"));
+	&mov	($b_,&DWP(64+4*4,"esp"));
+	&mov	($c, &DWP(64+4*8,"esp"));
+	&mov	($c_,&DWP(64+4*9,"esp"));
+	&add	($d,1);				# counter value
+	&mov	(&DWP(4*7,"esp"),$b);
+	&mov	(&DWP(4*15,"esp"),$d_);
+	&mov	(&DWP(64+4*12,"esp"),$d);	# save counter value
+
+	&mov	($b,10);			# loop counter
+	&jmp	(&label("loop"));
+
+&set_label("loop",16);
+	&add	($a,$b_);			# elsewhere
+	&mov	(&DWP(128,"esp"),$b);		# save loop counter
+	&mov	($b,$b_);
+	&QUARTERROUND(0, 4, 8, 12, 0);
+	&QUARTERROUND(1, 5, 9, 13, 1);
+	&QUARTERROUND(2, 6,10, 14, 2);
+	&QUARTERROUND(3, 7,11, 15, 3);
+	&QUARTERROUND(0, 5,10, 15, 4);
+	&QUARTERROUND(1, 6,11, 12, 5);
+	&QUARTERROUND(2, 7, 8, 13, 6);
+	&QUARTERROUND(3, 4, 9, 14, 7);
+	&dec	($b);
+	&jnz	(&label("loop"));
+
+	&mov	($b,&wparam(2));		# load len
+
+	&add	($a,0x61707865);		# accumulate key material
+	&add	($b_,&DWP(64+4*4,"esp"));
+	&add	($c, &DWP(64+4*8,"esp"));
+	&add	($c_,&DWP(64+4*9,"esp"));
+
+	&cmp	($b,64);
+	&jb	(&label("tail"));
+
+	&mov	($b,&wparam(1));		# load input pointer
+	&add	($d, &DWP(64+4*12,"esp"));
+	&add	($d_,&DWP(64+4*14,"esp"));
+
+	&xor	($a, &DWP(4*0,$b));		# xor with input
+	&xor	($b_,&DWP(4*4,$b));
+	&mov	(&DWP(4*0,"esp"),$a);
+	&mov	($a,&wparam(0));		# load output pointer
+	&xor	($c, &DWP(4*8,$b));
+	&xor	($c_,&DWP(4*9,$b));
+	&xor	($d, &DWP(4*12,$b));
+	&xor	($d_,&DWP(4*14,$b));
+	&mov	(&DWP(4*4,$a),$b_);		# write output
+	&mov	(&DWP(4*8,$a),$c);
+	&mov	(&DWP(4*9,$a),$c_);
+	&mov	(&DWP(4*12,$a),$d);
+	&mov	(&DWP(4*14,$a),$d_);
+
+	&mov	($b_,&DWP(4*1,"esp"));
+	&mov	($c, &DWP(4*2,"esp"));
+	&mov	($c_,&DWP(4*3,"esp"));
+	&mov	($d, &DWP(4*5,"esp"));
+	&mov	($d_,&DWP(4*6,"esp"));
+	&add	($b_,0x3320646e);		# accumulate key material
+	&add	($c, 0x79622d32);
+	&add	($c_,0x6b206574);
+	&add	($d, &DWP(64+4*5,"esp"));
+	&add	($d_,&DWP(64+4*6,"esp"));
+	&xor	($b_,&DWP(4*1,$b));
+	&xor	($c, &DWP(4*2,$b));
+	&xor	($c_,&DWP(4*3,$b));
+	&xor	($d, &DWP(4*5,$b));
+	&xor	($d_,&DWP(4*6,$b));
+	&mov	(&DWP(4*1,$a),$b_);
+	&mov	(&DWP(4*2,$a),$c);
+	&mov	(&DWP(4*3,$a),$c_);
+	&mov	(&DWP(4*5,$a),$d);
+	&mov	(&DWP(4*6,$a),$d_);
+
+	&mov	($b_,&DWP(4*7,"esp"));
+	&mov	($c, &DWP(4*10,"esp"));
+	&mov	($c_,&DWP(4*11,"esp"));
+	&mov	($d, &DWP(4*13,"esp"));
+	&mov	($d_,&DWP(4*15,"esp"));
+	&add	($b_,&DWP(64+4*7,"esp"));
+	&add	($c, &DWP(64+4*10,"esp"));
+	&add	($c_,&DWP(64+4*11,"esp"));
+	&add	($d, &DWP(64+4*13,"esp"));
+	&add	($d_,&DWP(64+4*15,"esp"));
+	&xor	($b_,&DWP(4*7,$b));
+	&xor	($c, &DWP(4*10,$b));
+	&xor	($c_,&DWP(4*11,$b));
+	&xor	($d, &DWP(4*13,$b));
+	&xor	($d_,&DWP(4*15,$b));
+	&lea	($b,&DWP(4*16,$b));
+	&mov	(&DWP(4*7,$a),$b_);
+	&mov	($b_,&DWP(4*0,"esp"));
+	&mov	(&DWP(4*10,$a),$c);
+	&mov	($c,&wparam(2));		# len
+	&mov	(&DWP(4*11,$a),$c_);
+	&mov	(&DWP(4*13,$a),$d);
+	&mov	(&DWP(4*15,$a),$d_);
+	&mov	(&DWP(4*0,$a),$b_);
+	&lea	($a,&DWP(4*16,$a));
+	&sub	($c,64);
+	&jnz	(&label("outer_loop"));
+
+	&jmp	(&label("done"));
+
+&set_label("tail");
+	&add	($d, &DWP(64+4*12,"esp"));
+	&add	($d_,&DWP(64+4*14,"esp"));
+	&mov	(&DWP(4*0,"esp"),$a);
+	&mov	(&DWP(4*4,"esp"),$b_);
+	&mov	(&DWP(4*8,"esp"),$c);
+	&mov	(&DWP(4*9,"esp"),$c_);
+	&mov	(&DWP(4*12,"esp"),$d);
+	&mov	(&DWP(4*14,"esp"),$d_);
+
+	&mov	($b_,&DWP(4*1,"esp"));
+	&mov	($c, &DWP(4*2,"esp"));
+	&mov	($c_,&DWP(4*3,"esp"));
+	&mov	($d, &DWP(4*5,"esp"));
+	&mov	($d_,&DWP(4*6,"esp"));
+	&add	($b_,0x3320646e);		# accumulate key material
+	&add	($c, 0x79622d32);
+	&add	($c_,0x6b206574);
+	&add	($d, &DWP(64+4*5,"esp"));
+	&add	($d_,&DWP(64+4*6,"esp"));
+	&mov	(&DWP(4*1,"esp"),$b_);
+	&mov	(&DWP(4*2,"esp"),$c);
+	&mov	(&DWP(4*3,"esp"),$c_);
+	&mov	(&DWP(4*5,"esp"),$d);
+	&mov	(&DWP(4*6,"esp"),$d_);
+
+	&mov	($b_,&DWP(4*7,"esp"));
+	&mov	($c, &DWP(4*10,"esp"));
+	&mov	($c_,&DWP(4*11,"esp"));
+	&mov	($d, &DWP(4*13,"esp"));
+	&mov	($d_,&DWP(4*15,"esp"));
+	&add	($b_,&DWP(64+4*7,"esp"));
+	&add	($c, &DWP(64+4*10,"esp"));
+	&add	($c_,&DWP(64+4*11,"esp"));
+	&add	($d, &DWP(64+4*13,"esp"));
+	&add	($d_,&DWP(64+4*15,"esp"));
+	&mov	(&DWP(4*7,"esp"),$b_);
+	&mov	($b_,&wparam(1));		# load input
+	&mov	(&DWP(4*10,"esp"),$c);
+	&mov	($c,&wparam(0));		# load output
+	&mov	(&DWP(4*11,"esp"),$c_);
+	&xor	($c_,$c_);
+	&mov	(&DWP(4*13,"esp"),$d);
+	&mov	(&DWP(4*15,"esp"),$d_);
+
+	&xor	("eax","eax");
+	&xor	("edx","edx");
+&set_label("tail_loop");
+	&movb	("al",&BP(0,$c_,$b_));
+	&movb	("dl",&BP(0,"esp",$c_));
+	&lea	($c_,&DWP(1,$c_));
+	&xor	("al","dl");
+	&mov	(&BP(-1,$c,$c_),"al");
+	&dec	($b);
+	&jnz	(&label("tail_loop"));
+
+&set_label("done");
+	&stack_pop(33);
+&set_label("no_data");
+&function_end("ChaCha20_ctr32");
+
+if ($xmm) {
+my ($xa,$xa_,$xb,$xb_,$xc,$xc_,$xd,$xd_)=map("xmm$_",(0..7));
+my ($out,$inp,$len)=("edi","esi","ecx");
+
+sub QUARTERROUND_SSSE3 {
+my ($ai,$bi,$ci,$di,$i)=@_;
+my ($an,$bn,$cn,$dn)=map(($_&~3)+(($_+1)&3),($ai,$bi,$ci,$di));	# next
+my ($ap,$bp,$cp,$dp)=map(($_&~3)+(($_-1)&3),($ai,$bi,$ci,$di));	# previous
+
+	#       a   b   c   d
+	#
+	#       0   4   8  12 < even round
+	#       1   5   9  13
+	#       2   6  10  14
+	#       3   7  11  15
+	#       0   5  10  15 < odd round
+	#       1   6  11  12
+	#       2   7   8  13
+	#       3   4   9  14
+
+	if ($i==0) {
+            my $j=4;
+	    ($ap,$bp,$cp,$dp)=map(($_&~3)+(($_-$j--)&3),($ap,$bp,$cp,$dp));
+	} elsif ($i==3) {
+            my $j=0;
+	    ($an,$bn,$cn,$dn)=map(($_&~3)+(($_+$j++)&3),($an,$bn,$cn,$dn));
+	} elsif ($i==4) {
+            my $j=4;
+	    ($ap,$bp,$cp,$dp)=map(($_&~3)+(($_+$j--)&3),($ap,$bp,$cp,$dp));
+	} elsif ($i==7) {
+            my $j=0;
+	    ($an,$bn,$cn,$dn)=map(($_&~3)+(($_-$j++)&3),($an,$bn,$cn,$dn));
+	}
+
+	#&paddd	($xa,$xb);			# see elsewhere
+	#&pxor	($xd,$xa);			# see elsewhere
+	 &movdqa(&QWP(16*$cp-128,"ebx"),$xc_)	if ($ai>0 && $ai<3);
+	&pshufb	($xd,&QWP(0,"eax"));		# rot16
+	 &movdqa(&QWP(16*$bp-128,"ebx"),$xb_)	if ($i!=0);
+	&paddd	($xc,$xd);
+	 &movdqa($xc_,&QWP(16*$cn-128,"ebx"))	if ($ai>0 && $ai<3);
+	&pxor	($xb,$xc);
+	 &movdqa($xb_,&QWP(16*$bn-128,"ebx"))	if ($i<7);
+	&movdqa	($xa_,$xb);			# borrow as temporary
+	&pslld	($xb,12);
+	&psrld	($xa_,20);
+	&por	($xb,$xa_);
+	 &movdqa($xa_,&QWP(16*$an-128,"ebx"));
+	&paddd	($xa,$xb);
+	 &movdqa($xd_,&QWP(16*$dn-128,"ebx"))	if ($di!=$dn);
+	&pxor	($xd,$xa);
+	&movdqa	(&QWP(16*$ai-128,"ebx"),$xa);
+	&pshufb	($xd,&QWP(16,"eax"));		# rot8
+	&paddd	($xc,$xd);
+	&movdqa	(&QWP(16*$di-128,"ebx"),$xd)	if ($di!=$dn);
+	&movdqa	($xd_,$xd)			if ($di==$dn);
+	&pxor	($xb,$xc);
+	 &paddd	($xa_,$xb_)			if ($i<7);	# elsewhere
+	&movdqa	($xa,$xb);			# borrow as temporary
+	&pslld	($xb,7);
+	&psrld	($xa,25);
+	 &pxor	($xd_,$xa_)			if ($i<7);	# elsewhere
+	&por	($xb,$xa);
+
+	($xa,$xa_)=($xa_,$xa);
+	($xb,$xb_)=($xb_,$xb);
+	($xc,$xc_)=($xc_,$xc);
+	($xd,$xd_)=($xd_,$xd);
+}
+
+&function_begin("ChaCha20_ssse3");
+&set_label("ssse3_shortcut");
+if ($ymm) {
+	&test		(&DWP(4,"ebp"),1<<11);		# test XOP bit
+	&jnz		(&label("xop_shortcut"));
+}
+
+	&mov		($out,&wparam(0));
+	&mov		($inp,&wparam(1));
+	&mov		($len,&wparam(2));
+	&mov		("edx",&wparam(3));		# key
+	&mov		("ebx",&wparam(4));		# counter and nonce
+
+	&mov		("ebp","esp");
+	&stack_push	(131);
+	&and		("esp",-64);
+	&mov		(&DWP(512,"esp"),"ebp");
+
+	&lea		("eax",&DWP(&label("ssse3_data")."-".
+				    &label("pic_point"),"eax"));
+	&movdqu		("xmm3",&QWP(0,"ebx"));		# counter and nonce
+
+if (defined($gasver) && $gasver>=2.17) {		# even though we encode
+							# pshufb manually, we
+							# handle only register
+							# operands, while this
+							# segment uses memory
+							# operand...
+	&cmp		($len,64*4);
+	&jb		(&label("1x"));
+
+	&mov		(&DWP(512+4,"esp"),"edx");	# offload pointers
+	&mov		(&DWP(512+8,"esp"),"ebx");
+	&sub		($len,64*4);			# bias len
+	&lea		("ebp",&DWP(256+128,"esp"));	# size optimization
+
+	&movdqu		("xmm7",&QWP(0,"edx"));		# key
+	&pshufd		("xmm0","xmm3",0x00);
+	&pshufd		("xmm1","xmm3",0x55);
+	&pshufd		("xmm2","xmm3",0xaa);
+	&pshufd		("xmm3","xmm3",0xff);
+	 &paddd		("xmm0",&QWP(16*3,"eax"));	# fix counters
+	&pshufd		("xmm4","xmm7",0x00);
+	&pshufd		("xmm5","xmm7",0x55);
+	 &psubd		("xmm0",&QWP(16*4,"eax"));
+	&pshufd		("xmm6","xmm7",0xaa);
+	&pshufd		("xmm7","xmm7",0xff);
+	&movdqa		(&QWP(16*12-128,"ebp"),"xmm0");
+	&movdqa		(&QWP(16*13-128,"ebp"),"xmm1");
+	&movdqa		(&QWP(16*14-128,"ebp"),"xmm2");
+	&movdqa		(&QWP(16*15-128,"ebp"),"xmm3");
+	 &movdqu	("xmm3",&QWP(16,"edx"));	# key
+	&movdqa		(&QWP(16*4-128,"ebp"),"xmm4");
+	&movdqa		(&QWP(16*5-128,"ebp"),"xmm5");
+	&movdqa		(&QWP(16*6-128,"ebp"),"xmm6");
+	&movdqa		(&QWP(16*7-128,"ebp"),"xmm7");
+	 &movdqa	("xmm7",&QWP(16*2,"eax"));	# sigma
+	 &lea		("ebx",&DWP(128,"esp"));	# size optimization
+
+	&pshufd		("xmm0","xmm3",0x00);
+	&pshufd		("xmm1","xmm3",0x55);
+	&pshufd		("xmm2","xmm3",0xaa);
+	&pshufd		("xmm3","xmm3",0xff);
+	&pshufd		("xmm4","xmm7",0x00);
+	&pshufd		("xmm5","xmm7",0x55);
+	&pshufd		("xmm6","xmm7",0xaa);
+	&pshufd		("xmm7","xmm7",0xff);
+	&movdqa		(&QWP(16*8-128,"ebp"),"xmm0");
+	&movdqa		(&QWP(16*9-128,"ebp"),"xmm1");
+	&movdqa		(&QWP(16*10-128,"ebp"),"xmm2");
+	&movdqa		(&QWP(16*11-128,"ebp"),"xmm3");
+	&movdqa		(&QWP(16*0-128,"ebp"),"xmm4");
+	&movdqa		(&QWP(16*1-128,"ebp"),"xmm5");
+	&movdqa		(&QWP(16*2-128,"ebp"),"xmm6");
+	&movdqa		(&QWP(16*3-128,"ebp"),"xmm7");
+
+	&lea		($inp,&DWP(128,$inp));		# size optimization
+	&lea		($out,&DWP(128,$out));		# size optimization
+	&jmp		(&label("outer_loop"));
+
+&set_label("outer_loop",16);
+	#&movdqa	("xmm0",&QWP(16*0-128,"ebp"));	# copy key material
+	&movdqa		("xmm1",&QWP(16*1-128,"ebp"));
+	&movdqa		("xmm2",&QWP(16*2-128,"ebp"));
+	&movdqa		("xmm3",&QWP(16*3-128,"ebp"));
+	#&movdqa	("xmm4",&QWP(16*4-128,"ebp"));
+	&movdqa		("xmm5",&QWP(16*5-128,"ebp"));
+	&movdqa		("xmm6",&QWP(16*6-128,"ebp"));
+	&movdqa		("xmm7",&QWP(16*7-128,"ebp"));
+	#&movdqa	(&QWP(16*0-128,"ebx"),"xmm0");
+	&movdqa		(&QWP(16*1-128,"ebx"),"xmm1");
+	&movdqa		(&QWP(16*2-128,"ebx"),"xmm2");
+	&movdqa		(&QWP(16*3-128,"ebx"),"xmm3");
+	#&movdqa	(&QWP(16*4-128,"ebx"),"xmm4");
+	&movdqa		(&QWP(16*5-128,"ebx"),"xmm5");
+	&movdqa		(&QWP(16*6-128,"ebx"),"xmm6");
+	&movdqa		(&QWP(16*7-128,"ebx"),"xmm7");
+	#&movdqa	("xmm0",&QWP(16*8-128,"ebp"));
+	#&movdqa	("xmm1",&QWP(16*9-128,"ebp"));
+	&movdqa		("xmm2",&QWP(16*10-128,"ebp"));
+	&movdqa		("xmm3",&QWP(16*11-128,"ebp"));
+	&movdqa		("xmm4",&QWP(16*12-128,"ebp"));
+	&movdqa		("xmm5",&QWP(16*13-128,"ebp"));
+	&movdqa		("xmm6",&QWP(16*14-128,"ebp"));
+	&movdqa		("xmm7",&QWP(16*15-128,"ebp"));
+	&paddd		("xmm4",&QWP(16*4,"eax"));	# counter value
+	#&movdqa	(&QWP(16*8-128,"ebx"),"xmm0");
+	#&movdqa	(&QWP(16*9-128,"ebx"),"xmm1");
+	&movdqa		(&QWP(16*10-128,"ebx"),"xmm2");
+	&movdqa		(&QWP(16*11-128,"ebx"),"xmm3");
+	&movdqa		(&QWP(16*12-128,"ebx"),"xmm4");
+	&movdqa		(&QWP(16*13-128,"ebx"),"xmm5");
+	&movdqa		(&QWP(16*14-128,"ebx"),"xmm6");
+	&movdqa		(&QWP(16*15-128,"ebx"),"xmm7");
+	&movdqa		(&QWP(16*12-128,"ebp"),"xmm4");	# save counter value
+
+	&movdqa		($xa, &QWP(16*0-128,"ebp"));
+	&movdqa		($xd, "xmm4");
+	&movdqa		($xb_,&QWP(16*4-128,"ebp"));
+	&movdqa		($xc, &QWP(16*8-128,"ebp"));
+	&movdqa		($xc_,&QWP(16*9-128,"ebp"));
+
+	&mov		("edx",10);			# loop counter
+	&nop		();
+
+&set_label("loop",16);
+	&paddd		($xa,$xb_);			# elsewhere
+	&movdqa		($xb,$xb_);
+	&pxor		($xd,$xa);			# elsewhere
+	&QUARTERROUND_SSSE3(0, 4, 8, 12, 0);
+	&QUARTERROUND_SSSE3(1, 5, 9, 13, 1);
+	&QUARTERROUND_SSSE3(2, 6,10, 14, 2);
+	&QUARTERROUND_SSSE3(3, 7,11, 15, 3);
+	&QUARTERROUND_SSSE3(0, 5,10, 15, 4);
+	&QUARTERROUND_SSSE3(1, 6,11, 12, 5);
+	&QUARTERROUND_SSSE3(2, 7, 8, 13, 6);
+	&QUARTERROUND_SSSE3(3, 4, 9, 14, 7);
+	&dec		("edx");
+	&jnz		(&label("loop"));
+
+	&movdqa		(&QWP(16*4-128,"ebx"),$xb_);
+	&movdqa		(&QWP(16*8-128,"ebx"),$xc);
+	&movdqa		(&QWP(16*9-128,"ebx"),$xc_);
+	&movdqa		(&QWP(16*12-128,"ebx"),$xd);
+	&movdqa		(&QWP(16*14-128,"ebx"),$xd_);
+
+    my ($xa0,$xa1,$xa2,$xa3,$xt0,$xt1,$xt2,$xt3)=map("xmm$_",(0..7));
+
+	#&movdqa	($xa0,&QWP(16*0-128,"ebx"));	# it's there
+	&movdqa		($xa1,&QWP(16*1-128,"ebx"));
+	&movdqa		($xa2,&QWP(16*2-128,"ebx"));
+	&movdqa		($xa3,&QWP(16*3-128,"ebx"));
+
+    for($i=0;$i<256;$i+=64) {
+	&paddd		($xa0,&QWP($i+16*0-128,"ebp"));	# accumulate key material
+	&paddd		($xa1,&QWP($i+16*1-128,"ebp"));
+	&paddd		($xa2,&QWP($i+16*2-128,"ebp"));
+	&paddd		($xa3,&QWP($i+16*3-128,"ebp"));
+
+	&movdqa		($xt2,$xa0);		# "de-interlace" data
+	&punpckldq	($xa0,$xa1);
+	&movdqa		($xt3,$xa2);
+	&punpckldq	($xa2,$xa3);
+	&punpckhdq	($xt2,$xa1);
+	&punpckhdq	($xt3,$xa3);
+	&movdqa		($xa1,$xa0);
+	&punpcklqdq	($xa0,$xa2);		# "a0"
+	&movdqa		($xa3,$xt2);
+	&punpcklqdq	($xt2,$xt3);		# "a2"
+	&punpckhqdq	($xa1,$xa2);		# "a1"
+	&punpckhqdq	($xa3,$xt3);		# "a3"
+
+	#($xa2,$xt2)=($xt2,$xa2);
+
+	&movdqu		($xt0,&QWP(64*0-128,$inp));	# load input
+	&movdqu		($xt1,&QWP(64*1-128,$inp));
+	&movdqu		($xa2,&QWP(64*2-128,$inp));
+	&movdqu		($xt3,&QWP(64*3-128,$inp));
+	&lea		($inp,&QWP($i<192?16:(64*4-16*3),$inp));
+	&pxor		($xt0,$xa0);
+	&movdqa		($xa0,&QWP($i+16*4-128,"ebx"))	if ($i<192);
+	&pxor		($xt1,$xa1);
+	&movdqa		($xa1,&QWP($i+16*5-128,"ebx"))	if ($i<192);
+	&pxor		($xt2,$xa2);
+	&movdqa		($xa2,&QWP($i+16*6-128,"ebx"))	if ($i<192);
+	&pxor		($xt3,$xa3);
+	&movdqa		($xa3,&QWP($i+16*7-128,"ebx"))	if ($i<192);
+	&movdqu		(&QWP(64*0-128,$out),$xt0);	# store output
+	&movdqu		(&QWP(64*1-128,$out),$xt1);
+	&movdqu		(&QWP(64*2-128,$out),$xt2);
+	&movdqu		(&QWP(64*3-128,$out),$xt3);
+	&lea		($out,&QWP($i<192?16:(64*4-16*3),$out));
+    }
+	&sub		($len,64*4);
+	&jnc		(&label("outer_loop"));
+
+	&add		($len,64*4);
+	&jz		(&label("done"));
+
+	&mov		("ebx",&DWP(512+8,"esp"));	# restore pointers
+	&lea		($inp,&DWP(-128,$inp));
+	&mov		("edx",&DWP(512+4,"esp"));
+	&lea		($out,&DWP(-128,$out));
+
+	&movd		("xmm2",&DWP(16*12-128,"ebp"));	# counter value
+	&movdqu		("xmm3",&QWP(0,"ebx"));
+	&paddd		("xmm2",&QWP(16*6,"eax"));	# +four
+	&pand		("xmm3",&QWP(16*7,"eax"));
+	&por		("xmm3","xmm2");		# counter value
+}
+{
+my ($a,$b,$c,$d,$t,$t1,$rot16,$rot24)=map("xmm$_",(0..7));
+
+sub SSSE3ROUND {	# critical path is 20 "SIMD ticks" per round
+	&paddd		($a,$b);
+	&pxor		($d,$a);
+	&pshufb		($d,$rot16);
+
+	&paddd		($c,$d);
+	&pxor		($b,$c);
+	&movdqa		($t,$b);
+	&psrld		($b,20);
+	&pslld		($t,12);
+	&por		($b,$t);
+
+	&paddd		($a,$b);
+	&pxor		($d,$a);
+	&pshufb		($d,$rot24);
+
+	&paddd		($c,$d);
+	&pxor		($b,$c);
+	&movdqa		($t,$b);
+	&psrld		($b,25);
+	&pslld		($t,7);
+	&por		($b,$t);
+}
+
+&set_label("1x");
+	&movdqa		($a,&QWP(16*2,"eax"));		# sigma
+	&movdqu		($b,&QWP(0,"edx"));
+	&movdqu		($c,&QWP(16,"edx"));
+	#&movdqu	($d,&QWP(0,"ebx"));		# already loaded
+	&movdqa		($rot16,&QWP(0,"eax"));
+	&movdqa		($rot24,&QWP(16,"eax"));
+	&mov		(&DWP(16*3,"esp"),"ebp");
+
+	&movdqa		(&QWP(16*0,"esp"),$a);
+	&movdqa		(&QWP(16*1,"esp"),$b);
+	&movdqa		(&QWP(16*2,"esp"),$c);
+	&movdqa		(&QWP(16*3,"esp"),$d);
+	&mov		("edx",10);
+	&jmp		(&label("loop1x"));
+
+&set_label("outer1x",16);
+	&movdqa		($d,&QWP(16*5,"eax"));		# one
+	&movdqa		($a,&QWP(16*0,"esp"));
+	&movdqa		($b,&QWP(16*1,"esp"));
+	&movdqa		($c,&QWP(16*2,"esp"));
+	&paddd		($d,&QWP(16*3,"esp"));
+	&mov		("edx",10);
+	&movdqa		(&QWP(16*3,"esp"),$d);
+	&jmp		(&label("loop1x"));
+
+&set_label("loop1x",16);
+	&SSSE3ROUND();
+	&pshufd	($c,$c,0b01001110);
+	&pshufd	($b,$b,0b00111001);
+	&pshufd	($d,$d,0b10010011);
+	&nop	();
+
+	&SSSE3ROUND();
+	&pshufd	($c,$c,0b01001110);
+	&pshufd	($b,$b,0b10010011);
+	&pshufd	($d,$d,0b00111001);
+
+	&dec		("edx");
+	&jnz		(&label("loop1x"));
+
+	&paddd		($a,&QWP(16*0,"esp"));
+	&paddd		($b,&QWP(16*1,"esp"));
+	&paddd		($c,&QWP(16*2,"esp"));
+	&paddd		($d,&QWP(16*3,"esp"));
+
+	&cmp		($len,64);
+	&jb		(&label("tail"));
+
+	&movdqu		($t,&QWP(16*0,$inp));
+	&movdqu		($t1,&QWP(16*1,$inp));
+	&pxor		($a,$t);		# xor with input
+	&movdqu		($t,&QWP(16*2,$inp));
+	&pxor		($b,$t1);
+	&movdqu		($t1,&QWP(16*3,$inp));
+	&pxor		($c,$t);
+	&pxor		($d,$t1);
+	&lea		($inp,&DWP(16*4,$inp));	# inp+=64
+
+	&movdqu		(&QWP(16*0,$out),$a);	# write output
+	&movdqu		(&QWP(16*1,$out),$b);
+	&movdqu		(&QWP(16*2,$out),$c);
+	&movdqu		(&QWP(16*3,$out),$d);
+	&lea		($out,&DWP(16*4,$out));	# out+=64
+
+	&sub		($len,64);
+	&jnz		(&label("outer1x"));
+
+	&jmp		(&label("done"));
+
+&set_label("tail");
+	&movdqa		(&QWP(16*0,"esp"),$a);
+	&movdqa		(&QWP(16*1,"esp"),$b);
+	&movdqa		(&QWP(16*2,"esp"),$c);
+	&movdqa		(&QWP(16*3,"esp"),$d);
+
+	&xor		("eax","eax");
+	&xor		("edx","edx");
+	&xor		("ebp","ebp");
+
+&set_label("tail_loop");
+	&movb		("al",&BP(0,"esp","ebp"));
+	&movb		("dl",&BP(0,$inp,"ebp"));
+	&lea		("ebp",&DWP(1,"ebp"));
+	&xor		("al","dl");
+	&movb		(&BP(-1,$out,"ebp"),"al");
+	&dec		($len);
+	&jnz		(&label("tail_loop"));
+}
+&set_label("done");
+	&mov		("esp",&DWP(512,"esp"));
+&function_end("ChaCha20_ssse3");
+
+&align	(64);
+&set_label("ssse3_data");
+&data_byte(0x2,0x3,0x0,0x1, 0x6,0x7,0x4,0x5, 0xa,0xb,0x8,0x9, 0xe,0xf,0xc,0xd);
+&data_byte(0x3,0x0,0x1,0x2, 0x7,0x4,0x5,0x6, 0xb,0x8,0x9,0xa, 0xf,0xc,0xd,0xe);
+&data_word(0x61707865,0x3320646e,0x79622d32,0x6b206574);
+&data_word(0,1,2,3);
+&data_word(4,4,4,4);
+&data_word(1,0,0,0);
+&data_word(4,0,0,0);
+&data_word(0,-1,-1,-1);
+&align	(64);
+}
+&asciz	("ChaCha20 for x86, CRYPTOGAMS by <appro\@openssl.org>");
+
+if ($ymm) {
+my ($xa,$xa_,$xb,$xb_,$xc,$xc_,$xd,$xd_)=map("xmm$_",(0..7));
+my ($out,$inp,$len)=("edi","esi","ecx");
+
+sub QUARTERROUND_XOP {
+my ($ai,$bi,$ci,$di,$i)=@_;
+my ($an,$bn,$cn,$dn)=map(($_&~3)+(($_+1)&3),($ai,$bi,$ci,$di));	# next
+my ($ap,$bp,$cp,$dp)=map(($_&~3)+(($_-1)&3),($ai,$bi,$ci,$di));	# previous
+
+	#       a   b   c   d
+	#
+	#       0   4   8  12 < even round
+	#       1   5   9  13
+	#       2   6  10  14
+	#       3   7  11  15
+	#       0   5  10  15 < odd round
+	#       1   6  11  12
+	#       2   7   8  13
+	#       3   4   9  14
+
+	if ($i==0) {
+            my $j=4;
+	    ($ap,$bp,$cp,$dp)=map(($_&~3)+(($_-$j--)&3),($ap,$bp,$cp,$dp));
+	} elsif ($i==3) {
+            my $j=0;
+	    ($an,$bn,$cn,$dn)=map(($_&~3)+(($_+$j++)&3),($an,$bn,$cn,$dn));
+	} elsif ($i==4) {
+            my $j=4;
+	    ($ap,$bp,$cp,$dp)=map(($_&~3)+(($_+$j--)&3),($ap,$bp,$cp,$dp));
+	} elsif ($i==7) {
+            my $j=0;
+	    ($an,$bn,$cn,$dn)=map(($_&~3)+(($_-$j++)&3),($an,$bn,$cn,$dn));
+	}
+
+	#&vpaddd	($xa,$xa,$xb);			# see elsewhere
+	#&vpxor		($xd,$xd,$xa);			# see elsewhere
+	 &vmovdqa	(&QWP(16*$cp-128,"ebx"),$xc_)	if ($ai>0 && $ai<3);
+	&vprotd		($xd,$xd,16);
+	 &vmovdqa	(&QWP(16*$bp-128,"ebx"),$xb_)	if ($i!=0);
+	&vpaddd		($xc,$xc,$xd);
+	 &vmovdqa	($xc_,&QWP(16*$cn-128,"ebx"))	if ($ai>0 && $ai<3);
+	&vpxor		($xb,$i!=0?$xb:$xb_,$xc);
+	 &vmovdqa	($xa_,&QWP(16*$an-128,"ebx"));
+	&vprotd		($xb,$xb,12);
+	 &vmovdqa	($xb_,&QWP(16*$bn-128,"ebx"))	if ($i<7);
+	&vpaddd		($xa,$xa,$xb);
+	 &vmovdqa	($xd_,&QWP(16*$dn-128,"ebx"))	if ($di!=$dn);
+	&vpxor		($xd,$xd,$xa);
+	 &vpaddd	($xa_,$xa_,$xb_)		if ($i<7);	# elsewhere
+	&vprotd		($xd,$xd,8);
+	&vmovdqa	(&QWP(16*$ai-128,"ebx"),$xa);
+	&vpaddd		($xc,$xc,$xd);
+	&vmovdqa	(&QWP(16*$di-128,"ebx"),$xd)	if ($di!=$dn);
+	&vpxor		($xb,$xb,$xc);
+	 &vpxor		($xd_,$di==$dn?$xd:$xd_,$xa_)	if ($i<7);	# elsewhere
+	&vprotd		($xb,$xb,7);
+
+	($xa,$xa_)=($xa_,$xa);
+	($xb,$xb_)=($xb_,$xb);
+	($xc,$xc_)=($xc_,$xc);
+	($xd,$xd_)=($xd_,$xd);
+}
+
+&function_begin("ChaCha20_xop");
+&set_label("xop_shortcut");
+	&mov		($out,&wparam(0));
+	&mov		($inp,&wparam(1));
+	&mov		($len,&wparam(2));
+	&mov		("edx",&wparam(3));		# key
+	&mov		("ebx",&wparam(4));		# counter and nonce
+	&vzeroupper	();
+
+	&mov		("ebp","esp");
+	&stack_push	(131);
+	&and		("esp",-64);
+	&mov		(&DWP(512,"esp"),"ebp");
+
+	&lea		("eax",&DWP(&label("ssse3_data")."-".
+				    &label("pic_point"),"eax"));
+	&vmovdqu	("xmm3",&QWP(0,"ebx"));		# counter and nonce
+
+	&cmp		($len,64*4);
+	&jb		(&label("1x"));
+
+	&mov		(&DWP(512+4,"esp"),"edx");	# offload pointers
+	&mov		(&DWP(512+8,"esp"),"ebx");
+	&sub		($len,64*4);			# bias len
+	&lea		("ebp",&DWP(256+128,"esp"));	# size optimization
+
+	&vmovdqu	("xmm7",&QWP(0,"edx"));		# key
+	&vpshufd	("xmm0","xmm3",0x00);
+	&vpshufd	("xmm1","xmm3",0x55);
+	&vpshufd	("xmm2","xmm3",0xaa);
+	&vpshufd	("xmm3","xmm3",0xff);
+	 &vpaddd	("xmm0","xmm0",&QWP(16*3,"eax"));	# fix counters
+	&vpshufd	("xmm4","xmm7",0x00);
+	&vpshufd	("xmm5","xmm7",0x55);
+	 &vpsubd	("xmm0","xmm0",&QWP(16*4,"eax"));
+	&vpshufd	("xmm6","xmm7",0xaa);
+	&vpshufd	("xmm7","xmm7",0xff);
+	&vmovdqa	(&QWP(16*12-128,"ebp"),"xmm0");
+	&vmovdqa	(&QWP(16*13-128,"ebp"),"xmm1");
+	&vmovdqa	(&QWP(16*14-128,"ebp"),"xmm2");
+	&vmovdqa	(&QWP(16*15-128,"ebp"),"xmm3");
+	 &vmovdqu	("xmm3",&QWP(16,"edx"));	# key
+	&vmovdqa	(&QWP(16*4-128,"ebp"),"xmm4");
+	&vmovdqa	(&QWP(16*5-128,"ebp"),"xmm5");
+	&vmovdqa	(&QWP(16*6-128,"ebp"),"xmm6");
+	&vmovdqa	(&QWP(16*7-128,"ebp"),"xmm7");
+	 &vmovdqa	("xmm7",&QWP(16*2,"eax"));	# sigma
+	 &lea		("ebx",&DWP(128,"esp"));	# size optimization
+
+	&vpshufd	("xmm0","xmm3",0x00);
+	&vpshufd	("xmm1","xmm3",0x55);
+	&vpshufd	("xmm2","xmm3",0xaa);
+	&vpshufd	("xmm3","xmm3",0xff);
+	&vpshufd	("xmm4","xmm7",0x00);
+	&vpshufd	("xmm5","xmm7",0x55);
+	&vpshufd	("xmm6","xmm7",0xaa);
+	&vpshufd	("xmm7","xmm7",0xff);
+	&vmovdqa	(&QWP(16*8-128,"ebp"),"xmm0");
+	&vmovdqa	(&QWP(16*9-128,"ebp"),"xmm1");
+	&vmovdqa	(&QWP(16*10-128,"ebp"),"xmm2");
+	&vmovdqa	(&QWP(16*11-128,"ebp"),"xmm3");
+	&vmovdqa	(&QWP(16*0-128,"ebp"),"xmm4");
+	&vmovdqa	(&QWP(16*1-128,"ebp"),"xmm5");
+	&vmovdqa	(&QWP(16*2-128,"ebp"),"xmm6");
+	&vmovdqa	(&QWP(16*3-128,"ebp"),"xmm7");
+
+	&lea		($inp,&DWP(128,$inp));		# size optimization
+	&lea		($out,&DWP(128,$out));		# size optimization
+	&jmp		(&label("outer_loop"));
+
+&set_label("outer_loop",32);
+	#&vmovdqa	("xmm0",&QWP(16*0-128,"ebp"));	# copy key material
+	&vmovdqa	("xmm1",&QWP(16*1-128,"ebp"));
+	&vmovdqa	("xmm2",&QWP(16*2-128,"ebp"));
+	&vmovdqa	("xmm3",&QWP(16*3-128,"ebp"));
+	#&vmovdqa	("xmm4",&QWP(16*4-128,"ebp"));
+	&vmovdqa	("xmm5",&QWP(16*5-128,"ebp"));
+	&vmovdqa	("xmm6",&QWP(16*6-128,"ebp"));
+	&vmovdqa	("xmm7",&QWP(16*7-128,"ebp"));
+	#&vmovdqa	(&QWP(16*0-128,"ebx"),"xmm0");
+	&vmovdqa	(&QWP(16*1-128,"ebx"),"xmm1");
+	&vmovdqa	(&QWP(16*2-128,"ebx"),"xmm2");
+	&vmovdqa	(&QWP(16*3-128,"ebx"),"xmm3");
+	#&vmovdqa	(&QWP(16*4-128,"ebx"),"xmm4");
+	&vmovdqa	(&QWP(16*5-128,"ebx"),"xmm5");
+	&vmovdqa	(&QWP(16*6-128,"ebx"),"xmm6");
+	&vmovdqa	(&QWP(16*7-128,"ebx"),"xmm7");
+	#&vmovdqa	("xmm0",&QWP(16*8-128,"ebp"));
+	#&vmovdqa	("xmm1",&QWP(16*9-128,"ebp"));
+	&vmovdqa	("xmm2",&QWP(16*10-128,"ebp"));
+	&vmovdqa	("xmm3",&QWP(16*11-128,"ebp"));
+	&vmovdqa	("xmm4",&QWP(16*12-128,"ebp"));
+	&vmovdqa	("xmm5",&QWP(16*13-128,"ebp"));
+	&vmovdqa	("xmm6",&QWP(16*14-128,"ebp"));
+	&vmovdqa	("xmm7",&QWP(16*15-128,"ebp"));
+	&vpaddd		("xmm4","xmm4",&QWP(16*4,"eax"));	# counter value
+	#&vmovdqa	(&QWP(16*8-128,"ebx"),"xmm0");
+	#&vmovdqa	(&QWP(16*9-128,"ebx"),"xmm1");
+	&vmovdqa	(&QWP(16*10-128,"ebx"),"xmm2");
+	&vmovdqa	(&QWP(16*11-128,"ebx"),"xmm3");
+	&vmovdqa	(&QWP(16*12-128,"ebx"),"xmm4");
+	&vmovdqa	(&QWP(16*13-128,"ebx"),"xmm5");
+	&vmovdqa	(&QWP(16*14-128,"ebx"),"xmm6");
+	&vmovdqa	(&QWP(16*15-128,"ebx"),"xmm7");
+	&vmovdqa	(&QWP(16*12-128,"ebp"),"xmm4");	# save counter value
+
+	&vmovdqa	($xa, &QWP(16*0-128,"ebp"));
+	&vmovdqa	($xd, "xmm4");
+	&vmovdqa	($xb_,&QWP(16*4-128,"ebp"));
+	&vmovdqa	($xc, &QWP(16*8-128,"ebp"));
+	&vmovdqa	($xc_,&QWP(16*9-128,"ebp"));
+
+	&mov		("edx",10);			# loop counter
+	&nop		();
+
+&set_label("loop",32);
+	&vpaddd		($xa,$xa,$xb_);			# elsewhere
+	&vpxor		($xd,$xd,$xa);			# elsewhere
+	&QUARTERROUND_XOP(0, 4, 8, 12, 0);
+	&QUARTERROUND_XOP(1, 5, 9, 13, 1);
+	&QUARTERROUND_XOP(2, 6,10, 14, 2);
+	&QUARTERROUND_XOP(3, 7,11, 15, 3);
+	&QUARTERROUND_XOP(0, 5,10, 15, 4);
+	&QUARTERROUND_XOP(1, 6,11, 12, 5);
+	&QUARTERROUND_XOP(2, 7, 8, 13, 6);
+	&QUARTERROUND_XOP(3, 4, 9, 14, 7);
+	&dec		("edx");
+	&jnz		(&label("loop"));
+
+	&vmovdqa	(&QWP(16*4-128,"ebx"),$xb_);
+	&vmovdqa	(&QWP(16*8-128,"ebx"),$xc);
+	&vmovdqa	(&QWP(16*9-128,"ebx"),$xc_);
+	&vmovdqa	(&QWP(16*12-128,"ebx"),$xd);
+	&vmovdqa	(&QWP(16*14-128,"ebx"),$xd_);
+
+    my ($xa0,$xa1,$xa2,$xa3,$xt0,$xt1,$xt2,$xt3)=map("xmm$_",(0..7));
+
+	#&vmovdqa	($xa0,&QWP(16*0-128,"ebx"));	# it's there
+	&vmovdqa	($xa1,&QWP(16*1-128,"ebx"));
+	&vmovdqa	($xa2,&QWP(16*2-128,"ebx"));
+	&vmovdqa	($xa3,&QWP(16*3-128,"ebx"));
+
+    for($i=0;$i<256;$i+=64) {
+	&vpaddd		($xa0,$xa0,&QWP($i+16*0-128,"ebp"));	# accumulate key material
+	&vpaddd		($xa1,$xa1,&QWP($i+16*1-128,"ebp"));
+	&vpaddd		($xa2,$xa2,&QWP($i+16*2-128,"ebp"));
+	&vpaddd		($xa3,$xa3,&QWP($i+16*3-128,"ebp"));
+
+	&vpunpckldq	($xt2,$xa0,$xa1);	# "de-interlace" data
+	&vpunpckldq	($xt3,$xa2,$xa3);
+	&vpunpckhdq	($xa0,$xa0,$xa1);
+	&vpunpckhdq	($xa2,$xa2,$xa3);
+	&vpunpcklqdq	($xa1,$xt2,$xt3);	# "a0"
+	&vpunpckhqdq	($xt2,$xt2,$xt3);	# "a1"
+	&vpunpcklqdq	($xt3,$xa0,$xa2);	# "a2"
+	&vpunpckhqdq	($xa3,$xa0,$xa2);	# "a3"
+
+	&vpxor		($xt0,$xa1,&QWP(64*0-128,$inp));
+	&vpxor		($xt1,$xt2,&QWP(64*1-128,$inp));
+	&vpxor		($xt2,$xt3,&QWP(64*2-128,$inp));
+	&vpxor		($xt3,$xa3,&QWP(64*3-128,$inp));
+	&lea		($inp,&QWP($i<192?16:(64*4-16*3),$inp));
+	&vmovdqa	($xa0,&QWP($i+16*4-128,"ebx"))	if ($i<192);
+	&vmovdqa	($xa1,&QWP($i+16*5-128,"ebx"))	if ($i<192);
+	&vmovdqa	($xa2,&QWP($i+16*6-128,"ebx"))	if ($i<192);
+	&vmovdqa	($xa3,&QWP($i+16*7-128,"ebx"))	if ($i<192);
+	&vmovdqu	(&QWP(64*0-128,$out),$xt0);	# store output
+	&vmovdqu	(&QWP(64*1-128,$out),$xt1);
+	&vmovdqu	(&QWP(64*2-128,$out),$xt2);
+	&vmovdqu	(&QWP(64*3-128,$out),$xt3);
+	&lea		($out,&QWP($i<192?16:(64*4-16*3),$out));
+    }
+	&sub		($len,64*4);
+	&jnc		(&label("outer_loop"));
+
+	&add		($len,64*4);
+	&jz		(&label("done"));
+
+	&mov		("ebx",&DWP(512+8,"esp"));	# restore pointers
+	&lea		($inp,&DWP(-128,$inp));
+	&mov		("edx",&DWP(512+4,"esp"));
+	&lea		($out,&DWP(-128,$out));
+
+	&vmovd		("xmm2",&DWP(16*12-128,"ebp"));	# counter value
+	&vmovdqu	("xmm3",&QWP(0,"ebx"));
+	&vpaddd		("xmm2","xmm2",&QWP(16*6,"eax"));# +four
+	&vpand		("xmm3","xmm3",&QWP(16*7,"eax"));
+	&vpor		("xmm3","xmm3","xmm2");		# counter value
+{
+my ($a,$b,$c,$d,$t,$t1,$rot16,$rot24)=map("xmm$_",(0..7));
+
+sub XOPROUND {
+	&vpaddd		($a,$a,$b);
+	&vpxor		($d,$d,$a);
+	&vprotd		($d,$d,16);
+
+	&vpaddd		($c,$c,$d);
+	&vpxor		($b,$b,$c);
+	&vprotd		($b,$b,12);
+
+	&vpaddd		($a,$a,$b);
+	&vpxor		($d,$d,$a);
+	&vprotd		($d,$d,8);
+
+	&vpaddd		($c,$c,$d);
+	&vpxor		($b,$b,$c);
+	&vprotd		($b,$b,7);
+}
+
+&set_label("1x");
+	&vmovdqa	($a,&QWP(16*2,"eax"));		# sigma
+	&vmovdqu	($b,&QWP(0,"edx"));
+	&vmovdqu	($c,&QWP(16,"edx"));
+	#&vmovdqu	($d,&QWP(0,"ebx"));		# already loaded
+	&vmovdqa	($rot16,&QWP(0,"eax"));
+	&vmovdqa	($rot24,&QWP(16,"eax"));
+	&mov		(&DWP(16*3,"esp"),"ebp");
+
+	&vmovdqa	(&QWP(16*0,"esp"),$a);
+	&vmovdqa	(&QWP(16*1,"esp"),$b);
+	&vmovdqa	(&QWP(16*2,"esp"),$c);
+	&vmovdqa	(&QWP(16*3,"esp"),$d);
+	&mov		("edx",10);
+	&jmp		(&label("loop1x"));
+
+&set_label("outer1x",16);
+	&vmovdqa	($d,&QWP(16*5,"eax"));		# one
+	&vmovdqa	($a,&QWP(16*0,"esp"));
+	&vmovdqa	($b,&QWP(16*1,"esp"));
+	&vmovdqa	($c,&QWP(16*2,"esp"));
+	&vpaddd		($d,$d,&QWP(16*3,"esp"));
+	&mov		("edx",10);
+	&vmovdqa	(&QWP(16*3,"esp"),$d);
+	&jmp		(&label("loop1x"));
+
+&set_label("loop1x",16);
+	&XOPROUND();
+	&vpshufd	($c,$c,0b01001110);
+	&vpshufd	($b,$b,0b00111001);
+	&vpshufd	($d,$d,0b10010011);
+
+	&XOPROUND();
+	&vpshufd	($c,$c,0b01001110);
+	&vpshufd	($b,$b,0b10010011);
+	&vpshufd	($d,$d,0b00111001);
+
+	&dec		("edx");
+	&jnz		(&label("loop1x"));
+
+	&vpaddd		($a,$a,&QWP(16*0,"esp"));
+	&vpaddd		($b,$b,&QWP(16*1,"esp"));
+	&vpaddd		($c,$c,&QWP(16*2,"esp"));
+	&vpaddd		($d,$d,&QWP(16*3,"esp"));
+
+	&cmp		($len,64);
+	&jb		(&label("tail"));
+
+	&vpxor		($a,$a,&QWP(16*0,$inp));	# xor with input
+	&vpxor		($b,$b,&QWP(16*1,$inp));
+	&vpxor		($c,$c,&QWP(16*2,$inp));
+	&vpxor		($d,$d,&QWP(16*3,$inp));
+	&lea		($inp,&DWP(16*4,$inp));		# inp+=64
+
+	&vmovdqu	(&QWP(16*0,$out),$a);		# write output
+	&vmovdqu	(&QWP(16*1,$out),$b);
+	&vmovdqu	(&QWP(16*2,$out),$c);
+	&vmovdqu	(&QWP(16*3,$out),$d);
+	&lea		($out,&DWP(16*4,$out));		# out+=64
+
+	&sub		($len,64);
+	&jnz		(&label("outer1x"));
+
+	&jmp		(&label("done"));
+
+&set_label("tail");
+	&vmovdqa	(&QWP(16*0,"esp"),$a);
+	&vmovdqa	(&QWP(16*1,"esp"),$b);
+	&vmovdqa	(&QWP(16*2,"esp"),$c);
+	&vmovdqa	(&QWP(16*3,"esp"),$d);
+
+	&xor		("eax","eax");
+	&xor		("edx","edx");
+	&xor		("ebp","ebp");
+
+&set_label("tail_loop");
+	&movb		("al",&BP(0,"esp","ebp"));
+	&movb		("dl",&BP(0,$inp,"ebp"));
+	&lea		("ebp",&DWP(1,"ebp"));
+	&xor		("al","dl");
+	&movb		(&BP(-1,$out,"ebp"),"al");
+	&dec		($len);
+	&jnz		(&label("tail_loop"));
+}
+&set_label("done");
+	&vzeroupper	();
+	&mov		("esp",&DWP(512,"esp"));
+&function_end("ChaCha20_xop");
+}
+
+&asm_finish();
+
+close STDOUT or die "error closing STDOUT: $!";
diff --git a/ap/lib/libssl/openssl-1.1.1o/crypto/chacha/asm/chacha-x86_64.pl b/ap/lib/libssl/openssl-1.1.1o/crypto/chacha/asm/chacha-x86_64.pl
new file mode 100755
index 0000000..c0e5d86
--- /dev/null
+++ b/ap/lib/libssl/openssl-1.1.1o/crypto/chacha/asm/chacha-x86_64.pl
@@ -0,0 +1,4037 @@
+#! /usr/bin/env perl
+# Copyright 2016-2021 The OpenSSL Project Authors. All Rights Reserved.
+#
+# Licensed under the OpenSSL license (the "License").  You may not use
+# this file except in compliance with the License.  You can obtain a copy
+# in the file LICENSE in the source distribution or at
+# https://www.openssl.org/source/license.html
+
+#
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+#
+# November 2014
+#
+# ChaCha20 for x86_64.
+#
+# December 2016
+#
+# Add AVX512F code path.
+#
+# December 2017
+#
+# Add AVX512VL code path.
+#
+# Performance in cycles per byte out of large buffer.
+#
+#		IALU/gcc 4.8(i)	1x/2xSSSE3(ii)	4xSSSE3	    NxAVX(v)
+#
+# P4		9.48/+99%	-		-
+# Core2		7.83/+55%	7.90/5.76	4.35
+# Westmere	7.19/+50%	5.60/4.50	3.00
+# Sandy Bridge	8.31/+42%	5.45/4.00	2.72
+# Ivy Bridge	6.71/+46%	5.40/?		2.41
+# Haswell	5.92/+43%	5.20/3.45	2.42        1.23
+# Skylake[-X]	5.87/+39%	4.70/3.22	2.31        1.19[0.80(vi)]
+# Silvermont	12.0/+33%	7.75/6.90	7.03(iii)
+# Knights L	11.7/-		?		9.60(iii)   0.80
+# Goldmont	10.6/+17%	5.10/3.52	3.28
+# Sledgehammer	7.28/+52%	-		-
+# Bulldozer	9.66/+28%	9.85/5.35(iv)	3.06(iv)
+# Ryzen		5.96/+50%	5.19/3.00	2.40        2.09
+# VIA Nano	10.5/+46%	6.72/6.88	6.05
+#
+# (i)	compared to older gcc 3.x one can observe >2x improvement on
+#	most platforms;
+# (ii)	2xSSSE3 is code path optimized specifically for 128 bytes used
+#	by chacha20_poly1305_tls_cipher, results are EVP-free;
+# (iii)	this is not optimal result for Atom because of MSROM
+#	limitations, SSE2 can do better, but gain is considered too
+#	low to justify the [maintenance] effort;
+# (iv)	Bulldozer actually executes 4xXOP code path that delivers 2.20
+#	and 4.85 for 128-byte inputs;
+# (v)	8xAVX2, 8xAVX512VL or 16xAVX512F, whichever best applicable;
+# (vi)	even though Skylake-X can execute AVX512F code and deliver 0.57
+#	cpb in single thread, the corresponding capability is suppressed;
+
+$flavour = shift;
+$output  = shift;
+if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
+
+$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
+( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
+die "can't locate x86_64-xlate.pl";
+
+if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
+		=~ /GNU assembler version ([2-9]\.[0-9]+)/) {
+	$avx = ($1>=2.19) + ($1>=2.22) + ($1>=2.25);
+}
+
+if (!$avx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
+	   `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)(?:\.([0-9]+))?/) {
+	$avx = ($1>=2.09) + ($1>=2.10) + ($1>=2.12);
+	$avx += 1 if ($1==2.11 && $2>=8);
+}
+
+if (!$avx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
+	   `ml64 2>&1` =~ /Version ([0-9]+)\./) {
+	$avx = ($1>=10) + ($1>=11);
+}
+
+if (!$avx && `$ENV{CC} -v 2>&1` =~ /((?:clang|LLVM) version|.*based on LLVM) ([0-9]+\.[0-9]+)/) {
+	$avx = ($2>=3.0) + ($2>3.0);
+}
+
+open OUT,"| \"$^X\" \"$xlate\" $flavour \"$output\"";
+*STDOUT=*OUT;
+
+# input parameter block
+($out,$inp,$len,$key,$counter)=("%rdi","%rsi","%rdx","%rcx","%r8");
+
+$code.=<<___;
+.text
+
+.extern OPENSSL_ia32cap_P
+
+.align	64
+.Lzero:
+.long	0,0,0,0
+.Lone:
+.long	1,0,0,0
+.Linc:
+.long	0,1,2,3
+.Lfour:
+.long	4,4,4,4
+.Lincy:
+.long	0,2,4,6,1,3,5,7
+.Leight:
+.long	8,8,8,8,8,8,8,8
+.Lrot16:
+.byte	0x2,0x3,0x0,0x1, 0x6,0x7,0x4,0x5, 0xa,0xb,0x8,0x9, 0xe,0xf,0xc,0xd
+.Lrot24:
+.byte	0x3,0x0,0x1,0x2, 0x7,0x4,0x5,0x6, 0xb,0x8,0x9,0xa, 0xf,0xc,0xd,0xe
+.Ltwoy:
+.long	2,0,0,0, 2,0,0,0
+.align	64
+.Lzeroz:
+.long	0,0,0,0, 1,0,0,0, 2,0,0,0, 3,0,0,0
+.Lfourz:
+.long	4,0,0,0, 4,0,0,0, 4,0,0,0, 4,0,0,0
+.Lincz:
+.long	0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
+.Lsixteen:
+.long	16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16
+.Lsigma:
+.asciz	"expand 32-byte k"
+.asciz	"ChaCha20 for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
+___
+
+sub AUTOLOAD()          # thunk [simplified] 32-bit style perlasm
+{ my $opcode = $AUTOLOAD; $opcode =~ s/.*:://;
+  my $arg = pop;
+    $arg = "\$$arg" if ($arg*1 eq $arg);
+    $code .= "\t$opcode\t".join(',',$arg,reverse @_)."\n";
+}
+
+@x=("%eax","%ebx","%ecx","%edx",map("%r${_}d",(8..11)),
+    "%nox","%nox","%nox","%nox",map("%r${_}d",(12..15)));
+@t=("%esi","%edi");
+
+sub ROUND {			# critical path is 24 cycles per round
+my ($a0,$b0,$c0,$d0)=@_;
+my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));
+my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
+my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));
+my ($xc,$xc_)=map("\"$_\"",@t);
+my @x=map("\"$_\"",@x);
+
+	# Consider order in which variables are addressed by their
+	# index:
+	#
+	#	a   b   c   d
+	#
+	#	0   4   8  12 < even round
+	#	1   5   9  13
+	#	2   6  10  14
+	#	3   7  11  15
+	#	0   5  10  15 < odd round
+	#	1   6  11  12
+	#	2   7   8  13
+	#	3   4   9  14
+	#
+	# 'a', 'b' and 'd's are permanently allocated in registers,
+	# @x[0..7,12..15], while 'c's are maintained in memory. If
+	# you observe 'c' column, you'll notice that pair of 'c's is
+	# invariant between rounds. This means that we have to reload
+	# them once per round, in the middle. This is why you'll see
+	# bunch of 'c' stores and loads in the middle, but none in
+	# the beginning or end.
+
+	# Normally instructions would be interleaved to favour in-order
+	# execution. Generally out-of-order cores manage it gracefully,
+	# but not this time for some reason. As in-order execution
+	# cores are dying breed, old Atom is the only one around,
+	# instructions are left uninterleaved. Besides, Atom is better
+	# off executing 1xSSSE3 code anyway...
+
+	(
+	"&add	(@x[$a0],@x[$b0])",	# Q1
+	"&xor	(@x[$d0],@x[$a0])",
+	"&rol	(@x[$d0],16)",
+	 "&add	(@x[$a1],@x[$b1])",	# Q2
+	 "&xor	(@x[$d1],@x[$a1])",
+	 "&rol	(@x[$d1],16)",
+
+	"&add	($xc,@x[$d0])",
+	"&xor	(@x[$b0],$xc)",
+	"&rol	(@x[$b0],12)",
+	 "&add	($xc_,@x[$d1])",
+	 "&xor	(@x[$b1],$xc_)",
+	 "&rol	(@x[$b1],12)",
+
+	"&add	(@x[$a0],@x[$b0])",
+	"&xor	(@x[$d0],@x[$a0])",
+	"&rol	(@x[$d0],8)",
+	 "&add	(@x[$a1],@x[$b1])",
+	 "&xor	(@x[$d1],@x[$a1])",
+	 "&rol	(@x[$d1],8)",
+
+	"&add	($xc,@x[$d0])",
+	"&xor	(@x[$b0],$xc)",
+	"&rol	(@x[$b0],7)",
+	 "&add	($xc_,@x[$d1])",
+	 "&xor	(@x[$b1],$xc_)",
+	 "&rol	(@x[$b1],7)",
+
+	"&mov	(\"4*$c0(%rsp)\",$xc)",	# reload pair of 'c's
+	 "&mov	(\"4*$c1(%rsp)\",$xc_)",
+	"&mov	($xc,\"4*$c2(%rsp)\")",
+	 "&mov	($xc_,\"4*$c3(%rsp)\")",
+
+	"&add	(@x[$a2],@x[$b2])",	# Q3
+	"&xor	(@x[$d2],@x[$a2])",
+	"&rol	(@x[$d2],16)",
+	 "&add	(@x[$a3],@x[$b3])",	# Q4
+	 "&xor	(@x[$d3],@x[$a3])",
+	 "&rol	(@x[$d3],16)",
+
+	"&add	($xc,@x[$d2])",
+	"&xor	(@x[$b2],$xc)",
+	"&rol	(@x[$b2],12)",
+	 "&add	($xc_,@x[$d3])",
+	 "&xor	(@x[$b3],$xc_)",
+	 "&rol	(@x[$b3],12)",
+
+	"&add	(@x[$a2],@x[$b2])",
+	"&xor	(@x[$d2],@x[$a2])",
+	"&rol	(@x[$d2],8)",
+	 "&add	(@x[$a3],@x[$b3])",
+	 "&xor	(@x[$d3],@x[$a3])",
+	 "&rol	(@x[$d3],8)",
+
+	"&add	($xc,@x[$d2])",
+	"&xor	(@x[$b2],$xc)",
+	"&rol	(@x[$b2],7)",
+	 "&add	($xc_,@x[$d3])",
+	 "&xor	(@x[$b3],$xc_)",
+	 "&rol	(@x[$b3],7)"
+	);
+}
+
+########################################################################
+# Generic code path that handles all lengths on pre-SSSE3 processors.
+$code.=<<___;
+.globl	ChaCha20_ctr32
+.type	ChaCha20_ctr32,\@function,5
+.align	64
+ChaCha20_ctr32:
+.cfi_startproc
+	cmp	\$0,$len
+	je	.Lno_data
+	mov	OPENSSL_ia32cap_P+4(%rip),%r10
+___
+$code.=<<___	if ($avx>2);
+	bt	\$48,%r10		# check for AVX512F
+	jc	.LChaCha20_avx512
+	test	%r10,%r10		# check for AVX512VL
+	js	.LChaCha20_avx512vl
+___
+$code.=<<___;
+	test	\$`1<<(41-32)`,%r10d
+	jnz	.LChaCha20_ssse3
+
+	push	%rbx
+.cfi_push	%rbx
+	push	%rbp
+.cfi_push	%rbp
+	push	%r12
+.cfi_push	%r12
+	push	%r13
+.cfi_push	%r13
+	push	%r14
+.cfi_push	%r14
+	push	%r15
+.cfi_push	%r15
+	sub	\$64+24,%rsp
+.cfi_adjust_cfa_offset	64+24
+.Lctr32_body:
+
+	#movdqa	.Lsigma(%rip),%xmm0
+	movdqu	($key),%xmm1
+	movdqu	16($key),%xmm2
+	movdqu	($counter),%xmm3
+	movdqa	.Lone(%rip),%xmm4
+
+	#movdqa	%xmm0,4*0(%rsp)		# key[0]
+	movdqa	%xmm1,4*4(%rsp)		# key[1]
+	movdqa	%xmm2,4*8(%rsp)		# key[2]
+	movdqa	%xmm3,4*12(%rsp)	# key[3]
+	mov	$len,%rbp		# reassign $len
+	jmp	.Loop_outer
+
+.align	32
+.Loop_outer:
+	mov	\$0x61707865,@x[0]      # 'expa'
+	mov	\$0x3320646e,@x[1]      # 'nd 3'
+	mov	\$0x79622d32,@x[2]      # '2-by'
+	mov	\$0x6b206574,@x[3]      # 'te k'
+	mov	4*4(%rsp),@x[4]
+	mov	4*5(%rsp),@x[5]
+	mov	4*6(%rsp),@x[6]
+	mov	4*7(%rsp),@x[7]
+	movd	%xmm3,@x[12]
+	mov	4*13(%rsp),@x[13]
+	mov	4*14(%rsp),@x[14]
+	mov	4*15(%rsp),@x[15]
+
+	mov	%rbp,64+0(%rsp)		# save len
+	mov	\$10,%ebp
+	mov	$inp,64+8(%rsp)		# save inp
+	movq	%xmm2,%rsi		# "@x[8]"
+	mov	$out,64+16(%rsp)	# save out
+	mov	%rsi,%rdi
+	shr	\$32,%rdi		# "@x[9]"
+	jmp	.Loop
+
+.align	32
+.Loop:
+___
+	foreach (&ROUND (0, 4, 8,12)) { eval; }
+	foreach (&ROUND	(0, 5,10,15)) { eval; }
+	&dec	("%ebp");
+	&jnz	(".Loop");
+
+$code.=<<___;
+	mov	@t[1],4*9(%rsp)		# modulo-scheduled
+	mov	@t[0],4*8(%rsp)
+	mov	64(%rsp),%rbp		# load len
+	movdqa	%xmm2,%xmm1
+	mov	64+8(%rsp),$inp		# load inp
+	paddd	%xmm4,%xmm3		# increment counter
+	mov	64+16(%rsp),$out	# load out
+
+	add	\$0x61707865,@x[0]      # 'expa'
+	add	\$0x3320646e,@x[1]      # 'nd 3'
+	add	\$0x79622d32,@x[2]      # '2-by'
+	add	\$0x6b206574,@x[3]      # 'te k'
+	add	4*4(%rsp),@x[4]
+	add	4*5(%rsp),@x[5]
+	add	4*6(%rsp),@x[6]
+	add	4*7(%rsp),@x[7]
+	add	4*12(%rsp),@x[12]
+	add	4*13(%rsp),@x[13]
+	add	4*14(%rsp),@x[14]
+	add	4*15(%rsp),@x[15]
+	paddd	4*8(%rsp),%xmm1
+
+	cmp	\$64,%rbp
+	jb	.Ltail
+
+	xor	4*0($inp),@x[0]		# xor with input
+	xor	4*1($inp),@x[1]
+	xor	4*2($inp),@x[2]
+	xor	4*3($inp),@x[3]
+	xor	4*4($inp),@x[4]
+	xor	4*5($inp),@x[5]
+	xor	4*6($inp),@x[6]
+	xor	4*7($inp),@x[7]
+	movdqu	4*8($inp),%xmm0
+	xor	4*12($inp),@x[12]
+	xor	4*13($inp),@x[13]
+	xor	4*14($inp),@x[14]
+	xor	4*15($inp),@x[15]
+	lea	4*16($inp),$inp		# inp+=64
+	pxor	%xmm1,%xmm0
+
+	movdqa	%xmm2,4*8(%rsp)
+	movd	%xmm3,4*12(%rsp)
+
+	mov	@x[0],4*0($out)		# write output
+	mov	@x[1],4*1($out)
+	mov	@x[2],4*2($out)
+	mov	@x[3],4*3($out)
+	mov	@x[4],4*4($out)
+	mov	@x[5],4*5($out)
+	mov	@x[6],4*6($out)
+	mov	@x[7],4*7($out)
+	movdqu	%xmm0,4*8($out)
+	mov	@x[12],4*12($out)
+	mov	@x[13],4*13($out)
+	mov	@x[14],4*14($out)
+	mov	@x[15],4*15($out)
+	lea	4*16($out),$out		# out+=64
+
+	sub	\$64,%rbp
+	jnz	.Loop_outer
+
+	jmp	.Ldone
+
+.align	16
+.Ltail:
+	mov	@x[0],4*0(%rsp)
+	mov	@x[1],4*1(%rsp)
+	xor	%rbx,%rbx
+	mov	@x[2],4*2(%rsp)
+	mov	@x[3],4*3(%rsp)
+	mov	@x[4],4*4(%rsp)
+	mov	@x[5],4*5(%rsp)
+	mov	@x[6],4*6(%rsp)
+	mov	@x[7],4*7(%rsp)
+	movdqa	%xmm1,4*8(%rsp)
+	mov	@x[12],4*12(%rsp)
+	mov	@x[13],4*13(%rsp)
+	mov	@x[14],4*14(%rsp)
+	mov	@x[15],4*15(%rsp)
+
+.Loop_tail:
+	movzb	($inp,%rbx),%eax
+	movzb	(%rsp,%rbx),%edx
+	lea	1(%rbx),%rbx
+	xor	%edx,%eax
+	mov	%al,-1($out,%rbx)
+	dec	%rbp
+	jnz	.Loop_tail
+
+.Ldone:
+	lea	64+24+48(%rsp),%rsi
+.cfi_def_cfa	%rsi,8
+	mov	-48(%rsi),%r15
+.cfi_restore	%r15
+	mov	-40(%rsi),%r14
+.cfi_restore	%r14
+	mov	-32(%rsi),%r13
+.cfi_restore	%r13
+	mov	-24(%rsi),%r12
+.cfi_restore	%r12
+	mov	-16(%rsi),%rbp
+.cfi_restore	%rbp
+	mov	-8(%rsi),%rbx
+.cfi_restore	%rbx
+	lea	(%rsi),%rsp
+.cfi_def_cfa_register	%rsp
+.Lno_data:
+	ret
+.cfi_endproc
+.size	ChaCha20_ctr32,.-ChaCha20_ctr32
+___
+
+########################################################################
+# SSSE3 code path that handles shorter lengths
+{
+my ($a,$b,$c,$d,$t,$t1,$rot16,$rot24)=map("%xmm$_",(0..7));
+
+sub SSSE3ROUND {	# critical path is 20 "SIMD ticks" per round
+	&paddd	($a,$b);
+	&pxor	($d,$a);
+	&pshufb	($d,$rot16);
+
+	&paddd	($c,$d);
+	&pxor	($b,$c);
+	&movdqa	($t,$b);
+	&psrld	($b,20);
+	&pslld	($t,12);
+	&por	($b,$t);
+
+	&paddd	($a,$b);
+	&pxor	($d,$a);
+	&pshufb	($d,$rot24);
+
+	&paddd	($c,$d);
+	&pxor	($b,$c);
+	&movdqa	($t,$b);
+	&psrld	($b,25);
+	&pslld	($t,7);
+	&por	($b,$t);
+}
+
+my $xframe = $win64 ? 160+8 : 8;
+
+$code.=<<___;
+.type	ChaCha20_ssse3,\@function,5
+.align	32
+ChaCha20_ssse3:
+.cfi_startproc
+.LChaCha20_ssse3:
+	mov	%rsp,%r9		# frame pointer
+.cfi_def_cfa_register	%r9
+___
+$code.=<<___	if ($avx);
+	test	\$`1<<(43-32)`,%r10d
+	jnz	.LChaCha20_4xop		# XOP is fastest even if we use 1/4
+___
+$code.=<<___;
+	cmp	\$128,$len		# we might throw away some data,
+	je	.LChaCha20_128
+	ja	.LChaCha20_4x		# but overall it won't be slower
+
+.Ldo_sse3_after_all:
+	sub	\$64+$xframe,%rsp
+___
+$code.=<<___	if ($win64);
+	movaps	%xmm6,-0x28(%r9)
+	movaps	%xmm7,-0x18(%r9)
+.Lssse3_body:
+___
+$code.=<<___;
+	movdqa	.Lsigma(%rip),$a
+	movdqu	($key),$b
+	movdqu	16($key),$c
+	movdqu	($counter),$d
+	movdqa	.Lrot16(%rip),$rot16
+	movdqa	.Lrot24(%rip),$rot24
+
+	movdqa	$a,0x00(%rsp)
+	movdqa	$b,0x10(%rsp)
+	movdqa	$c,0x20(%rsp)
+	movdqa	$d,0x30(%rsp)
+	mov	\$10,$counter		# reuse $counter
+	jmp	.Loop_ssse3
+
+.align	32
+.Loop_outer_ssse3:
+	movdqa	.Lone(%rip),$d
+	movdqa	0x00(%rsp),$a
+	movdqa	0x10(%rsp),$b
+	movdqa	0x20(%rsp),$c
+	paddd	0x30(%rsp),$d
+	mov	\$10,$counter
+	movdqa	$d,0x30(%rsp)
+	jmp	.Loop_ssse3
+
+.align	32
+.Loop_ssse3:
+___
+	&SSSE3ROUND();
+	&pshufd	($c,$c,0b01001110);
+	&pshufd	($b,$b,0b00111001);
+	&pshufd	($d,$d,0b10010011);
+	&nop	();
+
+	&SSSE3ROUND();
+	&pshufd	($c,$c,0b01001110);
+	&pshufd	($b,$b,0b10010011);
+	&pshufd	($d,$d,0b00111001);
+
+	&dec	($counter);
+	&jnz	(".Loop_ssse3");
+
+$code.=<<___;
+	paddd	0x00(%rsp),$a
+	paddd	0x10(%rsp),$b
+	paddd	0x20(%rsp),$c
+	paddd	0x30(%rsp),$d
+
+	cmp	\$64,$len
+	jb	.Ltail_ssse3
+
+	movdqu	0x00($inp),$t
+	movdqu	0x10($inp),$t1
+	pxor	$t,$a			# xor with input
+	movdqu	0x20($inp),$t
+	pxor	$t1,$b
+	movdqu	0x30($inp),$t1
+	lea	0x40($inp),$inp		# inp+=64
+	pxor	$t,$c
+	pxor	$t1,$d
+
+	movdqu	$a,0x00($out)		# write output
+	movdqu	$b,0x10($out)
+	movdqu	$c,0x20($out)
+	movdqu	$d,0x30($out)
+	lea	0x40($out),$out		# out+=64
+
+	sub	\$64,$len
+	jnz	.Loop_outer_ssse3
+
+	jmp	.Ldone_ssse3
+
+.align	16
+.Ltail_ssse3:
+	movdqa	$a,0x00(%rsp)
+	movdqa	$b,0x10(%rsp)
+	movdqa	$c,0x20(%rsp)
+	movdqa	$d,0x30(%rsp)
+	xor	$counter,$counter
+
+.Loop_tail_ssse3:
+	movzb	($inp,$counter),%eax
+	movzb	(%rsp,$counter),%ecx
+	lea	1($counter),$counter
+	xor	%ecx,%eax
+	mov	%al,-1($out,$counter)
+	dec	$len
+	jnz	.Loop_tail_ssse3
+
+.Ldone_ssse3:
+___
+$code.=<<___	if ($win64);
+	movaps	-0x28(%r9),%xmm6
+	movaps	-0x18(%r9),%xmm7
+___
+$code.=<<___;
+	lea	(%r9),%rsp
+.cfi_def_cfa_register	%rsp
+.Lssse3_epilogue:
+	ret
+.cfi_endproc
+.size	ChaCha20_ssse3,.-ChaCha20_ssse3
+___
+}
+
+########################################################################
+# SSSE3 code path that handles 128-byte inputs
+{
+my ($a,$b,$c,$d,$t,$t1,$rot16,$rot24)=map("%xmm$_",(8,9,2..7));
+my ($a1,$b1,$c1,$d1)=map("%xmm$_",(10,11,0,1));
+
+sub SSSE3ROUND_2x {
+	&paddd	($a,$b);
+	&pxor	($d,$a);
+	 &paddd	($a1,$b1);
+	 &pxor	($d1,$a1);
+	&pshufb	($d,$rot16);
+	 &pshufb($d1,$rot16);
+
+	&paddd	($c,$d);
+	 &paddd	($c1,$d1);
+	&pxor	($b,$c);
+	 &pxor	($b1,$c1);
+	&movdqa	($t,$b);
+	&psrld	($b,20);
+	 &movdqa($t1,$b1);
+	&pslld	($t,12);
+	 &psrld	($b1,20);
+	&por	($b,$t);
+	 &pslld	($t1,12);
+	 &por	($b1,$t1);
+
+	&paddd	($a,$b);
+	&pxor	($d,$a);
+	 &paddd	($a1,$b1);
+	 &pxor	($d1,$a1);
+	&pshufb	($d,$rot24);
+	 &pshufb($d1,$rot24);
+
+	&paddd	($c,$d);
+	 &paddd	($c1,$d1);
+	&pxor	($b,$c);
+	 &pxor	($b1,$c1);
+	&movdqa	($t,$b);
+	&psrld	($b,25);
+	 &movdqa($t1,$b1);
+	&pslld	($t,7);
+	 &psrld	($b1,25);
+	&por	($b,$t);
+	 &pslld	($t1,7);
+	 &por	($b1,$t1);
+}
+
+my $xframe = $win64 ? 0x68 : 8;
+
+$code.=<<___;
+.type	ChaCha20_128,\@function,5
+.align	32
+ChaCha20_128:
+.cfi_startproc
+.LChaCha20_128:
+	mov	%rsp,%r9		# frame pointer
+.cfi_def_cfa_register	%r9
+	sub	\$64+$xframe,%rsp
+___
+$code.=<<___	if ($win64);
+	movaps	%xmm6,-0x68(%r9)
+	movaps	%xmm7,-0x58(%r9)
+	movaps	%xmm8,-0x48(%r9)
+	movaps	%xmm9,-0x38(%r9)
+	movaps	%xmm10,-0x28(%r9)
+	movaps	%xmm11,-0x18(%r9)
+.L128_body:
+___
+$code.=<<___;
+	movdqa	.Lsigma(%rip),$a
+	movdqu	($key),$b
+	movdqu	16($key),$c
+	movdqu	($counter),$d
+	movdqa	.Lone(%rip),$d1
+	movdqa	.Lrot16(%rip),$rot16
+	movdqa	.Lrot24(%rip),$rot24
+
+	movdqa	$a,$a1
+	movdqa	$a,0x00(%rsp)
+	movdqa	$b,$b1
+	movdqa	$b,0x10(%rsp)
+	movdqa	$c,$c1
+	movdqa	$c,0x20(%rsp)
+	paddd	$d,$d1
+	movdqa	$d,0x30(%rsp)
+	mov	\$10,$counter		# reuse $counter
+	jmp	.Loop_128
+
+.align	32
+.Loop_128:
+___
+	&SSSE3ROUND_2x();
+	&pshufd	($c,$c,0b01001110);
+	&pshufd	($b,$b,0b00111001);
+	&pshufd	($d,$d,0b10010011);
+	&pshufd	($c1,$c1,0b01001110);
+	&pshufd	($b1,$b1,0b00111001);
+	&pshufd	($d1,$d1,0b10010011);
+
+	&SSSE3ROUND_2x();
+	&pshufd	($c,$c,0b01001110);
+	&pshufd	($b,$b,0b10010011);
+	&pshufd	($d,$d,0b00111001);
+	&pshufd	($c1,$c1,0b01001110);
+	&pshufd	($b1,$b1,0b10010011);
+	&pshufd	($d1,$d1,0b00111001);
+
+	&dec	($counter);
+	&jnz	(".Loop_128");
+
+$code.=<<___;
+	paddd	0x00(%rsp),$a
+	paddd	0x10(%rsp),$b
+	paddd	0x20(%rsp),$c
+	paddd	0x30(%rsp),$d
+	paddd	.Lone(%rip),$d1
+	paddd	0x00(%rsp),$a1
+	paddd	0x10(%rsp),$b1
+	paddd	0x20(%rsp),$c1
+	paddd	0x30(%rsp),$d1
+
+	movdqu	0x00($inp),$t
+	movdqu	0x10($inp),$t1
+	pxor	$t,$a			# xor with input
+	movdqu	0x20($inp),$t
+	pxor	$t1,$b
+	movdqu	0x30($inp),$t1
+	pxor	$t,$c
+	movdqu	0x40($inp),$t
+	pxor	$t1,$d
+	movdqu	0x50($inp),$t1
+	pxor	$t,$a1
+	movdqu	0x60($inp),$t
+	pxor	$t1,$b1
+	movdqu	0x70($inp),$t1
+	pxor	$t,$c1
+	pxor	$t1,$d1
+
+	movdqu	$a,0x00($out)		# write output
+	movdqu	$b,0x10($out)
+	movdqu	$c,0x20($out)
+	movdqu	$d,0x30($out)
+	movdqu	$a1,0x40($out)
+	movdqu	$b1,0x50($out)
+	movdqu	$c1,0x60($out)
+	movdqu	$d1,0x70($out)
+___
+$code.=<<___	if ($win64);
+	movaps	-0x68(%r9),%xmm6
+	movaps	-0x58(%r9),%xmm7
+	movaps	-0x48(%r9),%xmm8
+	movaps	-0x38(%r9),%xmm9
+	movaps	-0x28(%r9),%xmm10
+	movaps	-0x18(%r9),%xmm11
+___
+$code.=<<___;
+	lea	(%r9),%rsp
+.cfi_def_cfa_register	%rsp
+.L128_epilogue:
+	ret
+.cfi_endproc
+.size	ChaCha20_128,.-ChaCha20_128
+___
+}
+
+########################################################################
+# SSSE3 code path that handles longer messages.
+{
+# assign variables to favor Atom front-end
+my ($xd0,$xd1,$xd2,$xd3, $xt0,$xt1,$xt2,$xt3,
+    $xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3)=map("%xmm$_",(0..15));
+# @xx maps ChaCha state indices 0..15 to registers; indices 8..11 (the 'c'
+# row) are placeholders ("%nox") because those lanes live on the stack and
+# are cycled through $xt0/$xt1 inside SSSE3_lane_ROUND
+my  @xx=($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3,
+	"%nox","%nox","%nox","%nox", $xd0,$xd1,$xd2,$xd3);
+
+# SSSE3_lane_ROUND($a0,$b0,$c0,$d0) - returns a list of instruction strings
+# (eval'ed by the caller) performing four ChaCha quarter-rounds, one per
+# 4-lane SIMD column.  The remaining three columns' indices are derived by
+# rotating each of b/c/d within its group of four, which yields the even
+# (column) round for (0,4,8,12) and the odd (diagonal) round for (0,5,10,15).
+sub SSSE3_lane_ROUND {
+my ($a0,$b0,$c0,$d0)=@_;
+my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));
+my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
+my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));
+my ($xc,$xc_,$t0,$t1)=map("\"$_\"",$xt0,$xt1,$xt2,$xt3);
+my @x=map("\"$_\"",@xx);
+
+	# Consider order in which variables are addressed by their
+	# index:
+	#
+	#	a   b   c   d
+	#
+	#	0   4   8  12 < even round
+	#	1   5   9  13
+	#	2   6  10  14
+	#	3   7  11  15
+	#	0   5  10  15 < odd round
+	#	1   6  11  12
+	#	2   7   8  13
+	#	3   4   9  14
+	#
+	# 'a', 'b' and 'd's are permanently allocated in registers,
+	# @x[0..7,12..15], while 'c's are maintained in memory. If
+	# you observe 'c' column, you'll notice that pair of 'c's is
+	# invariant between rounds. This means that we have to reload
+	# them once per round, in the middle. This is why you'll see
+	# bunch of 'c' stores and loads in the middle, but none in
+	# the beginning or end.
+
+	(
+	"&paddd		(@x[$a0],@x[$b0])",	# Q1
+	 "&paddd	(@x[$a1],@x[$b1])",	# Q2
+	"&pxor		(@x[$d0],@x[$a0])",
+	 "&pxor		(@x[$d1],@x[$a1])",
+	"&pshufb	(@x[$d0],$t1)",
+	 "&pshufb	(@x[$d1],$t1)",
+
+	"&paddd		($xc,@x[$d0])",
+	 "&paddd	($xc_,@x[$d1])",
+	"&pxor		(@x[$b0],$xc)",
+	 "&pxor		(@x[$b1],$xc_)",
+	"&movdqa	($t0,@x[$b0])",
+	"&pslld		(@x[$b0],12)",
+	"&psrld		($t0,20)",
+	 "&movdqa	($t1,@x[$b1])",
+	 "&pslld	(@x[$b1],12)",
+	"&por		(@x[$b0],$t0)",
+	 "&psrld	($t1,20)",
+	"&movdqa	($t0,'(%r11)')",	# .Lrot24(%rip)
+	 "&por		(@x[$b1],$t1)",
+
+	"&paddd		(@x[$a0],@x[$b0])",
+	 "&paddd	(@x[$a1],@x[$b1])",
+	"&pxor		(@x[$d0],@x[$a0])",
+	 "&pxor		(@x[$d1],@x[$a1])",
+	"&pshufb	(@x[$d0],$t0)",
+	 "&pshufb	(@x[$d1],$t0)",
+
+	"&paddd		($xc,@x[$d0])",
+	 "&paddd	($xc_,@x[$d1])",
+	"&pxor		(@x[$b0],$xc)",
+	 "&pxor		(@x[$b1],$xc_)",
+	"&movdqa	($t1,@x[$b0])",
+	"&pslld		(@x[$b0],7)",
+	"&psrld		($t1,25)",
+	 "&movdqa	($t0,@x[$b1])",
+	 "&pslld	(@x[$b1],7)",
+	"&por		(@x[$b0],$t1)",
+	 "&psrld	($t0,25)",
+	"&movdqa	($t1,'(%r10)')",	# .Lrot16(%rip)
+	 "&por		(@x[$b1],$t0)",
+
+	"&movdqa	(\"`16*($c0-8)`(%rsp)\",$xc)",	# reload pair of 'c's
+	 "&movdqa	(\"`16*($c1-8)`(%rsp)\",$xc_)",
+	"&movdqa	($xc,\"`16*($c2-8)`(%rsp)\")",
+	 "&movdqa	($xc_,\"`16*($c3-8)`(%rsp)\")",
+
+	"&paddd		(@x[$a2],@x[$b2])",	# Q3
+	 "&paddd	(@x[$a3],@x[$b3])",	# Q4
+	"&pxor		(@x[$d2],@x[$a2])",
+	 "&pxor		(@x[$d3],@x[$a3])",
+	"&pshufb	(@x[$d2],$t1)",
+	 "&pshufb	(@x[$d3],$t1)",
+
+	"&paddd		($xc,@x[$d2])",
+	 "&paddd	($xc_,@x[$d3])",
+	"&pxor		(@x[$b2],$xc)",
+	 "&pxor		(@x[$b3],$xc_)",
+	"&movdqa	($t0,@x[$b2])",
+	"&pslld		(@x[$b2],12)",
+	"&psrld		($t0,20)",
+	 "&movdqa	($t1,@x[$b3])",
+	 "&pslld	(@x[$b3],12)",
+	"&por		(@x[$b2],$t0)",
+	 "&psrld	($t1,20)",
+	"&movdqa	($t0,'(%r11)')",	# .Lrot24(%rip)
+	 "&por		(@x[$b3],$t1)",
+
+	"&paddd		(@x[$a2],@x[$b2])",
+	 "&paddd	(@x[$a3],@x[$b3])",
+	"&pxor		(@x[$d2],@x[$a2])",
+	 "&pxor		(@x[$d3],@x[$a3])",
+	"&pshufb	(@x[$d2],$t0)",
+	 "&pshufb	(@x[$d3],$t0)",
+
+	"&paddd		($xc,@x[$d2])",
+	 "&paddd	($xc_,@x[$d3])",
+	"&pxor		(@x[$b2],$xc)",
+	 "&pxor		(@x[$b3],$xc_)",
+	"&movdqa	($t1,@x[$b2])",
+	"&pslld		(@x[$b2],7)",
+	"&psrld		($t1,25)",
+	 "&movdqa	($t0,@x[$b3])",
+	 "&pslld	(@x[$b3],7)",
+	"&por		(@x[$b2],$t1)",
+	 "&psrld	($t0,25)",
+	"&movdqa	($t1,'(%r10)')",	# .Lrot16(%rip)
+	 "&por		(@x[$b3],$t0)"
+	);
+}
+
+# frame below %r9: Win64 needs 0xa8 bytes to preserve xmm6-15, else 8
+my $xframe = $win64 ? 0xa8 : 8;
+
+$code.=<<___;
+.type	ChaCha20_4x,\@function,5
+.align	32
+ChaCha20_4x:
+.cfi_startproc
+.LChaCha20_4x:
+	mov		%rsp,%r9		# frame pointer
+.cfi_def_cfa_register	%r9
+	mov		%r10,%r11
+___
+$code.=<<___	if ($avx>1);
+	shr		\$32,%r10		# OPENSSL_ia32cap_P+8
+	test		\$`1<<5`,%r10		# test AVX2
+	jnz		.LChaCha20_8x
+___
+$code.=<<___;
+	cmp		\$192,$len
+	ja		.Lproceed4x
+
+	and		\$`1<<26|1<<22`,%r11	# isolate XSAVE+MOVBE
+	cmp		\$`1<<22`,%r11		# check for MOVBE without XSAVE
+	je		.Ldo_sse3_after_all	# to detect Atom
+
+.Lproceed4x:
+	sub		\$0x140+$xframe,%rsp
+___
+	################ stack layout
+	# +0x00		SIMD equivalent of @x[8-12]
+	# ...
+	# +0x40		constant copy of key[0-2] smashed by lanes
+	# ...
+	# +0x100	SIMD counters (with nonce smashed by lanes)
+	# ...
+	# +0x140
+$code.=<<___	if ($win64);
+	movaps		%xmm6,-0xa8(%r9)
+	movaps		%xmm7,-0x98(%r9)
+	movaps		%xmm8,-0x88(%r9)
+	movaps		%xmm9,-0x78(%r9)
+	movaps		%xmm10,-0x68(%r9)
+	movaps		%xmm11,-0x58(%r9)
+	movaps		%xmm12,-0x48(%r9)
+	movaps		%xmm13,-0x38(%r9)
+	movaps		%xmm14,-0x28(%r9)
+	movaps		%xmm15,-0x18(%r9)
+.L4x_body:
+___
+$code.=<<___;
+	movdqa		.Lsigma(%rip),$xa3	# key[0]
+	movdqu		($key),$xb3		# key[1]
+	movdqu		16($key),$xt3		# key[2]
+	movdqu		($counter),$xd3		# key[3]
+	lea		0x100(%rsp),%rcx	# size optimization
+	lea		.Lrot16(%rip),%r10
+	lea		.Lrot24(%rip),%r11
+
+	pshufd		\$0x00,$xa3,$xa0	# smash key by lanes...
+	pshufd		\$0x55,$xa3,$xa1
+	movdqa		$xa0,0x40(%rsp)		# ... and offload
+	pshufd		\$0xaa,$xa3,$xa2
+	movdqa		$xa1,0x50(%rsp)
+	pshufd		\$0xff,$xa3,$xa3
+	movdqa		$xa2,0x60(%rsp)
+	movdqa		$xa3,0x70(%rsp)
+
+	pshufd		\$0x00,$xb3,$xb0
+	pshufd		\$0x55,$xb3,$xb1
+	movdqa		$xb0,0x80-0x100(%rcx)
+	pshufd		\$0xaa,$xb3,$xb2
+	movdqa		$xb1,0x90-0x100(%rcx)
+	pshufd		\$0xff,$xb3,$xb3
+	movdqa		$xb2,0xa0-0x100(%rcx)
+	movdqa		$xb3,0xb0-0x100(%rcx)
+
+	pshufd		\$0x00,$xt3,$xt0	# "$xc0"
+	pshufd		\$0x55,$xt3,$xt1	# "$xc1"
+	movdqa		$xt0,0xc0-0x100(%rcx)
+	pshufd		\$0xaa,$xt3,$xt2	# "$xc2"
+	movdqa		$xt1,0xd0-0x100(%rcx)
+	pshufd		\$0xff,$xt3,$xt3	# "$xc3"
+	movdqa		$xt2,0xe0-0x100(%rcx)
+	movdqa		$xt3,0xf0-0x100(%rcx)
+
+	pshufd		\$0x00,$xd3,$xd0
+	pshufd		\$0x55,$xd3,$xd1
+	paddd		.Linc(%rip),$xd0	# don't save counters yet
+	pshufd		\$0xaa,$xd3,$xd2
+	movdqa		$xd1,0x110-0x100(%rcx)
+	pshufd		\$0xff,$xd3,$xd3
+	movdqa		$xd2,0x120-0x100(%rcx)
+	movdqa		$xd3,0x130-0x100(%rcx)
+
+	jmp		.Loop_enter4x
+
+.align	32
+.Loop_outer4x:
+	movdqa		0x40(%rsp),$xa0		# re-load smashed key
+	movdqa		0x50(%rsp),$xa1
+	movdqa		0x60(%rsp),$xa2
+	movdqa		0x70(%rsp),$xa3
+	movdqa		0x80-0x100(%rcx),$xb0
+	movdqa		0x90-0x100(%rcx),$xb1
+	movdqa		0xa0-0x100(%rcx),$xb2
+	movdqa		0xb0-0x100(%rcx),$xb3
+	movdqa		0xc0-0x100(%rcx),$xt0	# "$xc0"
+	movdqa		0xd0-0x100(%rcx),$xt1	# "$xc1"
+	movdqa		0xe0-0x100(%rcx),$xt2	# "$xc2"
+	movdqa		0xf0-0x100(%rcx),$xt3	# "$xc3"
+	movdqa		0x100-0x100(%rcx),$xd0
+	movdqa		0x110-0x100(%rcx),$xd1
+	movdqa		0x120-0x100(%rcx),$xd2
+	movdqa		0x130-0x100(%rcx),$xd3
+	paddd		.Lfour(%rip),$xd0	# next SIMD counters
+
+.Loop_enter4x:
+	movdqa		$xt2,0x20(%rsp)		# SIMD equivalent of "@x[10]"
+	movdqa		$xt3,0x30(%rsp)		# SIMD equivalent of "@x[11]"
+	movdqa		(%r10),$xt3		# .Lrot16(%rip)
+	mov		\$10,%eax
+	movdqa		$xd0,0x100-0x100(%rcx)	# save SIMD counters
+	jmp		.Loop4x
+
+.align	32
+.Loop4x:
+___
+	# 10 iterations of the double round: even (column) + odd (diagonal)
+	foreach (&SSSE3_lane_ROUND(0, 4, 8,12)) { eval; }
+	foreach (&SSSE3_lane_ROUND(0, 5,10,15)) { eval; }
+$code.=<<___;
+	dec		%eax
+	jnz		.Loop4x
+
+	paddd		0x40(%rsp),$xa0		# accumulate key material
+	paddd		0x50(%rsp),$xa1
+	paddd		0x60(%rsp),$xa2
+	paddd		0x70(%rsp),$xa3
+
+	movdqa		$xa0,$xt2		# "de-interlace" data
+	punpckldq	$xa1,$xa0
+	movdqa		$xa2,$xt3
+	punpckldq	$xa3,$xa2
+	punpckhdq	$xa1,$xt2
+	punpckhdq	$xa3,$xt3
+	movdqa		$xa0,$xa1
+	punpcklqdq	$xa2,$xa0		# "a0"
+	movdqa		$xt2,$xa3
+	punpcklqdq	$xt3,$xt2		# "a2"
+	punpckhqdq	$xa2,$xa1		# "a1"
+	punpckhqdq	$xt3,$xa3		# "a3"
+___
+	# transpose left "a2" in $xt2; swap names so $xa2 refers to it below
+	($xa2,$xt2)=($xt2,$xa2);
+$code.=<<___;
+	paddd		0x80-0x100(%rcx),$xb0
+	paddd		0x90-0x100(%rcx),$xb1
+	paddd		0xa0-0x100(%rcx),$xb2
+	paddd		0xb0-0x100(%rcx),$xb3
+
+	movdqa		$xa0,0x00(%rsp)		# offload $xaN
+	movdqa		$xa1,0x10(%rsp)
+	movdqa		0x20(%rsp),$xa0		# "xc2"
+	movdqa		0x30(%rsp),$xa1		# "xc3"
+
+	movdqa		$xb0,$xt2
+	punpckldq	$xb1,$xb0
+	movdqa		$xb2,$xt3
+	punpckldq	$xb3,$xb2
+	punpckhdq	$xb1,$xt2
+	punpckhdq	$xb3,$xt3
+	movdqa		$xb0,$xb1
+	punpcklqdq	$xb2,$xb0		# "b0"
+	movdqa		$xt2,$xb3
+	punpcklqdq	$xt3,$xt2		# "b2"
+	punpckhqdq	$xb2,$xb1		# "b1"
+	punpckhqdq	$xt3,$xb3		# "b3"
+___
+	($xb2,$xt2)=($xt2,$xb2);
+	my ($xc0,$xc1,$xc2,$xc3)=($xt0,$xt1,$xa0,$xa1);
+$code.=<<___;
+	paddd		0xc0-0x100(%rcx),$xc0
+	paddd		0xd0-0x100(%rcx),$xc1
+	paddd		0xe0-0x100(%rcx),$xc2
+	paddd		0xf0-0x100(%rcx),$xc3
+
+	movdqa		$xa2,0x20(%rsp)		# keep offloading $xaN
+	movdqa		$xa3,0x30(%rsp)
+
+	movdqa		$xc0,$xt2
+	punpckldq	$xc1,$xc0
+	movdqa		$xc2,$xt3
+	punpckldq	$xc3,$xc2
+	punpckhdq	$xc1,$xt2
+	punpckhdq	$xc3,$xt3
+	movdqa		$xc0,$xc1
+	punpcklqdq	$xc2,$xc0		# "c0"
+	movdqa		$xt2,$xc3
+	punpcklqdq	$xt3,$xt2		# "c2"
+	punpckhqdq	$xc2,$xc1		# "c1"
+	punpckhqdq	$xt3,$xc3		# "c3"
+___
+	($xc2,$xt2)=($xt2,$xc2);
+	($xt0,$xt1)=($xa2,$xa3);		# use $xaN as temporary
+$code.=<<___;
+	paddd		0x100-0x100(%rcx),$xd0
+	paddd		0x110-0x100(%rcx),$xd1
+	paddd		0x120-0x100(%rcx),$xd2
+	paddd		0x130-0x100(%rcx),$xd3
+
+	movdqa		$xd0,$xt2
+	punpckldq	$xd1,$xd0
+	movdqa		$xd2,$xt3
+	punpckldq	$xd3,$xd2
+	punpckhdq	$xd1,$xt2
+	punpckhdq	$xd3,$xt3
+	movdqa		$xd0,$xd1
+	punpcklqdq	$xd2,$xd0		# "d0"
+	movdqa		$xt2,$xd3
+	punpcklqdq	$xt3,$xt2		# "d2"
+	punpckhqdq	$xd2,$xd1		# "d1"
+	punpckhqdq	$xt3,$xd3		# "d3"
+___
+	($xd2,$xt2)=($xt2,$xd2);
+$code.=<<___;
+	cmp		\$64*4,$len
+	jb		.Ltail4x
+
+	movdqu		0x00($inp),$xt0		# xor with input
+	movdqu		0x10($inp),$xt1
+	movdqu		0x20($inp),$xt2
+	movdqu		0x30($inp),$xt3
+	pxor		0x00(%rsp),$xt0		# $xaN is offloaded, remember?
+	pxor		$xb0,$xt1
+	pxor		$xc0,$xt2
+	pxor		$xd0,$xt3
+
+	 movdqu		$xt0,0x00($out)
+	movdqu		0x40($inp),$xt0
+	 movdqu		$xt1,0x10($out)
+	movdqu		0x50($inp),$xt1
+	 movdqu		$xt2,0x20($out)
+	movdqu		0x60($inp),$xt2
+	 movdqu		$xt3,0x30($out)
+	movdqu		0x70($inp),$xt3
+	lea		0x80($inp),$inp		# size optimization
+	pxor		0x10(%rsp),$xt0
+	pxor		$xb1,$xt1
+	pxor		$xc1,$xt2
+	pxor		$xd1,$xt3
+
+	 movdqu		$xt0,0x40($out)
+	movdqu		0x00($inp),$xt0
+	 movdqu		$xt1,0x50($out)
+	movdqu		0x10($inp),$xt1
+	 movdqu		$xt2,0x60($out)
+	movdqu		0x20($inp),$xt2
+	 movdqu		$xt3,0x70($out)
+	 lea		0x80($out),$out		# size optimization
+	movdqu		0x30($inp),$xt3
+	pxor		0x20(%rsp),$xt0
+	pxor		$xb2,$xt1
+	pxor		$xc2,$xt2
+	pxor		$xd2,$xt3
+
+	 movdqu		$xt0,0x00($out)
+	movdqu		0x40($inp),$xt0
+	 movdqu		$xt1,0x10($out)
+	movdqu		0x50($inp),$xt1
+	 movdqu		$xt2,0x20($out)
+	movdqu		0x60($inp),$xt2
+	 movdqu		$xt3,0x30($out)
+	movdqu		0x70($inp),$xt3
+	lea		0x80($inp),$inp		# inp+=64*4
+	pxor		0x30(%rsp),$xt0
+	pxor		$xb3,$xt1
+	pxor		$xc3,$xt2
+	pxor		$xd3,$xt3
+	movdqu		$xt0,0x40($out)
+	movdqu		$xt1,0x50($out)
+	movdqu		$xt2,0x60($out)
+	movdqu		$xt3,0x70($out)
+	lea		0x80($out),$out		# out+=64*4
+
+	sub		\$64*4,$len
+	jnz		.Loop_outer4x
+
+	jmp		.Ldone4x
+
+.Ltail4x:
+	cmp		\$192,$len
+	jae		.L192_or_more4x
+	cmp		\$128,$len
+	jae		.L128_or_more4x
+	cmp		\$64,$len
+	jae		.L64_or_more4x
+
+	#movdqa		0x00(%rsp),$xt0		# $xaN is offloaded, remember?
+	xor		%r10,%r10
+	#movdqa		$xt0,0x00(%rsp)
+	movdqa		$xb0,0x10(%rsp)
+	movdqa		$xc0,0x20(%rsp)
+	movdqa		$xd0,0x30(%rsp)
+	jmp		.Loop_tail4x
+
+.align	32
+.L64_or_more4x:
+	movdqu		0x00($inp),$xt0		# xor with input
+	movdqu		0x10($inp),$xt1
+	movdqu		0x20($inp),$xt2
+	movdqu		0x30($inp),$xt3
+	pxor		0x00(%rsp),$xt0		# $xaxN is offloaded, remember?
+	pxor		$xb0,$xt1
+	pxor		$xc0,$xt2
+	pxor		$xd0,$xt3
+	movdqu		$xt0,0x00($out)
+	movdqu		$xt1,0x10($out)
+	movdqu		$xt2,0x20($out)
+	movdqu		$xt3,0x30($out)
+	je		.Ldone4x
+
+	movdqa		0x10(%rsp),$xt0		# $xaN is offloaded, remember?
+	lea		0x40($inp),$inp		# inp+=64*1
+	xor		%r10,%r10
+	movdqa		$xt0,0x00(%rsp)
+	movdqa		$xb1,0x10(%rsp)
+	lea		0x40($out),$out		# out+=64*1
+	movdqa		$xc1,0x20(%rsp)
+	sub		\$64,$len		# len-=64*1
+	movdqa		$xd1,0x30(%rsp)
+	jmp		.Loop_tail4x
+
+.align	32
+.L128_or_more4x:
+	movdqu		0x00($inp),$xt0		# xor with input
+	movdqu		0x10($inp),$xt1
+	movdqu		0x20($inp),$xt2
+	movdqu		0x30($inp),$xt3
+	pxor		0x00(%rsp),$xt0		# $xaN is offloaded, remember?
+	pxor		$xb0,$xt1
+	pxor		$xc0,$xt2
+	pxor		$xd0,$xt3
+
+	 movdqu		$xt0,0x00($out)
+	movdqu		0x40($inp),$xt0
+	 movdqu		$xt1,0x10($out)
+	movdqu		0x50($inp),$xt1
+	 movdqu		$xt2,0x20($out)
+	movdqu		0x60($inp),$xt2
+	 movdqu		$xt3,0x30($out)
+	movdqu		0x70($inp),$xt3
+	pxor		0x10(%rsp),$xt0
+	pxor		$xb1,$xt1
+	pxor		$xc1,$xt2
+	pxor		$xd1,$xt3
+	movdqu		$xt0,0x40($out)
+	movdqu		$xt1,0x50($out)
+	movdqu		$xt2,0x60($out)
+	movdqu		$xt3,0x70($out)
+	je		.Ldone4x
+
+	movdqa		0x20(%rsp),$xt0		# $xaN is offloaded, remember?
+	lea		0x80($inp),$inp		# inp+=64*2
+	xor		%r10,%r10
+	movdqa		$xt0,0x00(%rsp)
+	movdqa		$xb2,0x10(%rsp)
+	lea		0x80($out),$out		# out+=64*2
+	movdqa		$xc2,0x20(%rsp)
+	sub		\$128,$len		# len-=64*2
+	movdqa		$xd2,0x30(%rsp)
+	jmp		.Loop_tail4x
+
+.align	32
+.L192_or_more4x:
+	movdqu		0x00($inp),$xt0		# xor with input
+	movdqu		0x10($inp),$xt1
+	movdqu		0x20($inp),$xt2
+	movdqu		0x30($inp),$xt3
+	pxor		0x00(%rsp),$xt0		# $xaN is offloaded, remember?
+	pxor		$xb0,$xt1
+	pxor		$xc0,$xt2
+	pxor		$xd0,$xt3
+
+	 movdqu		$xt0,0x00($out)
+	movdqu		0x40($inp),$xt0
+	 movdqu		$xt1,0x10($out)
+	movdqu		0x50($inp),$xt1
+	 movdqu		$xt2,0x20($out)
+	movdqu		0x60($inp),$xt2
+	 movdqu		$xt3,0x30($out)
+	movdqu		0x70($inp),$xt3
+	lea		0x80($inp),$inp		# size optimization
+	pxor		0x10(%rsp),$xt0
+	pxor		$xb1,$xt1
+	pxor		$xc1,$xt2
+	pxor		$xd1,$xt3
+
+	 movdqu		$xt0,0x40($out)
+	movdqu		0x00($inp),$xt0
+	 movdqu		$xt1,0x50($out)
+	movdqu		0x10($inp),$xt1
+	 movdqu		$xt2,0x60($out)
+	movdqu		0x20($inp),$xt2
+	 movdqu		$xt3,0x70($out)
+	 lea		0x80($out),$out		# size optimization
+	movdqu		0x30($inp),$xt3
+	pxor		0x20(%rsp),$xt0
+	pxor		$xb2,$xt1
+	pxor		$xc2,$xt2
+	pxor		$xd2,$xt3
+	movdqu		$xt0,0x00($out)
+	movdqu		$xt1,0x10($out)
+	movdqu		$xt2,0x20($out)
+	movdqu		$xt3,0x30($out)
+	je		.Ldone4x
+
+	movdqa		0x30(%rsp),$xt0		# $xaN is offloaded, remember?
+	lea		0x40($inp),$inp		# inp+=64*3
+	xor		%r10,%r10
+	movdqa		$xt0,0x00(%rsp)
+	movdqa		$xb3,0x10(%rsp)
+	lea		0x40($out),$out		# out+=64*3
+	movdqa		$xc3,0x20(%rsp)
+	sub		\$192,$len		# len-=64*3
+	movdqa		$xd3,0x30(%rsp)
+
+.Loop_tail4x:
+	movzb		($inp,%r10),%eax
+	movzb		(%rsp,%r10),%ecx
+	lea		1(%r10),%r10
+	xor		%ecx,%eax
+	mov		%al,-1($out,%r10)
+	dec		$len
+	jnz		.Loop_tail4x
+
+.Ldone4x:
+___
+$code.=<<___	if ($win64);
+	movaps		-0xa8(%r9),%xmm6
+	movaps		-0x98(%r9),%xmm7
+	movaps		-0x88(%r9),%xmm8
+	movaps		-0x78(%r9),%xmm9
+	movaps		-0x68(%r9),%xmm10
+	movaps		-0x58(%r9),%xmm11
+	movaps		-0x48(%r9),%xmm12
+	movaps		-0x38(%r9),%xmm13
+	movaps		-0x28(%r9),%xmm14
+	movaps		-0x18(%r9),%xmm15
+___
+$code.=<<___;
+	lea		(%r9),%rsp
+.cfi_def_cfa_register	%rsp
+.L4x_epilogue:
+	ret
+.cfi_endproc
+.size	ChaCha20_4x,.-ChaCha20_4x
+___
+}
+
+########################################################################
+# XOP code path that handles all lengths.
+if ($avx) {
+# There is some "anomaly" observed depending on instructions' size or
+# alignment. If you look closely at below code you'll notice that
+# sometimes argument order varies. The order affects instruction
+# encoding by making it larger, and such fiddling gives 5% performance
+# improvement. This is on FX-4100...
+
+my ($xb0,$xb1,$xb2,$xb3, $xd0,$xd1,$xd2,$xd3,
+    $xa0,$xa1,$xa2,$xa3, $xt0,$xt1,$xt2,$xt3)=map("%xmm$_",(0..15));
+my  @xx=($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3,
+	 $xt0,$xt1,$xt2,$xt3, $xd0,$xd1,$xd2,$xd3);
+
+# XOP_lane_ROUND($a0,$b0,$c0,$d0) - returns instruction strings for four
+# interleaved quarter-rounds; XOP's vprotd does each rotate in one
+# instruction, so unlike the SSSE3 path all 16 state words stay in registers.
+sub XOP_lane_ROUND {
+my ($a0,$b0,$c0,$d0)=@_;
+my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));
+my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
+my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));
+my @x=map("\"$_\"",@xx);
+
+	(
+	"&vpaddd	(@x[$a0],@x[$a0],@x[$b0])",	# Q1
+	 "&vpaddd	(@x[$a1],@x[$a1],@x[$b1])",	# Q2
+	  "&vpaddd	(@x[$a2],@x[$a2],@x[$b2])",	# Q3
+	   "&vpaddd	(@x[$a3],@x[$a3],@x[$b3])",	# Q4
+	"&vpxor		(@x[$d0],@x[$a0],@x[$d0])",
+	 "&vpxor	(@x[$d1],@x[$a1],@x[$d1])",
+	  "&vpxor	(@x[$d2],@x[$a2],@x[$d2])",
+	   "&vpxor	(@x[$d3],@x[$a3],@x[$d3])",
+	"&vprotd	(@x[$d0],@x[$d0],16)",
+	 "&vprotd	(@x[$d1],@x[$d1],16)",
+	  "&vprotd	(@x[$d2],@x[$d2],16)",
+	   "&vprotd	(@x[$d3],@x[$d3],16)",
+
+	"&vpaddd	(@x[$c0],@x[$c0],@x[$d0])",
+	 "&vpaddd	(@x[$c1],@x[$c1],@x[$d1])",
+	  "&vpaddd	(@x[$c2],@x[$c2],@x[$d2])",
+	   "&vpaddd	(@x[$c3],@x[$c3],@x[$d3])",
+	"&vpxor		(@x[$b0],@x[$c0],@x[$b0])",
+	 "&vpxor	(@x[$b1],@x[$c1],@x[$b1])",
+	  "&vpxor	(@x[$b2],@x[$b2],@x[$c2])",	# flip
+	   "&vpxor	(@x[$b3],@x[$b3],@x[$c3])",	# flip
+	"&vprotd	(@x[$b0],@x[$b0],12)",
+	 "&vprotd	(@x[$b1],@x[$b1],12)",
+	  "&vprotd	(@x[$b2],@x[$b2],12)",
+	   "&vprotd	(@x[$b3],@x[$b3],12)",
+
+	"&vpaddd	(@x[$a0],@x[$b0],@x[$a0])",	# flip
+	 "&vpaddd	(@x[$a1],@x[$b1],@x[$a1])",	# flip
+	  "&vpaddd	(@x[$a2],@x[$a2],@x[$b2])",
+	   "&vpaddd	(@x[$a3],@x[$a3],@x[$b3])",
+	"&vpxor		(@x[$d0],@x[$a0],@x[$d0])",
+	 "&vpxor	(@x[$d1],@x[$a1],@x[$d1])",
+	  "&vpxor	(@x[$d2],@x[$a2],@x[$d2])",
+	   "&vpxor	(@x[$d3],@x[$a3],@x[$d3])",
+	"&vprotd	(@x[$d0],@x[$d0],8)",
+	 "&vprotd	(@x[$d1],@x[$d1],8)",
+	  "&vprotd	(@x[$d2],@x[$d2],8)",
+	   "&vprotd	(@x[$d3],@x[$d3],8)",
+
+	"&vpaddd	(@x[$c0],@x[$c0],@x[$d0])",
+	 "&vpaddd	(@x[$c1],@x[$c1],@x[$d1])",
+	  "&vpaddd	(@x[$c2],@x[$c2],@x[$d2])",
+	   "&vpaddd	(@x[$c3],@x[$c3],@x[$d3])",
+	"&vpxor		(@x[$b0],@x[$c0],@x[$b0])",
+	 "&vpxor	(@x[$b1],@x[$c1],@x[$b1])",
+	  "&vpxor	(@x[$b2],@x[$b2],@x[$c2])",	# flip
+	   "&vpxor	(@x[$b3],@x[$b3],@x[$c3])",	# flip
+	"&vprotd	(@x[$b0],@x[$b0],7)",
+	 "&vprotd	(@x[$b1],@x[$b1],7)",
+	  "&vprotd	(@x[$b2],@x[$b2],7)",
+	   "&vprotd	(@x[$b3],@x[$b3],7)"
+	);
+}
+
+# frame below %r9: Win64 needs 0xa8 bytes to preserve xmm6-15, else 8
+my $xframe = $win64 ? 0xa8 : 8;
+
+$code.=<<___;
+.type	ChaCha20_4xop,\@function,5
+.align	32
+ChaCha20_4xop:
+.cfi_startproc
+.LChaCha20_4xop:
+	mov		%rsp,%r9		# frame pointer
+.cfi_def_cfa_register	%r9
+	sub		\$0x140+$xframe,%rsp
+___
+	################ stack layout
+	# +0x00		SIMD equivalent of @x[8-12]
+	# ...
+	# +0x40		constant copy of key[0-2] smashed by lanes
+	# ...
+	# +0x100	SIMD counters (with nonce smashed by lanes)
+	# ...
+	# +0x140
+$code.=<<___	if ($win64);
+	movaps		%xmm6,-0xa8(%r9)
+	movaps		%xmm7,-0x98(%r9)
+	movaps		%xmm8,-0x88(%r9)
+	movaps		%xmm9,-0x78(%r9)
+	movaps		%xmm10,-0x68(%r9)
+	movaps		%xmm11,-0x58(%r9)
+	movaps		%xmm12,-0x48(%r9)
+	movaps		%xmm13,-0x38(%r9)
+	movaps		%xmm14,-0x28(%r9)
+	movaps		%xmm15,-0x18(%r9)
+.L4xop_body:
+___
+$code.=<<___;
+	vzeroupper
+
+	vmovdqa		.Lsigma(%rip),$xa3	# key[0]
+	vmovdqu		($key),$xb3		# key[1]
+	vmovdqu		16($key),$xt3		# key[2]
+	vmovdqu		($counter),$xd3		# key[3]
+	lea		0x100(%rsp),%rcx	# size optimization
+
+	vpshufd		\$0x00,$xa3,$xa0	# smash key by lanes...
+	vpshufd		\$0x55,$xa3,$xa1
+	vmovdqa		$xa0,0x40(%rsp)		# ... and offload
+	vpshufd		\$0xaa,$xa3,$xa2
+	vmovdqa		$xa1,0x50(%rsp)
+	vpshufd		\$0xff,$xa3,$xa3
+	vmovdqa		$xa2,0x60(%rsp)
+	vmovdqa		$xa3,0x70(%rsp)
+
+	vpshufd		\$0x00,$xb3,$xb0
+	vpshufd		\$0x55,$xb3,$xb1
+	vmovdqa		$xb0,0x80-0x100(%rcx)
+	vpshufd		\$0xaa,$xb3,$xb2
+	vmovdqa		$xb1,0x90-0x100(%rcx)
+	vpshufd		\$0xff,$xb3,$xb3
+	vmovdqa		$xb2,0xa0-0x100(%rcx)
+	vmovdqa		$xb3,0xb0-0x100(%rcx)
+
+	vpshufd		\$0x00,$xt3,$xt0	# "$xc0"
+	vpshufd		\$0x55,$xt3,$xt1	# "$xc1"
+	vmovdqa		$xt0,0xc0-0x100(%rcx)
+	vpshufd		\$0xaa,$xt3,$xt2	# "$xc2"
+	vmovdqa		$xt1,0xd0-0x100(%rcx)
+	vpshufd		\$0xff,$xt3,$xt3	# "$xc3"
+	vmovdqa		$xt2,0xe0-0x100(%rcx)
+	vmovdqa		$xt3,0xf0-0x100(%rcx)
+
+	vpshufd		\$0x00,$xd3,$xd0
+	vpshufd		\$0x55,$xd3,$xd1
+	vpaddd		.Linc(%rip),$xd0,$xd0	# don't save counters yet
+	vpshufd		\$0xaa,$xd3,$xd2
+	vmovdqa		$xd1,0x110-0x100(%rcx)
+	vpshufd		\$0xff,$xd3,$xd3
+	vmovdqa		$xd2,0x120-0x100(%rcx)
+	vmovdqa		$xd3,0x130-0x100(%rcx)
+
+	jmp		.Loop_enter4xop
+
+.align	32
+.Loop_outer4xop:
+	vmovdqa		0x40(%rsp),$xa0		# re-load smashed key
+	vmovdqa		0x50(%rsp),$xa1
+	vmovdqa		0x60(%rsp),$xa2
+	vmovdqa		0x70(%rsp),$xa3
+	vmovdqa		0x80-0x100(%rcx),$xb0
+	vmovdqa		0x90-0x100(%rcx),$xb1
+	vmovdqa		0xa0-0x100(%rcx),$xb2
+	vmovdqa		0xb0-0x100(%rcx),$xb3
+	vmovdqa		0xc0-0x100(%rcx),$xt0	# "$xc0"
+	vmovdqa		0xd0-0x100(%rcx),$xt1	# "$xc1"
+	vmovdqa		0xe0-0x100(%rcx),$xt2	# "$xc2"
+	vmovdqa		0xf0-0x100(%rcx),$xt3	# "$xc3"
+	vmovdqa		0x100-0x100(%rcx),$xd0
+	vmovdqa		0x110-0x100(%rcx),$xd1
+	vmovdqa		0x120-0x100(%rcx),$xd2
+	vmovdqa		0x130-0x100(%rcx),$xd3
+	vpaddd		.Lfour(%rip),$xd0,$xd0	# next SIMD counters
+
+.Loop_enter4xop:
+	mov		\$10,%eax
+	vmovdqa		$xd0,0x100-0x100(%rcx)	# save SIMD counters
+	jmp		.Loop4xop
+
+.align	32
+.Loop4xop:
+___
+	# 10 iterations of the double round: even (column) + odd (diagonal)
+	foreach (&XOP_lane_ROUND(0, 4, 8,12)) { eval; }
+	foreach (&XOP_lane_ROUND(0, 5,10,15)) { eval; }
+$code.=<<___;
+	dec		%eax
+	jnz		.Loop4xop
+
+	vpaddd		0x40(%rsp),$xa0,$xa0	# accumulate key material
+	vpaddd		0x50(%rsp),$xa1,$xa1
+	vpaddd		0x60(%rsp),$xa2,$xa2
+	vpaddd		0x70(%rsp),$xa3,$xa3
+
+	vmovdqa		$xt2,0x20(%rsp)		# offload $xc2,3
+	vmovdqa		$xt3,0x30(%rsp)
+
+	vpunpckldq	$xa1,$xa0,$xt2		# "de-interlace" data
+	vpunpckldq	$xa3,$xa2,$xt3
+	vpunpckhdq	$xa1,$xa0,$xa0
+	vpunpckhdq	$xa3,$xa2,$xa2
+	vpunpcklqdq	$xt3,$xt2,$xa1		# "a0"
+	vpunpckhqdq	$xt3,$xt2,$xt2		# "a1"
+	vpunpcklqdq	$xa2,$xa0,$xa3		# "a2"
+	vpunpckhqdq	$xa2,$xa0,$xa0		# "a3"
+___
+	# rename registers so $xa0..$xa3 again mean "a0".."a3" after transpose
+        ($xa0,$xa1,$xa2,$xa3,$xt2)=($xa1,$xt2,$xa3,$xa0,$xa2);
+$code.=<<___;
+	vpaddd		0x80-0x100(%rcx),$xb0,$xb0
+	vpaddd		0x90-0x100(%rcx),$xb1,$xb1
+	vpaddd		0xa0-0x100(%rcx),$xb2,$xb2
+	vpaddd		0xb0-0x100(%rcx),$xb3,$xb3
+
+	vmovdqa		$xa0,0x00(%rsp)		# offload $xa0,1
+	vmovdqa		$xa1,0x10(%rsp)
+	vmovdqa		0x20(%rsp),$xa0		# "xc2"
+	vmovdqa		0x30(%rsp),$xa1		# "xc3"
+
+	vpunpckldq	$xb1,$xb0,$xt2
+	vpunpckldq	$xb3,$xb2,$xt3
+	vpunpckhdq	$xb1,$xb0,$xb0
+	vpunpckhdq	$xb3,$xb2,$xb2
+	vpunpcklqdq	$xt3,$xt2,$xb1		# "b0"
+	vpunpckhqdq	$xt3,$xt2,$xt2		# "b1"
+	vpunpcklqdq	$xb2,$xb0,$xb3		# "b2"
+	vpunpckhqdq	$xb2,$xb0,$xb0		# "b3"
+___
+	($xb0,$xb1,$xb2,$xb3,$xt2)=($xb1,$xt2,$xb3,$xb0,$xb2);
+	my ($xc0,$xc1,$xc2,$xc3)=($xt0,$xt1,$xa0,$xa1);
+$code.=<<___;
+	vpaddd		0xc0-0x100(%rcx),$xc0,$xc0
+	vpaddd		0xd0-0x100(%rcx),$xc1,$xc1
+	vpaddd		0xe0-0x100(%rcx),$xc2,$xc2
+	vpaddd		0xf0-0x100(%rcx),$xc3,$xc3
+
+	vpunpckldq	$xc1,$xc0,$xt2
+	vpunpckldq	$xc3,$xc2,$xt3
+	vpunpckhdq	$xc1,$xc0,$xc0
+	vpunpckhdq	$xc3,$xc2,$xc2
+	vpunpcklqdq	$xt3,$xt2,$xc1		# "c0"
+	vpunpckhqdq	$xt3,$xt2,$xt2		# "c1"
+	vpunpcklqdq	$xc2,$xc0,$xc3		# "c2"
+	vpunpckhqdq	$xc2,$xc0,$xc0		# "c3"
+___
+	($xc0,$xc1,$xc2,$xc3,$xt2)=($xc1,$xt2,$xc3,$xc0,$xc2);
+$code.=<<___;
+	vpaddd		0x100-0x100(%rcx),$xd0,$xd0
+	vpaddd		0x110-0x100(%rcx),$xd1,$xd1
+	vpaddd		0x120-0x100(%rcx),$xd2,$xd2
+	vpaddd		0x130-0x100(%rcx),$xd3,$xd3
+
+	vpunpckldq	$xd1,$xd0,$xt2
+	vpunpckldq	$xd3,$xd2,$xt3
+	vpunpckhdq	$xd1,$xd0,$xd0
+	vpunpckhdq	$xd3,$xd2,$xd2
+	vpunpcklqdq	$xt3,$xt2,$xd1		# "d0"
+	vpunpckhqdq	$xt3,$xt2,$xt2		# "d1"
+	vpunpcklqdq	$xd2,$xd0,$xd3		# "d2"
+	vpunpckhqdq	$xd2,$xd0,$xd0		# "d3"
+___
+	($xd0,$xd1,$xd2,$xd3,$xt2)=($xd1,$xt2,$xd3,$xd0,$xd2);
+	($xa0,$xa1)=($xt2,$xt3);
+$code.=<<___;
+	vmovdqa		0x00(%rsp),$xa0		# restore $xa0,1
+	vmovdqa		0x10(%rsp),$xa1
+
+	cmp		\$64*4,$len
+	jb		.Ltail4xop
+
+	vpxor		0x00($inp),$xa0,$xa0	# xor with input
+	vpxor		0x10($inp),$xb0,$xb0
+	vpxor		0x20($inp),$xc0,$xc0
+	vpxor		0x30($inp),$xd0,$xd0
+	vpxor		0x40($inp),$xa1,$xa1
+	vpxor		0x50($inp),$xb1,$xb1
+	vpxor		0x60($inp),$xc1,$xc1
+	vpxor		0x70($inp),$xd1,$xd1
+	lea		0x80($inp),$inp		# size optimization
+	vpxor		0x00($inp),$xa2,$xa2
+	vpxor		0x10($inp),$xb2,$xb2
+	vpxor		0x20($inp),$xc2,$xc2
+	vpxor		0x30($inp),$xd2,$xd2
+	vpxor		0x40($inp),$xa3,$xa3
+	vpxor		0x50($inp),$xb3,$xb3
+	vpxor		0x60($inp),$xc3,$xc3
+	vpxor		0x70($inp),$xd3,$xd3
+	lea		0x80($inp),$inp		# inp+=64*4
+
+	vmovdqu		$xa0,0x00($out)
+	vmovdqu		$xb0,0x10($out)
+	vmovdqu		$xc0,0x20($out)
+	vmovdqu		$xd0,0x30($out)
+	vmovdqu		$xa1,0x40($out)
+	vmovdqu		$xb1,0x50($out)
+	vmovdqu		$xc1,0x60($out)
+	vmovdqu		$xd1,0x70($out)
+	lea		0x80($out),$out		# size optimization
+	vmovdqu		$xa2,0x00($out)
+	vmovdqu		$xb2,0x10($out)
+	vmovdqu		$xc2,0x20($out)
+	vmovdqu		$xd2,0x30($out)
+	vmovdqu		$xa3,0x40($out)
+	vmovdqu		$xb3,0x50($out)
+	vmovdqu		$xc3,0x60($out)
+	vmovdqu		$xd3,0x70($out)
+	lea		0x80($out),$out		# out+=64*4
+
+	sub		\$64*4,$len
+	jnz		.Loop_outer4xop
+
+	jmp		.Ldone4xop
+
+.align	32
+.Ltail4xop:
+	cmp		\$192,$len
+	jae		.L192_or_more4xop
+	cmp		\$128,$len
+	jae		.L128_or_more4xop
+	cmp		\$64,$len
+	jae		.L64_or_more4xop
+
+	xor		%r10,%r10
+	vmovdqa		$xa0,0x00(%rsp)
+	vmovdqa		$xb0,0x10(%rsp)
+	vmovdqa		$xc0,0x20(%rsp)
+	vmovdqa		$xd0,0x30(%rsp)
+	jmp		.Loop_tail4xop
+
+.align	32
+.L64_or_more4xop:
+	vpxor		0x00($inp),$xa0,$xa0	# xor with input
+	vpxor		0x10($inp),$xb0,$xb0
+	vpxor		0x20($inp),$xc0,$xc0
+	vpxor		0x30($inp),$xd0,$xd0
+	vmovdqu		$xa0,0x00($out)
+	vmovdqu		$xb0,0x10($out)
+	vmovdqu		$xc0,0x20($out)
+	vmovdqu		$xd0,0x30($out)
+	je		.Ldone4xop
+
+	lea		0x40($inp),$inp		# inp+=64*1
+	vmovdqa		$xa1,0x00(%rsp)
+	xor		%r10,%r10
+	vmovdqa		$xb1,0x10(%rsp)
+	lea		0x40($out),$out		# out+=64*1
+	vmovdqa		$xc1,0x20(%rsp)
+	sub		\$64,$len		# len-=64*1
+	vmovdqa		$xd1,0x30(%rsp)
+	jmp		.Loop_tail4xop
+
+.align	32
+.L128_or_more4xop:
+	vpxor		0x00($inp),$xa0,$xa0	# xor with input
+	vpxor		0x10($inp),$xb0,$xb0
+	vpxor		0x20($inp),$xc0,$xc0
+	vpxor		0x30($inp),$xd0,$xd0
+	vpxor		0x40($inp),$xa1,$xa1
+	vpxor		0x50($inp),$xb1,$xb1
+	vpxor		0x60($inp),$xc1,$xc1
+	vpxor		0x70($inp),$xd1,$xd1
+
+	vmovdqu		$xa0,0x00($out)
+	vmovdqu		$xb0,0x10($out)
+	vmovdqu		$xc0,0x20($out)
+	vmovdqu		$xd0,0x30($out)
+	vmovdqu		$xa1,0x40($out)
+	vmovdqu		$xb1,0x50($out)
+	vmovdqu		$xc1,0x60($out)
+	vmovdqu		$xd1,0x70($out)
+	je		.Ldone4xop
+
+	lea		0x80($inp),$inp		# inp+=64*2
+	vmovdqa		$xa2,0x00(%rsp)
+	xor		%r10,%r10
+	vmovdqa		$xb2,0x10(%rsp)
+	lea		0x80($out),$out		# out+=64*2
+	vmovdqa		$xc2,0x20(%rsp)
+	sub		\$128,$len		# len-=64*2
+	vmovdqa		$xd2,0x30(%rsp)
+	jmp		.Loop_tail4xop
+
+.align	32
+.L192_or_more4xop:
+	vpxor		0x00($inp),$xa0,$xa0	# xor with input
+	vpxor		0x10($inp),$xb0,$xb0
+	vpxor		0x20($inp),$xc0,$xc0
+	vpxor		0x30($inp),$xd0,$xd0
+	vpxor		0x40($inp),$xa1,$xa1
+	vpxor		0x50($inp),$xb1,$xb1
+	vpxor		0x60($inp),$xc1,$xc1
+	vpxor		0x70($inp),$xd1,$xd1
+	lea		0x80($inp),$inp		# size optimization
+	vpxor		0x00($inp),$xa2,$xa2
+	vpxor		0x10($inp),$xb2,$xb2
+	vpxor		0x20($inp),$xc2,$xc2
+	vpxor		0x30($inp),$xd2,$xd2
+
+	vmovdqu		$xa0,0x00($out)
+	vmovdqu		$xb0,0x10($out)
+	vmovdqu		$xc0,0x20($out)
+	vmovdqu		$xd0,0x30($out)
+	vmovdqu		$xa1,0x40($out)
+	vmovdqu		$xb1,0x50($out)
+	vmovdqu		$xc1,0x60($out)
+	vmovdqu		$xd1,0x70($out)
+	lea		0x80($out),$out		# size optimization
+	vmovdqu		$xa2,0x00($out)
+	vmovdqu		$xb2,0x10($out)
+	vmovdqu		$xc2,0x20($out)
+	vmovdqu		$xd2,0x30($out)
+	je		.Ldone4xop
+
+	lea		0x40($inp),$inp		# inp+=64*3
+	vmovdqa		$xa3,0x00(%rsp)
+	xor		%r10,%r10
+	vmovdqa		$xb3,0x10(%rsp)
+	lea		0x40($out),$out		# out+=64*3
+	vmovdqa		$xc3,0x20(%rsp)
+	sub		\$192,$len		# len-=64*3
+	vmovdqa		$xd3,0x30(%rsp)
+
+.Loop_tail4xop:
+	movzb		($inp,%r10),%eax
+	movzb		(%rsp,%r10),%ecx
+	lea		1(%r10),%r10
+	xor		%ecx,%eax
+	mov		%al,-1($out,%r10)
+	dec		$len
+	jnz		.Loop_tail4xop
+
+.Ldone4xop:
+	vzeroupper
+___
+$code.=<<___	if ($win64);
+	movaps		-0xa8(%r9),%xmm6
+	movaps		-0x98(%r9),%xmm7
+	movaps		-0x88(%r9),%xmm8
+	movaps		-0x78(%r9),%xmm9
+	movaps		-0x68(%r9),%xmm10
+	movaps		-0x58(%r9),%xmm11
+	movaps		-0x48(%r9),%xmm12
+	movaps		-0x38(%r9),%xmm13
+	movaps		-0x28(%r9),%xmm14
+	movaps		-0x18(%r9),%xmm15
+___
+$code.=<<___;
+	lea		(%r9),%rsp
+.cfi_def_cfa_register	%rsp
+.L4xop_epilogue:
+	ret
+.cfi_endproc
+.size	ChaCha20_4xop,.-ChaCha20_4xop
+___
+}
+
+########################################################################
+# AVX2 code path
+if ($avx>1) {
+my ($xb0,$xb1,$xb2,$xb3, $xd0,$xd1,$xd2,$xd3,
+    $xa0,$xa1,$xa2,$xa3, $xt0,$xt1,$xt2,$xt3)=map("%ymm$_",(0..15));
+# indices 8..11 (the 'c' row) are "%nox" placeholders: those lanes live on
+# the stack and are cycled through $xt0/$xt1 inside AVX2_lane_ROUND
+my @xx=($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3,
+	"%nox","%nox","%nox","%nox", $xd0,$xd1,$xd2,$xd3);
+
+# AVX2_lane_ROUND($a0,$b0,$c0,$d0) - returns instruction strings for four
+# quarter-rounds over 8-lane (256-bit) state; same index scheme as the
+# SSSE3 variant, with ymm registers and 32-byte stack slots for the 'c' row.
+sub AVX2_lane_ROUND {
+my ($a0,$b0,$c0,$d0)=@_;
+my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));
+my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
+my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));
+my ($xc,$xc_,$t0,$t1)=map("\"$_\"",$xt0,$xt1,$xt2,$xt3);
+my @x=map("\"$_\"",@xx);
+
+	# Consider order in which variables are addressed by their
+	# index:
+	#
+	#	a   b   c   d
+	#
+	#	0   4   8  12 < even round
+	#	1   5   9  13
+	#	2   6  10  14
+	#	3   7  11  15
+	#	0   5  10  15 < odd round
+	#	1   6  11  12
+	#	2   7   8  13
+	#	3   4   9  14
+	#
+	# 'a', 'b' and 'd's are permanently allocated in registers,
+	# @x[0..7,12..15], while 'c's are maintained in memory. If
+	# you observe 'c' column, you'll notice that pair of 'c's is
+	# invariant between rounds. This means that we have to reload
+	# them once per round, in the middle. This is why you'll see
+	# bunch of 'c' stores and loads in the middle, but none in
+	# the beginning or end.
+
+	(
+	"&vpaddd	(@x[$a0],@x[$a0],@x[$b0])",	# Q1
+	"&vpxor		(@x[$d0],@x[$a0],@x[$d0])",
+	"&vpshufb	(@x[$d0],@x[$d0],$t1)",
+	 "&vpaddd	(@x[$a1],@x[$a1],@x[$b1])",	# Q2
+	 "&vpxor	(@x[$d1],@x[$a1],@x[$d1])",
+	 "&vpshufb	(@x[$d1],@x[$d1],$t1)",
+
+	"&vpaddd	($xc,$xc,@x[$d0])",
+	"&vpxor		(@x[$b0],$xc,@x[$b0])",
+	"&vpslld	($t0,@x[$b0],12)",
+	"&vpsrld	(@x[$b0],@x[$b0],20)",
+	"&vpor		(@x[$b0],$t0,@x[$b0])",
+	"&vbroadcasti128($t0,'(%r11)')",		# .Lrot24(%rip)
+	 "&vpaddd	($xc_,$xc_,@x[$d1])",
+	 "&vpxor	(@x[$b1],$xc_,@x[$b1])",
+	 "&vpslld	($t1,@x[$b1],12)",
+	 "&vpsrld	(@x[$b1],@x[$b1],20)",
+	 "&vpor		(@x[$b1],$t1,@x[$b1])",
+
+	"&vpaddd	(@x[$a0],@x[$a0],@x[$b0])",
+	"&vpxor		(@x[$d0],@x[$a0],@x[$d0])",
+	"&vpshufb	(@x[$d0],@x[$d0],$t0)",
+	 "&vpaddd	(@x[$a1],@x[$a1],@x[$b1])",
+	 "&vpxor	(@x[$d1],@x[$a1],@x[$d1])",
+	 "&vpshufb	(@x[$d1],@x[$d1],$t0)",
+
+	"&vpaddd	($xc,$xc,@x[$d0])",
+	"&vpxor		(@x[$b0],$xc,@x[$b0])",
+	"&vpslld	($t1,@x[$b0],7)",
+	"&vpsrld	(@x[$b0],@x[$b0],25)",
+	"&vpor		(@x[$b0],$t1,@x[$b0])",
+	"&vbroadcasti128($t1,'(%r10)')",		# .Lrot16(%rip)
+	 "&vpaddd	($xc_,$xc_,@x[$d1])",
+	 "&vpxor	(@x[$b1],$xc_,@x[$b1])",
+	 "&vpslld	($t0,@x[$b1],7)",
+	 "&vpsrld	(@x[$b1],@x[$b1],25)",
+	 "&vpor		(@x[$b1],$t0,@x[$b1])",
+
+	"&vmovdqa	(\"`32*($c0-8)`(%rsp)\",$xc)",	# reload pair of 'c's
+	 "&vmovdqa	(\"`32*($c1-8)`(%rsp)\",$xc_)",
+	"&vmovdqa	($xc,\"`32*($c2-8)`(%rsp)\")",
+	 "&vmovdqa	($xc_,\"`32*($c3-8)`(%rsp)\")",
+
+	"&vpaddd	(@x[$a2],@x[$a2],@x[$b2])",	# Q3
+	"&vpxor		(@x[$d2],@x[$a2],@x[$d2])",
+	"&vpshufb	(@x[$d2],@x[$d2],$t1)",
+	 "&vpaddd	(@x[$a3],@x[$a3],@x[$b3])",	# Q4
+	 "&vpxor	(@x[$d3],@x[$a3],@x[$d3])",
+	 "&vpshufb	(@x[$d3],@x[$d3],$t1)",
+
+	"&vpaddd	($xc,$xc,@x[$d2])",
+	"&vpxor		(@x[$b2],$xc,@x[$b2])",
+	"&vpslld	($t0,@x[$b2],12)",
+	"&vpsrld	(@x[$b2],@x[$b2],20)",
+	"&vpor		(@x[$b2],$t0,@x[$b2])",
+	"&vbroadcasti128($t0,'(%r11)')",		# .Lrot24(%rip)
+	 "&vpaddd	($xc_,$xc_,@x[$d3])",
+	 "&vpxor	(@x[$b3],$xc_,@x[$b3])",
+	 "&vpslld	($t1,@x[$b3],12)",
+	 "&vpsrld	(@x[$b3],@x[$b3],20)",
+	 "&vpor		(@x[$b3],$t1,@x[$b3])",
+
+	"&vpaddd	(@x[$a2],@x[$a2],@x[$b2])",
+	"&vpxor		(@x[$d2],@x[$a2],@x[$d2])",
+	"&vpshufb	(@x[$d2],@x[$d2],$t0)",
+	 "&vpaddd	(@x[$a3],@x[$a3],@x[$b3])",
+	 "&vpxor	(@x[$d3],@x[$a3],@x[$d3])",
+	 "&vpshufb	(@x[$d3],@x[$d3],$t0)",
+
+	"&vpaddd	($xc,$xc,@x[$d2])",
+	"&vpxor		(@x[$b2],$xc,@x[$b2])",
+	"&vpslld	($t1,@x[$b2],7)",
+	"&vpsrld	(@x[$b2],@x[$b2],25)",
+	"&vpor		(@x[$b2],$t1,@x[$b2])",
+	"&vbroadcasti128($t1,'(%r10)')",		# .Lrot16(%rip)
+	 "&vpaddd	($xc_,$xc_,@x[$d3])",
+	 "&vpxor	(@x[$b3],$xc_,@x[$b3])",
+	 "&vpslld	($t0,@x[$b3],7)",
+	 "&vpsrld	(@x[$b3],@x[$b3],25)",
+	 "&vpor		(@x[$b3],$t0,@x[$b3])"
+	);
+}
+
+my $xframe = $win64 ? 0xa8 : 8;
+
+$code.=<<___;
+.type	ChaCha20_8x,\@function,5
+.align	32
+ChaCha20_8x:
+.cfi_startproc
+.LChaCha20_8x:
+	mov		%rsp,%r9		# frame register
+.cfi_def_cfa_register	%r9
+	sub		\$0x280+$xframe,%rsp
+	and		\$-32,%rsp
+___
+# Win64 ABI: %xmm6-15 are callee-saved; spill them below the frame pointer
+# (%r9) so the SEH/unwind handler can restore them. .L8x_body marks the
+# point past which the spills are valid.
+$code.=<<___	if ($win64);
+	movaps		%xmm6,-0xa8(%r9)
+	movaps		%xmm7,-0x98(%r9)
+	movaps		%xmm8,-0x88(%r9)
+	movaps		%xmm9,-0x78(%r9)
+	movaps		%xmm10,-0x68(%r9)
+	movaps		%xmm11,-0x58(%r9)
+	movaps		%xmm12,-0x48(%r9)
+	movaps		%xmm13,-0x38(%r9)
+	movaps		%xmm14,-0x28(%r9)
+	movaps		%xmm15,-0x18(%r9)
+.L8x_body:
+___
+# Body prologue for the 8-way AVX2 path: broadcast the four 128-bit state
+# rows, then "smash" every 32-bit state word across a full %ymm register
+# (one register lane per block) and park the constant words on the stack
+# so .Loop_outer8x can reload them each 512-byte iteration.
+$code.=<<___;
+	vzeroupper
+
+	################ stack layout
+	# +0x00		SIMD equivalent of @x[8-12]
+	# ...
+	# +0x80		constant copy of key[0-2] smashed by lanes
+	# ...
+	# +0x200	SIMD counters (with nonce smashed by lanes)
+	# ...
+	# +0x280
+
+	vbroadcasti128	.Lsigma(%rip),$xa3	# key[0]
+	vbroadcasti128	($key),$xb3		# key[1]
+	vbroadcasti128	16($key),$xt3		# key[2]
+	vbroadcasti128	($counter),$xd3		# key[3]
+	lea		0x100(%rsp),%rcx	# size optimization
+	lea		0x200(%rsp),%rax	# size optimization
+	lea		.Lrot16(%rip),%r10
+	lea		.Lrot24(%rip),%r11
+
+	vpshufd		\$0x00,$xa3,$xa0	# smash key by lanes...
+	vpshufd		\$0x55,$xa3,$xa1
+	vmovdqa		$xa0,0x80-0x100(%rcx)	# ... and offload
+	vpshufd		\$0xaa,$xa3,$xa2
+	vmovdqa		$xa1,0xa0-0x100(%rcx)
+	vpshufd		\$0xff,$xa3,$xa3
+	vmovdqa		$xa2,0xc0-0x100(%rcx)
+	vmovdqa		$xa3,0xe0-0x100(%rcx)
+
+	vpshufd		\$0x00,$xb3,$xb0
+	vpshufd		\$0x55,$xb3,$xb1
+	vmovdqa		$xb0,0x100-0x100(%rcx)
+	vpshufd		\$0xaa,$xb3,$xb2
+	vmovdqa		$xb1,0x120-0x100(%rcx)
+	vpshufd		\$0xff,$xb3,$xb3
+	vmovdqa		$xb2,0x140-0x100(%rcx)
+	vmovdqa		$xb3,0x160-0x100(%rcx)
+
+	vpshufd		\$0x00,$xt3,$xt0	# "xc0"
+	vpshufd		\$0x55,$xt3,$xt1	# "xc1"
+	vmovdqa		$xt0,0x180-0x200(%rax)
+	vpshufd		\$0xaa,$xt3,$xt2	# "xc2"
+	vmovdqa		$xt1,0x1a0-0x200(%rax)
+	vpshufd		\$0xff,$xt3,$xt3	# "xc3"
+	vmovdqa		$xt2,0x1c0-0x200(%rax)
+	vmovdqa		$xt3,0x1e0-0x200(%rax)
+
+	vpshufd		\$0x00,$xd3,$xd0
+	vpshufd		\$0x55,$xd3,$xd1
+	vpaddd		.Lincy(%rip),$xd0,$xd0	# don't save counters yet
+	vpshufd		\$0xaa,$xd3,$xd2
+	vmovdqa		$xd1,0x220-0x200(%rax)
+	vpshufd		\$0xff,$xd3,$xd3
+	vmovdqa		$xd2,0x240-0x200(%rax)
+	vmovdqa		$xd3,0x260-0x200(%rax)
+
+	jmp		.Loop_enter8x
+
+.align	32
+.Loop_outer8x:
+	vmovdqa		0x80-0x100(%rcx),$xa0	# re-load smashed key
+	vmovdqa		0xa0-0x100(%rcx),$xa1
+	vmovdqa		0xc0-0x100(%rcx),$xa2
+	vmovdqa		0xe0-0x100(%rcx),$xa3
+	vmovdqa		0x100-0x100(%rcx),$xb0
+	vmovdqa		0x120-0x100(%rcx),$xb1
+	vmovdqa		0x140-0x100(%rcx),$xb2
+	vmovdqa		0x160-0x100(%rcx),$xb3
+	vmovdqa		0x180-0x200(%rax),$xt0	# "xc0"
+	vmovdqa		0x1a0-0x200(%rax),$xt1	# "xc1"
+	vmovdqa		0x1c0-0x200(%rax),$xt2	# "xc2"
+	vmovdqa		0x1e0-0x200(%rax),$xt3	# "xc3"
+	vmovdqa		0x200-0x200(%rax),$xd0
+	vmovdqa		0x220-0x200(%rax),$xd1
+	vmovdqa		0x240-0x200(%rax),$xd2
+	vmovdqa		0x260-0x200(%rax),$xd3
+	vpaddd		.Leight(%rip),$xd0,$xd0	# next SIMD counters
+
+.Loop_enter8x:
+	vmovdqa		$xt2,0x40(%rsp)		# SIMD equivalent of "@x[10]"
+	vmovdqa		$xt3,0x60(%rsp)		# SIMD equivalent of "@x[11]"
+	vbroadcasti128	(%r10),$xt3
+	vmovdqa		$xd0,0x200-0x200(%rax)	# save SIMD counters
+	mov		\$10,%eax
+	jmp		.Loop8x
+
+.align	32
+.Loop8x:
+___
+	# 10 iterations of (column round, diagonal round) = 20 ChaCha rounds.
+	# AVX2_lane_ROUND (defined earlier in the file) returns an interleaved
+	# instruction schedule that each eval appends to $code.
+	foreach (&AVX2_lane_ROUND(0, 4, 8,12)) { eval; }	# column round
+	foreach (&AVX2_lane_ROUND(0, 5,10,15)) { eval; }	# diagonal round
+# After 20 rounds: add the saved key material back in, then transpose the
+# lane-smashed representation (one state word per register, 8 blocks wide)
+# back into byte-contiguous 64-byte blocks via dword/qword unpacks followed
+# by 128-bit permutes.
+$code.=<<___;
+	dec		%eax
+	jnz		.Loop8x
+
+	lea		0x200(%rsp),%rax	# size optimization
+	vpaddd		0x80-0x100(%rcx),$xa0,$xa0	# accumulate key
+	vpaddd		0xa0-0x100(%rcx),$xa1,$xa1
+	vpaddd		0xc0-0x100(%rcx),$xa2,$xa2
+	vpaddd		0xe0-0x100(%rcx),$xa3,$xa3
+
+	vpunpckldq	$xa1,$xa0,$xt2		# "de-interlace" data
+	vpunpckldq	$xa3,$xa2,$xt3
+	vpunpckhdq	$xa1,$xa0,$xa0
+	vpunpckhdq	$xa3,$xa2,$xa2
+	vpunpcklqdq	$xt3,$xt2,$xa1		# "a0"
+	vpunpckhqdq	$xt3,$xt2,$xt2		# "a1"
+	vpunpcklqdq	$xa2,$xa0,$xa3		# "a2"
+	vpunpckhqdq	$xa2,$xa0,$xa0		# "a3"
+___
+	# Rename Perl-level register variables to match the transposed layout
+	# (the comments in the heredoc above show which physical register now
+	# holds which logical value).
+	($xa0,$xa1,$xa2,$xa3,$xt2)=($xa1,$xt2,$xa3,$xa0,$xa2);
+$code.=<<___;
+	vpaddd		0x100-0x100(%rcx),$xb0,$xb0
+	vpaddd		0x120-0x100(%rcx),$xb1,$xb1
+	vpaddd		0x140-0x100(%rcx),$xb2,$xb2
+	vpaddd		0x160-0x100(%rcx),$xb3,$xb3
+
+	vpunpckldq	$xb1,$xb0,$xt2
+	vpunpckldq	$xb3,$xb2,$xt3
+	vpunpckhdq	$xb1,$xb0,$xb0
+	vpunpckhdq	$xb3,$xb2,$xb2
+	vpunpcklqdq	$xt3,$xt2,$xb1		# "b0"
+	vpunpckhqdq	$xt3,$xt2,$xt2		# "b1"
+	vpunpcklqdq	$xb2,$xb0,$xb3		# "b2"
+	vpunpckhqdq	$xb2,$xb0,$xb0		# "b3"
+___
+	($xb0,$xb1,$xb2,$xb3,$xt2)=($xb1,$xt2,$xb3,$xb0,$xb2);	# rename after transpose
+$code.=<<___;
+	vperm2i128	\$0x20,$xb0,$xa0,$xt3	# "de-interlace" further
+	vperm2i128	\$0x31,$xb0,$xa0,$xb0
+	vperm2i128	\$0x20,$xb1,$xa1,$xa0
+	vperm2i128	\$0x31,$xb1,$xa1,$xb1
+	vperm2i128	\$0x20,$xb2,$xa2,$xa1
+	vperm2i128	\$0x31,$xb2,$xa2,$xb2
+	vperm2i128	\$0x20,$xb3,$xa3,$xa2
+	vperm2i128	\$0x31,$xb3,$xa3,$xb3
+___
+	($xa0,$xa1,$xa2,$xa3,$xt3)=($xt3,$xa0,$xa1,$xa2,$xa3);	# rename after 128-bit permute
+	# The "c" rows were kept in $xt0/$xt1 and on the stack; borrow $xa0/$xa1
+	# (just offloaded below) as scratch for them.
+	my ($xc0,$xc1,$xc2,$xc3)=($xt0,$xt1,$xa0,$xa1);
+$code.=<<___;
+	vmovdqa		$xa0,0x00(%rsp)		# offload $xaN
+	vmovdqa		$xa1,0x20(%rsp)
+	vmovdqa		0x40(%rsp),$xc2		# $xa0
+	vmovdqa		0x60(%rsp),$xc3		# $xa1
+
+	vpaddd		0x180-0x200(%rax),$xc0,$xc0
+	vpaddd		0x1a0-0x200(%rax),$xc1,$xc1
+	vpaddd		0x1c0-0x200(%rax),$xc2,$xc2
+	vpaddd		0x1e0-0x200(%rax),$xc3,$xc3
+
+	vpunpckldq	$xc1,$xc0,$xt2
+	vpunpckldq	$xc3,$xc2,$xt3
+	vpunpckhdq	$xc1,$xc0,$xc0
+	vpunpckhdq	$xc3,$xc2,$xc2
+	vpunpcklqdq	$xt3,$xt2,$xc1		# "c0"
+	vpunpckhqdq	$xt3,$xt2,$xt2		# "c1"
+	vpunpcklqdq	$xc2,$xc0,$xc3		# "c2"
+	vpunpckhqdq	$xc2,$xc0,$xc0		# "c3"
+___
+	($xc0,$xc1,$xc2,$xc3,$xt2)=($xc1,$xt2,$xc3,$xc0,$xc2);	# rename after transpose
+$code.=<<___;
+	vpaddd		0x200-0x200(%rax),$xd0,$xd0
+	vpaddd		0x220-0x200(%rax),$xd1,$xd1
+	vpaddd		0x240-0x200(%rax),$xd2,$xd2
+	vpaddd		0x260-0x200(%rax),$xd3,$xd3
+
+	vpunpckldq	$xd1,$xd0,$xt2
+	vpunpckldq	$xd3,$xd2,$xt3
+	vpunpckhdq	$xd1,$xd0,$xd0
+	vpunpckhdq	$xd3,$xd2,$xd2
+	vpunpcklqdq	$xt3,$xt2,$xd1		# "d0"
+	vpunpckhqdq	$xt3,$xt2,$xt2		# "d1"
+	vpunpcklqdq	$xd2,$xd0,$xd3		# "d2"
+	vpunpckhqdq	$xd2,$xd0,$xd0		# "d3"
+___
+	($xd0,$xd1,$xd2,$xd3,$xt2)=($xd1,$xt2,$xd3,$xd0,$xd2);	# rename after transpose
+$code.=<<___;
+	vperm2i128	\$0x20,$xd0,$xc0,$xt3	# "de-interlace" further
+	vperm2i128	\$0x31,$xd0,$xc0,$xd0
+	vperm2i128	\$0x20,$xd1,$xc1,$xc0
+	vperm2i128	\$0x31,$xd1,$xc1,$xd1
+	vperm2i128	\$0x20,$xd2,$xc2,$xc1
+	vperm2i128	\$0x31,$xd2,$xc2,$xd2
+	vperm2i128	\$0x20,$xd3,$xc3,$xc2
+	vperm2i128	\$0x31,$xd3,$xc3,$xd3
+___
+	# Final renames: restore the conventional a/b/c/d naming so the
+	# store/tail code below can address whole 64-byte blocks in order.
+	($xc0,$xc1,$xc2,$xc3,$xt3)=($xt3,$xc0,$xc1,$xc2,$xc3);
+	($xb0,$xb1,$xb2,$xb3,$xc0,$xc1,$xc2,$xc3)=
+	($xc0,$xc1,$xc2,$xc3,$xb0,$xb1,$xb2,$xb3);
+	($xa0,$xa1)=($xt2,$xt3);
+# Full-block path: XOR eight 64-byte blocks with input and store; otherwise
+# fall through to .Ltail8x, which handles each 64-byte multiple explicitly
+# and finishes any sub-64-byte remainder one byte at a time via .Loop_tail8x
+# (keystream staged at %rsp, %r10 as byte index).
+$code.=<<___;
+	vmovdqa		0x00(%rsp),$xa0		# $xaN was offloaded, remember?
+	vmovdqa		0x20(%rsp),$xa1
+
+	cmp		\$64*8,$len
+	jb		.Ltail8x
+
+	vpxor		0x00($inp),$xa0,$xa0	# xor with input
+	vpxor		0x20($inp),$xb0,$xb0
+	vpxor		0x40($inp),$xc0,$xc0
+	vpxor		0x60($inp),$xd0,$xd0
+	lea		0x80($inp),$inp		# size optimization
+	vmovdqu		$xa0,0x00($out)
+	vmovdqu		$xb0,0x20($out)
+	vmovdqu		$xc0,0x40($out)
+	vmovdqu		$xd0,0x60($out)
+	lea		0x80($out),$out		# size optimization
+
+	vpxor		0x00($inp),$xa1,$xa1
+	vpxor		0x20($inp),$xb1,$xb1
+	vpxor		0x40($inp),$xc1,$xc1
+	vpxor		0x60($inp),$xd1,$xd1
+	lea		0x80($inp),$inp		# size optimization
+	vmovdqu		$xa1,0x00($out)
+	vmovdqu		$xb1,0x20($out)
+	vmovdqu		$xc1,0x40($out)
+	vmovdqu		$xd1,0x60($out)
+	lea		0x80($out),$out		# size optimization
+
+	vpxor		0x00($inp),$xa2,$xa2
+	vpxor		0x20($inp),$xb2,$xb2
+	vpxor		0x40($inp),$xc2,$xc2
+	vpxor		0x60($inp),$xd2,$xd2
+	lea		0x80($inp),$inp		# size optimization
+	vmovdqu		$xa2,0x00($out)
+	vmovdqu		$xb2,0x20($out)
+	vmovdqu		$xc2,0x40($out)
+	vmovdqu		$xd2,0x60($out)
+	lea		0x80($out),$out		# size optimization
+
+	vpxor		0x00($inp),$xa3,$xa3
+	vpxor		0x20($inp),$xb3,$xb3
+	vpxor		0x40($inp),$xc3,$xc3
+	vpxor		0x60($inp),$xd3,$xd3
+	lea		0x80($inp),$inp		# size optimization
+	vmovdqu		$xa3,0x00($out)
+	vmovdqu		$xb3,0x20($out)
+	vmovdqu		$xc3,0x40($out)
+	vmovdqu		$xd3,0x60($out)
+	lea		0x80($out),$out		# size optimization
+
+	sub		\$64*8,$len
+	jnz		.Loop_outer8x
+
+	jmp		.Ldone8x
+
+.Ltail8x:
+	cmp		\$448,$len
+	jae		.L448_or_more8x
+	cmp		\$384,$len
+	jae		.L384_or_more8x
+	cmp		\$320,$len
+	jae		.L320_or_more8x
+	cmp		\$256,$len
+	jae		.L256_or_more8x
+	cmp		\$192,$len
+	jae		.L192_or_more8x
+	cmp		\$128,$len
+	jae		.L128_or_more8x
+	cmp		\$64,$len
+	jae		.L64_or_more8x
+
+	xor		%r10,%r10
+	vmovdqa		$xa0,0x00(%rsp)
+	vmovdqa		$xb0,0x20(%rsp)
+	jmp		.Loop_tail8x
+
+.align	32
+.L64_or_more8x:
+	vpxor		0x00($inp),$xa0,$xa0	# xor with input
+	vpxor		0x20($inp),$xb0,$xb0
+	vmovdqu		$xa0,0x00($out)
+	vmovdqu		$xb0,0x20($out)
+	je		.Ldone8x
+
+	lea		0x40($inp),$inp		# inp+=64*1
+	xor		%r10,%r10
+	vmovdqa		$xc0,0x00(%rsp)
+	lea		0x40($out),$out		# out+=64*1
+	sub		\$64,$len		# len-=64*1
+	vmovdqa		$xd0,0x20(%rsp)
+	jmp		.Loop_tail8x
+
+.align	32
+.L128_or_more8x:
+	vpxor		0x00($inp),$xa0,$xa0	# xor with input
+	vpxor		0x20($inp),$xb0,$xb0
+	vpxor		0x40($inp),$xc0,$xc0
+	vpxor		0x60($inp),$xd0,$xd0
+	vmovdqu		$xa0,0x00($out)
+	vmovdqu		$xb0,0x20($out)
+	vmovdqu		$xc0,0x40($out)
+	vmovdqu		$xd0,0x60($out)
+	je		.Ldone8x
+
+	lea		0x80($inp),$inp		# inp+=64*2
+	xor		%r10,%r10
+	vmovdqa		$xa1,0x00(%rsp)
+	lea		0x80($out),$out		# out+=64*2
+	sub		\$128,$len		# len-=64*2
+	vmovdqa		$xb1,0x20(%rsp)
+	jmp		.Loop_tail8x
+
+.align	32
+.L192_or_more8x:
+	vpxor		0x00($inp),$xa0,$xa0	# xor with input
+	vpxor		0x20($inp),$xb0,$xb0
+	vpxor		0x40($inp),$xc0,$xc0
+	vpxor		0x60($inp),$xd0,$xd0
+	vpxor		0x80($inp),$xa1,$xa1
+	vpxor		0xa0($inp),$xb1,$xb1
+	vmovdqu		$xa0,0x00($out)
+	vmovdqu		$xb0,0x20($out)
+	vmovdqu		$xc0,0x40($out)
+	vmovdqu		$xd0,0x60($out)
+	vmovdqu		$xa1,0x80($out)
+	vmovdqu		$xb1,0xa0($out)
+	je		.Ldone8x
+
+	lea		0xc0($inp),$inp		# inp+=64*3
+	xor		%r10,%r10
+	vmovdqa		$xc1,0x00(%rsp)
+	lea		0xc0($out),$out		# out+=64*3
+	sub		\$192,$len		# len-=64*3
+	vmovdqa		$xd1,0x20(%rsp)
+	jmp		.Loop_tail8x
+
+.align	32
+.L256_or_more8x:
+	vpxor		0x00($inp),$xa0,$xa0	# xor with input
+	vpxor		0x20($inp),$xb0,$xb0
+	vpxor		0x40($inp),$xc0,$xc0
+	vpxor		0x60($inp),$xd0,$xd0
+	vpxor		0x80($inp),$xa1,$xa1
+	vpxor		0xa0($inp),$xb1,$xb1
+	vpxor		0xc0($inp),$xc1,$xc1
+	vpxor		0xe0($inp),$xd1,$xd1
+	vmovdqu		$xa0,0x00($out)
+	vmovdqu		$xb0,0x20($out)
+	vmovdqu		$xc0,0x40($out)
+	vmovdqu		$xd0,0x60($out)
+	vmovdqu		$xa1,0x80($out)
+	vmovdqu		$xb1,0xa0($out)
+	vmovdqu		$xc1,0xc0($out)
+	vmovdqu		$xd1,0xe0($out)
+	je		.Ldone8x
+
+	lea		0x100($inp),$inp	# inp+=64*4
+	xor		%r10,%r10
+	vmovdqa		$xa2,0x00(%rsp)
+	lea		0x100($out),$out	# out+=64*4
+	sub		\$256,$len		# len-=64*4
+	vmovdqa		$xb2,0x20(%rsp)
+	jmp		.Loop_tail8x
+
+.align	32
+.L320_or_more8x:
+	vpxor		0x00($inp),$xa0,$xa0	# xor with input
+	vpxor		0x20($inp),$xb0,$xb0
+	vpxor		0x40($inp),$xc0,$xc0
+	vpxor		0x60($inp),$xd0,$xd0
+	vpxor		0x80($inp),$xa1,$xa1
+	vpxor		0xa0($inp),$xb1,$xb1
+	vpxor		0xc0($inp),$xc1,$xc1
+	vpxor		0xe0($inp),$xd1,$xd1
+	vpxor		0x100($inp),$xa2,$xa2
+	vpxor		0x120($inp),$xb2,$xb2
+	vmovdqu		$xa0,0x00($out)
+	vmovdqu		$xb0,0x20($out)
+	vmovdqu		$xc0,0x40($out)
+	vmovdqu		$xd0,0x60($out)
+	vmovdqu		$xa1,0x80($out)
+	vmovdqu		$xb1,0xa0($out)
+	vmovdqu		$xc1,0xc0($out)
+	vmovdqu		$xd1,0xe0($out)
+	vmovdqu		$xa2,0x100($out)
+	vmovdqu		$xb2,0x120($out)
+	je		.Ldone8x
+
+	lea		0x140($inp),$inp	# inp+=64*5
+	xor		%r10,%r10
+	vmovdqa		$xc2,0x00(%rsp)
+	lea		0x140($out),$out	# out+=64*5
+	sub		\$320,$len		# len-=64*5
+	vmovdqa		$xd2,0x20(%rsp)
+	jmp		.Loop_tail8x
+
+.align	32
+.L384_or_more8x:
+	vpxor		0x00($inp),$xa0,$xa0	# xor with input
+	vpxor		0x20($inp),$xb0,$xb0
+	vpxor		0x40($inp),$xc0,$xc0
+	vpxor		0x60($inp),$xd0,$xd0
+	vpxor		0x80($inp),$xa1,$xa1
+	vpxor		0xa0($inp),$xb1,$xb1
+	vpxor		0xc0($inp),$xc1,$xc1
+	vpxor		0xe0($inp),$xd1,$xd1
+	vpxor		0x100($inp),$xa2,$xa2
+	vpxor		0x120($inp),$xb2,$xb2
+	vpxor		0x140($inp),$xc2,$xc2
+	vpxor		0x160($inp),$xd2,$xd2
+	vmovdqu		$xa0,0x00($out)
+	vmovdqu		$xb0,0x20($out)
+	vmovdqu		$xc0,0x40($out)
+	vmovdqu		$xd0,0x60($out)
+	vmovdqu		$xa1,0x80($out)
+	vmovdqu		$xb1,0xa0($out)
+	vmovdqu		$xc1,0xc0($out)
+	vmovdqu		$xd1,0xe0($out)
+	vmovdqu		$xa2,0x100($out)
+	vmovdqu		$xb2,0x120($out)
+	vmovdqu		$xc2,0x140($out)
+	vmovdqu		$xd2,0x160($out)
+	je		.Ldone8x
+
+	lea		0x180($inp),$inp	# inp+=64*6
+	xor		%r10,%r10
+	vmovdqa		$xa3,0x00(%rsp)
+	lea		0x180($out),$out	# out+=64*6
+	sub		\$384,$len		# len-=64*6
+	vmovdqa		$xb3,0x20(%rsp)
+	jmp		.Loop_tail8x
+
+.align	32
+.L448_or_more8x:
+	vpxor		0x00($inp),$xa0,$xa0	# xor with input
+	vpxor		0x20($inp),$xb0,$xb0
+	vpxor		0x40($inp),$xc0,$xc0
+	vpxor		0x60($inp),$xd0,$xd0
+	vpxor		0x80($inp),$xa1,$xa1
+	vpxor		0xa0($inp),$xb1,$xb1
+	vpxor		0xc0($inp),$xc1,$xc1
+	vpxor		0xe0($inp),$xd1,$xd1
+	vpxor		0x100($inp),$xa2,$xa2
+	vpxor		0x120($inp),$xb2,$xb2
+	vpxor		0x140($inp),$xc2,$xc2
+	vpxor		0x160($inp),$xd2,$xd2
+	vpxor		0x180($inp),$xa3,$xa3
+	vpxor		0x1a0($inp),$xb3,$xb3
+	vmovdqu		$xa0,0x00($out)
+	vmovdqu		$xb0,0x20($out)
+	vmovdqu		$xc0,0x40($out)
+	vmovdqu		$xd0,0x60($out)
+	vmovdqu		$xa1,0x80($out)
+	vmovdqu		$xb1,0xa0($out)
+	vmovdqu		$xc1,0xc0($out)
+	vmovdqu		$xd1,0xe0($out)
+	vmovdqu		$xa2,0x100($out)
+	vmovdqu		$xb2,0x120($out)
+	vmovdqu		$xc2,0x140($out)
+	vmovdqu		$xd2,0x160($out)
+	vmovdqu		$xa3,0x180($out)
+	vmovdqu		$xb3,0x1a0($out)
+	je		.Ldone8x
+
+	lea		0x1c0($inp),$inp	# inp+=64*7
+	xor		%r10,%r10
+	vmovdqa		$xc3,0x00(%rsp)
+	lea		0x1c0($out),$out	# out+=64*7
+	sub		\$448,$len		# len-=64*7
+	vmovdqa		$xd3,0x20(%rsp)
+
+.Loop_tail8x:
+	movzb		($inp,%r10),%eax
+	movzb		(%rsp,%r10),%ecx
+	lea		1(%r10),%r10
+	xor		%ecx,%eax
+	mov		%al,-1($out,%r10)
+	dec		$len
+	jnz		.Loop_tail8x
+
+.Ldone8x:
+	vzeroall
+___
+# Win64 epilogue: restore the callee-saved xmm registers spilled in the
+# prologue before tearing down the frame.
+$code.=<<___	if ($win64);
+	movaps		-0xa8(%r9),%xmm6
+	movaps		-0x98(%r9),%xmm7
+	movaps		-0x88(%r9),%xmm8
+	movaps		-0x78(%r9),%xmm9
+	movaps		-0x68(%r9),%xmm10
+	movaps		-0x58(%r9),%xmm11
+	movaps		-0x48(%r9),%xmm12
+	movaps		-0x38(%r9),%xmm13
+	movaps		-0x28(%r9),%xmm14
+	movaps		-0x18(%r9),%xmm15
+___
+# Common epilogue: %r9 still holds the caller's %rsp.
+$code.=<<___;
+	lea		(%r9),%rsp
+.cfi_def_cfa_register	%rsp
+.L8x_epilogue:
+	ret
+.cfi_endproc
+.size	ChaCha20_8x,.-ChaCha20_8x
+___
+}
+
+########################################################################
+# AVX512 code paths
+if ($avx>2) {
+# This one handles shorter inputs...
+
+# Register assignment for the 1x AVX-512 path: working state rows $a..$d in
+# %zmm0-3, per-iteration saved copies $a_..$d_ plus the counter-increment
+# vector $fourz in %zmm16-20, and %xmm4-7 as output staging scratch.
+my ($a,$b,$c,$d, $a_,$b_,$c_,$d_,$fourz) = map("%zmm$_",(0..3,16..20));
+my ($t0,$t1,$t2,$t3) = map("%xmm$_",(4..7));
+
+sub vpxord()		# size optimization
+{
+    # Emit "vpxor" (shorter VEX encoding) unless some operand forces the
+    # EVEX form: any %zmm register, or a %ymm/%zmm register numbered >=16.
+    my $mnemonic = "vpxor";
+    for my $operand (@_) {
+	next unless $operand =~ /%([zy])mm([0-9]+)/;
+	if ($1 eq "z" || $2>=16) {
+	    $mnemonic = "vpxord";
+	    last;
+	}
+    }
+
+    # Operands arrive in Intel-ish order; reverse them for AT&T syntax.
+    $code .= "\t$mnemonic\t".join(',',reverse @_)."\n";
+}
+
+sub AVX512ROUND {	# critical path is 14 "SIMD ticks" per round
+	# One ChaCha quarter-round over the closed-over state rows
+	# ($a,$b,$c,$d), written as a table of (add-dst, add-src,
+	# xor/rotate-target, rotate-count) steps.  Emission order is
+	# identical to the fully unrolled form.  The mnemonic helpers
+	# (&vpaddd/&vprold) resolve outside this sub — presumably via the
+	# perlasm AUTOLOAD thunk defined earlier in the file; confirm there.
+	foreach my $step ([$a,$b,$d,16],[$c,$d,$b,12],[$a,$b,$d,8],[$c,$d,$b,7]) {
+		my ($x,$y,$z,$r) = @$step;
+		&vpaddd	($x,$x,$y);	# x += y
+		&vpxord	($z,$z,$x);	# z ^= x
+		&vprold	($z,$z,$r);	# z <<<= r
+	}
+}
+
+# Stack needed beyond the 64-byte tail buffer: xmm spill area on Win64,
+# alignment slack elsewhere.
+my $xframe = $win64 ? 160+8 : 8;
+
+# ChaCha20_avx512: single-block AVX-512F path for inputs up to 512 bytes;
+# longer inputs are diverted to the 16x path.  One zmm register holds four
+# consecutive block states per row (counter pre-advanced by .Lzeroz/.Lfourz).
+$code.=<<___;
+.type	ChaCha20_avx512,\@function,5
+.align	32
+ChaCha20_avx512:
+.cfi_startproc
+.LChaCha20_avx512:
+	mov	%rsp,%r9		# frame pointer
+.cfi_def_cfa_register	%r9
+	cmp	\$512,$len
+	ja	.LChaCha20_16x
+
+	sub	\$64+$xframe,%rsp
+___
+$code.=<<___	if ($win64);
+	movaps	%xmm6,-0xa8(%r9)
+	movaps	%xmm7,-0x98(%r9)
+	movaps	%xmm8,-0x88(%r9)
+	movaps	%xmm9,-0x78(%r9)
+	movaps	%xmm10,-0x68(%r9)
+	movaps	%xmm11,-0x58(%r9)
+	movaps	%xmm12,-0x48(%r9)
+	movaps	%xmm13,-0x38(%r9)
+	movaps	%xmm14,-0x28(%r9)
+	movaps	%xmm15,-0x18(%r9)
+.Lavx512_body:
+___
+$code.=<<___;
+	vbroadcasti32x4	.Lsigma(%rip),$a
+	vbroadcasti32x4	($key),$b
+	vbroadcasti32x4	16($key),$c
+	vbroadcasti32x4	($counter),$d
+
+	vmovdqa32	$a,$a_
+	vmovdqa32	$b,$b_
+	vmovdqa32	$c,$c_
+	vpaddd		.Lzeroz(%rip),$d,$d
+	vmovdqa32	.Lfourz(%rip),$fourz
+	mov		\$10,$counter	# reuse $counter
+	vmovdqa32	$d,$d_
+	jmp		.Loop_avx512
+
+.align	16
+.Loop_outer_avx512:
+	vmovdqa32	$a_,$a
+	vmovdqa32	$b_,$b
+	vmovdqa32	$c_,$c
+	vpaddd		$fourz,$d_,$d
+	mov		\$10,$counter
+	vmovdqa32	$d,$d_
+	jmp		.Loop_avx512
+
+.align	32
+.Loop_avx512:
+___
+	# Double round: quarter-round, rotate rows into diagonal position,
+	# quarter-round again, rotate back.  10 iterations = 20 rounds.
+	&AVX512ROUND();
+	&vpshufd	($c,$c,0b01001110);
+	&vpshufd	($b,$b,0b00111001);
+	&vpshufd	($d,$d,0b10010011);
+
+	&AVX512ROUND();
+	&vpshufd	($c,$c,0b01001110);
+	&vpshufd	($b,$b,0b10010011);
+	&vpshufd	($d,$d,0b00111001);
+
+	&dec		($counter);
+	&jnz		(".Loop_avx512");
+
+# Feed-forward, then emit up to four 64-byte blocks: lane 0 straight from
+# the low 128 bits, lanes 1-3 via vextracti32x4.  Partial blocks fall into
+# .Ltail*_avx512, which stages keystream at %rsp and XORs byte-by-byte.
+$code.=<<___;
+	vpaddd		$a_,$a,$a
+	vpaddd		$b_,$b,$b
+	vpaddd		$c_,$c,$c
+	vpaddd		$d_,$d,$d
+
+	sub		\$64,$len
+	jb		.Ltail64_avx512
+
+	vpxor		0x00($inp),%x#$a,$t0	# xor with input
+	vpxor		0x10($inp),%x#$b,$t1
+	vpxor		0x20($inp),%x#$c,$t2
+	vpxor		0x30($inp),%x#$d,$t3
+	lea		0x40($inp),$inp		# inp+=64
+
+	vmovdqu		$t0,0x00($out)		# write output
+	vmovdqu		$t1,0x10($out)
+	vmovdqu		$t2,0x20($out)
+	vmovdqu		$t3,0x30($out)
+	lea		0x40($out),$out		# out+=64
+
+	jz		.Ldone_avx512
+
+	vextracti32x4	\$1,$a,$t0
+	vextracti32x4	\$1,$b,$t1
+	vextracti32x4	\$1,$c,$t2
+	vextracti32x4	\$1,$d,$t3
+
+	sub		\$64,$len
+	jb		.Ltail_avx512
+
+	vpxor		0x00($inp),$t0,$t0	# xor with input
+	vpxor		0x10($inp),$t1,$t1
+	vpxor		0x20($inp),$t2,$t2
+	vpxor		0x30($inp),$t3,$t3
+	lea		0x40($inp),$inp		# inp+=64
+
+	vmovdqu		$t0,0x00($out)		# write output
+	vmovdqu		$t1,0x10($out)
+	vmovdqu		$t2,0x20($out)
+	vmovdqu		$t3,0x30($out)
+	lea		0x40($out),$out		# out+=64
+
+	jz		.Ldone_avx512
+
+	vextracti32x4	\$2,$a,$t0
+	vextracti32x4	\$2,$b,$t1
+	vextracti32x4	\$2,$c,$t2
+	vextracti32x4	\$2,$d,$t3
+
+	sub		\$64,$len
+	jb		.Ltail_avx512
+
+	vpxor		0x00($inp),$t0,$t0	# xor with input
+	vpxor		0x10($inp),$t1,$t1
+	vpxor		0x20($inp),$t2,$t2
+	vpxor		0x30($inp),$t3,$t3
+	lea		0x40($inp),$inp		# inp+=64
+
+	vmovdqu		$t0,0x00($out)		# write output
+	vmovdqu		$t1,0x10($out)
+	vmovdqu		$t2,0x20($out)
+	vmovdqu		$t3,0x30($out)
+	lea		0x40($out),$out		# out+=64
+
+	jz		.Ldone_avx512
+
+	vextracti32x4	\$3,$a,$t0
+	vextracti32x4	\$3,$b,$t1
+	vextracti32x4	\$3,$c,$t2
+	vextracti32x4	\$3,$d,$t3
+
+	sub		\$64,$len
+	jb		.Ltail_avx512
+
+	vpxor		0x00($inp),$t0,$t0	# xor with input
+	vpxor		0x10($inp),$t1,$t1
+	vpxor		0x20($inp),$t2,$t2
+	vpxor		0x30($inp),$t3,$t3
+	lea		0x40($inp),$inp		# inp+=64
+
+	vmovdqu		$t0,0x00($out)		# write output
+	vmovdqu		$t1,0x10($out)
+	vmovdqu		$t2,0x20($out)
+	vmovdqu		$t3,0x30($out)
+	lea		0x40($out),$out		# out+=64
+
+	jnz		.Loop_outer_avx512
+
+	jmp		.Ldone_avx512
+
+.align	16
+.Ltail64_avx512:
+	vmovdqa		%x#$a,0x00(%rsp)
+	vmovdqa		%x#$b,0x10(%rsp)
+	vmovdqa		%x#$c,0x20(%rsp)
+	vmovdqa		%x#$d,0x30(%rsp)
+	add		\$64,$len
+	jmp		.Loop_tail_avx512
+
+.align	16
+.Ltail_avx512:
+	vmovdqa		$t0,0x00(%rsp)
+	vmovdqa		$t1,0x10(%rsp)
+	vmovdqa		$t2,0x20(%rsp)
+	vmovdqa		$t3,0x30(%rsp)
+	add		\$64,$len
+
+.Loop_tail_avx512:
+	movzb		($inp,$counter),%eax
+	movzb		(%rsp,$counter),%ecx
+	lea		1($counter),$counter
+	xor		%ecx,%eax
+	mov		%al,-1($out,$counter)
+	dec		$len
+	jnz		.Loop_tail_avx512
+
+	vmovdqu32	$a_,0x00(%rsp)
+
+.Ldone_avx512:
+	vzeroall
+___
+$code.=<<___	if ($win64);
+	movaps	-0xa8(%r9),%xmm6
+	movaps	-0x98(%r9),%xmm7
+	movaps	-0x88(%r9),%xmm8
+	movaps	-0x78(%r9),%xmm9
+	movaps	-0x68(%r9),%xmm10
+	movaps	-0x58(%r9),%xmm11
+	movaps	-0x48(%r9),%xmm12
+	movaps	-0x38(%r9),%xmm13
+	movaps	-0x28(%r9),%xmm14
+	movaps	-0x18(%r9),%xmm15
+___
+$code.=<<___;
+	lea	(%r9),%rsp
+.cfi_def_cfa_register	%rsp
+.Lavx512_epilogue:
+	ret
+.cfi_endproc
+.size	ChaCha20_avx512,.-ChaCha20_avx512
+___
+
+# AVX512VL variant: same algorithm on 256-bit registers.  Rebind the
+# register-name variables from %zmm to %ymm in place; the emitted template
+# below is otherwise shared with the zmm path.
+map(s/%z/%y/, $a,$b,$c,$d, $a_,$b_,$c_,$d_,$fourz);
+
+# ChaCha20_avx512vl: handles up to 128 bytes (two blocks per ymm row);
+# longer inputs go to the 8xvl path.
+$code.=<<___;
+.type	ChaCha20_avx512vl,\@function,5
+.align	32
+ChaCha20_avx512vl:
+.cfi_startproc
+.LChaCha20_avx512vl:
+	mov	%rsp,%r9		# frame pointer
+.cfi_def_cfa_register	%r9
+	cmp	\$128,$len
+	ja	.LChaCha20_8xvl
+
+	sub	\$64+$xframe,%rsp
+___
+$code.=<<___	if ($win64);
+	movaps	%xmm6,-0xa8(%r9)
+	movaps	%xmm7,-0x98(%r9)
+	movaps	%xmm8,-0x88(%r9)
+	movaps	%xmm9,-0x78(%r9)
+	movaps	%xmm10,-0x68(%r9)
+	movaps	%xmm11,-0x58(%r9)
+	movaps	%xmm12,-0x48(%r9)
+	movaps	%xmm13,-0x38(%r9)
+	movaps	%xmm14,-0x28(%r9)
+	movaps	%xmm15,-0x18(%r9)
+.Lavx512vl_body:
+___
+$code.=<<___;
+	vbroadcasti128	.Lsigma(%rip),$a
+	vbroadcasti128	($key),$b
+	vbroadcasti128	16($key),$c
+	vbroadcasti128	($counter),$d
+
+	vmovdqa32	$a,$a_
+	vmovdqa32	$b,$b_
+	vmovdqa32	$c,$c_
+	vpaddd		.Lzeroz(%rip),$d,$d
+	vmovdqa32	.Ltwoy(%rip),$fourz
+	mov		\$10,$counter	# reuse $counter
+	vmovdqa32	$d,$d_
+	jmp		.Loop_avx512vl
+
+.align	16
+.Loop_outer_avx512vl:
+	vmovdqa32	$c_,$c
+	vpaddd		$fourz,$d_,$d
+	mov		\$10,$counter
+	vmovdqa32	$d,$d_
+	jmp		.Loop_avx512vl
+
+.align	32
+.Loop_avx512vl:
+___
+	# Same double-round schedule as the zmm path; 10 iterations.
+	&AVX512ROUND();
+	&vpshufd	($c,$c,0b01001110);
+	&vpshufd	($b,$b,0b00111001);
+	&vpshufd	($d,$d,0b10010011);
+
+	&AVX512ROUND();
+	&vpshufd	($c,$c,0b01001110);
+	&vpshufd	($b,$b,0b10010011);
+	&vpshufd	($d,$d,0b00111001);
+
+	&dec		($counter);
+	&jnz		(".Loop_avx512vl");
+
+# Feed-forward and emit up to two 64-byte blocks (low lane, then the high
+# lane via vextracti128); partial blocks drain through the byte-wise tail.
+$code.=<<___;
+	vpaddd		$a_,$a,$a
+	vpaddd		$b_,$b,$b
+	vpaddd		$c_,$c,$c
+	vpaddd		$d_,$d,$d
+
+	sub		\$64,$len
+	jb		.Ltail64_avx512vl
+
+	vpxor		0x00($inp),%x#$a,$t0	# xor with input
+	vpxor		0x10($inp),%x#$b,$t1
+	vpxor		0x20($inp),%x#$c,$t2
+	vpxor		0x30($inp),%x#$d,$t3
+	lea		0x40($inp),$inp		# inp+=64
+
+	vmovdqu		$t0,0x00($out)		# write output
+	vmovdqu		$t1,0x10($out)
+	vmovdqu		$t2,0x20($out)
+	vmovdqu		$t3,0x30($out)
+	lea		0x40($out),$out		# out+=64
+
+	jz		.Ldone_avx512vl
+
+	vextracti128	\$1,$a,$t0
+	vextracti128	\$1,$b,$t1
+	vextracti128	\$1,$c,$t2
+	vextracti128	\$1,$d,$t3
+
+	sub		\$64,$len
+	jb		.Ltail_avx512vl
+
+	vpxor		0x00($inp),$t0,$t0	# xor with input
+	vpxor		0x10($inp),$t1,$t1
+	vpxor		0x20($inp),$t2,$t2
+	vpxor		0x30($inp),$t3,$t3
+	lea		0x40($inp),$inp		# inp+=64
+
+	vmovdqu		$t0,0x00($out)		# write output
+	vmovdqu		$t1,0x10($out)
+	vmovdqu		$t2,0x20($out)
+	vmovdqu		$t3,0x30($out)
+	lea		0x40($out),$out		# out+=64
+
+	vmovdqa32	$a_,$a
+	vmovdqa32	$b_,$b
+	jnz		.Loop_outer_avx512vl
+
+	jmp		.Ldone_avx512vl
+
+.align	16
+.Ltail64_avx512vl:
+	vmovdqa		%x#$a,0x00(%rsp)
+	vmovdqa		%x#$b,0x10(%rsp)
+	vmovdqa		%x#$c,0x20(%rsp)
+	vmovdqa		%x#$d,0x30(%rsp)
+	add		\$64,$len
+	jmp		.Loop_tail_avx512vl
+
+.align	16
+.Ltail_avx512vl:
+	vmovdqa		$t0,0x00(%rsp)
+	vmovdqa		$t1,0x10(%rsp)
+	vmovdqa		$t2,0x20(%rsp)
+	vmovdqa		$t3,0x30(%rsp)
+	add		\$64,$len
+
+.Loop_tail_avx512vl:
+	movzb		($inp,$counter),%eax
+	movzb		(%rsp,$counter),%ecx
+	lea		1($counter),$counter
+	xor		%ecx,%eax
+	mov		%al,-1($out,$counter)
+	dec		$len
+	jnz		.Loop_tail_avx512vl
+
+	vmovdqu32	$a_,0x00(%rsp)
+	vmovdqu32	$a_,0x20(%rsp)
+
+.Ldone_avx512vl:
+	vzeroall
+___
+$code.=<<___	if ($win64);
+	movaps	-0xa8(%r9),%xmm6
+	movaps	-0x98(%r9),%xmm7
+	movaps	-0x88(%r9),%xmm8
+	movaps	-0x78(%r9),%xmm9
+	movaps	-0x68(%r9),%xmm10
+	movaps	-0x58(%r9),%xmm11
+	movaps	-0x48(%r9),%xmm12
+	movaps	-0x38(%r9),%xmm13
+	movaps	-0x28(%r9),%xmm14
+	movaps	-0x18(%r9),%xmm15
+___
+$code.=<<___;
+	lea	(%r9),%rsp
+.cfi_def_cfa_register	%rsp
+.Lavx512vl_epilogue:
+	ret
+.cfi_endproc
+.size	ChaCha20_avx512vl,.-ChaCha20_avx512vl
+___
+}
+if ($avx>2) {
+# This one handles longer inputs...
+
+# 16x AVX-512 register assignment: the full 4x4 ChaCha state lives in
+# %zmm0-15 (one state word per register, 16 blocks wide), while %zmm16-31
+# hold the lane-smashed key/counter material; @key[0..3] double as the
+# $xt0..$xt3 transpose scratch once the key copies are no longer needed.
+my ($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3,
+    $xc0,$xc1,$xc2,$xc3, $xd0,$xd1,$xd2,$xd3)=map("%zmm$_",(0..15));
+my  @xx=($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3,
+	 $xc0,$xc1,$xc2,$xc3, $xd0,$xd1,$xd2,$xd3);
+my @key=map("%zmm$_",(16..31));
+my ($xt0,$xt1,$xt2,$xt3)=@key[0..3];
+
+# Emit four interleaved ChaCha quarter-rounds (one per lane group) as a
+# list of instruction strings; the caller evals them in order, so the list
+# order IS the instruction schedule.  ($a0,$b0,$c0,$d0) are indices into
+# @xx for lane 0; the other three lanes are derived by rotating each index
+# within its group of four.
+sub AVX512_lane_ROUND {
+my ($a0,$b0,$c0,$d0)=@_;
+my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));	# lane 1: rotate within group of 4
+my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));	# lane 2
+my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));	# lane 3
+my @x=map("\"$_\"",@xx);	# quote register names for interpolation into eval'd strings
+
+	(
+	# a += b; d ^= a; d <<<= 16  (x4 lanes)
+	"&vpaddd	(@x[$a0],@x[$a0],@x[$b0])",	# Q1
+	 "&vpaddd	(@x[$a1],@x[$a1],@x[$b1])",	# Q2
+	  "&vpaddd	(@x[$a2],@x[$a2],@x[$b2])",	# Q3
+	   "&vpaddd	(@x[$a3],@x[$a3],@x[$b3])",	# Q4
+	"&vpxord	(@x[$d0],@x[$d0],@x[$a0])",
+	 "&vpxord	(@x[$d1],@x[$d1],@x[$a1])",
+	  "&vpxord	(@x[$d2],@x[$d2],@x[$a2])",
+	   "&vpxord	(@x[$d3],@x[$d3],@x[$a3])",
+	"&vprold	(@x[$d0],@x[$d0],16)",
+	 "&vprold	(@x[$d1],@x[$d1],16)",
+	  "&vprold	(@x[$d2],@x[$d2],16)",
+	   "&vprold	(@x[$d3],@x[$d3],16)",
+
+	# c += d; b ^= c; b <<<= 12
+	"&vpaddd	(@x[$c0],@x[$c0],@x[$d0])",
+	 "&vpaddd	(@x[$c1],@x[$c1],@x[$d1])",
+	  "&vpaddd	(@x[$c2],@x[$c2],@x[$d2])",
+	   "&vpaddd	(@x[$c3],@x[$c3],@x[$d3])",
+	"&vpxord	(@x[$b0],@x[$b0],@x[$c0])",
+	 "&vpxord	(@x[$b1],@x[$b1],@x[$c1])",
+	  "&vpxord	(@x[$b2],@x[$b2],@x[$c2])",
+	   "&vpxord	(@x[$b3],@x[$b3],@x[$c3])",
+	"&vprold	(@x[$b0],@x[$b0],12)",
+	 "&vprold	(@x[$b1],@x[$b1],12)",
+	  "&vprold	(@x[$b2],@x[$b2],12)",
+	   "&vprold	(@x[$b3],@x[$b3],12)",
+
+	# a += b; d ^= a; d <<<= 8
+	"&vpaddd	(@x[$a0],@x[$a0],@x[$b0])",
+	 "&vpaddd	(@x[$a1],@x[$a1],@x[$b1])",
+	  "&vpaddd	(@x[$a2],@x[$a2],@x[$b2])",
+	   "&vpaddd	(@x[$a3],@x[$a3],@x[$b3])",
+	"&vpxord	(@x[$d0],@x[$d0],@x[$a0])",
+	 "&vpxord	(@x[$d1],@x[$d1],@x[$a1])",
+	  "&vpxord	(@x[$d2],@x[$d2],@x[$a2])",
+	   "&vpxord	(@x[$d3],@x[$d3],@x[$a3])",
+	"&vprold	(@x[$d0],@x[$d0],8)",
+	 "&vprold	(@x[$d1],@x[$d1],8)",
+	  "&vprold	(@x[$d2],@x[$d2],8)",
+	   "&vprold	(@x[$d3],@x[$d3],8)",
+
+	# c += d; b ^= c; b <<<= 7
+	"&vpaddd	(@x[$c0],@x[$c0],@x[$d0])",
+	 "&vpaddd	(@x[$c1],@x[$c1],@x[$d1])",
+	  "&vpaddd	(@x[$c2],@x[$c2],@x[$d2])",
+	   "&vpaddd	(@x[$c3],@x[$c3],@x[$d3])",
+	"&vpxord	(@x[$b0],@x[$b0],@x[$c0])",
+	 "&vpxord	(@x[$b1],@x[$b1],@x[$c1])",
+	  "&vpxord	(@x[$b2],@x[$b2],@x[$c2])",
+	   "&vpxord	(@x[$b3],@x[$b3],@x[$c3])",
+	"&vprold	(@x[$b0],@x[$b0],7)",
+	 "&vprold	(@x[$b1],@x[$b1],7)",
+	  "&vprold	(@x[$b2],@x[$b2],7)",
+	   "&vprold	(@x[$b3],@x[$b3],7)"
+	);
+}
+
+# Frame beyond the 64-byte tail buffer: xmm spill area on Win64 only.
+my $xframe = $win64 ? 0xa8 : 8;
+
+# ChaCha20_16x: 16 blocks (1024 bytes) per outer iteration.  Keys/counters
+# are smashed by lanes into @key (%zmm16-31); unlike the 8x path there is
+# enough register space to keep them all resident, so only a 64-byte
+# tail-staging area is needed on the stack.
+$code.=<<___;
+.type	ChaCha20_16x,\@function,5
+.align	32
+ChaCha20_16x:
+.cfi_startproc
+.LChaCha20_16x:
+	mov		%rsp,%r9		# frame register
+.cfi_def_cfa_register	%r9
+	sub		\$64+$xframe,%rsp
+	and		\$-64,%rsp
+___
+$code.=<<___	if ($win64);
+	movaps		%xmm6,-0xa8(%r9)
+	movaps		%xmm7,-0x98(%r9)
+	movaps		%xmm8,-0x88(%r9)
+	movaps		%xmm9,-0x78(%r9)
+	movaps		%xmm10,-0x68(%r9)
+	movaps		%xmm11,-0x58(%r9)
+	movaps		%xmm12,-0x48(%r9)
+	movaps		%xmm13,-0x38(%r9)
+	movaps		%xmm14,-0x28(%r9)
+	movaps		%xmm15,-0x18(%r9)
+.L16x_body:
+___
+$code.=<<___;
+	vzeroupper
+
+	lea		.Lsigma(%rip),%r10
+	vbroadcasti32x4	(%r10),$xa3		# key[0]
+	vbroadcasti32x4	($key),$xb3		# key[1]
+	vbroadcasti32x4	16($key),$xc3		# key[2]
+	vbroadcasti32x4	($counter),$xd3		# key[3]
+
+	vpshufd		\$0x00,$xa3,$xa0	# smash key by lanes...
+	vpshufd		\$0x55,$xa3,$xa1
+	vpshufd		\$0xaa,$xa3,$xa2
+	vpshufd		\$0xff,$xa3,$xa3
+	vmovdqa64	$xa0,@key[0]
+	vmovdqa64	$xa1,@key[1]
+	vmovdqa64	$xa2,@key[2]
+	vmovdqa64	$xa3,@key[3]
+
+	vpshufd		\$0x00,$xb3,$xb0
+	vpshufd		\$0x55,$xb3,$xb1
+	vpshufd		\$0xaa,$xb3,$xb2
+	vpshufd		\$0xff,$xb3,$xb3
+	vmovdqa64	$xb0,@key[4]
+	vmovdqa64	$xb1,@key[5]
+	vmovdqa64	$xb2,@key[6]
+	vmovdqa64	$xb3,@key[7]
+
+	vpshufd		\$0x00,$xc3,$xc0
+	vpshufd		\$0x55,$xc3,$xc1
+	vpshufd		\$0xaa,$xc3,$xc2
+	vpshufd		\$0xff,$xc3,$xc3
+	vmovdqa64	$xc0,@key[8]
+	vmovdqa64	$xc1,@key[9]
+	vmovdqa64	$xc2,@key[10]
+	vmovdqa64	$xc3,@key[11]
+
+	vpshufd		\$0x00,$xd3,$xd0
+	vpshufd		\$0x55,$xd3,$xd1
+	vpshufd		\$0xaa,$xd3,$xd2
+	vpshufd		\$0xff,$xd3,$xd3
+	vpaddd		.Lincz(%rip),$xd0,$xd0	# don't save counters yet
+	vmovdqa64	$xd0,@key[12]
+	vmovdqa64	$xd1,@key[13]
+	vmovdqa64	$xd2,@key[14]
+	vmovdqa64	$xd3,@key[15]
+
+	mov		\$10,%eax
+	jmp		.Loop16x
+
+.align	32
+.Loop_outer16x:
+	vpbroadcastd	0(%r10),$xa0		# reload key
+	vpbroadcastd	4(%r10),$xa1
+	vpbroadcastd	8(%r10),$xa2
+	vpbroadcastd	12(%r10),$xa3
+	vpaddd		.Lsixteen(%rip),@key[12],@key[12]	# next SIMD counters
+	vmovdqa64	@key[4],$xb0
+	vmovdqa64	@key[5],$xb1
+	vmovdqa64	@key[6],$xb2
+	vmovdqa64	@key[7],$xb3
+	vmovdqa64	@key[8],$xc0
+	vmovdqa64	@key[9],$xc1
+	vmovdqa64	@key[10],$xc2
+	vmovdqa64	@key[11],$xc3
+	vmovdqa64	@key[12],$xd0
+	vmovdqa64	@key[13],$xd1
+	vmovdqa64	@key[14],$xd2
+	vmovdqa64	@key[15],$xd3
+
+	vmovdqa64	$xa0,@key[0]
+	vmovdqa64	$xa1,@key[1]
+	vmovdqa64	$xa2,@key[2]
+	vmovdqa64	$xa3,@key[3]
+
+	mov		\$10,%eax
+	jmp		.Loop16x
+
+.align	32
+.Loop16x:
+___
+	# 10 x (column round + diagonal round) = 20 ChaCha rounds.
+	foreach (&AVX512_lane_ROUND(0, 4, 8,12)) { eval; }	# column round
+	foreach (&AVX512_lane_ROUND(0, 5,10,15)) { eval; }	# diagonal round
+# Feed-forward the resident key copies, then transpose 16 lane-smashed
+# registers back into byte-contiguous blocks: dword/qword unpacks, then two
+# levels of 128-bit shuffles (vshufi32x4).
+$code.=<<___;
+	dec		%eax
+	jnz		.Loop16x
+
+	vpaddd		@key[0],$xa0,$xa0	# accumulate key
+	vpaddd		@key[1],$xa1,$xa1
+	vpaddd		@key[2],$xa2,$xa2
+	vpaddd		@key[3],$xa3,$xa3
+
+	vpunpckldq	$xa1,$xa0,$xt2		# "de-interlace" data
+	vpunpckldq	$xa3,$xa2,$xt3
+	vpunpckhdq	$xa1,$xa0,$xa0
+	vpunpckhdq	$xa3,$xa2,$xa2
+	vpunpcklqdq	$xt3,$xt2,$xa1		# "a0"
+	vpunpckhqdq	$xt3,$xt2,$xt2		# "a1"
+	vpunpcklqdq	$xa2,$xa0,$xa3		# "a2"
+	vpunpckhqdq	$xa2,$xa0,$xa0		# "a3"
+___
+	($xa0,$xa1,$xa2,$xa3,$xt2)=($xa1,$xt2,$xa3,$xa0,$xa2);	# rename after transpose
+$code.=<<___;
+	vpaddd		@key[4],$xb0,$xb0
+	vpaddd		@key[5],$xb1,$xb1
+	vpaddd		@key[6],$xb2,$xb2
+	vpaddd		@key[7],$xb3,$xb3
+
+	vpunpckldq	$xb1,$xb0,$xt2
+	vpunpckldq	$xb3,$xb2,$xt3
+	vpunpckhdq	$xb1,$xb0,$xb0
+	vpunpckhdq	$xb3,$xb2,$xb2
+	vpunpcklqdq	$xt3,$xt2,$xb1		# "b0"
+	vpunpckhqdq	$xt3,$xt2,$xt2		# "b1"
+	vpunpcklqdq	$xb2,$xb0,$xb3		# "b2"
+	vpunpckhqdq	$xb2,$xb0,$xb0		# "b3"
+___
+	($xb0,$xb1,$xb2,$xb3,$xt2)=($xb1,$xt2,$xb3,$xb0,$xb2);	# rename after transpose
+$code.=<<___;
+	vshufi32x4	\$0x44,$xb0,$xa0,$xt3	# "de-interlace" further
+	vshufi32x4	\$0xee,$xb0,$xa0,$xb0
+	vshufi32x4	\$0x44,$xb1,$xa1,$xa0
+	vshufi32x4	\$0xee,$xb1,$xa1,$xb1
+	vshufi32x4	\$0x44,$xb2,$xa2,$xa1
+	vshufi32x4	\$0xee,$xb2,$xa2,$xb2
+	vshufi32x4	\$0x44,$xb3,$xa3,$xa2
+	vshufi32x4	\$0xee,$xb3,$xa3,$xb3
+___
+	($xa0,$xa1,$xa2,$xa3,$xt3)=($xt3,$xa0,$xa1,$xa2,$xa3);	# rename after 128-bit shuffle
+$code.=<<___;
+	vpaddd		@key[8],$xc0,$xc0
+	vpaddd		@key[9],$xc1,$xc1
+	vpaddd		@key[10],$xc2,$xc2
+	vpaddd		@key[11],$xc3,$xc3
+
+	vpunpckldq	$xc1,$xc0,$xt2
+	vpunpckldq	$xc3,$xc2,$xt3
+	vpunpckhdq	$xc1,$xc0,$xc0
+	vpunpckhdq	$xc3,$xc2,$xc2
+	vpunpcklqdq	$xt3,$xt2,$xc1		# "c0"
+	vpunpckhqdq	$xt3,$xt2,$xt2		# "c1"
+	vpunpcklqdq	$xc2,$xc0,$xc3		# "c2"
+	vpunpckhqdq	$xc2,$xc0,$xc0		# "c3"
+___
+	($xc0,$xc1,$xc2,$xc3,$xt2)=($xc1,$xt2,$xc3,$xc0,$xc2);	# rename after transpose
+$code.=<<___;
+	vpaddd		@key[12],$xd0,$xd0
+	vpaddd		@key[13],$xd1,$xd1
+	vpaddd		@key[14],$xd2,$xd2
+	vpaddd		@key[15],$xd3,$xd3
+
+	vpunpckldq	$xd1,$xd0,$xt2
+	vpunpckldq	$xd3,$xd2,$xt3
+	vpunpckhdq	$xd1,$xd0,$xd0
+	vpunpckhdq	$xd3,$xd2,$xd2
+	vpunpcklqdq	$xt3,$xt2,$xd1		# "d0"
+	vpunpckhqdq	$xt3,$xt2,$xt2		# "d1"
+	vpunpcklqdq	$xd2,$xd0,$xd3		# "d2"
+	vpunpckhqdq	$xd2,$xd0,$xd0		# "d3"
+___
+	($xd0,$xd1,$xd2,$xd3,$xt2)=($xd1,$xt2,$xd3,$xd0,$xd2);	# rename after transpose
+$code.=<<___;
+	vshufi32x4	\$0x44,$xd0,$xc0,$xt3	# "de-interlace" further
+	vshufi32x4	\$0xee,$xd0,$xc0,$xd0
+	vshufi32x4	\$0x44,$xd1,$xc1,$xc0
+	vshufi32x4	\$0xee,$xd1,$xc1,$xd1
+	vshufi32x4	\$0x44,$xd2,$xc2,$xc1
+	vshufi32x4	\$0xee,$xd2,$xc2,$xd2
+	vshufi32x4	\$0x44,$xd3,$xc3,$xc2
+	vshufi32x4	\$0xee,$xd3,$xc3,$xd3
+___
+	($xc0,$xc1,$xc2,$xc3,$xt3)=($xt3,$xc0,$xc1,$xc2,$xc3);	# rename after 128-bit shuffle
+$code.=<<___;
+	vshufi32x4	\$0x88,$xc0,$xa0,$xt0	# "de-interlace" further
+	vshufi32x4	\$0xdd,$xc0,$xa0,$xa0
+	 vshufi32x4	\$0x88,$xd0,$xb0,$xc0
+	 vshufi32x4	\$0xdd,$xd0,$xb0,$xd0
+	vshufi32x4	\$0x88,$xc1,$xa1,$xt1
+	vshufi32x4	\$0xdd,$xc1,$xa1,$xa1
+	 vshufi32x4	\$0x88,$xd1,$xb1,$xc1
+	 vshufi32x4	\$0xdd,$xd1,$xb1,$xd1
+	vshufi32x4	\$0x88,$xc2,$xa2,$xt2
+	vshufi32x4	\$0xdd,$xc2,$xa2,$xa2
+	 vshufi32x4	\$0x88,$xd2,$xb2,$xc2
+	 vshufi32x4	\$0xdd,$xd2,$xb2,$xd2
+	vshufi32x4	\$0x88,$xc3,$xa3,$xt3
+	vshufi32x4	\$0xdd,$xc3,$xa3,$xa3
+	 vshufi32x4	\$0x88,$xd3,$xb3,$xc3
+	 vshufi32x4	\$0xdd,$xd3,$xb3,$xd3
+___
+	# Final renames: first fold the $xt scratch back in, then relabel so
+	# ($xa0,$xb0,$xc0,$xd0 ...) enumerate the 16 blocks in memory order
+	# for the store/tail code below.
+	($xa0,$xa1,$xa2,$xa3,$xb0,$xb1,$xb2,$xb3)=
+	($xt0,$xt1,$xt2,$xt3,$xa0,$xa1,$xa2,$xa3);
+
+	($xa0,$xb0,$xc0,$xd0, $xa1,$xb1,$xc1,$xd1,
+	 $xa2,$xb2,$xc2,$xd2, $xa3,$xb3,$xc3,$xd3) =
+	($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3,
+	 $xc0,$xc1,$xc2,$xc3, $xd0,$xd1,$xd2,$xd3);
+# Full-block path: XOR/store sixteen 64-byte blocks.  The tail uses a
+# different trick than the 8x path: $out is rebased to $out-$inp so a
+# single register pair addresses both streams, each whole 64-byte block is
+# handled in turn, and the last partial block is staged at %rsp and XORed
+# byte-by-byte; the staging area is wiped before returning.
+$code.=<<___;
+	cmp		\$64*16,$len
+	jb		.Ltail16x
+
+	vpxord		0x00($inp),$xa0,$xa0	# xor with input
+	vpxord		0x40($inp),$xb0,$xb0
+	vpxord		0x80($inp),$xc0,$xc0
+	vpxord		0xc0($inp),$xd0,$xd0
+	vmovdqu32	$xa0,0x00($out)
+	vmovdqu32	$xb0,0x40($out)
+	vmovdqu32	$xc0,0x80($out)
+	vmovdqu32	$xd0,0xc0($out)
+
+	vpxord		0x100($inp),$xa1,$xa1
+	vpxord		0x140($inp),$xb1,$xb1
+	vpxord		0x180($inp),$xc1,$xc1
+	vpxord		0x1c0($inp),$xd1,$xd1
+	vmovdqu32	$xa1,0x100($out)
+	vmovdqu32	$xb1,0x140($out)
+	vmovdqu32	$xc1,0x180($out)
+	vmovdqu32	$xd1,0x1c0($out)
+
+	vpxord		0x200($inp),$xa2,$xa2
+	vpxord		0x240($inp),$xb2,$xb2
+	vpxord		0x280($inp),$xc2,$xc2
+	vpxord		0x2c0($inp),$xd2,$xd2
+	vmovdqu32	$xa2,0x200($out)
+	vmovdqu32	$xb2,0x240($out)
+	vmovdqu32	$xc2,0x280($out)
+	vmovdqu32	$xd2,0x2c0($out)
+
+	vpxord		0x300($inp),$xa3,$xa3
+	vpxord		0x340($inp),$xb3,$xb3
+	vpxord		0x380($inp),$xc3,$xc3
+	vpxord		0x3c0($inp),$xd3,$xd3
+	lea		0x400($inp),$inp
+	vmovdqu32	$xa3,0x300($out)
+	vmovdqu32	$xb3,0x340($out)
+	vmovdqu32	$xc3,0x380($out)
+	vmovdqu32	$xd3,0x3c0($out)
+	lea		0x400($out),$out
+
+	sub		\$64*16,$len
+	jnz		.Loop_outer16x
+
+	jmp		.Ldone16x
+
+.align	32
+.Ltail16x:
+	xor		%r10,%r10
+	sub		$inp,$out
+	cmp		\$64*1,$len
+	jb		.Less_than_64_16x
+	vpxord		($inp),$xa0,$xa0	# xor with input
+	vmovdqu32	$xa0,($out,$inp)
+	je		.Ldone16x
+	vmovdqa32	$xb0,$xa0
+	lea		64($inp),$inp
+
+	cmp		\$64*2,$len
+	jb		.Less_than_64_16x
+	vpxord		($inp),$xb0,$xb0
+	vmovdqu32	$xb0,($out,$inp)
+	je		.Ldone16x
+	vmovdqa32	$xc0,$xa0
+	lea		64($inp),$inp
+
+	cmp		\$64*3,$len
+	jb		.Less_than_64_16x
+	vpxord		($inp),$xc0,$xc0
+	vmovdqu32	$xc0,($out,$inp)
+	je		.Ldone16x
+	vmovdqa32	$xd0,$xa0
+	lea		64($inp),$inp
+
+	cmp		\$64*4,$len
+	jb		.Less_than_64_16x
+	vpxord		($inp),$xd0,$xd0
+	vmovdqu32	$xd0,($out,$inp)
+	je		.Ldone16x
+	vmovdqa32	$xa1,$xa0
+	lea		64($inp),$inp
+
+	cmp		\$64*5,$len
+	jb		.Less_than_64_16x
+	vpxord		($inp),$xa1,$xa1
+	vmovdqu32	$xa1,($out,$inp)
+	je		.Ldone16x
+	vmovdqa32	$xb1,$xa0
+	lea		64($inp),$inp
+
+	cmp		\$64*6,$len
+	jb		.Less_than_64_16x
+	vpxord		($inp),$xb1,$xb1
+	vmovdqu32	$xb1,($out,$inp)
+	je		.Ldone16x
+	vmovdqa32	$xc1,$xa0
+	lea		64($inp),$inp
+
+	cmp		\$64*7,$len
+	jb		.Less_than_64_16x
+	vpxord		($inp),$xc1,$xc1
+	vmovdqu32	$xc1,($out,$inp)
+	je		.Ldone16x
+	vmovdqa32	$xd1,$xa0
+	lea		64($inp),$inp
+
+	cmp		\$64*8,$len
+	jb		.Less_than_64_16x
+	vpxord		($inp),$xd1,$xd1
+	vmovdqu32	$xd1,($out,$inp)
+	je		.Ldone16x
+	vmovdqa32	$xa2,$xa0
+	lea		64($inp),$inp
+
+	cmp		\$64*9,$len
+	jb		.Less_than_64_16x
+	vpxord		($inp),$xa2,$xa2
+	vmovdqu32	$xa2,($out,$inp)
+	je		.Ldone16x
+	vmovdqa32	$xb2,$xa0
+	lea		64($inp),$inp
+
+	cmp		\$64*10,$len
+	jb		.Less_than_64_16x
+	vpxord		($inp),$xb2,$xb2
+	vmovdqu32	$xb2,($out,$inp)
+	je		.Ldone16x
+	vmovdqa32	$xc2,$xa0
+	lea		64($inp),$inp
+
+	cmp		\$64*11,$len
+	jb		.Less_than_64_16x
+	vpxord		($inp),$xc2,$xc2
+	vmovdqu32	$xc2,($out,$inp)
+	je		.Ldone16x
+	vmovdqa32	$xd2,$xa0
+	lea		64($inp),$inp
+
+	cmp		\$64*12,$len
+	jb		.Less_than_64_16x
+	vpxord		($inp),$xd2,$xd2
+	vmovdqu32	$xd2,($out,$inp)
+	je		.Ldone16x
+	vmovdqa32	$xa3,$xa0
+	lea		64($inp),$inp
+
+	cmp		\$64*13,$len
+	jb		.Less_than_64_16x
+	vpxord		($inp),$xa3,$xa3
+	vmovdqu32	$xa3,($out,$inp)
+	je		.Ldone16x
+	vmovdqa32	$xb3,$xa0
+	lea		64($inp),$inp
+
+	cmp		\$64*14,$len
+	jb		.Less_than_64_16x
+	vpxord		($inp),$xb3,$xb3
+	vmovdqu32	$xb3,($out,$inp)
+	je		.Ldone16x
+	vmovdqa32	$xc3,$xa0
+	lea		64($inp),$inp
+
+	cmp		\$64*15,$len
+	jb		.Less_than_64_16x
+	vpxord		($inp),$xc3,$xc3
+	vmovdqu32	$xc3,($out,$inp)
+	je		.Ldone16x
+	vmovdqa32	$xd3,$xa0
+	lea		64($inp),$inp
+
+.Less_than_64_16x:
+	vmovdqa32	$xa0,0x00(%rsp)
+	lea		($out,$inp),$out
+	and		\$63,$len
+
+.Loop_tail16x:
+	movzb		($inp,%r10),%eax
+	movzb		(%rsp,%r10),%ecx
+	lea		1(%r10),%r10
+	xor		%ecx,%eax
+	mov		%al,-1($out,%r10)
+	dec		$len
+	jnz		.Loop_tail16x
+
+	vpxord		$xa0,$xa0,$xa0
+	vmovdqa32	$xa0,0(%rsp)
+
+.Ldone16x:
+	vzeroall
+___
+# Win64 epilogue: restore callee-saved xmm registers spilled at entry.
+$code.=<<___	if ($win64);
+	movaps		-0xa8(%r9),%xmm6
+	movaps		-0x98(%r9),%xmm7
+	movaps		-0x88(%r9),%xmm8
+	movaps		-0x78(%r9),%xmm9
+	movaps		-0x68(%r9),%xmm10
+	movaps		-0x58(%r9),%xmm11
+	movaps		-0x48(%r9),%xmm12
+	movaps		-0x38(%r9),%xmm13
+	movaps		-0x28(%r9),%xmm14
+	movaps		-0x18(%r9),%xmm15
+___
+$code.=<<___;
+	lea		(%r9),%rsp
+.cfi_def_cfa_register	%rsp
+.L16x_epilogue:
+	ret
+.cfi_endproc
+.size	ChaCha20_16x,.-ChaCha20_16x
+___
+
+# switch to %ymm domain
+($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3,
+ $xc0,$xc1,$xc2,$xc3, $xd0,$xd1,$xd2,$xd3)=map("%ymm$_",(0..15));
+@xx=($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3,
+     $xc0,$xc1,$xc2,$xc3, $xd0,$xd1,$xd2,$xd3);
+@key=map("%ymm$_",(16..31));
+($xt0,$xt1,$xt2,$xt3)=@key[0..3];
+
+$code.=<<___;
+.type	ChaCha20_8xvl,\@function,5
+.align	32
+ChaCha20_8xvl:
+.cfi_startproc
+.LChaCha20_8xvl:
+	mov		%rsp,%r9		# frame register
+.cfi_def_cfa_register	%r9
+	sub		\$64+$xframe,%rsp
+	and		\$-64,%rsp
+___
+$code.=<<___	if ($win64);
+	movaps		%xmm6,-0xa8(%r9)
+	movaps		%xmm7,-0x98(%r9)
+	movaps		%xmm8,-0x88(%r9)
+	movaps		%xmm9,-0x78(%r9)
+	movaps		%xmm10,-0x68(%r9)
+	movaps		%xmm11,-0x58(%r9)
+	movaps		%xmm12,-0x48(%r9)
+	movaps		%xmm13,-0x38(%r9)
+	movaps		%xmm14,-0x28(%r9)
+	movaps		%xmm15,-0x18(%r9)
+.L8xvl_body:
+___
+$code.=<<___;
+	vzeroupper
+
+	lea		.Lsigma(%rip),%r10
+	vbroadcasti128	(%r10),$xa3		# key[0]
+	vbroadcasti128	($key),$xb3		# key[1]
+	vbroadcasti128	16($key),$xc3		# key[2]
+	vbroadcasti128	($counter),$xd3		# key[3]
+
+	vpshufd		\$0x00,$xa3,$xa0	# smash key by lanes...
+	vpshufd		\$0x55,$xa3,$xa1
+	vpshufd		\$0xaa,$xa3,$xa2
+	vpshufd		\$0xff,$xa3,$xa3
+	vmovdqa64	$xa0,@key[0]
+	vmovdqa64	$xa1,@key[1]
+	vmovdqa64	$xa2,@key[2]
+	vmovdqa64	$xa3,@key[3]
+
+	vpshufd		\$0x00,$xb3,$xb0
+	vpshufd		\$0x55,$xb3,$xb1
+	vpshufd		\$0xaa,$xb3,$xb2
+	vpshufd		\$0xff,$xb3,$xb3
+	vmovdqa64	$xb0,@key[4]
+	vmovdqa64	$xb1,@key[5]
+	vmovdqa64	$xb2,@key[6]
+	vmovdqa64	$xb3,@key[7]
+
+	vpshufd		\$0x00,$xc3,$xc0
+	vpshufd		\$0x55,$xc3,$xc1
+	vpshufd		\$0xaa,$xc3,$xc2
+	vpshufd		\$0xff,$xc3,$xc3
+	vmovdqa64	$xc0,@key[8]
+	vmovdqa64	$xc1,@key[9]
+	vmovdqa64	$xc2,@key[10]
+	vmovdqa64	$xc3,@key[11]
+
+	vpshufd		\$0x00,$xd3,$xd0
+	vpshufd		\$0x55,$xd3,$xd1
+	vpshufd		\$0xaa,$xd3,$xd2
+	vpshufd		\$0xff,$xd3,$xd3
+	vpaddd		.Lincy(%rip),$xd0,$xd0	# don't save counters yet
+	vmovdqa64	$xd0,@key[12]
+	vmovdqa64	$xd1,@key[13]
+	vmovdqa64	$xd2,@key[14]
+	vmovdqa64	$xd3,@key[15]
+
+	mov		\$10,%eax
+	jmp		.Loop8xvl
+
+.align	32
+.Loop_outer8xvl:
+	#vpbroadcastd	0(%r10),$xa0		# reload key
+	#vpbroadcastd	4(%r10),$xa1
+	vpbroadcastd	8(%r10),$xa2
+	vpbroadcastd	12(%r10),$xa3
+	vpaddd		.Leight(%rip),@key[12],@key[12]	# next SIMD counters
+	vmovdqa64	@key[4],$xb0
+	vmovdqa64	@key[5],$xb1
+	vmovdqa64	@key[6],$xb2
+	vmovdqa64	@key[7],$xb3
+	vmovdqa64	@key[8],$xc0
+	vmovdqa64	@key[9],$xc1
+	vmovdqa64	@key[10],$xc2
+	vmovdqa64	@key[11],$xc3
+	vmovdqa64	@key[12],$xd0
+	vmovdqa64	@key[13],$xd1
+	vmovdqa64	@key[14],$xd2
+	vmovdqa64	@key[15],$xd3
+
+	vmovdqa64	$xa0,@key[0]
+	vmovdqa64	$xa1,@key[1]
+	vmovdqa64	$xa2,@key[2]
+	vmovdqa64	$xa3,@key[3]
+
+	mov		\$10,%eax
+	jmp		.Loop8xvl
+
+.align	32
+.Loop8xvl:
+___
+	foreach (&AVX512_lane_ROUND(0, 4, 8,12)) { eval; }
+	foreach (&AVX512_lane_ROUND(0, 5,10,15)) { eval; }
+$code.=<<___;
+	dec		%eax
+	jnz		.Loop8xvl
+
+	vpaddd		@key[0],$xa0,$xa0	# accumulate key
+	vpaddd		@key[1],$xa1,$xa1
+	vpaddd		@key[2],$xa2,$xa2
+	vpaddd		@key[3],$xa3,$xa3
+
+	vpunpckldq	$xa1,$xa0,$xt2		# "de-interlace" data
+	vpunpckldq	$xa3,$xa2,$xt3
+	vpunpckhdq	$xa1,$xa0,$xa0
+	vpunpckhdq	$xa3,$xa2,$xa2
+	vpunpcklqdq	$xt3,$xt2,$xa1		# "a0"
+	vpunpckhqdq	$xt3,$xt2,$xt2		# "a1"
+	vpunpcklqdq	$xa2,$xa0,$xa3		# "a2"
+	vpunpckhqdq	$xa2,$xa0,$xa0		# "a3"
+___
+	($xa0,$xa1,$xa2,$xa3,$xt2)=($xa1,$xt2,$xa3,$xa0,$xa2);
+$code.=<<___;
+	vpaddd		@key[4],$xb0,$xb0
+	vpaddd		@key[5],$xb1,$xb1
+	vpaddd		@key[6],$xb2,$xb2
+	vpaddd		@key[7],$xb3,$xb3
+
+	vpunpckldq	$xb1,$xb0,$xt2
+	vpunpckldq	$xb3,$xb2,$xt3
+	vpunpckhdq	$xb1,$xb0,$xb0
+	vpunpckhdq	$xb3,$xb2,$xb2
+	vpunpcklqdq	$xt3,$xt2,$xb1		# "b0"
+	vpunpckhqdq	$xt3,$xt2,$xt2		# "b1"
+	vpunpcklqdq	$xb2,$xb0,$xb3		# "b2"
+	vpunpckhqdq	$xb2,$xb0,$xb0		# "b3"
+___
+	($xb0,$xb1,$xb2,$xb3,$xt2)=($xb1,$xt2,$xb3,$xb0,$xb2);
+$code.=<<___;
+	vshufi32x4	\$0,$xb0,$xa0,$xt3	# "de-interlace" further
+	vshufi32x4	\$3,$xb0,$xa0,$xb0
+	vshufi32x4	\$0,$xb1,$xa1,$xa0
+	vshufi32x4	\$3,$xb1,$xa1,$xb1
+	vshufi32x4	\$0,$xb2,$xa2,$xa1
+	vshufi32x4	\$3,$xb2,$xa2,$xb2
+	vshufi32x4	\$0,$xb3,$xa3,$xa2
+	vshufi32x4	\$3,$xb3,$xa3,$xb3
+___
+	($xa0,$xa1,$xa2,$xa3,$xt3)=($xt3,$xa0,$xa1,$xa2,$xa3);
+$code.=<<___;
+	vpaddd		@key[8],$xc0,$xc0
+	vpaddd		@key[9],$xc1,$xc1
+	vpaddd		@key[10],$xc2,$xc2
+	vpaddd		@key[11],$xc3,$xc3
+
+	vpunpckldq	$xc1,$xc0,$xt2
+	vpunpckldq	$xc3,$xc2,$xt3
+	vpunpckhdq	$xc1,$xc0,$xc0
+	vpunpckhdq	$xc3,$xc2,$xc2
+	vpunpcklqdq	$xt3,$xt2,$xc1		# "c0"
+	vpunpckhqdq	$xt3,$xt2,$xt2		# "c1"
+	vpunpcklqdq	$xc2,$xc0,$xc3		# "c2"
+	vpunpckhqdq	$xc2,$xc0,$xc0		# "c3"
+___
+	($xc0,$xc1,$xc2,$xc3,$xt2)=($xc1,$xt2,$xc3,$xc0,$xc2);
+$code.=<<___;
+	vpaddd		@key[12],$xd0,$xd0
+	vpaddd		@key[13],$xd1,$xd1
+	vpaddd		@key[14],$xd2,$xd2
+	vpaddd		@key[15],$xd3,$xd3
+
+	vpunpckldq	$xd1,$xd0,$xt2
+	vpunpckldq	$xd3,$xd2,$xt3
+	vpunpckhdq	$xd1,$xd0,$xd0
+	vpunpckhdq	$xd3,$xd2,$xd2
+	vpunpcklqdq	$xt3,$xt2,$xd1		# "d0"
+	vpunpckhqdq	$xt3,$xt2,$xt2		# "d1"
+	vpunpcklqdq	$xd2,$xd0,$xd3		# "d2"
+	vpunpckhqdq	$xd2,$xd0,$xd0		# "d3"
+___
+	($xd0,$xd1,$xd2,$xd3,$xt2)=($xd1,$xt2,$xd3,$xd0,$xd2);
+$code.=<<___;
+	vperm2i128	\$0x20,$xd0,$xc0,$xt3	# "de-interlace" further
+	vperm2i128	\$0x31,$xd0,$xc0,$xd0
+	vperm2i128	\$0x20,$xd1,$xc1,$xc0
+	vperm2i128	\$0x31,$xd1,$xc1,$xd1
+	vperm2i128	\$0x20,$xd2,$xc2,$xc1
+	vperm2i128	\$0x31,$xd2,$xc2,$xd2
+	vperm2i128	\$0x20,$xd3,$xc3,$xc2
+	vperm2i128	\$0x31,$xd3,$xc3,$xd3
+___
+	($xc0,$xc1,$xc2,$xc3,$xt3)=($xt3,$xc0,$xc1,$xc2,$xc3);
+	($xb0,$xb1,$xb2,$xb3,$xc0,$xc1,$xc2,$xc3)=
+	($xc0,$xc1,$xc2,$xc3,$xb0,$xb1,$xb2,$xb3);
+$code.=<<___;
+	cmp		\$64*8,$len
+	jb		.Ltail8xvl
+
+	mov		\$0x80,%eax		# size optimization
+	vpxord		0x00($inp),$xa0,$xa0	# xor with input
+	vpxor		0x20($inp),$xb0,$xb0
+	vpxor		0x40($inp),$xc0,$xc0
+	vpxor		0x60($inp),$xd0,$xd0
+	lea		($inp,%rax),$inp	# size optimization
+	vmovdqu32	$xa0,0x00($out)
+	vmovdqu		$xb0,0x20($out)
+	vmovdqu		$xc0,0x40($out)
+	vmovdqu		$xd0,0x60($out)
+	lea		($out,%rax),$out	# size optimization
+
+	vpxor		0x00($inp),$xa1,$xa1
+	vpxor		0x20($inp),$xb1,$xb1
+	vpxor		0x40($inp),$xc1,$xc1
+	vpxor		0x60($inp),$xd1,$xd1
+	lea		($inp,%rax),$inp	# size optimization
+	vmovdqu		$xa1,0x00($out)
+	vmovdqu		$xb1,0x20($out)
+	vmovdqu		$xc1,0x40($out)
+	vmovdqu		$xd1,0x60($out)
+	lea		($out,%rax),$out	# size optimization
+
+	vpxord		0x00($inp),$xa2,$xa2
+	vpxor		0x20($inp),$xb2,$xb2
+	vpxor		0x40($inp),$xc2,$xc2
+	vpxor		0x60($inp),$xd2,$xd2
+	lea		($inp,%rax),$inp	# size optimization
+	vmovdqu32	$xa2,0x00($out)
+	vmovdqu		$xb2,0x20($out)
+	vmovdqu		$xc2,0x40($out)
+	vmovdqu		$xd2,0x60($out)
+	lea		($out,%rax),$out	# size optimization
+
+	vpxor		0x00($inp),$xa3,$xa3
+	vpxor		0x20($inp),$xb3,$xb3
+	vpxor		0x40($inp),$xc3,$xc3
+	vpxor		0x60($inp),$xd3,$xd3
+	lea		($inp,%rax),$inp	# size optimization
+	vmovdqu		$xa3,0x00($out)
+	vmovdqu		$xb3,0x20($out)
+	vmovdqu		$xc3,0x40($out)
+	vmovdqu		$xd3,0x60($out)
+	lea		($out,%rax),$out	# size optimization
+
+	vpbroadcastd	0(%r10),%ymm0		# reload key
+	vpbroadcastd	4(%r10),%ymm1
+
+	sub		\$64*8,$len
+	jnz		.Loop_outer8xvl
+
+	jmp		.Ldone8xvl
+
+.align	32
+.Ltail8xvl:
+	vmovdqa64	$xa0,%ymm8		# size optimization
+___
+$xa0 = "%ymm8";
+$code.=<<___;
+	xor		%r10,%r10
+	sub		$inp,$out
+	cmp		\$64*1,$len
+	jb		.Less_than_64_8xvl
+	vpxor		0x00($inp),$xa0,$xa0	# xor with input
+	vpxor		0x20($inp),$xb0,$xb0
+	vmovdqu		$xa0,0x00($out,$inp)
+	vmovdqu		$xb0,0x20($out,$inp)
+	je		.Ldone8xvl
+	vmovdqa		$xc0,$xa0
+	vmovdqa		$xd0,$xb0
+	lea		64($inp),$inp
+
+	cmp		\$64*2,$len
+	jb		.Less_than_64_8xvl
+	vpxor		0x00($inp),$xc0,$xc0
+	vpxor		0x20($inp),$xd0,$xd0
+	vmovdqu		$xc0,0x00($out,$inp)
+	vmovdqu		$xd0,0x20($out,$inp)
+	je		.Ldone8xvl
+	vmovdqa		$xa1,$xa0
+	vmovdqa		$xb1,$xb0
+	lea		64($inp),$inp
+
+	cmp		\$64*3,$len
+	jb		.Less_than_64_8xvl
+	vpxor		0x00($inp),$xa1,$xa1
+	vpxor		0x20($inp),$xb1,$xb1
+	vmovdqu		$xa1,0x00($out,$inp)
+	vmovdqu		$xb1,0x20($out,$inp)
+	je		.Ldone8xvl
+	vmovdqa		$xc1,$xa0
+	vmovdqa		$xd1,$xb0
+	lea		64($inp),$inp
+
+	cmp		\$64*4,$len
+	jb		.Less_than_64_8xvl
+	vpxor		0x00($inp),$xc1,$xc1
+	vpxor		0x20($inp),$xd1,$xd1
+	vmovdqu		$xc1,0x00($out,$inp)
+	vmovdqu		$xd1,0x20($out,$inp)
+	je		.Ldone8xvl
+	vmovdqa32	$xa2,$xa0
+	vmovdqa		$xb2,$xb0
+	lea		64($inp),$inp
+
+	cmp		\$64*5,$len
+	jb		.Less_than_64_8xvl
+	vpxord		0x00($inp),$xa2,$xa2
+	vpxor		0x20($inp),$xb2,$xb2
+	vmovdqu32	$xa2,0x00($out,$inp)
+	vmovdqu		$xb2,0x20($out,$inp)
+	je		.Ldone8xvl
+	vmovdqa		$xc2,$xa0
+	vmovdqa		$xd2,$xb0
+	lea		64($inp),$inp
+
+	cmp		\$64*6,$len
+	jb		.Less_than_64_8xvl
+	vpxor		0x00($inp),$xc2,$xc2
+	vpxor		0x20($inp),$xd2,$xd2
+	vmovdqu		$xc2,0x00($out,$inp)
+	vmovdqu		$xd2,0x20($out,$inp)
+	je		.Ldone8xvl
+	vmovdqa		$xa3,$xa0
+	vmovdqa		$xb3,$xb0
+	lea		64($inp),$inp
+
+	cmp		\$64*7,$len
+	jb		.Less_than_64_8xvl
+	vpxor		0x00($inp),$xa3,$xa3
+	vpxor		0x20($inp),$xb3,$xb3
+	vmovdqu		$xa3,0x00($out,$inp)
+	vmovdqu		$xb3,0x20($out,$inp)
+	je		.Ldone8xvl
+	vmovdqa		$xc3,$xa0
+	vmovdqa		$xd3,$xb0
+	lea		64($inp),$inp
+
+.Less_than_64_8xvl:
+	vmovdqa		$xa0,0x00(%rsp)
+	vmovdqa		$xb0,0x20(%rsp)
+	lea		($out,$inp),$out
+	and		\$63,$len
+
+.Loop_tail8xvl:
+	movzb		($inp,%r10),%eax
+	movzb		(%rsp,%r10),%ecx
+	lea		1(%r10),%r10
+	xor		%ecx,%eax
+	mov		%al,-1($out,%r10)
+	dec		$len
+	jnz		.Loop_tail8xvl
+
+	vpxor		$xa0,$xa0,$xa0
+	vmovdqa		$xa0,0x00(%rsp)
+	vmovdqa		$xa0,0x20(%rsp)
+
+.Ldone8xvl:
+	vzeroall
+___
+$code.=<<___	if ($win64);
+	movaps		-0xa8(%r9),%xmm6
+	movaps		-0x98(%r9),%xmm7
+	movaps		-0x88(%r9),%xmm8
+	movaps		-0x78(%r9),%xmm9
+	movaps		-0x68(%r9),%xmm10
+	movaps		-0x58(%r9),%xmm11
+	movaps		-0x48(%r9),%xmm12
+	movaps		-0x38(%r9),%xmm13
+	movaps		-0x28(%r9),%xmm14
+	movaps		-0x18(%r9),%xmm15
+___
+$code.=<<___;
+	lea		(%r9),%rsp
+.cfi_def_cfa_register	%rsp
+.L8xvl_epilogue:
+	ret
+.cfi_endproc
+.size	ChaCha20_8xvl,.-ChaCha20_8xvl
+___
+}
+
+# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
+#		CONTEXT *context,DISPATCHER_CONTEXT *disp)
+#
+# Win64 structured-exception-handling support: se_handler unwinds the
+# integer-only ChaCha20_ctr32 frame, simd_handler additionally restores
+# the non-volatile xmm6-xmm15 registers saved by the SIMD code paths.
+# HandlerData[] supplies per-function prologue/epilogue labels and the
+# size of the xmm save area.
+if ($win64) {
+# Win64 ABI: the four handler arguments arrive in rcx/rdx/r8/r9.
+$rec="%rcx";
+$frame="%rdx";
+$context="%r8";
+$disp="%r9";
+
+$code.=<<___;
+.extern	__imp_RtlVirtualUnwind
+.type	se_handler,\@abi-omnipotent
+.align	16
+se_handler:
+	push	%rsi
+	push	%rdi
+	push	%rbx
+	push	%rbp
+	push	%r12
+	push	%r13
+	push	%r14
+	push	%r15
+	pushfq
+	sub	\$64,%rsp
+
+	mov	120($context),%rax	# pull context->Rax
+	mov	248($context),%rbx	# pull context->Rip
+
+	mov	8($disp),%rsi		# disp->ImageBase
+	mov	56($disp),%r11		# disp->HandlerData
+
+	lea	.Lctr32_body(%rip),%r10
+	cmp	%r10,%rbx		# context->Rip<.Lprologue
+	jb	.Lcommon_seh_tail
+
+	mov	152($context),%rax	# pull context->Rsp
+
+	lea	.Lno_data(%rip),%r10	# epilogue label
+	cmp	%r10,%rbx		# context->Rip>=.Lepilogue
+	jae	.Lcommon_seh_tail
+
+	lea	64+24+48(%rax),%rax
+
+	# reload the non-volatile GPRs the prologue pushed and reflect
+	# them back into the CONTEXT record
+	mov	-8(%rax),%rbx
+	mov	-16(%rax),%rbp
+	mov	-24(%rax),%r12
+	mov	-32(%rax),%r13
+	mov	-40(%rax),%r14
+	mov	-48(%rax),%r15
+	mov	%rbx,144($context)	# restore context->Rbx
+	mov	%rbp,160($context)	# restore context->Rbp
+	mov	%r12,216($context)	# restore context->R12
+	mov	%r13,224($context)	# restore context->R13
+	mov	%r14,232($context)	# restore context->R14
+	mov	%r15,240($context)	# restore context->R15
+
+# common tail: restore rsi/rdi, update the CONTEXT and let
+# RtlVirtualUnwind continue the unwind from the adjusted frame
+.Lcommon_seh_tail:
+	mov	8(%rax),%rdi
+	mov	16(%rax),%rsi
+	mov	%rax,152($context)	# restore context->Rsp
+	mov	%rsi,168($context)	# restore context->Rsi
+	mov	%rdi,176($context)	# restore context->Rdi
+
+	mov	40($disp),%rdi		# disp->ContextRecord
+	mov	$context,%rsi		# context
+	mov	\$154,%ecx		# sizeof(CONTEXT) in quadwords
+	.long	0xa548f3fc		# cld; rep movsq
+
+	mov	$disp,%rsi
+	xor	%rcx,%rcx		# arg1, UNW_FLAG_NHANDLER
+	mov	8(%rsi),%rdx		# arg2, disp->ImageBase
+	mov	0(%rsi),%r8		# arg3, disp->ControlPc
+	mov	16(%rsi),%r9		# arg4, disp->FunctionEntry
+	mov	40(%rsi),%r10		# disp->ContextRecord
+	lea	56(%rsi),%r11		# &disp->HandlerData
+	lea	24(%rsi),%r12		# &disp->EstablisherFrame
+	mov	%r10,32(%rsp)		# arg5
+	mov	%r11,40(%rsp)		# arg6
+	mov	%r12,48(%rsp)		# arg7
+	mov	%rcx,56(%rsp)		# arg8, (NULL)
+	call	*__imp_RtlVirtualUnwind(%rip)
+
+	mov	\$1,%eax		# ExceptionContinueSearch
+	add	\$64,%rsp
+	popfq
+	pop	%r15
+	pop	%r14
+	pop	%r13
+	pop	%r12
+	pop	%rbp
+	pop	%rbx
+	pop	%rdi
+	pop	%rsi
+	ret
+.size	se_handler,.-se_handler
+
+# handler for the SIMD entry points: HandlerData[0]/[1] are prologue and
+# epilogue labels (image-relative), HandlerData[2] is the byte size of
+# the xmm6-xmm15 save area below the frame register (%r9)
+.type	simd_handler,\@abi-omnipotent
+.align	16
+simd_handler:
+	push	%rsi
+	push	%rdi
+	push	%rbx
+	push	%rbp
+	push	%r12
+	push	%r13
+	push	%r14
+	push	%r15
+	pushfq
+	sub	\$64,%rsp
+
+	mov	120($context),%rax	# pull context->Rax
+	mov	248($context),%rbx	# pull context->Rip
+
+	mov	8($disp),%rsi		# disp->ImageBase
+	mov	56($disp),%r11		# disp->HandlerData
+
+	mov	0(%r11),%r10d		# HandlerData[0]
+	lea	(%rsi,%r10),%r10	# prologue label
+	cmp	%r10,%rbx		# context->Rip<prologue label
+	jb	.Lcommon_seh_tail
+
+	mov	192($context),%rax	# pull context->R9
+
+	mov	4(%r11),%r10d		# HandlerData[1]
+	mov	8(%r11),%ecx		# HandlerData[2]
+	lea	(%rsi,%r10),%r10	# epilogue label
+	cmp	%r10,%rbx		# context->Rip>=epilogue label
+	jae	.Lcommon_seh_tail
+
+	# copy the saved xmm registers back into the CONTEXT record
+	neg	%rcx
+	lea	-8(%rax,%rcx),%rsi
+	lea	512($context),%rdi	# &context.Xmm6
+	neg	%ecx
+	shr	\$3,%ecx		# byte count -> quadword count
+	.long	0xa548f3fc		# cld; rep movsq
+
+	jmp	.Lcommon_seh_tail
+.size	simd_handler,.-simd_handler
+
+# SEH tables: each .pdata entry maps a code range to its .xdata record
+.section	.pdata
+.align	4
+	.rva	.LSEH_begin_ChaCha20_ctr32
+	.rva	.LSEH_end_ChaCha20_ctr32
+	.rva	.LSEH_info_ChaCha20_ctr32
+
+	.rva	.LSEH_begin_ChaCha20_ssse3
+	.rva	.LSEH_end_ChaCha20_ssse3
+	.rva	.LSEH_info_ChaCha20_ssse3
+
+	.rva	.LSEH_begin_ChaCha20_128
+	.rva	.LSEH_end_ChaCha20_128
+	.rva	.LSEH_info_ChaCha20_128
+
+	.rva	.LSEH_begin_ChaCha20_4x
+	.rva	.LSEH_end_ChaCha20_4x
+	.rva	.LSEH_info_ChaCha20_4x
+___
+$code.=<<___ if ($avx);
+	.rva	.LSEH_begin_ChaCha20_4xop
+	.rva	.LSEH_end_ChaCha20_4xop
+	.rva	.LSEH_info_ChaCha20_4xop
+___
+$code.=<<___ if ($avx>1);
+	.rva	.LSEH_begin_ChaCha20_8x
+	.rva	.LSEH_end_ChaCha20_8x
+	.rva	.LSEH_info_ChaCha20_8x
+___
+$code.=<<___ if ($avx>2);
+	.rva	.LSEH_begin_ChaCha20_avx512
+	.rva	.LSEH_end_ChaCha20_avx512
+	.rva	.LSEH_info_ChaCha20_avx512
+
+	.rva	.LSEH_begin_ChaCha20_avx512vl
+	.rva	.LSEH_end_ChaCha20_avx512vl
+	.rva	.LSEH_info_ChaCha20_avx512vl
+
+	.rva	.LSEH_begin_ChaCha20_16x
+	.rva	.LSEH_end_ChaCha20_16x
+	.rva	.LSEH_info_ChaCha20_16x
+
+	.rva	.LSEH_begin_ChaCha20_8xvl
+	.rva	.LSEH_end_ChaCha20_8xvl
+	.rva	.LSEH_info_ChaCha20_8xvl
+___
+$code.=<<___;
+.section	.xdata
+.align	8
+.LSEH_info_ChaCha20_ctr32:
+	.byte	9,0,0,0
+	.rva	se_handler
+
+.LSEH_info_ChaCha20_ssse3:
+	.byte	9,0,0,0
+	.rva	simd_handler
+	.rva	.Lssse3_body,.Lssse3_epilogue
+	.long	0x20,0
+
+.LSEH_info_ChaCha20_128:
+	.byte	9,0,0,0
+	.rva	simd_handler
+	.rva	.L128_body,.L128_epilogue
+	.long	0x60,0
+
+.LSEH_info_ChaCha20_4x:
+	.byte	9,0,0,0
+	.rva	simd_handler
+	.rva	.L4x_body,.L4x_epilogue
+	.long	0xa0,0
+___
+$code.=<<___ if ($avx);
+.LSEH_info_ChaCha20_4xop:
+	.byte	9,0,0,0
+	.rva	simd_handler
+	.rva	.L4xop_body,.L4xop_epilogue		# HandlerData[]
+	.long	0xa0,0
+___
+$code.=<<___ if ($avx>1);
+.LSEH_info_ChaCha20_8x:
+	.byte	9,0,0,0
+	.rva	simd_handler
+	.rva	.L8x_body,.L8x_epilogue			# HandlerData[]
+	.long	0xa0,0
+___
+$code.=<<___ if ($avx>2);
+.LSEH_info_ChaCha20_avx512:
+	.byte	9,0,0,0
+	.rva	simd_handler
+	.rva	.Lavx512_body,.Lavx512_epilogue		# HandlerData[]
+	.long	0x20,0
+
+.LSEH_info_ChaCha20_avx512vl:
+	.byte	9,0,0,0
+	.rva	simd_handler
+	.rva	.Lavx512vl_body,.Lavx512vl_epilogue	# HandlerData[]
+	.long	0x20,0
+
+.LSEH_info_ChaCha20_16x:
+	.byte	9,0,0,0
+	.rva	simd_handler
+	.rva	.L16x_body,.L16x_epilogue		# HandlerData[]
+	.long	0xa0,0
+
+.LSEH_info_ChaCha20_8xvl:
+	.byte	9,0,0,0
+	.rva	simd_handler
+	.rva	.L8xvl_body,.L8xvl_epilogue		# HandlerData[]
+	.long	0xa0,0
+___
+}
+
+# Post-process the accumulated assembly and emit it line by line:
+# backticked perl expressions are eval'ed in place, and the "%x#%y"/
+# "%x#%z" markers are collapsed to their xmm ("down-shifted") form.
+foreach (split("\n",$code)) {
+	s/\`([^\`]*)\`/eval $1/ge;	# expand `expr` to its evaluated value
+
+	s/%x#%[yz]/%x/g;	# "down-shift"
+
+	print $_,"\n";
+}
+
+# flush and verify the generated assembly reached its destination
+close STDOUT or die "error closing STDOUT: $!";
diff --git a/ap/lib/libssl/openssl-1.1.1o/crypto/chacha/build.info b/ap/lib/libssl/openssl-1.1.1o/crypto/chacha/build.info
new file mode 100644
index 0000000..e75ca72
--- /dev/null
+++ b/ap/lib/libssl/openssl-1.1.1o/crypto/chacha/build.info
@@ -0,0 +1,20 @@
+LIBS=../../libcrypto
+SOURCE[../../libcrypto]={- $target{chacha_asm_src} -}
+
+GENERATE[chacha-x86.s]=asm/chacha-x86.pl \
+        $(PERLASM_SCHEME) $(LIB_CFLAGS) $(LIB_CPPFLAGS) $(PROCESSOR)
+GENERATE[chacha-x86_64.s]=asm/chacha-x86_64.pl $(PERLASM_SCHEME)
+GENERATE[chacha-ppc.s]=asm/chacha-ppc.pl $(PERLASM_SCHEME)
+GENERATE[chacha-armv4.S]=asm/chacha-armv4.pl $(PERLASM_SCHEME)
+INCLUDE[chacha-armv4.o]=..
+GENERATE[chacha-armv8.S]=asm/chacha-armv8.pl $(PERLASM_SCHEME)
+INCLUDE[chacha-armv8.o]=..
+GENERATE[chacha-s390x.S]=asm/chacha-s390x.pl $(PERLASM_SCHEME)
+INCLUDE[chacha-s390x.o]=..
+
+BEGINRAW[Makefile(unix)]
+##### CHACHA assembler implementations
+
+{- $builddir -}/chacha-%.S:	{- $sourcedir -}/asm/chacha-%.pl
+	CC="$(CC)" $(PERL) $< $(PERLASM_SCHEME) $@
+ENDRAW[Makefile(unix)]
diff --git a/ap/lib/libssl/openssl-1.1.1o/crypto/chacha/chacha_enc.c b/ap/lib/libssl/openssl-1.1.1o/crypto/chacha/chacha_enc.c
new file mode 100644
index 0000000..18251ea
--- /dev/null
+++ b/ap/lib/libssl/openssl-1.1.1o/crypto/chacha/chacha_enc.c
@@ -0,0 +1,130 @@
+/*
+ * Copyright 2015-2016 The OpenSSL Project Authors. All Rights Reserved.
+ *
+ * Licensed under the OpenSSL license (the "License").  You may not use
+ * this file except in compliance with the License.  You can obtain a copy
+ * in the file LICENSE in the source distribution or at
+ * https://www.openssl.org/source/license.html
+ */
+
+/* Adapted from the public domain code by D. Bernstein from SUPERCOP. */
+
+#include <string.h>
+
+#include "crypto/chacha.h"
+#include "crypto/ctype.h"
+
+typedef unsigned int u32;
+typedef unsigned char u8;
+/* One 64-byte ChaCha block, addressable as 16 32-bit words or raw bytes. */
+typedef union {
+    u32 u[16];
+    u8 c[64];
+} chacha_buf;
+
+/* 32-bit left rotate; n must satisfy 0 < n < 32 (all uses here do). */
+# define ROTATE(v, n) (((v) << (n)) | ((v) >> (32 - (n))))
+
+/* Store 32-bit v at p in little-endian byte order, host-endian agnostic. */
+# define U32TO8_LITTLE(p, v) do { \
+                                (p)[0] = (u8)(v >>  0); \
+                                (p)[1] = (u8)(v >>  8); \
+                                (p)[2] = (u8)(v >> 16); \
+                                (p)[3] = (u8)(v >> 24); \
+                                } while(0)
+
+/* QUARTERROUND updates a, b, c, d with a ChaCha "quarter" round. */
+# define QUARTERROUND(a,b,c,d) ( \
+                x[a] += x[b], x[d] = ROTATE((x[d] ^ x[a]),16), \
+                x[c] += x[d], x[b] = ROTATE((x[b] ^ x[c]),12), \
+                x[a] += x[b], x[d] = ROTATE((x[d] ^ x[a]), 8), \
+                x[c] += x[d], x[b] = ROTATE((x[b] ^ x[c]), 7)  )
+
+/* chacha20_core performs 20 rounds of ChaCha on the input words in
+ * |input| and writes the 64 output bytes to |output|. */
+static void chacha20_core(chacha_buf *output, const u32 input[16])
+{
+    u32 x[16];
+    int i;
+    /* Runtime endianness probe: |little| reads the low byte of a long
+     * holding 1, which is non-zero exactly on little-endian hosts. */
+    const union {
+        long one;
+        char little;
+    } is_endian = { 1 };
+
+    memcpy(x, input, sizeof(x));
+
+    /* 10 iterations of (column round + diagonal round) = 20 rounds. */
+    for (i = 20; i > 0; i -= 2) {
+        QUARTERROUND(0, 4, 8, 12);
+        QUARTERROUND(1, 5, 9, 13);
+        QUARTERROUND(2, 6, 10, 14);
+        QUARTERROUND(3, 7, 11, 15);
+        QUARTERROUND(0, 5, 10, 15);
+        QUARTERROUND(1, 6, 11, 12);
+        QUARTERROUND(2, 7, 8, 13);
+        QUARTERROUND(3, 4, 9, 14);
+    }
+
+    /* Feed-forward: add the original input, serializing little-endian.
+     * On little-endian hosts the word array already has that layout. */
+    if (is_endian.little) {
+        for (i = 0; i < 16; ++i)
+            output->u[i] = x[i] + input[i];
+    } else {
+        for (i = 0; i < 16; ++i)
+            U32TO8_LITTLE(output->c + 4 * i, (x[i] + input[i]));
+    }
+}
+
+/*
+ * Portable reference ChaCha20: XOR |len| bytes of |inp| into |out| with
+ * the keystream derived from |key| (8 host-order 32-bit words) and
+ * |counter| (block counter + nonce, 4 host-order 32-bit words).
+ * Only counter[0] is advanced, one per 64-byte block; see note below.
+ */
+void ChaCha20_ctr32(unsigned char *out, const unsigned char *inp,
+                    size_t len, const unsigned int key[8],
+                    const unsigned int counter[4])
+{
+    u32 input[16];
+    chacha_buf buf;
+    size_t todo, i;
+
+    /* sigma constant "expand 32-byte k" in little-endian encoding */
+    /* ossl_toascii makes the constant correct on non-ASCII (EBCDIC) hosts */
+    input[0] = ((u32)ossl_toascii('e')) | ((u32)ossl_toascii('x') << 8)
+               | ((u32)ossl_toascii('p') << 16)
+               | ((u32)ossl_toascii('a') << 24);
+    input[1] = ((u32)ossl_toascii('n')) | ((u32)ossl_toascii('d') << 8)
+               | ((u32)ossl_toascii(' ') << 16)
+               | ((u32)ossl_toascii('3') << 24);
+    input[2] = ((u32)ossl_toascii('2')) | ((u32)ossl_toascii('-') << 8)
+               | ((u32)ossl_toascii('b') << 16)
+               | ((u32)ossl_toascii('y') << 24);
+    input[3] = ((u32)ossl_toascii('t')) | ((u32)ossl_toascii('e') << 8)
+               | ((u32)ossl_toascii(' ') << 16)
+               | ((u32)ossl_toascii('k') << 24);
+
+    /* state words 4-11: the 256-bit key */
+    input[4] = key[0];
+    input[5] = key[1];
+    input[6] = key[2];
+    input[7] = key[3];
+    input[8] = key[4];
+    input[9] = key[5];
+    input[10] = key[6];
+    input[11] = key[7];
+
+    /* state words 12-15: block counter and nonce */
+    input[12] = counter[0];
+    input[13] = counter[1];
+    input[14] = counter[2];
+    input[15] = counter[3];
+
+    while (len > 0) {
+        /* process at most one 64-byte keystream block per iteration */
+        todo = sizeof(buf);
+        if (len < todo)
+            todo = len;
+
+        chacha20_core(&buf, input);
+
+        for (i = 0; i < todo; i++)
+            out[i] = inp[i] ^ buf.c[i];
+        out += todo;
+        inp += todo;
+        len -= todo;
+
+        /*
+         * Advance 32-bit counter. Note that as subroutine is so to
+         * say nonce-agnostic, this limited counter width doesn't
+         * prevent caller from implementing wider counter. It would
+         * simply take two calls split on counter overflow...
+         */
+        input[12]++;
+    }
+}