ASR_BASE

Change-Id: Icf3719cc0afe3eeb3edc7fa80a2eb5199ca9dda1
diff --git a/marvell/uboot/lib/sha_neon/Makefile b/marvell/uboot/lib/sha_neon/Makefile
new file mode 100644
index 0000000..22c337e
--- /dev/null
+++ b/marvell/uboot/lib/sha_neon/Makefile
@@ -0,0 +1,14 @@
+#
+# (C) Copyright 2008
+# Stefan Roese, DENX Software Engineering, sr@denx.de.
+#
+# SPDX-License-Identifier:	GPL-2.0+
+#
+
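+# armv7-neon.o provides the shared NEON enable/check helpers, so it is
+# listed under both options and built whenever either hash is enabled.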
+obj-$(CONFIG_SHA256) += armv7-neon.o
+obj-$(CONFIG_SHA1) += armv7-neon.o
+
+obj-$(CONFIG_SHA256) += sha256-armv7-neon.o
+obj-$(CONFIG_SHA256) += sha256_neon.o
+obj-$(CONFIG_SHA1) += sha1-armv7-neon.o
+obj-$(CONFIG_SHA1) += sha1_neon.o
\ No newline at end of file
diff --git a/marvell/uboot/lib/sha_neon/armv7-neon.S b/marvell/uboot/lib/sha_neon/armv7-neon.S
new file mode 100644
index 0000000..ab13f3e
--- /dev/null
+++ b/marvell/uboot/lib/sha_neon/armv7-neon.S
@@ -0,0 +1,48 @@
+#include <linux/linkage.h>
+
+.text
+.code   32
+.fpu neon
+
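+/*
+ * Return 1 if NEON is already usable: the cp10 access field in CPACR
+ * (bits 20-21) and the EN bit (bit 30) in FPEXC must both be set.
+ */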
+ENTRY(neon_en_check)
+	mrc	p15, 0, r1, c1, c0, 2
+	tst	r1, #(0x3 << 20)
+	beq	no_neon
+
+	fmrx	r1, FPEXC
+	tst	r1, #(1 << 30)
+	beq	no_neon
+
+	mov	r0, #1
+	b	exit
+
+no_neon:
+	mov	r0, #0
+
+exit:
+	mov	pc, lr
+ENDPROC(neon_en_check)
+
+ENTRY(neon_enable)
+	/* Enable the VFP/NEON unit: open cp10 access in CPACR, then set FPEXC.EN */
+	mrc	p15, 0, r1, c1, c0, 2
+	orr	r1, r1, #(0x3 << 20)
+	mcr	p15, 0, r1, c1, c0, 2
+	isb
+	fmrx	r1, FPEXC
+	orr	r1, r1, #(1 << 30)
+	fmxr	FPEXC, r1
+
+	/* Move back to caller */
+	mov	pc, lr
+ENDPROC(neon_enable)
+
+ENTRY(neon_disable)
+	/* Disable the VFP/NEON unit by clearing FPEXC.EN */
+	fmrx	r1, FPEXC
+	bic	r1, r1, #(1 << 30)
+	fmxr	FPEXC, r1
+
+	/* Move back to caller */
+	mov	pc, lr
+ENDPROC(neon_disable)
\ No newline at end of file
diff --git a/marvell/uboot/lib/sha_neon/sha1-armv7-neon.S b/marvell/uboot/lib/sha_neon/sha1-armv7-neon.S
new file mode 100644
index 0000000..257377b
--- /dev/null
+++ b/marvell/uboot/lib/sha_neon/sha1-armv7-neon.S
@@ -0,0 +1,634 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* sha1-armv7-neon.S - ARM/NEON accelerated SHA-1 transform function
+ *
+ * Copyright © 2013-2014 Jussi Kivilinna <jussi.kivilinna@iki.fi>
+ */
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+
+.syntax unified
+.fpu neon
+
+.text
+
+
+/* Context structure */
+
+#define state_h0 0
+#define state_h1 4
+#define state_h2 8
+#define state_h3 12
+#define state_h4 16
+
+
+/* Constants */
+
+#define K1  0x5A827999
+#define K2  0x6ED9EBA1
+#define K3  0x8F1BBCDC
+#define K4  0xCA62C1D6
+.align 4
+.LK_VEC:
+.LK1:	.long K1, K1, K1, K1
+.LK2:	.long K2, K2, K2, K2
+.LK3:	.long K3, K3, K3, K3
+.LK4:	.long K4, K4, K4, K4
+
+
+/* Register macros */
+
+#define RSTATE r0
+#define RDATA r1
+#define RNBLKS r2
+#define ROLDSTACK r3
+#define RWK lr
+
+#define _a r4
+#define _b r5
+#define _c r6
+#define _d r7
+#define _e r8
+
+#define RT0 r9
+#define RT1 r10
+#define RT2 r11
+#define RT3 r12
+
+#define W0 q0
+#define W1 q7
+#define W2 q2
+#define W3 q3
+#define W4 q4
+#define W5 q6
+#define W6 q5
+#define W7 q1
+
+#define tmp0 q8
+#define tmp1 q9
+#define tmp2 q10
+#define tmp3 q11
+
+#define qK1 q12
+#define qK2 q13
+#define qK3 q14
+#define qK4 q15
+
+#ifdef CONFIG_CPU_BIG_ENDIAN
+#define ARM_LE(code...)
+#else
+#define ARM_LE(code...)		code
+#endif
+
+/* Round function macros. */
+
+#define WK_offs(i) (((i) & 15) * 4)
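+
+/*
+ * Each round macro takes three "pre" hooks so that NEON message-schedule
+ * (W precalc) instructions for later rounds interleave with the scalar
+ * round computation, keeping both pipelines busy.
+ */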
+
+#define _R_F1(a,b,c,d,e,i,pre1,pre2,pre3,i16,\
+	      W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+	ldr RT3, [sp, WK_offs(i)]; \
+		pre1(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \
+	bic RT0, d, b; \
+	add e, e, a, ror #(32 - 5); \
+	and RT1, c, b; \
+		pre2(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \
+	add RT0, RT0, RT3; \
+	add e, e, RT1; \
+	ror b, #(32 - 30); \
+		pre3(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \
+	add e, e, RT0;
+
+#define _R_F2(a,b,c,d,e,i,pre1,pre2,pre3,i16,\
+	      W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+	ldr RT3, [sp, WK_offs(i)]; \
+		pre1(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \
+	eor RT0, d, b; \
+	add e, e, a, ror #(32 - 5); \
+	eor RT0, RT0, c; \
+		pre2(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \
+	add e, e, RT3; \
+	ror b, #(32 - 30); \
+		pre3(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \
+	add e, e, RT0; \
+
+#define _R_F3(a,b,c,d,e,i,pre1,pre2,pre3,i16,\
+	      W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+	ldr RT3, [sp, WK_offs(i)]; \
+		pre1(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \
+	eor RT0, b, c; \
+	and RT1, b, c; \
+	add e, e, a, ror #(32 - 5); \
+		pre2(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \
+	and RT0, RT0, d; \
+	add RT1, RT1, RT3; \
+	add e, e, RT0; \
+	ror b, #(32 - 30); \
+		pre3(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \
+	add e, e, RT1;
+
+#define _R_F4(a,b,c,d,e,i,pre1,pre2,pre3,i16,\
+	      W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+	_R_F2(a,b,c,d,e,i,pre1,pre2,pre3,i16,\
+	      W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28)
+
+#define _R(a,b,c,d,e,f,i,pre1,pre2,pre3,i16,\
+           W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+	_R_##f(a,b,c,d,e,i,pre1,pre2,pre3,i16,\
+	       W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28)
+
+#define R(a,b,c,d,e,f,i) \
+	_R_##f(a,b,c,d,e,i,dummy,dummy,dummy,i16,\
+	       W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28)
+
+#define dummy(...)
+
+
+/* Input expansion macros. */
+
+/********* Precalc macros for rounds 0-15 *************************************/
+
+#define W_PRECALC_00_15() \
+	add       RWK, sp, #(WK_offs(0));			\
+	\
+	vld1.32   {W0, W7}, [RDATA]!;				\
+ ARM_LE(vrev32.8  W0, W0;	)	/* big => little */	\
+	vld1.32   {W6, W5}, [RDATA]!;				\
+	vadd.u32  tmp0, W0, curK;				\
+ ARM_LE(vrev32.8  W7, W7;	)	/* big => little */	\
+ ARM_LE(vrev32.8  W6, W6;	)	/* big => little */	\
+	vadd.u32  tmp1, W7, curK;				\
+ ARM_LE(vrev32.8  W5, W5;	)	/* big => little */	\
+	vadd.u32  tmp2, W6, curK;				\
+	vst1.32   {tmp0, tmp1}, [RWK]!;				\
+	vadd.u32  tmp3, W5, curK;				\
+	vst1.32   {tmp2, tmp3}, [RWK];				\
+
+#define WPRECALC_00_15_0(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+	vld1.32   {W0, W7}, [RDATA]!;				\
+
+#define WPRECALC_00_15_1(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+	add       RWK, sp, #(WK_offs(0));			\
+
+#define WPRECALC_00_15_2(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+ ARM_LE(vrev32.8  W0, W0;	)	/* big => little */	\
+
+#define WPRECALC_00_15_3(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+	vld1.32   {W6, W5}, [RDATA]!;				\
+
+#define WPRECALC_00_15_4(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+	vadd.u32  tmp0, W0, curK;				\
+
+#define WPRECALC_00_15_5(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+ ARM_LE(vrev32.8  W7, W7;	)	/* big => little */	\
+
+#define WPRECALC_00_15_6(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+ ARM_LE(vrev32.8  W6, W6;	)	/* big => little */	\
+
+#define WPRECALC_00_15_7(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+	vadd.u32  tmp1, W7, curK;				\
+
+#define WPRECALC_00_15_8(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+ ARM_LE(vrev32.8  W5, W5;	)	/* big => little */	\
+
+#define WPRECALC_00_15_9(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+	vadd.u32  tmp2, W6, curK;				\
+
+#define WPRECALC_00_15_10(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+	vst1.32   {tmp0, tmp1}, [RWK]!;				\
+
+#define WPRECALC_00_15_11(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+	vadd.u32  tmp3, W5, curK;				\
+
+#define WPRECALC_00_15_12(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+	vst1.32   {tmp2, tmp3}, [RWK];				\
+
+
+/********* Precalc macros for rounds 16-31 ************************************/
+
+#define WPRECALC_16_31_0(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+	veor      tmp0, tmp0;			\
+	vext.8    W, W_m16, W_m12, #8;		\
+
+#define WPRECALC_16_31_1(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+	add       RWK, sp, #(WK_offs(i));	\
+	vext.8    tmp0, W_m04, tmp0, #4;	\
+
+#define WPRECALC_16_31_2(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+	veor      tmp0, tmp0, W_m16;		\
+	veor.32   W, W, W_m08;			\
+
+#define WPRECALC_16_31_3(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+	veor      tmp1, tmp1;			\
+	veor      W, W, tmp0;			\
+
+#define WPRECALC_16_31_4(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+	vshl.u32  tmp0, W, #1;			\
+
+#define WPRECALC_16_31_5(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+	vext.8    tmp1, tmp1, W, #(16-12);	\
+	vshr.u32  W, W, #31;			\
+
+#define WPRECALC_16_31_6(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+	vorr      tmp0, tmp0, W;		\
+	vshr.u32  W, tmp1, #30;			\
+
+#define WPRECALC_16_31_7(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+	vshl.u32  tmp1, tmp1, #2;		\
+
+#define WPRECALC_16_31_8(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+	veor      tmp0, tmp0, W;		\
+
+#define WPRECALC_16_31_9(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+	veor      W, tmp0, tmp1;		\
+
+#define WPRECALC_16_31_10(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+	vadd.u32  tmp0, W, curK;		\
+
+#define WPRECALC_16_31_11(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+	vst1.32   {tmp0}, [RWK];
+
+
+/********* Precalc macros for rounds 32-79 ************************************/
+
+#define WPRECALC_32_79_0(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+	veor W, W_m28; \
+
+#define WPRECALC_32_79_1(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+	vext.8 tmp0, W_m08, W_m04, #8; \
+
+#define WPRECALC_32_79_2(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+	veor W, W_m16; \
+
+#define WPRECALC_32_79_3(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+	veor W, tmp0; \
+
+#define WPRECALC_32_79_4(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+	add RWK, sp, #(WK_offs(i&~3)); \
+
+#define WPRECALC_32_79_5(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+	vshl.u32 tmp1, W, #2; \
+
+#define WPRECALC_32_79_6(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+	vshr.u32 tmp0, W, #30; \
+
+#define WPRECALC_32_79_7(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+	vorr W, tmp0, tmp1; \
+
+#define WPRECALC_32_79_8(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+	vadd.u32 tmp0, W, curK; \
+
+#define WPRECALC_32_79_9(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+	vst1.32 {tmp0}, [RWK];
+
+
+/*
+ * Transform nblks*64 bytes (nblks*16 32-bit words) at DATA.
+ *
+ * void
+ * sha1_transform_neon (void *ctx, const unsigned char *data,
+ *                      unsigned int nblks)
+ */
+.align 3
+ENTRY(sha1_transform_neon)
+  /* input:
+   *	r0: ctx, CTX
+   *	r1: data (64*nblks bytes)
+   *	r2: nblks
+   */
+
+  cmp RNBLKS, #0;
+  beq .Ldo_nothing;
+
+  stmdb   sp!,{r4-r12,lr}
+  /*vpush {q4-q7};*/
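+  /*
+   * q4-q7 are callee-saved under AAPCS; the save/restore is left
+   * commented out, presumably because U-Boot callers keep no live
+   * NEON state across this call.
+   */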
+
+  adr RT3, .LK_VEC;
+
+  mov ROLDSTACK, sp;
+
+  /* Align stack. */
+  sub RT0, sp, #(16*4);
+  and RT0, #(~(16-1));
+  mov sp, RT0;
+
+  vld1.32 {qK1-qK2}, [RT3]!; /* Load K1,K2 */
+
+  /* Get the values of the chaining variables. */
+  ldm RSTATE, {_a-_e};
+
+  vld1.32 {qK3-qK4}, [RT3]; /* Load K3,K4 */
+
+#undef curK
+#define curK qK1
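+  /* curK selects the round-constant vector and is rebound to qK1..qK4
+   * as the rounds advance through K1..K4. */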
+  /* Precalc 0-15. */
+  W_PRECALC_00_15();
+
+.Loop:
+  /* Transform 0-15 + Precalc 16-31. */
+  _R( _a, _b, _c, _d, _e, F1,  0,
+      WPRECALC_16_31_0, WPRECALC_16_31_1, WPRECALC_16_31_2, 16,
+      W4, W5, W6, W7, W0, _, _, _ );
+  _R( _e, _a, _b, _c, _d, F1,  1,
+      WPRECALC_16_31_3, WPRECALC_16_31_4, WPRECALC_16_31_5, 16,
+      W4, W5, W6, W7, W0, _, _, _ );
+  _R( _d, _e, _a, _b, _c, F1,  2,
+      WPRECALC_16_31_6, WPRECALC_16_31_7, WPRECALC_16_31_8, 16,
+      W4, W5, W6, W7, W0, _, _, _ );
+  _R( _c, _d, _e, _a, _b, F1,  3,
+      WPRECALC_16_31_9, WPRECALC_16_31_10,WPRECALC_16_31_11,16,
+      W4, W5, W6, W7, W0, _, _, _ );
+
+#undef curK
+#define curK qK2
+  _R( _b, _c, _d, _e, _a, F1,  4,
+      WPRECALC_16_31_0, WPRECALC_16_31_1, WPRECALC_16_31_2, 20,
+      W3, W4, W5, W6, W7, _, _, _ );
+  _R( _a, _b, _c, _d, _e, F1,  5,
+      WPRECALC_16_31_3, WPRECALC_16_31_4, WPRECALC_16_31_5, 20,
+      W3, W4, W5, W6, W7, _, _, _ );
+  _R( _e, _a, _b, _c, _d, F1,  6,
+      WPRECALC_16_31_6, WPRECALC_16_31_7, WPRECALC_16_31_8, 20,
+      W3, W4, W5, W6, W7, _, _, _ );
+  _R( _d, _e, _a, _b, _c, F1,  7,
+      WPRECALC_16_31_9, WPRECALC_16_31_10,WPRECALC_16_31_11,20,
+      W3, W4, W5, W6, W7, _, _, _ );
+
+  _R( _c, _d, _e, _a, _b, F1,  8,
+      WPRECALC_16_31_0, WPRECALC_16_31_1, WPRECALC_16_31_2, 24,
+      W2, W3, W4, W5, W6, _, _, _ );
+  _R( _b, _c, _d, _e, _a, F1,  9,
+      WPRECALC_16_31_3, WPRECALC_16_31_4, WPRECALC_16_31_5, 24,
+      W2, W3, W4, W5, W6, _, _, _ );
+  _R( _a, _b, _c, _d, _e, F1, 10,
+      WPRECALC_16_31_6, WPRECALC_16_31_7, WPRECALC_16_31_8, 24,
+      W2, W3, W4, W5, W6, _, _, _ );
+  _R( _e, _a, _b, _c, _d, F1, 11,
+      WPRECALC_16_31_9, WPRECALC_16_31_10,WPRECALC_16_31_11,24,
+      W2, W3, W4, W5, W6, _, _, _ );
+
+  _R( _d, _e, _a, _b, _c, F1, 12,
+      WPRECALC_16_31_0, WPRECALC_16_31_1, WPRECALC_16_31_2, 28,
+      W1, W2, W3, W4, W5, _, _, _ );
+  _R( _c, _d, _e, _a, _b, F1, 13,
+      WPRECALC_16_31_3, WPRECALC_16_31_4, WPRECALC_16_31_5, 28,
+      W1, W2, W3, W4, W5, _, _, _ );
+  _R( _b, _c, _d, _e, _a, F1, 14,
+      WPRECALC_16_31_6, WPRECALC_16_31_7, WPRECALC_16_31_8, 28,
+      W1, W2, W3, W4, W5, _, _, _ );
+  _R( _a, _b, _c, _d, _e, F1, 15,
+      WPRECALC_16_31_9, WPRECALC_16_31_10,WPRECALC_16_31_11,28,
+      W1, W2, W3, W4, W5, _, _, _ );
+
+  /* Transform 16-63 + Precalc 32-79. */
+  _R( _e, _a, _b, _c, _d, F1, 16,
+      WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 32,
+      W0, W1, W2, W3, W4, W5, W6, W7);
+  _R( _d, _e, _a, _b, _c, F1, 17,
+      WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 32,
+      W0, W1, W2, W3, W4, W5, W6, W7);
+  _R( _c, _d, _e, _a, _b, F1, 18,
+      WPRECALC_32_79_6, dummy,            WPRECALC_32_79_7, 32,
+      W0, W1, W2, W3, W4, W5, W6, W7);
+  _R( _b, _c, _d, _e, _a, F1, 19,
+      WPRECALC_32_79_8, dummy,            WPRECALC_32_79_9, 32,
+      W0, W1, W2, W3, W4, W5, W6, W7);
+
+  _R( _a, _b, _c, _d, _e, F2, 20,
+      WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 36,
+      W7, W0, W1, W2, W3, W4, W5, W6);
+  _R( _e, _a, _b, _c, _d, F2, 21,
+      WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 36,
+      W7, W0, W1, W2, W3, W4, W5, W6);
+  _R( _d, _e, _a, _b, _c, F2, 22,
+      WPRECALC_32_79_6, dummy,            WPRECALC_32_79_7, 36,
+      W7, W0, W1, W2, W3, W4, W5, W6);
+  _R( _c, _d, _e, _a, _b, F2, 23,
+      WPRECALC_32_79_8, dummy,            WPRECALC_32_79_9, 36,
+      W7, W0, W1, W2, W3, W4, W5, W6);
+
+#undef curK
+#define curK qK3
+  _R( _b, _c, _d, _e, _a, F2, 24,
+      WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 40,
+      W6, W7, W0, W1, W2, W3, W4, W5);
+  _R( _a, _b, _c, _d, _e, F2, 25,
+      WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 40,
+      W6, W7, W0, W1, W2, W3, W4, W5);
+  _R( _e, _a, _b, _c, _d, F2, 26,
+      WPRECALC_32_79_6, dummy,            WPRECALC_32_79_7, 40,
+      W6, W7, W0, W1, W2, W3, W4, W5);
+  _R( _d, _e, _a, _b, _c, F2, 27,
+      WPRECALC_32_79_8, dummy,            WPRECALC_32_79_9, 40,
+      W6, W7, W0, W1, W2, W3, W4, W5);
+
+  _R( _c, _d, _e, _a, _b, F2, 28,
+      WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 44,
+      W5, W6, W7, W0, W1, W2, W3, W4);
+  _R( _b, _c, _d, _e, _a, F2, 29,
+      WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 44,
+      W5, W6, W7, W0, W1, W2, W3, W4);
+  _R( _a, _b, _c, _d, _e, F2, 30,
+      WPRECALC_32_79_6, dummy,            WPRECALC_32_79_7, 44,
+      W5, W6, W7, W0, W1, W2, W3, W4);
+  _R( _e, _a, _b, _c, _d, F2, 31,
+      WPRECALC_32_79_8, dummy,            WPRECALC_32_79_9, 44,
+      W5, W6, W7, W0, W1, W2, W3, W4);
+
+  _R( _d, _e, _a, _b, _c, F2, 32,
+      WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 48,
+      W4, W5, W6, W7, W0, W1, W2, W3);
+  _R( _c, _d, _e, _a, _b, F2, 33,
+      WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 48,
+      W4, W5, W6, W7, W0, W1, W2, W3);
+  _R( _b, _c, _d, _e, _a, F2, 34,
+      WPRECALC_32_79_6, dummy,            WPRECALC_32_79_7, 48,
+      W4, W5, W6, W7, W0, W1, W2, W3);
+  _R( _a, _b, _c, _d, _e, F2, 35,
+      WPRECALC_32_79_8, dummy,            WPRECALC_32_79_9, 48,
+      W4, W5, W6, W7, W0, W1, W2, W3);
+
+  _R( _e, _a, _b, _c, _d, F2, 36,
+      WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 52,
+      W3, W4, W5, W6, W7, W0, W1, W2);
+  _R( _d, _e, _a, _b, _c, F2, 37,
+      WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 52,
+      W3, W4, W5, W6, W7, W0, W1, W2);
+  _R( _c, _d, _e, _a, _b, F2, 38,
+      WPRECALC_32_79_6, dummy,            WPRECALC_32_79_7, 52,
+      W3, W4, W5, W6, W7, W0, W1, W2);
+  _R( _b, _c, _d, _e, _a, F2, 39,
+      WPRECALC_32_79_8, dummy,            WPRECALC_32_79_9, 52,
+      W3, W4, W5, W6, W7, W0, W1, W2);
+
+  _R( _a, _b, _c, _d, _e, F3, 40,
+      WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 56,
+      W2, W3, W4, W5, W6, W7, W0, W1);
+  _R( _e, _a, _b, _c, _d, F3, 41,
+      WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 56,
+      W2, W3, W4, W5, W6, W7, W0, W1);
+  _R( _d, _e, _a, _b, _c, F3, 42,
+      WPRECALC_32_79_6, dummy,            WPRECALC_32_79_7, 56,
+      W2, W3, W4, W5, W6, W7, W0, W1);
+  _R( _c, _d, _e, _a, _b, F3, 43,
+      WPRECALC_32_79_8, dummy,            WPRECALC_32_79_9, 56,
+      W2, W3, W4, W5, W6, W7, W0, W1);
+
+#undef curK
+#define curK qK4
+  _R( _b, _c, _d, _e, _a, F3, 44,
+      WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 60,
+      W1, W2, W3, W4, W5, W6, W7, W0);
+  _R( _a, _b, _c, _d, _e, F3, 45,
+      WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 60,
+      W1, W2, W3, W4, W5, W6, W7, W0);
+  _R( _e, _a, _b, _c, _d, F3, 46,
+      WPRECALC_32_79_6, dummy,            WPRECALC_32_79_7, 60,
+      W1, W2, W3, W4, W5, W6, W7, W0);
+  _R( _d, _e, _a, _b, _c, F3, 47,
+      WPRECALC_32_79_8, dummy,            WPRECALC_32_79_9, 60,
+      W1, W2, W3, W4, W5, W6, W7, W0);
+
+  _R( _c, _d, _e, _a, _b, F3, 48,
+      WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 64,
+      W0, W1, W2, W3, W4, W5, W6, W7);
+  _R( _b, _c, _d, _e, _a, F3, 49,
+      WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 64,
+      W0, W1, W2, W3, W4, W5, W6, W7);
+  _R( _a, _b, _c, _d, _e, F3, 50,
+      WPRECALC_32_79_6, dummy,            WPRECALC_32_79_7, 64,
+      W0, W1, W2, W3, W4, W5, W6, W7);
+  _R( _e, _a, _b, _c, _d, F3, 51,
+      WPRECALC_32_79_8, dummy,            WPRECALC_32_79_9, 64,
+      W0, W1, W2, W3, W4, W5, W6, W7);
+
+  _R( _d, _e, _a, _b, _c, F3, 52,
+      WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 68,
+      W7, W0, W1, W2, W3, W4, W5, W6);
+  _R( _c, _d, _e, _a, _b, F3, 53,
+      WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 68,
+      W7, W0, W1, W2, W3, W4, W5, W6);
+  _R( _b, _c, _d, _e, _a, F3, 54,
+      WPRECALC_32_79_6, dummy,            WPRECALC_32_79_7, 68,
+      W7, W0, W1, W2, W3, W4, W5, W6);
+  _R( _a, _b, _c, _d, _e, F3, 55,
+      WPRECALC_32_79_8, dummy,            WPRECALC_32_79_9, 68,
+      W7, W0, W1, W2, W3, W4, W5, W6);
+
+  _R( _e, _a, _b, _c, _d, F3, 56,
+      WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 72,
+      W6, W7, W0, W1, W2, W3, W4, W5);
+  _R( _d, _e, _a, _b, _c, F3, 57,
+      WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 72,
+      W6, W7, W0, W1, W2, W3, W4, W5);
+  _R( _c, _d, _e, _a, _b, F3, 58,
+      WPRECALC_32_79_6, dummy,            WPRECALC_32_79_7, 72,
+      W6, W7, W0, W1, W2, W3, W4, W5);
+  _R( _b, _c, _d, _e, _a, F3, 59,
+      WPRECALC_32_79_8, dummy,            WPRECALC_32_79_9, 72,
+      W6, W7, W0, W1, W2, W3, W4, W5);
+
+  subs RNBLKS, #1;
+
+  _R( _a, _b, _c, _d, _e, F4, 60,
+      WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 76,
+      W5, W6, W7, W0, W1, W2, W3, W4);
+  _R( _e, _a, _b, _c, _d, F4, 61,
+      WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 76,
+      W5, W6, W7, W0, W1, W2, W3, W4);
+  _R( _d, _e, _a, _b, _c, F4, 62,
+      WPRECALC_32_79_6, dummy,            WPRECALC_32_79_7, 76,
+      W5, W6, W7, W0, W1, W2, W3, W4);
+  _R( _c, _d, _e, _a, _b, F4, 63,
+      WPRECALC_32_79_8, dummy,            WPRECALC_32_79_9, 76,
+      W5, W6, W7, W0, W1, W2, W3, W4);
+
+  beq .Lend;
+
+  /* Transform 64-79 + Precalc 0-15 of next block. */
+#undef curK
+#define curK qK1
+  _R( _b, _c, _d, _e, _a, F4, 64,
+      WPRECALC_00_15_0, dummy, dummy, _, _, _, _, _, _, _, _, _ );
+  _R( _a, _b, _c, _d, _e, F4, 65,
+      WPRECALC_00_15_1, dummy, dummy, _, _, _, _, _, _, _, _, _ );
+  _R( _e, _a, _b, _c, _d, F4, 66,
+      WPRECALC_00_15_2, dummy, dummy, _, _, _, _, _, _, _, _, _ );
+  _R( _d, _e, _a, _b, _c, F4, 67,
+      WPRECALC_00_15_3, dummy, dummy, _, _, _, _, _, _, _, _, _ );
+
+  _R( _c, _d, _e, _a, _b, F4, 68,
+      dummy,            dummy, dummy, _, _, _, _, _, _, _, _, _ );
+  _R( _b, _c, _d, _e, _a, F4, 69,
+      dummy,            dummy, dummy, _, _, _, _, _, _, _, _, _ );
+  _R( _a, _b, _c, _d, _e, F4, 70,
+      WPRECALC_00_15_4, dummy, dummy, _, _, _, _, _, _, _, _, _ );
+  _R( _e, _a, _b, _c, _d, F4, 71,
+      WPRECALC_00_15_5, dummy, dummy, _, _, _, _, _, _, _, _, _ );
+
+  _R( _d, _e, _a, _b, _c, F4, 72,
+      dummy,            dummy, dummy, _, _, _, _, _, _, _, _, _ );
+  _R( _c, _d, _e, _a, _b, F4, 73,
+      dummy,            dummy, dummy, _, _, _, _, _, _, _, _, _ );
+  _R( _b, _c, _d, _e, _a, F4, 74,
+      WPRECALC_00_15_6, dummy, dummy, _, _, _, _, _, _, _, _, _ );
+  _R( _a, _b, _c, _d, _e, F4, 75,
+      WPRECALC_00_15_7, dummy, dummy, _, _, _, _, _, _, _, _, _ );
+
+  _R( _e, _a, _b, _c, _d, F4, 76,
+      WPRECALC_00_15_8, dummy, dummy, _, _, _, _, _, _, _, _, _ );
+  _R( _d, _e, _a, _b, _c, F4, 77,
+      WPRECALC_00_15_9, dummy, dummy, _, _, _, _, _, _, _, _, _ );
+  _R( _c, _d, _e, _a, _b, F4, 78,
+      WPRECALC_00_15_10, dummy, dummy, _, _, _, _, _, _, _, _, _ );
+  _R( _b, _c, _d, _e, _a, F4, 79,
+      WPRECALC_00_15_11, dummy, WPRECALC_00_15_12, _, _, _, _, _, _, _, _, _ );
+
+  /* Update the chaining variables. */
+  ldm RSTATE, {RT0-RT3};
+  add _a, RT0;
+  ldr RT0, [RSTATE, #state_h4];
+  add _b, RT1;
+  add _c, RT2;
+  add _d, RT3;
+  add _e, RT0;
+  stm RSTATE, {_a-_e};
+
+  b .Loop;
+
+.Lend:
+  /* Transform 64-79 */
+  R( _b, _c, _d, _e, _a, F4, 64 );
+  R( _a, _b, _c, _d, _e, F4, 65 );
+  R( _e, _a, _b, _c, _d, F4, 66 );
+  R( _d, _e, _a, _b, _c, F4, 67 );
+  R( _c, _d, _e, _a, _b, F4, 68 );
+  R( _b, _c, _d, _e, _a, F4, 69 );
+  R( _a, _b, _c, _d, _e, F4, 70 );
+  R( _e, _a, _b, _c, _d, F4, 71 );
+  R( _d, _e, _a, _b, _c, F4, 72 );
+  R( _c, _d, _e, _a, _b, F4, 73 );
+  R( _b, _c, _d, _e, _a, F4, 74 );
+  R( _a, _b, _c, _d, _e, F4, 75 );
+  R( _e, _a, _b, _c, _d, F4, 76 );
+  R( _d, _e, _a, _b, _c, F4, 77 );
+  R( _c, _d, _e, _a, _b, F4, 78 );
+  R( _b, _c, _d, _e, _a, F4, 79 );
+
+  mov sp, ROLDSTACK;
+
+  /* Update the chaining variables. */
+  ldm RSTATE, {RT0-RT3};
+  add _a, RT0;
+  ldr RT0, [RSTATE, #state_h4];
+  add _b, RT1;
+  add _c, RT2;
+  add _d, RT3;
+  /*vpop {q4-q7};*/
+  add _e, RT0;
+  stm RSTATE, {_a-_e};
+
+  ldmia   sp!,{r4-r12,pc}
+
+.Ldo_nothing:
+  bx lr
+ENDPROC(sha1_transform_neon)
diff --git a/marvell/uboot/lib/sha_neon/sha1_neon.c b/marvell/uboot/lib/sha_neon/sha1_neon.c
new file mode 100644
index 0000000..0dea146
--- /dev/null
+++ b/marvell/uboot/lib/sha_neon/sha1_neon.c
@@ -0,0 +1,315 @@
+/*
+ *  Heiko Schocher, DENX Software Engineering, hs@denx.de.
+ *  based on:
+ *  FIPS-180-1 compliant SHA-1 implementation
+ *
+ *  Copyright (C) 2003-2006  Christophe Devine
+ *
+ *  This library is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU Lesser General Public
+ *  License, version 2.1 as published by the Free Software Foundation.
+ *
+ *  This library is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  Lesser General Public License for more details.
+ *
+ *  You should have received a copy of the GNU Lesser General Public
+ *  License along with this library; if not, write to the Free Software
+ *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
+ *  MA  02110-1301  USA
+ */
+/*
+ *  The SHA-1 standard was published by NIST in 1993.
+ *
+ *  http://www.itl.nist.gov/fipspubs/fip180-1.htm
+ */
+
+#ifndef _CRT_SECURE_NO_DEPRECATE
+#define _CRT_SECURE_NO_DEPRECATE 1
+#endif
+
+#ifndef USE_HOSTCC
+#include <common.h>
+#include <linux/string.h>
+#else
+#include <string.h>
+#endif /* USE_HOSTCC */
+#include <watchdog.h>
+#include "sha1.h"
+
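+/* The neon_* helpers are implemented in armv7-neon.S; the block
+ * transform in sha1-armv7-neon.S. */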
+asmlinkage int	neon_en_check(void);
+asmlinkage void	neon_enable(void);
+asmlinkage void	neon_disable(void);
+asmlinkage void sha1_transform_neon(void *state_h, const unsigned char *data,
+				    unsigned int rounds);
+
+/*
+ * 32-bit integer manipulation macros (big endian)
+ */
+#ifndef PUT_UINT32_BE
+#define PUT_UINT32_BE(n,b,i) {				\
+	(b)[(i)    ] = (unsigned char) ( (n) >> 24 );	\
+	(b)[(i) + 1] = (unsigned char) ( (n) >> 16 );	\
+	(b)[(i) + 2] = (unsigned char) ( (n) >>  8 );	\
+	(b)[(i) + 3] = (unsigned char) ( (n)       );	\
+}
+#endif
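+/* E.g. PUT_UINT32_BE(0x01020304, buf, 0) stores bytes 01 02 03 04. */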
+
+/*
+ * SHA-1 context setup
+ */
+void sha1_starts (sha1_context * ctx)
+{
+	ctx->total[0] = 0;
+	ctx->total[1] = 0;
+
+	ctx->state[0] = 0x67452301;
+	ctx->state[1] = 0xEFCDAB89;
+	ctx->state[2] = 0x98BADCFE;
+	ctx->state[3] = 0x10325476;
+	ctx->state[4] = 0xC3D2E1F0;
+}
+
+static inline void sha1_process(sha1_context *ctx,
+				const unsigned char *data, uint32_t blks)
+{
+	int neon_en = neon_en_check();
+
+	if (!neon_en)
+		neon_enable();
+
+	sha1_transform_neon(ctx->state, data, blks);
+
+	if (!neon_en)
+		neon_disable();
+}
+
+/*
+ * SHA-1 process buffer
+ */
+void sha1_update(sha1_context *ctx, const unsigned char *input,
+		 unsigned int ilen)
+{
+	int fill;
+	unsigned long left;
+	uint32_t blks;
+
+	if (ilen <= 0)
+		return;
+
+	left = ctx->total[0] & 0x3F;
+	fill = 64 - left;
+
+	ctx->total[0] += ilen;
+	ctx->total[0] &= 0xFFFFFFFF;
+
+	if (ctx->total[0] < (unsigned long) ilen)
+		ctx->total[1]++;
+
+	if (left && ilen >= fill) {
+		memcpy ((void *) (ctx->buffer + left), (void *) input, fill);
+		sha1_process (ctx, ctx->buffer, 1);
+		input += fill;
+		ilen -= fill;
+		left = 0;
+	}
+
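+	/* Hand all remaining complete 64-byte blocks to NEON in one call. */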
+	blks = ilen / 64;
+	if (blks) {
+		sha1_process (ctx, input, blks);
+		input += 64 * blks;
+		ilen -= 64 * blks;
+	}
+
+	if (ilen > 0) {
+		memcpy ((void *) (ctx->buffer + left), (void *) input, ilen);
+	}
+}
+
+static const unsigned char sha1_padding[64] = {
+	0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	   0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	   0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	   0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+/*
+ * SHA-1 final digest
+ */
+void sha1_finish (sha1_context * ctx, unsigned char output[20])
+{
+	unsigned long last, padn;
+	unsigned long high, low;
+	unsigned char msglen[8];
+
+	high = (ctx->total[0] >> 29)
+		| (ctx->total[1] << 3);
+	low = (ctx->total[0] << 3);
+
+	PUT_UINT32_BE (high, msglen, 0);
+	PUT_UINT32_BE (low, msglen, 4);
+
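+	/*
+	 * Pad with 0x80 then zeros up to 56 mod 64, leaving room for the
+	 * 8-byte bit count: e.g. last = 10 gives padn = 46, while last = 60
+	 * spills into the next block and gives padn = 60.
+	 */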
+	last = ctx->total[0] & 0x3F;
+	padn = (last < 56) ? (56 - last) : (120 - last);
+
+	sha1_update (ctx, (unsigned char *) sha1_padding, padn);
+	sha1_update (ctx, msglen, 8);
+
+	PUT_UINT32_BE (ctx->state[0], output, 0);
+	PUT_UINT32_BE (ctx->state[1], output, 4);
+	PUT_UINT32_BE (ctx->state[2], output, 8);
+	PUT_UINT32_BE (ctx->state[3], output, 12);
+	PUT_UINT32_BE (ctx->state[4], output, 16);
+}
+
+/*
+ * Output = SHA-1( input buffer )
+ */
+void sha1_csum(const unsigned char *input, unsigned int ilen,
+	       unsigned char *output)
+{
+	sha1_context ctx;
+
+	sha1_starts (&ctx);
+	sha1_update (&ctx, input, ilen);
+	sha1_finish (&ctx, output);
+}
+
+/*
+ * Output = SHA-1( input buffer ). Trigger the watchdog every 'chunk_sz'
+ * bytes of input processed.
+ */
+void sha1_csum_wd(const unsigned char *input, unsigned int ilen,
+		  unsigned char *output, unsigned int chunk_sz)
+{
+	sha1_context ctx;
+#if defined(CONFIG_HW_WATCHDOG) || defined(CONFIG_WATCHDOG)
+	const unsigned char *end, *curr;
+	int chunk;
+#endif
+
+	sha1_starts (&ctx);
+
+#if defined(CONFIG_HW_WATCHDOG) || defined(CONFIG_WATCHDOG)
+	curr = input;
+	end = input + ilen;
+	while (curr < end) {
+		chunk = end - curr;
+		if (chunk > chunk_sz)
+			chunk = chunk_sz;
+		sha1_update (&ctx, curr, chunk);
+		curr += chunk;
+		WATCHDOG_RESET ();
+	}
+#else
+	sha1_update (&ctx, input, ilen);
+#endif
+
+	sha1_finish (&ctx, output);
+}
+
+/*
+ * Output = HMAC-SHA-1( input buffer, hmac key )
+ */
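+/*
+ * Standard HMAC construction: H((K ^ opad) || H((K ^ ipad) || msg)).
+ * Note that keys longer than 64 bytes are truncated here rather than
+ * pre-hashed as RFC 2104 prescribes.
+ */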
+void sha1_hmac(const unsigned char *key, int keylen,
+	       const unsigned char *input, unsigned int ilen,
+	       unsigned char *output)
+{
+	int i;
+	sha1_context ctx;
+	unsigned char k_ipad[64];
+	unsigned char k_opad[64];
+	unsigned char tmpbuf[20];
+
+	memset (k_ipad, 0x36, 64);
+	memset (k_opad, 0x5C, 64);
+
+	for (i = 0; i < keylen; i++) {
+		if (i >= 64)
+			break;
+
+		k_ipad[i] ^= key[i];
+		k_opad[i] ^= key[i];
+	}
+
+	sha1_starts (&ctx);
+	sha1_update (&ctx, k_ipad, 64);
+	sha1_update (&ctx, input, ilen);
+	sha1_finish (&ctx, tmpbuf);
+
+	sha1_starts (&ctx);
+	sha1_update (&ctx, k_opad, 64);
+	sha1_update (&ctx, tmpbuf, 20);
+	sha1_finish (&ctx, output);
+
+	memset (k_ipad, 0, 64);
+	memset (k_opad, 0, 64);
+	memset (tmpbuf, 0, 20);
+	memset (&ctx, 0, sizeof (sha1_context));
+}
+
+static const char _sha1_src[] = "_sha1_src";
+
+#ifdef SELF_TEST
+/*
+ * FIPS-180-1 test vectors
+ */
+static const char sha1_test_str[3][57] = {
+	{"abc"},
+	{"abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq"},
+	{""}
+};
+
+static const unsigned char sha1_test_sum[3][20] = {
+	{0xA9, 0x99, 0x3E, 0x36, 0x47, 0x06, 0x81, 0x6A, 0xBA, 0x3E,
+	 0x25, 0x71, 0x78, 0x50, 0xC2, 0x6C, 0x9C, 0xD0, 0xD8, 0x9D},
+	{0x84, 0x98, 0x3E, 0x44, 0x1C, 0x3B, 0xD2, 0x6E, 0xBA, 0xAE,
+	 0x4A, 0xA1, 0xF9, 0x51, 0x29, 0xE5, 0xE5, 0x46, 0x70, 0xF1},
+	{0x34, 0xAA, 0x97, 0x3C, 0xD4, 0xC4, 0xDA, 0xA4, 0xF6, 0x1E,
+	 0xEB, 0x2B, 0xDB, 0xAD, 0x27, 0x31, 0x65, 0x34, 0x01, 0x6F}
+};
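+/* Test #3 is the FIPS "one million 'a's" vector, fed below as 1000
+ * updates of 1000 bytes each. */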
+
+/*
+ * Checkup routine
+ */
+int sha1_self_test (void)
+{
+	int i, j;
+	unsigned char buf[1000];
+	unsigned char sha1sum[20];
+	sha1_context ctx;
+
+	for (i = 0; i < 3; i++) {
+		printf ("  SHA-1 test #%d: ", i + 1);
+
+		sha1_starts (&ctx);
+
+		if (i < 2)
+			sha1_update (&ctx, (unsigned char *) sha1_test_str[i],
+				     strlen (sha1_test_str[i]));
+		else {
+			memset (buf, 'a', 1000);
+			for (j = 0; j < 1000; j++)
+				sha1_update (&ctx, buf, 1000);
+		}
+
+		sha1_finish (&ctx, sha1sum);
+
+		if (memcmp (sha1sum, sha1_test_sum[i], 20) != 0) {
+			printf ("failed\n");
+			return (1);
+		}
+
+		printf ("passed\n");
+	}
+
+	printf ("\n");
+	return (0);
+}
+#else
+int sha1_self_test (void)
+{
+	return (0);
+}
+#endif
diff --git a/marvell/uboot/lib/sha_neon/sha256-armv7-neon.S b/marvell/uboot/lib/sha_neon/sha256-armv7-neon.S
new file mode 100644
index 0000000..4d13b6c
--- /dev/null
+++ b/marvell/uboot/lib/sha_neon/sha256-armv7-neon.S
@@ -0,0 +1,811 @@
+#include <linux/linkage.h>
+
+.text
+.code   32
+.fpu neon
+
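+/*
+ * K256: the 64 SHA-256 round constants, followed by a zero terminator
+ * that the round loop uses to detect the end of the table.
+ */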
+.type  K256,%object
+.align 5
+K256:
+.word  0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
+.word  0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
+.word  0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
+.word  0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
+.word  0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
+.word  0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
+.word  0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
+.word  0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
+.word  0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
+.word  0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
+.word  0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
+.word  0xd192e819,0xd6990624,0xf40e3585,0x106aa070
+.word  0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
+.word  0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
+.word  0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
+.word  0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
+.size  K256,.-K256
+.word  0                               @ terminator
+.word  0
+
+.align 5
+ENTRY(sha256_transform_neon)
+       /* Input:
+        *      %r0: SHA256_CONTEXT
+        *      %r1: data
+        *      %r2: nblks
+        */
+       sub     r3,pc,#8                @ sha256_transform_neon
+
+       add     r2,r1,r2,lsl#6  @ len to point at the end of inp
+
+       stmdb   sp!,{r4-r12,lr}
+
+       mov     r12,sp
+       sub     sp,sp,#16*4+16          @ alloca
+       sub     r14,r3,#256+32  @ K256
+       bic     sp,sp,#15               @ align for 128-bit stores
+
+       vld1.8          {q0},[r1]!
+       vld1.8          {q1},[r1]!
+       vld1.8          {q2},[r1]!
+       vld1.8          {q3},[r1]!
+       vld1.32         {q8},[r14,:128]!
+       vld1.32         {q9},[r14,:128]!
+       vld1.32         {q10},[r14,:128]!
+       vld1.32         {q11},[r14,:128]!
+       vrev32.8        q0,q0           @ yes, even on
+       str             r0,[sp,#64]
+       vrev32.8        q1,q1           @ big-endian
+       str             r1,[sp,#68]
+       mov             r1,sp
+       vrev32.8        q2,q2
+       str             r2,[sp,#72]
+       vrev32.8        q3,q3
+       str             r12,[sp,#76]            @ save original sp
+       vadd.i32        q8,q8,q0
+       vadd.i32        q9,q9,q1
+       vst1.32         {q8},[r1,:128]!
+       vadd.i32        q10,q10,q2
+       vst1.32         {q9},[r1,:128]!
+       vadd.i32        q11,q11,q3
+       vst1.32         {q10},[r1,:128]!
+       vst1.32         {q11},[r1,:128]!
+
+       ldmia           r0,{r4-r11}
+       sub             r1,r1,#64
+       ldr             r2,[sp,#0]
+       eor             r12,r12,r12
+       eor             r3,r5,r6
+       b               .L_00_48
+
+.align 4
+.L_00_48:
+       vext.8  q8,q0,q1,#4
+       add     r11,r11,r2
+       eor     r2,r9,r10
+       eor     r0,r8,r8,ror#5
+       vext.8  q9,q2,q3,#4
+       add     r4,r4,r12
+       and     r2,r2,r8
+       eor     r12,r0,r8,ror#19
+       vshr.u32        q10,q8,#7
+       eor     r0,r4,r4,ror#11
+       eor     r2,r2,r10
+       vadd.i32        q0,q0,q9
+       add     r11,r11,r12,ror#6
+       eor     r12,r4,r5
+       vshr.u32        q9,q8,#3
+       eor     r0,r0,r4,ror#20
+       add     r11,r11,r2
+       vsli.32 q10,q8,#25
+       ldr     r2,[sp,#4]
+       and     r3,r3,r12
+       vshr.u32        q11,q8,#18
+       add     r7,r7,r11
+       add     r11,r11,r0,ror#2
+       eor     r3,r3,r5
+       veor    q9,q9,q10
+       add     r10,r10,r2
+       vsli.32 q11,q8,#14
+       eor     r2,r8,r9
+       eor     r0,r7,r7,ror#5
+       vshr.u32        d24,d7,#17
+       add     r11,r11,r3
+       and     r2,r2,r7
+       veor    q9,q9,q11
+       eor     r3,r0,r7,ror#19
+       eor     r0,r11,r11,ror#11
+       vsli.32 d24,d7,#15
+       eor     r2,r2,r9
+       add     r10,r10,r3,ror#6
+       vshr.u32        d25,d7,#10
+       eor     r3,r11,r4
+       eor     r0,r0,r11,ror#20
+       vadd.i32        q0,q0,q9
+       add     r10,r10,r2
+       ldr     r2,[sp,#8]
+       veor    d25,d25,d24
+       and     r12,r12,r3
+       add     r6,r6,r10
+       vshr.u32        d24,d7,#19
+       add     r10,r10,r0,ror#2
+       eor     r12,r12,r4
+       vsli.32 d24,d7,#13
+       add     r9,r9,r2
+       eor     r2,r7,r8
+       veor    d25,d25,d24
+       eor     r0,r6,r6,ror#5
+       add     r10,r10,r12
+       vadd.i32        d0,d0,d25
+       and     r2,r2,r6
+       eor     r12,r0,r6,ror#19
+       vshr.u32        d24,d0,#17
+       eor     r0,r10,r10,ror#11
+       eor     r2,r2,r8
+       vsli.32 d24,d0,#15
+       add     r9,r9,r12,ror#6
+       eor     r12,r10,r11
+       vshr.u32        d25,d0,#10
+       eor     r0,r0,r10,ror#20
+       add     r9,r9,r2
+       veor    d25,d25,d24
+       ldr     r2,[sp,#12]
+       and     r3,r3,r12
+       vshr.u32        d24,d0,#19
+       add     r5,r5,r9
+       add     r9,r9,r0,ror#2
+       eor     r3,r3,r11
+       vld1.32 {q8},[r14,:128]!
+       add     r8,r8,r2
+       vsli.32 d24,d0,#13
+       eor     r2,r6,r7
+       eor     r0,r5,r5,ror#5
+       veor    d25,d25,d24
+       add     r9,r9,r3
+       and     r2,r2,r5
+       vadd.i32        d1,d1,d25
+       eor     r3,r0,r5,ror#19
+       eor     r0,r9,r9,ror#11
+       vadd.i32        q8,q8,q0
+       eor     r2,r2,r7
+       add     r8,r8,r3,ror#6
+       eor     r3,r9,r10
+       eor     r0,r0,r9,ror#20
+       add     r8,r8,r2
+       ldr     r2,[sp,#16]
+       and     r12,r12,r3
+       add     r4,r4,r8
+       vst1.32 {q8},[r1,:128]!
+       add     r8,r8,r0,ror#2
+       eor     r12,r12,r10
+       vext.8  q8,q1,q2,#4
+       add     r7,r7,r2
+       eor     r2,r5,r6
+       eor     r0,r4,r4,ror#5
+       vext.8  q9,q3,q0,#4
+       add     r8,r8,r12
+       and     r2,r2,r4
+       eor     r12,r0,r4,ror#19
+       vshr.u32        q10,q8,#7
+       eor     r0,r8,r8,ror#11
+       eor     r2,r2,r6
+       vadd.i32        q1,q1,q9
+       add     r7,r7,r12,ror#6
+       eor     r12,r8,r9
+       vshr.u32        q9,q8,#3
+       eor     r0,r0,r8,ror#20
+       add     r7,r7,r2
+       vsli.32 q10,q8,#25
+       ldr     r2,[sp,#20]
+       and     r3,r3,r12
+       vshr.u32        q11,q8,#18
+       add     r11,r11,r7
+       add     r7,r7,r0,ror#2
+       eor     r3,r3,r9
+       veor    q9,q9,q10
+       add     r6,r6,r2
+       vsli.32 q11,q8,#14
+       eor     r2,r4,r5
+       eor     r0,r11,r11,ror#5
+       vshr.u32        d24,d1,#17
+       add     r7,r7,r3
+       and     r2,r2,r11
+       veor    q9,q9,q11
+       eor     r3,r0,r11,ror#19
+       eor     r0,r7,r7,ror#11
+       vsli.32 d24,d1,#15
+       eor     r2,r2,r5
+       add     r6,r6,r3,ror#6
+       vshr.u32        d25,d1,#10
+       eor     r3,r7,r8
+       eor     r0,r0,r7,ror#20
+       vadd.i32        q1,q1,q9
+       add     r6,r6,r2
+       ldr     r2,[sp,#24]
+       veor    d25,d25,d24
+       and     r12,r12,r3
+       add     r10,r10,r6
+       vshr.u32        d24,d1,#19
+       add     r6,r6,r0,ror#2
+       eor     r12,r12,r8
+       vsli.32 d24,d1,#13
+       add     r5,r5,r2
+       eor     r2,r11,r4
+       veor    d25,d25,d24
+       eor     r0,r10,r10,ror#5
+       add     r6,r6,r12
+       vadd.i32        d2,d2,d25
+       and     r2,r2,r10
+       eor     r12,r0,r10,ror#19
+       vshr.u32        d24,d2,#17
+       eor     r0,r6,r6,ror#11
+       eor     r2,r2,r4
+       vsli.32 d24,d2,#15
+       add     r5,r5,r12,ror#6
+       eor     r12,r6,r7
+       vshr.u32        d25,d2,#10
+       eor     r0,r0,r6,ror#20
+       add     r5,r5,r2
+       veor    d25,d25,d24
+       ldr     r2,[sp,#28]
+       and     r3,r3,r12
+       vshr.u32        d24,d2,#19
+       add     r9,r9,r5
+       add     r5,r5,r0,ror#2
+       eor     r3,r3,r7
+       vld1.32 {q8},[r14,:128]!
+       add     r4,r4,r2
+       vsli.32 d24,d2,#13
+       eor     r2,r10,r11
+       eor     r0,r9,r9,ror#5
+       veor    d25,d25,d24
+       add     r5,r5,r3
+       and     r2,r2,r9
+       vadd.i32        d3,d3,d25
+       eor     r3,r0,r9,ror#19
+       eor     r0,r5,r5,ror#11
+       vadd.i32        q8,q8,q1
+       eor     r2,r2,r11
+       add     r4,r4,r3,ror#6
+       eor     r3,r5,r6
+       eor     r0,r0,r5,ror#20
+       add     r4,r4,r2
+       ldr     r2,[sp,#32]
+       and     r12,r12,r3
+       add     r8,r8,r4
+       vst1.32 {q8},[r1,:128]!
+       add     r4,r4,r0,ror#2
+       eor     r12,r12,r6
+       vext.8  q8,q2,q3,#4
+       add     r11,r11,r2
+       eor     r2,r9,r10
+       eor     r0,r8,r8,ror#5
+       vext.8  q9,q0,q1,#4
+       add     r4,r4,r12
+       and     r2,r2,r8
+       eor     r12,r0,r8,ror#19
+       vshr.u32        q10,q8,#7
+       eor     r0,r4,r4,ror#11
+       eor     r2,r2,r10
+       vadd.i32        q2,q2,q9
+       add     r11,r11,r12,ror#6
+       eor     r12,r4,r5
+       vshr.u32        q9,q8,#3
+       eor     r0,r0,r4,ror#20
+       add     r11,r11,r2
+       vsli.32 q10,q8,#25
+       ldr     r2,[sp,#36]
+       and     r3,r3,r12
+       vshr.u32        q11,q8,#18
+       add     r7,r7,r11
+       add     r11,r11,r0,ror#2
+       eor     r3,r3,r5
+       veor    q9,q9,q10
+       add     r10,r10,r2
+       vsli.32 q11,q8,#14
+       eor     r2,r8,r9
+       eor     r0,r7,r7,ror#5
+       vshr.u32        d24,d3,#17
+       add     r11,r11,r3
+       and     r2,r2,r7
+       veor    q9,q9,q11
+       eor     r3,r0,r7,ror#19
+       eor     r0,r11,r11,ror#11
+       vsli.32 d24,d3,#15
+       eor     r2,r2,r9
+       add     r10,r10,r3,ror#6
+       vshr.u32        d25,d3,#10
+       eor     r3,r11,r4
+       eor     r0,r0,r11,ror#20
+       vadd.i32        q2,q2,q9
+       add     r10,r10,r2
+       ldr     r2,[sp,#40]
+       veor    d25,d25,d24
+       and     r12,r12,r3
+       add     r6,r6,r10
+       vshr.u32        d24,d3,#19
+       add     r10,r10,r0,ror#2
+       eor     r12,r12,r4
+       vsli.32 d24,d3,#13
+       add     r9,r9,r2
+       eor     r2,r7,r8
+       veor    d25,d25,d24
+       eor     r0,r6,r6,ror#5
+       add     r10,r10,r12
+       vadd.i32        d4,d4,d25
+       and     r2,r2,r6
+       eor     r12,r0,r6,ror#19
+       vshr.u32        d24,d4,#17
+       eor     r0,r10,r10,ror#11
+       eor     r2,r2,r8
+       vsli.32 d24,d4,#15
+       add     r9,r9,r12,ror#6
+       eor     r12,r10,r11
+       vshr.u32        d25,d4,#10
+       eor     r0,r0,r10,ror#20
+       add     r9,r9,r2
+       veor    d25,d25,d24
+       ldr     r2,[sp,#44]
+       and     r3,r3,r12
+       vshr.u32        d24,d4,#19
+       add     r5,r5,r9
+       add     r9,r9,r0,ror#2
+       eor     r3,r3,r11
+       vld1.32 {q8},[r14,:128]!
+       add     r8,r8,r2
+       vsli.32 d24,d4,#13
+       eor     r2,r6,r7
+       eor     r0,r5,r5,ror#5
+       veor    d25,d25,d24
+       add     r9,r9,r3
+       and     r2,r2,r5
+       vadd.i32        d5,d5,d25
+       eor     r3,r0,r5,ror#19
+       eor     r0,r9,r9,ror#11
+       vadd.i32        q8,q8,q2
+       eor     r2,r2,r7
+       add     r8,r8,r3,ror#6
+       eor     r3,r9,r10
+       eor     r0,r0,r9,ror#20
+       add     r8,r8,r2
+       ldr     r2,[sp,#48]
+       and     r12,r12,r3
+       add     r4,r4,r8
+       vst1.32 {q8},[r1,:128]!
+       add     r8,r8,r0,ror#2
+       eor     r12,r12,r10
+       vext.8  q8,q3,q0,#4
+       add     r7,r7,r2
+       eor     r2,r5,r6
+       eor     r0,r4,r4,ror#5
+       vext.8  q9,q1,q2,#4
+       add     r8,r8,r12
+       and     r2,r2,r4
+       eor     r12,r0,r4,ror#19
+       vshr.u32        q10,q8,#7
+       eor     r0,r8,r8,ror#11
+       eor     r2,r2,r6
+       vadd.i32        q3,q3,q9
+       add     r7,r7,r12,ror#6
+       eor     r12,r8,r9
+       vshr.u32        q9,q8,#3
+       eor     r0,r0,r8,ror#20
+       add     r7,r7,r2
+       vsli.32 q10,q8,#25
+       ldr     r2,[sp,#52]
+       and     r3,r3,r12
+       vshr.u32        q11,q8,#18
+       add     r11,r11,r7
+       add     r7,r7,r0,ror#2
+       eor     r3,r3,r9
+       veor    q9,q9,q10
+       add     r6,r6,r2
+       vsli.32 q11,q8,#14
+       eor     r2,r4,r5
+       eor     r0,r11,r11,ror#5
+       vshr.u32        d24,d5,#17
+       add     r7,r7,r3
+       and     r2,r2,r11
+       veor    q9,q9,q11
+       eor     r3,r0,r11,ror#19
+       eor     r0,r7,r7,ror#11
+       vsli.32 d24,d5,#15
+       eor     r2,r2,r5
+       add     r6,r6,r3,ror#6
+       vshr.u32        d25,d5,#10
+       eor     r3,r7,r8
+       eor     r0,r0,r7,ror#20
+       vadd.i32        q3,q3,q9
+       add     r6,r6,r2
+       ldr     r2,[sp,#56]
+       veor    d25,d25,d24
+       and     r12,r12,r3
+       add     r10,r10,r6
+       vshr.u32        d24,d5,#19
+       add     r6,r6,r0,ror#2
+       eor     r12,r12,r8
+       vsli.32 d24,d5,#13
+       add     r5,r5,r2
+       eor     r2,r11,r4
+       veor    d25,d25,d24
+       eor     r0,r10,r10,ror#5
+       add     r6,r6,r12
+       vadd.i32        d6,d6,d25
+       and     r2,r2,r10
+       eor     r12,r0,r10,ror#19
+       vshr.u32        d24,d6,#17
+       eor     r0,r6,r6,ror#11
+       eor     r2,r2,r4
+       vsli.32 d24,d6,#15
+       add     r5,r5,r12,ror#6
+       eor     r12,r6,r7
+       vshr.u32        d25,d6,#10
+       eor     r0,r0,r6,ror#20
+       add     r5,r5,r2
+       veor    d25,d25,d24
+       ldr     r2,[sp,#60]
+       and     r3,r3,r12
+       vshr.u32        d24,d6,#19
+       add     r9,r9,r5
+       add     r5,r5,r0,ror#2
+       eor     r3,r3,r7
+       vld1.32 {q8},[r14,:128]!
+       add     r4,r4,r2
+       vsli.32 d24,d6,#13
+       eor     r2,r10,r11
+       eor     r0,r9,r9,ror#5
+       veor    d25,d25,d24
+       add     r5,r5,r3
+       and     r2,r2,r9
+       vadd.i32        d7,d7,d25
+       eor     r3,r0,r9,ror#19
+       eor     r0,r5,r5,ror#11
+       vadd.i32        q8,q8,q3
+       eor     r2,r2,r11
+       add     r4,r4,r3,ror#6
+       eor     r3,r5,r6
+       eor     r0,r0,r5,ror#20
+       add     r4,r4,r2
+       ldr     r2,[r14]
+       and     r12,r12,r3
+       add     r8,r8,r4
+       vst1.32 {q8},[r1,:128]!
+       add     r4,r4,r0,ror#2
+       eor     r12,r12,r6
+       teq     r2,#0                           @ check for K256 terminator
+       ldr     r2,[sp,#0]
+       sub     r1,r1,#64
+       bne     .L_00_48
+
+       ldr             r1,[sp,#68]
+       ldr             r0,[sp,#72]
+       sub             r14,r14,#256    @ rewind r14
+       teq             r1,r0
+       subeq           r1,r1,#64               @ avoid SEGV
+       vld1.8          {q0},[r1]!              @ load next input block
+       vld1.8          {q1},[r1]!
+       vld1.8          {q2},[r1]!
+       vld1.8          {q3},[r1]!
+       strne           r1,[sp,#68]
+       mov             r1,sp
+       add     r11,r11,r2
+       eor     r2,r9,r10
+       eor     r0,r8,r8,ror#5
+       add     r4,r4,r12
+       vld1.32 {q8},[r14,:128]!
+       and     r2,r2,r8
+       eor     r12,r0,r8,ror#19
+       eor     r0,r4,r4,ror#11
+       eor     r2,r2,r10
+       vrev32.8        q0,q0
+       add     r11,r11,r12,ror#6
+       eor     r12,r4,r5
+       eor     r0,r0,r4,ror#20
+       add     r11,r11,r2
+       vadd.i32        q8,q8,q0
+       ldr     r2,[sp,#4]
+       and     r3,r3,r12
+       add     r7,r7,r11
+       add     r11,r11,r0,ror#2
+       eor     r3,r3,r5
+       add     r10,r10,r2
+       eor     r2,r8,r9
+       eor     r0,r7,r7,ror#5
+       add     r11,r11,r3
+       and     r2,r2,r7
+       eor     r3,r0,r7,ror#19
+       eor     r0,r11,r11,ror#11
+       eor     r2,r2,r9
+       add     r10,r10,r3,ror#6
+       eor     r3,r11,r4
+       eor     r0,r0,r11,ror#20
+       add     r10,r10,r2
+       ldr     r2,[sp,#8]
+       and     r12,r12,r3
+       add     r6,r6,r10
+       add     r10,r10,r0,ror#2
+       eor     r12,r12,r4
+       add     r9,r9,r2
+       eor     r2,r7,r8
+       eor     r0,r6,r6,ror#5
+       add     r10,r10,r12
+       and     r2,r2,r6
+       eor     r12,r0,r6,ror#19
+       eor     r0,r10,r10,ror#11
+       eor     r2,r2,r8
+       add     r9,r9,r12,ror#6
+       eor     r12,r10,r11
+       eor     r0,r0,r10,ror#20
+       add     r9,r9,r2
+       ldr     r2,[sp,#12]
+       and     r3,r3,r12
+       add     r5,r5,r9
+       add     r9,r9,r0,ror#2
+       eor     r3,r3,r11
+       add     r8,r8,r2
+       eor     r2,r6,r7
+       eor     r0,r5,r5,ror#5
+       add     r9,r9,r3
+       and     r2,r2,r5
+       eor     r3,r0,r5,ror#19
+       eor     r0,r9,r9,ror#11
+       eor     r2,r2,r7
+       add     r8,r8,r3,ror#6
+       eor     r3,r9,r10
+       eor     r0,r0,r9,ror#20
+       add     r8,r8,r2
+       ldr     r2,[sp,#16]
+       and     r12,r12,r3
+       add     r4,r4,r8
+       add     r8,r8,r0,ror#2
+       eor     r12,r12,r10
+       vst1.32 {q8},[r1,:128]!
+       add     r7,r7,r2
+       eor     r2,r5,r6
+       eor     r0,r4,r4,ror#5
+       add     r8,r8,r12
+       vld1.32 {q8},[r14,:128]!
+       and     r2,r2,r4
+       eor     r12,r0,r4,ror#19
+       eor     r0,r8,r8,ror#11
+       eor     r2,r2,r6
+       vrev32.8        q1,q1
+       add     r7,r7,r12,ror#6
+       eor     r12,r8,r9
+       eor     r0,r0,r8,ror#20
+       add     r7,r7,r2
+       vadd.i32        q8,q8,q1
+       ldr     r2,[sp,#20]
+       and     r3,r3,r12
+       add     r11,r11,r7
+       add     r7,r7,r0,ror#2
+       eor     r3,r3,r9
+       add     r6,r6,r2
+       eor     r2,r4,r5
+       eor     r0,r11,r11,ror#5
+       add     r7,r7,r3
+       and     r2,r2,r11
+       eor     r3,r0,r11,ror#19
+       eor     r0,r7,r7,ror#11
+       eor     r2,r2,r5
+       add     r6,r6,r3,ror#6
+       eor     r3,r7,r8
+       eor     r0,r0,r7,ror#20
+       add     r6,r6,r2
+       ldr     r2,[sp,#24]
+       and     r12,r12,r3
+       add     r10,r10,r6
+       add     r6,r6,r0,ror#2
+       eor     r12,r12,r8
+       add     r5,r5,r2
+       eor     r2,r11,r4
+       eor     r0,r10,r10,ror#5
+       add     r6,r6,r12
+       and     r2,r2,r10
+       eor     r12,r0,r10,ror#19
+       eor     r0,r6,r6,ror#11
+       eor     r2,r2,r4
+       add     r5,r5,r12,ror#6
+       eor     r12,r6,r7
+       eor     r0,r0,r6,ror#20
+       add     r5,r5,r2
+       ldr     r2,[sp,#28]
+       and     r3,r3,r12
+       add     r9,r9,r5
+       add     r5,r5,r0,ror#2
+       eor     r3,r3,r7
+       add     r4,r4,r2
+       eor     r2,r10,r11
+       eor     r0,r9,r9,ror#5
+       add     r5,r5,r3
+       and     r2,r2,r9
+       eor     r3,r0,r9,ror#19
+       eor     r0,r5,r5,ror#11
+       eor     r2,r2,r11
+       add     r4,r4,r3,ror#6
+       eor     r3,r5,r6
+       eor     r0,r0,r5,ror#20
+       add     r4,r4,r2
+       ldr     r2,[sp,#32]
+       and     r12,r12,r3
+       add     r8,r8,r4
+       add     r4,r4,r0,ror#2
+       eor     r12,r12,r6
+       vst1.32 {q8},[r1,:128]!
+       add     r11,r11,r2
+       eor     r2,r9,r10
+       eor     r0,r8,r8,ror#5
+       add     r4,r4,r12
+       vld1.32 {q8},[r14,:128]!
+       and     r2,r2,r8
+       eor     r12,r0,r8,ror#19
+       eor     r0,r4,r4,ror#11
+       eor     r2,r2,r10
+       vrev32.8        q2,q2
+       add     r11,r11,r12,ror#6
+       eor     r12,r4,r5
+       eor     r0,r0,r4,ror#20
+       add     r11,r11,r2
+       vadd.i32        q8,q8,q2
+       ldr     r2,[sp,#36]
+       and     r3,r3,r12
+       add     r7,r7,r11
+       add     r11,r11,r0,ror#2
+       eor     r3,r3,r5
+       add     r10,r10,r2
+       eor     r2,r8,r9
+       eor     r0,r7,r7,ror#5
+       add     r11,r11,r3
+       and     r2,r2,r7
+       eor     r3,r0,r7,ror#19
+       eor     r0,r11,r11,ror#11
+       eor     r2,r2,r9
+       add     r10,r10,r3,ror#6
+       eor     r3,r11,r4
+       eor     r0,r0,r11,ror#20
+       add     r10,r10,r2
+       ldr     r2,[sp,#40]
+       and     r12,r12,r3
+       add     r6,r6,r10
+       add     r10,r10,r0,ror#2
+       eor     r12,r12,r4
+       add     r9,r9,r2
+       eor     r2,r7,r8
+       eor     r0,r6,r6,ror#5
+       add     r10,r10,r12
+       and     r2,r2,r6
+       eor     r12,r0,r6,ror#19
+       eor     r0,r10,r10,ror#11
+       eor     r2,r2,r8
+       add     r9,r9,r12,ror#6
+       eor     r12,r10,r11
+       eor     r0,r0,r10,ror#20
+       add     r9,r9,r2
+       ldr     r2,[sp,#44]
+       and     r3,r3,r12
+       add     r5,r5,r9
+       add     r9,r9,r0,ror#2
+       eor     r3,r3,r11
+       add     r8,r8,r2
+       eor     r2,r6,r7
+       eor     r0,r5,r5,ror#5
+       add     r9,r9,r3
+       and     r2,r2,r5
+       eor     r3,r0,r5,ror#19
+       eor     r0,r9,r9,ror#11
+       eor     r2,r2,r7
+       add     r8,r8,r3,ror#6
+       eor     r3,r9,r10
+       eor     r0,r0,r9,ror#20
+       add     r8,r8,r2
+       ldr     r2,[sp,#48]
+       and     r12,r12,r3
+       add     r4,r4,r8
+       add     r8,r8,r0,ror#2
+       eor     r12,r12,r10
+       vst1.32 {q8},[r1,:128]!
+       add     r7,r7,r2
+       eor     r2,r5,r6
+       eor     r0,r4,r4,ror#5
+       add     r8,r8,r12
+       vld1.32 {q8},[r14,:128]!
+       and     r2,r2,r4
+       eor     r12,r0,r4,ror#19
+       eor     r0,r8,r8,ror#11
+       eor     r2,r2,r6
+       vrev32.8        q3,q3
+       add     r7,r7,r12,ror#6
+       eor     r12,r8,r9
+       eor     r0,r0,r8,ror#20
+       add     r7,r7,r2
+       vadd.i32        q8,q8,q3
+       ldr     r2,[sp,#52]
+       and     r3,r3,r12
+       add     r11,r11,r7
+       add     r7,r7,r0,ror#2
+       eor     r3,r3,r9
+       add     r6,r6,r2
+       eor     r2,r4,r5
+       eor     r0,r11,r11,ror#5
+       add     r7,r7,r3
+       and     r2,r2,r11
+       eor     r3,r0,r11,ror#19
+       eor     r0,r7,r7,ror#11
+       eor     r2,r2,r5
+       add     r6,r6,r3,ror#6
+       eor     r3,r7,r8
+       eor     r0,r0,r7,ror#20
+       add     r6,r6,r2
+       ldr     r2,[sp,#56]
+       and     r12,r12,r3
+       add     r10,r10,r6
+       add     r6,r6,r0,ror#2
+       eor     r12,r12,r8
+       add     r5,r5,r2
+       eor     r2,r11,r4
+       eor     r0,r10,r10,ror#5
+       add     r6,r6,r12
+       and     r2,r2,r10
+       eor     r12,r0,r10,ror#19
+       eor     r0,r6,r6,ror#11
+       eor     r2,r2,r4
+       add     r5,r5,r12,ror#6
+       eor     r12,r6,r7
+       eor     r0,r0,r6,ror#20
+       add     r5,r5,r2
+       ldr     r2,[sp,#60]
+       and     r3,r3,r12
+       add     r9,r9,r5
+       add     r5,r5,r0,ror#2
+       eor     r3,r3,r7
+       add     r4,r4,r2
+       eor     r2,r10,r11
+       eor     r0,r9,r9,ror#5
+       add     r5,r5,r3
+       and     r2,r2,r9
+       eor     r3,r0,r9,ror#19
+       eor     r0,r5,r5,ror#11
+       eor     r2,r2,r11
+       add     r4,r4,r3,ror#6
+       eor     r3,r5,r6
+       eor     r0,r0,r5,ror#20
+       add     r4,r4,r2
+       ldr     r2,[sp,#64]
+       and     r12,r12,r3
+       add     r8,r8,r4
+       add     r4,r4,r0,ror#2
+       eor     r12,r12,r6
+       vst1.32 {q8},[r1,:128]!
+       ldr     r0,[r2,#0]
+       add     r4,r4,r12                       @ h+=Maj(a,b,c) from the past
+       ldr     r12,[r2,#4]
+       ldr     r3,[r2,#8]
+       ldr     r1,[r2,#12]
+       add     r4,r4,r0                        @ accumulate
+       ldr     r0,[r2,#16]
+       add     r5,r5,r12
+       ldr     r12,[r2,#20]
+       add     r6,r6,r3
+       ldr     r3,[r2,#24]
+       add     r7,r7,r1
+       ldr     r1,[r2,#28]
+       add     r8,r8,r0
+       str     r4,[r2],#4
+       add     r9,r9,r12
+       str     r5,[r2],#4
+       add     r10,r10,r3
+       str     r6,[r2],#4
+       add     r11,r11,r1
+       str     r7,[r2],#4
+       stmia   r2,{r8-r11}
+
+       movne   r1,sp
+       ldrne   r2,[sp,#0]
+       eorne   r12,r12,r12
+       ldreq   sp,[sp,#76]                     @ restore original sp
+       eorne   r3,r5,r6
+       bne     .L_00_48
+
+       ldmia   sp!,{r4-r12,pc}
+ENDPROC(sha256_transform_neon)
diff --git a/marvell/uboot/lib/sha_neon/sha256_neon.c b/marvell/uboot/lib/sha_neon/sha256_neon.c
new file mode 100644
index 0000000..1214674
--- /dev/null
+++ b/marvell/uboot/lib/sha_neon/sha256_neon.c
@@ -0,0 +1,133 @@
+/*
+ * FIPS-180-2 compliant SHA-256 implementation
+ *
+ * Copyright (C) 2001-2003  Christophe Devine
+ *
+ * SPDX-License-Identifier:	GPL-2.0+
+ */
+
+#ifndef USE_HOSTCC
+#include <common.h>
+#endif /* USE_HOSTCC */
+#include <watchdog.h>
+#include <linux/string.h>
+#include <sha256.h>
+
+asmlinkage int	neon_en_check(void);
+asmlinkage void	neon_enable(void);
+asmlinkage void	neon_disable(void);
+asmlinkage void sha256_transform_neon(u32 *digest, const void *data,
+                      unsigned int num_blks);
+
+/*
+ * 32-bit integer manipulation macros (big endian)
+ */
+#ifndef PUT_UINT32_BE
+#define PUT_UINT32_BE(n,b,i) {				\
+    (b)[(i)    ] = (unsigned char) ( (n) >> 24 );	\
+    (b)[(i) + 1] = (unsigned char) ( (n) >> 16 );	\
+    (b)[(i) + 2] = (unsigned char) ( (n) >>  8 );	\
+    (b)[(i) + 3] = (unsigned char) ( (n)       );	\
+}
+#endif
+
+void sha256_starts_neon(sha256_context * ctx)
+{
+    ctx->total[0] = 0;
+    ctx->total[1] = 0;
+
+    ctx->state[0] = 0x6A09E667;
+    ctx->state[1] = 0xBB67AE85;
+    ctx->state[2] = 0x3C6EF372;
+    ctx->state[3] = 0xA54FF53A;
+    ctx->state[4] = 0x510E527F;
+    ctx->state[5] = 0x9B05688C;
+    ctx->state[6] = 0x1F83D9AB;
+    ctx->state[7] = 0x5BE0CD19;
+}
+
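+/* As with SHA-1, enable NEON around the transform and restore its state. */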
+static inline void sha256_process(sha256_context *ctx,
+                                  const uint8_t *data, uint32_t blks)
+{
+    int neon_en = neon_en_check();
+
+    if (!neon_en)
+        neon_enable();
+
+    sha256_transform_neon(ctx->state, data, blks);
+
+    if (!neon_en)
+        neon_disable();
+}
+
+void sha256_update_neon(sha256_context *ctx, const uint8_t *input, uint32_t length)
+{
+    uint32_t left, fill;
+    uint32_t blks;
+
+    if (!length)
+        return;
+
+    left = ctx->total[0] & 0x3F;
+    fill = 64 - left;
+
+    ctx->total[0] += length;
+    ctx->total[0] &= 0xFFFFFFFF;
+
+    if (ctx->total[0] < length)
+        ctx->total[1]++;
+
+    if (left && length >= fill) {
+        memcpy((void *) (ctx->buffer + left), (void *) input, fill);
+        sha256_process(ctx, ctx->buffer, 1);
+        length -= fill;
+        input += fill;
+        left = 0;
+    }
+
+    blks = length / 64;
+    if (blks) {
+        sha256_process(ctx, input, blks);
+        length -= 64 * blks;
+        input += 64 * blks;
+    }
+
+    if (length)
+        memcpy((void *) (ctx->buffer + left), (void *) input, length);
+}
+
+static const uint8_t sha256_padding[64] = {
+    0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+       0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+       0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+       0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+void sha256_finish_neon(sha256_context * ctx, uint8_t digest[32])
+{
+    uint32_t last, padn;
+    uint32_t high, low;
+    uint8_t msglen[8];
+
+    high = ((ctx->total[0] >> 29)
+        | (ctx->total[1] << 3));
+    low = (ctx->total[0] << 3);
+
+    PUT_UINT32_BE(high, msglen, 0);
+    PUT_UINT32_BE(low, msglen, 4);
+
+    last = ctx->total[0] & 0x3F;
+    padn = (last < 56) ? (56 - last) : (120 - last);
+
+    sha256_update_neon(ctx, sha256_padding, padn);
+    sha256_update_neon(ctx, msglen, 8);
+
+    PUT_UINT32_BE(ctx->state[0], digest, 0);
+    PUT_UINT32_BE(ctx->state[1], digest, 4);
+    PUT_UINT32_BE(ctx->state[2], digest, 8);
+    PUT_UINT32_BE(ctx->state[3], digest, 12);
+    PUT_UINT32_BE(ctx->state[4], digest, 16);
+    PUT_UINT32_BE(ctx->state[5], digest, 20);
+    PUT_UINT32_BE(ctx->state[6], digest, 24);
+    PUT_UINT32_BE(ctx->state[7], digest, 28);
+}