[Feature] Add MT2731_MP2_MR2_SVN388 baseline version

Change-Id: Ief04314834b31e27effab435d3ca8ba33b499059
diff --git a/src/bsp/lk/lib/sha256/arch/arm/rules.mk b/src/bsp/lk/lib/sha256/arch/arm/rules.mk
new file mode 100644
index 0000000..cb96dd4
--- /dev/null
+++ b/src/bsp/lk/lib/sha256/arch/arm/rules.mk
@@ -0,0 +1,4 @@
+LOCAL_DIR := $(GET_LOCAL_DIR)
+
+MODULE_SRCS += \
+    $(LOCAL_DIR)/sha256-core.S
diff --git a/src/bsp/lk/lib/sha256/arch/arm/sha256-core.S b/src/bsp/lk/lib/sha256/arch/arm/sha256-core.S
new file mode 100644
index 0000000..3804be8
--- /dev/null
+++ b/src/bsp/lk/lib/sha256/arch/arm/sha256-core.S
@@ -0,0 +1,862 @@
+@ ====================================================================
+@ Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+@ project. The module is, however, dual licensed under OpenSSL and
+@ CRYPTOGAMS licenses depending on where you obtain it. For further
+@ details see http://www.openssl.org/~appro/cryptogams/.
+@
+@ Permission to use under GPL terms is granted.
+@ ====================================================================
+
+@ SHA256 block procedure for ARMv4. May 2007.
+
+	@ Performance is ~2x better than gcc 3.4 generated code and in "abso-
+	@ lute" terms is ~2250 cycles per 64-byte block or ~35 cycles per
+	@ byte [on single-issue Xscale PXA250 core].
+
+		@ July 2010.
+		@
+		@ Rescheduling for dual-issue pipeline resulted in 22% improvement on
+		@ Cortex A8 core and ~20 cycles per processed byte.
+
+		@ February 2011.
+		@
+		@ Profiler-assisted and platform-specific optimization resulted in 16%
+		@ improvement on Cortex A8 core and ~15.4 cycles per processed byte.
+
+		@ September 2013.
+		@
+		@ Add NEON implementation. On Cortex A8 it was measured to process one
+		@ byte in 12.5 cycles or 23% faster than integer-only code. Snapdragon
+		@ S4 does it in 12.5 cycles too, but it's 50% faster than integer-only
+		@ code (meaning that the latter performs sub-optimally, nothing was done
+		@ about it).
+
+		@ May 2014.
+		@
+		@ Add ARMv8 code path performing at 2.0 cpb on Apple A7.
+
+# define __KERNEL__
+# define __ARM_ARCH__ __LINUX_ARM_ARCH__
+# define __ARM_MAX_ARCH__ 7
+
+		.text
+#if __ARM_ARCH__<7
+		.code	32
+#else
+		.syntax unified
+# ifdef __thumb2__
+#  define adrl adr
+		.thumb
+# else
+		.code   32
+# endif
+#endif
+
+		.type	K256,%object
+		.align	5			@ 32-byte alignment for the constant table
+		K256:				@ 64 SHA-256 round constants (FIPS 180-4 sec. 4.2.2)
+		.word	0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
+		.word	0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
+		.word	0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
+		.word	0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
+		.word	0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
+		.word	0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
+		.word	0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
+		.word	0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
+		.word	0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
+		.word	0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
+		.word	0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
+		.word	0xd192e819,0xd6990624,0xf40e3585,0x106aa070
+		.word	0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
+		.word	0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
+		.word	0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
+		.word	0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
+		.size	K256,.-K256
+		.word	0				@ terminator: deliberately past .size; the "teq r2,#0" in .L_00_48 stops on it
+
+#if __ARM_MAX_ARCH__>=7
+		.arch	armv7-a
+		.fpu	neon
+
+		.global	sha256_block_data_order
+		.type	sha256_block_data_order,%function
+		.align	4
+		sha256_block_data_order:
+		.LNEON:
+		stmdb	sp!,{r4-r12,lr}
+
+		sub	r11,sp,#16*4+16
+		adrl	r14,K256
+		bic	r11,r11,#15		@ align for 128-bit stores
+		mov	r12,sp
+		mov	sp,r11			@ alloca
+		add	r2,r1,r2,lsl#6	@ len to point at the end of inp
+
+		vld1.8		{q0},[r1]!
+		vld1.8		{q1},[r1]!
+		vld1.8		{q2},[r1]!
+		vld1.8		{q3},[r1]!
+		vld1.32		{q8},[r14,:128]!
+		vld1.32		{q9},[r14,:128]!
+		vld1.32		{q10},[r14,:128]!
+		vld1.32		{q11},[r14,:128]!
+		vrev32.8	q0,q0		@ yes, even on
+		str		r0,[sp,#64]
+		vrev32.8	q1,q1		@ big-endian
+		str		r1,[sp,#68]
+		mov		r1,sp
+		vrev32.8	q2,q2
+		str		r2,[sp,#72]
+		vrev32.8	q3,q3
+		str		r12,[sp,#76]		@ save original sp
+		vadd.i32	q8,q8,q0
+		vadd.i32	q9,q9,q1
+		vst1.32		{q8},[r1,:128]!
+		vadd.i32	q10,q10,q2
+		vst1.32		{q9},[r1,:128]!
+		vadd.i32	q11,q11,q3
+		vst1.32		{q10},[r1,:128]!
+		vst1.32		{q11},[r1,:128]!
+
+		ldmia		r0,{r4-r11}
+		sub		r1,r1,#64
+		ldr		r2,[sp,#0]
+		eor		r12,r12,r12
+		eor		r3,r5,r6
+		b		.L_00_48
+
+		.align	4
+		.L_00_48:
+		vext.8	q8,q0,q1,#4
+		add	r11,r11,r2
+		eor	r2,r9,r10
+		eor	r0,r8,r8,ror#5
+		vext.8	q9,q2,q3,#4
+		add	r4,r4,r12
+		and	r2,r2,r8
+		eor	r12,r0,r8,ror#19
+		vshr.u32	q10,q8,#7
+		eor	r0,r4,r4,ror#11
+		eor	r2,r2,r10
+		vadd.i32	q0,q0,q9
+		add	r11,r11,r12,ror#6
+		eor	r12,r4,r5
+		vshr.u32	q9,q8,#3
+		eor	r0,r0,r4,ror#20
+		add	r11,r11,r2
+		vsli.32	q10,q8,#25
+		ldr	r2,[sp,#4]
+		and	r3,r3,r12
+		vshr.u32	q11,q8,#18
+		add	r7,r7,r11
+		add	r11,r11,r0,ror#2
+		eor	r3,r3,r5
+		veor	q9,q9,q10
+		add	r10,r10,r2
+		vsli.32	q11,q8,#14
+		eor	r2,r8,r9
+		eor	r0,r7,r7,ror#5
+		vshr.u32	d24,d7,#17
+		add	r11,r11,r3
+		and	r2,r2,r7
+		veor	q9,q9,q11
+		eor	r3,r0,r7,ror#19
+		eor	r0,r11,r11,ror#11
+		vsli.32	d24,d7,#15
+		eor	r2,r2,r9
+		add	r10,r10,r3,ror#6
+		vshr.u32	d25,d7,#10
+		eor	r3,r11,r4
+		eor	r0,r0,r11,ror#20
+		vadd.i32	q0,q0,q9
+		add	r10,r10,r2
+		ldr	r2,[sp,#8]
+		veor	d25,d25,d24
+		and	r12,r12,r3
+		add	r6,r6,r10
+		vshr.u32	d24,d7,#19
+		add	r10,r10,r0,ror#2
+		eor	r12,r12,r4
+		vsli.32	d24,d7,#13
+		add	r9,r9,r2
+		eor	r2,r7,r8
+		veor	d25,d25,d24
+		eor	r0,r6,r6,ror#5
+		add	r10,r10,r12
+		vadd.i32	d0,d0,d25
+		and	r2,r2,r6
+		eor	r12,r0,r6,ror#19
+		vshr.u32	d24,d0,#17
+		eor	r0,r10,r10,ror#11
+		eor	r2,r2,r8
+		vsli.32	d24,d0,#15
+		add	r9,r9,r12,ror#6
+		eor	r12,r10,r11
+		vshr.u32	d25,d0,#10
+		eor	r0,r0,r10,ror#20
+		add	r9,r9,r2
+		veor	d25,d25,d24
+		ldr	r2,[sp,#12]
+		and	r3,r3,r12
+		vshr.u32	d24,d0,#19
+		add	r5,r5,r9
+		add	r9,r9,r0,ror#2
+		eor	r3,r3,r11
+		vld1.32	{q8},[r14,:128]!
+		add	r8,r8,r2
+		vsli.32	d24,d0,#13
+		eor	r2,r6,r7
+		eor	r0,r5,r5,ror#5
+		veor	d25,d25,d24
+		add	r9,r9,r3
+		and	r2,r2,r5
+		vadd.i32	d1,d1,d25
+		eor	r3,r0,r5,ror#19
+		eor	r0,r9,r9,ror#11
+		vadd.i32	q8,q8,q0
+		eor	r2,r2,r7
+		add	r8,r8,r3,ror#6
+		eor	r3,r9,r10
+		eor	r0,r0,r9,ror#20
+		add	r8,r8,r2
+		ldr	r2,[sp,#16]
+		and	r12,r12,r3
+		add	r4,r4,r8
+		vst1.32	{q8},[r1,:128]!
+		add	r8,r8,r0,ror#2
+		eor	r12,r12,r10
+		vext.8	q8,q1,q2,#4
+		add	r7,r7,r2
+		eor	r2,r5,r6
+		eor	r0,r4,r4,ror#5
+		vext.8	q9,q3,q0,#4
+		add	r8,r8,r12
+		and	r2,r2,r4
+		eor	r12,r0,r4,ror#19
+		vshr.u32	q10,q8,#7
+		eor	r0,r8,r8,ror#11
+		eor	r2,r2,r6
+		vadd.i32	q1,q1,q9
+		add	r7,r7,r12,ror#6
+		eor	r12,r8,r9
+		vshr.u32	q9,q8,#3
+		eor	r0,r0,r8,ror#20
+		add	r7,r7,r2
+		vsli.32	q10,q8,#25
+		ldr	r2,[sp,#20]
+		and	r3,r3,r12
+		vshr.u32	q11,q8,#18
+		add	r11,r11,r7
+		add	r7,r7,r0,ror#2
+		eor	r3,r3,r9
+		veor	q9,q9,q10
+		add	r6,r6,r2
+		vsli.32	q11,q8,#14
+		eor	r2,r4,r5
+		eor	r0,r11,r11,ror#5
+		vshr.u32	d24,d1,#17
+		add	r7,r7,r3
+		and	r2,r2,r11
+		veor	q9,q9,q11
+		eor	r3,r0,r11,ror#19
+		eor	r0,r7,r7,ror#11
+		vsli.32	d24,d1,#15
+		eor	r2,r2,r5
+		add	r6,r6,r3,ror#6
+		vshr.u32	d25,d1,#10
+		eor	r3,r7,r8
+		eor	r0,r0,r7,ror#20
+		vadd.i32	q1,q1,q9
+		add	r6,r6,r2
+		ldr	r2,[sp,#24]
+		veor	d25,d25,d24
+		and	r12,r12,r3
+		add	r10,r10,r6
+		vshr.u32	d24,d1,#19
+		add	r6,r6,r0,ror#2
+		eor	r12,r12,r8
+		vsli.32	d24,d1,#13
+		add	r5,r5,r2
+		eor	r2,r11,r4
+		veor	d25,d25,d24
+		eor	r0,r10,r10,ror#5
+		add	r6,r6,r12
+		vadd.i32	d2,d2,d25
+		and	r2,r2,r10
+		eor	r12,r0,r10,ror#19
+		vshr.u32	d24,d2,#17
+		eor	r0,r6,r6,ror#11
+		eor	r2,r2,r4
+		vsli.32	d24,d2,#15
+		add	r5,r5,r12,ror#6
+		eor	r12,r6,r7
+		vshr.u32	d25,d2,#10
+		eor	r0,r0,r6,ror#20
+		add	r5,r5,r2
+		veor	d25,d25,d24
+		ldr	r2,[sp,#28]
+		and	r3,r3,r12
+		vshr.u32	d24,d2,#19
+		add	r9,r9,r5
+		add	r5,r5,r0,ror#2
+		eor	r3,r3,r7
+		vld1.32	{q8},[r14,:128]!
+		add	r4,r4,r2
+		vsli.32	d24,d2,#13
+		eor	r2,r10,r11
+		eor	r0,r9,r9,ror#5
+		veor	d25,d25,d24
+		add	r5,r5,r3
+		and	r2,r2,r9
+		vadd.i32	d3,d3,d25
+		eor	r3,r0,r9,ror#19
+		eor	r0,r5,r5,ror#11
+		vadd.i32	q8,q8,q1
+		eor	r2,r2,r11
+		add	r4,r4,r3,ror#6
+		eor	r3,r5,r6
+		eor	r0,r0,r5,ror#20
+		add	r4,r4,r2
+		ldr	r2,[sp,#32]
+		and	r12,r12,r3
+		add	r8,r8,r4
+		vst1.32	{q8},[r1,:128]!
+		add	r4,r4,r0,ror#2
+		eor	r12,r12,r6
+		vext.8	q8,q2,q3,#4
+		add	r11,r11,r2
+		eor	r2,r9,r10
+		eor	r0,r8,r8,ror#5
+		vext.8	q9,q0,q1,#4
+		add	r4,r4,r12
+		and	r2,r2,r8
+		eor	r12,r0,r8,ror#19
+		vshr.u32	q10,q8,#7
+		eor	r0,r4,r4,ror#11
+		eor	r2,r2,r10
+		vadd.i32	q2,q2,q9
+		add	r11,r11,r12,ror#6
+		eor	r12,r4,r5
+		vshr.u32	q9,q8,#3
+		eor	r0,r0,r4,ror#20
+		add	r11,r11,r2
+		vsli.32	q10,q8,#25
+		ldr	r2,[sp,#36]
+		and	r3,r3,r12
+		vshr.u32	q11,q8,#18
+		add	r7,r7,r11
+		add	r11,r11,r0,ror#2
+		eor	r3,r3,r5
+		veor	q9,q9,q10
+		add	r10,r10,r2
+		vsli.32	q11,q8,#14
+		eor	r2,r8,r9
+		eor	r0,r7,r7,ror#5
+		vshr.u32	d24,d3,#17
+		add	r11,r11,r3
+		and	r2,r2,r7
+		veor	q9,q9,q11
+		eor	r3,r0,r7,ror#19
+		eor	r0,r11,r11,ror#11
+		vsli.32	d24,d3,#15
+		eor	r2,r2,r9
+		add	r10,r10,r3,ror#6
+		vshr.u32	d25,d3,#10
+		eor	r3,r11,r4
+		eor	r0,r0,r11,ror#20
+		vadd.i32	q2,q2,q9
+		add	r10,r10,r2
+		ldr	r2,[sp,#40]
+		veor	d25,d25,d24
+		and	r12,r12,r3
+		add	r6,r6,r10
+		vshr.u32	d24,d3,#19
+		add	r10,r10,r0,ror#2
+		eor	r12,r12,r4
+		vsli.32	d24,d3,#13
+		add	r9,r9,r2
+		eor	r2,r7,r8
+		veor	d25,d25,d24
+		eor	r0,r6,r6,ror#5
+		add	r10,r10,r12
+		vadd.i32	d4,d4,d25
+		and	r2,r2,r6
+		eor	r12,r0,r6,ror#19
+		vshr.u32	d24,d4,#17
+		eor	r0,r10,r10,ror#11
+		eor	r2,r2,r8
+		vsli.32	d24,d4,#15
+		add	r9,r9,r12,ror#6
+		eor	r12,r10,r11
+		vshr.u32	d25,d4,#10
+		eor	r0,r0,r10,ror#20
+		add	r9,r9,r2
+		veor	d25,d25,d24
+		ldr	r2,[sp,#44]
+		and	r3,r3,r12
+		vshr.u32	d24,d4,#19
+		add	r5,r5,r9
+		add	r9,r9,r0,ror#2
+		eor	r3,r3,r11
+		vld1.32	{q8},[r14,:128]!
+		add	r8,r8,r2
+		vsli.32	d24,d4,#13
+		eor	r2,r6,r7
+		eor	r0,r5,r5,ror#5
+		veor	d25,d25,d24
+		add	r9,r9,r3
+		and	r2,r2,r5
+		vadd.i32	d5,d5,d25
+		eor	r3,r0,r5,ror#19
+		eor	r0,r9,r9,ror#11
+		vadd.i32	q8,q8,q2
+		eor	r2,r2,r7
+		add	r8,r8,r3,ror#6
+		eor	r3,r9,r10
+		eor	r0,r0,r9,ror#20
+		add	r8,r8,r2
+		ldr	r2,[sp,#48]
+		and	r12,r12,r3
+		add	r4,r4,r8
+		vst1.32	{q8},[r1,:128]!
+		add	r8,r8,r0,ror#2
+		eor	r12,r12,r10
+		vext.8	q8,q3,q0,#4
+		add	r7,r7,r2
+		eor	r2,r5,r6
+		eor	r0,r4,r4,ror#5
+		vext.8	q9,q1,q2,#4
+		add	r8,r8,r12
+		and	r2,r2,r4
+		eor	r12,r0,r4,ror#19
+		vshr.u32	q10,q8,#7
+		eor	r0,r8,r8,ror#11
+		eor	r2,r2,r6
+		vadd.i32	q3,q3,q9
+		add	r7,r7,r12,ror#6
+		eor	r12,r8,r9
+		vshr.u32	q9,q8,#3
+		eor	r0,r0,r8,ror#20
+		add	r7,r7,r2
+		vsli.32	q10,q8,#25
+		ldr	r2,[sp,#52]
+		and	r3,r3,r12
+		vshr.u32	q11,q8,#18
+		add	r11,r11,r7
+		add	r7,r7,r0,ror#2
+		eor	r3,r3,r9
+		veor	q9,q9,q10
+		add	r6,r6,r2
+		vsli.32	q11,q8,#14
+		eor	r2,r4,r5
+		eor	r0,r11,r11,ror#5
+		vshr.u32	d24,d5,#17
+		add	r7,r7,r3
+		and	r2,r2,r11
+		veor	q9,q9,q11
+		eor	r3,r0,r11,ror#19
+		eor	r0,r7,r7,ror#11
+		vsli.32	d24,d5,#15
+		eor	r2,r2,r5
+		add	r6,r6,r3,ror#6
+		vshr.u32	d25,d5,#10
+		eor	r3,r7,r8
+		eor	r0,r0,r7,ror#20
+		vadd.i32	q3,q3,q9
+		add	r6,r6,r2
+		ldr	r2,[sp,#56]
+		veor	d25,d25,d24
+		and	r12,r12,r3
+		add	r10,r10,r6
+		vshr.u32	d24,d5,#19
+		add	r6,r6,r0,ror#2
+		eor	r12,r12,r8
+		vsli.32	d24,d5,#13
+		add	r5,r5,r2
+		eor	r2,r11,r4
+		veor	d25,d25,d24
+		eor	r0,r10,r10,ror#5
+		add	r6,r6,r12
+		vadd.i32	d6,d6,d25
+		and	r2,r2,r10
+		eor	r12,r0,r10,ror#19
+		vshr.u32	d24,d6,#17
+		eor	r0,r6,r6,ror#11
+		eor	r2,r2,r4
+		vsli.32	d24,d6,#15
+		add	r5,r5,r12,ror#6
+		eor	r12,r6,r7
+		vshr.u32	d25,d6,#10
+		eor	r0,r0,r6,ror#20
+		add	r5,r5,r2
+		veor	d25,d25,d24
+		ldr	r2,[sp,#60]
+		and	r3,r3,r12
+		vshr.u32	d24,d6,#19
+		add	r9,r9,r5
+		add	r5,r5,r0,ror#2
+		eor	r3,r3,r7
+		vld1.32	{q8},[r14,:128]!
+		add	r4,r4,r2
+		vsli.32	d24,d6,#13
+		eor	r2,r10,r11
+		eor	r0,r9,r9,ror#5
+		veor	d25,d25,d24
+		add	r5,r5,r3
+		and	r2,r2,r9
+		vadd.i32	d7,d7,d25
+		eor	r3,r0,r9,ror#19
+		eor	r0,r5,r5,ror#11
+		vadd.i32	q8,q8,q3
+		eor	r2,r2,r11
+		add	r4,r4,r3,ror#6
+		eor	r3,r5,r6
+		eor	r0,r0,r5,ror#20
+		add	r4,r4,r2
+		ldr	r2,[r14]
+		and	r12,r12,r3
+		add	r8,r8,r4
+		vst1.32	{q8},[r1,:128]!
+		add	r4,r4,r0,ror#2
+		eor	r12,r12,r6
+		teq	r2,#0				@ check for K256 terminator
+		ldr	r2,[sp,#0]
+		sub	r1,r1,#64
+		bne	.L_00_48
+
+		ldr		r1,[sp,#68]
+		ldr		r0,[sp,#72]
+		sub		r14,r14,#256	@ rewind r14
+		teq		r1,r0
+		it		eq
+		subeq		r1,r1,#64		@ avoid SEGV
+		vld1.8		{q0},[r1]!		@ load next input block
+		vld1.8		{q1},[r1]!
+		vld1.8		{q2},[r1]!
+		vld1.8		{q3},[r1]!
+		it		ne
+		strne		r1,[sp,#68]
+		mov		r1,sp
+		add	r11,r11,r2
+		eor	r2,r9,r10
+		eor	r0,r8,r8,ror#5
+		add	r4,r4,r12
+		vld1.32	{q8},[r14,:128]!
+		and	r2,r2,r8
+		eor	r12,r0,r8,ror#19
+		eor	r0,r4,r4,ror#11
+		eor	r2,r2,r10
+		vrev32.8	q0,q0
+		add	r11,r11,r12,ror#6
+		eor	r12,r4,r5
+		eor	r0,r0,r4,ror#20
+		add	r11,r11,r2
+		vadd.i32	q8,q8,q0
+		ldr	r2,[sp,#4]
+		and	r3,r3,r12
+		add	r7,r7,r11
+		add	r11,r11,r0,ror#2
+		eor	r3,r3,r5
+		add	r10,r10,r2
+		eor	r2,r8,r9
+		eor	r0,r7,r7,ror#5
+		add	r11,r11,r3
+		and	r2,r2,r7
+		eor	r3,r0,r7,ror#19
+		eor	r0,r11,r11,ror#11
+		eor	r2,r2,r9
+		add	r10,r10,r3,ror#6
+		eor	r3,r11,r4
+		eor	r0,r0,r11,ror#20
+		add	r10,r10,r2
+		ldr	r2,[sp,#8]
+		and	r12,r12,r3
+		add	r6,r6,r10
+		add	r10,r10,r0,ror#2
+		eor	r12,r12,r4
+		add	r9,r9,r2
+		eor	r2,r7,r8
+		eor	r0,r6,r6,ror#5
+		add	r10,r10,r12
+		and	r2,r2,r6
+		eor	r12,r0,r6,ror#19
+		eor	r0,r10,r10,ror#11
+		eor	r2,r2,r8
+		add	r9,r9,r12,ror#6
+		eor	r12,r10,r11
+		eor	r0,r0,r10,ror#20
+		add	r9,r9,r2
+		ldr	r2,[sp,#12]
+		and	r3,r3,r12
+		add	r5,r5,r9
+		add	r9,r9,r0,ror#2
+		eor	r3,r3,r11
+		add	r8,r8,r2
+		eor	r2,r6,r7
+		eor	r0,r5,r5,ror#5
+		add	r9,r9,r3
+		and	r2,r2,r5
+		eor	r3,r0,r5,ror#19
+		eor	r0,r9,r9,ror#11
+		eor	r2,r2,r7
+		add	r8,r8,r3,ror#6
+		eor	r3,r9,r10
+		eor	r0,r0,r9,ror#20
+		add	r8,r8,r2
+		ldr	r2,[sp,#16]
+		and	r12,r12,r3
+		add	r4,r4,r8
+		add	r8,r8,r0,ror#2
+		eor	r12,r12,r10
+		vst1.32	{q8},[r1,:128]!
+		add	r7,r7,r2
+		eor	r2,r5,r6
+		eor	r0,r4,r4,ror#5
+		add	r8,r8,r12
+		vld1.32	{q8},[r14,:128]!
+		and	r2,r2,r4
+		eor	r12,r0,r4,ror#19
+		eor	r0,r8,r8,ror#11
+		eor	r2,r2,r6
+		vrev32.8	q1,q1
+		add	r7,r7,r12,ror#6
+		eor	r12,r8,r9
+		eor	r0,r0,r8,ror#20
+		add	r7,r7,r2
+		vadd.i32	q8,q8,q1
+		ldr	r2,[sp,#20]
+		and	r3,r3,r12
+		add	r11,r11,r7
+		add	r7,r7,r0,ror#2
+		eor	r3,r3,r9
+		add	r6,r6,r2
+		eor	r2,r4,r5
+		eor	r0,r11,r11,ror#5
+		add	r7,r7,r3
+		and	r2,r2,r11
+		eor	r3,r0,r11,ror#19
+		eor	r0,r7,r7,ror#11
+		eor	r2,r2,r5
+		add	r6,r6,r3,ror#6
+		eor	r3,r7,r8
+		eor	r0,r0,r7,ror#20
+		add	r6,r6,r2
+		ldr	r2,[sp,#24]
+		and	r12,r12,r3
+		add	r10,r10,r6
+		add	r6,r6,r0,ror#2
+		eor	r12,r12,r8
+		add	r5,r5,r2
+		eor	r2,r11,r4
+		eor	r0,r10,r10,ror#5
+		add	r6,r6,r12
+		and	r2,r2,r10
+		eor	r12,r0,r10,ror#19
+		eor	r0,r6,r6,ror#11
+		eor	r2,r2,r4
+		add	r5,r5,r12,ror#6
+		eor	r12,r6,r7
+		eor	r0,r0,r6,ror#20
+		add	r5,r5,r2
+		ldr	r2,[sp,#28]
+		and	r3,r3,r12
+		add	r9,r9,r5
+		add	r5,r5,r0,ror#2
+		eor	r3,r3,r7
+		add	r4,r4,r2
+		eor	r2,r10,r11
+		eor	r0,r9,r9,ror#5
+		add	r5,r5,r3
+		and	r2,r2,r9
+		eor	r3,r0,r9,ror#19
+		eor	r0,r5,r5,ror#11
+		eor	r2,r2,r11
+		add	r4,r4,r3,ror#6
+		eor	r3,r5,r6
+		eor	r0,r0,r5,ror#20
+		add	r4,r4,r2
+		ldr	r2,[sp,#32]
+		and	r12,r12,r3
+		add	r8,r8,r4
+		add	r4,r4,r0,ror#2
+		eor	r12,r12,r6
+		vst1.32	{q8},[r1,:128]!
+		add	r11,r11,r2
+		eor	r2,r9,r10
+		eor	r0,r8,r8,ror#5
+		add	r4,r4,r12
+		vld1.32	{q8},[r14,:128]!
+		and	r2,r2,r8
+		eor	r12,r0,r8,ror#19
+		eor	r0,r4,r4,ror#11
+		eor	r2,r2,r10
+		vrev32.8	q2,q2
+		add	r11,r11,r12,ror#6
+		eor	r12,r4,r5
+		eor	r0,r0,r4,ror#20
+		add	r11,r11,r2
+		vadd.i32	q8,q8,q2
+		ldr	r2,[sp,#36]
+		and	r3,r3,r12
+		add	r7,r7,r11
+		add	r11,r11,r0,ror#2
+		eor	r3,r3,r5
+		add	r10,r10,r2
+		eor	r2,r8,r9
+		eor	r0,r7,r7,ror#5
+		add	r11,r11,r3
+		and	r2,r2,r7
+		eor	r3,r0,r7,ror#19
+		eor	r0,r11,r11,ror#11
+		eor	r2,r2,r9
+		add	r10,r10,r3,ror#6
+		eor	r3,r11,r4
+		eor	r0,r0,r11,ror#20
+		add	r10,r10,r2
+		ldr	r2,[sp,#40]
+		and	r12,r12,r3
+		add	r6,r6,r10
+		add	r10,r10,r0,ror#2
+		eor	r12,r12,r4
+		add	r9,r9,r2
+		eor	r2,r7,r8
+		eor	r0,r6,r6,ror#5
+		add	r10,r10,r12
+		and	r2,r2,r6
+		eor	r12,r0,r6,ror#19
+		eor	r0,r10,r10,ror#11
+		eor	r2,r2,r8
+		add	r9,r9,r12,ror#6
+		eor	r12,r10,r11
+		eor	r0,r0,r10,ror#20
+		add	r9,r9,r2
+		ldr	r2,[sp,#44]
+		and	r3,r3,r12
+		add	r5,r5,r9
+		add	r9,r9,r0,ror#2
+		eor	r3,r3,r11
+		add	r8,r8,r2
+		eor	r2,r6,r7
+		eor	r0,r5,r5,ror#5
+		add	r9,r9,r3
+		and	r2,r2,r5
+		eor	r3,r0,r5,ror#19
+		eor	r0,r9,r9,ror#11
+		eor	r2,r2,r7
+		add	r8,r8,r3,ror#6
+		eor	r3,r9,r10
+		eor	r0,r0,r9,ror#20
+		add	r8,r8,r2
+		ldr	r2,[sp,#48]
+		and	r12,r12,r3
+		add	r4,r4,r8
+		add	r8,r8,r0,ror#2
+		eor	r12,r12,r10
+		vst1.32	{q8},[r1,:128]!
+		add	r7,r7,r2
+		eor	r2,r5,r6
+		eor	r0,r4,r4,ror#5
+		add	r8,r8,r12
+		vld1.32	{q8},[r14,:128]!
+		and	r2,r2,r4
+		eor	r12,r0,r4,ror#19
+		eor	r0,r8,r8,ror#11
+		eor	r2,r2,r6
+		vrev32.8	q3,q3
+		add	r7,r7,r12,ror#6
+		eor	r12,r8,r9
+		eor	r0,r0,r8,ror#20
+		add	r7,r7,r2
+		vadd.i32	q8,q8,q3
+		ldr	r2,[sp,#52]
+		and	r3,r3,r12
+		add	r11,r11,r7
+		add	r7,r7,r0,ror#2
+		eor	r3,r3,r9
+		add	r6,r6,r2
+		eor	r2,r4,r5
+		eor	r0,r11,r11,ror#5
+		add	r7,r7,r3
+		and	r2,r2,r11
+		eor	r3,r0,r11,ror#19
+		eor	r0,r7,r7,ror#11
+		eor	r2,r2,r5
+		add	r6,r6,r3,ror#6
+		eor	r3,r7,r8
+		eor	r0,r0,r7,ror#20
+		add	r6,r6,r2
+		ldr	r2,[sp,#56]
+		and	r12,r12,r3
+		add	r10,r10,r6
+		add	r6,r6,r0,ror#2
+		eor	r12,r12,r8
+		add	r5,r5,r2
+		eor	r2,r11,r4
+		eor	r0,r10,r10,ror#5
+		add	r6,r6,r12
+		and	r2,r2,r10
+		eor	r12,r0,r10,ror#19
+		eor	r0,r6,r6,ror#11
+		eor	r2,r2,r4
+		add	r5,r5,r12,ror#6
+		eor	r12,r6,r7
+		eor	r0,r0,r6,ror#20
+		add	r5,r5,r2
+		ldr	r2,[sp,#60]
+		and	r3,r3,r12
+		add	r9,r9,r5
+		add	r5,r5,r0,ror#2
+		eor	r3,r3,r7
+		add	r4,r4,r2
+		eor	r2,r10,r11
+		eor	r0,r9,r9,ror#5
+		add	r5,r5,r3
+		and	r2,r2,r9
+		eor	r3,r0,r9,ror#19
+		eor	r0,r5,r5,ror#11
+		eor	r2,r2,r11
+		add	r4,r4,r3,ror#6
+		eor	r3,r5,r6
+		eor	r0,r0,r5,ror#20
+		add	r4,r4,r2
+		ldr	r2,[sp,#64]
+		and	r12,r12,r3
+		add	r8,r8,r4
+		add	r4,r4,r0,ror#2
+		eor	r12,r12,r6
+		vst1.32	{q8},[r1,:128]!
+		ldr	r0,[r2,#0]
+		add	r4,r4,r12			@ h+=Maj(a,b,c) from the past
+		ldr	r12,[r2,#4]
+		ldr	r3,[r2,#8]
+		ldr	r1,[r2,#12]
+		add	r4,r4,r0			@ accumulate
+		ldr	r0,[r2,#16]
+		add	r5,r5,r12
+		ldr	r12,[r2,#20]
+		add	r6,r6,r3
+		ldr	r3,[r2,#24]
+		add	r7,r7,r1
+		ldr	r1,[r2,#28]
+		add	r8,r8,r0
+		str	r4,[r2],#4
+		add	r9,r9,r12
+		str	r5,[r2],#4
+		add	r10,r10,r3
+		str	r6,[r2],#4
+		add	r11,r11,r1
+		str	r7,[r2],#4
+		stmia	r2,{r8-r11}
+
+		ittte	ne
+		movne	r1,sp
+		ldrne	r2,[sp,#0]
+		eorne	r12,r12,r12
+		ldreq	sp,[sp,#76]			@ restore original sp
+		itt	ne
+		eorne	r3,r5,r6
+		bne	.L_00_48
+
+		ldmia	sp!,{r4-r12,pc}
+		.size	sha256_block_data_order,.-sha256_block_data_order
+#endif
diff --git a/src/bsp/lk/lib/sha256/arch/arm64/rules.mk b/src/bsp/lk/lib/sha256/arch/arm64/rules.mk
new file mode 100644
index 0000000..f81b92a
--- /dev/null
+++ b/src/bsp/lk/lib/sha256/arch/arm64/rules.mk
@@ -0,0 +1,4 @@
+LOCAL_DIR := $(GET_LOCAL_DIR)
+
+MODULE_SRCS += \
+    $(LOCAL_DIR)/sha256-armv8.S
diff --git a/src/bsp/lk/lib/sha256/arch/arm64/sha256-armv8.S b/src/bsp/lk/lib/sha256/arch/arm64/sha256-armv8.S
new file mode 100644
index 0000000..28be18a
--- /dev/null
+++ b/src/bsp/lk/lib/sha256/arch/arm64/sha256-armv8.S
@@ -0,0 +1,198 @@
+/*

+ * ====================================================================

+ * Written by Andy Polyakov <appro@openssl.org> for the OpenSSL

+ * project. The module is, however, dual licensed under OpenSSL and

+ * CRYPTOGAMS licenses depending on where you obtain it. For further

+ * details see http://www.openssl.org/~appro/cryptogams/.

+ * ====================================================================

+ *

+ * SHA256/512 for ARMv8.

+ *

+ * Performance in cycles per processed byte and improvement coefficient

+ * over code generated with "default" compiler:

+ *

+ *		SHA256-hw	SHA256(*)	SHA512

+ * Apple A7	1.97		10.5 (+33%)	6.73 (-1%(**))

+ * Cortex-A53	2.38		15.6 (+110%)	10.1 (+190%(***))

+ * Cortex-A57	2.31		11.6 (+86%)	7.51 (+260%(***))

+ *

+ * (*)	Software SHA256 results are of lesser relevance, presented

+ *	mostly for informational purposes.

+ * (**)	The result is a trade-off: it's possible to improve it by

+ *	10% (or by 1 cycle per round), but at the cost of 20% loss

+ *	on Cortex-A53 (or by 4 cycles per round).

+ * (***)	Super-impressive coefficients over gcc-generated code are

+ *	an indication of some compiler "pathology", most notably code

+ *	generated with -mgeneral-regs-only is significantly faster

+ *	and lags behind assembly only by 50-90%.

+ */

+

+.text

+.globl	sha256_block_data_order

+.type	sha256_block_data_order,%function

+.align	6

+.type	.LK256,%object

+.LK256:

+.long	0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5

+.long	0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5

+.long	0xd807aa98,0x12835b01,0x243185be,0x550c7dc3

+.long	0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174

+.long	0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc

+.long	0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da

+.long	0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7

+.long	0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967

+.long	0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13

+.long	0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85

+.long	0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3

+.long	0xd192e819,0xd6990624,0xf40e3585,0x106aa070

+.long	0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5

+.long	0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3

+.long	0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208

+.long	0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2

+.long	0	//terminator

+.size	.LK256,.-.LK256

+.align	3

+.LOPENSSL_armcap_P:

+.quad	OPENSSL_armcap_P-.

+.byte	83,72,65,50,53,54,32,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0

+.align	2

+.align	2

+.type	sha256_block_data_order,%function

+.align	6

+sha256_block_data_order:

+.Lv8_entry:

+	stp	x29,x30,[sp,#-16]!

+	add	x29,sp,#0

+

+	ld1	{v0.4s,v1.4s},[x0]

+	adr	x3,.LK256

+

+.Loop_hw:

+	ld1	{v4.16b,v5.16b,v6.16b,v7.16b},[x1],#64

+	sub	x2,x2,#1

+	ld1	{v16.4s},[x3],#16

+	rev32	v4.16b,v4.16b

+	rev32	v5.16b,v5.16b

+	rev32	v6.16b,v6.16b

+	rev32	v7.16b,v7.16b

+	orr	v18.16b,v0.16b,v0.16b		// offload

+	orr	v19.16b,v1.16b,v1.16b

+	ld1	{v17.4s},[x3],#16

+	add	v16.4s,v16.4s,v4.4s

+.inst	0x5e2828a4	//sha256su0 v4.16b,v5.16b

+	orr	v2.16b,v0.16b,v0.16b

+.inst	0x5e104020	//sha256h v0.16b,v1.16b,v16.4s

+.inst	0x5e105041	//sha256h2 v1.16b,v2.16b,v16.4s

+.inst	0x5e0760c4	//sha256su1 v4.16b,v6.16b,v7.16b

+	ld1	{v16.4s},[x3],#16

+	add	v17.4s,v17.4s,v5.4s

+.inst	0x5e2828c5	//sha256su0 v5.16b,v6.16b

+	orr	v2.16b,v0.16b,v0.16b

+.inst	0x5e114020	//sha256h v0.16b,v1.16b,v17.4s

+.inst	0x5e115041	//sha256h2 v1.16b,v2.16b,v17.4s

+.inst	0x5e0460e5	//sha256su1 v5.16b,v7.16b,v4.16b

+	ld1	{v17.4s},[x3],#16

+	add	v16.4s,v16.4s,v6.4s

+.inst	0x5e2828e6	//sha256su0 v6.16b,v7.16b

+	orr	v2.16b,v0.16b,v0.16b

+.inst	0x5e104020	//sha256h v0.16b,v1.16b,v16.4s

+.inst	0x5e105041	//sha256h2 v1.16b,v2.16b,v16.4s

+.inst	0x5e056086	//sha256su1 v6.16b,v4.16b,v5.16b

+	ld1	{v16.4s},[x3],#16

+	add	v17.4s,v17.4s,v7.4s

+.inst	0x5e282887	//sha256su0 v7.16b,v4.16b

+	orr	v2.16b,v0.16b,v0.16b

+.inst	0x5e114020	//sha256h v0.16b,v1.16b,v17.4s

+.inst	0x5e115041	//sha256h2 v1.16b,v2.16b,v17.4s

+.inst	0x5e0660a7	//sha256su1 v7.16b,v5.16b,v6.16b

+	ld1	{v17.4s},[x3],#16

+	add	v16.4s,v16.4s,v4.4s

+.inst	0x5e2828a4	//sha256su0 v4.16b,v5.16b

+	orr	v2.16b,v0.16b,v0.16b

+.inst	0x5e104020	//sha256h v0.16b,v1.16b,v16.4s

+.inst	0x5e105041	//sha256h2 v1.16b,v2.16b,v16.4s

+.inst	0x5e0760c4	//sha256su1 v4.16b,v6.16b,v7.16b

+	ld1	{v16.4s},[x3],#16

+	add	v17.4s,v17.4s,v5.4s

+.inst	0x5e2828c5	//sha256su0 v5.16b,v6.16b

+	orr	v2.16b,v0.16b,v0.16b

+.inst	0x5e114020	//sha256h v0.16b,v1.16b,v17.4s

+.inst	0x5e115041	//sha256h2 v1.16b,v2.16b,v17.4s

+.inst	0x5e0460e5	//sha256su1 v5.16b,v7.16b,v4.16b

+	ld1	{v17.4s},[x3],#16

+	add	v16.4s,v16.4s,v6.4s

+.inst	0x5e2828e6	//sha256su0 v6.16b,v7.16b

+	orr	v2.16b,v0.16b,v0.16b

+.inst	0x5e104020	//sha256h v0.16b,v1.16b,v16.4s

+.inst	0x5e105041	//sha256h2 v1.16b,v2.16b,v16.4s

+.inst	0x5e056086	//sha256su1 v6.16b,v4.16b,v5.16b

+	ld1	{v16.4s},[x3],#16

+	add	v17.4s,v17.4s,v7.4s

+.inst	0x5e282887	//sha256su0 v7.16b,v4.16b

+	orr	v2.16b,v0.16b,v0.16b

+.inst	0x5e114020	//sha256h v0.16b,v1.16b,v17.4s

+.inst	0x5e115041	//sha256h2 v1.16b,v2.16b,v17.4s

+.inst	0x5e0660a7	//sha256su1 v7.16b,v5.16b,v6.16b

+	ld1	{v17.4s},[x3],#16

+	add	v16.4s,v16.4s,v4.4s

+.inst	0x5e2828a4	//sha256su0 v4.16b,v5.16b

+	orr	v2.16b,v0.16b,v0.16b

+.inst	0x5e104020	//sha256h v0.16b,v1.16b,v16.4s

+.inst	0x5e105041	//sha256h2 v1.16b,v2.16b,v16.4s

+.inst	0x5e0760c4	//sha256su1 v4.16b,v6.16b,v7.16b

+	ld1	{v16.4s},[x3],#16

+	add	v17.4s,v17.4s,v5.4s

+.inst	0x5e2828c5	//sha256su0 v5.16b,v6.16b

+	orr	v2.16b,v0.16b,v0.16b

+.inst	0x5e114020	//sha256h v0.16b,v1.16b,v17.4s

+.inst	0x5e115041	//sha256h2 v1.16b,v2.16b,v17.4s

+.inst	0x5e0460e5	//sha256su1 v5.16b,v7.16b,v4.16b

+	ld1	{v17.4s},[x3],#16

+	add	v16.4s,v16.4s,v6.4s

+.inst	0x5e2828e6	//sha256su0 v6.16b,v7.16b

+	orr	v2.16b,v0.16b,v0.16b

+.inst	0x5e104020	//sha256h v0.16b,v1.16b,v16.4s

+.inst	0x5e105041	//sha256h2 v1.16b,v2.16b,v16.4s

+.inst	0x5e056086	//sha256su1 v6.16b,v4.16b,v5.16b

+	ld1	{v16.4s},[x3],#16

+	add	v17.4s,v17.4s,v7.4s

+.inst	0x5e282887	//sha256su0 v7.16b,v4.16b

+	orr	v2.16b,v0.16b,v0.16b

+.inst	0x5e114020	//sha256h v0.16b,v1.16b,v17.4s

+.inst	0x5e115041	//sha256h2 v1.16b,v2.16b,v17.4s

+.inst	0x5e0660a7	//sha256su1 v7.16b,v5.16b,v6.16b

+	ld1	{v17.4s},[x3],#16

+	add	v16.4s,v16.4s,v4.4s

+	orr	v2.16b,v0.16b,v0.16b

+.inst	0x5e104020	//sha256h v0.16b,v1.16b,v16.4s

+.inst	0x5e105041	//sha256h2 v1.16b,v2.16b,v16.4s

+

+	ld1	{v16.4s},[x3],#16

+	add	v17.4s,v17.4s,v5.4s

+	orr	v2.16b,v0.16b,v0.16b

+.inst	0x5e114020	//sha256h v0.16b,v1.16b,v17.4s

+.inst	0x5e115041	//sha256h2 v1.16b,v2.16b,v17.4s

+

+	ld1	{v17.4s},[x3]

+	add	v16.4s,v16.4s,v6.4s

+	sub	x3,x3,#64*4-16	// rewind

+	orr	v2.16b,v0.16b,v0.16b

+.inst	0x5e104020	//sha256h v0.16b,v1.16b,v16.4s

+.inst	0x5e105041	//sha256h2 v1.16b,v2.16b,v16.4s

+

+	add	v17.4s,v17.4s,v7.4s

+	orr	v2.16b,v0.16b,v0.16b

+.inst	0x5e114020	//sha256h v0.16b,v1.16b,v17.4s

+.inst	0x5e115041	//sha256h2 v1.16b,v2.16b,v17.4s

+

+	add	v0.4s,v0.4s,v18.4s

+	add	v1.4s,v1.4s,v19.4s

+

+	cbnz	x2,.Loop_hw

+

+	st1	{v0.4s,v1.4s},[x0]

+

+	ldr	x29,[sp],#16

+	ret

+.size	sha256_block_data_order,.-sha256_block_data_order

+.comm	OPENSSL_armcap_P,4,4

diff --git a/src/bsp/lk/lib/sha256/include/sha256.h b/src/bsp/lk/lib/sha256/include/sha256.h
new file mode 100644
index 0000000..8e746d5
--- /dev/null
+++ b/src/bsp/lk/lib/sha256/include/sha256.h
@@ -0,0 +1,13 @@
+#include <sys/types.h>
+
+#define SHA256_BLOCK_SIZE       64
+
+struct sha256_context {
+    u32 state[8];               /* working digest H0..H7; seeded by sha256_start */
+    u64 count;                  /* total bytes absorbed so far (bit length = count << 3 at finalization) */
+    u8 buf[SHA256_BLOCK_SIZE];  /* staging buffer for a partial 64-byte block */
+};
+
+int sha256_start(struct sha256_context *s_ctx);                                       /* init ctx; 0 on success, -1 on NULL */
+int sha256_process(struct sha256_context *s_ctx, const u8 *input, unsigned int len);  /* absorb len bytes; 0 on success, -1 on NULL ctx */
+int sha256_end(struct sha256_context *s_ctx, u8 *out);                                /* pad, write 32-byte big-endian digest, wipe ctx */
diff --git a/src/bsp/lk/lib/sha256/rules.mk b/src/bsp/lk/lib/sha256/rules.mk
new file mode 100644
index 0000000..153bde6
--- /dev/null
+++ b/src/bsp/lk/lib/sha256/rules.mk
@@ -0,0 +1,9 @@
+LOCAL_DIR := $(GET_LOCAL_DIR)
+
+MODULE := $(LOCAL_DIR)
+
+MODULE_SRCS += \
+    $(LOCAL_DIR)/sha256.c
+
+include $(LOCAL_DIR)/arch/$(ARCH)/rules.mk
+include make/module.mk
diff --git a/src/bsp/lk/lib/sha256/sha256.c b/src/bsp/lk/lib/sha256/sha256.c
new file mode 100644
index 0000000..0120bcb
--- /dev/null
+++ b/src/bsp/lk/lib/sha256/sha256.c
@@ -0,0 +1,129 @@
+/*
+ * Copyright (c) 2020 MediaTek Inc.
+ *
+ * Use of this source code is governed by a MIT-style
+ * license that can be found in the LICENSE file or at
+ * https://opensource.org/licenses/MIT
+ */
+
+#include <string.h>
+#include <sys/types.h>
+#include "sha256.h"
+
+#define htobe64 __builtin_bswap64
+
+typedef unsigned int __be32;
+typedef unsigned long long __be64;
+
+extern void sha256_block_data_order(u32 *digest, const void *data,
+                                  unsigned int num_blks);
+
+int sha256_start(struct sha256_context *s_ctx)  /* initialize a hashing context; returns 0 on success, -1 if s_ctx is NULL */
+{
+    struct sha256_context *sctx = s_ctx;
+    if(NULL ==s_ctx) return -1;
+    sctx->state[0] = 0x6a09e667UL;  /* SHA-256 initial hash values H0..H7 (FIPS 180-4 sec. 5.3.3) */
+    sctx->state[1] = 0xbb67ae85UL;
+    sctx->state[2] = 0x3c6ef372UL;
+    sctx->state[3] = 0xa54ff53aUL;
+    sctx->state[4] = 0x510e527fUL;
+    sctx->state[5] = 0x9b05688cUL;
+    sctx->state[6] = 0x1f83d9abUL;
+    sctx->state[7] = 0x5be0cd19UL;
+    sctx->count = 0;                /* no bytes absorbed yet; buf stays uninitialized until first use */
+
+    return 0;
+}
+
+static inline int sha256_padding(struct sha256_context *s_ctx)  /* FIPS 180-4 finalization: append 0x80, zero-fill, 64-bit big-endian bit length, compress; always returns 0; no NULL check -- callers must pass a valid ctx */
+{
+    struct sha256_context *sctx = (s_ctx);
+    const unsigned int bit_offset = SHA256_BLOCK_SIZE - sizeof(__be64);  /* 56: offset of the length field in the final block */
+    __be64 *bits = (__be64 *)(sctx->buf + bit_offset);
+    unsigned int non_block_align = sctx->count % SHA256_BLOCK_SIZE;      /* bytes already buffered */
+
+    sctx->buf[non_block_align++] = 0x80;                                 /* mandatory 1-bit end-of-message marker */
+    if (non_block_align > bit_offset) {                                  /* no room left for the length: flush a zero-padded block first */
+        memset(sctx->buf + non_block_align, 0x0, SHA256_BLOCK_SIZE - non_block_align);
+        sha256_block_data_order((u32*)sctx, sctx->buf, 1);               /* (u32*)sctx is valid because state[] is the first member */
+        non_block_align = 0;
+    }
+
+    memset(sctx->buf + non_block_align, 0x0, bit_offset - non_block_align);
+    *bits = __builtin_bswap64(sctx->count << 3);                         /* bit length, byte-swapped; assumes little-endian target -- TODO confirm (the htobe64 macro above is never used) */
+    sha256_block_data_order((u32*)sctx, sctx->buf, 1);
+
+    return 0;
+}
+
+static inline void u32_split_u8(u32 val, u8 *p)  /* store val into p[0..3] big-endian, regardless of host endianness */
+{
+    *p++ = val >> 24;
+    *p++ = val >> 16;
+    *p++ = val >> 8;
+    *p++ = val;
+}
+
+int sha256_process(struct sha256_context *s_ctx, const u8 *input,
+                         unsigned int len)  /* absorb len bytes of input into the running hash; 0 on success, -1 if s_ctx is NULL */
+{
+    struct sha256_context *sctx = s_ctx;
+    int block_num;
+    unsigned int non_block_align;
+    int fill;
+
+    if (s_ctx == NULL)
+        return -1;
+    non_block_align = sctx->count % SHA256_BLOCK_SIZE;  /* bytes left buffered by a previous call */
+    fill = SHA256_BLOCK_SIZE - non_block_align;         /* bytes needed to complete that partial block */
+    sctx->count += len;
+
+    if ((non_block_align + len) >= SHA256_BLOCK_SIZE) {
+        if (non_block_align) {                          /* top up the partial block and compress it */
+            memcpy(sctx->buf + non_block_align, input, fill);
+            sha256_block_data_order((u32*)sctx, sctx->buf, 1);
+            input += fill;
+            len -= fill;
+        }
+
+        block_num = len / SHA256_BLOCK_SIZE;            /* whole blocks hashed straight from the caller's buffer */
+        len %= SHA256_BLOCK_SIZE;
+
+        if (block_num) {
+            sha256_block_data_order((u32*)sctx, input, block_num);
+            input += block_num * SHA256_BLOCK_SIZE;
+        }
+        non_block_align = 0;
+    }
+    if (len)
+        memcpy(sctx->buf + non_block_align, input, len);  /* stash the tail for the next call / finalization */
+
+    return 0;
+}
+
+int sha256_end(struct sha256_context *s_ctx, u8 *out)  /* finalize: pad, write the 32-byte big-endian digest to out, wipe the ctx; always returns 0. NOTE(review): unlike sha256_start/sha256_process there is no NULL check on s_ctx (or out) -- confirm callers guarantee both */
+{
+    unsigned int digest_size = 32;
+    struct sha256_context *sctx = s_ctx;
+    __be32 *digest = (__be32 *)out;
+    int i;
+
+    sha256_padding(sctx);
+
+    for (i = 0; digest_size > 0; i++, digest_size -= sizeof(__be32))
+        u32_split_u8(sctx->state[i],(u8*)digest++);  /* emit each state word big-endian, 4 bytes at a time */
+
+    *sctx = (struct sha256_context) {};              /* zero the context so digest state does not linger in memory */
+    return 0;
+
+}
+
+int sha256_hash(const void *input, int len, u8 *output)  /* one-shot convenience wrapper: SHA-256 of input[0..len) into output (32 bytes); always returns 0. NOTE(review): not declared in sha256.h -- add a prototype if it is called from other files */
+{
+    struct sha256_context s_ctx;
+    memset((void*)&s_ctx, 0, sizeof(s_ctx));  /* redundant with sha256_start but harmless */
+    sha256_start(&s_ctx);
+    sha256_process(&s_ctx,input,len);
+    sha256_end(&s_ctx,output);
+    return 0;
+}