ASR_BASE

Change-Id: Icf3719cc0afe3eeb3edc7fa80a2eb5199ca9dda1
diff --git a/marvell/linux/drivers/crypto/asr/bcm_optee/Makefile b/marvell/linux/drivers/crypto/asr/bcm_optee/Makefile
new file mode 100644
index 0000000..8a2975a
--- /dev/null
+++ b/marvell/linux/drivers/crypto/asr/bcm_optee/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_ASR_BCM) += asr-bcm-optee.o
+obj-$(CONFIG_ASR_BCM_SHA) += asr-sha-optee.o
+obj-$(CONFIG_ASR_BCM_CIPHER) += asr-cipher-optee.o
\ No newline at end of file
diff --git a/marvell/linux/drivers/crypto/asr/bcm_optee/asr-bcm-optee.c b/marvell/linux/drivers/crypto/asr/bcm_optee/asr-bcm-optee.c
new file mode 100644
index 0000000..2ad66f1
--- /dev/null
+++ b/marvell/linux/drivers/crypto/asr/bcm_optee/asr-bcm-optee.c
@@ -0,0 +1,207 @@
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/clk-provider.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/hw_random.h>
+#include <linux/platform_device.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-mapping.h>
+#include <linux/of_device.h>
+#include <linux/delay.h>
+#include <linux/crypto.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/algapi.h>
+#include <linux/tee_drv.h>
+
+#include "asr-bcm-optee.h"
+
+static void asrbcm_uuid_to_octets(uint8_t d[TEE_IOCTL_UUID_LEN], struct teec_uuid *s)
+{
+	d[0] = s->timeLow >> 24;
+	d[1] = s->timeLow >> 16;
+	d[2] = s->timeLow >> 8;
+	d[3] = s->timeLow;
+	d[4] = s->timeMid >> 8;
+	d[5] = s->timeMid;
+	d[6] = s->timeHiAndVersion >> 8;
+	d[7] = s->timeHiAndVersion;
+	memcpy(d + 8, s->clockSeqAndNode, sizeof(s->clockSeqAndNode));
+}
+
+static int asrbcm_tee_match_cb(struct tee_ioctl_version_data *ver, const void *data)
+{
+	return 1;
+}
+
+int asrbcm_optee_open_ta(struct asrbcm_tee_context *ctx, struct teec_uuid *uuid)
+{
+	struct tee_ioctl_open_session_arg open_session_arg;
+	int ret;
+
+	if (ctx == NULL)
+		return -EINVAL;
+
+	ctx->session = 0;
+	ctx->tee_ctx = tee_client_open_context(NULL, asrbcm_tee_match_cb, NULL, NULL);
+	if (IS_ERR(ctx->tee_ctx)) {
+		ret = PTR_ERR(ctx->tee_ctx);
+		ctx->tee_ctx = NULL;
+		return ret;
+	}
+
+	memset(&open_session_arg, 0x0, sizeof(struct tee_ioctl_open_session_arg));
+	asrbcm_uuid_to_octets(open_session_arg.uuid, uuid);
+	open_session_arg.clnt_login = TEE_IOCTL_LOGIN_PUBLIC;
+	open_session_arg.num_params = 0;
+	ret = tee_client_open_session(ctx->tee_ctx, &open_session_arg, NULL);
+	if (ret != 0) {
+		goto err_exit;
+	} else if (open_session_arg.ret != 0) {
+		ret = -EIO;
+		goto err_exit;
+	}
+
+	ctx->session = open_session_arg.session;
+
+	return ret;
+err_exit:
+	tee_client_close_context(ctx->tee_ctx);
+	ctx->tee_ctx = NULL;
+	return ret;
+}
+
+int asrbcm_optee_close_ta(struct asrbcm_tee_context *ctx)
+{
+	int ret;
+
+	if (ctx == NULL)
+		return -EINVAL;
+
+	ret = tee_client_close_session(ctx->tee_ctx, ctx->session);
+
+	tee_client_close_context(ctx->tee_ctx);
+
+	return ret;
+}
+
+#if defined(CONFIG_OF)
+static const struct of_device_id asr_bcm_dt_ids[] = {
+    { .compatible = "asr,asr-bcm" },
+    { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, asr_bcm_dt_ids);
+#endif
+
+static int asr_bcm_probe(struct platform_device *pdev)
+{
+    struct asr_bcm_dev *bcm_dd;
+    struct device *dev = &pdev->dev;
+    struct device_node *np = NULL;
+    int err = 0, devnum = 0;
+
+    bcm_dd = devm_kzalloc(&pdev->dev, sizeof(*bcm_dd), GFP_KERNEL);
+    if (bcm_dd == NULL) {
+        err = -ENOMEM;
+        goto no_mem_err;
+    }
+
+    np = dev->of_node;
+    bcm_dd->dev = dev;
+
+    platform_set_drvdata(pdev, bcm_dd);
+
+#ifdef CONFIG_ASR_BCM_CIPHER
+    if (of_get_property(np, "asr,asr-cipher", NULL)) {
+        err = asr_bcm_cipher_register(bcm_dd);
+        if (err)
+            goto res_err;
+        dev_info(dev, "CIPHER engine is initialized\n");
+        devnum++;
+    }
+#endif
+
+#ifdef CONFIG_ASR_BCM_SHA
+    if (of_get_property(np, "asr,asr-sha", NULL)) {
+        err = asr_bcm_sha_register(bcm_dd);
+        if (err)
+            goto sha_err;
+        dev_info(dev, "SHA engine is initialized\n");
+        devnum++;
+    }
+#endif
+
+    if (!devnum) {
+        dev_err(dev, "No BCM device enabled\n");
+        err = -ENODEV;
+        goto res_err;
+    }
+
+    return 0;
+
+#ifdef CONFIG_ASR_BCM_SHA
+sha_err:
+#ifdef CONFIG_ASR_BCM_CIPHER
+	asr_bcm_cipher_unregister(bcm_dd);
+#endif
+#endif
+
+res_err:
+    devm_kfree(dev, bcm_dd);
+no_mem_err:
+    dev_err(dev, "initialization failed.\n");
+
+    return err;
+}
+
+static int asr_bcm_remove(struct platform_device *pdev)
+{
+    struct asr_bcm_dev *bcm_dd;
+
+    bcm_dd = platform_get_drvdata(pdev);
+    if (!bcm_dd)
+        return -ENODEV;
+
+#ifdef CONFIG_ASR_BCM_CIPHER
+	asr_bcm_cipher_unregister(bcm_dd);
+#endif
+
+#ifdef CONFIG_ASR_BCM_SHA
+    asr_bcm_sha_unregister(bcm_dd);
+#endif
+
+    devm_kfree(bcm_dd->dev, bcm_dd);
+
+    return 0;
+}
+
+static struct platform_driver asr_bcm_driver = {
+    .probe		= asr_bcm_probe,
+    .remove		= asr_bcm_remove,
+    .driver		= {
+        .name	= "asr_bcm",
+        .of_match_table = of_match_ptr(asr_bcm_dt_ids),
+    },
+};
+
+static int __init asr_bcm_init(void)
+{
+    int ret;
+
+    ret = platform_driver_register(&asr_bcm_driver);
+
+    return ret;
+}
+
+device_initcall_sync(asr_bcm_init);
+
+MODULE_DESCRIPTION("BCM: ASR Trust Engine support.");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Yonggan Wang");
\ No newline at end of file
diff --git a/marvell/linux/drivers/crypto/asr/bcm_optee/asr-bcm-optee.h b/marvell/linux/drivers/crypto/asr/bcm_optee/asr-bcm-optee.h
new file mode 100644
index 0000000..be73036
--- /dev/null
+++ b/marvell/linux/drivers/crypto/asr/bcm_optee/asr-bcm-optee.h
@@ -0,0 +1,44 @@
+#ifndef _ASR_BCM_OPTEE_H_
+#define _ASR_BCM_OPTEE_H_
+
+#include <linux/crypto.h>
+#include <crypto/algapi.h>
+#include <linux/interrupt.h>
+#include <linux/mutex.h>
+#include <linux/miscdevice.h>
+
+#include "../bcm/asr-sha.h"
+#include "../bcm/asr-cipher.h"
+
+struct teec_uuid {
+	uint32_t timeLow;
+	uint16_t timeMid;
+	uint16_t timeHiAndVersion;
+	uint8_t clockSeqAndNode[8];
+};
+
+struct asrbcm_tee_context {
+	struct tee_context *tee_ctx;
+	int session;
+};
+
+struct asr_bcm_dev {
+    struct device		*dev;
+    struct asr_bcm_sha asr_sha;
+    struct asr_bcm_cipher asr_cipher;
+};
+
+struct asr_bcm_ops {
+    int (*dev_get)(struct asr_bcm_dev *);
+    int (*dev_put)(struct asr_bcm_dev *);
+};
+
+int asrbcm_optee_open_ta(struct asrbcm_tee_context *ctx, struct teec_uuid *uuid);
+int asrbcm_optee_close_ta(struct asrbcm_tee_context *ctx);
+
+int asr_bcm_sha_register(struct asr_bcm_dev *bcm_dd);
+int asr_bcm_sha_unregister(struct asr_bcm_dev *bcm_dd);
+
+int asr_bcm_cipher_register(struct asr_bcm_dev *bcm_dd);
+int asr_bcm_cipher_unregister(struct asr_bcm_dev *bcm_dd);
+#endif
\ No newline at end of file
diff --git a/marvell/linux/drivers/crypto/asr/bcm_optee/asr-cipher-optee.c b/marvell/linux/drivers/crypto/asr/bcm_optee/asr-cipher-optee.c
new file mode 100644
index 0000000..8d5912a
--- /dev/null
+++ b/marvell/linux/drivers/crypto/asr/bcm_optee/asr-cipher-optee.c
@@ -0,0 +1,651 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2023 ASR Micro Limited
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#ifdef CONFIG_TEE
+#include <linux/tee_drv.h>
+#endif
+#include <linux/crypto.h>
+#include <linux/cputype.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/algapi.h>
+#include <crypto/aes.h>
+#include <crypto/internal/skcipher.h>
+
+#include "asr-bcm-optee.h"
+#include "asr-cipher-optee.h"
+
+static struct asr_bcm_cipher *asr_cipher_local;
+
+static struct teec_uuid pta_cipher_uuid = ASR_AES_ACCESS_UUID;
+
+static int asr_optee_cipher_get_rkek_state(u32 *state)
+{
+	struct tee_ioctl_invoke_arg invoke_arg;
+	struct tee_param params[1];
+	struct asrbcm_tee_context asrbcm_tee_ctx;
+	int ret = 0;
+
+	ret = asrbcm_optee_open_ta(&asrbcm_tee_ctx, &pta_cipher_uuid);
+	if (ret != 0) {
+		return ret;
+	}
+
+	memset(&invoke_arg, 0x0, sizeof(struct tee_ioctl_invoke_arg));
+	invoke_arg.func = CMD_AES_HWKEY_STATUS;
+	invoke_arg.session  = asrbcm_tee_ctx.session;
+	invoke_arg.num_params = 1;
+
+	params[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT;
+	params[0].u.value.a = 0;
+	params[0].u.value.b = 0;
+	params[0].u.value.c = 0;
+
+	ret = tee_client_invoke_func(asrbcm_tee_ctx.tee_ctx, &invoke_arg, params);
+	if (ret != 0) {
+		goto exit;
+	} else if (invoke_arg.ret != 0) {
+		ret = -EIO;
+		goto exit;
+	}
+
+	*state = params[0].u.value.a;
+
+exit:
+	asrbcm_optee_close_ta(&asrbcm_tee_ctx);
+	return ret;
+
+}
+
+static int asr_optee_cipher_process(uint32_t cipher_mode, uint32_t op_mode,
+		struct scatterlist *src, struct scatterlist *dst,
+		size_t len, uint32_t key_size, u8 *key,
+		u8 *iv, uint32_t ivsize)
+{
+	struct tee_ioctl_invoke_arg invoke_arg;
+	struct tee_param params[4];
+	struct asrbcm_tee_context asrbcm_tee_ctx;
+	struct tee_shm *shm;
+	int ret = 0;
+	char *ma = NULL;
+    uint32_t srclen = len, dstlen = len, paralen_a = key_size, paralen_b = ivsize;
+    uint8_t *parabuf_a = key, *parabuf_b = iv;
+
+	ret = asrbcm_optee_open_ta(&asrbcm_tee_ctx, &pta_cipher_uuid);
+	if (ret != 0) {
+		return ret;
+	}
+
+	memset(&invoke_arg, 0x0, sizeof(struct tee_ioctl_invoke_arg));
+	invoke_arg.func = cipher_mode;
+	invoke_arg.session  = asrbcm_tee_ctx.session;
+
+	shm = tee_shm_alloc(asrbcm_tee_ctx.tee_ctx, srclen + dstlen + paralen_a + paralen_b, 
+						TEE_SHM_MAPPED | TEE_SHM_DMA_BUF);
+	if (IS_ERR(shm)) {
+		ret = PTR_ERR(shm);
+		goto exit;
+	}
+
+	params[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
+	params[0].u.memref.shm_offs = 0;
+	params[0].u.memref.size = srclen;
+	params[0].u.memref.shm = shm;
+
+	params[1].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT;
+	params[1].u.memref.shm_offs = srclen;
+	params[1].u.memref.size = dstlen;
+	params[1].u.memref.shm = shm;
+
+	params[2].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT;
+	params[2].u.value.a = op_mode;
+
+	ma = tee_shm_get_va(shm, 0);
+	sg_copy_to_buffer(src, sg_nents(src), ma, srclen);
+	memcpy(ma + srclen + dstlen, parabuf_a, paralen_a);
+
+	/* cbc with iv */
+	if (parabuf_b && paralen_b) {
+		memcpy(ma + srclen + dstlen + paralen_a, parabuf_b, paralen_b);
+		params[2].u.value.b = paralen_a;
+		params[3].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
+		params[3].u.memref.shm_offs = srclen + dstlen;
+		params[3].u.memref.size = paralen_a + paralen_b;
+		params[3].u.memref.shm = shm;
+		invoke_arg.num_params = 4;
+	} else {
+		/* ecb with non iv */
+		params[3].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
+		params[3].u.memref.shm_offs = srclen + dstlen;
+		params[3].u.memref.size = paralen_a;
+		params[3].u.memref.shm = shm;
+		invoke_arg.num_params = 4;
+	}
+
+	ret = tee_client_invoke_func(asrbcm_tee_ctx.tee_ctx, &invoke_arg, params);
+	if (ret != 0) {
+		goto free_shm;
+	} else if (invoke_arg.ret != 0) {
+		ret = -EIO;
+		goto free_shm;
+	}
+	sg_copy_from_buffer(dst, sg_nents(dst), ma + srclen, dstlen);
+
+free_shm:
+	tee_shm_free(shm);
+exit:
+	asrbcm_optee_close_ta(&asrbcm_tee_ctx);
+	return ret;
+}
+
+static int asr_optee_cipher_hwkey_process(uint32_t cipher_mode, uint32_t op_mode,
+		struct scatterlist *src, struct scatterlist *dst,
+		size_t len, uint32_t key_size,
+		u8 *iv, uint32_t ivsize)
+{
+	struct tee_ioctl_invoke_arg invoke_arg;
+	struct tee_param params[4];
+	struct asrbcm_tee_context asrbcm_tee_ctx;
+	struct tee_shm *shm;
+	int ret = 0;
+	char *ma = NULL;
+    uint32_t srclen = len, dstlen = len, paralen = ivsize;
+    uint8_t *parabuf = iv;
+
+
+	ret = asrbcm_optee_open_ta(&asrbcm_tee_ctx, &pta_cipher_uuid);
+	if (ret != 0) {
+		return ret;
+	}
+
+	memset(&invoke_arg, 0x0, sizeof(struct tee_ioctl_invoke_arg));
+	invoke_arg.func = cipher_mode;
+	invoke_arg.session  = asrbcm_tee_ctx.session;
+
+	shm = tee_shm_alloc(asrbcm_tee_ctx.tee_ctx, srclen + dstlen + paralen, TEE_SHM_MAPPED | TEE_SHM_DMA_BUF);
+	if (IS_ERR(shm)) {
+		ret = PTR_ERR(shm);
+		goto exit;
+	}
+
+	params[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
+	params[0].u.memref.shm_offs = 0;
+	params[0].u.memref.size = srclen;
+	params[0].u.memref.shm = shm;
+
+	params[1].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT;
+	params[1].u.memref.shm_offs = srclen;
+	params[1].u.memref.size = dstlen;
+	params[1].u.memref.shm = shm;
+
+	params[2].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT;
+	params[2].u.value.a = key_size;
+	params[2].u.value.b = op_mode;
+	params[2].u.value.c = 0;
+
+	ma = tee_shm_get_va(shm, 0);
+	sg_copy_to_buffer(src, sg_nents(src), ma, srclen);
+	if (parabuf && paralen) {
+		params[3].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
+		params[3].u.memref.shm_offs = srclen + dstlen;
+		params[3].u.memref.size = paralen;
+		params[3].u.memref.shm = shm;
+		memcpy(ma + srclen + dstlen, parabuf, paralen);
+		invoke_arg.num_params = 4;
+	} else {
+		invoke_arg.num_params = 3;
+	}
+
+	ret = tee_client_invoke_func(asrbcm_tee_ctx.tee_ctx, &invoke_arg, params);
+	if (ret != 0) {
+		goto free_shm;
+	} else if (invoke_arg.ret != 0) {
+		ret = -EIO;
+		goto free_shm;
+	}
+	sg_copy_from_buffer(dst, sg_nents(dst), ma + srclen, dstlen);
+
+free_shm:
+	tee_shm_free(shm);
+exit:
+	asrbcm_optee_close_ta(&asrbcm_tee_ctx);
+	return ret;
+}
+
+static inline void asr_cipher_set_mode(struct asr_bcm_cipher *dd,
+				      const struct asr_cipher_reqctx *rctx)
+{
+	/* Clear all but persistent flags and set request flags. */
+	dd->flags = (dd->flags & CIPHER_FLAGS_PERSISTENT) | rctx->mode;
+}
+
+static void asr_cipher_set_iv_as_last_ciphertext_block(struct asr_bcm_cipher *dd)
+{
+	struct skcipher_request *req = skcipher_request_cast(dd->areq);
+	struct asr_cipher_reqctx *rctx = skcipher_request_ctx(req);
+	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
+	unsigned int ivsize = crypto_skcipher_ivsize(cipher);
+
+	if (req->cryptlen < ivsize)
+		return;
+
+	if (rctx->mode & FLAGS_ENCRYPT) {
+		scatterwalk_map_and_copy(req->iv, req->dst,
+					 req->cryptlen - ivsize, ivsize, 0);
+	} else {
+		if (req->src == req->dst)
+			memcpy(req->iv, rctx->lastc, ivsize);
+		else
+			scatterwalk_map_and_copy(req->iv, req->src,
+						 req->cryptlen - ivsize,
+						 ivsize, 0);
+	}
+}
+
+static int asr_cipher_handle_queue(struct asr_bcm_cipher *dd,
+				  struct crypto_async_request *new_areq)
+{
+	struct crypto_async_request *areq, *backlog;
+	struct asr_cipher_ctx *ctx;
+	unsigned long flags;
+	bool start_async;
+	int err, ret = 0;
+
+	spin_lock_irqsave(&dd->lock, flags);
+	if (new_areq)
+		ret = crypto_enqueue_request(&dd->queue, new_areq);
+	if (dd->flags & FLAGS_BUSY) {
+		spin_unlock_irqrestore(&dd->lock, flags);
+		return ret;
+	}
+
+	backlog = crypto_get_backlog(&dd->queue);
+	areq = crypto_dequeue_request(&dd->queue);
+	if (areq) {
+		dd->flags |= FLAGS_BUSY;
+	}
+	spin_unlock_irqrestore(&dd->lock, flags);
+
+	if (!areq)
+		return ret;
+
+	if (backlog)
+		backlog->complete(backlog, -EINPROGRESS);
+
+	ctx = crypto_tfm_ctx(areq->tfm);
+
+	dd->areq = areq;
+	dd->ctx = ctx;
+	start_async = (areq != new_areq);
+	dd->is_async = start_async;
+
+	/* WARNING: ctx->start() MAY change dd->is_async. */
+	err = ctx->start(dd);
+	return (start_async) ? ret : err;
+}
+
+static inline int asr_cipher_complete(struct asr_bcm_cipher *dd, int err)
+{
+
+	dd->flags &= ~FLAGS_BUSY;
+
+	asr_cipher_set_iv_as_last_ciphertext_block(dd);
+
+	if (dd->is_async)
+		dd->areq->complete(dd->areq, err);
+
+	tasklet_schedule(&dd->queue_task);
+
+	return err;
+}
+
+static int asr_cipher_start(struct asr_bcm_cipher *dd)
+{
+	struct skcipher_request *req = skcipher_request_cast(dd->areq);
+	struct asr_cipher_reqctx *rctx = skcipher_request_ctx(req);
+	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
+	u8 *iv;
+	u32 flags, cipher_mode, op_mode, keylen, ivsize;
+	int err;
+
+	asr_cipher_set_mode(dd, rctx);
+
+	flags = dd->flags;
+
+	if ((flags & FLAGS_OPMODE_MASK) == FLAGS_CBC) {
+		if (rctx->use_rkek) {
+			cipher_mode = CMD_AES_HWKEY_CBC;
+		} else {
+			cipher_mode = CMD_AES_CBC;
+		}
+		ivsize = crypto_skcipher_ivsize(cipher);
+		iv = req->iv;
+	} else {
+		iv = NULL;
+		ivsize = 0;
+		if (rctx->use_rkek) {
+			cipher_mode = CMD_AES_HWKEY_ECB;
+		} else {
+			cipher_mode = CMD_AES_ECB;
+		}
+	}
+
+	if (flags & FLAGS_ENCRYPT)
+		op_mode = 1;
+	else
+		op_mode = 0;
+
+	keylen = dd->ctx->keylen;
+
+	if (rctx->use_rkek) {
+		err = asr_optee_cipher_hwkey_process(cipher_mode, op_mode, req->src,
+				req->dst, req->cryptlen, keylen, iv, ivsize);
+	} else {
+		err = asr_optee_cipher_process(cipher_mode, op_mode, req->src,
+				req->dst, req->cryptlen, keylen, (u8 *)dd->ctx->key, iv, ivsize);
+	}
+
+	return asr_cipher_complete(dd, err);
+}
+
+static int asr_cipher(struct skcipher_request *req, unsigned long mode)
+{
+	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
+	struct asr_cipher_ctx *ctx = crypto_skcipher_ctx(cipher);
+	struct asr_cipher_reqctx *rctx;
+	struct asr_bcm_cipher *dd = asr_cipher_local;
+
+	ctx->block_size = AES_BLOCK_SIZE;
+	ctx->dd = dd;
+
+	rctx = skcipher_request_ctx(req);
+	rctx->mode = mode;
+	rctx->use_rkek = ctx->use_rkek;
+
+	if (!(mode & FLAGS_ENCRYPT) && (req->src == req->dst)) {
+		unsigned int ivsize = crypto_skcipher_ivsize(cipher);
+		if (req->cryptlen >= ivsize) {
+			scatterwalk_map_and_copy(rctx->lastc, req->src,
+						 req->cryptlen - ivsize,
+						 ivsize, 0);
+		}
+	}
+
+	return asr_cipher_handle_queue(dd, &req->base);
+}
+
+static int asr_cipher_setkey(struct crypto_skcipher *cipher, const u8 *key,
+			   unsigned int keylen)
+{
+	struct asr_cipher_ctx *ctx = crypto_skcipher_ctx(cipher);
+	struct asr_bcm_cipher *dd = asr_cipher_local;
+	
+	ctx->dd = dd;
+	ctx->use_rkek = false;
+
+	if (keylen != AES_KEYSIZE_128 &&
+		keylen != AES_KEYSIZE_192 &&
+		keylen != AES_KEYSIZE_256) {
+		crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+
+	memcpy(ctx->key, key, keylen);
+	ctx->keylen = keylen;
+
+	return 0;
+}
+
+static int asr_cipher_set_hwkey(struct crypto_skcipher *cipher, const u8 *key,
+			   unsigned int keylen)
+{
+	struct asr_cipher_ctx *ctx = crypto_skcipher_ctx(cipher);
+	struct asr_bcm_cipher *dd = asr_cipher_local;
+
+	(void)key; /* ignore the sw key */
+
+	if (!dd->rkek_burned)
+		return -EPERM;
+
+	if (keylen != AES_KEYSIZE_128 &&
+	    keylen != AES_KEYSIZE_192 &&
+	    keylen != AES_KEYSIZE_256) {
+		crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+
+	ctx->keylen = keylen;
+
+	return 0;
+}
+
+static int asr_aes_ecb_encrypt(struct skcipher_request *req)
+{
+	return asr_cipher(req, FLAGS_ECB | FLAGS_ENCRYPT);
+}
+
+static int asr_aes_ecb_decrypt(struct skcipher_request *req)
+{
+	return asr_cipher(req, FLAGS_ECB);
+}
+
+static int asr_aes_cbc_encrypt(struct skcipher_request *req)
+{
+	return asr_cipher(req, FLAGS_CBC | FLAGS_ENCRYPT);
+}
+
+static int asr_aes_cbc_decrypt(struct skcipher_request *req)
+{
+	return asr_cipher(req, FLAGS_CBC);
+}
+
+static int asr_cipher_init(struct crypto_skcipher *tfm)
+{
+	struct asr_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+	tfm->reqsize = sizeof(struct asr_cipher_reqctx);
+	ctx->start = asr_cipher_start;
+
+	return 0;
+}
+
+static int asr_cipher_hwkey_init(struct crypto_skcipher *tfm)
+{
+	struct asr_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+	struct asr_bcm_cipher *dd = asr_cipher_local;
+
+	if (!dd->rkek_burned)
+		return -EPERM;
+
+	tfm->reqsize = sizeof(struct asr_cipher_reqctx);
+	ctx->start = asr_cipher_start;
+
+	return 0;
+}
+
+static void asr_cipher_exit(struct crypto_skcipher *tfm)
+{
+	struct asr_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+	memset(ctx, 0, sizeof(*ctx));
+}
+
+static void asr_cipher_queue_task(unsigned long data)
+{
+	struct asr_bcm_cipher *dd = (struct asr_bcm_cipher *)data;
+
+	asr_cipher_handle_queue(dd, NULL);
+}
+
+static struct skcipher_alg cipher_algs[] = {
+	/* AES - ECB, using input key*/
+	{
+		.base = {
+			.cra_name = "ecb(aes)",
+			.cra_driver_name = "asr-ecb-aes",
+			.cra_priority = 300,
+			.cra_flags = CRYPTO_ALG_ASYNC,
+			.cra_blocksize = AES_BLOCK_SIZE,
+			.cra_ctxsize = sizeof(struct asr_cipher_ctx),
+			.cra_alignmask = 0xf,
+			.cra_module = THIS_MODULE,
+		},
+		.min_keysize = AES_MIN_KEY_SIZE,
+		.max_keysize = AES_MAX_KEY_SIZE,
+		.setkey = asr_cipher_setkey,
+		.encrypt = asr_aes_ecb_encrypt,
+		.decrypt = asr_aes_ecb_decrypt,
+		.init = asr_cipher_init,
+		.exit = asr_cipher_exit,
+	},
+	/* AES - CBC, using input key,*/
+	{
+		.base = {
+			.cra_name = "cbc(aes)",
+			.cra_driver_name = "asr-cbc-aes",
+			.cra_priority = 300,
+			.cra_flags = CRYPTO_ALG_ASYNC,
+			.cra_blocksize = AES_BLOCK_SIZE,
+			.cra_ctxsize = sizeof(struct asr_cipher_ctx),
+			.cra_alignmask = 0xf,
+			.cra_module = THIS_MODULE,
+		},
+		.min_keysize = AES_MIN_KEY_SIZE,
+		.max_keysize = AES_MAX_KEY_SIZE,
+		.setkey = asr_cipher_setkey,
+		.encrypt = asr_aes_cbc_encrypt,
+		.decrypt = asr_aes_cbc_decrypt,
+		.init = asr_cipher_init,
+		.exit = asr_cipher_exit,
+		.ivsize = AES_BLOCK_SIZE,
+	},
+	/* AES - ECB, using hardware key, a.k.a. RKEK */
+	{
+		.base = {
+			.cra_name = "ecb(aes-hwkey)",
+			.cra_driver_name = "asr-ecb-aes-hwkey",
+			.cra_priority = 300,
+			.cra_flags = CRYPTO_ALG_ASYNC,
+			.cra_blocksize = AES_BLOCK_SIZE,
+			.cra_ctxsize = sizeof(struct asr_cipher_ctx),
+			.cra_alignmask = 0xf,
+			.cra_module = THIS_MODULE,
+		},
+		.min_keysize = AES_MIN_KEY_SIZE,
+		.max_keysize = AES_MAX_KEY_SIZE,
+		.setkey = asr_cipher_set_hwkey,
+		.encrypt = asr_aes_ecb_encrypt,
+		.decrypt = asr_aes_ecb_decrypt,
+		.init = asr_cipher_hwkey_init,
+		.exit = asr_cipher_exit,
+	},
+	/* AES - CBC, using hardware key, a.k.a. RKEK */
+	{
+		.base = {
+			.cra_name = "cbc(aes-hwkey)",
+			.cra_driver_name = "asr-cbc-aes-hwkey",
+			.cra_priority = 300,
+			.cra_flags = CRYPTO_ALG_ASYNC,
+			.cra_blocksize = AES_BLOCK_SIZE,
+			.cra_ctxsize = sizeof(struct asr_cipher_ctx),
+			.cra_alignmask = 0xf,
+			.cra_module = THIS_MODULE,
+		},
+		.min_keysize = AES_MIN_KEY_SIZE,
+		.max_keysize = AES_MAX_KEY_SIZE,
+		.setkey = asr_cipher_set_hwkey,
+		.encrypt = asr_aes_cbc_encrypt,
+		.decrypt = asr_aes_cbc_decrypt,
+		.init = asr_cipher_hwkey_init,
+		.exit = asr_cipher_exit,
+		.ivsize = AES_BLOCK_SIZE,
+	},
+};
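+
+/*
+ * Illustrative sketch only (not built by default): how another kernel user
+ * could drive the "cbc(aes)" skcipher registered above through the generic
+ * crypto API. The guard macro ASR_BCM_CIPHER_EXAMPLE, the all-zero key/IV
+ * and the single-block buffer are assumptions for demonstration.
+ */
+#ifdef ASR_BCM_CIPHER_EXAMPLE
+static int asr_cipher_example(void)
+{
+	struct crypto_skcipher *tfm;
+	struct skcipher_request *req;
+	struct scatterlist sg;
+	DECLARE_CRYPTO_WAIT(wait);
+	u8 key[AES_KEYSIZE_128] = { 0 };
+	u8 iv[AES_BLOCK_SIZE] = { 0 };
+	u8 *buf;
+	int err;
+
+	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
+	if (IS_ERR(tfm))
+		return PTR_ERR(tfm);
+
+	req = skcipher_request_alloc(tfm, GFP_KERNEL);
+	buf = kzalloc(AES_BLOCK_SIZE, GFP_KERNEL);
+	if (!req || !buf) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+	err = crypto_skcipher_setkey(tfm, key, sizeof(key));
+	if (err)
+		goto out;
+
+	/* encrypt one block in place and wait for the async completion */
+	sg_init_one(&sg, buf, AES_BLOCK_SIZE);
+	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+				      crypto_req_done, &wait);
+	skcipher_request_set_crypt(req, &sg, &sg, AES_BLOCK_SIZE, iv);
+	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
+
+out:
+	kfree(buf);
+	skcipher_request_free(req);
+	crypto_free_skcipher(tfm);
+	return err;
+}
+#endif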
+
+int asr_bcm_cipher_register(struct asr_bcm_dev *bcm_dd)
+{
+	int i, j, err;
+	struct asr_bcm_cipher *cipher_dd;
+	struct device *dev = bcm_dd->dev;
+	u32 rkek_state;
+
+    cipher_dd = &bcm_dd->asr_cipher;
+	cipher_dd->dev = bcm_dd->dev;
+
+	asr_cipher_local = cipher_dd;
+
+	err = asr_optee_cipher_get_rkek_state(&rkek_state);
+	if (err) {
+		dev_warn(dev, "can't get hwkey(rkek) state\n");
+		cipher_dd->rkek_burned = 0;
+	} else {
+		if (rkek_state)
+			cipher_dd->rkek_burned = 1;
+		else
+			cipher_dd->rkek_burned = 0;
+		switch (rkek_state) {
+		case 2:
+			dev_warn(dev, "hwkey(rkek) burned, SW access not disabled\n");
+			break;
+		case 1:
+			dev_warn(dev, "hwkey(rkek) burned, SW access disabled\n");
+			break;
+		case 0:
+			dev_warn(dev, "hwkey(rkek) not burned\n");
+			break;
+		}
+	}
+
+	spin_lock_init(&cipher_dd->lock);
+	tasklet_init(&cipher_dd->queue_task, asr_cipher_queue_task,
+					(unsigned long)cipher_dd);
+
+	crypto_init_queue(&cipher_dd->queue, ASR_CIPHER_QUEUE_LENGTH);
+
+	for (i = 0; i < ARRAY_SIZE(cipher_algs); i++) {
+		err = crypto_register_skcipher(&cipher_algs[i]);
+		if (err) {
+			for (j = 0; j < i; j++)
+				crypto_unregister_skcipher(&cipher_algs[j]);
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+int asr_bcm_cipher_unregister(struct asr_bcm_dev *bcm_dd)
+{
+	int i;
+	struct asr_bcm_cipher *cipher_dd = &bcm_dd->asr_cipher;
+
+	for (i = 0; i < ARRAY_SIZE(cipher_algs); i++)
+		crypto_unregister_skcipher(&cipher_algs[i]);
+
+	tasklet_kill(&cipher_dd->queue_task);
+
+	return 0;
+}
+
+MODULE_DESCRIPTION("ASR HWKey CIPHER driver with optee-os.");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Yonggan Wang");
\ No newline at end of file
diff --git a/marvell/linux/drivers/crypto/asr/bcm_optee/asr-cipher-optee.h b/marvell/linux/drivers/crypto/asr/bcm_optee/asr-cipher-optee.h
new file mode 100644
index 0000000..74b790e
--- /dev/null
+++ b/marvell/linux/drivers/crypto/asr/bcm_optee/asr-cipher-optee.h
@@ -0,0 +1,72 @@
+#ifndef ASR_CIPHER_OPTEE_H
+#define ASR_CIPHER_OPTEE_H
+
+#define ASR_AES_ACCESS_UUID \
+		{ \
+			0xba1b496f, 0xf07d, 0x466e, \
+			{ 0x99, 0x09, 0xeb, 0xe3, 0x55, 0x43, 0xa0, 0x1c } \
+		}
+
+/*
+ * AES ECB encrypt/decrypt data with HWKEY(RKEK)
+ *
+ * [in]     params[0].memref.buffer     plain/cipher text to encrypt/decrypt
+ * [in]     params[0].memref.size       length of plain/cipher text
+ * [out]    pParams[1].memref.buffer    cipher/plain text after encrypt/decrypt
+ * [in]     pParams[2].value.a          keysize
+ * [in]     pParams[2].value.b          op_mode: 1--encrypt, 0--decrypt
+ */
+#define CMD_AES_HWKEY_ECB   0x1
+
+/*
+ * AES CBC encrypt/decrypt data with HWKEY(RKEK)
+ *
+ * [in]     params[0].memref.buffer     plain/cipher text to encrypt/decrypt
+ * [in]     params[0].memref.size       length of plain/cipher text
+ * [out]    pParams[1].memref.buffer    cipher/plain text after encrypt/decrypt
+ * [in]     pParams[2].value.a          keysize
+ * [in]     pParams[2].value.b          op_mode: 1--encrypt, 0--decrypt
+ * [in]     pParams[3].memref.buffer    initial vector
+ */
+#define CMD_AES_HWKEY_CBC   0x2
+
+
+/*
+ * Check AES RKEK status
+ *     0: RKEK(hwkey) is not burned
+ *     1: RKEK(hwkey) is burned and software access is disabled
+ *     2: RKEK(hwkey) is burned but software access is not disabled
+ *
+ * [out]     pParams[0].value.a          status
+ */
+#define CMD_AES_HWKEY_STATUS	0x3
+
+/*
+ * AES ECB encrypt/decrypt data with input key
+ *
+ * [in]     params[0].memref.buffer     plain/cipher text to encrypt/decrypt
+ * [in]     params[0].memref.size       length of plain/cipher text
+ * [out]    pParams[1].memref.buffer    cipher/plain text after encrypt/decrypt
+ * [in]     pParams[2].value.a          op_mode: 1--encrypt, 0--decrypt
+ * [in]     pParams[3].memref.buffer   	input key
+ * [in]     pParams[3].memref.size      keysize
+ */
+#define CMD_AES_ECB   0x4
+
+/*
+ * AES CBC encrypt/decrypt data with input key
+ *
+ * [in]     params[0].memref.buffer     plain/cipher text to encrypt/decrypt
+ * [in]     params[0].memref.size       length of plain/cipher text
+ * [out]    pParams[1].memref.buffer    cipher/plain text after encrypt/decrypt
+ * [in]     pParams[2].value.a          op_mode: 1--encrypt, 0--decrypt
+ * [in]     pParams[2].value.b          keysize
+ * [in]     pParams[3].memref.buffer   	input key + initial vector
+ * [in]     pParams[3].memref.size      keysize + ivsize
+ */
+#define CMD_AES_CBC   0x5
+
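+/*
+ * Shared-memory layout used by the kernel side for CMD_AES_ECB/CMD_AES_CBC
+ * (see asr_optee_cipher_process(); one contiguous tee_shm allocation):
+ *
+ *   [0, srclen)                          input plain/cipher text
+ *   [srclen, srclen + dstlen)            output cipher/plain text
+ *   [srclen + dstlen, ... + keysize)     input key
+ *   [... + keysize, ... + ivsize)        initial vector (CBC only)
+ */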
+
+#include "../bcm/asr-cipher.h"
+
+#endif
\ No newline at end of file
diff --git a/marvell/linux/drivers/crypto/asr/bcm_optee/asr-sha-optee.c b/marvell/linux/drivers/crypto/asr/bcm_optee/asr-sha-optee.c
new file mode 100644
index 0000000..fcb9bb7
--- /dev/null
+++ b/marvell/linux/drivers/crypto/asr/bcm_optee/asr-sha-optee.c
@@ -0,0 +1,1119 @@
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/clk-provider.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/hw_random.h>
+#include <linux/platform_device.h>
+#include <linux/scatterlist.h>
+#include <crypto/scatterwalk.h>
+#include <linux/of_device.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <crypto/hmac.h>
+#include <crypto/md5.h>
+#include <crypto/sha.h>
+
+#include "asr-sha-optee.h"
+
+static struct asr_bcm_sha *asr_sha_local = NULL;
+
+static struct teec_uuid pta_sha_uuid = ASR_SHA_ACCESS_UUID;
+
+static int asrbcm_optee_acquire_hash_init(struct asr_optee_sha_reqctx *ctx, struct teec_uuid *uuid, u32 cmd, u32 alg)
+{
+	struct tee_ioctl_invoke_arg invoke_arg;
+	struct tee_param params[2];
+	int ret = 0;
+
+	ret = asrbcm_optee_open_ta(&ctx->asrbcm_tee_ctx, uuid);
+	if (ret != 0) {
+		return ret;
+	}
+
+	memset(&invoke_arg, 0x0, sizeof(struct tee_ioctl_invoke_arg));
+	invoke_arg.func = cmd;
+	invoke_arg.session  = ctx->asrbcm_tee_ctx.session;
+	invoke_arg.num_params = 2;
+
+	params[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT;
+	params[0].u.value.a = alg;
+
+	params[1].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT;
+	params[1].u.value.a = (uint32_t)ctx;
+
+	ret = tee_client_invoke_func(ctx->asrbcm_tee_ctx.tee_ctx, &invoke_arg, params);
+	if (ret != 0) {
+		goto exit;
+	} else if (invoke_arg.ret != 0) {
+		ret = -EIO;
+		goto exit;
+	}
+
+	return ret;
+
+exit:
+	asrbcm_optee_close_ta(&ctx->asrbcm_tee_ctx);
+	return ret;
+}
+
+static int asrbcm_optee_acquire_hash_update(struct asr_optee_sha_reqctx *ctx, struct teec_uuid *uuid, u32 cmd, \
+									u32 alg, uint8_t *in, u32 inlen)
+{
+	struct tee_ioctl_invoke_arg invoke_arg;
+	struct tee_param params[2];
+	int ret = 0;
+	struct tee_shm *shm = NULL;
+	u8 *pbuf = NULL;
+	
+	memset(&invoke_arg, 0x0, sizeof(struct tee_ioctl_invoke_arg));
+	invoke_arg.func = cmd;
+	invoke_arg.session  = ctx->asrbcm_tee_ctx.session;
+	invoke_arg.num_params = 2;
+
+	shm = tee_shm_alloc(ctx->asrbcm_tee_ctx.tee_ctx, inlen, TEE_SHM_MAPPED | TEE_SHM_DMA_BUF);
+	if (IS_ERR(shm)) {
+		ret = PTR_ERR(shm);
+		asrbcm_optee_close_ta(&ctx->asrbcm_tee_ctx);
+		return ret;
+	}
+
+	pbuf = tee_shm_get_va(shm, 0);
+	memcpy(pbuf, in, inlen);
+
+	params[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
+	params[0].u.memref.shm_offs = 0;
+	params[0].u.memref.size = inlen;
+	params[0].u.memref.shm = shm;
+
+	params[1].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT;
+	params[1].u.value.a = (uint32_t)ctx;
+
+	ret = tee_client_invoke_func(ctx->asrbcm_tee_ctx.tee_ctx, &invoke_arg, params);
+	if (ret != 0) {
+		goto exit;
+	} else if (invoke_arg.ret != 0) {
+		ret = -EIO;
+		goto exit;
+	}
+
+	tee_shm_free(shm);
+	return ret;
+
+exit:
+	tee_shm_free(shm);
+	asrbcm_optee_close_ta(&ctx->asrbcm_tee_ctx);
+	return ret;
+}
+
+static int asrbcm_optee_acquire_hash_final(struct asr_optee_sha_reqctx *ctx, struct teec_uuid *uuid, u32 cmd, u32 alg, u8 *out, u8 outlen)
+{
+	struct tee_ioctl_invoke_arg invoke_arg;
+	struct tee_param params[2];
+	int ret = 0;
+	struct tee_shm *shm = NULL;
+	u8 *pbuf = NULL;
+
+	memset(&invoke_arg, 0x0, sizeof(struct tee_ioctl_invoke_arg));
+	invoke_arg.func = cmd;
+	invoke_arg.session  = ctx->asrbcm_tee_ctx.session;
+	invoke_arg.num_params = 2;
+
+	shm = tee_shm_alloc(ctx->asrbcm_tee_ctx.tee_ctx, outlen, TEE_SHM_MAPPED | TEE_SHM_DMA_BUF);
+	if (IS_ERR(shm)) {
+		ret = PTR_ERR(shm);
+		asrbcm_optee_close_ta(&ctx->asrbcm_tee_ctx);
+		return ret;
+	}
+
+	params[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT;
+	params[0].u.memref.shm_offs = 0;
+	params[0].u.memref.size = outlen;
+	params[0].u.memref.shm = shm;
+
+	params[1].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT;
+	params[1].u.value.a = (uint32_t)ctx;
+
+	ret = tee_client_invoke_func(ctx->asrbcm_tee_ctx.tee_ctx, &invoke_arg, params);
+	if (ret != 0) {
+		goto exit;
+	} else if (invoke_arg.ret != 0) {
+		ret = -EIO;
+		goto exit;
+	}
+	
+	pbuf = tee_shm_get_va(shm, 0);
+	memcpy(out, pbuf, outlen);
+
+exit:
+	tee_shm_free(shm);
+	asrbcm_optee_close_ta(&ctx->asrbcm_tee_ctx);
+	return ret;
+}
+
+static int asr_sha_handle_queue(struct asr_bcm_sha *dd,
+				  struct ahash_request *req)
+{
+	struct crypto_async_request *async_req, *backlog;
+	struct asr_sha_ctx *ctx;
+	unsigned long flags;
+	bool start_async;
+	int err = 0, ret = 0;
+
+	spin_lock_irqsave(&dd->lock, flags);
+	if (req)
+		ret = ahash_enqueue_request(&dd->queue, req);
+
+	if (SHA_FLAGS_BUSY & dd->flags) {
+		spin_unlock_irqrestore(&dd->lock, flags);
+		return ret;
+	}
+
+	backlog = crypto_get_backlog(&dd->queue);
+	async_req = crypto_dequeue_request(&dd->queue);
+	if (async_req)
+		dd->flags |= SHA_FLAGS_BUSY;
+
+	spin_unlock_irqrestore(&dd->lock, flags);
+
+	if (!async_req) {
+		return ret;
+	}
+
+	if (backlog)
+		backlog->complete(backlog, -EINPROGRESS);
+
+	ctx = crypto_tfm_ctx(async_req->tfm);
+
+	dd->req = ahash_request_cast(async_req);
+	start_async = (dd->req != req);
+	dd->is_async = start_async;
+	dd->force_complete = false;
+
+	/* WARNING: ctx->start() MAY change dd->is_async. */
+	err = ctx->start(dd);
+	return (start_async) ? ret : err;
+}
+
+static int asr_sha_enqueue(struct ahash_request *req, unsigned int op)
+{
+	struct asr_optee_sha_reqctx *optee_ctx = ahash_request_ctx(req);
+	struct asr_sha_reqctx *ctx = &optee_ctx->reqctx;
+
+	struct asr_bcm_sha *dd = ctx->dd;
+
+	ctx->op = op;
+
+	return asr_sha_handle_queue(dd, req);
+}
+
+static void asr_sha_copy_ready_hash(struct ahash_request *req)
+{
+	struct asr_optee_sha_reqctx *optee_ctx = ahash_request_ctx(req);
+	struct asr_sha_reqctx *ctx = &optee_ctx->reqctx;
+
+	if (!req->result)
+		return;
+
+	switch (ctx->flags & SHA_FLAGS_ALGO_MASK) {
+	case SHA_FLAGS_MD5:
+		memcpy(req->result, ctx->digest, MD5_DIGEST_SIZE);
+		break;
+	case SHA_FLAGS_SHA1:
+		memcpy(req->result, ctx->digest, SHA1_DIGEST_SIZE);
+		break;
+	case SHA_FLAGS_SHA224:
+		memcpy(req->result, ctx->digest, SHA224_DIGEST_SIZE);
+		break;
+	case SHA_FLAGS_SHA256:
+		memcpy(req->result, ctx->digest, SHA256_DIGEST_SIZE);
+		break;
+	case SHA_FLAGS_SHA384:
+		memcpy(req->result, ctx->digest, SHA384_DIGEST_SIZE);
+		break;
+	case SHA_FLAGS_SHA512:
+		memcpy(req->result, ctx->digest, SHA512_DIGEST_SIZE);
+		break;
+	default:
+		return;
+	}
+}
+
+static inline int asr_sha_complete(struct asr_bcm_sha *dd, int err)
+{
+	struct ahash_request *req = dd->req;
+	struct asr_optee_sha_reqctx *optee_ctx = ahash_request_ctx(req);
+	struct asr_sha_reqctx *ctx = &optee_ctx->reqctx;
+
+	dd->flags &= ~(SHA_FLAGS_BUSY);
+	ctx->flags &= ~(SHA_FLAGS_FINAL);
+
+	if ((dd->is_async || dd->force_complete) && req->base.complete)
+		req->base.complete(&req->base, err);
+
+	/* handle new request */
+	tasklet_schedule(&dd->queue_task);
+
+	return err;
+}
+
+static size_t asr_sha_append_sg(struct asr_sha_reqctx *ctx)
+{
+	size_t count;
+
+	while ((ctx->bufcnt < ctx->buflen) && ctx->total) {
+		count = min(ctx->sg->length - ctx->offset, ctx->total);
+		count = min(count, ctx->buflen - ctx->bufcnt);
+
+		if (count <= 0) {
+			/*
+			 * Check if count <= 0 because the buffer is full or
+			 * because the sg length is 0. In the latter case,
+			 * check if there is another sg in the list; a zero-length
+			 * sg doesn't necessarily mean the end of the sg list.
+			 */
+			if ((ctx->sg->length == 0) && !sg_is_last(ctx->sg)) {
+				ctx->sg = sg_next(ctx->sg);
+				continue;
+			} else {
+				break;
+			}
+		}
+
+		scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, ctx->sg,
+			ctx->offset, count, 0);
+
+		ctx->bufcnt += count;
+		ctx->offset += count;
+		ctx->total -= count;
+
+		if (ctx->offset == ctx->sg->length) {
+			ctx->sg = sg_next(ctx->sg);
+			if (ctx->sg)
+				ctx->offset = 0;
+			else
+				ctx->total = 0;
+		}
+	}
+
+	return 0;
+}
+
+static int asr_sha_buff_init(struct asr_bcm_sha *dd, uint32_t len)
+{
+	struct ahash_request *req = dd->req;
+	struct asr_optee_sha_reqctx *optee_ctx = ahash_request_ctx(req);
+	struct asr_sha_reqctx *ctx = &optee_ctx->reqctx;
+
+	ctx->buffer = (void *)__get_free_pages(GFP_KERNEL, get_order(len));
+	if (!ctx->buffer) {
+		dev_err(dd->dev, "unable to alloc pages.\n");
+		return -ENOMEM;
+	}
+
+	ctx->buflen = PAGE_SIZE << get_order(len);
+
+	return 0;
+}
+
+static void asr_sha_buff_cleanup(struct asr_bcm_sha *dd, uint32_t len)
+{
+	struct ahash_request *req = dd->req;
+	struct asr_optee_sha_reqctx *optee_ctx = ahash_request_ctx(req);
+	struct asr_sha_reqctx *ctx = &optee_ctx->reqctx;
+
+	free_pages((unsigned long)ctx->buffer, get_order(len));
+	ctx->buflen = 0;
+}
+
+static int sha_init_req(struct asr_optee_sha_reqctx *optee_ctx)
+{
+	int ret = 0;
+	struct asr_sha_reqctx *ctx = &optee_ctx->reqctx;
+
+	/* hardware: hash init */
+	ret = asrbcm_optee_acquire_hash_init(optee_ctx, &pta_sha_uuid, \
+								CMD_SHA_INIT, ctx->md.alg);
+	if (ret)
+		return -EINVAL;
+	return 0;
+}
+
+static int sha_update_req(struct asr_optee_sha_reqctx *optee_ctx)
+{
+	int ret = 0;
+	int bufcnt;
+	uint8_t *pdata;
+	struct asr_sha_reqctx *ctx = &optee_ctx->reqctx;
+	uint32_t buflen = ctx->total;
+
+	ret = asr_sha_buff_init(ctx->dd, ctx->total);
+	if (ret)
+		return -ENOMEM;
+	
+	asr_sha_append_sg(ctx);
+	bufcnt = ctx->bufcnt;
+	ctx->bufcnt = 0;
+
+	pdata = (uint8_t *)ctx->buffer;
+
+	/* hardware: hash process */
+	ret = asrbcm_optee_acquire_hash_update(optee_ctx, &pta_sha_uuid, \
+			CMD_SHA_UPDATE, ctx->md.alg, pdata, bufcnt);
+	if (ret)
+		ret = -EINVAL;
+
+	asr_sha_buff_cleanup(ctx->dd, buflen);
+	return ret;
+}
+
+static void sha_finish_req(struct asr_optee_sha_reqctx *optee_ctx, int *err)
+{
+	struct asr_sha_reqctx *ctx = &optee_ctx->reqctx;
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(ctx->dd->req);
+    uint8_t *hash = (uint8_t *)ctx->digest;
+	uint32_t outlen = crypto_ahash_digestsize(tfm);
+
+	if (!(*err) && (ctx->flags & SHA_FLAGS_FINAL)) {
+		*err = asrbcm_optee_acquire_hash_final(optee_ctx, &pta_sha_uuid, CMD_SHA_FINAL, \
+											ctx->md.alg, (uint8_t *)hash, outlen);
+		ctx->flags &= (~SHA_FLAGS_FINAL);
+		asr_sha_copy_ready_hash(ctx->dd->req);
+	} else {
+		ctx->flags |= SHA_FLAGS_ERROR;
+	}
+}
+
+static void sha_next_req(struct asr_optee_sha_reqctx *optee_ctx, int *err)
+{
+	struct asr_sha_reqctx *ctx = &optee_ctx->reqctx;
+
+	if (likely(!(*err) && (SHA_FLAGS_FINAL & ctx->flags)))
+		sha_finish_req(optee_ctx, err);
+
+	(void)asr_sha_complete(ctx->dd, *err);
+}
+
+static int asr_sha_done(struct asr_bcm_sha *dd);
+
+static int asr_sha_start(struct asr_bcm_sha *dd)
+{
+	int err = 0;
+	struct ahash_request *req = dd->req;
+	struct asr_optee_sha_reqctx *optee_ctx = ahash_request_ctx(req);
+	struct asr_sha_reqctx *ctx = &optee_ctx->reqctx;
+	
+	mutex_lock(&dd->queue_lock);
+
+	dd->resume = asr_sha_done;
+
+	if ((ctx->flags & SHA_FLAGS_INIT)) {
+		err = sha_init_req(optee_ctx);
+		ctx->flags &= (~SHA_FLAGS_INIT);
+	}
+
+	if (!err) {
+		if (ctx->op == SHA_OP_UPDATE) {
+			err = sha_update_req(optee_ctx);
+			if (!err && (ctx->flags & SHA_FLAGS_FINUP))
+				/* no final() after finup() */
+				sha_finish_req(optee_ctx, &err);
+		} else if (ctx->op == SHA_OP_FINAL) {
+			sha_finish_req(optee_ctx, &err);
+		}
+	}
+
+	if (unlikely(err != -EINPROGRESS))
+		/* Task will not finish it, so do it here */
+		sha_next_req(optee_ctx, &err);
+
+	mutex_unlock(&dd->queue_lock);
+	return err;
+}
+
+static int asr_sha_cra_init(struct crypto_tfm *tfm)
+{
+    struct asr_sha_ctx *ctx = crypto_tfm_ctx(tfm);
+	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+				 sizeof(struct asr_optee_sha_reqctx));
+	ctx->start = asr_sha_start;
+
+	return 0;
+}
+
+static void asr_sha_cra_exit(struct crypto_tfm *tfm)
+{
+    struct asr_sha_ctx *ctx = crypto_tfm_ctx(tfm);
+    memset(ctx, 0, sizeof(*ctx));
+}
+
+static inline void asr_sha_get(struct asr_bcm_sha *dd)
+{
+	mutex_lock(&dd->sha_lock);
+}
+
+static inline void asr_sha_put(struct asr_bcm_sha *dd)
+{
+	if (mutex_is_locked(&dd->sha_lock))
+		mutex_unlock(&dd->sha_lock);
+}
+
+static int asr_sha_init(struct ahash_request *req)
+{
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct asr_optee_sha_reqctx *optee_ctx = ahash_request_ctx(req);
+	struct asr_sha_reqctx *ctx = &optee_ctx->reqctx;
+	struct asr_bcm_sha *dd = asr_sha_local;
+
+    asr_sha_get(dd);
+
+    ctx->dd = dd;
+	memset(&ctx->md, 0, sizeof(ctx->md));
+    ctx->flags = 0;
+
+	switch (crypto_ahash_digestsize(tfm)) {
+	case MD5_DIGEST_SIZE:
+		ctx->flags |= SHA_FLAGS_MD5;
+		ctx->md.alg = TEE_ALG_MD5;
+		ctx->md.block_size = MD5_HMAC_BLOCK_SIZE;
+		break;
+	case SHA1_DIGEST_SIZE:
+		ctx->flags |= SHA_FLAGS_SHA1;
+		ctx->md.alg = TEE_ALG_SHA1;
+		ctx->md.block_size = SHA1_BLOCK_SIZE;
+		break;
+	case SHA224_DIGEST_SIZE:
+		ctx->flags |= SHA_FLAGS_SHA224;
+		ctx->md.alg = TEE_ALG_SHA224;
+		ctx->md.block_size = SHA224_BLOCK_SIZE;
+		break;
+	case SHA256_DIGEST_SIZE:
+		ctx->flags |= SHA_FLAGS_SHA256;
+		ctx->md.alg = TEE_ALG_SHA256;
+		ctx->md.block_size = SHA256_BLOCK_SIZE;
+		break;
+	case SHA384_DIGEST_SIZE:
+		ctx->flags |= SHA_FLAGS_SHA384;
+		ctx->md.alg = TEE_ALG_SHA384;
+		ctx->md.block_size = SHA384_BLOCK_SIZE;
+		break;
+	case SHA512_DIGEST_SIZE:
+		ctx->flags |= SHA_FLAGS_SHA512;
+		ctx->md.alg = TEE_ALG_SHA512;
+		ctx->md.block_size = SHA512_BLOCK_SIZE;
+		break;
+	default:
+        asr_sha_put(dd);
+		return -EINVAL;
+	}
+
+	ctx->bufcnt = 0;
+	ctx->flags |= SHA_FLAGS_INIT;
+	
+	asr_sha_put(dd);
+	return 0;
+}
+
+static int asr_sha_update(struct ahash_request *req)
+{
+	int ret = 0;
+	struct asr_optee_sha_reqctx *optee_ctx = ahash_request_ctx(req);
+	struct asr_sha_reqctx *ctx = &optee_ctx->reqctx;
+
+	asr_sha_get(ctx->dd);
+	ctx->total = req->nbytes;
+	ctx->sg = req->src;
+	ctx->offset = 0;
+
+	ret = asr_sha_enqueue(req, SHA_OP_UPDATE);
+
+	asr_sha_put(ctx->dd);
+	return ret;
+}
+
+static int asr_sha_final(struct ahash_request *req)
+{
+	int ret = 0;
+	struct asr_optee_sha_reqctx *optee_ctx = ahash_request_ctx(req);
+	struct asr_sha_reqctx *ctx = &optee_ctx->reqctx;
+
+	asr_sha_get(ctx->dd);
+	ctx->flags |= SHA_FLAGS_FINAL;
+	if (ctx->flags & SHA_FLAGS_ERROR) {
+		asr_sha_put(ctx->dd);
+		return 0; /* uncompleted hash is not needed */
+	}
+	ret = asr_sha_enqueue(req, SHA_OP_FINAL);
+
+	asr_sha_put(ctx->dd);
+	return ret;
+}
+
+static int asr_sha_finup(struct ahash_request *req)
+{
+	struct asr_optee_sha_reqctx *optee_ctx = ahash_request_ctx(req);
+	struct asr_sha_reqctx *ctx = &optee_ctx->reqctx;
+	int err1, err2;
+
+	ctx->flags |= SHA_FLAGS_FINUP;
+
+	err1 = asr_sha_update(req);
+	if (err1 == -EINPROGRESS ||
+		(err1 == -EBUSY && (ahash_request_flags(req) &
+				CRYPTO_TFM_REQ_MAY_BACKLOG))) {
+		asr_sha_put(ctx->dd);
+		return err1;
+	}
+	/*
+	 * final() has to be always called to cleanup resources
+	 * even if update() failed, except EINPROGRESS
+	 */
+	err2 = asr_sha_final(req);
+
+	return err1 ?: err2;
+}
+
+static int asr_sha_digest(struct ahash_request *req)
+{
+	return asr_sha_init(req) ?: asr_sha_finup(req);
+}
+
+static int asr_sha_export(struct ahash_request *req, void *out)
+{
+	const struct asr_optee_sha_reqctx *ctx = ahash_request_ctx(req);
+
+	memcpy(out, ctx, sizeof(*ctx));
+	return 0;
+}
+
+static int asr_sha_import(struct ahash_request *req, const void *in)
+{
+	struct asr_optee_sha_reqctx *ctx = ahash_request_ctx(req);
+
+	memcpy(ctx, in, sizeof(*ctx));
+	return 0;
+}
+
+static struct ahash_alg sha_algs[] = {
+	/* md5 */
+	{
+		.init		= asr_sha_init,
+		.update		= asr_sha_update,
+		.final		= asr_sha_final,
+		.finup		= asr_sha_finup,
+		.digest		= asr_sha_digest,
+		.export		= asr_sha_export,
+		.import		= asr_sha_import,
+		.halg = {
+			.digestsize	= MD5_DIGEST_SIZE,
+			.statesize	= sizeof(struct asr_optee_sha_reqctx),
+			.base	= {
+				.cra_name		= "md5",
+				.cra_driver_name	= "asr-md5",
+				.cra_priority		= ASR_SHA_PRIORITY,
+				.cra_flags		= CRYPTO_ALG_ASYNC,
+				.cra_blocksize		= MD5_HMAC_BLOCK_SIZE,
+				.cra_ctxsize		= sizeof(struct asr_sha_ctx),
+				.cra_alignmask		= 0,
+				.cra_module		= THIS_MODULE,
+				.cra_init		= asr_sha_cra_init,
+				.cra_exit		= asr_sha_cra_exit,
+			}
+		}
+	},
+
+	/* sha1 */
+	{
+		.init		= asr_sha_init,
+		.update		= asr_sha_update,
+		.final		= asr_sha_final,
+		.finup		= asr_sha_finup,
+		.digest		= asr_sha_digest,
+		.export		= asr_sha_export,
+		.import		= asr_sha_import,
+		.halg = {
+			.digestsize	= SHA1_DIGEST_SIZE,
+			.statesize	= sizeof(struct asr_optee_sha_reqctx),
+			.base	= {
+				.cra_name		= "sha1",
+				.cra_driver_name	= "asr-sha1",
+				.cra_priority		= ASR_SHA_PRIORITY,
+				.cra_flags		= CRYPTO_ALG_ASYNC,
+				.cra_blocksize		= SHA1_BLOCK_SIZE,
+				.cra_ctxsize		= sizeof(struct asr_sha_ctx),
+				.cra_alignmask		= 0,
+				.cra_module		= THIS_MODULE,
+				.cra_init		= asr_sha_cra_init,
+				.cra_exit		= asr_sha_cra_exit,
+			}
+		}
+	},
+
+	/* sha224 */
+	{
+		.init		= asr_sha_init,
+		.update		= asr_sha_update,
+		.final		= asr_sha_final,
+		.finup		= asr_sha_finup,
+		.digest		= asr_sha_digest,
+		.export		= asr_sha_export,
+		.import		= asr_sha_import,
+		.halg = {
+			.digestsize	= SHA224_DIGEST_SIZE,
+			.statesize	= sizeof(struct asr_optee_sha_reqctx),
+			.base	= {
+				.cra_name		= "sha224",
+				.cra_driver_name	= "asr-sha224",
+				.cra_priority		= ASR_SHA_PRIORITY,
+				.cra_flags		= CRYPTO_ALG_ASYNC,
+				.cra_blocksize		= SHA224_BLOCK_SIZE,
+				.cra_ctxsize		= sizeof(struct asr_sha_ctx),
+				.cra_alignmask		= 0,
+				.cra_module		= THIS_MODULE,
+				.cra_init		= asr_sha_cra_init,
+				.cra_exit		= asr_sha_cra_exit,
+			}
+		}
+	},
+
+	/* sha256 */
+	{
+		.init		= asr_sha_init,
+		.update		= asr_sha_update,
+		.final		= asr_sha_final,
+		.finup		= asr_sha_finup,
+		.digest		= asr_sha_digest,
+		.export		= asr_sha_export,
+		.import		= asr_sha_import,
+		.halg = {
+			.digestsize	= SHA256_DIGEST_SIZE,
+			.statesize	= sizeof(struct asr_optee_sha_reqctx),
+			.base	= {
+				.cra_name		= "sha256",
+				.cra_driver_name	= "asr-sha256",
+				.cra_priority		= ASR_SHA_PRIORITY,
+				.cra_flags		= CRYPTO_ALG_ASYNC,
+				.cra_blocksize		= SHA256_BLOCK_SIZE,
+				.cra_ctxsize		= sizeof(struct asr_sha_ctx),
+				.cra_alignmask		= 0,
+				.cra_module		= THIS_MODULE,
+				.cra_init		= asr_sha_cra_init,
+				.cra_exit		= asr_sha_cra_exit,
+			}
+		}
+	},
+
+	/* sha384 */
+	{
+		.init		= asr_sha_init,
+		.update		= asr_sha_update,
+		.final		= asr_sha_final,
+		.finup		= asr_sha_finup,
+		.digest		= asr_sha_digest,
+		.export		= asr_sha_export,
+		.import		= asr_sha_import,
+		.halg = {
+			.digestsize	= SHA384_DIGEST_SIZE,
+			.statesize	= sizeof(struct asr_optee_sha_reqctx),
+			.base	= {
+				.cra_name		= "sha384",
+				.cra_driver_name	= "asr-sha384",
+				.cra_priority		= ASR_SHA_PRIORITY,
+				.cra_flags		= CRYPTO_ALG_ASYNC,
+				.cra_blocksize		= SHA384_BLOCK_SIZE,
+				.cra_ctxsize		= sizeof(struct asr_sha_ctx),
+				.cra_alignmask		= 0,
+				.cra_module		= THIS_MODULE,
+				.cra_init		= asr_sha_cra_init,
+				.cra_exit		= asr_sha_cra_exit,
+			}
+		}
+	},
+
+	/* sha512 */
+	{
+		.init		= asr_sha_init,
+		.update		= asr_sha_update,
+		.final		= asr_sha_final,
+		.finup		= asr_sha_finup,
+		.digest		= asr_sha_digest,
+		.export		= asr_sha_export,
+		.import		= asr_sha_import,
+		.halg = {
+			.digestsize	= SHA512_DIGEST_SIZE,
+			.statesize	= sizeof(struct asr_optee_sha_reqctx),
+			.base	= {
+				.cra_name		= "sha512",
+				.cra_driver_name	= "asr-sha512",
+				.cra_priority		= ASR_SHA_PRIORITY,
+				.cra_flags		= CRYPTO_ALG_ASYNC,
+				.cra_blocksize		= SHA512_BLOCK_SIZE,
+				.cra_ctxsize		= sizeof(struct asr_sha_ctx),
+				.cra_alignmask		= 0,
+				.cra_module		= THIS_MODULE,
+				.cra_init		= asr_sha_cra_init,
+				.cra_exit		= asr_sha_cra_exit,
+			}
+		}
+	},
+};
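+
+/*
+ * Illustrative sketch only (not built by default): computing a one-shot
+ * SHA-256 digest through the ahash algorithms registered above. The guard
+ * macro ASR_BCM_SHA_EXAMPLE and the "abc" sample message are assumptions
+ * for demonstration.
+ */
+#ifdef ASR_BCM_SHA_EXAMPLE
+static int asr_sha_example(void)
+{
+	struct crypto_ahash *tfm;
+	struct ahash_request *req;
+	struct scatterlist sg;
+	DECLARE_CRYPTO_WAIT(wait);
+	u8 digest[SHA256_DIGEST_SIZE];
+	char *msg;
+	int err;
+
+	msg = kstrdup("abc", GFP_KERNEL);
+	if (!msg)
+		return -ENOMEM;
+
+	tfm = crypto_alloc_ahash("sha256", 0, 0);
+	if (IS_ERR(tfm)) {
+		kfree(msg);
+		return PTR_ERR(tfm);
+	}
+
+	req = ahash_request_alloc(tfm, GFP_KERNEL);
+	if (!req) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+	/* digest() runs init + update + final; wait for the async result */
+	sg_init_one(&sg, msg, strlen(msg));
+	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+				   crypto_req_done, &wait);
+	ahash_request_set_crypt(req, &sg, digest, strlen(msg));
+	err = crypto_wait_req(crypto_ahash_digest(req), &wait);
+
+	ahash_request_free(req);
+out:
+	crypto_free_ahash(tfm);
+	kfree(msg);
+	return err;
+}
+#endif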
+
+static void asr_sha_queue_task(unsigned long data)
+{
+	struct asr_bcm_sha *dd = (struct asr_bcm_sha *)data;
+
+	asr_sha_handle_queue(dd, NULL);
+}
+
+static int asr_sha_done(struct asr_bcm_sha *dd)
+{
+	int err = 0;
+	struct ahash_request *req = dd->req;
+	struct asr_optee_sha_reqctx *ctx = ahash_request_ctx(req);
+
+	sha_finish_req(ctx, &err);
+
+	return err;
+}
+
+static void asr_sha_done_task(unsigned long data)
+{
+	struct asr_bcm_sha *dd = (struct asr_bcm_sha *)data;
+
+	dd->is_async = true;
+	(void)dd->resume(dd);
+}
+
+static int hash_handle(int alg, uint8_t *in, uint32_t inlen, uint8_t *out)
+{
+	int ret = 0;
+	uint32_t outlen;
+	struct asr_optee_sha_reqctx ctx;
+
+	switch(alg) {
+	case TEE_ALG_SHA512:
+		outlen = HASH_LEN_SHA512;
+		break;
+	case TEE_ALG_SHA384:
+		outlen = HASH_LEN_SHA384;
+		break;
+	case TEE_ALG_SHA256:
+		outlen = HASH_LEN_SHA256;
+		break;
+	case TEE_ALG_SHA224:
+		outlen = HASH_LEN_SHA224;
+		break;
+	case TEE_ALG_SHA1:
+		outlen = HASH_LEN_SHA1;
+		break;
+	case TEE_ALG_MD5:
+		outlen = HASH_LEN_MD5;
+		break;
+	default:
+		printk("err: unsupported hash alg\n");
+		ret = -1;
+		goto exit;
+	}
+
+	ret = asrbcm_optee_acquire_hash_init(&ctx, &pta_sha_uuid, CMD_SHA_INIT, alg);
+	if (ret) {
+		ret = -1;
+		goto exit;
+	}
+
+	ret = asrbcm_optee_acquire_hash_update(&ctx, &pta_sha_uuid, CMD_SHA_UPDATE, alg, in, inlen);
+	if (ret) {
+		ret = -1;
+		goto exit;
+	}
+
+	ret = asrbcm_optee_acquire_hash_final(&ctx, &pta_sha_uuid, CMD_SHA_FINAL, alg, out, outlen);
+	if (ret) {
+		ret = -1;
+		goto exit;
+	}
+
+exit:
+	return ret;
+}
+
+static int tee_hwhash_func_verify(void)
+{
+	int ret = 0;
+	unsigned char out_sha256[32] = {0};
+	const struct {
+		const char *msg;
+		uint8_t hash[32];
+	} sha256_tests = {
+		"abc", 
+		{   0xBA, 0x78, 0x16, 0xBF, 0x8F, 0x01, 
+			0xCF, 0xEA, 0x41, 0x41, 0x40, 0xDE, 
+			0x5D, 0xAE, 0x22, 0x23, 0xB0, 0x03, 
+			0x61, 0xA3, 0x96, 0x17, 0x7A, 0x9C, 
+			0xB4, 0x10, 0xFF, 0x61, 0xF2, 0x00, 
+			0x15, 0xAD
+		}
+	};
+
+	ret = hash_handle(TEE_ALG_SHA256, (uint8_t *)sha256_tests.msg, strlen(sha256_tests.msg), out_sha256);
+	if (ret) 
+		return ret;
+
+	if (memcmp(out_sha256, sha256_tests.hash, sizeof(out_sha256))) {
+		return -1;
+	}
+
+	return 0;
+}
+
+// #define ASR_BCM_SHA_TEST
+
+#ifdef ASR_BCM_SHA_TEST
+static int bcm_sha_test(void);
+#endif
+
+int asr_bcm_sha_register(struct asr_bcm_dev *bcm_dd)
+{
+	int err, i, j;
+	struct asr_bcm_sha *sha_dd;
+
+	sha_dd = &bcm_dd->asr_sha;
+	sha_dd->dev = bcm_dd->dev;
+
+	asr_sha_local = sha_dd;
+
+	spin_lock_init(&sha_dd->lock);
+	mutex_init(&sha_dd->sha_lock);
+	mutex_init(&sha_dd->queue_lock);
+	tasklet_init(&sha_dd->done_task, asr_sha_done_task,
+					(unsigned long)sha_dd);
+	tasklet_init(&sha_dd->queue_task, asr_sha_queue_task,
+					(unsigned long)sha_dd);
+	crypto_init_queue(&sha_dd->queue, ASR_SHA_QUEUE_LENGTH);
+
+	/* don't register sha if hash verify err in tos */
+	err = tee_hwhash_func_verify();
+	if (err) 
+		return err;
+	
+	for (i = 0; i < ARRAY_SIZE(sha_algs); i++) {
+		err = crypto_register_ahash(&sha_algs[i]);
+		if (err)
+			goto err_sha_algs;
+	}
+
+#ifdef ASR_BCM_SHA_TEST
+	bcm_sha_test();
+#endif
+
+	return 0;
+
+err_sha_algs:
+	for (j = 0; j < i; j++)
+		crypto_unregister_ahash(&sha_algs[j]);
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(asr_bcm_sha_register);
+
+int asr_bcm_sha_unregister(struct asr_bcm_dev *bcm_dd)
+{
+	int i;
+	struct asr_bcm_sha *sha_dd = &bcm_dd->asr_sha;
+
+
+	for (i = 0; i < ARRAY_SIZE(sha_algs); i++)
+		crypto_unregister_ahash(&sha_algs[i]);
+
+	tasklet_kill(&sha_dd->queue_task);
+	tasklet_kill(&sha_dd->done_task);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(asr_bcm_sha_unregister);
+
+
+
+#ifdef ASR_BCM_SHA_TEST
+
+static int bcm_sha_test(void)
+{
+	int ret = 0;
+	uint32_t i;
+	
+	const struct {
+		const char *msg;
+		uint8_t hash[20];
+	} sha1_tests[] = {
+		{
+			"abc", 
+			{   0xa9, 0x99, 0x3e, 0x36, 0x47, 0x06, 
+				0x81, 0x6a, 0xba, 0x3e, 0x25, 0x71, 
+				0x78, 0x50, 0xc2, 0x6c, 0x9c, 0xd0,
+				0xd8, 0x9d 
+			}
+		},
+		{
+			"asjhsdjljfdsdjjkdfwyqeuwouzxkmcxjkmwqdsjklfdfjlkdfkfs" \
+			"fkjlfskjdflioherfjjfdjkfdnkfdfdojjodfjdfjflj;sljjlfkkl" \
+			"nfnkgbhhoigfhigfopojpfjojpoffkjlfskjdflioherfjjfdjkfdn" \
+			"kfdfdojjodfjdfjfljnfnkgbhhoigfhigfoponfnkgbhhoigfhigfopojpfjo",
+			{
+				0x93, 0x84, 0x7f, 0x98, 0x22, 0x5e, 
+				0x6d, 0xf2, 0x09, 0x1c, 0xc9, 0xac, 
+				0xbb, 0x5d, 0x00, 0x2d, 0x64, 0x81, 
+				0xe3, 0xcd
+			}
+		},
+		{
+			"asjhsdjljfdsdjjkdfwyqeuwouzxkmcxjkmwqdsjklfdfjlkdfkfs" \
+			"fkjlfskjdflioherfjjfdjkfdnkfdfdojjodfjdfjflj;sljjlfkkl" \
+			"nfnkgbhhoigfhigfopojpfjojpoffkjlfskjdflioherfjjfdjkfdn" \
+			"kfdfdojjodfjdfjfljnfnkgbhhoigfhigfoponfnkgbhhoigfhigfopojpfjoewiroiowiod",
+			{
+				0x6a, 0x66, 0xc2, 0x87, 0x84, 0x36, 
+				0x14, 0x90, 0x99, 0x03, 0x90, 0xf0, 
+				0xaa, 0x7e, 0xbd, 0xc7, 0xdb, 0x38, 
+				0x54, 0x09
+			}
+		},
+		{
+			"asjhsdjljfdsdjjkdfwyqeuwouzxkmcxjkmwqds"
+			"jklfdfjlkdfkfsfkjlfskjdflioherfjjfdjkfd"
+			"nkfdfdojjodfjdfjflj;sljjlfkklnfnkgbhhoi"
+			"gfhigfopojpfjojpoffkjlfskjdflioherfjjfd"
+			"jkfdnkfdfdojjodfjdfjfljnfnkgbhhoigfhigf"
+			"oponfnkgbhhoigfhigfopojpfjoewiroiowiods"
+			"djkisijdknknkskdnknflnnesniewinoinknmdn"
+			"kknknsdnjjfsnnkfnkknslnklknfnknkflksnlk"
+			"lskldklklklnmlflmlmlfmlfml",
+			{
+				0xc4, 0x53, 0xca, 0x24, 0xfa, 0xe5,
+				0x39, 0x53, 0x08, 0x8c, 0x57, 0x1a, 
+				0x96, 0xe9, 0x64, 0x7f, 0xd5, 0xf9, 
+				0x13, 0x91
+			}
+		}
+	};
+
+	struct asr_optee_sha_reqctx ctx1;
+	struct asr_optee_sha_reqctx ctx2;
+	struct asr_optee_sha_reqctx ctx3;
+	struct asr_optee_sha_reqctx ctx4;
+	unsigned char out_sha1_1[20] = {0};
+	unsigned char out_sha1_2[20] = {0};
+	unsigned char out_sha1_3[20] = {0};
+	unsigned char out_sha1_4[20] = {0};
+
+	ret = asrbcm_optee_acquire_hash_init(&ctx1, &pta_sha_uuid, CMD_SHA_INIT, TEE_ALG_SHA1);
+	if (ret) {
+		return ret;
+	}
+
+	ret = asrbcm_optee_acquire_hash_init(&ctx2, &pta_sha_uuid, CMD_SHA_INIT, TEE_ALG_SHA1);
+	if (ret) {
+		return ret;
+	}
+
+	ret = asrbcm_optee_acquire_hash_update(&ctx1, &pta_sha_uuid, CMD_SHA_UPDATE, TEE_ALG_SHA1, 
+										(uint8_t *)sha1_tests[0].msg, strlen(sha1_tests[0].msg));
+	if (ret) {
+		return ret;
+	}
+
+	ret = asrbcm_optee_acquire_hash_init(&ctx3, &pta_sha_uuid, CMD_SHA_INIT, TEE_ALG_SHA1);
+	if (ret) {
+		return ret;
+	}
+
+	ret = asrbcm_optee_acquire_hash_update(&ctx2, &pta_sha_uuid, CMD_SHA_UPDATE, TEE_ALG_SHA1,
+										(uint8_t *)sha1_tests[1].msg, 10);
+	if (ret) {
+		return ret;
+	}
+
+	ret = asrbcm_optee_acquire_hash_update(&ctx2, &pta_sha_uuid, CMD_SHA_UPDATE, TEE_ALG_SHA1, 
+					(uint8_t *)sha1_tests[1].msg + 10, strlen(sha1_tests[1].msg) - 10);
+	if (ret) {
+		return ret;
+	}
+
+	ret = asrbcm_optee_acquire_hash_final(&ctx1, &pta_sha_uuid, CMD_SHA_FINAL, TEE_ALG_SHA1, 
+																out_sha1_1, sizeof(out_sha1_1));
+	if (ret) {
+		return ret;
+	}
+
+	ret = asrbcm_optee_acquire_hash_update(&ctx3, &pta_sha_uuid, CMD_SHA_UPDATE, TEE_ALG_SHA1, 
+															(uint8_t *)sha1_tests[2].msg, 25);
+	if (ret) {
+		return ret;
+	}
+
+	ret = asrbcm_optee_acquire_hash_init(&ctx4, &pta_sha_uuid, CMD_SHA_INIT, TEE_ALG_SHA1);
+	if (ret) {
+		return ret;
+	}
+
+	ret = asrbcm_optee_acquire_hash_final(&ctx2, &pta_sha_uuid, CMD_SHA_FINAL, TEE_ALG_SHA1, 
+										out_sha1_2, sizeof(out_sha1_2));
+	if (ret) {
+		return ret;
+	}
+
+	ret = asrbcm_optee_acquire_hash_update(&ctx3, &pta_sha_uuid, CMD_SHA_UPDATE, TEE_ALG_SHA1, 
+						(uint8_t *)sha1_tests[2].msg + 25, strlen(sha1_tests[2].msg) - 25);
+	if (ret) {
+		return ret;
+	}
+
+	ret = asrbcm_optee_acquire_hash_final(&ctx3, &pta_sha_uuid, CMD_SHA_FINAL, TEE_ALG_SHA1, 
+										out_sha1_3, sizeof(out_sha1_3));
+	if (ret) {
+		return ret;
+	}
+
+	ret = asrbcm_optee_acquire_hash_update(&ctx4, &pta_sha_uuid, CMD_SHA_UPDATE, TEE_ALG_SHA1, 
+										(uint8_t *)sha1_tests[3].msg, 43);
+	if (ret) {
+		return ret;
+	}
+	ret = asrbcm_optee_acquire_hash_update(&ctx4, &pta_sha_uuid, CMD_SHA_UPDATE, TEE_ALG_SHA1,
+					 (uint8_t *)sha1_tests[3].msg + 43, strlen(sha1_tests[3].msg) - 43);
+	if (ret) {
+		return ret;
+	}
+
+	ret = asrbcm_optee_acquire_hash_final(&ctx4, &pta_sha_uuid, CMD_SHA_FINAL, TEE_ALG_SHA1, 
+										out_sha1_4, sizeof(out_sha1_4));
+	if (ret) {
+		return ret;
+	}
+
+	if (memcmp(out_sha1_1, sha1_tests[0].hash, sizeof(out_sha1_1))) {
+		printk("sha1 test 0 failed\n");
+	} else {
+		printk("sha1 test 0 pass\n");
+	}
+	if (memcmp(out_sha1_2, sha1_tests[1].hash, sizeof(out_sha1_2))) {
+		printk("sha1 test 1 failed\n");
+	} else {
+		printk("sha1 test 1 pass\n");
+	}
+	if (memcmp(out_sha1_3, sha1_tests[2].hash, sizeof(out_sha1_3))) {
+		printk("sha1 test 2 failed\n");
+	} else {
+		printk("sha1 test 2 pass\n");
+	}
+	if (memcmp(out_sha1_4, sha1_tests[3].hash, sizeof(out_sha1_4))) {
+		printk("sha1 test 3 failed\n");
+	} else {
+		printk("sha1 test 3 pass\n");
+	}
+
+	return 0;
+}
+#endif
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("wangyonggan <yongganwang@asrmicro.com>");
+MODULE_DESCRIPTION("ASR bcm sha driver");
\ No newline at end of file
diff --git a/marvell/linux/drivers/crypto/asr/bcm_optee/asr-sha-optee.h b/marvell/linux/drivers/crypto/asr/bcm_optee/asr-sha-optee.h
new file mode 100644
index 0000000..97e174e
--- /dev/null
+++ b/marvell/linux/drivers/crypto/asr/bcm_optee/asr-sha-optee.h
@@ -0,0 +1,61 @@
+#ifndef _ASR_BCM_SHA_OPTEE_H_
+#define _ASR_BCM_SHA_OPTEE_H_
+
+#include "asr-bcm-optee.h"
+#ifdef CONFIG_TEE
+#include <linux/tee_drv.h>
+#endif
+
+#define ASR_SHA_ACCESS_UUID 									\
+        { 														\
+            0xc6445f2a, 0x3365, 0x11ef, 						\
+            { 0x9e, 0x32, 0xe7, 0x0c, 0x07, 0x9f, 0x77, 0xec } 	\
+        }
+
+#define TEE_ALG_MD5                             0x50000001
+#define TEE_ALG_SHA1                            0x50000002
+#define TEE_ALG_SHA224                          0x50000003
+#define TEE_ALG_SHA256                          0x50000004
+#define TEE_ALG_SHA384                          0x50000005
+#define TEE_ALG_SHA512                          0x50000006
+
+#define HASH_CONTEXT_SIZE (256)
+
+/*
+ * hash init params
+ *
+ * [in]     pParams[0].value.a          hash algorithm type
+ * [in]     pParams[1].value.a          hash context addr from external, such as kernel
+ */
+#define CMD_SHA_INIT         0x1
+
+/*
+ * hash update params
+ * when input addr is share mem, such as params from kernel:
+ * [in]     pParams[0].memref.buffer    input data
+ * [in]     pParams[0].memref.size      length of input data
+ * [in]     pParams[1].value.a          hash context addr from external, such as kernel
+ *
+ * when input addr is physical addr, such as params from uboot:
+ * [in]     pParams[0].value.a      input data addr
+ * [in]     pParams[0].value.b      length of input data
+ * [in]     pParams[1].value.a      whether physical addr
+ */
+#define CMD_SHA_UPDATE       0x2
+
+/*
+ * hash finish params
+ *
+ * [out]    pParams[0].memref.buffer    output hash
+ * [out]    pParams[0].memref.size      length of output hash
+ * [in]     pParams[1].value.a          hash context addr from external, such as kernel
+ */
+#define CMD_SHA_FINAL         0x3
+
+struct asr_optee_sha_reqctx {
+    struct asr_sha_reqctx reqctx;
+    struct tee_shm *shm;
+    struct asrbcm_tee_context asrbcm_tee_ctx;
+};
+
+#endif