ASR_BASE

Change-Id: Icf3719cc0afe3eeb3edc7fa80a2eb5199ca9dda1
diff --git a/marvell/linux/drivers/crypto/asr/bcm_optee/asr-cipher-optee.c b/marvell/linux/drivers/crypto/asr/bcm_optee/asr-cipher-optee.c
new file mode 100644
index 0000000..8d5912a
--- /dev/null
+++ b/marvell/linux/drivers/crypto/asr/bcm_optee/asr-cipher-optee.c
@@ -0,0 +1,651 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2023 ASR Micro Limited
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#ifdef CONFIG_TEE
+#include <linux/tee_drv.h>
+#endif
+#include <linux/crypto.h>
+#include <linux/cputype.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/algapi.h>
+#include <crypto/aes.h>
+#include <crypto/internal/skcipher.h>
+
+#include "asr-bcm-optee.h"
+#include "asr-cipher-optee.h"
+
+struct asr_bcm_cipher *asr_cipher_local;
+
+static struct teec_uuid pta_cipher_uuid = ASR_AES_ACCESS_UUID;
+
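+/*
+ * Query the RKEK (hardware root key) provisioning state from the OP-TEE
+ * pseudo-TA via CMD_AES_HWKEY_STATUS; the state is returned in the single
+ * VALUE_OUTPUT parameter.
+ */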
+static int asr_optee_cipher_get_rkek_state(u32 *state)
+{
+	struct tee_ioctl_invoke_arg invoke_arg;
+	struct tee_param params[1];
+	struct asrbcm_tee_context asrbcm_tee_ctx;
+	int ret = 0;
+
+	ret = asrbcm_optee_open_ta(&asrbcm_tee_ctx, &pta_cipher_uuid);
+	if (ret != 0) {
+		return ret;
+	}
+
+	memset(&invoke_arg, 0x0, sizeof(struct tee_ioctl_invoke_arg));
+	invoke_arg.func = CMD_AES_HWKEY_STATUS;
+	invoke_arg.session  = asrbcm_tee_ctx.session;
+	invoke_arg.num_params = 1;
+
+	params[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT;
+	params[0].u.value.a = 0;
+	params[0].u.value.b = 0;
+	params[0].u.value.c = 0;
+
+	ret = tee_client_invoke_func(asrbcm_tee_ctx.tee_ctx, &invoke_arg, params);
+	if (ret != 0) {
+		goto exit;
+	} else if (invoke_arg.ret != 0) {
+		ret = -EIO;
+		goto exit;
+	}
+
+	*state = params[0].u.value.a;
+
+exit:
+	asrbcm_optee_close_ta(&asrbcm_tee_ctx);
+	return ret;
+}
+
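+/*
+ * Perform one AES operation with a caller-supplied key through the OP-TEE
+ * pseudo-TA. A single shared-memory buffer is laid out as |src|dst|key[|iv]|:
+ * params[0]/params[1] reference the input/output regions, params[2].a carries
+ * the encrypt/decrypt flag and params[3] references the key (plus the IV for
+ * CBC, with params[2].b giving the key length so the TA can split them).
+ */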
+static int asr_optee_cipher_process(uint32_t cipher_mode, uint32_t op_mode,
+		struct scatterlist *src, struct scatterlist *dst,
+		size_t len, uint32_t key_size, u8 *key,
+		u8 *iv, uint32_t ivsize)
+{
+	struct tee_ioctl_invoke_arg invoke_arg;
+	struct tee_param params[4];
+	struct asrbcm_tee_context asrbcm_tee_ctx;
+	struct tee_shm *shm;
+	int ret = 0;
+	char *ma = NULL;
+	uint32_t srclen = len, dstlen = len, paralen_a = key_size, paralen_b = ivsize;
+	uint8_t *parabuf_a = key, *parabuf_b = iv;
+
+	ret = asrbcm_optee_open_ta(&asrbcm_tee_ctx, &pta_cipher_uuid);
+	if (ret != 0) {
+		return ret;
+	}
+
+	memset(&invoke_arg, 0x0, sizeof(struct tee_ioctl_invoke_arg));
+	invoke_arg.func = cipher_mode;
+	invoke_arg.session  = asrbcm_tee_ctx.session;
+
+	shm = tee_shm_alloc(asrbcm_tee_ctx.tee_ctx, srclen + dstlen + paralen_a + paralen_b,
+						TEE_SHM_MAPPED | TEE_SHM_DMA_BUF);
+	if (IS_ERR(shm)) {
+		ret = PTR_ERR(shm);
+		goto exit;
+	}
+
+	params[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
+	params[0].u.memref.shm_offs = 0;
+	params[0].u.memref.size = srclen;
+	params[0].u.memref.shm = shm;
+
+	params[1].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT;
+	params[1].u.memref.shm_offs = srclen;
+	params[1].u.memref.size = dstlen;
+	params[1].u.memref.shm = shm;
+
+	params[2].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT;
+	params[2].u.value.a = op_mode;
+	params[2].u.value.b = 0;
+	params[2].u.value.c = 0;
+	ma = tee_shm_get_va(shm, 0);
+	if (IS_ERR(ma)) {
+		ret = PTR_ERR(ma);
+		goto free_shm;
+	}
+	sg_copy_to_buffer(src, sg_nents(src), ma, srclen);
+	memcpy(ma + srclen + dstlen, parabuf_a, paralen_a);
+
+	/* CBC: the key is followed by the IV in the shared parameter region */
+	if (parabuf_b && paralen_b) {
+		memcpy(ma + srclen + dstlen + paralen_a, parabuf_b, paralen_b);
+		params[2].u.value.b = paralen_a;
+		params[3].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
+		params[3].u.memref.shm_offs = srclen + dstlen;
+		params[3].u.memref.size = paralen_a + paralen_b;
+		params[3].u.memref.shm = shm;
+		invoke_arg.num_params = 4;
+	} else {
+		/* ECB: the shared parameter region holds the key only, no IV */
+		params[3].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
+		params[3].u.memref.shm_offs = srclen + dstlen;
+		params[3].u.memref.size = paralen_a;
+		params[3].u.memref.shm = shm;
+		invoke_arg.num_params = 4;
+	}
+
+	ret = tee_client_invoke_func(asrbcm_tee_ctx.tee_ctx, &invoke_arg, params);
+	if (ret != 0) {
+		goto free_shm;
+	} else if (invoke_arg.ret != 0) {
+		ret = -EIO;
+		goto free_shm;
+	}
+	sg_copy_from_buffer(dst, sg_nents(dst), ma + srclen, dstlen);
+
+free_shm:
+	tee_shm_free(shm);
+exit:
+	asrbcm_optee_close_ta(&asrbcm_tee_ctx);
+	return ret;
+}
+
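+/*
+ * Perform one AES operation with the on-chip hardware key (RKEK) through the
+ * OP-TEE pseudo-TA. The shared buffer is laid out as |src|dst[|iv]|;
+ * params[2] carries the key size and encrypt/decrypt flag, and the optional
+ * params[3] references the IV for CBC.
+ */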
+static int asr_optee_cipher_hwkey_process(uint32_t cipher_mode, uint32_t op_mode,
+		struct scatterlist *src, struct scatterlist *dst,
+		size_t len, uint32_t key_size,
+		u8 *iv, uint32_t ivsize)
+{
+	struct tee_ioctl_invoke_arg invoke_arg;
+	struct tee_param params[4];
+	struct asrbcm_tee_context asrbcm_tee_ctx;
+	struct tee_shm *shm;
+	int ret = 0;
+	char *ma = NULL;
+	uint32_t srclen = len, dstlen = len, paralen = ivsize;
+	uint8_t *parabuf = iv;
+
+	ret = asrbcm_optee_open_ta(&asrbcm_tee_ctx, &pta_cipher_uuid);
+	if (ret != 0) {
+		return ret;
+	}
+
+	memset(&invoke_arg, 0x0, sizeof(struct tee_ioctl_invoke_arg));
+	invoke_arg.func = cipher_mode;
+	invoke_arg.session  = asrbcm_tee_ctx.session;
+
+	shm = tee_shm_alloc(asrbcm_tee_ctx.tee_ctx, srclen + dstlen + paralen,
+						TEE_SHM_MAPPED | TEE_SHM_DMA_BUF);
+	if (IS_ERR(shm)) {
+		ret = PTR_ERR(shm);
+		goto exit;
+	}
+
+	params[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
+	params[0].u.memref.shm_offs = 0;
+	params[0].u.memref.size = srclen;
+	params[0].u.memref.shm = shm;
+
+	params[1].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT;
+	params[1].u.memref.shm_offs = srclen;
+	params[1].u.memref.size = dstlen;
+	params[1].u.memref.shm = shm;
+
+	params[2].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT;
+	params[2].u.value.a = key_size;
+	params[2].u.value.b = op_mode;
+	params[2].u.value.c = 0;
+
+	ma = tee_shm_get_va(shm, 0);
+	if (IS_ERR(ma)) {
+		ret = PTR_ERR(ma);
+		goto free_shm;
+	}
+	sg_copy_to_buffer(src, sg_nents(src), ma, srclen);
+	if (parabuf && paralen) {
+		params[3].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
+		params[3].u.memref.shm_offs = srclen + dstlen;
+		params[3].u.memref.size = paralen;
+		params[3].u.memref.shm = shm;
+		memcpy(ma + srclen + dstlen, parabuf, paralen);
+		invoke_arg.num_params = 4;
+	} else {
+		invoke_arg.num_params = 3;
+	}
+
+	ret = tee_client_invoke_func(asrbcm_tee_ctx.tee_ctx, &invoke_arg, params);
+	if (ret != 0) {
+		goto free_shm;
+	} else if (invoke_arg.ret != 0) {
+		ret = -EIO;
+		goto free_shm;
+	}
+	sg_copy_from_buffer(dst, sg_nents(dst), ma + srclen, dstlen);
+
+free_shm:
+	tee_shm_free(shm);
+exit:
+	asrbcm_optee_close_ta(&asrbcm_tee_ctx);
+	return ret;
+}
+
+static inline void asr_cipher_set_mode(struct asr_bcm_cipher *dd,
+				      const struct asr_cipher_reqctx *rctx)
+{
+	/* Clear all but persistent flags and set request flags. */
+	dd->flags = (dd->flags & CIPHER_FLAGS_PERSISTENT) | rctx->mode;
+}
+
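+/*
+ * After a request completes, copy the last ciphertext block into req->iv so
+ * that chained CBC requests continue correctly. For in-place decryption the
+ * block was saved in rctx->lastc before the ciphertext was overwritten.
+ */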
+static void asr_cipher_set_iv_as_last_ciphertext_block(struct asr_bcm_cipher *dd)
+{
+	struct skcipher_request *req = skcipher_request_cast(dd->areq);
+	struct asr_cipher_reqctx *rctx = skcipher_request_ctx(req);
+	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
+	unsigned int ivsize = crypto_skcipher_ivsize(cipher);
+
+	if (req->cryptlen < ivsize)
+		return;
+
+	if (rctx->mode & FLAGS_ENCRYPT) {
+		scatterwalk_map_and_copy(req->iv, req->dst,
+					 req->cryptlen - ivsize, ivsize, 0);
+	} else {
+		if (req->src == req->dst)
+			memcpy(req->iv, rctx->lastc, ivsize);
+		else
+			scatterwalk_map_and_copy(req->iv, req->src,
+						 req->cryptlen - ivsize,
+						 ivsize, 0);
+	}
+}
+
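+/*
+ * Enqueue a new request (if any) and, if the engine is idle, dequeue and
+ * start the next one. For requests started asynchronously the enqueue status
+ * is returned; when the new request is processed directly, the result of
+ * ctx->start() is returned instead.
+ */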
+static int asr_cipher_handle_queue(struct asr_bcm_cipher *dd,
+				  struct crypto_async_request *new_areq)
+{
+	struct crypto_async_request *areq, *backlog;
+	struct asr_cipher_ctx *ctx;
+	unsigned long flags;
+	bool start_async;
+	int err, ret = 0;
+
+	spin_lock_irqsave(&dd->lock, flags);
+	if (new_areq)
+		ret = crypto_enqueue_request(&dd->queue, new_areq);
+	if (dd->flags & FLAGS_BUSY) {
+		spin_unlock_irqrestore(&dd->lock, flags);
+		return ret;
+	}
+
+	backlog = crypto_get_backlog(&dd->queue);
+	areq = crypto_dequeue_request(&dd->queue);
+	if (areq) {
+		dd->flags |= FLAGS_BUSY;
+	}
+	spin_unlock_irqrestore(&dd->lock, flags);
+
+	if (!areq)
+		return ret;
+
+	if (backlog)
+		backlog->complete(backlog, -EINPROGRESS);
+
+	ctx = crypto_tfm_ctx(areq->tfm);
+
+	dd->areq = areq;
+	dd->ctx = ctx;
+	start_async = (areq != new_areq);
+	dd->is_async = start_async;
+
+	/* WARNING: ctx->start() MAY change dd->is_async. */
+	err = ctx->start(dd);
+	return (start_async) ? ret : err;
+}
+
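+/* Finish the current request, update the IV and kick the queue again. */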
+static inline int asr_cipher_complete(struct asr_bcm_cipher *dd, int err)
+{
+	dd->flags &= ~FLAGS_BUSY;
+
+	asr_cipher_set_iv_as_last_ciphertext_block(dd);
+
+	if (dd->is_async)
+		dd->areq->complete(dd->areq, err);
+
+	tasklet_schedule(&dd->queue_task);
+
+	return err;
+}
+
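+/*
+ * Map the request flags onto an OP-TEE command (ECB/CBC, software key or
+ * RKEK) and hand the request to the matching TEE helper, then complete it.
+ */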
+static int asr_cipher_start(struct asr_bcm_cipher *dd)
+{
+	struct skcipher_request *req = skcipher_request_cast(dd->areq);
+	struct asr_cipher_reqctx *rctx = skcipher_request_ctx(req);
+	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
+	u8 *iv;
+	u32 flags, cipher_mode, op_mode, keylen, ivsize;
+	int err;
+
+	asr_cipher_set_mode(dd, rctx);
+
+	flags = dd->flags;
+
+	if ((flags & FLAGS_OPMODE_MASK) == FLAGS_CBC) {
+		if (rctx->use_rkek)
+			cipher_mode = CMD_AES_HWKEY_CBC;
+		else
+			cipher_mode = CMD_AES_CBC;
+		ivsize = crypto_skcipher_ivsize(cipher);
+		iv = req->iv;
+	} else {
+		iv = NULL;
+		ivsize = 0;
+		if (rctx->use_rkek)
+			cipher_mode = CMD_AES_HWKEY_ECB;
+		else
+			cipher_mode = CMD_AES_ECB;
+	}
+
+	if (flags & FLAGS_ENCRYPT)
+		op_mode = 1;
+	else
+		op_mode = 0;
+
+	keylen = dd->ctx->keylen;
+
+	if (rctx->use_rkek) {
+		err = asr_optee_cipher_hwkey_process(cipher_mode, op_mode, req->src,
+				req->dst, req->cryptlen, keylen, iv, ivsize);
+	} else {
+		err = asr_optee_cipher_process(cipher_mode, op_mode, req->src,
+				req->dst, req->cryptlen, keylen, (u8 *)dd->ctx->key, iv, ivsize);
+	}
+
+	return asr_cipher_complete(dd, err);
+}
+
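+/*
+ * Common entry point for the encrypt/decrypt callbacks: record the requested
+ * mode, save the last ciphertext block for in-place decryption and queue the
+ * request.
+ */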
+static int asr_cipher(struct skcipher_request *req, unsigned long mode)
+{
+	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
+	struct asr_cipher_ctx *ctx = crypto_skcipher_ctx(cipher);
+	struct asr_cipher_reqctx *rctx;
+	struct asr_bcm_cipher *dd = asr_cipher_local;
+
+	ctx->block_size = AES_BLOCK_SIZE;
+	ctx->dd = dd;
+
+	rctx = skcipher_request_ctx(req);
+	rctx->mode = mode;
+	rctx->use_rkek = ctx->use_rkek;
+
+	if (!(mode & FLAGS_ENCRYPT) && (req->src == req->dst)) {
+		unsigned int ivsize = crypto_skcipher_ivsize(cipher);
+		if (req->cryptlen >= ivsize) {
+			scatterwalk_map_and_copy(rctx->lastc, req->src,
+						 req->cryptlen - ivsize,
+						 ivsize, 0);
+		}
+	}
+
+	return asr_cipher_handle_queue(dd, &req->base);
+}
+
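+/* Install a caller-supplied (software) AES key of 128/192/256 bits. */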
+static int asr_cipher_setkey(struct crypto_skcipher *cipher, const u8 *key,
+			   unsigned int keylen)
+{
+	struct asr_cipher_ctx *ctx = crypto_skcipher_ctx(cipher);
+	struct asr_bcm_cipher *dd = asr_cipher_local;
+
+	ctx->dd = dd;
+	ctx->use_rkek = false;
+
+	if (keylen != AES_KEYSIZE_128 &&
+		keylen != AES_KEYSIZE_192 &&
+		keylen != AES_KEYSIZE_256) {
+		crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+
+	memcpy(ctx->key, key, keylen);
+	ctx->keylen = keylen;
+
+	return 0;
+}
+
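+/*
+ * "setkey" for the hardware-key algorithms: only the key length is recorded.
+ * The key material itself is the RKEK held in the secure world and is never
+ * visible to the kernel.
+ */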
+static int asr_cipher_set_hwkey(struct crypto_skcipher *cipher, const u8 *key,
+			   unsigned int keylen)
+{
+	struct asr_cipher_ctx *ctx = crypto_skcipher_ctx(cipher);
+	struct asr_bcm_cipher *dd = asr_cipher_local;
+
+	(void)key; /* ignore the sw key */
+
+	if (!dd->rkek_burned)
+		return -EPERM;
+
+	if (keylen != AES_KEYSIZE_128 &&
+	    keylen != AES_KEYSIZE_192 &&
+	    keylen != AES_KEYSIZE_256) {
+		crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+
+	ctx->keylen = keylen;
+
+	return 0;
+}
+
+static int asr_aes_ecb_encrypt(struct skcipher_request *req)
+{
+	return asr_cipher(req, FLAGS_ECB | FLAGS_ENCRYPT);
+}
+
+static int asr_aes_ecb_decrypt(struct skcipher_request *req)
+{
+	return asr_cipher(req, FLAGS_ECB);
+}
+
+static int asr_aes_cbc_encrypt(struct skcipher_request *req)
+{
+	return asr_cipher(req, FLAGS_CBC | FLAGS_ENCRYPT);
+}
+
+static int asr_aes_cbc_decrypt(struct skcipher_request *req)
+{
+	return asr_cipher(req, FLAGS_CBC);
+}
+
+static int asr_cipher_init(struct crypto_skcipher *tfm)
+{
+	struct asr_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+	tfm->reqsize = sizeof(struct asr_cipher_reqctx);
+	ctx->start = asr_cipher_start;
+
+	return 0;
+}
+
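+/* The hardware-key transforms are only usable once the RKEK has been burned. */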
+static int asr_cipher_hwkey_init(struct crypto_skcipher *tfm)
+{
+	struct asr_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+	struct asr_bcm_cipher *dd = asr_cipher_local;
+
+	if (!dd->rkek_burned)
+		return -EPERM;
+
+	tfm->reqsize = sizeof(struct asr_cipher_reqctx);
+	ctx->start = asr_cipher_start;
+
+	return 0;
+}
+
+static void asr_cipher_exit(struct crypto_skcipher *tfm)
+{
+	struct asr_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+	memset(ctx, 0, sizeof(*ctx));
+}
+
+static void asr_cipher_queue_task(unsigned long data)
+{
+	struct asr_bcm_cipher *dd = (struct asr_bcm_cipher *)data;
+
+	asr_cipher_handle_queue(dd, NULL);
+}
+
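+/*
+ * The transforms below are exposed through the generic kernel crypto API.
+ * A rough usage sketch (not part of this patch; names are illustrative):
+ *
+ *	struct crypto_skcipher *tfm;
+ *	u8 dummy_key[AES_KEYSIZE_256] = { 0 };
+ *
+ *	tfm = crypto_alloc_skcipher("cbc(aes-hwkey)", 0, 0);
+ *	if (!IS_ERR(tfm))
+ *		crypto_skcipher_setkey(tfm, dummy_key, sizeof(dummy_key));
+ *
+ * For the hwkey algorithms only the key length matters; the key material is
+ * the on-chip RKEK and the dummy buffer contents are ignored.
+ */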
+static struct skcipher_alg cipher_algs[] = {
+	/* AES - ECB, using a caller-supplied key */
+	{
+		.base = {
+			.cra_name = "ecb(aes)",
+			.cra_driver_name = "asr-ecb-aes",
+			.cra_priority = 300,
+			.cra_flags = CRYPTO_ALG_ASYNC,
+			.cra_blocksize = AES_BLOCK_SIZE,
+			.cra_ctxsize = sizeof(struct asr_cipher_ctx),
+			.cra_alignmask = 0xf,
+			.cra_module = THIS_MODULE,
+		},
+		.min_keysize = AES_MIN_KEY_SIZE,
+		.max_keysize = AES_MAX_KEY_SIZE,
+		.setkey = asr_cipher_setkey,
+		.encrypt = asr_aes_ecb_encrypt,
+		.decrypt = asr_aes_ecb_decrypt,
+		.init = asr_cipher_init,
+		.exit = asr_cipher_exit,
+	},
+	/* AES - CBC, using a caller-supplied key */
+	{
+		.base = {
+			.cra_name = "cbc(aes)",
+			.cra_driver_name = "asr-cbc-aes",
+			.cra_priority = 300,
+			.cra_flags = CRYPTO_ALG_ASYNC,
+			.cra_blocksize = AES_BLOCK_SIZE,
+			.cra_ctxsize = sizeof(struct asr_cipher_ctx),
+			.cra_alignmask = 0xf,
+			.cra_module = THIS_MODULE,
+		},
+		.min_keysize = AES_MIN_KEY_SIZE,
+		.max_keysize = AES_MAX_KEY_SIZE,
+		.setkey = asr_cipher_setkey,
+		.encrypt = asr_aes_cbc_encrypt,
+		.decrypt = asr_aes_cbc_decrypt,
+		.init = asr_cipher_init,
+		.exit = asr_cipher_exit,
+		.ivsize = AES_BLOCK_SIZE,
+	},
+	/* AES - ECB, using hardware key, a.k.a. RKEK */
+	{
+		.base = {
+			.cra_name = "ecb(aes-hwkey)",
+			.cra_driver_name = "asr-ecb-aes-hwkey",
+			.cra_priority = 300,
+			.cra_flags = CRYPTO_ALG_ASYNC,
+			.cra_blocksize = AES_BLOCK_SIZE,
+			.cra_ctxsize = sizeof(struct asr_cipher_ctx),
+			.cra_alignmask = 0xf,
+			.cra_module = THIS_MODULE,
+		},
+		.min_keysize = AES_MIN_KEY_SIZE,
+		.max_keysize = AES_MAX_KEY_SIZE,
+		.setkey = asr_cipher_set_hwkey,
+		.encrypt = asr_aes_ecb_encrypt,
+		.decrypt = asr_aes_ecb_decrypt,
+		.init = asr_cipher_hwkey_init,
+		.exit = asr_cipher_exit,
+	},
+	/* AES - CBC, using hardware key, a.k.a. RKEK */
+	{
+		.base = {
+			.cra_name = "cbc(aes-hwkey)",
+			.cra_driver_name = "asr-cbc-aes-hwkey",
+			.cra_priority = 300,
+			.cra_flags = CRYPTO_ALG_ASYNC,
+			.cra_blocksize = AES_BLOCK_SIZE,
+			.cra_ctxsize = sizeof(struct asr_cipher_ctx),
+			.cra_alignmask = 0xf,
+			.cra_module = THIS_MODULE,
+		},
+		.min_keysize = AES_MIN_KEY_SIZE,
+		.max_keysize = AES_MAX_KEY_SIZE,
+		.setkey = asr_cipher_set_hwkey,
+		.encrypt = asr_aes_cbc_encrypt,
+		.decrypt = asr_aes_cbc_decrypt,
+		.init = asr_cipher_hwkey_init,
+		.exit = asr_cipher_exit,
+		.ivsize = AES_BLOCK_SIZE,
+	},
+};
+
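+/*
+ * Probe-time registration: query the RKEK state, set up the request queue and
+ * tasklet, and register the four skcipher algorithms (unwinding on failure).
+ */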
+int asr_bcm_cipher_register(struct asr_bcm_dev *bcm_dd)
+{
+	int i, j, err;
+	struct asr_bcm_cipher *cipher_dd;
+	struct device *dev = bcm_dd->dev;
+	u32 rkek_state;
+
+	cipher_dd = &bcm_dd->asr_cipher;
+	cipher_dd->dev = bcm_dd->dev;
+
+	asr_cipher_local = cipher_dd;
+
+	err = asr_optee_cipher_get_rkek_state(&rkek_state);
+	if (err) {
+		dev_warn(dev, "can't get hwkey(rkek) state\n");
+		cipher_dd->rkek_burned = 0;
+	} else {
+		if (rkek_state)
+			cipher_dd->rkek_burned = 1;
+		else
+			cipher_dd->rkek_burned = 0;
+		switch (rkek_state) {
+		case 2:
+			dev_warn(dev, "hwkey(rkek) burned, SW access not disabled\n");
+			break;
+		case 1:
+			dev_warn(dev, "hwkey(rkek) burned, SW access disabled\n");
+			break;
+		case 0:
+			dev_warn(dev, "hwkey(rkek) not burned\n");
+			break;
+		}
+	}
+
+	spin_lock_init(&cipher_dd->lock);
+	tasklet_init(&cipher_dd->queue_task, asr_cipher_queue_task,
+					(unsigned long)cipher_dd);
+
+	crypto_init_queue(&cipher_dd->queue, ASR_CIPHER_QUEUE_LENGTH);
+
+	for (i = 0; i < ARRAY_SIZE(cipher_algs); i++) {
+		err = crypto_register_skcipher(&cipher_algs[i]);
+		if (err) {
+			for (j = 0; j < i; j++)
+				crypto_unregister_skcipher(&cipher_algs[j]);
+			return err;
+		}
+	}
+
+	return 0;
+}
+
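+/*
+ * Remove-time teardown: unregister the algorithms and kill the queue tasklet.
+ * cipher_dd is embedded in struct asr_bcm_dev, so there is nothing to free
+ * here.
+ */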
+int asr_bcm_cipher_unregister(struct asr_bcm_dev *bcm_dd)
+{
+	int i;
+	struct asr_bcm_cipher *cipher_dd = &bcm_dd->asr_cipher;
+
+	for (i = 0; i < ARRAY_SIZE(cipher_algs); i++)
+		crypto_unregister_skcipher(&cipher_algs[i]);
+
+	tasklet_kill(&cipher_dd->queue_task);
+
+	return 0;
+}
+
+MODULE_DESCRIPTION("ASR cipher driver with hardware key (RKEK) support, backed by OP-TEE OS.");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Yonggan Wang");
\ No newline at end of file