ASR_BASE

Change-Id: Icf3719cc0afe3eeb3edc7fa80a2eb5199ca9dda1
diff --git a/marvell/linux/drivers/crypto/asr/te200/asr-cipher.c b/marvell/linux/drivers/crypto/asr/te200/asr-cipher.c
new file mode 100644
index 0000000..582aae7
--- /dev/null
+++ b/marvell/linux/drivers/crypto/asr/te200/asr-cipher.c
@@ -0,0 +1,1078 @@
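+/*
+ * ASR TE200 symmetric cipher (SCA) driver: AES and SM4 in ECB, CBC and
+ * CTR modes, with optional use of the fused hardware root key (RKEK)
+ * through a fixed key ladder.
+ */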
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/clk-provider.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/hw_random.h>
+#include <linux/platform_device.h>
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/delay.h>
+#include <linux/crypto.h>
+#include <linux/cputype.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/algapi.h>
+#include <linux/jiffies.h>
+#include <crypto/aes.h>
+#include <crypto/sm4.h>
+#include <crypto/internal/skcipher.h>
+#include "asr-te200.h"
+#include "asr-cipher.h"
+
+#define CIPHER_BLOCK_SIZE AES_BLOCK_SIZE
+#define CIPHER_MIN_KEY_SIZE AES_MIN_KEY_SIZE
+#define CIPHER_MAX_KEY_SIZE AES_MAX_KEY_SIZE
+
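+/* Single cipher instance, published by asr_te200_cipher_register() for the tfm callbacks. */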
+static struct asr_te200_cipher *asr_cipher_local = NULL;
+
+static inline u32 asr_cipher_read(struct asr_te200_cipher *dd, u32 offset)
+{
+	u32 value = readl_relaxed(dd->io_base + offset);
+
+	return value;
+}
+
+static inline void asr_cipher_write(struct asr_te200_cipher *dd,
+					u32 offset, u32 value)
+{
+	writel_relaxed(value, dd->io_base + offset);
+}
+
+static int sca_clock_switch(struct asr_te200_cipher *dd, int enable)
+{
+	uint32_t value;
+	value = asr_cipher_read(dd, TE200_CLOCK_CTRL);
+	if (enable) {
+		value |= SCA_CLK_EN;
+	} else {
+		value &= ~SCA_CLK_EN;
+	}
+	asr_cipher_write(dd, TE200_CLOCK_CTRL, value);
+	return 0;
+}
+
+static int sca_start_run(struct asr_te200_cipher *dd)
+{
+	uint32_t value;
+	value = asr_cipher_read(dd, TE200_SSCA_CTRL);
+	value |= SCA_RUN;
+	asr_cipher_write(dd, TE200_SSCA_CTRL, value);
+	return 0;
+}
+
+static int sca_set_alg(int alg_type, uint32_t *value)
+{
+	switch (alg_type) {
+	case NORMAL_AES:
+		*value &= SCA_NORMAL_AES;
+		break;
+	case SM4:
+		*value |= SCA_SM4;
+		break;
+	default:
+		return -1;
+	}
+	return 0;
+}
+
+static int sca_set_cipher_mode(int mode, uint32_t *value)
+{
+	switch (mode) {
+	case ECB:
+		*value &= SCA_MODE_ECB;
+		break;
+	case CTR:
+		*value |= SCA_MODE_CTR;
+		break;
+	case CBC:
+		*value |= SCA_MODE_CBC;
+		break;
+	default:
+		return -1;
+	}
+	return 0;
+}
+
+static int sca_set_iv(const uint8_t *iv, uint32_t *value)
+{
+	if (iv) {
+		*value |= SCA_SET_IV | SCA_SET_IV_ADDR;
+	} else {
+		*value &= (~(SCA_SET_IV | SCA_SET_IV_ADDR));
+	}
+	return 0;
+}
+
+static int sca_set_key(const uint8_t *key, uint32_t key_len, uint32_t *value)
+{
+	switch (key_len) {
+	case 16:
+		*value &= SCA_KEY_128_BITS;
+		break;
+	case 24:
+		*value |= SCA_KEY_192_BITS;
+		break;
+	case 32:
+		*value |= SCA_KEY_256_BITS;
+		break;
+	default:
+		return -1;
+	}
+
+	if (key) {
+		*value |= SCA_EXTERNAL_KEY | SCA_KEY_IS_ADDR;
+	} else {
+		*value |= SCA_DEVICE_ROOT_KEY | SCA_KEY_IS_ADDR;
+	}
+
+	return 0;
+}
+
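+/*
+ * Poll the SCA interrupt status register until the queued command completes,
+ * reporting invalid command/key and bus errors and giving up after ~500
+ * jiffies; the command interrupt bit is written back to acknowledge it.
+ */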
+static int sca_wait_intr(struct asr_te200_cipher *dd)
+{
+	int ret = 0;
+	uint32_t value;
+	uint32_t time_start;
+	time_start = jiffies;
+	value = asr_cipher_read(dd, TE200_SSCA_INTR_STAT);
+
+	while (1) {
+		value = asr_cipher_read(dd, TE200_SSCA_INTR_STAT);
+
+		if (value & SCA_INVALID_CMD) {
+			dev_err(dd->dev, "invalid cmd\n");
+			ret = -1;
+			break;
+		}
+
+		if (value & SCA_INVALID_KEY) {
+			dev_err(dd->dev, "invalid key\n");
+			ret = -1;
+			break;
+		}
+
+		if (value & SCA_BUS_ERROR) {
+			dev_err(dd->dev, "bus err\n");
+			ret = -1;
+			break;
+		}
+
+		if ((jiffies - time_start) > 500) {
+			dev_err(dd->dev, "wait intr timeout!\n");
+			ret = -1;
+			break;
+		}
+
+		if (value & SCA_CMD_INTR) {
+			break;
+		}
+	}
+
+	value = asr_cipher_read(dd, TE200_SSCA_INTR_STAT);
+	value |= SCA_CMD_INTR;
+	asr_cipher_write(dd, TE200_SSCA_INTR_STAT, value);
+	return ret;
+}
+
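+/*
+ * The engine accesses DRAM directly, so clean/invalidate the D-cache lines
+ * covering every buffer that is handed to it.
+ */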
+static inline void cipher_cache_operation(void *addr, int size)
+{
+	__cpuc_flush_dcache_area(addr, size);
+}
+
+/* keep the same key ladder in sync with the TOS/U-Boot/kernel te200 drivers */
+static const struct {
+	__attribute__ ((aligned (16))) uint8_t ek3[16];
+	__attribute__ ((aligned (16))) uint8_t ek2[16];
+	__attribute__ ((aligned (16))) uint8_t ek1[16];
+} key_ladder = {
+	{ 0x50,0xCF,0x0F,0x29,0xD1,0xCF,0x32,0x41,0xC5,0x64,0xAC,0xDB,0xDD,0x9A,0xFC,0xF4 },
+	{ 0x9C,0xAB,0x04,0x57,0xB7,0x17,0xD9,0x4A,0x34,0x74,0x28,0x30,0x34,0x16,0x3B,0x52 },
+	{ 0xF5,0xA0,0x33,0x7B,0x4B,0xE8,0x18,0x84,0x51,0x4E,0x38,0x86,0x6D,0x08,0xBB,0x6E },
+};
+
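+/*
+ * Prepare use of the hardware root key: cycle the OTP clock, select the OTP
+ * key via bit 24 of the CIU SYS_SEC_CTRL0 register and lock the selection in
+ * TE200_OTP_DUMMY_CFG.
+ */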
+static int rkek_cfg_init(struct asr_te200_cipher *dd)
+{
+#define SYS_SEC_CTRL0           (0x0C)
+
+	uint32_t value;
+	struct device_node *np;
+	struct resource res;
+	void __iomem *io_base;
+
+	value = asr_cipher_read(dd, TE200_CLOCK_CTRL);
+	value &= ~OTP_CLK_EN;
+	asr_cipher_write(dd, TE200_CLOCK_CTRL, value);
+
+	value = asr_cipher_read(dd, TE200_CLOCK_CTRL);
+	value |= OTP_CLK_EN;
+	asr_cipher_write(dd, TE200_CLOCK_CTRL, value);
+
+	/* set OTP key sel */
+	np = of_find_compatible_node(NULL, NULL, "marvell,mmp-ciu");
+	if (!np) {
+		dev_err(dd->dev, "can't find ciu node to set OTP key sel");
+		return -1;
+	}
+
+	if (of_address_to_resource(np, 0, &res)) {
+		return -1;
+	}
+
+	io_base = ioremap(res.start, resource_size(&res));
+	if (!io_base) {
+		dev_err(dd->dev, "ciu regs can't remap");
+		return -1;
+	}
+
+	value = readl_relaxed(io_base + SYS_SEC_CTRL0);
+	value |= (1 << 24);
+	writel_relaxed(value, io_base + SYS_SEC_CTRL0);
+
+	/* enable lock */
+	value = asr_cipher_read(dd, TE200_OTP_DUMMY_CFG);
+	value |= 0x10;
+	asr_cipher_write(dd, TE200_OTP_DUMMY_CFG, value);
+
+	iounmap(io_base);
+	return 0;
+}
+
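+/*
+ * Queue an INIT command: the command word encodes algorithm, block mode, key
+ * size and key/IV sources, followed by the physical address of the key (or
+ * of the three key-ladder stages when the RKEK is used) and, for CBC/CTR,
+ * the IV address.
+ */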
+static int sca_cipher_init(struct asr_te200_cipher *dd, int alg_type, int mode,
+						const uint8_t *iv, const uint8_t *key, uint32_t key_len)
+{
+	int ret;
+	uint32_t cmd = 0;
+	uint32_t param;
+	uint32_t key_phys;
+	uint32_t iv_phys;
+
+	sca_start_run(dd);
+
+	ret = sca_set_alg(alg_type, &cmd);
+	if (ret) {
+		return -1;
+	}
+	ret = sca_set_cipher_mode(mode, &cmd);
+	if (ret) {
+		return -1;
+	}
+
+	ret = sca_set_key(key, key_len, &cmd);
+	if (ret) {
+		return -1;
+	}
+
+	if (iv && ((mode == CBC) || (mode == CTR))) {
+		ret = sca_set_iv(iv, &cmd);
+		if (ret) {
+			return -1;
+		}
+	}
+
+	cmd |= SCA_INTER_TRIGGERD;
+	cmd |= SCA_INIT_CMD;
+
+	asr_cipher_write(dd, TE200_SSCA_QUEUE, cmd);
+
+	/* set key params */
+	if (key) {
+		key_phys = virt_to_phys((void *)key);
+		cipher_cache_operation((void*)key, key_len);
+		param = (uint32_t)key_phys;
+		asr_cipher_write(dd, TE200_SSCA_QUEUE, param);
+	} else {		/* use rkek */
+		key_phys = virt_to_phys((void *)key_ladder.ek3);
+		cipher_cache_operation((void*)key_ladder.ek3, key_len);
+		param = (uint32_t)key_phys;
+		asr_cipher_write(dd, TE200_SSCA_QUEUE, param);
+
+		key_phys = virt_to_phys((void *)key_ladder.ek2);
+		cipher_cache_operation((void*)key_ladder.ek2, key_len);
+		param = (uint32_t)key_phys;
+		asr_cipher_write(dd, TE200_SSCA_QUEUE, param);
+
+		key_phys = virt_to_phys((void *)key_ladder.ek1);
+		cipher_cache_operation((void*)key_ladder.ek1, key_len);
+		param = (uint32_t)key_phys;
+		asr_cipher_write(dd, TE200_SSCA_QUEUE, param);
+	}
+
+	/* set iv params */
+	if (iv && ((mode == CBC) || (mode == CTR))) {
+		/* set iv addr */
+		iv_phys = virt_to_phys((void *)iv);
+		cipher_cache_operation((void*)iv, 16);
+		param = (uint32_t)iv_phys;
+		asr_cipher_write(dd, TE200_SSCA_QUEUE, param);
+	}
+
+	return sca_wait_intr(dd);
+}
+
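+/*
+ * Queue a PROCESS command followed by the source address, the byte count and
+ * the destination address; both buffers are flushed from the D-cache first
+ * since the engine reads and writes memory directly.
+ */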
+static int sca_cipher_process(struct asr_te200_cipher *dd, int encrypt,
+							int last_one, const void *in, uint32_t size, void *out)
+{
+	uint32_t cmd = 0;
+	uint32_t param;
+	uint8_t *psrc = (uint8_t *)in;
+	uint8_t *pdst = (uint8_t *)out;
+	uint32_t in_phys, out_phys;
+	uint32_t len;
+
+	len = (size + 0xf) & (~0xf);
+
+	/* set encrypt or decrypt direction */
+	if (encrypt) {
+		cmd |= SCA_ENCRYPTION;
+	} else {
+		cmd &= (~SCA_ENCRYPTION);
+	}
+
+	cmd |= SCA_INTER_TRIGGERD;
+	cmd |= SCA_PROCESS_CMD;
+	if (last_one) {
+		cmd |= SCA_LAST_ONE_SESSION;
+	} else {
+		cmd &= ~SCA_LAST_ONE_SESSION;
+	}
+	asr_cipher_write(dd, TE200_SSCA_QUEUE, cmd);
+
+	in_phys = (uint32_t)virt_to_phys((void *)psrc);
+	out_phys = (uint32_t)virt_to_phys((void *)pdst);
+	cipher_cache_operation((void*)psrc, len);
+	cipher_cache_operation((void*)pdst, len);
+
+	/* set src addr */
+	param = (uint32_t)in_phys;
+	asr_cipher_write(dd, TE200_SSCA_QUEUE, param);
+
+	/* set data length */
+	param = (uint32_t)size;
+	asr_cipher_write(dd, TE200_SSCA_QUEUE, param);
+
+	/* set dst addr */
+	if (out_phys) {
+		param = (uint32_t)out_phys;
+		asr_cipher_write(dd, TE200_SSCA_QUEUE, param);
+	}
+
+	sca_start_run(dd);
+	return sca_wait_intr(dd);
+}
+
+static int sca_cipher_finish(struct asr_te200_cipher *dd)
+{
+	uint32_t cmd = 0;
+	/* set cmd*/
+	cmd |= SCA_INTER_TRIGGERD;
+	cmd |= SCA_FINISH_CMD;
+	asr_cipher_write(dd, TE200_SSCA_QUEUE, cmd);
+
+	sca_start_run(dd);
+	return sca_wait_intr(dd);
+}
+
+static int asr_cipher_hw_init(struct asr_te200_cipher *dd)
+{
+	asr_cipher_write(dd, TE200_SSCA_INTR_MSK, 0x1f);
+	return 0;
+}
+
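+/*
+ * Run one complete cipher operation: enable the SCA clock, initialize the
+ * session (optionally switching to the RKEK path first), process the whole
+ * buffer as a single last chunk, then finish and gate the clock again.
+ */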
+static int sca_cipher_handle(struct asr_te200_cipher *dd, struct sca_data *psca_data, const uint8_t *iv,
+			const uint8_t *key, uint32_t key_len, const void *in, uint32_t size, void *out)
+{
+	int ret = 0;
+
+	sca_clock_switch(dd, 0);
+	sca_clock_switch(dd, 1);
+
+	ret = asr_cipher_hw_init(dd);
+	if (ret) {
+		goto exit;
+	}
+
+	if (psca_data->use_rkek) {
+		ret = rkek_cfg_init(dd);
+		if (ret) {
+			goto exit;
+		}
+		ret = sca_cipher_init(dd, psca_data->alg_type, psca_data->mode, iv, NULL, key_len);
+	} else {
+		ret = sca_cipher_init(dd, psca_data->alg_type, psca_data->mode, iv, key, key_len);
+	}
+	if (ret) {
+		goto exit;
+	}
+
+	ret = sca_cipher_process(dd, psca_data->encrypt, 1, in, size, out);
+	if (ret) {
+		goto exit;
+	}
+
+	ret = sca_cipher_finish(dd);
+	if (ret) {
+		goto exit;
+	}
+
+exit:
+	sca_clock_switch(dd, 0);
+	return ret;
+}
+
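+/*
+ * Copy the last ciphertext block back into req->iv so that a follow-up
+ * request can chain from it; for in-place decryption the block saved in
+ * rctx->lastc before processing is used instead.
+ */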
+static void asr_cipher_set_iv_as_last_ciphertext_block(struct asr_te200_cipher *dd)
+{
+	struct skcipher_request *req = skcipher_request_cast(dd->areq);
+	struct asr_cipher_reqctx *rctx = skcipher_request_ctx(req);
+	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
+	unsigned int ivsize = crypto_skcipher_ivsize(cipher);
+
+	if (req->cryptlen < ivsize)
+		return;
+
+	if (rctx->mode & FLAGS_ENCRYPT) {
+		scatterwalk_map_and_copy(req->iv, req->dst,
+					 req->cryptlen - ivsize, ivsize, 0);
+	} else {
+		if (req->src == req->dst)
+			memcpy(req->iv, rctx->lastc, ivsize);
+		else
+			scatterwalk_map_and_copy(req->iv, req->src,
+						 req->cryptlen - ivsize,
+						 ivsize, 0);
+	}
+}
+
+static int asr_cipher_complete(struct asr_te200_cipher *dd, int err)
+{
+	struct asr_te200_dev *te200_dd = dev_get_drvdata(dd->dev);
+	struct asr_te200_ops *te200_ops = te200_dd->te200_ops;
+
+	dd->flags &= ~FLAGS_BUSY;
+
+	asr_cipher_set_iv_as_last_ciphertext_block(dd);
+
+	if (dd->is_async)
+		dd->areq->complete(dd->areq, err);
+
+	te200_ops->dev_put(te200_dd);
+
+	tasklet_schedule(&dd->queue_task);
+
+	return err;
+}
+
+static int asr_complete(struct asr_te200_cipher *dd)
+{
+	return asr_cipher_complete(dd, 0);
+}
+
+static inline size_t asr_cipher_padlen(size_t len, size_t block_size)
+{
+	len &= block_size - 1;
+	return len ? block_size - len : 0;
+}
+
+static void get_sca_data_info(struct sca_data *psca_data, struct asr_te200_cipher *dd)
+{
+	psca_data->alg_type = (dd->flags & FLAGS_SM4) ? SM4 : NORMAL_AES;
+	psca_data->encrypt = (dd->flags & FLAGS_ENCRYPT) ? 1 : 0;
+
+	if ((dd->flags & FLAGS_OPMODE_MASK) == FLAGS_ECB)
+		psca_data->mode = ECB;
+	else if ((dd->flags & FLAGS_OPMODE_MASK) == FLAGS_CBC)
+		psca_data->mode = CBC;
+	else if ((dd->flags & FLAGS_OPMODE_MASK) == FLAGS_CTR)
+		psca_data->mode = CTR;
+}
+
+static int asr_cipher_buff_init(struct asr_te200_cipher *dd, uint32_t len)
+{
+	dd->buf = (void *)__get_free_pages(GFP_KERNEL, get_order(len));
+
+	if (!dd->buf) {
+		dev_err(dd->dev, "unable to alloc pages.\n");
+		return -ENOMEM;
+	}
+
+	dd->buflen = PAGE_SIZE << get_order(len);
+
+	return 0;
+}
+
+static void asr_cipher_buff_cleanup(struct asr_te200_cipher *dd, uint32_t len)
+{
+	free_pages((unsigned long)dd->buf, get_order(len));
+	dd->buflen = 0;
+}
+
+static inline void asr_cipher_get(struct asr_te200_cipher *dd)
+{
+	mutex_lock(&dd->cipher_lock);
+}
+
+static inline void asr_cipher_put(struct asr_te200_cipher *dd)
+{
+	if (mutex_is_locked(&dd->cipher_lock))
+		mutex_unlock(&dd->cipher_lock);
+}
+
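+/*
+ * Copy the request scatterlist into one contiguous, block-padded bounce
+ * buffer, run it through the engine in place, then copy the result back to
+ * the destination scatterlist.
+ */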
+static int asr_sca_cipher_process(struct asr_te200_cipher *dd,
+					struct skcipher_request *req, asr_cipher_fn_t resume)
+{
+	int ret;
+	struct sca_data sca_data = {0};
+	size_t padlen = asr_cipher_padlen(req->cryptlen, CIPHER_BLOCK_SIZE);
+	struct asr_cipher_reqctx *rctx = skcipher_request_ctx(req);
+
+	asr_cipher_get(dd);
+
+	if (unlikely(req->cryptlen == 0)) {
+		asr_cipher_put(dd);
+		return -EINVAL;
+	}
+
+	dd->datalen = req->cryptlen + padlen;
+	ret = asr_cipher_buff_init(dd, dd->datalen);
+	if (ret) {
+		asr_cipher_put(dd);
+		return ret;
+	}
+
+	sg_copy_to_buffer(req->src, sg_nents(req->src), dd->buf, req->cryptlen);
+
+	dd->total = req->cryptlen;
+	dd->real_dst = req->dst;
+	dd->resume = resume;
+	dd->data = (u32 *)dd->buf;
+
+	get_sca_data_info(&sca_data, dd);
+	sca_data.use_rkek = rctx->use_rkek;
+	ret = sca_cipher_handle(dd, &sca_data, req->iv, (uint8_t *)dd->ctx->key,
+						dd->ctx->keylen, dd->data, dd->datalen, dd->data);
+	if (!sg_copy_from_buffer(dd->real_dst, sg_nents(dd->real_dst),
+				 dd->buf, dd->total)) 
+		ret = -EINVAL;
+
+	asr_cipher_buff_cleanup(dd, dd->datalen);
+	asr_cipher_put(dd);
+
+	return asr_cipher_complete(dd, ret);
+}
+
+static inline void asr_cipher_set_mode(struct asr_te200_cipher *dd,
+					  const struct asr_cipher_reqctx *rctx)
+{
+	/* Clear all but persistent flags and set request flags. */
+	dd->flags = (dd->flags & CIPHER_FLAGS_PERSISTENT) | rctx->mode;
+}
+
+static int asr_cipher_start(struct asr_te200_cipher *dd)
+{
+	struct skcipher_request *req = skcipher_request_cast(dd->areq);
+	struct asr_cipher_reqctx *rctx = skcipher_request_ctx(req);
+	struct asr_te200_dev *te200_dd = dev_get_drvdata(dd->dev);
+	struct asr_te200_ops *te200_ops = te200_dd->te200_ops;
+
+	te200_ops->dev_get(te200_dd);
+
+	asr_cipher_set_mode(dd, rctx);
+	return asr_sca_cipher_process(dd, req, asr_complete);
+}
+
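+/*
+ * Enqueue the request and, unless the engine is already busy, dequeue the
+ * next one and start it; start_async records whether the started request is
+ * the caller's own and therefore whose return code is reported.
+ */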
+static int asr_cipher_handle_queue(struct asr_te200_cipher *dd,
+				  struct crypto_async_request *new_areq)
+{
+	struct crypto_async_request *areq, *backlog;
+	struct asr_cipher_ctx *ctx;
+	unsigned long flags;
+	bool start_async;
+	int err, ret = 0;
+
+	spin_lock_irqsave(&dd->lock, flags);
+	if (new_areq)
+		ret = crypto_enqueue_request(&dd->queue, new_areq);
+	if (dd->flags & FLAGS_BUSY) {
+		spin_unlock_irqrestore(&dd->lock, flags);
+		return ret;
+	}
+
+	backlog = crypto_get_backlog(&dd->queue);
+	areq = crypto_dequeue_request(&dd->queue);
+	if (areq) {
+		dd->flags |= FLAGS_BUSY;
+	}
+	spin_unlock_irqrestore(&dd->lock, flags);
+	if (!areq)
+		return ret;
+
+	if (backlog)
+		backlog->complete(backlog, -EINPROGRESS);
+
+	ctx = crypto_tfm_ctx(areq->tfm);
+	dd->areq = areq;
+	dd->ctx = ctx;
+	start_async = (areq != new_areq);
+	dd->is_async = start_async;
+
+	/* WARNING: ctx->start() MAY change dd->is_async. */
+	err = ctx->start(dd);
+	return (start_async) ? ret : err;
+}
+
+static int asr_cipher(struct skcipher_request *req, unsigned long mode)
+{
+	int ret;
+	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
+	struct asr_cipher_ctx *ctx = crypto_skcipher_ctx(cipher);
+	struct asr_cipher_reqctx *rctx;
+
+	ctx->block_size = CIPHER_BLOCK_SIZE;
+	rctx = skcipher_request_ctx(req);
+	rctx->mode = mode;
+	rctx->use_rkek = ctx->use_rkek;
+
+	if (!(mode & FLAGS_ENCRYPT) && (req->src == req->dst)) {
+		unsigned int ivsize = crypto_skcipher_ivsize(cipher);
+		if (req->cryptlen >= ivsize) {
+			scatterwalk_map_and_copy(rctx->lastc, req->src,
+						 req->cryptlen - ivsize,
+						 ivsize, 0);
+		}
+	}
+
+	ret = asr_cipher_handle_queue(ctx->dd, &req->base);
+
+	asr_cipher_put(ctx->dd);
+	return ret;
+}
+
+static int asr_cipher_setkey(struct crypto_skcipher *cipher, const u8 *key,
+			   unsigned int keylen)
+{
+	struct asr_cipher_ctx *ctx = crypto_skcipher_ctx(cipher);
+	struct asr_te200_cipher *dd = asr_cipher_local;
+
+	ctx->dd = dd;
+	ctx->use_rkek = false;
+
+	if (keylen != AES_KEYSIZE_128 &&
+		keylen != AES_KEYSIZE_192 &&
+		keylen != AES_KEYSIZE_256) {
+		crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+
+	memcpy(ctx->key, key, keylen);
+	ctx->keylen = keylen;
+
+	return 0;
+}
+
+static int asr_cipher_set_hwkey(struct crypto_skcipher *cipher, const u8 *key,
+			   unsigned int keylen)
+{
+	struct asr_cipher_ctx *ctx = crypto_skcipher_ctx(cipher);
+	struct asr_te200_cipher *dd = asr_cipher_local;
+
+	ctx->dd = dd;
+	if (!dd->rkek_burned)
+		return -EPERM;
+
+	ctx->use_rkek = true;
+
+	if (keylen != AES_KEYSIZE_128 &&
+		keylen != AES_KEYSIZE_192 &&
+		keylen != AES_KEYSIZE_256) {
+		crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+
+	memcpy(ctx->key, key, keylen);
+	ctx->keylen = keylen;
+
+	return 0;
+}
+
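+/*
+ * Treat the GEU "secure key access disabled" fuse bit as the indication that
+ * the root key (RKEK) has been burned.
+ */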
+static int asr_cipher_rkek_fused(struct asr_te200_cipher *dd)
+{
+#define GEU_FUSE_VAL_APCFG2					(0x0408)
+#define GEU_SECURE_KEY_ACCESS_DISABLED		(1 << 29)
+
+	uint32_t value;
+	struct device_node *np;
+	struct resource res;
+	void __iomem *io_base;
+
+	/* get geu node */
+	np = of_find_compatible_node(NULL, NULL, "asr,asr-geu");
+	if (!np) {
+		dev_err(dd->dev, "can't find geu node to check rkek burned");
+		return 0;
+	}
+
+	if (of_address_to_resource(np, 0, &res)) {
+		dev_err(dd->dev, "can't find geu address");
+		return 0;
+	}
+
+	io_base = ioremap(res.start, resource_size(&res));
+	if (!io_base) {
+		dev_err(dd->dev, "geu regs can't remap");
+		return 0;
+	}
+
+	value = readl_relaxed(io_base + GEU_FUSE_VAL_APCFG2);
+	if (value & GEU_SECURE_KEY_ACCESS_DISABLED) {
+		iounmap(io_base);
+		return 1;
+	}
+
+	iounmap(io_base);
+	return 0;
+}
+
+static int asr_aes_ecb_encrypt(struct skcipher_request *req)
+{
+	return asr_cipher(req, FLAGS_AES | FLAGS_ECB | FLAGS_ENCRYPT);
+}
+
+static int asr_aes_ecb_decrypt(struct skcipher_request *req)
+{
+	return asr_cipher(req, FLAGS_AES | FLAGS_ECB);
+}
+
+static int asr_aes_cbc_encrypt(struct skcipher_request *req)
+{
+	return asr_cipher(req, FLAGS_AES | FLAGS_CBC | FLAGS_ENCRYPT);
+}
+
+static int asr_aes_cbc_decrypt(struct skcipher_request *req)
+{
+	return asr_cipher(req, FLAGS_AES | FLAGS_CBC);
+}
+
+static int asr_aes_ctr_encrypt(struct skcipher_request *req)
+{
+	return asr_cipher(req, FLAGS_AES | FLAGS_CTR | FLAGS_ENCRYPT);
+}
+
+static int asr_aes_ctr_decrypt(struct skcipher_request *req)
+{
+	return asr_cipher(req, FLAGS_AES | FLAGS_CTR);
+}
+
+static int asr_sm4_ecb_encrypt(struct skcipher_request *req)
+{
+	return asr_cipher(req, FLAGS_SM4 | FLAGS_ECB | FLAGS_ENCRYPT);
+}
+
+static int asr_sm4_ecb_decrypt(struct skcipher_request *req)
+{
+	return asr_cipher(req, FLAGS_SM4 | FLAGS_ECB);
+}
+
+static int asr_sm4_cbc_encrypt(struct skcipher_request *req)
+{
+	return asr_cipher(req, FLAGS_SM4 | FLAGS_CBC | FLAGS_ENCRYPT);
+}
+
+static int asr_sm4_cbc_decrypt(struct skcipher_request *req)
+{
+	return asr_cipher(req, FLAGS_SM4 | FLAGS_CBC);
+}
+
+static int asr_sm4_ctr_encrypt(struct skcipher_request *req)
+{
+	return asr_cipher(req, FLAGS_SM4 | FLAGS_CTR | FLAGS_ENCRYPT);
+}
+
+static int asr_sm4_ctr_decrypt(struct skcipher_request *req)
+{
+	return asr_cipher(req, FLAGS_SM4 | FLAGS_CTR);
+}
+
+static int asr_cipher_init(struct crypto_skcipher *tfm)
+{
+	struct asr_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+	tfm->reqsize = sizeof(struct asr_cipher_reqctx);
+	ctx->start = asr_cipher_start;
+
+	return 0;
+}
+
+static int asr_cipher_hwkey_init(struct crypto_skcipher *tfm)
+{
+	struct asr_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+	struct asr_te200_cipher *dd = asr_cipher_local;
+
+	if (!dd->rkek_burned)
+		return -EPERM;
+
+	tfm->reqsize = sizeof(struct asr_cipher_reqctx);
+	ctx->start = asr_cipher_start;
+
+	return 0;
+}
+
+static void asr_cipher_exit(struct crypto_skcipher *tfm)
+{
+	struct asr_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+	memset(ctx, 0, sizeof(*ctx));
+}
+
+static struct skcipher_alg cipher_algs[] = {
+	/* AES - ECB */
+	{
+		.base = {
+			.cra_name = "ecb(aes)",
+			.cra_driver_name = "asr-ecb-aes",
+			.cra_priority = 300,
+			.cra_flags = CRYPTO_ALG_ASYNC,
+			.cra_blocksize = AES_BLOCK_SIZE,
+			.cra_ctxsize = sizeof(struct asr_cipher_ctx),
+			.cra_alignmask = 0xf,
+			.cra_module = THIS_MODULE,
+		},
+		.min_keysize = CIPHER_MIN_KEY_SIZE,
+		.max_keysize = CIPHER_MAX_KEY_SIZE,
+		.setkey = asr_cipher_setkey,
+		.encrypt = asr_aes_ecb_encrypt,
+		.decrypt = asr_aes_ecb_decrypt,
+		.init = asr_cipher_init,
+		.exit = asr_cipher_exit,
+	},
+	/* AES - CBC */
+	{
+		.base = {
+			.cra_name = "cbc(aes)",
+			.cra_driver_name = "asr-cbc-aes",
+			.cra_priority = 300,
+			.cra_flags = CRYPTO_ALG_ASYNC,
+			.cra_blocksize = AES_BLOCK_SIZE,
+			.cra_ctxsize = sizeof(struct asr_cipher_ctx),
+			.cra_alignmask = 0xf,
+			.cra_module = THIS_MODULE,
+		},
+		.min_keysize = CIPHER_MIN_KEY_SIZE,
+		.max_keysize = CIPHER_MAX_KEY_SIZE,
+		.setkey = asr_cipher_setkey,
+		.encrypt = asr_aes_cbc_encrypt,
+		.decrypt = asr_aes_cbc_decrypt,
+		.init = asr_cipher_init,
+		.exit = asr_cipher_exit,
+		.ivsize = AES_BLOCK_SIZE,
+	},
+	/* AES - CTR */
+	{
+		.base = {
+			.cra_name = "ctr(aes)",
+			.cra_driver_name = "asr-ctr-aes",
+			.cra_priority = 300,
+			.cra_flags = CRYPTO_ALG_ASYNC,
+			.cra_blocksize = AES_BLOCK_SIZE,
+			.cra_ctxsize = sizeof(struct asr_cipher_ctx),
+			.cra_alignmask = 0xf,
+			.cra_module = THIS_MODULE,
+		},
+		.min_keysize = CIPHER_MIN_KEY_SIZE,
+		.max_keysize = CIPHER_MAX_KEY_SIZE,
+		.setkey = asr_cipher_setkey,
+		.encrypt = asr_aes_ctr_encrypt,
+		.decrypt = asr_aes_ctr_decrypt,
+		.init = asr_cipher_init,
+		.exit = asr_cipher_exit,
+		.ivsize = AES_BLOCK_SIZE,
+	},
+
+	/* SM4 - ECB */
+	{
+		.base = {
+			.cra_name = "ecb(sm4)",
+			.cra_driver_name = "asr-ecb-sm4",
+			.cra_priority = 300,
+			.cra_flags = CRYPTO_ALG_ASYNC,
+			.cra_blocksize = SM4_BLOCK_SIZE,
+			.cra_ctxsize = sizeof(struct asr_cipher_ctx),
+			.cra_alignmask = 0xf,
+			.cra_module = THIS_MODULE,
+		},
+		.min_keysize = CIPHER_MIN_KEY_SIZE,
+		.max_keysize = CIPHER_MAX_KEY_SIZE,
+		.setkey = asr_cipher_setkey,
+		.encrypt = asr_sm4_ecb_encrypt,
+		.decrypt = asr_sm4_ecb_decrypt,
+		.init = asr_cipher_init,
+		.exit = asr_cipher_exit,
+	},
+	/* SM4 - CBC */
+	{
+		.base = {
+			.cra_name = "cbc(sm4)",
+			.cra_driver_name = "asr-cbc-sm4",
+			.cra_priority = 300,
+			.cra_flags = CRYPTO_ALG_ASYNC,
+			.cra_blocksize = SM4_BLOCK_SIZE,
+			.cra_ctxsize = sizeof(struct asr_cipher_ctx),
+			.cra_alignmask = 0xf,
+			.cra_module = THIS_MODULE,
+		},
+		.min_keysize = CIPHER_MIN_KEY_SIZE,
+		.max_keysize = CIPHER_MAX_KEY_SIZE,
+		.setkey = asr_cipher_setkey,
+		.encrypt = asr_sm4_cbc_encrypt,
+		.decrypt = asr_sm4_cbc_decrypt,
+		.init = asr_cipher_init,
+		.exit = asr_cipher_exit,
+		.ivsize = SM4_BLOCK_SIZE,
+	},
+	/* SM4 - CTR */
+	{
+		.base = {
+			.cra_name = "ctr(sm4)",
+			.cra_driver_name = "asr-ctr-sm4",
+			.cra_priority = 300,
+			.cra_flags = CRYPTO_ALG_ASYNC,
+			.cra_blocksize = SM4_BLOCK_SIZE,
+			.cra_ctxsize = sizeof(struct asr_cipher_ctx),
+			.cra_alignmask = 0xf,
+			.cra_module = THIS_MODULE,
+		},
+		.min_keysize = CIPHER_MIN_KEY_SIZE,
+		.max_keysize = CIPHER_MAX_KEY_SIZE,
+		.setkey = asr_cipher_setkey,
+		.encrypt = asr_sm4_ctr_encrypt,
+		.decrypt = asr_sm4_ctr_decrypt,
+		.init = asr_cipher_init,
+		.exit = asr_cipher_exit,
+		.ivsize = SM4_BLOCK_SIZE,
+	},
+
+	/* hardware key AES - ECB */
+	{
+		.base = {
+			.cra_name = "ecb(aes-hwkey)",
+			.cra_driver_name = "asr-ecb-aes-hwkey",
+			.cra_priority = 300,
+			.cra_flags = CRYPTO_ALG_ASYNC,
+			.cra_blocksize = AES_BLOCK_SIZE,
+			.cra_ctxsize = sizeof(struct asr_cipher_ctx),
+			.cra_alignmask = 0xf,
+			.cra_module = THIS_MODULE,
+		},
+		.min_keysize = CIPHER_MIN_KEY_SIZE,
+		.max_keysize = CIPHER_MAX_KEY_SIZE,
+		.setkey = asr_cipher_set_hwkey,
+		.encrypt = asr_aes_ecb_encrypt,
+		.decrypt = asr_aes_ecb_decrypt,
+		.init = asr_cipher_hwkey_init,
+		.exit = asr_cipher_exit,
+	},
+	/* hardware key AES - CBC */
+	{
+		.base = {
+			.cra_name = "cbc(aes-hwkey)",
+			.cra_driver_name = "asr-cbc-aes-hwkey",
+			.cra_priority = 300,
+			.cra_flags = CRYPTO_ALG_ASYNC,
+			.cra_blocksize = AES_BLOCK_SIZE,
+			.cra_ctxsize = sizeof(struct asr_cipher_ctx),
+			.cra_alignmask = 0xf,
+			.cra_module = THIS_MODULE,
+		},
+		.min_keysize = CIPHER_MIN_KEY_SIZE,
+		.max_keysize = CIPHER_MAX_KEY_SIZE,
+		.setkey = asr_cipher_set_hwkey,
+		.encrypt = asr_aes_cbc_encrypt,
+		.decrypt = asr_aes_cbc_decrypt,
+		.init = asr_cipher_hwkey_init,
+		.exit = asr_cipher_exit,
+		.ivsize = AES_BLOCK_SIZE,
+	},
+};
+
+static void asr_cipher_queue_task(unsigned long data)
+{
+	struct asr_te200_cipher *dd = (struct asr_te200_cipher *)data;
+
+	asr_cipher_handle_queue(dd, NULL);
+}
+
+static void asr_cipher_done_task(unsigned long data)
+{
+	struct asr_te200_cipher *dd = (struct asr_te200_cipher *)data;
+
+	dd->is_async = true;
+	(void)dd->resume(dd);
+}
+
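+/*
+ * Hook the cipher block into the TE200 core device: reuse its MMIO mapping,
+ * detect whether the RKEK is fused, set up the request queue and tasklets,
+ * and register the skcipher algorithms, unwinding on failure.
+ */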
+int asr_te200_cipher_register(struct asr_te200_dev *te200_dd)
+{
+	int err, i, j;
+	struct device_node *np = NULL;
+	struct asr_te200_cipher *cipher_dd;
+
+	cipher_dd = &te200_dd->asr_cipher;
+	cipher_dd->dev = te200_dd->dev;
+	cipher_dd->io_base = te200_dd->io_base;
+	cipher_dd->phys_base = te200_dd->phys_base;
+
+	np = cipher_dd->dev->of_node;
+
+	cipher_dd->rkek_burned = asr_cipher_rkek_fused(cipher_dd);
+
+	asr_cipher_local = cipher_dd;
+
+	spin_lock_init(&cipher_dd->lock);
+	mutex_init(&cipher_dd->cipher_lock);
+	tasklet_init(&cipher_dd->done_task, asr_cipher_done_task,
+					(unsigned long)cipher_dd);
+	tasklet_init(&cipher_dd->queue_task, asr_cipher_queue_task,
+					(unsigned long)cipher_dd);
+	crypto_init_queue(&cipher_dd->queue, ASR_CIPHER_QUEUE_LENGTH);
+
+	for (i = 0; i < ARRAY_SIZE(cipher_algs); i++) {
+		err = crypto_register_skcipher(&cipher_algs[i]);
+		if (err) {
+			for (j = 0; j < i; j++)
+				crypto_unregister_skcipher(&cipher_algs[j]);
+			return err;
+		}
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(asr_te200_cipher_register);
+
+int asr_te200_cipher_unregister(struct asr_te200_dev *te200_dd)
+{
+	int i;
+	struct asr_te200_cipher *cipher_dd = &te200_dd->asr_cipher;
+
+	for (i = 0; i < ARRAY_SIZE(cipher_algs); i++)
+		crypto_unregister_skcipher(&cipher_algs[i]);
+
+	tasklet_kill(&cipher_dd->done_task);
+	tasklet_kill(&cipher_dd->queue_task);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(asr_te200_cipher_unregister);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("wangyonggan <yongganwang@asrmicro.com>");
+MODULE_DESCRIPTION("ASR te200 cipher driver");
\ No newline at end of file