ASR_BASE

Change-Id: Icf3719cc0afe3eeb3edc7fa80a2eb5199ca9dda1
diff --git a/marvell/linux/drivers/soc/asr/geu/asr-aes-optee.c b/marvell/linux/drivers/soc/asr/geu/asr-aes-optee.c
new file mode 100644
index 0000000..c2cd5b9
--- /dev/null
+++ b/marvell/linux/drivers/soc/asr/geu/asr-aes-optee.c
@@ -0,0 +1,384 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2023 ASR Micro Limited
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#ifdef CONFIG_TEE
+#include <linux/tee_drv.h>
+#endif
+#include <linux/crypto.h>
+#include <linux/cputype.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/algapi.h>
+#include <crypto/aes.h>
+#include <crypto/internal/skcipher.h>
+
+#include "asr-aes-optee.h"
+#include "asr-geu-optee.h"
+
+struct asr_geu_aes *asr_aes_local;
+
+static struct teec_uuid pta_aes_uuid = ASR_AES_ACCESS_UUID;
+
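+/*
+ * Thin wrappers around the GEU OP-TEE helpers: query whether the hardware
+ * key (RKEK) has been provisioned, and run one AES operation over the
+ * src/dst scatterlists inside the trusted application using that key.
+ */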
+static int asr_optee_aes_get_rkek_state(u32 *state)
+{
+	return asrgeu_optee_acquire_ta_data(&pta_aes_uuid, CMD_AES_HWKEY_STATUS, state);
+}
+
+static int asr_optee_aes_hwkey_process(uint32_t aes_mode, uint32_t op_mode,
+		struct scatterlist *src, struct scatterlist *dst,
+		size_t len, uint32_t key_size,
+		u8 *iv, uint32_t ivsize)
+{
+	return asrgeu_optee_aes_acquire_ta_dma(&pta_aes_uuid, aes_mode,
+		src, dst, len, len, key_size, op_mode, iv, ivsize);
+}
+
+static inline void asr_aes_set_mode(struct asr_geu_aes *dd,
+				      const struct asr_aes_reqctx *rctx)
+{
+	/* Clear all but persistent flags and set request flags. */
+	dd->flags = (dd->flags & AES_FLAGS_PERSISTENT) | rctx->mode;
+}
+
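+/*
+ * For CBC, the crypto API expects req->iv to hold the last ciphertext
+ * block on completion so that requests can be chained. On encryption it
+ * is taken from the end of dst; on in-place decryption it was saved in
+ * rctx->lastc before the data was overwritten.
+ */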
+static void asr_aes_set_iv_as_last_ciphertext_block(struct asr_geu_aes *dd)
+{
+	struct skcipher_request *req = skcipher_request_cast(dd->areq);
+	struct asr_aes_reqctx *rctx = skcipher_request_ctx(req);
+	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
+	unsigned int ivsize = crypto_skcipher_ivsize(cipher);
+
+	if (req->cryptlen < ivsize)
+		return;
+
+	if (rctx->mode & AES_FLAGS_ENCRYPT) {
+		scatterwalk_map_and_copy(req->iv, req->dst,
+					 req->cryptlen - ivsize, ivsize, 0);
+	} else {
+		if (req->src == req->dst)
+			memcpy(req->iv, rctx->lastc, ivsize);
+		else
+			scatterwalk_map_and_copy(req->iv, req->src,
+						 req->cryptlen - ivsize,
+						 ivsize, 0);
+	}
+}
+
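+/*
+ * Single request queue shared by all transforms: enqueue the new request
+ * and, if the engine is idle, dequeue the next one and start it. Returns
+ * the enqueue status when the request will run asynchronously, or the
+ * start result when the new request is executed directly.
+ */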
+static int asr_aes_handle_queue(struct asr_geu_aes *dd,
+				  struct crypto_async_request *new_areq)
+{
+	struct crypto_async_request *areq, *backlog;
+	struct asr_aes_ctx *ctx;
+	unsigned long flags;
+	bool start_async;
+	int err, ret = 0;
+
+	spin_lock_irqsave(&dd->lock, flags);
+	if (new_areq)
+		ret = crypto_enqueue_request(&dd->queue, new_areq);
+	if (dd->flags & AES_FLAGS_BUSY) {
+		spin_unlock_irqrestore(&dd->lock, flags);
+		return ret;
+	}
+
+	backlog = crypto_get_backlog(&dd->queue);
+	areq = crypto_dequeue_request(&dd->queue);
+	if (areq)
+		dd->flags |= AES_FLAGS_BUSY;
+	spin_unlock_irqrestore(&dd->lock, flags);
+
+	if (!areq)
+		return ret;
+
+	if (backlog)
+		backlog->complete(backlog, -EINPROGRESS);
+
+	ctx = crypto_tfm_ctx(areq->tfm);
+
+	dd->areq = areq;
+	dd->ctx = ctx;
+	start_async = (areq != new_areq);
+	dd->is_async = start_async;
+
+	/* WARNING: ctx->start() MAY change dd->is_async. */
+	err = ctx->start(dd);
+	return (start_async) ? ret : err;
+}
+
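+/*
+ * Finish the current request: clear the busy flag, propagate the IV,
+ * complete the request if it was started asynchronously, and kick the
+ * queue tasklet to pick up the next request.
+ */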
+static inline int asr_aes_complete(struct asr_geu_aes *dd, int err)
+{
+	dd->flags &= ~AES_FLAGS_BUSY;
+
+	asr_aes_set_iv_as_last_ciphertext_block(dd);
+
+	if (dd->is_async)
+		dd->areq->complete(dd->areq, err);
+
+	tasklet_schedule(&dd->queue_task);
+
+	return err;
+}
+
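+/*
+ * Map the request flags onto a TA command (ECB or CBC, encrypt or
+ * decrypt) and perform the whole operation synchronously in OP-TEE.
+ */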
+static int asr_aes_start(struct asr_geu_aes *dd)
+{
+	struct skcipher_request *req = skcipher_request_cast(dd->areq);
+	struct asr_aes_reqctx *rctx = skcipher_request_ctx(req);
+	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
+	u8 *iv;
+	u32 flags, aes_mode, op_mode, keylen, ivsize;
+	int err;
+
+	asr_aes_set_mode(dd, rctx);
+
+	flags = dd->flags & AES_FLAGS_MODE_MASK;
+
+	if ((flags & AES_FLAGS_OPMODE_MASK) == AES_FLAGS_CBC) {
+		aes_mode = CMD_AES_HWKEY_CBC;
+		ivsize = crypto_skcipher_ivsize(cipher);
+		iv = req->iv;
+	} else {
+		iv = NULL;
+		ivsize = 0;
+		aes_mode = CMD_AES_HWKEY_ECB;
+	}
+
+	if (flags & AES_FLAGS_ENCRYPT)
+		op_mode = AES_ENCRYPT;
+	else
+		op_mode = AES_DECRYPT;
+
+	keylen = dd->ctx->keylen;
+
+	err = asr_optee_aes_hwkey_process(aes_mode, op_mode, req->src,
+			req->dst, req->cryptlen, keylen, iv, ivsize);
+
+	return asr_aes_complete(dd, err);
+}
+
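+/*
+ * Common entry point for all modes. For an in-place decryption the last
+ * ciphertext block is saved in rctx->lastc now, before the data is
+ * overwritten, so it can be copied into req->iv on completion (a no-op
+ * for ECB, whose ivsize is zero).
+ */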
+static int asr_aes_crypt(struct skcipher_request *req, unsigned long mode)
+{
+	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
+	struct asr_aes_ctx *ctx = crypto_skcipher_ctx(cipher);
+	struct asr_aes_reqctx *rctx;
+	struct asr_geu_aes *dd = asr_aes_local;
+
+	ctx->block_size = AES_BLOCK_SIZE;
+	ctx->dd = dd;
+
+	rctx = skcipher_request_ctx(req);
+	rctx->mode = mode;
+	rctx->use_rkek = ctx->use_rkek;
+
+	if (!(mode & AES_FLAGS_ENCRYPT) && (req->src == req->dst)) {
+		unsigned int ivsize = crypto_skcipher_ivsize(cipher);
+
+		if (req->cryptlen >= ivsize) {
+			scatterwalk_map_and_copy(rctx->lastc, req->src,
+						 req->cryptlen - ivsize,
+						 ivsize, 0);
+		}
+	}
+
+	return asr_aes_handle_queue(dd, &req->base);
+}
+
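+/*
+ * ->setkey() for the hwkey algorithms: the software key material is
+ * ignored because the RKEK never leaves the hardware; only the key
+ * length is recorded. Fails if the RKEK has not been burned.
+ */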
+static int asr_aes_set_hwkey(struct crypto_skcipher *cipher, const u8 *key,
+			   unsigned int keylen)
+{
+	struct asr_aes_ctx *ctx = crypto_skcipher_ctx(cipher);
+	struct asr_geu_aes *dd = asr_aes_local;
+
+	(void)key; /* ignore the sw key */
+
+	if (!dd->rkek_burned)
+		return -EPERM;
+
+	if (keylen != AES_KEYSIZE_128 &&
+	    keylen != AES_KEYSIZE_192 &&
+	    keylen != AES_KEYSIZE_256) {
+		crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+
+	ctx->keylen = keylen;
+
+	return 0;
+}
+
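+/* skcipher entry points: one encrypt/decrypt pair per supported mode. */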
+static int asr_aes_ecb_encrypt(struct skcipher_request *req)
+{
+	return asr_aes_crypt(req, AES_FLAGS_ECB | AES_FLAGS_ENCRYPT);
+}
+
+static int asr_aes_ecb_decrypt(struct skcipher_request *req)
+{
+	return asr_aes_crypt(req, AES_FLAGS_ECB);
+}
+
+static int asr_aes_cbc_encrypt(struct skcipher_request *req)
+{
+	return asr_aes_crypt(req, AES_FLAGS_CBC | AES_FLAGS_ENCRYPT);
+}
+
+static int asr_aes_cbc_decrypt(struct skcipher_request *req)
+{
+	return asr_aes_crypt(req, AES_FLAGS_CBC);
+}
+
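+/* Per-tfm setup/teardown and the tasklet that drains the queue. */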
+static int asr_aes_hwkey_init(struct crypto_skcipher *tfm)
+{
+	struct asr_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+	tfm->reqsize = sizeof(struct asr_aes_reqctx);
+	ctx->start = asr_aes_start;
+
+	return 0;
+}
+
+static void asr_aes_exit(struct crypto_skcipher *tfm)
+{
+	struct asr_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+	memset(ctx, 0, sizeof(*ctx));
+}
+
+static void asr_aes_queue_task(unsigned long data)
+{
+	struct asr_geu_aes *dd = (struct asr_geu_aes *)data;
+
+	asr_aes_handle_queue(dd, NULL);
+}
+
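+/*
+ * Algorithm descriptors. Note that ASR_AES_HWKEY is OR'ed into
+ * max_keysize, presumably to tag the hwkey variants for callers aware
+ * of this convention.
+ */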
+static struct skcipher_alg aes_algs[] = {
+	/* AES - ECB, using hardware key, a.k.a. RKEK */
+	{
+		.base = {
+			.cra_name = "ecb(aes-hwkey)",
+			.cra_driver_name = "asr-ecb-aes-hwkey",
+			.cra_priority = 300,
+			.cra_flags = CRYPTO_ALG_ASYNC,
+			.cra_blocksize = AES_BLOCK_SIZE,
+			.cra_ctxsize = sizeof(struct asr_aes_ctx),
+			.cra_alignmask = 0xf,
+			.cra_module = THIS_MODULE,
+		},
+		.min_keysize = AES_MIN_KEY_SIZE,
+		.max_keysize = AES_MAX_KEY_SIZE | ASR_AES_HWKEY,
+		.setkey = asr_aes_set_hwkey,
+		.encrypt = asr_aes_ecb_encrypt,
+		.decrypt = asr_aes_ecb_decrypt,
+		.init = asr_aes_hwkey_init,
+		.exit = asr_aes_exit,
+	},
+	/* AES - CBC, using hardware key, a.k.a. RKEK */
+	{
+		.base = {
+			.cra_name = "cbc(aes-hwkey)",
+			.cra_driver_name = "asr-cbc-aes-hwkey",
+			.cra_priority = 300,
+			.cra_flags = CRYPTO_ALG_ASYNC,
+			.cra_blocksize = AES_BLOCK_SIZE,
+			.cra_ctxsize = sizeof(struct asr_aes_ctx),
+			.cra_alignmask = 0xf,
+			.cra_module = THIS_MODULE,
+		},
+		.min_keysize = AES_MIN_KEY_SIZE,
+		.max_keysize = AES_MAX_KEY_SIZE | ASR_AES_HWKEY,
+		.setkey = asr_aes_set_hwkey,
+		.encrypt = asr_aes_cbc_encrypt,
+		.decrypt = asr_aes_cbc_decrypt,
+		.init = asr_aes_hwkey_init,
+		.exit = asr_aes_exit,
+		.ivsize = AES_BLOCK_SIZE,
+	},
+};
+
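+/*
+ * Registration called from the GEU core at probe time: allocate the
+ * device state, query the RKEK provisioning state from OP-TEE, set up
+ * the queue and tasklet, and register the skcipher algorithms,
+ * unwinding on failure.
+ */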
+int asr_geu_aes_register(struct asr_geu_dev *geu_dd)
+{
+	int i, j, err;
+	struct asr_geu_aes *aes_dd = NULL;
+	struct device *dev = geu_dd->dev;
+	u32 rkek_state;
+
+	aes_dd = devm_kzalloc(dev, sizeof(struct asr_geu_aes), GFP_KERNEL);
+	if (!aes_dd)
+		return -ENOMEM;
+
+	asr_aes_local = aes_dd;
+	geu_dd->asr_aes = aes_dd;
+
+	err = asr_optee_aes_get_rkek_state(&rkek_state);
+	if (err) {
+		dev_warn(dev, "can't get hwkey(rkek) state\n");
+		aes_dd->rkek_burned = 0;
+	} else {
+		aes_dd->rkek_burned = rkek_state ? 1 : 0;
+		switch (rkek_state) {
+		case 2:
+			dev_warn(dev, "hwkey(rkek) burned, SW access not disabled\n");
+			break;
+		case 1:
+			dev_warn(dev, "hwkey(rkek) burned, SW access disabled\n");
+			break;
+		case 0:
+			dev_warn(dev, "hwkey(rkek) not burned\n");
+			break;
+		}
+	}
+
+	spin_lock_init(&aes_dd->lock);
+	tasklet_init(&aes_dd->queue_task, asr_aes_queue_task,
+					(unsigned long)aes_dd);
+
+	crypto_init_queue(&aes_dd->queue, ASR_AES_QUEUE_LENGTH);
+
+	for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
+		err = crypto_register_skcipher(&aes_algs[i]);
+		if (err) {
+			for (j = 0; j < i; j++)
+				crypto_unregister_skcipher(&aes_algs[j]);
+			return err;
+		}
+	}
+
+	return 0;
+}
+
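+/* Mirror of the register path: drop the algorithms and kill the tasklet. */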
+int asr_geu_aes_unregister(struct asr_geu_dev *geu_dd)
+{
+	int i;
+	struct asr_geu_aes *aes_dd = geu_dd->asr_aes;
+	struct device *dev = geu_dd->dev;
+
+	for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
+		crypto_unregister_skcipher(&aes_algs[i]);
+
+	tasklet_kill(&aes_dd->queue_task);
+
+	devm_kfree(dev, aes_dd);
+
+	return 0;
+}
+
+MODULE_DESCRIPTION("ASR HWKey AES driver with optee-os.");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Yu Zhang");
\ No newline at end of file