ASR_BASE
Change-Id: Icf3719cc0afe3eeb3edc7fa80a2eb5199ca9dda1
diff --git a/marvell/linux/drivers/crypto/asr/bcm_optee/asr-sha-optee.c b/marvell/linux/drivers/crypto/asr/bcm_optee/asr-sha-optee.c
new file mode 100644
index 0000000..fcb9bb7
--- /dev/null
+++ b/marvell/linux/drivers/crypto/asr/bcm_optee/asr-sha-optee.c
@@ -0,0 +1,1119 @@
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/clk-provider.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/hw_random.h>
+#include <linux/platform_device.h>
+#include <linux/scatterlist.h>
+#include <crypto/scatterwalk.h>
+#include <linux/of_device.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <crypto/hmac.h>
+#include <crypto/md5.h>
+#include <crypto/sha.h>
+
+#include "asr-sha-optee.h"
+
+static struct asr_bcm_sha *asr_sha_local = NULL;
+
+static struct teec_uuid pta_sha_uuid = ASR_SHA_ACCESS_UUID;
+
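+/*
+ * Open a session to the SHA pseudo-TA and invoke CMD_SHA_INIT. The algorithm
+ * and the address of the request context (presumably used by the secure side
+ * as an opaque stream identifier) are passed as value parameters. On invoke
+ * failure the session is closed again before returning.
+ */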
+static int asrbcm_optee_acquire_hash_init(struct asr_optee_sha_reqctx *ctx, struct teec_uuid *uuid, u32 cmd, u32 alg)
+{
+ struct tee_ioctl_invoke_arg invoke_arg;
+ struct tee_param params[2];
+ int ret = 0;
+
+ ret = asrbcm_optee_open_ta(&ctx->asrbcm_tee_ctx, uuid);
+ if (ret != 0) {
+ return ret;
+ }
+
+ memset(&invoke_arg, 0x0, sizeof(struct tee_ioctl_invoke_arg));
+ invoke_arg.func = cmd;
+ invoke_arg.session = ctx->asrbcm_tee_ctx.session;
+ invoke_arg.num_params = 2;
+
+ params[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT;
+ params[0].u.value.a = alg;
+
+ params[1].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT;
+	params[1].u.value.a = (unsigned long)ctx;
+
+ ret = tee_client_invoke_func(ctx->asrbcm_tee_ctx.tee_ctx, &invoke_arg, params);
+ if (ret != 0) {
+ goto exit;
+ } else if (invoke_arg.ret != 0) {
+ ret = -EIO;
+ goto exit;
+ }
+
+ return ret;
+
+exit:
+ asrbcm_optee_close_ta(&ctx->asrbcm_tee_ctx);
+ return ret;
+}
+
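+/*
+ * Invoke CMD_SHA_UPDATE on the SHA pseudo-TA. The input data is copied into a
+ * freshly allocated OP-TEE shared-memory buffer handed to the TA as a memref
+ * parameter, with the request context address again passed as a value
+ * parameter. The shared memory is freed on both paths; the TA session is only
+ * closed on error.
+ */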
+static int asrbcm_optee_acquire_hash_update(struct asr_optee_sha_reqctx *ctx, struct teec_uuid *uuid, u32 cmd, \
+ u32 alg, uint8_t *in, u32 inlen)
+{
+ struct tee_ioctl_invoke_arg invoke_arg;
+ struct tee_param params[2];
+ int ret = 0;
+ struct tee_shm *shm = NULL;
+ u8 *pbuf = NULL;
+
+ memset(&invoke_arg, 0x0, sizeof(struct tee_ioctl_invoke_arg));
+ invoke_arg.func = cmd;
+ invoke_arg.session = ctx->asrbcm_tee_ctx.session;
+ invoke_arg.num_params = 2;
+
+	shm = tee_shm_alloc(ctx->asrbcm_tee_ctx.tee_ctx, inlen, TEE_SHM_MAPPED | TEE_SHM_DMA_BUF);
+	if (IS_ERR(shm)) {
+		/* no shared memory was allocated, so skip the exit path's free */
+		asrbcm_optee_close_ta(&ctx->asrbcm_tee_ctx);
+		return PTR_ERR(shm);
+	}
+
+	pbuf = tee_shm_get_va(shm, 0);
+	if (IS_ERR(pbuf)) {
+		ret = PTR_ERR(pbuf);
+		goto exit;
+	}
+	memcpy(pbuf, in, inlen);
+
+ params[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
+ params[0].u.memref.shm_offs = 0;
+ params[0].u.memref.size = inlen;
+ params[0].u.memref.shm = shm;
+
+ params[1].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT;
+	params[1].u.value.a = (unsigned long)ctx;
+
+ ret = tee_client_invoke_func(ctx->asrbcm_tee_ctx.tee_ctx, &invoke_arg, params);
+ if (ret != 0) {
+ goto exit;
+ } else if (invoke_arg.ret != 0) {
+ ret = -EIO;
+ goto exit;
+ }
+
+ tee_shm_free(shm);
+ return ret;
+
+exit:
+ tee_shm_free(shm);
+ asrbcm_optee_close_ta(&ctx->asrbcm_tee_ctx);
+ return ret;
+}
+
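+/*
+ * Invoke CMD_SHA_FINAL on the SHA pseudo-TA. A shared-memory buffer of outlen
+ * bytes receives the digest, which is then copied to 'out'. The shared memory
+ * is freed and the TA session is closed unconditionally, as this is the last
+ * step of a hash stream.
+ */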
+static int asrbcm_optee_acquire_hash_final(struct asr_optee_sha_reqctx *ctx, struct teec_uuid *uuid, u32 cmd, u32 alg, u8 *out, u8 outlen)
+{
+ struct tee_ioctl_invoke_arg invoke_arg;
+ struct tee_param params[2];
+ int ret = 0;
+ struct tee_shm *shm = NULL;
+ u8 *pbuf = NULL;
+
+ memset(&invoke_arg, 0x0, sizeof(struct tee_ioctl_invoke_arg));
+ invoke_arg.func = cmd;
+ invoke_arg.session = ctx->asrbcm_tee_ctx.session;
+ invoke_arg.num_params = 2;
+
+	shm = tee_shm_alloc(ctx->asrbcm_tee_ctx.tee_ctx, outlen, TEE_SHM_MAPPED | TEE_SHM_DMA_BUF);
+	if (IS_ERR(shm)) {
+		/* nothing to free yet, so do not take the exit path */
+		asrbcm_optee_close_ta(&ctx->asrbcm_tee_ctx);
+		return PTR_ERR(shm);
+	}
+
+ params[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT;
+ params[0].u.memref.shm_offs = 0;
+ params[0].u.memref.size = outlen;
+ params[0].u.memref.shm = shm;
+
+ params[1].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT;
+	params[1].u.value.a = (unsigned long)ctx;
+
+ ret = tee_client_invoke_func(ctx->asrbcm_tee_ctx.tee_ctx, &invoke_arg, params);
+ if (ret != 0) {
+ goto exit;
+ } else if (invoke_arg.ret != 0) {
+ ret = -EIO;
+ goto exit;
+ }
+
+	pbuf = tee_shm_get_va(shm, 0);
+	if (IS_ERR(pbuf)) {
+		ret = PTR_ERR(pbuf);
+		goto exit;
+	}
+	memcpy(out, pbuf, outlen);
+
+exit:
+ tee_shm_free(shm);
+ asrbcm_optee_close_ta(&ctx->asrbcm_tee_ctx);
+ return ret;
+}
+
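+/*
+ * Standard crypto_queue handling: enqueue the new request (if any), and when
+ * the engine is idle dequeue the next one, notify a backlogged request with
+ * -EINPROGRESS, and start processing via ctx->start().
+ */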
+static int asr_sha_handle_queue(struct asr_bcm_sha *dd,
+ struct ahash_request *req)
+{
+ struct crypto_async_request *async_req, *backlog;
+ struct asr_sha_ctx *ctx;
+ unsigned long flags;
+ bool start_async;
+ int err = 0, ret = 0;
+
+ spin_lock_irqsave(&dd->lock, flags);
+ if (req)
+ ret = ahash_enqueue_request(&dd->queue, req);
+
+ if (SHA_FLAGS_BUSY & dd->flags) {
+ spin_unlock_irqrestore(&dd->lock, flags);
+ return ret;
+ }
+
+ backlog = crypto_get_backlog(&dd->queue);
+ async_req = crypto_dequeue_request(&dd->queue);
+ if (async_req)
+ dd->flags |= SHA_FLAGS_BUSY;
+
+ spin_unlock_irqrestore(&dd->lock, flags);
+
+ if (!async_req) {
+ return ret;
+ }
+
+ if (backlog)
+ backlog->complete(backlog, -EINPROGRESS);
+
+ ctx = crypto_tfm_ctx(async_req->tfm);
+
+ dd->req = ahash_request_cast(async_req);
+ start_async = (dd->req != req);
+ dd->is_async = start_async;
+ dd->force_complete = false;
+
+ /* WARNING: ctx->start() MAY change dd->is_async. */
+ err = ctx->start(dd);
+ return (start_async) ? ret : err;
+}
+
+static int asr_sha_enqueue(struct ahash_request *req, unsigned int op)
+{
+ struct asr_optee_sha_reqctx *optee_ctx = ahash_request_ctx(req);
+ struct asr_sha_reqctx *ctx = &optee_ctx->reqctx;
+
+ struct asr_bcm_sha *dd = ctx->dd;
+
+ ctx->op = op;
+
+ return asr_sha_handle_queue(dd, req);
+}
+
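+/* Copy the computed digest from the request context into req->result. */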
+static void asr_sha_copy_ready_hash(struct ahash_request *req)
+{
+ struct asr_optee_sha_reqctx *optee_ctx = ahash_request_ctx(req);
+ struct asr_sha_reqctx *ctx = &optee_ctx->reqctx;
+
+ if (!req->result)
+ return;
+
+ switch (ctx->flags & SHA_FLAGS_ALGO_MASK) {
+ case SHA_FLAGS_MD5:
+ memcpy(req->result, ctx->digest, MD5_DIGEST_SIZE);
+ break;
+ case SHA_FLAGS_SHA1:
+ memcpy(req->result, ctx->digest, SHA1_DIGEST_SIZE);
+ break;
+ case SHA_FLAGS_SHA224:
+ memcpy(req->result, ctx->digest, SHA224_DIGEST_SIZE);
+ break;
+ case SHA_FLAGS_SHA256:
+ memcpy(req->result, ctx->digest, SHA256_DIGEST_SIZE);
+ break;
+ case SHA_FLAGS_SHA384:
+ memcpy(req->result, ctx->digest, SHA384_DIGEST_SIZE);
+ break;
+ case SHA_FLAGS_SHA512:
+ memcpy(req->result, ctx->digest, SHA512_DIGEST_SIZE);
+ break;
+ default:
+ return;
+ }
+}
+
+static inline int asr_sha_complete(struct asr_bcm_sha *dd, int err)
+{
+ struct ahash_request *req = dd->req;
+ struct asr_optee_sha_reqctx *optee_ctx = ahash_request_ctx(req);
+ struct asr_sha_reqctx *ctx = &optee_ctx->reqctx;
+
+ dd->flags &= ~(SHA_FLAGS_BUSY);
+ ctx->flags &= ~(SHA_FLAGS_FINAL);
+
+ if ((dd->is_async || dd->force_complete) && req->base.complete)
+ req->base.complete(&req->base, err);
+
+ /* handle new request */
+ tasklet_schedule(&dd->queue_task);
+
+ return err;
+}
+
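+/*
+ * Copy data from the request scatterlist into the linear context buffer until
+ * the buffer is full or ctx->total bytes have been consumed. Zero-length sg
+ * entries in the middle of the list are skipped.
+ */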
+static size_t asr_sha_append_sg(struct asr_sha_reqctx *ctx)
+{
+ size_t count;
+
+ while ((ctx->bufcnt < ctx->buflen) && ctx->total) {
+ count = min(ctx->sg->length - ctx->offset, ctx->total);
+ count = min(count, ctx->buflen - ctx->bufcnt);
+
+		if (count == 0) {
+			/*
+			 * count is 0 either because the buffer is full or
+			 * because the sg length is 0. In the latter case,
+			 * check if there is another sg in the list: a 0 length
+			 * sg doesn't necessarily mean the end of the sg list.
+			 */
+ if ((ctx->sg->length == 0) && !sg_is_last(ctx->sg)) {
+ ctx->sg = sg_next(ctx->sg);
+ continue;
+ } else {
+ break;
+ }
+ }
+
+ scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, ctx->sg,
+ ctx->offset, count, 0);
+
+ ctx->bufcnt += count;
+ ctx->offset += count;
+ ctx->total -= count;
+
+ if (ctx->offset == ctx->sg->length) {
+ ctx->sg = sg_next(ctx->sg);
+ if (ctx->sg)
+ ctx->offset = 0;
+ else
+ ctx->total = 0;
+ }
+ }
+
+ return 0;
+}
+
+static int asr_sha_buff_init(struct asr_bcm_sha *dd, uint32_t len)
+{
+ struct ahash_request *req = dd->req;
+ struct asr_optee_sha_reqctx *optee_ctx = ahash_request_ctx(req);
+ struct asr_sha_reqctx *ctx = &optee_ctx->reqctx;
+
+ ctx->buffer = (void *)__get_free_pages(GFP_KERNEL, get_order(len));
+ if (!ctx->buffer) {
+ dev_err(dd->dev, "unable to alloc pages.\n");
+ return -ENOMEM;
+ }
+
+ ctx->buflen = PAGE_SIZE << get_order(len);
+
+ return 0;
+}
+
+static void asr_sha_buff_cleanup(struct asr_bcm_sha *dd, uint32_t len)
+{
+ struct ahash_request *req = dd->req;
+ struct asr_optee_sha_reqctx *optee_ctx = ahash_request_ctx(req);
+ struct asr_sha_reqctx *ctx = &optee_ctx->reqctx;
+
+ free_pages((unsigned long)ctx->buffer, get_order(len));
+ ctx->buflen = 0;
+}
+
+static int sha_init_req(struct asr_optee_sha_reqctx *optee_ctx)
+{
+ int ret = 0;
+ struct asr_sha_reqctx *ctx = &optee_ctx->reqctx;
+
+ /* hardware: hash init */
+ ret = asrbcm_optee_acquire_hash_init(optee_ctx, &pta_sha_uuid, \
+ CMD_SHA_INIT, ctx->md.alg);
+ if (ret)
+ return -EINVAL;
+ return 0;
+}
+
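+/*
+ * Gather the pending scatterlist data into a temporary page buffer and push
+ * it to the TA with CMD_SHA_UPDATE. The buffer is sized from ctx->total and
+ * freed again once the TA call returns.
+ */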
+static int sha_update_req(struct asr_optee_sha_reqctx *optee_ctx)
+{
+ int ret = 0;
+ int bufcnt;
+ uint8_t *pdata;
+ struct asr_sha_reqctx *ctx = &optee_ctx->reqctx;
+ uint32_t buflen = ctx->total;
+
+ ret = asr_sha_buff_init(ctx->dd, ctx->total);
+ if (ret)
+ return -ENOMEM;
+
+ asr_sha_append_sg(ctx);
+ bufcnt = ctx->bufcnt;
+ ctx->bufcnt = 0;
+
+ pdata = (uint8_t *)ctx->buffer;
+
+	/* hardware: hash process */
+ ret = asrbcm_optee_acquire_hash_update(optee_ctx, &pta_sha_uuid, \
+ CMD_SHA_UPDATE, ctx->md.alg, pdata, bufcnt);
+ if (ret)
+ ret = -EINVAL;
+
+ asr_sha_buff_cleanup(ctx->dd, buflen);
+ return ret;
+}
+
+static void sha_finish_req(struct asr_optee_sha_reqctx *optee_ctx, int *err)
+{
+ struct asr_sha_reqctx *ctx = &optee_ctx->reqctx;
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(ctx->dd->req);
+ uint8_t *hash = (uint8_t *)ctx->digest;
+ uint32_t outlen = crypto_ahash_digestsize(tfm);
+
+ if (!(*err) && (ctx->flags & SHA_FLAGS_FINAL)) {
+ *err = asrbcm_optee_acquire_hash_final(optee_ctx, &pta_sha_uuid, CMD_SHA_FINAL, \
+ ctx->md.alg, (uint8_t *)hash, outlen);
+ ctx->flags &= (~SHA_FLAGS_FINAL);
+ asr_sha_copy_ready_hash(ctx->dd->req);
+ } else {
+ ctx->flags |= SHA_FLAGS_ERROR;
+ }
+}
+
+static void sha_next_req(struct asr_optee_sha_reqctx *optee_ctx, int *err)
+{
+ struct asr_sha_reqctx *ctx = &optee_ctx->reqctx;
+
+ if (likely(!(*err) && (SHA_FLAGS_FINAL & ctx->flags)))
+ sha_finish_req(optee_ctx, err);
+
+ (void)asr_sha_complete(ctx->dd, *err);
+}
+
+static int asr_sha_done(struct asr_bcm_sha *dd);
+
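+/*
+ * Request processing entry point called from the queue handler: performs the
+ * deferred CMD_SHA_INIT if flagged, then the UPDATE or FINAL step for the
+ * current request, and finally completes the request and reschedules the
+ * queue via sha_next_req()/asr_sha_complete().
+ */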
+static int asr_sha_start(struct asr_bcm_sha *dd)
+{
+ int err = 0;
+ struct ahash_request *req = dd->req;
+ struct asr_optee_sha_reqctx *optee_ctx = ahash_request_ctx(req);
+ struct asr_sha_reqctx *ctx = &optee_ctx->reqctx;
+
+ mutex_lock(&dd->queue_lock);
+
+ dd->resume = asr_sha_done;
+
+ if ((ctx->flags & SHA_FLAGS_INIT)) {
+ err = sha_init_req(optee_ctx);
+ ctx->flags &= (~SHA_FLAGS_INIT);
+ }
+
+ if (!err) {
+ if (ctx->op == SHA_OP_UPDATE) {
+ err = sha_update_req(optee_ctx);
+ if (!err && (ctx->flags & SHA_FLAGS_FINUP))
+ /* no final() after finup() */
+ sha_finish_req(optee_ctx, &err);
+ } else if (ctx->op == SHA_OP_FINAL) {
+ sha_finish_req(optee_ctx, &err);
+ }
+ }
+
+ if (unlikely(err != -EINPROGRESS))
+ /* Task will not finish it, so do it here */
+ sha_next_req(optee_ctx, &err);
+
+ mutex_unlock(&dd->queue_lock);
+ return err;
+}
+
+static int asr_sha_cra_init(struct crypto_tfm *tfm)
+{
+ struct asr_sha_ctx *ctx = crypto_tfm_ctx(tfm);
+ crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+ sizeof(struct asr_optee_sha_reqctx));
+ ctx->start = asr_sha_start;
+
+ return 0;
+}
+
+static void asr_sha_cra_exit(struct crypto_tfm *tfm)
+{
+ struct asr_sha_ctx *ctx = crypto_tfm_ctx(tfm);
+ memset(ctx, 0, sizeof(*ctx));
+}
+
+static inline void asr_sha_get(struct asr_bcm_sha *dd)
+{
+ mutex_lock(&dd->sha_lock);
+}
+
+static inline void asr_sha_put(struct asr_bcm_sha *dd)
+{
+	if (mutex_is_locked(&dd->sha_lock))
+ mutex_unlock(&dd->sha_lock);
+}
+
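+/*
+ * ahash .init: pick the TEE algorithm and block size from the tfm's digest
+ * size and flag the request context so that the first queued operation issues
+ * CMD_SHA_INIT to the TA.
+ */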
+static int asr_sha_init(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct asr_optee_sha_reqctx *optee_ctx = ahash_request_ctx(req);
+ struct asr_sha_reqctx *ctx = &optee_ctx->reqctx;
+ struct asr_bcm_sha *dd = asr_sha_local;
+
+ asr_sha_get(dd);
+
+ ctx->dd = dd;
+ memset(&ctx->md, 0, sizeof(ctx->md));
+ ctx->flags = 0;
+
+ switch (crypto_ahash_digestsize(tfm)) {
+ case MD5_DIGEST_SIZE:
+ ctx->flags |= SHA_FLAGS_MD5;
+ ctx->md.alg = TEE_ALG_MD5;
+ ctx->md.block_size = MD5_HMAC_BLOCK_SIZE;
+ break;
+ case SHA1_DIGEST_SIZE:
+ ctx->flags |= SHA_FLAGS_SHA1;
+ ctx->md.alg = TEE_ALG_SHA1;
+ ctx->md.block_size = SHA1_BLOCK_SIZE;
+ break;
+ case SHA224_DIGEST_SIZE:
+ ctx->flags |= SHA_FLAGS_SHA224;
+ ctx->md.alg = TEE_ALG_SHA224;
+ ctx->md.block_size = SHA224_BLOCK_SIZE;
+ break;
+ case SHA256_DIGEST_SIZE:
+ ctx->flags |= SHA_FLAGS_SHA256;
+ ctx->md.alg = TEE_ALG_SHA256;
+ ctx->md.block_size = SHA256_BLOCK_SIZE;
+ break;
+ case SHA384_DIGEST_SIZE:
+ ctx->flags |= SHA_FLAGS_SHA384;
+ ctx->md.alg = TEE_ALG_SHA384;
+ ctx->md.block_size = SHA384_BLOCK_SIZE;
+ break;
+ case SHA512_DIGEST_SIZE:
+ ctx->flags |= SHA_FLAGS_SHA512;
+ ctx->md.alg = TEE_ALG_SHA512;
+ ctx->md.block_size = SHA512_BLOCK_SIZE;
+ break;
+ default:
+ asr_sha_put(dd);
+ return -EINVAL;
+ }
+
+ ctx->bufcnt = 0;
+ ctx->flags |= SHA_FLAGS_INIT;
+
+ asr_sha_put(dd);
+ return 0;
+}
+
+static int asr_sha_update(struct ahash_request *req)
+{
+ int ret = 0;
+ struct asr_optee_sha_reqctx *optee_ctx = ahash_request_ctx(req);
+ struct asr_sha_reqctx *ctx = &optee_ctx->reqctx;
+
+ asr_sha_get(ctx->dd);
+ ctx->total = req->nbytes;
+ ctx->sg = req->src;
+ ctx->offset = 0;
+
+ ret = asr_sha_enqueue(req, SHA_OP_UPDATE);
+
+ asr_sha_put(ctx->dd);
+ return ret;
+}
+
+static int asr_sha_final(struct ahash_request *req)
+{
+ int ret = 0;
+ struct asr_optee_sha_reqctx *optee_ctx = ahash_request_ctx(req);
+ struct asr_sha_reqctx *ctx = &optee_ctx->reqctx;
+
+ asr_sha_get(ctx->dd);
+ ctx->flags |= SHA_FLAGS_FINAL;
+ if (ctx->flags & SHA_FLAGS_ERROR) {
+ asr_sha_put(ctx->dd);
+ return 0; /* uncompleted hash is not needed */
+ }
+ ret = asr_sha_enqueue(req, SHA_OP_FINAL);
+
+ asr_sha_put(ctx->dd);
+ return ret;
+}
+
+static int asr_sha_finup(struct ahash_request *req)
+{
+ struct asr_optee_sha_reqctx *optee_ctx = ahash_request_ctx(req);
+ struct asr_sha_reqctx *ctx = &optee_ctx->reqctx;
+ int err1, err2;
+
+ ctx->flags |= SHA_FLAGS_FINUP;
+
+ err1 = asr_sha_update(req);
+ if (err1 == -EINPROGRESS ||
+ (err1 == -EBUSY && (ahash_request_flags(req) &
+ CRYPTO_TFM_REQ_MAY_BACKLOG))) {
+ asr_sha_put(ctx->dd);
+ return err1;
+ }
+	/*
+	 * final() always has to be called to clean up resources,
+	 * even if update() failed, except in the -EINPROGRESS case.
+	 */
+ err2 = asr_sha_final(req);
+
+ return err1 ?: err2;
+}
+
+static int asr_sha_digest(struct ahash_request *req)
+{
+ return asr_sha_init(req) ?: asr_sha_finup(req);
+}
+
+static int asr_sha_export(struct ahash_request *req, void *out)
+{
+ const struct asr_optee_sha_reqctx *ctx = ahash_request_ctx(req);
+
+ memcpy(out, ctx, sizeof(*ctx));
+ return 0;
+}
+
+static int asr_sha_import(struct ahash_request *req, const void *in)
+{
+ struct asr_optee_sha_reqctx *ctx = ahash_request_ctx(req);
+
+ memcpy(ctx, in, sizeof(*ctx));
+ return 0;
+}
+
+static struct ahash_alg sha_algs[] = {
+ /* md5 */
+ {
+ .init = asr_sha_init,
+ .update = asr_sha_update,
+ .final = asr_sha_final,
+ .finup = asr_sha_finup,
+ .digest = asr_sha_digest,
+ .export = asr_sha_export,
+ .import = asr_sha_import,
+ .halg = {
+ .digestsize = MD5_DIGEST_SIZE,
+ .statesize = sizeof(struct asr_optee_sha_reqctx),
+ .base = {
+ .cra_name = "md5",
+ .cra_driver_name = "asr-md5",
+ .cra_priority = ASR_SHA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct asr_sha_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = asr_sha_cra_init,
+ .cra_exit = asr_sha_cra_exit,
+ }
+ }
+ },
+
+ /* sha1 */
+ {
+ .init = asr_sha_init,
+ .update = asr_sha_update,
+ .final = asr_sha_final,
+ .finup = asr_sha_finup,
+ .digest = asr_sha_digest,
+ .export = asr_sha_export,
+ .import = asr_sha_import,
+ .halg = {
+ .digestsize = SHA1_DIGEST_SIZE,
+ .statesize = sizeof(struct asr_optee_sha_reqctx),
+ .base = {
+ .cra_name = "sha1",
+ .cra_driver_name = "asr-sha1",
+ .cra_priority = ASR_SHA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct asr_sha_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = asr_sha_cra_init,
+ .cra_exit = asr_sha_cra_exit,
+ }
+ }
+ },
+
+ /* sha224 */
+ {
+ .init = asr_sha_init,
+ .update = asr_sha_update,
+ .final = asr_sha_final,
+ .finup = asr_sha_finup,
+ .digest = asr_sha_digest,
+ .export = asr_sha_export,
+ .import = asr_sha_import,
+ .halg = {
+ .digestsize = SHA224_DIGEST_SIZE,
+ .statesize = sizeof(struct asr_optee_sha_reqctx),
+ .base = {
+ .cra_name = "sha224",
+ .cra_driver_name = "asr-sha224",
+ .cra_priority = ASR_SHA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA224_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct asr_sha_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = asr_sha_cra_init,
+ .cra_exit = asr_sha_cra_exit,
+ }
+ }
+ },
+
+ /* sha256 */
+ {
+ .init = asr_sha_init,
+ .update = asr_sha_update,
+ .final = asr_sha_final,
+ .finup = asr_sha_finup,
+ .digest = asr_sha_digest,
+ .export = asr_sha_export,
+ .import = asr_sha_import,
+ .halg = {
+ .digestsize = SHA256_DIGEST_SIZE,
+ .statesize = sizeof(struct asr_optee_sha_reqctx),
+ .base = {
+ .cra_name = "sha256",
+ .cra_driver_name = "asr-sha256",
+ .cra_priority = ASR_SHA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct asr_sha_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = asr_sha_cra_init,
+ .cra_exit = asr_sha_cra_exit,
+ }
+ }
+ },
+
+ /* sha384 */
+ {
+ .init = asr_sha_init,
+ .update = asr_sha_update,
+ .final = asr_sha_final,
+ .finup = asr_sha_finup,
+ .digest = asr_sha_digest,
+ .export = asr_sha_export,
+ .import = asr_sha_import,
+ .halg = {
+ .digestsize = SHA384_DIGEST_SIZE,
+ .statesize = sizeof(struct asr_optee_sha_reqctx),
+ .base = {
+ .cra_name = "sha384",
+ .cra_driver_name = "asr-sha384",
+ .cra_priority = ASR_SHA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA384_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct asr_sha_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = asr_sha_cra_init,
+ .cra_exit = asr_sha_cra_exit,
+ }
+ }
+ },
+
+ /* sha512 */
+ {
+ .init = asr_sha_init,
+ .update = asr_sha_update,
+ .final = asr_sha_final,
+ .finup = asr_sha_finup,
+ .digest = asr_sha_digest,
+ .export = asr_sha_export,
+ .import = asr_sha_import,
+ .halg = {
+ .digestsize = SHA512_DIGEST_SIZE,
+ .statesize = sizeof(struct asr_optee_sha_reqctx),
+ .base = {
+ .cra_name = "sha512",
+ .cra_driver_name = "asr-sha512",
+ .cra_priority = ASR_SHA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA512_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct asr_sha_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = asr_sha_cra_init,
+ .cra_exit = asr_sha_cra_exit,
+ }
+ }
+ },
+};
+
+static void asr_sha_queue_task(unsigned long data)
+{
+ struct asr_bcm_sha *dd = (struct asr_bcm_sha *)data;
+
+ asr_sha_handle_queue(dd, NULL);
+}
+
+static int asr_sha_done(struct asr_bcm_sha *dd)
+{
+ int err = 0;
+ struct ahash_request *req = dd->req;
+ struct asr_optee_sha_reqctx *ctx = ahash_request_ctx(req);
+
+	sha_finish_req(ctx, &err);
+
+ return err;
+}
+
+static void asr_sha_done_task(unsigned long data)
+{
+ struct asr_bcm_sha *dd = (struct asr_bcm_sha *)data;
+
+ dd->is_async = true;
+ (void)dd->resume(dd);
+}
+
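+/*
+ * One-shot helper used by the self-test: run a full init/update/final
+ * sequence on the TA for the given algorithm and return the digest in 'out'.
+ */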
+static int hash_handle(int alg, uint8_t *in, uint32_t inlen, uint8_t *out)
+{
+ int ret = 0;
+ uint32_t outlen;
+ struct asr_optee_sha_reqctx ctx;
+
+ switch(alg) {
+ case TEE_ALG_SHA512:
+ outlen = HASH_LEN_SHA512;
+ break;
+ case TEE_ALG_SHA384:
+ outlen = HASH_LEN_SHA384;
+ break;
+ case TEE_ALG_SHA256:
+ outlen = HASH_LEN_SHA256;
+ break;
+ case TEE_ALG_SHA224:
+ outlen = HASH_LEN_SHA224;
+ break;
+ case TEE_ALG_SHA1:
+ outlen = HASH_LEN_SHA1;
+ break;
+ case TEE_ALG_MD5:
+ outlen = HASH_LEN_MD5;
+ break;
+ default:
+		printk("err: unsupported hash algorithm\n");
+ ret = -1;
+ goto exit;
+ }
+
+ ret = asrbcm_optee_acquire_hash_init(&ctx, &pta_sha_uuid, CMD_SHA_INIT, alg);
+ if (ret) {
+ ret = -1;
+ goto exit;
+ }
+
+ ret = asrbcm_optee_acquire_hash_update(&ctx, &pta_sha_uuid, CMD_SHA_UPDATE, alg, in, inlen);
+ if (ret) {
+ ret = -1;
+ goto exit;
+ }
+
+ ret = asrbcm_optee_acquire_hash_final(&ctx, &pta_sha_uuid, CMD_SHA_FINAL, alg, out, outlen);
+ if (ret) {
+ ret = -1;
+ goto exit;
+ }
+
+exit:
+ return ret;
+}
+
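+/*
+ * Known-answer test: hash "abc" with SHA-256 through the TA and compare the
+ * result against the expected digest. Used to gate algorithm registration.
+ */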
+static int tee_hwhash_func_verify(void)
+{
+ int ret = 0;
+ unsigned char out_sha256[32] = {0};
+ const struct {
+ const char *msg;
+ uint8_t hash[32];
+ } sha256_tests = {
+ "abc",
+ { 0xBA, 0x78, 0x16, 0xBF, 0x8F, 0x01,
+ 0xCF, 0xEA, 0x41, 0x41, 0x40, 0xDE,
+ 0x5D, 0xAE, 0x22, 0x23, 0xB0, 0x03,
+ 0x61, 0xA3, 0x96, 0x17, 0x7A, 0x9C,
+ 0xB4, 0x10, 0xFF, 0x61, 0xF2, 0x00,
+ 0x15, 0xAD
+ }
+ };
+
+ ret = hash_handle(TEE_ALG_SHA256, (uint8_t *)sha256_tests.msg, strlen(sha256_tests.msg), out_sha256);
+ if (ret)
+ return ret;
+
+ if (memcmp(out_sha256, sha256_tests.hash, sizeof(out_sha256))) {
+ return -1;
+ }
+
+ return 0;
+}
+
+// #define ASR_BCM_SHA_TEST
+
+#ifdef ASR_BCM_SHA_TEST
+static int bcm_sha_test(void);
+#endif
+
+int asr_bcm_sha_register(struct asr_bcm_dev *bcm_dd)
+{
+ int err, i, j;
+ struct asr_bcm_sha *sha_dd;
+
+ sha_dd = &bcm_dd->asr_sha;
+ sha_dd->dev = bcm_dd->dev;
+
+ asr_sha_local = sha_dd;
+
+ spin_lock_init(&sha_dd->lock);
+ mutex_init(&sha_dd->sha_lock);
+ mutex_init(&sha_dd->queue_lock);
+ tasklet_init(&sha_dd->done_task, asr_sha_done_task,
+ (unsigned long)sha_dd);
+ tasklet_init(&sha_dd->queue_task, asr_sha_queue_task,
+ (unsigned long)sha_dd);
+ crypto_init_queue(&sha_dd->queue, ASR_SHA_QUEUE_LENGTH);
+
+	/* don't register the SHA algorithms if the hash self-test fails in the TOS */
+ err = tee_hwhash_func_verify();
+ if (err)
+ return err;
+
+ for (i = 0; i < ARRAY_SIZE(sha_algs); i++) {
+ err = crypto_register_ahash(&sha_algs[i]);
+ if (err)
+ goto err_sha_algs;
+ }
+
+#ifdef ASR_BCM_SHA_TEST
+ bcm_sha_test();
+#endif
+
+ return 0;
+
+err_sha_algs:
+ for (j = 0; j < i; j++)
+ crypto_unregister_ahash(&sha_algs[j]);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(asr_bcm_sha_register);
+
+int asr_bcm_sha_unregister(struct asr_bcm_dev *bcm_dd)
+{
+ int i;
+ struct asr_bcm_sha *sha_dd = &bcm_dd->asr_sha;
+
+ for (i = 0; i < ARRAY_SIZE(sha_algs); i++)
+ crypto_unregister_ahash(&sha_algs[i]);
+
+ tasklet_kill(&sha_dd->queue_task);
+ tasklet_kill(&sha_dd->done_task);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(asr_bcm_sha_unregister);
+
+
+
+#ifdef ASR_BCM_SHA_TEST
+
+static int bcm_sha_test(void)
+{
+ int ret = 0;
+
+ const struct {
+ const char *msg;
+ uint8_t hash[20];
+ } sha1_tests[] = {
+ {
+ "abc",
+ { 0xa9, 0x99, 0x3e, 0x36, 0x47, 0x06,
+ 0x81, 0x6a, 0xba, 0x3e, 0x25, 0x71,
+ 0x78, 0x50, 0xc2, 0x6c, 0x9c, 0xd0,
+ 0xd8, 0x9d
+ }
+ },
+ {
+ "asjhsdjljfdsdjjkdfwyqeuwouzxkmcxjkmwqdsjklfdfjlkdfkfs" \
+ "fkjlfskjdflioherfjjfdjkfdnkfdfdojjodfjdfjflj;sljjlfkkl" \
+ "nfnkgbhhoigfhigfopojpfjojpoffkjlfskjdflioherfjjfdjkfdn" \
+ "kfdfdojjodfjdfjfljnfnkgbhhoigfhigfoponfnkgbhhoigfhigfopojpfjo",
+ {
+ 0x93, 0x84, 0x7f, 0x98, 0x22, 0x5e,
+ 0x6d, 0xf2, 0x09, 0x1c, 0xc9, 0xac,
+ 0xbb, 0x5d, 0x00, 0x2d, 0x64, 0x81,
+ 0xe3, 0xcd
+ }
+ },
+ {
+ "asjhsdjljfdsdjjkdfwyqeuwouzxkmcxjkmwqdsjklfdfjlkdfkfs" \
+ "fkjlfskjdflioherfjjfdjkfdnkfdfdojjodfjdfjflj;sljjlfkkl" \
+ "nfnkgbhhoigfhigfopojpfjojpoffkjlfskjdflioherfjjfdjkfdn" \
+ "kfdfdojjodfjdfjfljnfnkgbhhoigfhigfoponfnkgbhhoigfhigfopojpfjoewiroiowiod",
+ {
+ 0x6a, 0x66, 0xc2, 0x87, 0x84, 0x36,
+ 0x14, 0x90, 0x99, 0x03, 0x90, 0xf0,
+ 0xaa, 0x7e, 0xbd, 0xc7, 0xdb, 0x38,
+ 0x54, 0x09
+ }
+ },
+ {
+ "asjhsdjljfdsdjjkdfwyqeuwouzxkmcxjkmwqds"
+ "jklfdfjlkdfkfsfkjlfskjdflioherfjjfdjkfd"
+ "nkfdfdojjodfjdfjflj;sljjlfkklnfnkgbhhoi"
+ "gfhigfopojpfjojpoffkjlfskjdflioherfjjfd"
+ "jkfdnkfdfdojjodfjdfjfljnfnkgbhhoigfhigf"
+ "oponfnkgbhhoigfhigfopojpfjoewiroiowiods"
+ "djkisijdknknkskdnknflnnesniewinoinknmdn"
+ "kknknsdnjjfsnnkfnkknslnklknfnknkflksnlk"
+ "lskldklklklnmlflmlmlfmlfml",
+ {
+ 0xc4, 0x53, 0xca, 0x24, 0xfa, 0xe5,
+ 0x39, 0x53, 0x08, 0x8c, 0x57, 0x1a,
+ 0x96, 0xe9, 0x64, 0x7f, 0xd5, 0xf9,
+ 0x13, 0x91
+ }
+ }
+ };
+
+ struct asr_optee_sha_reqctx ctx1;
+ struct asr_optee_sha_reqctx ctx2;
+ struct asr_optee_sha_reqctx ctx3;
+ struct asr_optee_sha_reqctx ctx4;
+ unsigned char out_sha1_1[20] = {0};
+ unsigned char out_sha1_2[20] = {0};
+ unsigned char out_sha1_3[20] = {0};
+ unsigned char out_sha1_4[20] = {0};
+
+ ret = asrbcm_optee_acquire_hash_init(&ctx1, &pta_sha_uuid, CMD_SHA_INIT, TEE_ALG_SHA1);
+ if (ret) {
+ return ret;
+ }
+
+ ret = asrbcm_optee_acquire_hash_init(&ctx2, &pta_sha_uuid, CMD_SHA_INIT, TEE_ALG_SHA1);
+ if (ret) {
+ return ret;
+ }
+
+ ret = asrbcm_optee_acquire_hash_update(&ctx1, &pta_sha_uuid, CMD_SHA_UPDATE, TEE_ALG_SHA1,
+ (uint8_t *)sha1_tests[0].msg, strlen(sha1_tests[0].msg));
+ if (ret) {
+ return ret;
+ }
+
+ ret = asrbcm_optee_acquire_hash_init(&ctx3, &pta_sha_uuid, CMD_SHA_INIT, TEE_ALG_SHA1);
+ if (ret) {
+ return ret;
+ }
+
+ ret = asrbcm_optee_acquire_hash_update(&ctx2, &pta_sha_uuid, CMD_SHA_UPDATE, TEE_ALG_SHA1,
+			(uint8_t *)sha1_tests[1].msg + 10, strlen(sha1_tests[1].msg) - 10);
+ if (ret) {
+ return ret;
+ }
+
+ ret = asrbcm_optee_acquire_hash_final(&ctx1, &pta_sha_uuid, CMD_SHA_FINAL, TEE_ALG_SHA1,
+ out_sha1_1, sizeof(out_sha1_1));
+ if (ret) {
+ return ret;
+ }
+
+ ret = asrbcm_optee_acquire_hash_update(&ctx3, &pta_sha_uuid, CMD_SHA_UPDATE, TEE_ALG_SHA1,
+ (uint8_t *)sha1_tests[2].msg, 25);
+ if (ret) {
+ return ret;
+ }
+
+ ret = asrbcm_optee_acquire_hash_init(&ctx4, &pta_sha_uuid, CMD_SHA_INIT, TEE_ALG_SHA1);
+ if (ret) {
+ return ret;
+ }
+
+ ret = asrbcm_optee_acquire_hash_final(&ctx2, &pta_sha_uuid, CMD_SHA_FINAL, TEE_ALG_SHA1,
+ out_sha1_2, sizeof(out_sha1_2));
+ if (ret) {
+ return ret;
+ }
+
+ ret = asrbcm_optee_acquire_hash_update(&ctx3, &pta_sha_uuid, CMD_SHA_UPDATE, TEE_ALG_SHA1,
+			(uint8_t *)sha1_tests[2].msg + 25, strlen(sha1_tests[2].msg) - 25);
+ if (ret) {
+ return ret;
+ }
+
+ ret = asrbcm_optee_acquire_hash_final(&ctx3, &pta_sha_uuid, CMD_SHA_FINAL, TEE_ALG_SHA1,
+ out_sha1_3, sizeof(out_sha1_3));
+ if (ret) {
+ return ret;
+ }
+
+ ret = asrbcm_optee_acquire_hash_update(&ctx4, &pta_sha_uuid, CMD_SHA_UPDATE, TEE_ALG_SHA1,
+ (uint8_t *)sha1_tests[3].msg, 43);
+ if (ret) {
+ return ret;
+ }
+ ret = asrbcm_optee_acquire_hash_update(&ctx4, &pta_sha_uuid, CMD_SHA_UPDATE, TEE_ALG_SHA1,
+			(uint8_t *)sha1_tests[3].msg + 43, strlen(sha1_tests[3].msg) - 43);
+ if (ret) {
+ return ret;
+ }
+
+ ret = asrbcm_optee_acquire_hash_final(&ctx4, &pta_sha_uuid, CMD_SHA_FINAL, TEE_ALG_SHA1,
+ out_sha1_4, sizeof(out_sha1_4));
+ if (ret) {
+ return ret;
+ }
+
+ if (memcmp(out_sha1_1, sha1_tests[0].hash, sizeof(out_sha1_1))) {
+ printk("sha1 test 0 failed");
+ } else {
+ printk("sha1 test 0 pass");
+ }
+ if (memcmp(out_sha1_2, sha1_tests[1].hash, sizeof(out_sha1_2))) {
+ printk("sha1 test 1 failed");
+ } else {
+ printk("sha1 test 1 pass");
+ }
+ if (memcmp(out_sha1_3, sha1_tests[2].hash, sizeof(out_sha1_3))) {
+ printk("sha1 test 2 failed");
+ } else {
+ printk("sha1 test 2 pass");
+ }
+ if (memcmp(out_sha1_4, sha1_tests[3].hash, sizeof(out_sha1_4))) {
+ printk("sha1 test 3 failed");
+ } else {
+		printk("sha1 test 3 pass");
+ }
+
+ return 0;
+}
+#endif
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("wangyonggan <yongganwang@asrmicro.com>");
+MODULE_DESCRIPTION("ASR bcm sha driver");
\ No newline at end of file